Dataset columns (name, type, observed range across rows):
- problem_id: string, lengths 18 to 22
- source: string, 1 distinct value
- task_type: string, 1 distinct value
- in_source_id: string, lengths 13 to 58
- prompt: string, lengths 1.71k to 9.01k
- golden_diff: string, lengths 151 to 4.94k
- verification_info: string, lengths 465 to 11.3k
- num_tokens_prompt: int64, values 557 to 2.05k
- num_tokens_diff: int64, values 48 to 1.02k
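For reference, a minimal sketch of how rows with this schema could be loaded and filtered with the Hugging Face `datasets` library. The Hub ID below simply reuses the `source` value from the rows (`rasdani/github-patches`) and a `train` split name; both are assumptions, so substitute the actual dataset ID or a local path.

```python
from datasets import load_dataset

# Assumption: the split is published under the same name as the `source`
# column below; replace with the real Hub ID or a local file path.
ds = load_dataset("rasdani/github-patches", split="train")

print(ds.column_names)  # problem_id, source, task_type, in_source_id, ...

row = ds[0]
print(row["problem_id"], row["in_source_id"], row["num_tokens_prompt"])

# Example: keep only rows whose prompt fits a small context budget.
short_prompts = ds.filter(lambda r: r["num_tokens_prompt"] <= 1024)
print(len(short_prompts), "rows with prompts of at most 1024 tokens")
```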
problem_id: gh_patches_debug_3443
source: rasdani/github-patches
task_type: git_diff
in_source_id: crytic__slither-1971
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Suggestion to make the recommendation in the `msgvalue-inside-a-loop` detector wiki clearer ### Describe the desired feature Reference: https://github.com/crytic/slither/wiki/Detector-Documentation#msgvalue-inside-a-loop This is the current recommendation for the `msgvalue-inside-a-loop` detector: ```solidity Track msg.value through a local variable and decrease its amount on every iteration/usage. ``` This is a vague recommendation - it does not address the issue head-on, i.e., what mathematical technique the developer should use to remedy the bug. My suggestions: 1. Recommend dividing by the number of `receivers` 2. Recommend providing an explicit array of amounts alongside the `receivers` array, and check that the sum of each element in that array matches `msg.value` </issue> <code> [start of slither/detectors/statements/msg_value_in_loop.py] 1 from typing import List, Optional 2 from slither.core.cfg.node import NodeType, Node 3 from slither.detectors.abstract_detector import ( 4 AbstractDetector, 5 DetectorClassification, 6 DETECTOR_INFO, 7 ) 8 from slither.slithir.operations import InternalCall 9 from slither.core.declarations import SolidityVariableComposed, Contract 10 from slither.utils.output import Output 11 12 13 def detect_msg_value_in_loop(contract: Contract) -> List[Node]: 14 results: List[Node] = [] 15 for f in contract.functions_entry_points: 16 if f.is_implemented and f.payable: 17 msg_value_in_loop(f.entry_point, 0, [], results) 18 return results 19 20 21 def msg_value_in_loop( 22 node: Optional[Node], in_loop_counter: int, visited: List[Node], results: List[Node] 23 ) -> None: 24 25 if node is None: 26 return 27 28 if node in visited: 29 return 30 # shared visited 31 visited.append(node) 32 33 if node.type == NodeType.STARTLOOP: 34 in_loop_counter += 1 35 elif node.type == NodeType.ENDLOOP: 36 in_loop_counter -= 1 37 38 for ir in node.all_slithir_operations(): 39 if in_loop_counter > 0 and SolidityVariableComposed("msg.value") in ir.read: 40 results.append(ir.node) 41 if isinstance(ir, (InternalCall)): 42 msg_value_in_loop(ir.function.entry_point, in_loop_counter, visited, results) 43 44 for son in node.sons: 45 msg_value_in_loop(son, in_loop_counter, visited, results) 46 47 48 class MsgValueInLoop(AbstractDetector): 49 """ 50 Detect the use of msg.value inside a loop 51 """ 52 53 ARGUMENT = "msg-value-loop" 54 HELP = "msg.value inside a loop" 55 IMPACT = DetectorClassification.HIGH 56 CONFIDENCE = DetectorClassification.MEDIUM 57 58 WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation/#msgvalue-inside-a-loop" 59 60 WIKI_TITLE = "`msg.value` inside a loop" 61 WIKI_DESCRIPTION = "Detect the use of `msg.value` inside a loop." 62 63 # region wiki_exploit_scenario 64 WIKI_EXPLOIT_SCENARIO = """ 65 ```solidity 66 contract MsgValueInLoop{ 67 68 mapping (address => uint256) balances; 69 70 function bad(address[] memory receivers) public payable { 71 for (uint256 i=0; i < receivers.length; i++) { 72 balances[receivers[i]] += msg.value; 73 } 74 } 75 76 } 77 ``` 78 """ 79 # endregion wiki_exploit_scenario 80 81 WIKI_RECOMMENDATION = """ 82 Track msg.value through a local variable and decrease its amount on every iteration/usage. 
83 """ 84 85 def _detect(self) -> List[Output]: 86 """""" 87 results: List[Output] = [] 88 for c in self.compilation_unit.contracts_derived: 89 values = detect_msg_value_in_loop(c) 90 for node in values: 91 func = node.function 92 93 info: DETECTOR_INFO = [func, " use msg.value in a loop: ", node, "\n"] 94 res = self.generate_result(info) 95 results.append(res) 96 97 return results 98 [end of slither/detectors/statements/msg_value_in_loop.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/slither/detectors/statements/msg_value_in_loop.py b/slither/detectors/statements/msg_value_in_loop.py --- a/slither/detectors/statements/msg_value_in_loop.py +++ b/slither/detectors/statements/msg_value_in_loop.py @@ -79,7 +79,7 @@ # endregion wiki_exploit_scenario WIKI_RECOMMENDATION = """ -Track msg.value through a local variable and decrease its amount on every iteration/usage. +Provide an explicit array of amounts alongside the receivers array, and check that the sum of all amounts matches `msg.value`. """ def _detect(self) -> List[Output]:
{"golden_diff": "diff --git a/slither/detectors/statements/msg_value_in_loop.py b/slither/detectors/statements/msg_value_in_loop.py\n--- a/slither/detectors/statements/msg_value_in_loop.py\n+++ b/slither/detectors/statements/msg_value_in_loop.py\n@@ -79,7 +79,7 @@\n # endregion wiki_exploit_scenario\n \n WIKI_RECOMMENDATION = \"\"\"\n-Track msg.value through a local variable and decrease its amount on every iteration/usage.\n+Provide an explicit array of amounts alongside the receivers array, and check that the sum of all amounts matches `msg.value`.\n \"\"\"\n \n def _detect(self) -> List[Output]:\n", "issue": "Suggestion to make the recommendation in the `msgvalue-inside-a-loop` detector wiki clearer\n### Describe the desired feature\n\nReference: https://github.com/crytic/slither/wiki/Detector-Documentation#msgvalue-inside-a-loop\r\n\r\nThis is the current recommendation for the `msgvalue-inside-a-loop` detector:\r\n\r\n```solidity\r\nTrack msg.value through a local variable and decrease its amount on every iteration/usage.\r\n```\r\n\r\nThis is a vague recommendation - it does not address the issue head-on, i.e., what mathematical technique the developer should use to remedy the bug.\r\n\r\nMy suggestions:\r\n\r\n1. Recommend dividing by the number of `receivers`\r\n2. Recommend providing an explicit array of amounts alongside the `receivers` array, and check that the sum of each element in that array matches `msg.value`\n", "before_files": [{"content": "from typing import List, Optional\nfrom slither.core.cfg.node import NodeType, Node\nfrom slither.detectors.abstract_detector import (\n AbstractDetector,\n DetectorClassification,\n DETECTOR_INFO,\n)\nfrom slither.slithir.operations import InternalCall\nfrom slither.core.declarations import SolidityVariableComposed, Contract\nfrom slither.utils.output import Output\n\n\ndef detect_msg_value_in_loop(contract: Contract) -> List[Node]:\n results: List[Node] = []\n for f in contract.functions_entry_points:\n if f.is_implemented and f.payable:\n msg_value_in_loop(f.entry_point, 0, [], results)\n return results\n\n\ndef msg_value_in_loop(\n node: Optional[Node], in_loop_counter: int, visited: List[Node], results: List[Node]\n) -> None:\n\n if node is None:\n return\n\n if node in visited:\n return\n # shared visited\n visited.append(node)\n\n if node.type == NodeType.STARTLOOP:\n in_loop_counter += 1\n elif node.type == NodeType.ENDLOOP:\n in_loop_counter -= 1\n\n for ir in node.all_slithir_operations():\n if in_loop_counter > 0 and SolidityVariableComposed(\"msg.value\") in ir.read:\n results.append(ir.node)\n if isinstance(ir, (InternalCall)):\n msg_value_in_loop(ir.function.entry_point, in_loop_counter, visited, results)\n\n for son in node.sons:\n msg_value_in_loop(son, in_loop_counter, visited, results)\n\n\nclass MsgValueInLoop(AbstractDetector):\n \"\"\"\n Detect the use of msg.value inside a loop\n \"\"\"\n\n ARGUMENT = \"msg-value-loop\"\n HELP = \"msg.value inside a loop\"\n IMPACT = DetectorClassification.HIGH\n CONFIDENCE = DetectorClassification.MEDIUM\n\n WIKI = \"https://github.com/crytic/slither/wiki/Detector-Documentation/#msgvalue-inside-a-loop\"\n\n WIKI_TITLE = \"`msg.value` inside a loop\"\n WIKI_DESCRIPTION = \"Detect the use of `msg.value` inside a loop.\"\n\n # region wiki_exploit_scenario\n WIKI_EXPLOIT_SCENARIO = \"\"\"\n```solidity\ncontract MsgValueInLoop{\n\n mapping (address => uint256) balances;\n\n function bad(address[] memory receivers) public payable {\n for (uint256 i=0; i < receivers.length; i++) {\n 
balances[receivers[i]] += msg.value;\n }\n }\n\n}\n```\n\"\"\"\n # endregion wiki_exploit_scenario\n\n WIKI_RECOMMENDATION = \"\"\"\nTrack msg.value through a local variable and decrease its amount on every iteration/usage.\n\"\"\"\n\n def _detect(self) -> List[Output]:\n \"\"\"\"\"\"\n results: List[Output] = []\n for c in self.compilation_unit.contracts_derived:\n values = detect_msg_value_in_loop(c)\n for node in values:\n func = node.function\n\n info: DETECTOR_INFO = [func, \" use msg.value in a loop: \", node, \"\\n\"]\n res = self.generate_result(info)\n results.append(res)\n\n return results\n", "path": "slither/detectors/statements/msg_value_in_loop.py"}]}
num_tokens_prompt: 1,601
num_tokens_diff: 147
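The `verification_info` string above is JSON bundling the original issue, the reference `golden_diff`, and the pre-patch file contents under `before_files`. This dump does not show how the dataset authors verify candidate patches, so the sketch below is only one plausible harness, stated as an assumption: it materializes `before_files` into a throwaway git repository and checks that a model-produced patch applies cleanly with `git apply`. The names `check_candidate_patch` and `candidate_patch` are illustrative, not part of the dataset.

```python
import json
import subprocess
import tempfile
from pathlib import Path


def check_candidate_patch(verification_info: str, candidate_patch: str) -> bool:
    """Check that a candidate patch applies to the stored pre-patch files.

    Only tests that `git apply --check` succeeds; comparing the applied
    result against `golden_diff` or running tests is left to the harness.
    """
    info = json.loads(verification_info)
    with tempfile.TemporaryDirectory() as tmp:
        repo = Path(tmp)
        subprocess.run(["git", "init", "-q"], cwd=repo, check=True)
        for entry in info["before_files"]:
            target = repo / entry["path"]
            target.parent.mkdir(parents=True, exist_ok=True)
            target.write_text(entry["content"])
        subprocess.run(["git", "add", "-A"], cwd=repo, check=True)
        # `git apply` reads the patch from stdin when no file is given.
        result = subprocess.run(
            ["git", "apply", "--check"],
            cwd=repo,
            input=candidate_patch,
            text=True,
        )
        return result.returncode == 0
```

A fuller check would drop `--check`, actually apply the patch, and diff the working tree against `golden_diff` (or run the project's tests), but that goes beyond what the stored fields alone guarantee.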
problem_id: gh_patches_debug_39882
source: rasdani/github-patches
task_type: git_diff
in_source_id: DDMAL__CantusDB-223
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Promote people to superuser in Django admin Replace the current `is_staff` checkbox with a `is_superuser` checkbox. This way, the project manager can give superuser access (access to Django admin interface) to people without touching the command line. Project managers can access the Cantus editing functionalities (within the Cantus DB website, not the Admin interface). Superusers can access the Django admin interface. If we want someone to have access to both, they need to be in the `project manager` group and have `is_superuser` as `True`. </issue> <code> [start of django/cantusdb_project/main_app/admin.py] 1 from django.contrib import admin 2 from main_app.models import * 3 # Register your models here. 4 admin.site.register(Chant) 5 admin.site.register(Feast) 6 admin.site.register(Genre) 7 admin.site.register(Indexer) 8 admin.site.register(Notation) 9 admin.site.register(Office) 10 admin.site.register(Provenance) 11 admin.site.register(Segment) 12 admin.site.register(Sequence) 13 admin.site.register(Source) 14 [end of django/cantusdb_project/main_app/admin.py] [start of django/cantusdb_project/users/admin.py] 1 from django.contrib import admin 2 from .models import * 3 from django.contrib.auth.admin import UserAdmin as BaseUserAdmin 4 from django.contrib.auth.forms import UserCreationForm, UserChangeForm 5 6 # Register your models here. 7 8 class CustomUserCreationForm(UserCreationForm): 9 10 class Meta: 11 model = User 12 fields = ( 13 'email', 'password', 'is_active', 'date_joined', 'last_login', 14 'full_name', 'first_name', 'last_name', 'institution', 'city', 15 'country', 'website', 'is_staff', 'groups', 'sources_user_can_edit', 16 ) 17 18 19 class CustomUserChangeForm(UserChangeForm): 20 21 class Meta: 22 model = User 23 fields = ( 24 'email', 'password', 'is_active', 'date_joined', 'last_login', 25 'full_name', 'first_name', 'last_name', 'institution', 'city', 26 'country', 'website', 'is_staff', 'groups', 'sources_user_can_edit', 27 ) 28 29 30 class UserAdmin(BaseUserAdmin): 31 form = CustomUserChangeForm 32 add_form = CustomUserCreationForm 33 list_display = ('email', 'first_name', 'last_name', 'institution',) 34 list_filter = ('groups',) 35 fieldsets = ( 36 ('Account info', {'fields': (('email', 'password'), 'is_active', ('date_joined', 'last_login'))}), 37 ('Personal info', {'fields': ('full_name', ('first_name', 'last_name'), 'institution', ('city', 'country'), 'website',)}), 38 ('Permissions', {'fields': ('is_staff', 'groups', 'sources_user_can_edit',)}), 39 ) 40 add_fieldsets = ( 41 ('Account info', {'fields': ('email', ('password1', 'password2'),)}), 42 ('Personal info', {'fields': ('full_name', ('first_name', 'last_name'), 'institution', ('city', 'country'), 'website',)}), 43 ('Permissions', {'fields': ('is_staff', 'groups', 'sources_user_can_edit',)}), 44 ) 45 search_fields = ('email', 'first_name', 'last_name', 'institution',) 46 ordering = ('email',) 47 filter_horizontal = ('groups', 'sources_user_can_edit',) 48 49 admin.site.register(User, UserAdmin) 50 [end of django/cantusdb_project/users/admin.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/django/cantusdb_project/main_app/admin.py b/django/cantusdb_project/main_app/admin.py --- a/django/cantusdb_project/main_app/admin.py +++ b/django/cantusdb_project/main_app/admin.py @@ -1,6 +1,15 @@ from django.contrib import admin from main_app.models import * +from django.contrib.auth import get_user_model # Register your models here. + +class SourcesUserCanEditInline(admin.TabularInline): + model = get_user_model().sources_user_can_edit.through + +class SourceAdmin(admin.ModelAdmin): + inlines = [SourcesUserCanEditInline] + filter_horizontal = ('century', 'notation', 'current_editors', 'inventoried_by', 'full_text_entered_by', 'melodies_entered_by', 'proofreaders', 'other_editors') + admin.site.register(Chant) admin.site.register(Feast) admin.site.register(Genre) @@ -10,4 +19,4 @@ admin.site.register(Provenance) admin.site.register(Segment) admin.site.register(Sequence) -admin.site.register(Source) +admin.site.register(Source, SourceAdmin) diff --git a/django/cantusdb_project/users/admin.py b/django/cantusdb_project/users/admin.py --- a/django/cantusdb_project/users/admin.py +++ b/django/cantusdb_project/users/admin.py @@ -5,42 +5,19 @@ # Register your models here. -class CustomUserCreationForm(UserCreationForm): - - class Meta: - model = User - fields = ( - 'email', 'password', 'is_active', 'date_joined', 'last_login', - 'full_name', 'first_name', 'last_name', 'institution', 'city', - 'country', 'website', 'is_staff', 'groups', 'sources_user_can_edit', - ) - - -class CustomUserChangeForm(UserChangeForm): - - class Meta: - model = User - fields = ( - 'email', 'password', 'is_active', 'date_joined', 'last_login', - 'full_name', 'first_name', 'last_name', 'institution', 'city', - 'country', 'website', 'is_staff', 'groups', 'sources_user_can_edit', - ) - - class UserAdmin(BaseUserAdmin): - form = CustomUserChangeForm - add_form = CustomUserCreationForm + readonly_fields = ('date_joined', 'last_login',) list_display = ('email', 'first_name', 'last_name', 'institution',) list_filter = ('groups',) fieldsets = ( ('Account info', {'fields': (('email', 'password'), 'is_active', ('date_joined', 'last_login'))}), ('Personal info', {'fields': ('full_name', ('first_name', 'last_name'), 'institution', ('city', 'country'), 'website',)}), - ('Permissions', {'fields': ('is_staff', 'groups', 'sources_user_can_edit',)}), + ('Permissions', {'fields': ('is_staff', 'is_superuser', 'groups', 'sources_user_can_edit',)}), ) add_fieldsets = ( ('Account info', {'fields': ('email', ('password1', 'password2'),)}), ('Personal info', {'fields': ('full_name', ('first_name', 'last_name'), 'institution', ('city', 'country'), 'website',)}), - ('Permissions', {'fields': ('is_staff', 'groups', 'sources_user_can_edit',)}), + ('Permissions', {'fields': ('is_staff', 'is_superuser', 'groups', 'sources_user_can_edit',)}), ) search_fields = ('email', 'first_name', 'last_name', 'institution',) ordering = ('email',)
{"golden_diff": "diff --git a/django/cantusdb_project/main_app/admin.py b/django/cantusdb_project/main_app/admin.py\n--- a/django/cantusdb_project/main_app/admin.py\n+++ b/django/cantusdb_project/main_app/admin.py\n@@ -1,6 +1,15 @@\n from django.contrib import admin\n from main_app.models import *\n+from django.contrib.auth import get_user_model\n # Register your models here.\n+\n+class SourcesUserCanEditInline(admin.TabularInline):\n+ model = get_user_model().sources_user_can_edit.through\n+\n+class SourceAdmin(admin.ModelAdmin):\n+ inlines = [SourcesUserCanEditInline]\n+ filter_horizontal = ('century', 'notation', 'current_editors', 'inventoried_by', 'full_text_entered_by', 'melodies_entered_by', 'proofreaders', 'other_editors')\n+\n admin.site.register(Chant)\n admin.site.register(Feast)\n admin.site.register(Genre)\n@@ -10,4 +19,4 @@\n admin.site.register(Provenance)\n admin.site.register(Segment)\n admin.site.register(Sequence)\n-admin.site.register(Source)\n+admin.site.register(Source, SourceAdmin)\ndiff --git a/django/cantusdb_project/users/admin.py b/django/cantusdb_project/users/admin.py\n--- a/django/cantusdb_project/users/admin.py\n+++ b/django/cantusdb_project/users/admin.py\n@@ -5,42 +5,19 @@\n \n # Register your models here.\n \n-class CustomUserCreationForm(UserCreationForm):\n-\n- class Meta:\n- model = User\n- fields = (\n- 'email', 'password', 'is_active', 'date_joined', 'last_login', \n- 'full_name', 'first_name', 'last_name', 'institution', 'city', \n- 'country', 'website', 'is_staff', 'groups', 'sources_user_can_edit',\n- )\n-\n-\n-class CustomUserChangeForm(UserChangeForm):\n-\n- class Meta:\n- model = User\n- fields = (\n- 'email', 'password', 'is_active', 'date_joined', 'last_login', \n- 'full_name', 'first_name', 'last_name', 'institution', 'city', \n- 'country', 'website', 'is_staff', 'groups', 'sources_user_can_edit',\n- )\n-\n-\n class UserAdmin(BaseUserAdmin):\n- form = CustomUserChangeForm\n- add_form = CustomUserCreationForm \n+ readonly_fields = ('date_joined', 'last_login',)\n list_display = ('email', 'first_name', 'last_name', 'institution',)\n list_filter = ('groups',)\n fieldsets = (\n ('Account info', {'fields': (('email', 'password'), 'is_active', ('date_joined', 'last_login'))}),\n ('Personal info', {'fields': ('full_name', ('first_name', 'last_name'), 'institution', ('city', 'country'), 'website',)}),\n- ('Permissions', {'fields': ('is_staff', 'groups', 'sources_user_can_edit',)}),\n+ ('Permissions', {'fields': ('is_staff', 'is_superuser', 'groups', 'sources_user_can_edit',)}),\n )\n add_fieldsets = (\n ('Account info', {'fields': ('email', ('password1', 'password2'),)}),\n ('Personal info', {'fields': ('full_name', ('first_name', 'last_name'), 'institution', ('city', 'country'), 'website',)}),\n- ('Permissions', {'fields': ('is_staff', 'groups', 'sources_user_can_edit',)}),\n+ ('Permissions', {'fields': ('is_staff', 'is_superuser', 'groups', 'sources_user_can_edit',)}),\n )\n search_fields = ('email', 'first_name', 'last_name', 'institution',)\n ordering = ('email',)\n", "issue": "Promote people to superuser in Django admin\nReplace the current `is_staff` checkbox with a `is_superuser` checkbox. This way, the project manager can give superuser access (access to Django admin interface) to people without touching the command line. \r\n\r\nProject managers can access the Cantus editing functionalities (within the Cantus DB website, not the Admin interface). \r\nSuperusers can access the Django admin interface. 
\r\nIf we want someone to have access to both, they need to be in the `project manager` group and have `is_superuser` as `True`.\n", "before_files": [{"content": "from django.contrib import admin\nfrom main_app.models import *\n# Register your models here.\nadmin.site.register(Chant)\nadmin.site.register(Feast)\nadmin.site.register(Genre)\nadmin.site.register(Indexer)\nadmin.site.register(Notation)\nadmin.site.register(Office)\nadmin.site.register(Provenance)\nadmin.site.register(Segment)\nadmin.site.register(Sequence)\nadmin.site.register(Source)\n", "path": "django/cantusdb_project/main_app/admin.py"}, {"content": "from django.contrib import admin\nfrom .models import *\nfrom django.contrib.auth.admin import UserAdmin as BaseUserAdmin\nfrom django.contrib.auth.forms import UserCreationForm, UserChangeForm\n\n# Register your models here.\n\nclass CustomUserCreationForm(UserCreationForm):\n\n class Meta:\n model = User\n fields = (\n 'email', 'password', 'is_active', 'date_joined', 'last_login', \n 'full_name', 'first_name', 'last_name', 'institution', 'city', \n 'country', 'website', 'is_staff', 'groups', 'sources_user_can_edit',\n )\n\n\nclass CustomUserChangeForm(UserChangeForm):\n\n class Meta:\n model = User\n fields = (\n 'email', 'password', 'is_active', 'date_joined', 'last_login', \n 'full_name', 'first_name', 'last_name', 'institution', 'city', \n 'country', 'website', 'is_staff', 'groups', 'sources_user_can_edit',\n )\n\n\nclass UserAdmin(BaseUserAdmin):\n form = CustomUserChangeForm\n add_form = CustomUserCreationForm \n list_display = ('email', 'first_name', 'last_name', 'institution',)\n list_filter = ('groups',)\n fieldsets = (\n ('Account info', {'fields': (('email', 'password'), 'is_active', ('date_joined', 'last_login'))}),\n ('Personal info', {'fields': ('full_name', ('first_name', 'last_name'), 'institution', ('city', 'country'), 'website',)}),\n ('Permissions', {'fields': ('is_staff', 'groups', 'sources_user_can_edit',)}),\n )\n add_fieldsets = (\n ('Account info', {'fields': ('email', ('password1', 'password2'),)}),\n ('Personal info', {'fields': ('full_name', ('first_name', 'last_name'), 'institution', ('city', 'country'), 'website',)}),\n ('Permissions', {'fields': ('is_staff', 'groups', 'sources_user_can_edit',)}),\n )\n search_fields = ('email', 'first_name', 'last_name', 'institution',)\n ordering = ('email',)\n filter_horizontal = ('groups', 'sources_user_can_edit',)\n\nadmin.site.register(User, UserAdmin)\n", "path": "django/cantusdb_project/users/admin.py"}]}
num_tokens_prompt: 1,374
num_tokens_diff: 843
problem_id: gh_patches_debug_13542
source: rasdani/github-patches
task_type: git_diff
in_source_id: bokeh__bokeh-10032
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Documentation spelling and formatting There are various typos scattered throughout the documentation. </issue> <code> [start of sphinx/source/docs/user_guide/examples/layout_sizing_mode_multiple.py] 1 import numpy as np 2 3 from bokeh.layouts import column, row 4 from bokeh.models import CustomJS, Div, Slider 5 from bokeh.plotting import ColumnDataSource, figure, output_file, show 6 7 x = np.linspace(0, 10, 500) 8 y = np.sin(x) 9 10 source = ColumnDataSource(data=dict(x=x, y=y)) 11 12 plot = figure(y_range=(-10, 10), plot_width=400, plot_height=200, background_fill_color="#fafafa", sizing_mode="scale_width") 13 14 plot.line('x', 'y', source=source, line_width=3, line_alpha=0.6) 15 16 amp = Slider(start=0.1, end=10, value=1, step=.1, title="Amplitude", sizing_mode="stretch_both") 17 freq = Slider(start=0.1, end=10, value=1, step=.1, title="Frequency", sizing_mode="stretch_both") 18 phase = Slider(start=0, end=6.4, value=0, step=.1, title="Phase", sizing_mode="stretch_both") 19 offset = Slider(start=-5, end=5, value=0, step=.1, title="Offset", sizing_mode="stretch_both") 20 21 widgets = column(amp, freq, phase, offset, sizing_mode="fixed", height=250, width=150) 22 23 callback = CustomJS(args=dict(source=source, amp=amp, freq=freq, phase=phase, offset=offset), 24 code=""" 25 const data = source.data; 26 const A = amp.value; 27 const k = freq.value; 28 const phi = phase.value; 29 const B = offset.value; 30 const x = data['x'] 31 const y = data['y'] 32 for (var i = 0; i < x.length; i++) { 33 y[i] = B + A*Math.sin(k*x[i]+phi); 34 } 35 source.change.emit(); 36 """) 37 38 amp.js_on_change('value', callback) 39 freq.js_on_change('value', callback) 40 phase.js_on_change('value', callback) 41 offset.js_on_change('value', callback) 42 43 heading = Div(sizing_mode="stretch_width", height=80, text="Lorem ipsum dolor sit amet, consectetur adipiscing elit. " 44 "Sed elementum lorem lacus, eget scelerisque diam rutrum ac. Nunc est urna, fringilla nec placerat vitae, venenatis egestas eros. " 45 "Aliquam eleifend orci gravida nulla tempor, sit amet malesuada lacus efficitur. Mauris placerat.") 46 47 layout = column(heading, row(widgets, plot), sizing_mode="stretch_both") 48 49 output_file("slider.html", title="slider.py example") 50 51 show(layout) 52 [end of sphinx/source/docs/user_guide/examples/layout_sizing_mode_multiple.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sphinx/source/docs/user_guide/examples/layout_sizing_mode_multiple.py b/sphinx/source/docs/user_guide/examples/layout_sizing_mode_multiple.py --- a/sphinx/source/docs/user_guide/examples/layout_sizing_mode_multiple.py +++ b/sphinx/source/docs/user_guide/examples/layout_sizing_mode_multiple.py @@ -40,9 +40,8 @@ phase.js_on_change('value', callback) offset.js_on_change('value', callback) -heading = Div(sizing_mode="stretch_width", height=80, text="Lorem ipsum dolor sit amet, consectetur adipiscing elit. " -"Sed elementum lorem lacus, eget scelerisque diam rutrum ac. Nunc est urna, fringilla nec placerat vitae, venenatis egestas eros. " -"Aliquam eleifend orci gravida nulla tempor, sit amet malesuada lacus efficitur. Mauris placerat.") +heading = Div(sizing_mode="stretch_width", height=80, text="In this wave example, the sliders on the left " + "can be used to change the amplitude, frequency, phase and offset of the wave.") layout = column(heading, row(widgets, plot), sizing_mode="stretch_both")
{"golden_diff": "diff --git a/sphinx/source/docs/user_guide/examples/layout_sizing_mode_multiple.py b/sphinx/source/docs/user_guide/examples/layout_sizing_mode_multiple.py\n--- a/sphinx/source/docs/user_guide/examples/layout_sizing_mode_multiple.py\n+++ b/sphinx/source/docs/user_guide/examples/layout_sizing_mode_multiple.py\n@@ -40,9 +40,8 @@\n phase.js_on_change('value', callback)\n offset.js_on_change('value', callback)\n \n-heading = Div(sizing_mode=\"stretch_width\", height=80, text=\"Lorem ipsum dolor sit amet, consectetur adipiscing elit. \"\n-\"Sed elementum lorem lacus, eget scelerisque diam rutrum ac. Nunc est urna, fringilla nec placerat vitae, venenatis egestas eros. \"\n-\"Aliquam eleifend orci gravida nulla tempor, sit amet malesuada lacus efficitur. Mauris placerat.\")\n+heading = Div(sizing_mode=\"stretch_width\", height=80, text=\"In this wave example, the sliders on the left \"\n+ \"can be used to change the amplitude, frequency, phase and offset of the wave.\")\n \n layout = column(heading, row(widgets, plot), sizing_mode=\"stretch_both\")\n", "issue": "Documentation spelling and formatting\nThere are various typos scattered throughout the documentation.\n", "before_files": [{"content": "import numpy as np\n\nfrom bokeh.layouts import column, row\nfrom bokeh.models import CustomJS, Div, Slider\nfrom bokeh.plotting import ColumnDataSource, figure, output_file, show\n\nx = np.linspace(0, 10, 500)\ny = np.sin(x)\n\nsource = ColumnDataSource(data=dict(x=x, y=y))\n\nplot = figure(y_range=(-10, 10), plot_width=400, plot_height=200, background_fill_color=\"#fafafa\", sizing_mode=\"scale_width\")\n\nplot.line('x', 'y', source=source, line_width=3, line_alpha=0.6)\n\namp = Slider(start=0.1, end=10, value=1, step=.1, title=\"Amplitude\", sizing_mode=\"stretch_both\")\nfreq = Slider(start=0.1, end=10, value=1, step=.1, title=\"Frequency\", sizing_mode=\"stretch_both\")\nphase = Slider(start=0, end=6.4, value=0, step=.1, title=\"Phase\", sizing_mode=\"stretch_both\")\noffset = Slider(start=-5, end=5, value=0, step=.1, title=\"Offset\", sizing_mode=\"stretch_both\")\n\nwidgets = column(amp, freq, phase, offset, sizing_mode=\"fixed\", height=250, width=150)\n\ncallback = CustomJS(args=dict(source=source, amp=amp, freq=freq, phase=phase, offset=offset),\n code=\"\"\"\n const data = source.data;\n const A = amp.value;\n const k = freq.value;\n const phi = phase.value;\n const B = offset.value;\n const x = data['x']\n const y = data['y']\n for (var i = 0; i < x.length; i++) {\n y[i] = B + A*Math.sin(k*x[i]+phi);\n }\n source.change.emit();\n\"\"\")\n\namp.js_on_change('value', callback)\nfreq.js_on_change('value', callback)\nphase.js_on_change('value', callback)\noffset.js_on_change('value', callback)\n\nheading = Div(sizing_mode=\"stretch_width\", height=80, text=\"Lorem ipsum dolor sit amet, consectetur adipiscing elit. \"\n\"Sed elementum lorem lacus, eget scelerisque diam rutrum ac. Nunc est urna, fringilla nec placerat vitae, venenatis egestas eros. \"\n\"Aliquam eleifend orci gravida nulla tempor, sit amet malesuada lacus efficitur. Mauris placerat.\")\n\nlayout = column(heading, row(widgets, plot), sizing_mode=\"stretch_both\")\n\noutput_file(\"slider.html\", title=\"slider.py example\")\n\nshow(layout)\n", "path": "sphinx/source/docs/user_guide/examples/layout_sizing_mode_multiple.py"}]}
num_tokens_prompt: 1,258
num_tokens_diff: 260
problem_id: gh_patches_debug_23031
source: rasdani/github-patches
task_type: git_diff
in_source_id: nilearn__nilearn-4373
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [DOC] Minor discrepancy in comparing means example ### Is there an existing issue for this? - [X] I have searched the existing issues ### Describe your proposed suggestion in detail. Using a movie watching dataset as an example but calling it "resting state" in several places ### List any pages that would be impacted. The "comparing means of images with `math_img`" example: https://nilearn.github.io/stable/auto_examples/06_manipulating_images/plot_compare_mean_image.html </issue> <code> [start of examples/06_manipulating_images/plot_compare_mean_image.py] 1 """ 2 Comparing the means of 2 images 3 =============================== 4 5 The goal of this example is to illustrate the use of the function 6 :func:`nilearn.image.math_img` with a list of images as input. 7 We compare the means of 2 resting state 4D images. The mean of the images 8 could have been computed with nilearn :func:`nilearn.image.mean_img` function. 9 """ 10 11 # %% 12 # Fetching 2 subject movie watching brain development :term:`fMRI` datasets. 13 from nilearn import datasets 14 15 dataset = datasets.fetch_development_fmri(n_subjects=2) 16 17 18 # %% 19 # Print basic information on the adhd subjects resting state datasets. 20 print(f"Subject 1 resting state dataset at: {dataset.func[0]}") 21 print(f"Subject 2 resting state dataset at: {dataset.func[1]}") 22 23 24 # %% 25 # Comparing the means of the 2 movie watching datasets. 26 from nilearn import image, plotting 27 28 result_img = image.math_img( 29 "np.mean(img1, axis=-1) - np.mean(img2, axis=-1)", 30 img1=dataset.func[0], 31 img2=dataset.func[1], 32 ) 33 34 plotting.plot_stat_map( 35 result_img, title="Comparing means of 2 resting state 4D images." 36 ) 37 plotting.show() 38 [end of examples/06_manipulating_images/plot_compare_mean_image.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/06_manipulating_images/plot_compare_mean_image.py b/examples/06_manipulating_images/plot_compare_mean_image.py --- a/examples/06_manipulating_images/plot_compare_mean_image.py +++ b/examples/06_manipulating_images/plot_compare_mean_image.py @@ -4,7 +4,7 @@ The goal of this example is to illustrate the use of the function :func:`nilearn.image.math_img` with a list of images as input. -We compare the means of 2 resting state 4D images. The mean of the images +We compare the means of 2 movie watching 4D images. The mean of the images could have been computed with nilearn :func:`nilearn.image.mean_img` function. """ @@ -16,9 +16,9 @@ # %% -# Print basic information on the adhd subjects resting state datasets. -print(f"Subject 1 resting state dataset at: {dataset.func[0]}") -print(f"Subject 2 resting state dataset at: {dataset.func[1]}") +# Print file locations for both subjects. +print(f"Subject 1 dataset at: {dataset.func[0]}") +print(f"Subject 2 dataset at: {dataset.func[1]}") # %% @@ -32,6 +32,6 @@ ) plotting.plot_stat_map( - result_img, title="Comparing means of 2 resting state 4D images." + result_img, title="Comparing means of 2 movie watching 4D images." ) plotting.show()
{"golden_diff": "diff --git a/examples/06_manipulating_images/plot_compare_mean_image.py b/examples/06_manipulating_images/plot_compare_mean_image.py\n--- a/examples/06_manipulating_images/plot_compare_mean_image.py\n+++ b/examples/06_manipulating_images/plot_compare_mean_image.py\n@@ -4,7 +4,7 @@\n \n The goal of this example is to illustrate the use of the function\n :func:`nilearn.image.math_img` with a list of images as input.\n-We compare the means of 2 resting state 4D images. The mean of the images\n+We compare the means of 2 movie watching 4D images. The mean of the images\n could have been computed with nilearn :func:`nilearn.image.mean_img` function.\n \"\"\"\n \n@@ -16,9 +16,9 @@\n \n \n # %%\n-# Print basic information on the adhd subjects resting state datasets.\n-print(f\"Subject 1 resting state dataset at: {dataset.func[0]}\")\n-print(f\"Subject 2 resting state dataset at: {dataset.func[1]}\")\n+# Print file locations for both subjects.\n+print(f\"Subject 1 dataset at: {dataset.func[0]}\")\n+print(f\"Subject 2 dataset at: {dataset.func[1]}\")\n \n \n # %%\n@@ -32,6 +32,6 @@\n )\n \n plotting.plot_stat_map(\n- result_img, title=\"Comparing means of 2 resting state 4D images.\"\n+ result_img, title=\"Comparing means of 2 movie watching 4D images.\"\n )\n plotting.show()\n", "issue": "[DOC] Minor discrepancy in comparing means example\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues\n\n### Describe your proposed suggestion in detail.\n\nUsing a movie watching dataset as an example but calling it \"resting state\" in several places\n\n### List any pages that would be impacted.\n\nThe \"comparing means of images with `math_img`\" example: https://nilearn.github.io/stable/auto_examples/06_manipulating_images/plot_compare_mean_image.html\n", "before_files": [{"content": "\"\"\"\nComparing the means of 2 images\n===============================\n\nThe goal of this example is to illustrate the use of the function\n:func:`nilearn.image.math_img` with a list of images as input.\nWe compare the means of 2 resting state 4D images. The mean of the images\ncould have been computed with nilearn :func:`nilearn.image.mean_img` function.\n\"\"\"\n\n# %%\n# Fetching 2 subject movie watching brain development :term:`fMRI` datasets.\nfrom nilearn import datasets\n\ndataset = datasets.fetch_development_fmri(n_subjects=2)\n\n\n# %%\n# Print basic information on the adhd subjects resting state datasets.\nprint(f\"Subject 1 resting state dataset at: {dataset.func[0]}\")\nprint(f\"Subject 2 resting state dataset at: {dataset.func[1]}\")\n\n\n# %%\n# Comparing the means of the 2 movie watching datasets.\nfrom nilearn import image, plotting\n\nresult_img = image.math_img(\n \"np.mean(img1, axis=-1) - np.mean(img2, axis=-1)\",\n img1=dataset.func[0],\n img2=dataset.func[1],\n)\n\nplotting.plot_stat_map(\n result_img, title=\"Comparing means of 2 resting state 4D images.\"\n)\nplotting.show()\n", "path": "examples/06_manipulating_images/plot_compare_mean_image.py"}]}
num_tokens_prompt: 1,013
num_tokens_diff: 349
problem_id: gh_patches_debug_2739
source: rasdani/github-patches
task_type: git_diff
in_source_id: translate__translate-3435
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> multistring needs a __hash__ method In old ttk you could do something like ``` python foo = multistring("foo") foodict = {foo: "bar"} assert 'foo' in foodict ``` It seems this no longer works - not sure why, but a `__hash__` method that returns `hash(str(self))` should fix the problem i believe @claudep @julen any thoughts on this? </issue> <code> [start of translate/misc/multistring.py] 1 # -*- coding: utf-8 -*- 2 # 3 # Copyright 2006 Zuza Software Foundation 4 # 5 # This file is part of translate. 6 # 7 # translate is free software; you can redistribute it and/or modify 8 # it under the terms of the GNU General Public License as published by 9 # the Free Software Foundation; either version 2 of the License, or 10 # (at your option) any later version. 11 # 12 # translate is distributed in the hope that it will be useful, 13 # but WITHOUT ANY WARRANTY; without even the implied warranty of 14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 # GNU General Public License for more details. 16 # 17 # You should have received a copy of the GNU General Public License 18 # along with this program; if not, see <http://www.gnu.org/licenses/>. 19 20 """Supports a hybrid Unicode string that can also have a list of alternate 21 strings in the strings attribute 22 """ 23 24 import warnings 25 26 import six 27 28 from .deprecation import RemovedInTTK2Warning 29 30 31 def _create_text_type(newtype, string, encoding): 32 """Helper to construct a text type out of characters or bytes. Required to 33 temporarily preserve backwards compatibility. Must be removed in TTK2. 34 """ 35 if isinstance(string, six.text_type): 36 return six.text_type.__new__(newtype, string) 37 38 warnings.warn( 39 'Passing non-ASCII bytes as well as the `encoding` argument to ' 40 '`multistring` is deprecated. 
Always pass unicode characters instead.', 41 RemovedInTTK2Warning, stacklevel=2, 42 ) 43 return six.text_type.__new__(newtype, string or six.binary_type(), encoding) 44 45 46 class multistring(six.text_type): 47 48 def __new__(newtype, string=u"", *args, **kwargs): 49 encoding = kwargs.pop('encoding', 'utf-8') 50 if isinstance(string, list): 51 if not string: 52 raise ValueError("multistring must contain at least one string") 53 newstring = _create_text_type(newtype, string[0], encoding) 54 newstring.strings = [newstring] + [multistring.__new__(newtype, altstring) for altstring in string[1:]] 55 else: 56 newstring = _create_text_type(newtype, string, encoding) 57 newstring.strings = [newstring] 58 return newstring 59 60 def __init__(self, *args, **kwargs): 61 super(multistring, self).__init__() 62 if not hasattr(self, "strings"): 63 self.strings = [] 64 65 def __cmp__(self, otherstring): 66 def cmp_compat(s1, s2): 67 # Python 3 compatible cmp() equivalent 68 return (s1 > s2) - (s1 < s2) 69 if isinstance(otherstring, multistring): 70 parentcompare = cmp_compat(six.text_type(self), otherstring) 71 if parentcompare: 72 return parentcompare 73 else: 74 return cmp_compat(self.strings[1:], otherstring.strings[1:]) 75 elif isinstance(otherstring, six.text_type): 76 return cmp_compat(six.text_type(self), otherstring) 77 elif isinstance(otherstring, bytes): 78 return cmp_compat(self.encode('utf-8'), otherstring) 79 elif isinstance(otherstring, list) and otherstring: 80 return cmp_compat(self, multistring(otherstring)) 81 else: 82 return cmp_compat(str(type(self)), str(type(otherstring))) 83 84 def __hash__(self): 85 return hash(''.join(self.strings)) 86 87 def __ne__(self, otherstring): 88 return self.__cmp__(otherstring) != 0 89 90 def __eq__(self, otherstring): 91 return self.__cmp__(otherstring) == 0 92 93 def __repr__(self): 94 _repr = u"multistring(%r)" % ( 95 [six.text_type(item) for item in self.strings] 96 ) 97 return _repr.encode('utf-8') if six.PY2 else _repr 98 99 def __str__(self): 100 if six.PY2: 101 return self.encode('utf-8') 102 return super(multistring, self).__str__() 103 104 def replace(self, old, new, count=None): 105 if count is None: 106 newstr = multistring(super(multistring, self).replace(old, new)) 107 else: 108 newstr = multistring(super(multistring, self).replace(old, new, count)) 109 for s in self.strings[1:]: 110 if count is None: 111 newstr.strings.append(s.replace(old, new)) 112 else: 113 newstr.strings.append(s.replace(old, new, count)) 114 return newstr 115 [end of translate/misc/multistring.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/translate/misc/multistring.py b/translate/misc/multistring.py --- a/translate/misc/multistring.py +++ b/translate/misc/multistring.py @@ -82,7 +82,7 @@ return cmp_compat(str(type(self)), str(type(otherstring))) def __hash__(self): - return hash(''.join(self.strings)) + return hash(str(self)) def __ne__(self, otherstring): return self.__cmp__(otherstring) != 0
{"golden_diff": "diff --git a/translate/misc/multistring.py b/translate/misc/multistring.py\n--- a/translate/misc/multistring.py\n+++ b/translate/misc/multistring.py\n@@ -82,7 +82,7 @@\n return cmp_compat(str(type(self)), str(type(otherstring)))\n \n def __hash__(self):\n- return hash(''.join(self.strings))\n+ return hash(str(self))\n \n def __ne__(self, otherstring):\n return self.__cmp__(otherstring) != 0\n", "issue": "multistring needs a __hash__ method\nIn old ttk you could do something like\n\n``` python\nfoo = multistring(\"foo\")\nfoodict = {foo: \"bar\"}\nassert 'foo' in foodict\n```\n\nIt seems this no longer works - not sure why, but a `__hash__` method that returns `hash(str(self))` should fix the problem i believe\n\n@claudep @julen any thoughts on this?\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2006 Zuza Software Foundation\n#\n# This file is part of translate.\n#\n# translate is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# translate is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Supports a hybrid Unicode string that can also have a list of alternate\nstrings in the strings attribute\n\"\"\"\n\nimport warnings\n\nimport six\n\nfrom .deprecation import RemovedInTTK2Warning\n\n\ndef _create_text_type(newtype, string, encoding):\n \"\"\"Helper to construct a text type out of characters or bytes. Required to\n temporarily preserve backwards compatibility. Must be removed in TTK2.\n \"\"\"\n if isinstance(string, six.text_type):\n return six.text_type.__new__(newtype, string)\n\n warnings.warn(\n 'Passing non-ASCII bytes as well as the `encoding` argument to '\n '`multistring` is deprecated. 
Always pass unicode characters instead.',\n RemovedInTTK2Warning, stacklevel=2,\n )\n return six.text_type.__new__(newtype, string or six.binary_type(), encoding)\n\n\nclass multistring(six.text_type):\n\n def __new__(newtype, string=u\"\", *args, **kwargs):\n encoding = kwargs.pop('encoding', 'utf-8')\n if isinstance(string, list):\n if not string:\n raise ValueError(\"multistring must contain at least one string\")\n newstring = _create_text_type(newtype, string[0], encoding)\n newstring.strings = [newstring] + [multistring.__new__(newtype, altstring) for altstring in string[1:]]\n else:\n newstring = _create_text_type(newtype, string, encoding)\n newstring.strings = [newstring]\n return newstring\n\n def __init__(self, *args, **kwargs):\n super(multistring, self).__init__()\n if not hasattr(self, \"strings\"):\n self.strings = []\n\n def __cmp__(self, otherstring):\n def cmp_compat(s1, s2):\n # Python 3 compatible cmp() equivalent\n return (s1 > s2) - (s1 < s2)\n if isinstance(otherstring, multistring):\n parentcompare = cmp_compat(six.text_type(self), otherstring)\n if parentcompare:\n return parentcompare\n else:\n return cmp_compat(self.strings[1:], otherstring.strings[1:])\n elif isinstance(otherstring, six.text_type):\n return cmp_compat(six.text_type(self), otherstring)\n elif isinstance(otherstring, bytes):\n return cmp_compat(self.encode('utf-8'), otherstring)\n elif isinstance(otherstring, list) and otherstring:\n return cmp_compat(self, multistring(otherstring))\n else:\n return cmp_compat(str(type(self)), str(type(otherstring)))\n\n def __hash__(self):\n return hash(''.join(self.strings))\n\n def __ne__(self, otherstring):\n return self.__cmp__(otherstring) != 0\n\n def __eq__(self, otherstring):\n return self.__cmp__(otherstring) == 0\n\n def __repr__(self):\n _repr = u\"multistring(%r)\" % (\n [six.text_type(item) for item in self.strings]\n )\n return _repr.encode('utf-8') if six.PY2 else _repr\n\n def __str__(self):\n if six.PY2:\n return self.encode('utf-8')\n return super(multistring, self).__str__()\n\n def replace(self, old, new, count=None):\n if count is None:\n newstr = multistring(super(multistring, self).replace(old, new))\n else:\n newstr = multistring(super(multistring, self).replace(old, new, count))\n for s in self.strings[1:]:\n if count is None:\n newstr.strings.append(s.replace(old, new))\n else:\n newstr.strings.append(s.replace(old, new, count))\n return newstr\n", "path": "translate/misc/multistring.py"}]}
num_tokens_prompt: 1,851
num_tokens_diff: 115
problem_id: gh_patches_debug_45322
source: rasdani/github-patches
task_type: git_diff
in_source_id: zestedesavoir__zds-site-3531
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [v18] J'ai perdu mes sujets suivis Version : 18 OS Windows Navigateur Firefox Alors qu'une synchro a été faite entre la prod et la beta, je ne retrouve plus mes sujets suivis dans la page https://beta.zestedesavoir.com/forums/ comme on peut le voir sur la capture ci-dessous (il ne me reste plus qu'un sujet suivis au lieu de beaucoup plus) ![suivis](https://cloud.githubusercontent.com/assets/6066015/14254805/a75c8ac6-fa91-11e5-9852-d5dc2d60547b.PNG) </issue> <code> [start of zds/notification/management/commands/migrate_subscriptions.py] 1 # coding: utf-8 2 from django.core.management import BaseCommand 3 from django.db.models import F, Q 4 from zds.forum.models import TopicRead 5 from zds.member.models import Profile 6 from zds.mp.models import PrivateTopicRead, PrivateTopic 7 from zds.notification.models import TopicFollowed, TopicAnswerSubscription, ContentReactionAnswerSubscription, \ 8 PrivateTopicAnswerSubscription 9 from zds.tutorialv2.models.models_database import ContentReaction, ContentRead 10 11 12 class Command(BaseCommand): 13 help = 'Migrate old subscriptions and notifications for new models.' 14 15 def handle(self, *args, **options): 16 for profile in Profile.objects.all(): 17 self.stdout.write(u'Migrate all notifications of {}...'.format(profile.user.username)) 18 # Forums. 19 self.stdout.write(u'Starting migration with topics...') 20 topics_followed = TopicFollowed.objects.filter(user=profile.user).values("topic").distinct().all() 21 topics_never_read = TopicRead.objects\ 22 .filter(user=profile.user)\ 23 .filter(topic__in=topics_followed)\ 24 .select_related("topic")\ 25 .exclude(post=F('topic__last_message')).all() 26 27 for topic_never_read in topics_never_read: 28 content = topic_never_read.topic.first_unread_post(profile.user) 29 if content is None: 30 content = topic_never_read.topic.last_message 31 32 # Migrate subscriptions. 33 content_object = topic_never_read.topic 34 subscription = TopicAnswerSubscription.objects.get_or_create_active( 35 user=profile.user, content_object=content_object) 36 37 # Migrate notifications. 38 subscription.send_notification(content=content, sender=content.author) 39 notification = TopicAnswerSubscription.objects\ 40 .get_existing(profile.user, content_object, is_active=True).last_notification 41 notification.pubdate = content.pubdate 42 notification.save() 43 44 self.stdout.write(u'Migration about « {} » [OK]'.format(topic_never_read.topic.title)) 45 46 # Private messages. 47 self.stdout.write(u'Starting migration with private topics...') 48 topics_never_read = list(PrivateTopicRead.objects 49 .filter(user=profile.user) 50 .filter(privatepost=F('privatetopic__last_message')).all()) 51 52 tnrs = [] 53 for tnr in topics_never_read: 54 tnrs.append(tnr.privatetopic.pk) 55 56 private_topics_unread = PrivateTopic.objects \ 57 .filter(Q(author=profile.user) | Q(participants__in=[profile.user])) \ 58 .exclude(pk__in=tnrs) \ 59 .order_by("-pubdate") \ 60 .distinct() 61 62 for private_topic_unread in private_topics_unread: 63 answer = private_topic_unread.last_read_post(profile.user) 64 if answer is None: 65 answer = private_topic_unread.get_last_answer() 66 67 # Migrate subscriptions. 68 subscription = PrivateTopicAnswerSubscription.objects.get_or_create_active( 69 user=profile.user, content_object=private_topic_unread) 70 71 # Migrate notifications. 
72 subscription.send_notification(content=answer, sender=answer.author, send_email=False) 73 notification = PrivateTopicAnswerSubscription.objects\ 74 .get_existing(profile.user, private_topic_unread, is_active=True).last_notification 75 notification.pubdate = answer.pubdate 76 notification.save() 77 78 self.stdout.write(u'Migration about « {} » [OK]'.format(private_topic_unread.title)) 79 80 # Contents. 81 self.stdout.write(u'Starting migration with contents...') 82 content_followed_pk = ContentReaction.objects\ 83 .filter(author=profile.user, related_content__public_version__isnull=False)\ 84 .values_list('related_content__pk', flat=True) 85 86 content_to_read = ContentRead.objects\ 87 .select_related('note')\ 88 .select_related('note__author')\ 89 .select_related('content')\ 90 .select_related('note__related_content__public_version')\ 91 .filter(user=profile.user)\ 92 .exclude(note__pk=F('content__last_note__pk')).all() 93 94 for content_read in content_to_read: 95 content = content_read.content 96 if content.pk not in content_followed_pk and profile.user not in content.authors.all(): 97 continue 98 reaction = content.first_unread_note(user=profile.user) 99 if reaction is None: 100 reaction = content.first_note() 101 if reaction is None: 102 continue 103 104 # Migrate subscriptions. 105 content_object = reaction.related_content 106 subscription = ContentReactionAnswerSubscription.objects.get_or_create_active( 107 user=profile.user, content_object=content_object) 108 109 # Migrate notifications. 110 subscription.send_notification(content=reaction, sender=reaction.author) 111 notification = ContentReactionAnswerSubscription.objects\ 112 .get_existing(profile.user, content_object, is_active=True).last_notification 113 notification.pubdate = reaction.pubdate 114 notification.save() 115 116 self.stdout.write(u'Migration about « {} » [OK]'.format(content.title)) 117 [end of zds/notification/management/commands/migrate_subscriptions.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/zds/notification/management/commands/migrate_subscriptions.py b/zds/notification/management/commands/migrate_subscriptions.py --- a/zds/notification/management/commands/migrate_subscriptions.py +++ b/zds/notification/management/commands/migrate_subscriptions.py @@ -1,12 +1,12 @@ # coding: utf-8 from django.core.management import BaseCommand from django.db.models import F, Q -from zds.forum.models import TopicRead +from zds.forum.models import TopicRead, Topic from zds.member.models import Profile from zds.mp.models import PrivateTopicRead, PrivateTopic from zds.notification.models import TopicFollowed, TopicAnswerSubscription, ContentReactionAnswerSubscription, \ PrivateTopicAnswerSubscription -from zds.tutorialv2.models.models_database import ContentReaction, ContentRead +from zds.tutorialv2.models.models_database import ContentReaction, ContentRead, PublishableContent class Command(BaseCommand): @@ -24,12 +24,17 @@ .select_related("topic")\ .exclude(post=F('topic__last_message')).all() + # Migrate subscriptions. + for topic_followed in TopicFollowed.objects \ + .filter(user=profile.user).values_list('topic', flat=True).distinct().all(): + content_object = Topic.objects.get(id=topic_followed) + TopicAnswerSubscription.objects.get_or_create_active(user=profile.user, content_object=content_object) + for topic_never_read in topics_never_read: content = topic_never_read.topic.first_unread_post(profile.user) if content is None: content = topic_never_read.topic.last_message - # Migrate subscriptions. content_object = topic_never_read.topic subscription = TopicAnswerSubscription.objects.get_or_create_active( user=profile.user, content_object=content_object) @@ -53,6 +58,16 @@ for tnr in topics_never_read: tnrs.append(tnr.privatetopic.pk) + # Migrate subscriptions. + private_topics = PrivateTopic.objects \ + .filter(Q(author=profile.user) | Q(participants__in=[profile.user])) \ + .order_by("-pubdate") \ + .distinct() + + for private_topic in private_topics: + PrivateTopicAnswerSubscription.objects.get_or_create_active( + user=profile.user, content_object=private_topic) + private_topics_unread = PrivateTopic.objects \ .filter(Q(author=profile.user) | Q(participants__in=[profile.user])) \ .exclude(pk__in=tnrs) \ @@ -64,7 +79,6 @@ if answer is None: answer = private_topic_unread.get_last_answer() - # Migrate subscriptions. subscription = PrivateTopicAnswerSubscription.objects.get_or_create_active( user=profile.user, content_object=private_topic_unread) @@ -79,6 +93,16 @@ # Contents. self.stdout.write(u'Starting migration with contents...') + # Migrate subscriptions. + contents_followed = ContentReaction.objects \ + .filter(author=profile.user, related_content__public_version__isnull=False) \ + .values_list('related_content', flat=True) + + for content_followed in contents_followed: + content_object = PublishableContent.objects.get(id=content_followed) + ContentReactionAnswerSubscription.objects.get_or_create_active( + user=profile.user, content_object=content_object) + content_followed_pk = ContentReaction.objects\ .filter(author=profile.user, related_content__public_version__isnull=False)\ .values_list('related_content__pk', flat=True) @@ -101,7 +125,6 @@ if reaction is None: continue - # Migrate subscriptions. content_object = reaction.related_content subscription = ContentReactionAnswerSubscription.objects.get_or_create_active( user=profile.user, content_object=content_object)
{"golden_diff": "diff --git a/zds/notification/management/commands/migrate_subscriptions.py b/zds/notification/management/commands/migrate_subscriptions.py\n--- a/zds/notification/management/commands/migrate_subscriptions.py\n+++ b/zds/notification/management/commands/migrate_subscriptions.py\n@@ -1,12 +1,12 @@\n # coding: utf-8\n from django.core.management import BaseCommand\n from django.db.models import F, Q\n-from zds.forum.models import TopicRead\n+from zds.forum.models import TopicRead, Topic\n from zds.member.models import Profile\n from zds.mp.models import PrivateTopicRead, PrivateTopic\n from zds.notification.models import TopicFollowed, TopicAnswerSubscription, ContentReactionAnswerSubscription, \\\n PrivateTopicAnswerSubscription\n-from zds.tutorialv2.models.models_database import ContentReaction, ContentRead\n+from zds.tutorialv2.models.models_database import ContentReaction, ContentRead, PublishableContent\n \n \n class Command(BaseCommand):\n@@ -24,12 +24,17 @@\n .select_related(\"topic\")\\\n .exclude(post=F('topic__last_message')).all()\n \n+ # Migrate subscriptions.\n+ for topic_followed in TopicFollowed.objects \\\n+ .filter(user=profile.user).values_list('topic', flat=True).distinct().all():\n+ content_object = Topic.objects.get(id=topic_followed)\n+ TopicAnswerSubscription.objects.get_or_create_active(user=profile.user, content_object=content_object)\n+\n for topic_never_read in topics_never_read:\n content = topic_never_read.topic.first_unread_post(profile.user)\n if content is None:\n content = topic_never_read.topic.last_message\n \n- # Migrate subscriptions.\n content_object = topic_never_read.topic\n subscription = TopicAnswerSubscription.objects.get_or_create_active(\n user=profile.user, content_object=content_object)\n@@ -53,6 +58,16 @@\n for tnr in topics_never_read:\n tnrs.append(tnr.privatetopic.pk)\n \n+ # Migrate subscriptions.\n+ private_topics = PrivateTopic.objects \\\n+ .filter(Q(author=profile.user) | Q(participants__in=[profile.user])) \\\n+ .order_by(\"-pubdate\") \\\n+ .distinct()\n+\n+ for private_topic in private_topics:\n+ PrivateTopicAnswerSubscription.objects.get_or_create_active(\n+ user=profile.user, content_object=private_topic)\n+\n private_topics_unread = PrivateTopic.objects \\\n .filter(Q(author=profile.user) | Q(participants__in=[profile.user])) \\\n .exclude(pk__in=tnrs) \\\n@@ -64,7 +79,6 @@\n if answer is None:\n answer = private_topic_unread.get_last_answer()\n \n- # Migrate subscriptions.\n subscription = PrivateTopicAnswerSubscription.objects.get_or_create_active(\n user=profile.user, content_object=private_topic_unread)\n \n@@ -79,6 +93,16 @@\n \n # Contents.\n self.stdout.write(u'Starting migration with contents...')\n+ # Migrate subscriptions.\n+ contents_followed = ContentReaction.objects \\\n+ .filter(author=profile.user, related_content__public_version__isnull=False) \\\n+ .values_list('related_content', flat=True)\n+\n+ for content_followed in contents_followed:\n+ content_object = PublishableContent.objects.get(id=content_followed)\n+ ContentReactionAnswerSubscription.objects.get_or_create_active(\n+ user=profile.user, content_object=content_object)\n+\n content_followed_pk = ContentReaction.objects\\\n .filter(author=profile.user, related_content__public_version__isnull=False)\\\n .values_list('related_content__pk', flat=True)\n@@ -101,7 +125,6 @@\n if reaction is None:\n continue\n \n- # Migrate subscriptions.\n content_object = reaction.related_content\n subscription = 
ContentReactionAnswerSubscription.objects.get_or_create_active(\n user=profile.user, content_object=content_object)\n", "issue": "[v18] J'ai perdu mes sujets suivis\nVersion : 18\nOS Windows\nNavigateur Firefox\n\nAlors qu'une synchro a \u00e9t\u00e9 faite entre la prod et la beta, je ne retrouve plus mes sujets suivis dans la page https://beta.zestedesavoir.com/forums/ comme on peut le voir sur la capture ci-dessous (il ne me reste plus qu'un sujet suivis au lieu de beaucoup plus)\n\n![suivis](https://cloud.githubusercontent.com/assets/6066015/14254805/a75c8ac6-fa91-11e5-9852-d5dc2d60547b.PNG)\n\n", "before_files": [{"content": "# coding: utf-8\nfrom django.core.management import BaseCommand\nfrom django.db.models import F, Q\nfrom zds.forum.models import TopicRead\nfrom zds.member.models import Profile\nfrom zds.mp.models import PrivateTopicRead, PrivateTopic\nfrom zds.notification.models import TopicFollowed, TopicAnswerSubscription, ContentReactionAnswerSubscription, \\\n PrivateTopicAnswerSubscription\nfrom zds.tutorialv2.models.models_database import ContentReaction, ContentRead\n\n\nclass Command(BaseCommand):\n help = 'Migrate old subscriptions and notifications for new models.'\n\n def handle(self, *args, **options):\n for profile in Profile.objects.all():\n self.stdout.write(u'Migrate all notifications of {}...'.format(profile.user.username))\n # Forums.\n self.stdout.write(u'Starting migration with topics...')\n topics_followed = TopicFollowed.objects.filter(user=profile.user).values(\"topic\").distinct().all()\n topics_never_read = TopicRead.objects\\\n .filter(user=profile.user)\\\n .filter(topic__in=topics_followed)\\\n .select_related(\"topic\")\\\n .exclude(post=F('topic__last_message')).all()\n\n for topic_never_read in topics_never_read:\n content = topic_never_read.topic.first_unread_post(profile.user)\n if content is None:\n content = topic_never_read.topic.last_message\n\n # Migrate subscriptions.\n content_object = topic_never_read.topic\n subscription = TopicAnswerSubscription.objects.get_or_create_active(\n user=profile.user, content_object=content_object)\n\n # Migrate notifications.\n subscription.send_notification(content=content, sender=content.author)\n notification = TopicAnswerSubscription.objects\\\n .get_existing(profile.user, content_object, is_active=True).last_notification\n notification.pubdate = content.pubdate\n notification.save()\n\n self.stdout.write(u'Migration about \u00ab {} \u00bb [OK]'.format(topic_never_read.topic.title))\n\n # Private messages.\n self.stdout.write(u'Starting migration with private topics...')\n topics_never_read = list(PrivateTopicRead.objects\n .filter(user=profile.user)\n .filter(privatepost=F('privatetopic__last_message')).all())\n\n tnrs = []\n for tnr in topics_never_read:\n tnrs.append(tnr.privatetopic.pk)\n\n private_topics_unread = PrivateTopic.objects \\\n .filter(Q(author=profile.user) | Q(participants__in=[profile.user])) \\\n .exclude(pk__in=tnrs) \\\n .order_by(\"-pubdate\") \\\n .distinct()\n\n for private_topic_unread in private_topics_unread:\n answer = private_topic_unread.last_read_post(profile.user)\n if answer is None:\n answer = private_topic_unread.get_last_answer()\n\n # Migrate subscriptions.\n subscription = PrivateTopicAnswerSubscription.objects.get_or_create_active(\n user=profile.user, content_object=private_topic_unread)\n\n # Migrate notifications.\n subscription.send_notification(content=answer, sender=answer.author, send_email=False)\n notification = PrivateTopicAnswerSubscription.objects\\\n 
.get_existing(profile.user, private_topic_unread, is_active=True).last_notification\n notification.pubdate = answer.pubdate\n notification.save()\n\n self.stdout.write(u'Migration about \u00ab {} \u00bb [OK]'.format(private_topic_unread.title))\n\n # Contents.\n self.stdout.write(u'Starting migration with contents...')\n content_followed_pk = ContentReaction.objects\\\n .filter(author=profile.user, related_content__public_version__isnull=False)\\\n .values_list('related_content__pk', flat=True)\n\n content_to_read = ContentRead.objects\\\n .select_related('note')\\\n .select_related('note__author')\\\n .select_related('content')\\\n .select_related('note__related_content__public_version')\\\n .filter(user=profile.user)\\\n .exclude(note__pk=F('content__last_note__pk')).all()\n\n for content_read in content_to_read:\n content = content_read.content\n if content.pk not in content_followed_pk and profile.user not in content.authors.all():\n continue\n reaction = content.first_unread_note(user=profile.user)\n if reaction is None:\n reaction = content.first_note()\n if reaction is None:\n continue\n\n # Migrate subscriptions.\n content_object = reaction.related_content\n subscription = ContentReactionAnswerSubscription.objects.get_or_create_active(\n user=profile.user, content_object=content_object)\n\n # Migrate notifications.\n subscription.send_notification(content=reaction, sender=reaction.author)\n notification = ContentReactionAnswerSubscription.objects\\\n .get_existing(profile.user, content_object, is_active=True).last_notification\n notification.pubdate = reaction.pubdate\n notification.save()\n\n self.stdout.write(u'Migration about \u00ab {} \u00bb [OK]'.format(content.title))\n", "path": "zds/notification/management/commands/migrate_subscriptions.py"}]}
2,002
877
gh_patches_debug_14068
rasdani/github-patches
git_diff
mathesar-foundation__mathesar-3269
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add openAPI Specification for /users/ endpoint Generate spec for `users` endpoint </issue> <code> [start of config/settings/openapi.py] 1 def custom_preprocessing_hook(endpoints): 2 filtered = [] 3 for (path, path_regex, method, callback) in endpoints: 4 # Remove all but DRF API endpoints 5 if path.startswith("/api/db/v0/databases/") or path.startswith("/api/db/v0/data_files/") or path.startswith("/api/db/v0/schemas/") or path.startswith("/api/db/v0/tables/"): 6 filtered.append((path, path_regex, method, callback)) 7 return filtered 8 9 10 def remove_url_prefix_hook(result, **kwargs): 11 # Remove namespace and version URL prefix from the operation Id of the generated API schema 12 for path, path_info in result['paths'].items(): 13 for method, operation in path_info.items(): 14 operation_id = operation.get('operationId') 15 if operation_id: 16 if path.startswith('/api/db/v0/'): 17 operation['operationId'] = operation_id.replace('db_v0_', '') 18 elif path.startswith('/api/ui/v0/'): 19 operation['operationId'] = operation_id.replace('ui_v0_', '') 20 21 return result 22 [end of config/settings/openapi.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/config/settings/openapi.py b/config/settings/openapi.py --- a/config/settings/openapi.py +++ b/config/settings/openapi.py @@ -1,9 +1,15 @@ def custom_preprocessing_hook(endpoints): - filtered = [] - for (path, path_regex, method, callback) in endpoints: - # Remove all but DRF API endpoints - if path.startswith("/api/db/v0/databases/") or path.startswith("/api/db/v0/data_files/") or path.startswith("/api/db/v0/schemas/") or path.startswith("/api/db/v0/tables/"): - filtered.append((path, path_regex, method, callback)) + prefixes = [ + "/api/db/v0/databases/", + "/api/db/v0/data_files/", + "/api/db/v0/schemas/", + "/api/db/v0/tables/", + "/api/db/v0/links/", + "/api/db/v0/queries/", + "/api/ui/v0/databases/", + "/api/ui/v0/users/" + ] + filtered = [(path, path_regex, method, callback) for path, path_regex, method, callback in endpoints if any(path.startswith(prefix) for prefix in prefixes)] return filtered
{"golden_diff": "diff --git a/config/settings/openapi.py b/config/settings/openapi.py\n--- a/config/settings/openapi.py\n+++ b/config/settings/openapi.py\n@@ -1,9 +1,15 @@\n def custom_preprocessing_hook(endpoints):\n- filtered = []\n- for (path, path_regex, method, callback) in endpoints:\n- # Remove all but DRF API endpoints\n- if path.startswith(\"/api/db/v0/databases/\") or path.startswith(\"/api/db/v0/data_files/\") or path.startswith(\"/api/db/v0/schemas/\") or path.startswith(\"/api/db/v0/tables/\"):\n- filtered.append((path, path_regex, method, callback))\n+ prefixes = [\n+ \"/api/db/v0/databases/\",\n+ \"/api/db/v0/data_files/\",\n+ \"/api/db/v0/schemas/\",\n+ \"/api/db/v0/tables/\",\n+ \"/api/db/v0/links/\",\n+ \"/api/db/v0/queries/\",\n+ \"/api/ui/v0/databases/\",\n+ \"/api/ui/v0/users/\"\n+ ]\n+ filtered = [(path, path_regex, method, callback) for path, path_regex, method, callback in endpoints if any(path.startswith(prefix) for prefix in prefixes)]\n return filtered\n", "issue": "Add openAPI Specification for /users/ endpoint\nGenerate spec for `users` endpoint\n", "before_files": [{"content": "def custom_preprocessing_hook(endpoints):\n filtered = []\n for (path, path_regex, method, callback) in endpoints:\n # Remove all but DRF API endpoints\n if path.startswith(\"/api/db/v0/databases/\") or path.startswith(\"/api/db/v0/data_files/\") or path.startswith(\"/api/db/v0/schemas/\") or path.startswith(\"/api/db/v0/tables/\"):\n filtered.append((path, path_regex, method, callback))\n return filtered\n\n\ndef remove_url_prefix_hook(result, **kwargs):\n # Remove namespace and version URL prefix from the operation Id of the generated API schema\n for path, path_info in result['paths'].items():\n for method, operation in path_info.items():\n operation_id = operation.get('operationId')\n if operation_id:\n if path.startswith('/api/db/v0/'):\n operation['operationId'] = operation_id.replace('db_v0_', '')\n elif path.startswith('/api/ui/v0/'):\n operation['operationId'] = operation_id.replace('ui_v0_', '')\n\n return result\n", "path": "config/settings/openapi.py"}]}
816
272
gh_patches_debug_36550
rasdani/github-patches
git_diff
python-gitlab__python-gitlab-2816
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Support /projects/:id/job_token_scope/allowlist I would like to access the job token allow list through the Gitlab API. Docs: - [Get a project’s CI/CD job token inbound allowlist](https://docs.gitlab.com/ee/api/project_job_token_scopes.html#get-a-projects-cicd-job-token-inbound-allowlist) - [Allow access to your project with a job token](https://docs.gitlab.com/ee/ci/jobs/ci_job_token.html#allow-access-to-your-project-with-a-job-token) API endpoint: ``` GET/POST/DELETE /projects/:id/job_token_scope/allowlist ``` I currently have the following manager to access the job token scope allow list: ``` class ProjectJobTokenScopeAllowlistManager( gitlab.mixins.ListMixin, gitlab.mixins.CreateMixin, gitlab.mixins.DeleteMixin, gitlab.base.RESTManager, ): _path = "/projects/{project_id}/job_token_scope/allowlist" _obj_cls = gitlab.v4.objects.Project _from_parent_attrs = {"project_id": "id"} _create_attrs = gitlab.types.RequiredOptional(required=("target_project_id",)) ``` </issue> <code> [start of gitlab/v4/objects/job_token_scope.py] 1 from typing import Any, cast 2 3 from gitlab.base import RESTManager, RESTObject 4 from gitlab.mixins import ( 5 GetWithoutIdMixin, 6 RefreshMixin, 7 SaveMixin, 8 UpdateMethod, 9 UpdateMixin, 10 ) 11 12 __all__ = [ 13 "ProjectJobTokenScope", 14 "ProjectJobTokenScopeManager", 15 ] 16 17 18 class ProjectJobTokenScope(RefreshMixin, SaveMixin, RESTObject): 19 _id_attr = None 20 21 22 class ProjectJobTokenScopeManager(GetWithoutIdMixin, UpdateMixin, RESTManager): 23 _path = "/projects/{project_id}/job_token_scope" 24 _obj_cls = ProjectJobTokenScope 25 _from_parent_attrs = {"project_id": "id"} 26 _update_method = UpdateMethod.PATCH 27 28 def get(self, **kwargs: Any) -> ProjectJobTokenScope: 29 return cast(ProjectJobTokenScope, super().get(**kwargs)) 30 [end of gitlab/v4/objects/job_token_scope.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/gitlab/v4/objects/job_token_scope.py b/gitlab/v4/objects/job_token_scope.py --- a/gitlab/v4/objects/job_token_scope.py +++ b/gitlab/v4/objects/job_token_scope.py @@ -2,12 +2,17 @@ from gitlab.base import RESTManager, RESTObject from gitlab.mixins import ( + CreateMixin, + DeleteMixin, GetWithoutIdMixin, + ListMixin, + ObjectDeleteMixin, RefreshMixin, SaveMixin, UpdateMethod, UpdateMixin, ) +from gitlab.types import RequiredOptional __all__ = [ "ProjectJobTokenScope", @@ -18,6 +23,9 @@ class ProjectJobTokenScope(RefreshMixin, SaveMixin, RESTObject): _id_attr = None + allowlist: "AllowlistProjectManager" + groups_allowlist: "AllowlistGroupManager" + class ProjectJobTokenScopeManager(GetWithoutIdMixin, UpdateMixin, RESTManager): _path = "/projects/{project_id}/job_token_scope" @@ -27,3 +35,43 @@ def get(self, **kwargs: Any) -> ProjectJobTokenScope: return cast(ProjectJobTokenScope, super().get(**kwargs)) + + +class AllowlistProject(ObjectDeleteMixin, RESTObject): + _id_attr = "target_project_id" # note: only true for create endpoint + + def get_id(self) -> int: + """Returns the id of the resource. This override deals with + the fact that either an `id` or a `target_project_id` attribute + is returned by the server depending on the endpoint called.""" + target_project_id = cast(int, super().get_id()) + if target_project_id is not None: + return target_project_id + return cast(int, self.id) + + +class AllowlistProjectManager(ListMixin, CreateMixin, DeleteMixin, RESTManager): + _path = "/projects/{project_id}/job_token_scope/allowlist" + _obj_cls = AllowlistProject + _from_parent_attrs = {"project_id": "project_id"} + _create_attrs = RequiredOptional(required=("target_project_id",)) + + +class AllowlistGroup(ObjectDeleteMixin, RESTObject): + _id_attr = "target_group_id" # note: only true for create endpoint + + def get_id(self) -> int: + """Returns the id of the resource. This override deals with + the fact that either an `id` or a `target_group_id` attribute + is returned by the server depending on the endpoint called.""" + target_group_id = cast(int, super().get_id()) + if target_group_id is not None: + return target_group_id + return cast(int, self.id) + + +class AllowlistGroupManager(ListMixin, CreateMixin, DeleteMixin, RESTManager): + _path = "/projects/{project_id}/job_token_scope/groups_allowlist" + _obj_cls = AllowlistGroup + _from_parent_attrs = {"project_id": "project_id"} + _create_attrs = RequiredOptional(required=("target_group_id",))
{"golden_diff": "diff --git a/gitlab/v4/objects/job_token_scope.py b/gitlab/v4/objects/job_token_scope.py\n--- a/gitlab/v4/objects/job_token_scope.py\n+++ b/gitlab/v4/objects/job_token_scope.py\n@@ -2,12 +2,17 @@\n \n from gitlab.base import RESTManager, RESTObject\n from gitlab.mixins import (\n+ CreateMixin,\n+ DeleteMixin,\n GetWithoutIdMixin,\n+ ListMixin,\n+ ObjectDeleteMixin,\n RefreshMixin,\n SaveMixin,\n UpdateMethod,\n UpdateMixin,\n )\n+from gitlab.types import RequiredOptional\n \n __all__ = [\n \"ProjectJobTokenScope\",\n@@ -18,6 +23,9 @@\n class ProjectJobTokenScope(RefreshMixin, SaveMixin, RESTObject):\n _id_attr = None\n \n+ allowlist: \"AllowlistProjectManager\"\n+ groups_allowlist: \"AllowlistGroupManager\"\n+\n \n class ProjectJobTokenScopeManager(GetWithoutIdMixin, UpdateMixin, RESTManager):\n _path = \"/projects/{project_id}/job_token_scope\"\n@@ -27,3 +35,43 @@\n \n def get(self, **kwargs: Any) -> ProjectJobTokenScope:\n return cast(ProjectJobTokenScope, super().get(**kwargs))\n+\n+\n+class AllowlistProject(ObjectDeleteMixin, RESTObject):\n+ _id_attr = \"target_project_id\" # note: only true for create endpoint\n+\n+ def get_id(self) -> int:\n+ \"\"\"Returns the id of the resource. This override deals with\n+ the fact that either an `id` or a `target_project_id` attribute\n+ is returned by the server depending on the endpoint called.\"\"\"\n+ target_project_id = cast(int, super().get_id())\n+ if target_project_id is not None:\n+ return target_project_id\n+ return cast(int, self.id)\n+\n+\n+class AllowlistProjectManager(ListMixin, CreateMixin, DeleteMixin, RESTManager):\n+ _path = \"/projects/{project_id}/job_token_scope/allowlist\"\n+ _obj_cls = AllowlistProject\n+ _from_parent_attrs = {\"project_id\": \"project_id\"}\n+ _create_attrs = RequiredOptional(required=(\"target_project_id\",))\n+\n+\n+class AllowlistGroup(ObjectDeleteMixin, RESTObject):\n+ _id_attr = \"target_group_id\" # note: only true for create endpoint\n+\n+ def get_id(self) -> int:\n+ \"\"\"Returns the id of the resource. This override deals with\n+ the fact that either an `id` or a `target_group_id` attribute\n+ is returned by the server depending on the endpoint called.\"\"\"\n+ target_group_id = cast(int, super().get_id())\n+ if target_group_id is not None:\n+ return target_group_id\n+ return cast(int, self.id)\n+\n+\n+class AllowlistGroupManager(ListMixin, CreateMixin, DeleteMixin, RESTManager):\n+ _path = \"/projects/{project_id}/job_token_scope/groups_allowlist\"\n+ _obj_cls = AllowlistGroup\n+ _from_parent_attrs = {\"project_id\": \"project_id\"}\n+ _create_attrs = RequiredOptional(required=(\"target_group_id\",))\n", "issue": "Support /projects/:id/job_token_scope/allowlist\nI would like to access the job token allow list through the Gitlab API. 
\r\n\r\nDocs: \r\n- [Get a project\u2019s CI/CD job token inbound allowlist](https://docs.gitlab.com/ee/api/project_job_token_scopes.html#get-a-projects-cicd-job-token-inbound-allowlist)\r\n- [Allow access to your project with a job token](https://docs.gitlab.com/ee/ci/jobs/ci_job_token.html#allow-access-to-your-project-with-a-job-token)\r\n\r\nAPI endpoint: \r\n```\r\nGET/POST/DELETE /projects/:id/job_token_scope/allowlist\r\n```\r\n\r\nI currently have the following manager to access the job token scope allow list:\r\n\r\n```\r\nclass ProjectJobTokenScopeAllowlistManager(\r\n gitlab.mixins.ListMixin,\r\n gitlab.mixins.CreateMixin,\r\n gitlab.mixins.DeleteMixin,\r\n gitlab.base.RESTManager,\r\n):\r\n _path = \"/projects/{project_id}/job_token_scope/allowlist\"\r\n _obj_cls = gitlab.v4.objects.Project\r\n _from_parent_attrs = {\"project_id\": \"id\"}\r\n _create_attrs = gitlab.types.RequiredOptional(required=(\"target_project_id\",))\r\n```\n", "before_files": [{"content": "from typing import Any, cast\n\nfrom gitlab.base import RESTManager, RESTObject\nfrom gitlab.mixins import (\n GetWithoutIdMixin,\n RefreshMixin,\n SaveMixin,\n UpdateMethod,\n UpdateMixin,\n)\n\n__all__ = [\n \"ProjectJobTokenScope\",\n \"ProjectJobTokenScopeManager\",\n]\n\n\nclass ProjectJobTokenScope(RefreshMixin, SaveMixin, RESTObject):\n _id_attr = None\n\n\nclass ProjectJobTokenScopeManager(GetWithoutIdMixin, UpdateMixin, RESTManager):\n _path = \"/projects/{project_id}/job_token_scope\"\n _obj_cls = ProjectJobTokenScope\n _from_parent_attrs = {\"project_id\": \"id\"}\n _update_method = UpdateMethod.PATCH\n\n def get(self, **kwargs: Any) -> ProjectJobTokenScope:\n return cast(ProjectJobTokenScope, super().get(**kwargs))\n", "path": "gitlab/v4/objects/job_token_scope.py"}]}
1,051
705
gh_patches_debug_6178
rasdani/github-patches
git_diff
Nitrate__Nitrate-440
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add Django 2.2.x Add a testenv in tox.ini for Django 2.2. Add "Framework :: Django :: 2.2" to ``setup.py``. Update README to add version `2.2`. </issue> <code> [start of setup.py] 1 # -*- coding: utf-8 -*- 2 3 from setuptools import setup, find_packages 4 5 6 with open('VERSION.txt', 'r') as f: 7 pkg_version = f.read().strip() 8 9 10 def get_long_description(): 11 with open('README.rst', 'r') as f: 12 return f.read() 13 14 15 install_requires = [ 16 'beautifulsoup4 >= 4.1.1', 17 'django >= 2.0,<3.0', 18 'django-contrib-comments == 1.8.0', 19 'django-tinymce == 2.7.0', 20 'django-uuslug == 1.1.8', 21 'html2text', 22 'odfpy >= 0.9.6', 23 'python-bugzilla', 24 'xmltodict', 25 'kobo == 0.9.0' 26 ] 27 28 extras_require = { 29 'mysql': ['mysqlclient >= 1.2.3'], 30 'pgsql': ['psycopg2 == 2.7.5'], 31 32 # Required for tcms.auth.backends.KerberosBackend 33 'krbauth': [ 34 'kerberos == 1.2.5' 35 ], 36 37 # Packages for building documentation 38 'docs': [ 39 'Sphinx >= 1.1.2', 40 'sphinx_rtd_theme', 41 ], 42 43 # Necessary packages for running tests 44 'tests': [ 45 'beautifulsoup4', 46 'coverage', 47 'factory_boy', 48 'flake8', 49 'mock', 50 'pytest < 4.2.0', 51 'pytest-cov', 52 'pytest-django', 53 ], 54 55 # Contain tools that assists the development 56 'devtools': [ 57 'django-debug-toolbar == 1.7', 58 'tox', 59 'django-extensions', 60 'pygraphviz', 61 'future-breakpoint', 62 ], 63 64 # Required packages required to run async tasks 65 'async': [ 66 'celery == 4.2.0', 67 ], 68 69 'multiauth': [ 70 'social-auth-app-django == 3.1.0', 71 ] 72 } 73 74 setup( 75 name='Nitrate', 76 version=pkg_version, 77 description='Test Case Management System', 78 long_description=get_long_description(), 79 author='Nitrate Team', 80 maintainer='Chenxiong Qi', 81 maintainer_email='[email protected]', 82 url='https://github.com/Nitrate/Nitrate/', 83 license='GPLv2+', 84 keywords='test case', 85 install_requires=install_requires, 86 extras_require=extras_require, 87 python_requires='>=3.6', 88 package_dir={'': 'src'}, 89 packages=find_packages('src', exclude=['test*']), 90 include_package_data=True, 91 zip_safe=False, 92 classifiers=[ 93 'Framework :: Django', 94 'Framework :: Django :: 2.0', 95 'Framework :: Django :: 2.1', 96 'Intended Audience :: Developers', 97 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)', 98 'Programming Language :: Python :: 3', 99 'Programming Language :: Python :: 3.6', 100 'Programming Language :: Python :: 3.7', 101 'Programming Language :: Python :: 3 :: Only', 102 'Topic :: Software Development :: Quality Assurance', 103 'Topic :: Software Development :: Testing', 104 ], 105 project_urls={ 106 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues', 107 'Source Code': 'https://github.com/Nitrate/Nitrate', 108 'Documentation': 'https://nitrate.readthedocs.io/', 109 }, 110 ) 111 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -93,6 +93,7 @@ 'Framework :: Django', 'Framework :: Django :: 2.0', 'Framework :: Django :: 2.1', + 'Framework :: Django :: 2.2', 'Intended Audience :: Developers', 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)', 'Programming Language :: Python :: 3',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -93,6 +93,7 @@\n 'Framework :: Django',\n 'Framework :: Django :: 2.0',\n 'Framework :: Django :: 2.1',\n+ 'Framework :: Django :: 2.2',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 3',\n", "issue": "Add Django 2.2.x\nAdd a testenv in tox.ini for Django 2.2.\r\nAdd \"Framework :: Django :: 2.2\" to ``setup.py``.\r\nUpdate README to add version `2.2`.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'beautifulsoup4 >= 4.1.1',\n 'django >= 2.0,<3.0',\n 'django-contrib-comments == 1.8.0',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'html2text',\n 'odfpy >= 0.9.6',\n 'python-bugzilla',\n 'xmltodict',\n 'kobo == 0.9.0'\n]\n\nextras_require = {\n 'mysql': ['mysqlclient >= 1.2.3'],\n 'pgsql': ['psycopg2 == 2.7.5'],\n\n # Required for tcms.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'beautifulsoup4',\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'mock',\n 'pytest < 4.2.0',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar == 1.7',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n 'future-breakpoint',\n ],\n\n # Required packages required to run async tasks\n 'async': [\n 'celery == 4.2.0',\n ],\n\n 'multiauth': [\n 'social-auth-app-django == 3.1.0',\n ]\n}\n\nsetup(\n name='Nitrate',\n version=pkg_version,\n description='Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n python_requires='>=3.6',\n package_dir={'': 'src'},\n packages=find_packages('src', exclude=['test*']),\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 2.0',\n 'Framework :: Django :: 2.1',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n project_urls={\n 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',\n 'Source Code': 'https://github.com/Nitrate/Nitrate',\n 'Documentation': 'https://nitrate.readthedocs.io/',\n },\n)\n", "path": "setup.py"}]}
1,593
112
gh_patches_debug_14972
rasdani/github-patches
git_diff
kserve__kserve-1349
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Test] Fix lightgbm test in new test-infra During the process I helped kfserving migrate to new test-infra, noticed that lightgbm failed every time, and to mitigate the issue, I disabled it in the pytest. I'm working on the lightgbm test cases, and see if I can reproduce locally. Need to spend time and dig into it Detailed can be found https://github.com/kubeflow/kfserving/pull/1329#issuecomment-770325947 /cc @yuzisun /assign </issue> <code> [start of python/lgbserver/setup.py] 1 # Copyright 2019 kubeflow.org. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from setuptools import setup, find_packages 16 17 tests_require = [ 18 'pytest', 19 'pytest-asyncio', 20 'pytest-tornasync', 21 'mypy' 22 ] 23 24 setup( 25 name='lgbserver', 26 version='0.1.0', 27 author_email='[email protected]', 28 license='../../LICENSE.txt', 29 url='https://github.com/kubeflow/kfserving/python/kfserving/lgbserver', 30 description='Model Server implementation for LightGBM. \ 31 Not intended for use outside KFServing Frameworks Images', 32 long_description=open('README.md').read(), 33 python_requires='>3.4', 34 packages=find_packages("lgbserver"), 35 install_requires=[ 36 "kfserving>=0.4.0", 37 "lightgbm == 2.3.1", 38 "pandas == 0.25.3", 39 "argparse >= 1.4.0" 40 ], 41 tests_require=tests_require, 42 extras_require={'test': tests_require} 43 ) 44 [end of python/lgbserver/setup.py] [start of python/aixexplainer/setup.py] 1 # Copyright 2019 kubeflow.org. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from setuptools import setup, find_packages 16 17 tests_require = [ 18 'pytest', 19 'pytest-tornasync', 20 'mypy' 21 ] 22 setup( 23 name='aixserver', 24 version='0.2.1', 25 author_email='[email protected]', 26 license='https://github.com/kubeflow/kfserving/LICENSE', 27 url='https://github.com/kubeflow/kfserving/python/aixserver', 28 description='Model Server implementation for AI eXplainability with LIME. 
\ 29 Not intended for use outside KFServing Frameworks Images', 30 long_description=open('README.md').read(), 31 python_requires='>3.4', 32 packages=find_packages("aixserver"), 33 install_requires=[ 34 "kfserving>=0.5.0", 35 "argparse >= 1.4.0", 36 "aix360 >= 0.1.0", 37 "lime >= 0.1.1.37", 38 "nest_asyncio>=1.4.0" 39 ], 40 tests_require=tests_require, 41 extras_require={'test': tests_require} 42 ) 43 [end of python/aixexplainer/setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/python/aixexplainer/setup.py b/python/aixexplainer/setup.py --- a/python/aixexplainer/setup.py +++ b/python/aixexplainer/setup.py @@ -35,7 +35,8 @@ "argparse >= 1.4.0", "aix360 >= 0.1.0", "lime >= 0.1.1.37", - "nest_asyncio>=1.4.0" + "nest_asyncio>=1.4.0", + "cvxpy == 1.1.7" ], tests_require=tests_require, extras_require={'test': tests_require} diff --git a/python/lgbserver/setup.py b/python/lgbserver/setup.py --- a/python/lgbserver/setup.py +++ b/python/lgbserver/setup.py @@ -36,7 +36,8 @@ "kfserving>=0.4.0", "lightgbm == 2.3.1", "pandas == 0.25.3", - "argparse >= 1.4.0" + "argparse >= 1.4.0", + "numpy == 1.19.5", ], tests_require=tests_require, extras_require={'test': tests_require}
{"golden_diff": "diff --git a/python/aixexplainer/setup.py b/python/aixexplainer/setup.py\n--- a/python/aixexplainer/setup.py\n+++ b/python/aixexplainer/setup.py\n@@ -35,7 +35,8 @@\n \"argparse >= 1.4.0\",\n \"aix360 >= 0.1.0\",\n \"lime >= 0.1.1.37\",\n- \"nest_asyncio>=1.4.0\"\n+ \"nest_asyncio>=1.4.0\",\n+ \"cvxpy == 1.1.7\"\n ],\n tests_require=tests_require,\n extras_require={'test': tests_require}\ndiff --git a/python/lgbserver/setup.py b/python/lgbserver/setup.py\n--- a/python/lgbserver/setup.py\n+++ b/python/lgbserver/setup.py\n@@ -36,7 +36,8 @@\n \"kfserving>=0.4.0\",\n \"lightgbm == 2.3.1\",\n \"pandas == 0.25.3\",\n- \"argparse >= 1.4.0\"\n+ \"argparse >= 1.4.0\",\n+ \"numpy == 1.19.5\",\n ],\n tests_require=tests_require,\n extras_require={'test': tests_require}\n", "issue": "[Test] Fix lightgbm test in new test-infra\nDuring the process I helped kfserving migrate to new test-infra, noticed that lightgbm failed every time, and to mitigate the issue, I disabled it in the pytest.\r\n\r\nI'm working on the lightgbm test cases, and see if I can reproduce locally. \r\n\r\nNeed to spend time and dig into it \r\n\r\nDetailed can be found https://github.com/kubeflow/kfserving/pull/1329#issuecomment-770325947\r\n\r\n/cc @yuzisun \r\n/assign \n", "before_files": [{"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'pytest',\n 'pytest-asyncio',\n 'pytest-tornasync',\n 'mypy'\n]\n\nsetup(\n name='lgbserver',\n version='0.1.0',\n author_email='[email protected]',\n license='../../LICENSE.txt',\n url='https://github.com/kubeflow/kfserving/python/kfserving/lgbserver',\n description='Model Server implementation for LightGBM. 
\\\n Not intended for use outside KFServing Frameworks Images',\n long_description=open('README.md').read(),\n python_requires='>3.4',\n packages=find_packages(\"lgbserver\"),\n install_requires=[\n \"kfserving>=0.4.0\",\n \"lightgbm == 2.3.1\",\n \"pandas == 0.25.3\",\n \"argparse >= 1.4.0\"\n ],\n tests_require=tests_require,\n extras_require={'test': tests_require}\n)\n", "path": "python/lgbserver/setup.py"}, {"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'pytest',\n 'pytest-tornasync',\n 'mypy'\n]\nsetup(\n name='aixserver',\n version='0.2.1',\n author_email='[email protected]',\n license='https://github.com/kubeflow/kfserving/LICENSE',\n url='https://github.com/kubeflow/kfserving/python/aixserver',\n description='Model Server implementation for AI eXplainability with LIME. \\\n Not intended for use outside KFServing Frameworks Images',\n long_description=open('README.md').read(),\n python_requires='>3.4',\n packages=find_packages(\"aixserver\"),\n install_requires=[\n \"kfserving>=0.5.0\",\n \"argparse >= 1.4.0\",\n \"aix360 >= 0.1.0\",\n \"lime >= 0.1.1.37\",\n \"nest_asyncio>=1.4.0\"\n ],\n tests_require=tests_require,\n extras_require={'test': tests_require}\n)\n", "path": "python/aixexplainer/setup.py"}]}
1,572
295
gh_patches_debug_13923
rasdani/github-patches
git_diff
litestar-org__litestar-1791
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Enhancement: Only run Sonar & Snyk on forks ### Summary When forking the repo, tests are brought with it but Snyk & Sonar CI fails because the secrets are not set. ### Basic Example Add ` && github.repository_owner == 'starlite-api'` to the if-check in `ci.yaml` to prevent these two CI items from running on non-upstream repos. ### Drawbacks and Impact _No response_ ### Unresolved questions _No response_ </issue> <code> [start of litestar/types/composite_types.py] 1 from functools import partial 2 from os import PathLike 3 from pathlib import Path 4 from typing import ( 5 TYPE_CHECKING, 6 Any, 7 AsyncIterable, 8 AsyncIterator, 9 Callable, 10 Dict, 11 Iterable, 12 Iterator, 13 Literal, 14 Mapping, 15 Sequence, 16 Set, 17 Tuple, 18 Type, 19 TypeVar, 20 Union, 21 ) 22 23 from litestar.enums import ScopeType 24 25 from .asgi_types import ASGIApp 26 from .callable_types import AnyCallable, ExceptionHandler 27 28 if TYPE_CHECKING: 29 from litestar.datastructures.cookie import Cookie 30 from litestar.datastructures.response_header import ResponseHeader 31 from litestar.di import Provide 32 from litestar.middleware.base import DefineMiddleware, MiddlewareProtocol 33 from litestar.params import ParameterKwarg 34 else: 35 BaseHTTPMiddleware = Any 36 Cookie = Any 37 DefineMiddleware = Any 38 ImmutableState = Any 39 MiddlewareProtocol = Any 40 ParameterKwarg = Any 41 Provide = Any 42 ResponseHeader = Any 43 44 T = TypeVar("T") 45 46 47 Dependencies = Mapping[str, Union[Provide, AnyCallable]] 48 ExceptionHandlersMap = Mapping[Union[int, Type[Exception]], ExceptionHandler] 49 MaybePartial = Union[T, partial] 50 Middleware = Union[ 51 Callable[..., ASGIApp], DefineMiddleware, Iterator[Tuple[ASGIApp, Dict[str, Any]]], Type[MiddlewareProtocol] 52 ] 53 ParametersMap = Mapping[str, ParameterKwarg] 54 PathType = Union[Path, PathLike, str] 55 ResponseCookies = Union[Sequence[Cookie], Mapping[str, str]] 56 ResponseHeaders = Union[Sequence[ResponseHeader], Mapping[str, str]] 57 Scopes = Set[Literal[ScopeType.HTTP, ScopeType.WEBSOCKET]] 58 StreamType = Union[Iterable[T], Iterator[T], AsyncIterable[T], AsyncIterator[T]] 59 TypeEncodersMap = Mapping[Any, Callable[[Any], Any]] 60 [end of litestar/types/composite_types.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/litestar/types/composite_types.py b/litestar/types/composite_types.py --- a/litestar/types/composite_types.py +++ b/litestar/types/composite_types.py @@ -12,6 +12,7 @@ Iterator, Literal, Mapping, + MutableMapping, Sequence, Set, Tuple, @@ -45,7 +46,7 @@ Dependencies = Mapping[str, Union[Provide, AnyCallable]] -ExceptionHandlersMap = Mapping[Union[int, Type[Exception]], ExceptionHandler] +ExceptionHandlersMap = MutableMapping[Union[int, Type[Exception]], ExceptionHandler] MaybePartial = Union[T, partial] Middleware = Union[ Callable[..., ASGIApp], DefineMiddleware, Iterator[Tuple[ASGIApp, Dict[str, Any]]], Type[MiddlewareProtocol]
{"golden_diff": "diff --git a/litestar/types/composite_types.py b/litestar/types/composite_types.py\n--- a/litestar/types/composite_types.py\n+++ b/litestar/types/composite_types.py\n@@ -12,6 +12,7 @@\n Iterator,\n Literal,\n Mapping,\n+ MutableMapping,\n Sequence,\n Set,\n Tuple,\n@@ -45,7 +46,7 @@\n \n \n Dependencies = Mapping[str, Union[Provide, AnyCallable]]\n-ExceptionHandlersMap = Mapping[Union[int, Type[Exception]], ExceptionHandler]\n+ExceptionHandlersMap = MutableMapping[Union[int, Type[Exception]], ExceptionHandler]\n MaybePartial = Union[T, partial]\n Middleware = Union[\n Callable[..., ASGIApp], DefineMiddleware, Iterator[Tuple[ASGIApp, Dict[str, Any]]], Type[MiddlewareProtocol]\n", "issue": "Enhancement: Only run Sonar & Snyk on forks\n### Summary\n\nWhen forking the repo, tests are brought with it but Snyk & Sonar CI fails because the secrets are not set.\n\n### Basic Example\n\nAdd ` && github.repository_owner == 'starlite-api'` to the if-check in `ci.yaml` to prevent these two CI items from running on non-upstream repos.\n\n### Drawbacks and Impact\n\n_No response_\n\n### Unresolved questions\n\n_No response_\n", "before_files": [{"content": "from functools import partial\nfrom os import PathLike\nfrom pathlib import Path\nfrom typing import (\n TYPE_CHECKING,\n Any,\n AsyncIterable,\n AsyncIterator,\n Callable,\n Dict,\n Iterable,\n Iterator,\n Literal,\n Mapping,\n Sequence,\n Set,\n Tuple,\n Type,\n TypeVar,\n Union,\n)\n\nfrom litestar.enums import ScopeType\n\nfrom .asgi_types import ASGIApp\nfrom .callable_types import AnyCallable, ExceptionHandler\n\nif TYPE_CHECKING:\n from litestar.datastructures.cookie import Cookie\n from litestar.datastructures.response_header import ResponseHeader\n from litestar.di import Provide\n from litestar.middleware.base import DefineMiddleware, MiddlewareProtocol\n from litestar.params import ParameterKwarg\nelse:\n BaseHTTPMiddleware = Any\n Cookie = Any\n DefineMiddleware = Any\n ImmutableState = Any\n MiddlewareProtocol = Any\n ParameterKwarg = Any\n Provide = Any\n ResponseHeader = Any\n\nT = TypeVar(\"T\")\n\n\nDependencies = Mapping[str, Union[Provide, AnyCallable]]\nExceptionHandlersMap = Mapping[Union[int, Type[Exception]], ExceptionHandler]\nMaybePartial = Union[T, partial]\nMiddleware = Union[\n Callable[..., ASGIApp], DefineMiddleware, Iterator[Tuple[ASGIApp, Dict[str, Any]]], Type[MiddlewareProtocol]\n]\nParametersMap = Mapping[str, ParameterKwarg]\nPathType = Union[Path, PathLike, str]\nResponseCookies = Union[Sequence[Cookie], Mapping[str, str]]\nResponseHeaders = Union[Sequence[ResponseHeader], Mapping[str, str]]\nScopes = Set[Literal[ScopeType.HTTP, ScopeType.WEBSOCKET]]\nStreamType = Union[Iterable[T], Iterator[T], AsyncIterable[T], AsyncIterator[T]]\nTypeEncodersMap = Mapping[Any, Callable[[Any], Any]]\n", "path": "litestar/types/composite_types.py"}]}
1,151
181
gh_patches_debug_7610
rasdani/github-patches
git_diff
pytorch__ignite-1675
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Loss automatically detaching inputs breaks some workflows ## 🐛 Bug description https://github.com/pytorch/ignite/blob/master/ignite/metrics/loss.py#L60 Before 0.4.3, the `y_pred` and `y` were passed without calling detach. Now because of detach a number of usecases are no longer possible. An example is: https://docs.gpytorch.ai/en/v1.3.1/marginal_log_likelihoods.html#variationalelbo The output of `model(train_x)` is an object which does not support detach. This leads to a crash when passing the `mll` function to Loss as a `loss_fn`. Another example is https://github.com/y0ast/deterministic-uncertainty-quantification/blob/master/train_duq_cifar.py#L153 The loss is dependent on the gradient, which means that by calling detach the loss cannot be computed anymore. I have been trying to work around it, but I can't figure out a nice way. https://pytorch.org/ignite/metrics.html#ignite.metrics.Average is not input size aware so it cannot correctly compute the average of the loss. </issue> <code> [start of ignite/metrics/loss.py] 1 from typing import Callable, Dict, Sequence, Tuple, Union, cast 2 3 import torch 4 5 from ignite.exceptions import NotComputableError 6 from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce 7 8 __all__ = ["Loss"] 9 10 11 class Loss(Metric): 12 """ 13 Calculates the average loss according to the passed loss_fn. 14 15 Args: 16 loss_fn (callable): a callable taking a prediction tensor, a target 17 tensor, optionally other arguments, and returns the average loss 18 over all observations in the batch. 19 output_transform (callable): a callable that is used to transform the 20 :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the 21 form expected by the metric. 22 This can be useful if, for example, you have a multi-output model and 23 you want to compute the metric with respect to one of the outputs. 24 The output is expected to be a tuple `(prediction, target)` or 25 (prediction, target, kwargs) where kwargs is a dictionary of extra 26 keywords arguments. If extra keywords arguments are provided they are passed to `loss_fn`. 27 batch_size (callable): a callable taking a target tensor that returns the 28 first dimension size (usually the batch size). 29 device (str or torch.device): specifies which device updates are accumulated on. Setting the 30 metric's device to be the same as your ``update`` arguments ensures the ``update`` method is 31 non-blocking. By default, CPU. 
32 33 """ 34 35 required_output_keys = None 36 37 def __init__( 38 self, 39 loss_fn: Callable, 40 output_transform: Callable = lambda x: x, 41 batch_size: Callable = lambda x: len(x), 42 device: Union[str, torch.device] = torch.device("cpu"), 43 ): 44 super(Loss, self).__init__(output_transform, device=device) 45 self._loss_fn = loss_fn 46 self._batch_size = batch_size 47 48 @reinit__is_reduced 49 def reset(self) -> None: 50 self._sum = torch.tensor(0.0, device=self._device) 51 self._num_examples = 0 52 53 @reinit__is_reduced 54 def update(self, output: Sequence[Union[torch.Tensor, Dict]]) -> None: 55 if len(output) == 2: 56 y_pred, y = cast(Tuple[torch.Tensor, torch.Tensor], output) 57 kwargs = {} # type: Dict 58 else: 59 y_pred, y, kwargs = cast(Tuple[torch.Tensor, torch.Tensor, Dict], output) 60 average_loss = self._loss_fn(y_pred.detach(), y.detach(), **kwargs) 61 62 if len(average_loss.shape) != 0: 63 raise ValueError("loss_fn did not return the average loss.") 64 65 n = self._batch_size(y) 66 self._sum += average_loss.to(self._device) * n 67 self._num_examples += n 68 69 @sync_all_reduce("_sum", "_num_examples") 70 def compute(self) -> float: 71 if self._num_examples == 0: 72 raise NotComputableError("Loss must have at least one example before it can be computed.") 73 return self._sum.item() / self._num_examples 74 [end of ignite/metrics/loss.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ignite/metrics/loss.py b/ignite/metrics/loss.py --- a/ignite/metrics/loss.py +++ b/ignite/metrics/loss.py @@ -57,7 +57,7 @@ kwargs = {} # type: Dict else: y_pred, y, kwargs = cast(Tuple[torch.Tensor, torch.Tensor, Dict], output) - average_loss = self._loss_fn(y_pred.detach(), y.detach(), **kwargs) + average_loss = self._loss_fn(y_pred, y, **kwargs).detach() if len(average_loss.shape) != 0: raise ValueError("loss_fn did not return the average loss.")
{"golden_diff": "diff --git a/ignite/metrics/loss.py b/ignite/metrics/loss.py\n--- a/ignite/metrics/loss.py\n+++ b/ignite/metrics/loss.py\n@@ -57,7 +57,7 @@\n kwargs = {} # type: Dict\n else:\n y_pred, y, kwargs = cast(Tuple[torch.Tensor, torch.Tensor, Dict], output)\n- average_loss = self._loss_fn(y_pred.detach(), y.detach(), **kwargs)\n+ average_loss = self._loss_fn(y_pred, y, **kwargs).detach()\n \n if len(average_loss.shape) != 0:\n raise ValueError(\"loss_fn did not return the average loss.\")\n", "issue": "Loss automatically detaching inputs breaks some workflows\n## \ud83d\udc1b Bug description\r\n\r\nhttps://github.com/pytorch/ignite/blob/master/ignite/metrics/loss.py#L60\r\n\r\nBefore 0.4.3, the `y_pred` and `y` were passed without calling detach. Now because of detach a number of usecases are no longer possible.\r\n\r\nAn example is: https://docs.gpytorch.ai/en/v1.3.1/marginal_log_likelihoods.html#variationalelbo\r\n\r\nThe output of `model(train_x)` is an object which does not support detach. This leads to a crash when passing the `mll` function to Loss as a `loss_fn`.\r\n\r\nAnother example is https://github.com/y0ast/deterministic-uncertainty-quantification/blob/master/train_duq_cifar.py#L153\r\n\r\nThe loss is dependent on the gradient, which means that by calling detach the loss cannot be computed anymore.\r\n\r\nI have been trying to work around it, but I can't figure out a nice way. https://pytorch.org/ignite/metrics.html#ignite.metrics.Average is not input size aware so it cannot correctly compute the average of the loss.\n", "before_files": [{"content": "from typing import Callable, Dict, Sequence, Tuple, Union, cast\n\nimport torch\n\nfrom ignite.exceptions import NotComputableError\nfrom ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce\n\n__all__ = [\"Loss\"]\n\n\nclass Loss(Metric):\n \"\"\"\n Calculates the average loss according to the passed loss_fn.\n\n Args:\n loss_fn (callable): a callable taking a prediction tensor, a target\n tensor, optionally other arguments, and returns the average loss\n over all observations in the batch.\n output_transform (callable): a callable that is used to transform the\n :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the\n form expected by the metric.\n This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n The output is expected to be a tuple `(prediction, target)` or\n (prediction, target, kwargs) where kwargs is a dictionary of extra\n keywords arguments. If extra keywords arguments are provided they are passed to `loss_fn`.\n batch_size (callable): a callable taking a target tensor that returns the\n first dimension size (usually the batch size).\n device (str or torch.device): specifies which device updates are accumulated on. Setting the\n metric's device to be the same as your ``update`` arguments ensures the ``update`` method is\n non-blocking. 
By default, CPU.\n\n \"\"\"\n\n required_output_keys = None\n\n def __init__(\n self,\n loss_fn: Callable,\n output_transform: Callable = lambda x: x,\n batch_size: Callable = lambda x: len(x),\n device: Union[str, torch.device] = torch.device(\"cpu\"),\n ):\n super(Loss, self).__init__(output_transform, device=device)\n self._loss_fn = loss_fn\n self._batch_size = batch_size\n\n @reinit__is_reduced\n def reset(self) -> None:\n self._sum = torch.tensor(0.0, device=self._device)\n self._num_examples = 0\n\n @reinit__is_reduced\n def update(self, output: Sequence[Union[torch.Tensor, Dict]]) -> None:\n if len(output) == 2:\n y_pred, y = cast(Tuple[torch.Tensor, torch.Tensor], output)\n kwargs = {} # type: Dict\n else:\n y_pred, y, kwargs = cast(Tuple[torch.Tensor, torch.Tensor, Dict], output)\n average_loss = self._loss_fn(y_pred.detach(), y.detach(), **kwargs)\n\n if len(average_loss.shape) != 0:\n raise ValueError(\"loss_fn did not return the average loss.\")\n\n n = self._batch_size(y)\n self._sum += average_loss.to(self._device) * n\n self._num_examples += n\n\n @sync_all_reduce(\"_sum\", \"_num_examples\")\n def compute(self) -> float:\n if self._num_examples == 0:\n raise NotComputableError(\"Loss must have at least one example before it can be computed.\")\n return self._sum.item() / self._num_examples\n", "path": "ignite/metrics/loss.py"}]}
1621
152
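The ignite `Loss` entry above comes down to an ordering question: detaching the model outputs before calling the loss function breaks gradient-dependent losses (and objects with no `.detach()` at all), while detaching only the resulting scalar keeps them working. Below is a minimal, self-contained sketch of the second pattern, which is what the accepted diff adopts; the helper name and accumulator variables are illustrative, not part of the ignite codebase.

```python
def accumulate_batch_loss(loss_fn, y_pred, y, running_sum, num_examples, batch_size):
    # y_pred and y are torch tensors. They are passed to loss_fn un-detached, so
    # losses that need the autograd graph (e.g. gradient penalties) still work;
    # only the resulting scalar is detached before accumulation.
    average_loss = loss_fn(y_pred, y).detach()
    if len(average_loss.shape) != 0:
        raise ValueError("loss_fn did not return the average loss.")
    running_sum = running_sum + average_loss * batch_size
    return running_sum, num_examples + batch_size
```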
gh_patches_debug_1493
rasdani/github-patches
git_diff
plotly__dash-601
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Extract meta failure on missing props docstring. If a props is missing a docstring, it will fail to generate the component with js error, `Cannot read property 'length' of undefined`. https://community.plot.ly/t/dash-component-creation-javascript-ok-nothing-rendered-in-python/19369 </issue> <code> [start of dash/development/component_generator.py] 1 from __future__ import print_function 2 from collections import OrderedDict 3 4 import json 5 import sys 6 import subprocess 7 import shlex 8 import os 9 import argparse 10 import shutil 11 import functools 12 13 import pkg_resources 14 15 from ._r_components_generation import write_class_file 16 from ._r_components_generation import generate_exports 17 from ._py_components_generation import generate_class_file 18 from ._py_components_generation import generate_imports 19 from ._py_components_generation import generate_classes_files 20 21 22 class _CombinedFormatter(argparse.ArgumentDefaultsHelpFormatter, 23 argparse.RawDescriptionHelpFormatter): 24 pass 25 26 27 # pylint: disable=too-many-locals 28 def generate_components(components_source, project_shortname, 29 package_info_filename='package.json', 30 ignore='^_', 31 rprefix=None): 32 33 project_shortname = project_shortname.replace('-', '_').rstrip('/\\') 34 35 if rprefix: 36 prefix = rprefix 37 38 is_windows = sys.platform == 'win32' 39 40 extract_path = pkg_resources.resource_filename('dash', 'extract-meta.js') 41 42 os.environ['NODE_PATH'] = 'node_modules' 43 cmd = shlex.split( 44 'node {} {} {}'.format(extract_path, ignore, components_source), 45 posix=not is_windows 46 ) 47 48 shutil.copyfile('package.json', 49 os.path.join(project_shortname, package_info_filename)) 50 51 proc = subprocess.Popen(cmd, 52 stdout=subprocess.PIPE, 53 stderr=subprocess.PIPE, 54 shell=is_windows) 55 out, err = proc.communicate() 56 status = proc.poll() 57 58 if err: 59 print(err.decode(), file=sys.stderr) 60 61 if not out: 62 print( 63 'Error generating metadata in {} (status={})'.format( 64 project_shortname, status), 65 file=sys.stderr) 66 sys.exit(1) 67 68 jsondata_unicode = json.loads(out.decode(), object_pairs_hook=OrderedDict) 69 70 if sys.version_info[0] >= 3: 71 metadata = jsondata_unicode 72 else: 73 metadata = byteify(jsondata_unicode) 74 75 generator_methods = [generate_class_file] 76 77 if rprefix: 78 if not os.path.exists('man'): 79 os.makedirs('man') 80 if not os.path.exists('R'): 81 os.makedirs('R') 82 generator_methods.append( 83 functools.partial(write_class_file, prefix=prefix)) 84 85 components = generate_classes_files( 86 project_shortname, 87 metadata, 88 *generator_methods 89 ) 90 91 with open(os.path.join(project_shortname, 'metadata.json'), 'w') as f: 92 json.dump(metadata, f) 93 94 generate_imports(project_shortname, components) 95 96 if rprefix: 97 with open('package.json', 'r') as f: 98 jsondata_unicode = json.load(f, object_pairs_hook=OrderedDict) 99 if sys.version_info[0] >= 3: 100 pkg_data = jsondata_unicode 101 else: 102 pkg_data = byteify(jsondata_unicode) 103 104 generate_exports( 105 project_shortname, components, metadata, pkg_data, prefix 106 ) 107 108 109 def cli(): 110 parser = argparse.ArgumentParser( 111 prog='dash-generate-components', 112 formatter_class=_CombinedFormatter, 113 description='Generate dash components by extracting the metadata ' 114 'using react-docgen. Then map the metadata to python classes.' 
115 ) 116 parser.add_argument('components_source', 117 help='React components source directory.') 118 parser.add_argument( 119 'project_shortname', 120 help='Name of the project to export the classes files.' 121 ) 122 parser.add_argument( 123 '-p', '--package-info-filename', 124 default='package.json', 125 help='The filename of the copied `package.json` to `project_shortname`' 126 ) 127 parser.add_argument( 128 '-i', '--ignore', 129 default='^_', 130 help='Files/directories matching the pattern will be ignored' 131 ) 132 parser.add_argument( 133 '--r-prefix', 134 help='Experimental: specify a prefix for DashR component names, write' 135 'DashR components to R dir, create R package.' 136 ) 137 138 args = parser.parse_args() 139 generate_components( 140 args.components_source, args.project_shortname, 141 package_info_filename=args.package_info_filename, 142 ignore=args.ignore, 143 rprefix=args.r_prefix) 144 145 146 # pylint: disable=undefined-variable 147 def byteify(input_object): 148 if isinstance(input_object, dict): 149 return OrderedDict([ 150 (byteify(key), byteify(value)) 151 for key, value in input_object.iteritems() 152 ]) 153 elif isinstance(input_object, list): 154 return [byteify(element) for element in input_object] 155 elif isinstance(input_object, unicode): # noqa:F821 156 return input_object.encode('utf-8') 157 return input_object 158 159 160 if __name__ == '__main__': 161 cli() 162 [end of dash/development/component_generator.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dash/development/component_generator.py b/dash/development/component_generator.py --- a/dash/development/component_generator.py +++ b/dash/development/component_generator.py @@ -89,7 +89,7 @@ ) with open(os.path.join(project_shortname, 'metadata.json'), 'w') as f: - json.dump(metadata, f) + json.dump(metadata, f, indent=2) generate_imports(project_shortname, components)
{"golden_diff": "diff --git a/dash/development/component_generator.py b/dash/development/component_generator.py\n--- a/dash/development/component_generator.py\n+++ b/dash/development/component_generator.py\n@@ -89,7 +89,7 @@\n )\n \n with open(os.path.join(project_shortname, 'metadata.json'), 'w') as f:\n- json.dump(metadata, f)\n+ json.dump(metadata, f, indent=2)\n \n generate_imports(project_shortname, components)\n", "issue": "Extract meta failure on missing props docstring.\nIf a props is missing a docstring, it will fail to generate the component with js error, `Cannot read property 'length' of undefined`.\r\n\r\nhttps://community.plot.ly/t/dash-component-creation-javascript-ok-nothing-rendered-in-python/19369\n", "before_files": [{"content": "from __future__ import print_function\nfrom collections import OrderedDict\n\nimport json\nimport sys\nimport subprocess\nimport shlex\nimport os\nimport argparse\nimport shutil\nimport functools\n\nimport pkg_resources\n\nfrom ._r_components_generation import write_class_file\nfrom ._r_components_generation import generate_exports\nfrom ._py_components_generation import generate_class_file\nfrom ._py_components_generation import generate_imports\nfrom ._py_components_generation import generate_classes_files\n\n\nclass _CombinedFormatter(argparse.ArgumentDefaultsHelpFormatter,\n argparse.RawDescriptionHelpFormatter):\n pass\n\n\n# pylint: disable=too-many-locals\ndef generate_components(components_source, project_shortname,\n package_info_filename='package.json',\n ignore='^_',\n rprefix=None):\n\n project_shortname = project_shortname.replace('-', '_').rstrip('/\\\\')\n\n if rprefix:\n prefix = rprefix\n\n is_windows = sys.platform == 'win32'\n\n extract_path = pkg_resources.resource_filename('dash', 'extract-meta.js')\n\n os.environ['NODE_PATH'] = 'node_modules'\n cmd = shlex.split(\n 'node {} {} {}'.format(extract_path, ignore, components_source),\n posix=not is_windows\n )\n\n shutil.copyfile('package.json',\n os.path.join(project_shortname, package_info_filename))\n\n proc = subprocess.Popen(cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=is_windows)\n out, err = proc.communicate()\n status = proc.poll()\n\n if err:\n print(err.decode(), file=sys.stderr)\n\n if not out:\n print(\n 'Error generating metadata in {} (status={})'.format(\n project_shortname, status),\n file=sys.stderr)\n sys.exit(1)\n\n jsondata_unicode = json.loads(out.decode(), object_pairs_hook=OrderedDict)\n\n if sys.version_info[0] >= 3:\n metadata = jsondata_unicode\n else:\n metadata = byteify(jsondata_unicode)\n\n generator_methods = [generate_class_file]\n\n if rprefix:\n if not os.path.exists('man'):\n os.makedirs('man')\n if not os.path.exists('R'):\n os.makedirs('R')\n generator_methods.append(\n functools.partial(write_class_file, prefix=prefix))\n\n components = generate_classes_files(\n project_shortname,\n metadata,\n *generator_methods\n )\n\n with open(os.path.join(project_shortname, 'metadata.json'), 'w') as f:\n json.dump(metadata, f)\n\n generate_imports(project_shortname, components)\n\n if rprefix:\n with open('package.json', 'r') as f:\n jsondata_unicode = json.load(f, object_pairs_hook=OrderedDict)\n if sys.version_info[0] >= 3:\n pkg_data = jsondata_unicode\n else:\n pkg_data = byteify(jsondata_unicode)\n\n generate_exports(\n project_shortname, components, metadata, pkg_data, prefix\n )\n\n\ndef cli():\n parser = argparse.ArgumentParser(\n prog='dash-generate-components',\n formatter_class=_CombinedFormatter,\n description='Generate 
dash components by extracting the metadata '\n 'using react-docgen. Then map the metadata to python classes.'\n )\n parser.add_argument('components_source',\n help='React components source directory.')\n parser.add_argument(\n 'project_shortname',\n help='Name of the project to export the classes files.'\n )\n parser.add_argument(\n '-p', '--package-info-filename',\n default='package.json',\n help='The filename of the copied `package.json` to `project_shortname`'\n )\n parser.add_argument(\n '-i', '--ignore',\n default='^_',\n help='Files/directories matching the pattern will be ignored'\n )\n parser.add_argument(\n '--r-prefix',\n help='Experimental: specify a prefix for DashR component names, write'\n 'DashR components to R dir, create R package.'\n )\n\n args = parser.parse_args()\n generate_components(\n args.components_source, args.project_shortname,\n package_info_filename=args.package_info_filename,\n ignore=args.ignore,\n rprefix=args.r_prefix)\n\n\n# pylint: disable=undefined-variable\ndef byteify(input_object):\n if isinstance(input_object, dict):\n return OrderedDict([\n (byteify(key), byteify(value))\n for key, value in input_object.iteritems()\n ])\n elif isinstance(input_object, list):\n return [byteify(element) for element in input_object]\n elif isinstance(input_object, unicode): # noqa:F821\n return input_object.encode('utf-8')\n return input_object\n\n\nif __name__ == '__main__':\n cli()\n", "path": "dash/development/component_generator.py"}]}
1993
107
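The dash entry describes `extract-meta.js` failing with `Cannot read property 'length' of undefined` when a prop has no docstring. As a purely illustrative sketch — not the project's actual fix, and the helper name is invented — the same failure mode can be guarded on the Python side by defaulting missing `description` fields before the metadata is consumed:

```python
def normalize_prop_metadata(props):
    """Fill in empty descriptions so downstream code can assume the key exists."""
    for meta in props.values():
        # react-docgen omits "description" for props without a docstring
        meta.setdefault("description", "")
    return props
```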
gh_patches_debug_1743
rasdani/github-patches
git_diff
conan-io__conan-3816
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Conan crash if .count-files consist NULL-character conan version 1.8.0, Windows 7 x64 For some reason (maybe a crash), the .count-files in each conan package in the cache were corrupted and contained a unicode NULL character (https://www.fileformat.info/info/unicode/char/0000/index.htm). This led to a conan crash for basically every package action (e.g. conan info). I already prepared a fix (https://github.com/conan-io/conan/compare/develop...pianoslum:develop) which also adds a little bit more verbosity in case there is no number in the .count-file. Is this enough info or should I elaborate? </issue> <code> [start of conans/util/locks.py] 1 import fasteners 2 from conans.util.log import logger 3 import time 4 from conans.util.files import save, load 5 import os 6 7 8 class NoLock(object): 9 10 def __enter__(self): 11 pass 12 13 def __exit__(self, exc_type, exc_val, exc_tb): # @UnusedVariable 14 pass 15 16 17 class SimpleLock(object): 18 19 def __init__(self, filename): 20 self._lock = fasteners.InterProcessLock(filename, logger=logger) 21 22 def __enter__(self): 23 self._lock.acquire() 24 25 def __exit__(self, exc_type, exc_val, exc_tb): # @UnusedVariable 26 self._lock.release() 27 28 29 READ_BUSY_DELAY = 0.5 30 WRITE_BUSY_DELAY = 0.25 31 32 33 class Lock(object): 34 35 @staticmethod 36 def clean(folder): 37 if os.path.exists(folder + ".count"): 38 os.remove(folder + ".count") 39 if os.path.exists(folder + ".count.lock"): 40 os.remove(folder + ".count.lock") 41 42 def __init__(self, folder, locked_item, output): 43 self._count_file = folder + ".count" 44 self._count_lock_file = folder + ".count.lock" 45 self._locked_item = locked_item 46 self._output = output 47 self._first_lock = True 48 49 @property 50 def files(self): 51 return (self._count_file, self._count_lock_file) 52 53 def _info_locked(self): 54 if self._first_lock: 55 self._first_lock = False 56 self._output.info("%s is locked by another concurrent conan process, wait..." 57 % str(self._locked_item)) 58 self._output.info("If not the case, quit, and do 'conan remove --locks'") 59 60 def _readers(self): 61 try: 62 return int(load(self._count_file)) 63 except IOError: 64 return 0 65 66 67 class ReadLock(Lock): 68 69 def __enter__(self): 70 while True: 71 with fasteners.InterProcessLock(self._count_lock_file, logger=logger): 72 readers = self._readers() 73 if readers >= 0: 74 save(self._count_file, str(readers + 1)) 75 break 76 self._info_locked() 77 time.sleep(READ_BUSY_DELAY) 78 79 def __exit__(self, exc_type, exc_val, exc_tb): # @UnusedVariable 80 with fasteners.InterProcessLock(self._count_lock_file, logger=logger): 81 readers = self._readers() 82 save(self._count_file, str(readers - 1)) 83 84 85 class WriteLock(Lock): 86 87 def __enter__(self): 88 while True: 89 with fasteners.InterProcessLock(self._count_lock_file, logger=logger): 90 readers = self._readers() 91 if readers == 0: 92 save(self._count_file, "-1") 93 break 94 self._info_locked() 95 time.sleep(WRITE_BUSY_DELAY) 96 97 def __exit__(self, exc_type, exc_val, exc_tb): # @UnusedVariable 98 with fasteners.InterProcessLock(self._count_lock_file, logger=logger): 99 save(self._count_file, "0") 100 [end of conans/util/locks.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/conans/util/locks.py b/conans/util/locks.py --- a/conans/util/locks.py +++ b/conans/util/locks.py @@ -60,7 +60,8 @@ def _readers(self): try: return int(load(self._count_file)) - except IOError: + except (IOError, UnicodeEncodeError, ValueError): + self._output.warn("%s does not contain a number!" % self._count_file) return 0
{"golden_diff": "diff --git a/conans/util/locks.py b/conans/util/locks.py\n--- a/conans/util/locks.py\n+++ b/conans/util/locks.py\n@@ -60,7 +60,8 @@\n def _readers(self):\n try:\n return int(load(self._count_file))\n- except IOError:\n+ except (IOError, UnicodeEncodeError, ValueError):\n+ self._output.warn(\"%s does not contain a number!\" % self._count_file)\n return 0\n", "issue": "Conan crash if .count-files consist NULL-character\nconan version 1.8.0, Windows 7 x64\r\n\r\nFor some reason (maybe a crash), the .count-files in each conan package in the cache were corrupted and contained a unicode NULL character (https://www.fileformat.info/info/unicode/char/0000/index.htm). This led to a conan crash for basically every package action (e.g. conan info).\r\n\r\nI already prepared a fix (https://github.com/conan-io/conan/compare/develop...pianoslum:develop) which also adds a little bit more verbosity in case there is no number in the .count-file.\r\n\r\nIs this enough info or should I elaborate?\n", "before_files": [{"content": "import fasteners\nfrom conans.util.log import logger\nimport time\nfrom conans.util.files import save, load\nimport os\n\n\nclass NoLock(object):\n\n def __enter__(self):\n pass\n\n def __exit__(self, exc_type, exc_val, exc_tb): # @UnusedVariable\n pass\n\n\nclass SimpleLock(object):\n\n def __init__(self, filename):\n self._lock = fasteners.InterProcessLock(filename, logger=logger)\n\n def __enter__(self):\n self._lock.acquire()\n\n def __exit__(self, exc_type, exc_val, exc_tb): # @UnusedVariable\n self._lock.release()\n\n\nREAD_BUSY_DELAY = 0.5\nWRITE_BUSY_DELAY = 0.25\n\n\nclass Lock(object):\n\n @staticmethod\n def clean(folder):\n if os.path.exists(folder + \".count\"):\n os.remove(folder + \".count\")\n if os.path.exists(folder + \".count.lock\"):\n os.remove(folder + \".count.lock\")\n\n def __init__(self, folder, locked_item, output):\n self._count_file = folder + \".count\"\n self._count_lock_file = folder + \".count.lock\"\n self._locked_item = locked_item\n self._output = output\n self._first_lock = True\n\n @property\n def files(self):\n return (self._count_file, self._count_lock_file)\n\n def _info_locked(self):\n if self._first_lock:\n self._first_lock = False\n self._output.info(\"%s is locked by another concurrent conan process, wait...\"\n % str(self._locked_item))\n self._output.info(\"If not the case, quit, and do 'conan remove --locks'\")\n\n def _readers(self):\n try:\n return int(load(self._count_file))\n except IOError:\n return 0\n\n\nclass ReadLock(Lock):\n\n def __enter__(self):\n while True:\n with fasteners.InterProcessLock(self._count_lock_file, logger=logger):\n readers = self._readers()\n if readers >= 0:\n save(self._count_file, str(readers + 1))\n break\n self._info_locked()\n time.sleep(READ_BUSY_DELAY)\n\n def __exit__(self, exc_type, exc_val, exc_tb): # @UnusedVariable\n with fasteners.InterProcessLock(self._count_lock_file, logger=logger):\n readers = self._readers()\n save(self._count_file, str(readers - 1))\n\n\nclass WriteLock(Lock):\n\n def __enter__(self):\n while True:\n with fasteners.InterProcessLock(self._count_lock_file, logger=logger):\n readers = self._readers()\n if readers == 0:\n save(self._count_file, \"-1\")\n break\n self._info_locked()\n time.sleep(WRITE_BUSY_DELAY)\n\n def __exit__(self, exc_type, exc_val, exc_tb): # @UnusedVariable\n with fasteners.InterProcessLock(self._count_lock_file, logger=logger):\n save(self._count_file, \"0\")\n", "path": "conans/util/locks.py"}]}
1567
111
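The conan entry is a robustness fix: a `.count` file corrupted with NUL characters made `int(load(...))` raise something other than `IOError`, so the accepted diff widens the except clause and warns instead of crashing. The sketch below shows the same idea with plain file I/O; the names are illustrative, and it catches `UnicodeDecodeError` where conan's `load()` helper surfaces `UnicodeEncodeError`.

```python
def read_reader_count(count_file, warn=print):
    """Return the reader count, treating a missing or corrupted file as zero."""
    try:
        with open(count_file) as f:
            return int(f.read())
    except (IOError, UnicodeDecodeError, ValueError):
        # missing file, NUL bytes, or non-numeric content -- treat as no readers
        warn("%s does not contain a number!" % count_file)
        return 0
```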
gh_patches_debug_27764
rasdani/github-patches
git_diff
ytdl-org__youtube-dl-4025
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> motherless extractor - test 0 fails ``` $ python ~/projects/youtube-dl/test/test_download.py TestDownload.test_Motherless [Motherless] AC3FFE1: Downloading webpage [info] Writing video description metadata as JSON to: AC3FFE1.info.json [debug] Invoking downloader on 'http://s17.motherlessmedia.com/dev386/0/572/287/0572287847.mp4/5cb6d38eccba71d7f6bb2ef260997c3d/544A96C0.mp4' [download] Destination: AC3FFE1.mp4 [download] 100% of 10.00KiB in 00:00 F ====================================================================== FAIL: test_Motherless (__main__.TestDownload) ---------------------------------------------------------------------- Traceback (most recent call last): File "/home/crabman/projects/youtube-dl/test/test_download.py", line 170, in test_template self.assertTrue(os.path.exists(tc_filename), msg='Missing file ' + tc_filename) AssertionError: False is not true : Missing file AC3FFE1.flv ---------------------------------------------------------------------- Ran 1 test in 1.690s FAILED (failures=1) ``` Apparently that video's page no longer give you flv, but instead gives you mp4 file. I am not sure why it happened. Maybe motherless doesn't serve flv files anymore, maybe it still does for some videos - I don't know. </issue> <code> [start of youtube_dl/extractor/motherless.py] 1 from __future__ import unicode_literals 2 3 import datetime 4 import re 5 6 from .common import InfoExtractor 7 from ..utils import ( 8 int_or_none, 9 unified_strdate, 10 ) 11 12 13 class MotherlessIE(InfoExtractor): 14 _VALID_URL = r'http://(?:www\.)?motherless\.com/(?P<id>[A-Z0-9]+)' 15 _TESTS = [ 16 { 17 'url': 'http://motherless.com/AC3FFE1', 18 'md5': '5527fef81d2e529215dad3c2d744a7d9', 19 'info_dict': { 20 'id': 'AC3FFE1', 21 'ext': 'flv', 22 'title': 'Fucked in the ass while playing PS3', 23 'categories': ['Gaming', 'anal', 'reluctant', 'rough', 'Wife'], 24 'upload_date': '20100913', 25 'uploader_id': 'famouslyfuckedup', 26 'thumbnail': 're:http://.*\.jpg', 27 'age_limit': 18, 28 } 29 }, 30 { 31 'url': 'http://motherless.com/532291B', 32 'md5': 'bc59a6b47d1f958e61fbd38a4d31b131', 33 'info_dict': { 34 'id': '532291B', 35 'ext': 'mp4', 36 'title': 'Amazing girl playing the omegle game, PERFECT!', 37 'categories': ['Amateur', 'webcam', 'omegle', 'pink', 'young', 'masturbate', 'teen', 'game', 'hairy'], 38 'upload_date': '20140622', 39 'uploader_id': 'Sulivana7x', 40 'thumbnail': 're:http://.*\.jpg', 41 'age_limit': 18, 42 } 43 } 44 ] 45 46 def _real_extract(self,url): 47 mobj = re.match(self._VALID_URL, url) 48 video_id = mobj.group('id') 49 50 webpage = self._download_webpage(url, video_id) 51 52 title = self._html_search_regex(r'id="view-upload-title">\s+([^<]+)<', webpage, 'title') 53 54 video_url = self._html_search_regex(r'setup\(\{\s+"file".+: "([^"]+)",', webpage, 'video_url') 55 age_limit = self._rta_search(webpage) 56 57 view_count = self._html_search_regex(r'<strong>Views</strong>\s+([^<]+)<', webpage, 'view_count') 58 59 upload_date = self._html_search_regex(r'<strong>Uploaded</strong>\s+([^<]+)<', webpage, 'upload_date') 60 if 'Ago' in upload_date: 61 days = int(re.search(r'([0-9]+)', upload_date).group(1)) 62 upload_date = (datetime.datetime.now() - datetime.timedelta(days=days)).strftime('%Y%m%d') 63 else: 64 upload_date = unified_strdate(upload_date) 65 66 like_count = self._html_search_regex(r'<strong>Favorited</strong>\s+([^<]+)<', webpage, 'like_count') 67 68 comment_count = 
webpage.count('class="media-comment-contents"') 69 uploader_id = self._html_search_regex(r'"thumb-member-username">\s+<a href="/m/([^"]+)"', webpage, 'uploader_id') 70 71 categories = self._html_search_meta('keywords', webpage) 72 if categories: 73 categories = [cat.strip() for cat in categories.split(',')] 74 75 return { 76 'id': video_id, 77 'title': title, 78 'upload_date': upload_date, 79 'uploader_id': uploader_id, 80 'thumbnail': self._og_search_thumbnail(webpage), 81 'categories': categories, 82 'view_count': int_or_none(view_count.replace(',', '')), 83 'like_count': int_or_none(like_count.replace(',', '')), 84 'comment_count': comment_count, 85 'age_limit': age_limit, 86 'url': video_url, 87 } 88 [end of youtube_dl/extractor/motherless.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/youtube_dl/extractor/motherless.py b/youtube_dl/extractor/motherless.py --- a/youtube_dl/extractor/motherless.py +++ b/youtube_dl/extractor/motherless.py @@ -11,14 +11,14 @@ class MotherlessIE(InfoExtractor): - _VALID_URL = r'http://(?:www\.)?motherless\.com/(?P<id>[A-Z0-9]+)' + _VALID_URL = r'http://(?:www\.)?motherless\.com/(?:g/[a-z0-9_]+/)?(?P<id>[A-Z0-9]+)' _TESTS = [ { 'url': 'http://motherless.com/AC3FFE1', - 'md5': '5527fef81d2e529215dad3c2d744a7d9', + 'md5': '310f62e325a9fafe64f68c0bccb6e75f', 'info_dict': { 'id': 'AC3FFE1', - 'ext': 'flv', + 'ext': 'mp4', 'title': 'Fucked in the ass while playing PS3', 'categories': ['Gaming', 'anal', 'reluctant', 'rough', 'Wife'], 'upload_date': '20100913', @@ -40,6 +40,20 @@ 'thumbnail': 're:http://.*\.jpg', 'age_limit': 18, } + }, + { + 'url': 'http://motherless.com/g/cosplay/633979F', + 'md5': '0b2a43f447a49c3e649c93ad1fafa4a0', + 'info_dict': { + 'id': '633979F', + 'ext': 'mp4', + 'title': 'Turtlette', + 'categories': ['superheroine heroine superher'], + 'upload_date': '20140827', + 'uploader_id': 'shade0230', + 'thumbnail': 're:http://.*\.jpg', + 'age_limit': 18, + } } ]
{"golden_diff": "diff --git a/youtube_dl/extractor/motherless.py b/youtube_dl/extractor/motherless.py\n--- a/youtube_dl/extractor/motherless.py\n+++ b/youtube_dl/extractor/motherless.py\n@@ -11,14 +11,14 @@\n \n \n class MotherlessIE(InfoExtractor):\n- _VALID_URL = r'http://(?:www\\.)?motherless\\.com/(?P<id>[A-Z0-9]+)'\n+ _VALID_URL = r'http://(?:www\\.)?motherless\\.com/(?:g/[a-z0-9_]+/)?(?P<id>[A-Z0-9]+)'\n _TESTS = [\n {\n 'url': 'http://motherless.com/AC3FFE1',\n- 'md5': '5527fef81d2e529215dad3c2d744a7d9',\n+ 'md5': '310f62e325a9fafe64f68c0bccb6e75f',\n 'info_dict': {\n 'id': 'AC3FFE1',\n- 'ext': 'flv',\n+ 'ext': 'mp4',\n 'title': 'Fucked in the ass while playing PS3',\n 'categories': ['Gaming', 'anal', 'reluctant', 'rough', 'Wife'],\n 'upload_date': '20100913',\n@@ -40,6 +40,20 @@\n 'thumbnail': 're:http://.*\\.jpg',\n 'age_limit': 18,\n }\n+ },\n+ {\n+ 'url': 'http://motherless.com/g/cosplay/633979F',\n+ 'md5': '0b2a43f447a49c3e649c93ad1fafa4a0',\n+ 'info_dict': {\n+ 'id': '633979F',\n+ 'ext': 'mp4',\n+ 'title': 'Turtlette',\n+ 'categories': ['superheroine heroine superher'],\n+ 'upload_date': '20140827',\n+ 'uploader_id': 'shade0230',\n+ 'thumbnail': 're:http://.*\\.jpg',\n+ 'age_limit': 18,\n+ }\n }\n ]\n", "issue": "motherless extractor - test 0 fails\n```\n$ python ~/projects/youtube-dl/test/test_download.py TestDownload.test_Motherless\n[Motherless] AC3FFE1: Downloading webpage\n[info] Writing video description metadata as JSON to: AC3FFE1.info.json\n[debug] Invoking downloader on 'http://s17.motherlessmedia.com/dev386/0/572/287/0572287847.mp4/5cb6d38eccba71d7f6bb2ef260997c3d/544A96C0.mp4'\n[download] Destination: AC3FFE1.mp4\n[download] 100% of 10.00KiB in 00:00\nF\n======================================================================\nFAIL: test_Motherless (__main__.TestDownload)\n----------------------------------------------------------------------\nTraceback (most recent call last):\nFile \"/home/crabman/projects/youtube-dl/test/test_download.py\", line 170, in test_template\n self.assertTrue(os.path.exists(tc_filename), msg='Missing file ' + tc_filename)\nAssertionError: False is not true : Missing file AC3FFE1.flv\n\n----------------------------------------------------------------------\nRan 1 test in 1.690s\n\nFAILED (failures=1)\n```\n\nApparently that video's page no longer give you flv, but instead gives you mp4 file. I am not sure why it happened. 
Maybe motherless doesn't serve flv files anymore, maybe it still does for some videos - I don't know.\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport datetime\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n int_or_none,\n unified_strdate,\n)\n\n\nclass MotherlessIE(InfoExtractor):\n _VALID_URL = r'http://(?:www\\.)?motherless\\.com/(?P<id>[A-Z0-9]+)'\n _TESTS = [\n {\n 'url': 'http://motherless.com/AC3FFE1',\n 'md5': '5527fef81d2e529215dad3c2d744a7d9',\n 'info_dict': {\n 'id': 'AC3FFE1',\n 'ext': 'flv',\n 'title': 'Fucked in the ass while playing PS3',\n 'categories': ['Gaming', 'anal', 'reluctant', 'rough', 'Wife'],\n 'upload_date': '20100913',\n 'uploader_id': 'famouslyfuckedup',\n 'thumbnail': 're:http://.*\\.jpg',\n 'age_limit': 18,\n }\n },\n {\n 'url': 'http://motherless.com/532291B',\n 'md5': 'bc59a6b47d1f958e61fbd38a4d31b131',\n 'info_dict': {\n 'id': '532291B',\n 'ext': 'mp4',\n 'title': 'Amazing girl playing the omegle game, PERFECT!',\n 'categories': ['Amateur', 'webcam', 'omegle', 'pink', 'young', 'masturbate', 'teen', 'game', 'hairy'],\n 'upload_date': '20140622',\n 'uploader_id': 'Sulivana7x',\n 'thumbnail': 're:http://.*\\.jpg',\n 'age_limit': 18,\n }\n }\n ]\n\n def _real_extract(self,url):\n mobj = re.match(self._VALID_URL, url)\n video_id = mobj.group('id')\n\n webpage = self._download_webpage(url, video_id)\n\n title = self._html_search_regex(r'id=\"view-upload-title\">\\s+([^<]+)<', webpage, 'title')\n \n video_url = self._html_search_regex(r'setup\\(\\{\\s+\"file\".+: \"([^\"]+)\",', webpage, 'video_url')\n age_limit = self._rta_search(webpage)\n\n view_count = self._html_search_regex(r'<strong>Views</strong>\\s+([^<]+)<', webpage, 'view_count')\n \n upload_date = self._html_search_regex(r'<strong>Uploaded</strong>\\s+([^<]+)<', webpage, 'upload_date')\n if 'Ago' in upload_date:\n days = int(re.search(r'([0-9]+)', upload_date).group(1))\n upload_date = (datetime.datetime.now() - datetime.timedelta(days=days)).strftime('%Y%m%d')\n else:\n upload_date = unified_strdate(upload_date)\n\n like_count = self._html_search_regex(r'<strong>Favorited</strong>\\s+([^<]+)<', webpage, 'like_count')\n\n comment_count = webpage.count('class=\"media-comment-contents\"')\n uploader_id = self._html_search_regex(r'\"thumb-member-username\">\\s+<a href=\"/m/([^\"]+)\"', webpage, 'uploader_id')\n\n categories = self._html_search_meta('keywords', webpage)\n if categories:\n categories = [cat.strip() for cat in categories.split(',')]\n\n return {\n 'id': video_id,\n 'title': title,\n 'upload_date': upload_date,\n 'uploader_id': uploader_id,\n 'thumbnail': self._og_search_thumbnail(webpage),\n 'categories': categories,\n 'view_count': int_or_none(view_count.replace(',', '')),\n 'like_count': int_or_none(like_count.replace(',', '')),\n 'comment_count': comment_count,\n 'age_limit': age_limit,\n 'url': video_url,\n }\n", "path": "youtube_dl/extractor/motherless.py"}]}
1969
548
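The youtube-dl entry is mostly a test-fixture update (the site now serves MP4 rather than FLV), but the substantive part of the accepted diff is the widened `_VALID_URL`, which also accepts gallery-style `/g/<category>/<id>` links. A quick self-contained check of that regex, taken verbatim from the diff:

```python
import re

_VALID_URL = r'http://(?:www\.)?motherless\.com/(?:g/[a-z0-9_]+/)?(?P<id>[A-Z0-9]+)'

# Plain video pages still match, and the optional group absorbs the category segment.
assert re.match(_VALID_URL, 'http://motherless.com/AC3FFE1').group('id') == 'AC3FFE1'
assert re.match(_VALID_URL, 'http://motherless.com/g/cosplay/633979F').group('id') == '633979F'
```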
gh_patches_debug_14166
rasdani/github-patches
git_diff
tiangolo__fastapi-454
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add support for Pydantic's dataclasses **Describe the question** I am using a pydantic.dataclasses.dataclass as a response_model of my fast api route. ``` from fastapi import FastAPI from pydantic.dataclasses import dataclass @dataclass class Foo: bar: int app = FastAPI() @app.get('/foo', response_model=Foo) def get_bar() -> Foo: return Foo(bar=1) ``` **To Reproduce** Steps to reproduce the behavior: 1. Create a file **app.py** with the above code. 2. Start the app: `uvicorn app:app` 3. Open the browser and go to **localhost:8000/foo** 4. See error: ``` pydantic.error_wrappers.ValidationError: 1 validation error response __init__() got an unexpected keyword argument '__initialised__' (type=type_error) ``` **Expected behavior** No validation error and serialized json does not contain any keys `__initialised__`. **Environment:** - OS: Linux - FastAPI Version 0.25.0 - Python version 3.7.3 </issue> <code> [start of fastapi/utils.py] 1 import re 2 from typing import Any, Dict, List, Sequence, Set, Type, cast 3 4 from fastapi import routing 5 from fastapi.openapi.constants import REF_PREFIX 6 from pydantic import BaseConfig, BaseModel, Schema, create_model 7 from pydantic.fields import Field 8 from pydantic.schema import get_flat_models_from_fields, model_process_schema 9 from pydantic.utils import lenient_issubclass 10 from starlette.routing import BaseRoute 11 12 13 def get_flat_models_from_routes(routes: Sequence[BaseRoute]) -> Set[Type[BaseModel]]: 14 body_fields_from_routes: List[Field] = [] 15 responses_from_routes: List[Field] = [] 16 for route in routes: 17 if getattr(route, "include_in_schema", None) and isinstance( 18 route, routing.APIRoute 19 ): 20 if route.body_field: 21 assert isinstance( 22 route.body_field, Field 23 ), "A request body must be a Pydantic Field" 24 body_fields_from_routes.append(route.body_field) 25 if route.response_field: 26 responses_from_routes.append(route.response_field) 27 if route.response_fields: 28 responses_from_routes.extend(route.response_fields.values()) 29 flat_models = get_flat_models_from_fields( 30 body_fields_from_routes + responses_from_routes, known_models=set() 31 ) 32 return flat_models 33 34 35 def get_model_definitions( 36 *, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str] 37 ) -> Dict[str, Any]: 38 definitions: Dict[str, Dict] = {} 39 for model in flat_models: 40 m_schema, m_definitions, m_nested_models = model_process_schema( 41 model, model_name_map=model_name_map, ref_prefix=REF_PREFIX 42 ) 43 definitions.update(m_definitions) 44 model_name = model_name_map[model] 45 definitions[model_name] = m_schema 46 return definitions 47 48 49 def get_path_param_names(path: str) -> Set[str]: 50 return {item.strip("{}") for item in re.findall("{[^}]*}", path)} 51 52 53 def create_cloned_field(field: Field) -> Field: 54 original_type = field.type_ 55 use_type = original_type 56 if lenient_issubclass(original_type, BaseModel): 57 original_type = cast(Type[BaseModel], original_type) 58 use_type = create_model( # type: ignore 59 original_type.__name__, 60 __config__=original_type.__config__, 61 __validators__=original_type.__validators__, 62 ) 63 for f in original_type.__fields__.values(): 64 use_type.__fields__[f.name] = f 65 new_field = Field( 66 name=field.name, 67 type_=use_type, 68 class_validators={}, 69 default=None, 70 required=False, 71 model_config=BaseConfig, 72 schema=Schema(None), 73 ) 74 
new_field.has_alias = field.has_alias 75 new_field.alias = field.alias 76 new_field.class_validators = field.class_validators 77 new_field.default = field.default 78 new_field.required = field.required 79 new_field.model_config = field.model_config 80 new_field.schema = field.schema 81 new_field.allow_none = field.allow_none 82 new_field.validate_always = field.validate_always 83 if field.sub_fields: 84 new_field.sub_fields = [ 85 create_cloned_field(sub_field) for sub_field in field.sub_fields 86 ] 87 if field.key_field: 88 new_field.key_field = create_cloned_field(field.key_field) 89 new_field.validators = field.validators 90 new_field.whole_pre_validators = field.whole_pre_validators 91 new_field.whole_post_validators = field.whole_post_validators 92 new_field.parse_json = field.parse_json 93 new_field.shape = field.shape 94 new_field._populate_validators() 95 return new_field 96 97 98 def generate_operation_id_for_path(*, name: str, path: str, method: str) -> str: 99 operation_id = name + path 100 operation_id = operation_id.replace("{", "_").replace("}", "_").replace("/", "_") 101 operation_id = operation_id + "_" + method.lower() 102 return operation_id 103 [end of fastapi/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/fastapi/utils.py b/fastapi/utils.py --- a/fastapi/utils.py +++ b/fastapi/utils.py @@ -1,4 +1,5 @@ import re +from dataclasses import is_dataclass from typing import Any, Dict, List, Sequence, Set, Type, cast from fastapi import routing @@ -52,6 +53,8 @@ def create_cloned_field(field: Field) -> Field: original_type = field.type_ + if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"): + original_type = original_type.__pydantic_model__ # type: ignore use_type = original_type if lenient_issubclass(original_type, BaseModel): original_type = cast(Type[BaseModel], original_type)
{"golden_diff": "diff --git a/fastapi/utils.py b/fastapi/utils.py\n--- a/fastapi/utils.py\n+++ b/fastapi/utils.py\n@@ -1,4 +1,5 @@\n import re\n+from dataclasses import is_dataclass\n from typing import Any, Dict, List, Sequence, Set, Type, cast\n \n from fastapi import routing\n@@ -52,6 +53,8 @@\n \n def create_cloned_field(field: Field) -> Field:\n original_type = field.type_\n+ if is_dataclass(original_type) and hasattr(original_type, \"__pydantic_model__\"):\n+ original_type = original_type.__pydantic_model__ # type: ignore\n use_type = original_type\n if lenient_issubclass(original_type, BaseModel):\n original_type = cast(Type[BaseModel], original_type)\n", "issue": "Add support for Pydantic's dataclasses\n**Describe the question**\r\nI am using a pydantic.dataclasses.dataclass as a response_model of my fast api route.\r\n\r\n\r\n```\r\nfrom fastapi import FastAPI\r\nfrom pydantic.dataclasses import dataclass\r\n\r\n\r\n@dataclass\r\nclass Foo:\r\n bar: int\r\n\r\napp = FastAPI()\r\n\r\[email protected]('/foo', response_model=Foo)\r\ndef get_bar() -> Foo:\r\n return Foo(bar=1)\r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Create a file **app.py** with the above code.\r\n2. Start the app: `uvicorn app:app`\r\n3. Open the browser and go to **localhost:8000/foo**\r\n4. See error: \r\n```\r\npydantic.error_wrappers.ValidationError: 1 validation error\r\nresponse\r\n __init__() got an unexpected keyword argument '__initialised__' (type=type_error)\r\n```\r\n\r\n**Expected behavior**\r\nNo validation error and serialized json does not contain any keys `__initialised__`.\r\n\r\n\r\n**Environment:**\r\n - OS: Linux\r\n - FastAPI Version 0.25.0\r\n- Python version 3.7.3\r\n\n", "before_files": [{"content": "import re\nfrom typing import Any, Dict, List, Sequence, Set, Type, cast\n\nfrom fastapi import routing\nfrom fastapi.openapi.constants import REF_PREFIX\nfrom pydantic import BaseConfig, BaseModel, Schema, create_model\nfrom pydantic.fields import Field\nfrom pydantic.schema import get_flat_models_from_fields, model_process_schema\nfrom pydantic.utils import lenient_issubclass\nfrom starlette.routing import BaseRoute\n\n\ndef get_flat_models_from_routes(routes: Sequence[BaseRoute]) -> Set[Type[BaseModel]]:\n body_fields_from_routes: List[Field] = []\n responses_from_routes: List[Field] = []\n for route in routes:\n if getattr(route, \"include_in_schema\", None) and isinstance(\n route, routing.APIRoute\n ):\n if route.body_field:\n assert isinstance(\n route.body_field, Field\n ), \"A request body must be a Pydantic Field\"\n body_fields_from_routes.append(route.body_field)\n if route.response_field:\n responses_from_routes.append(route.response_field)\n if route.response_fields:\n responses_from_routes.extend(route.response_fields.values())\n flat_models = get_flat_models_from_fields(\n body_fields_from_routes + responses_from_routes, known_models=set()\n )\n return flat_models\n\n\ndef get_model_definitions(\n *, flat_models: Set[Type[BaseModel]], model_name_map: Dict[Type[BaseModel], str]\n) -> Dict[str, Any]:\n definitions: Dict[str, Dict] = {}\n for model in flat_models:\n m_schema, m_definitions, m_nested_models = model_process_schema(\n model, model_name_map=model_name_map, ref_prefix=REF_PREFIX\n )\n definitions.update(m_definitions)\n model_name = model_name_map[model]\n definitions[model_name] = m_schema\n return definitions\n\n\ndef get_path_param_names(path: str) -> Set[str]:\n return {item.strip(\"{}\") for item in re.findall(\"{[^}]*}\", 
path)}\n\n\ndef create_cloned_field(field: Field) -> Field:\n original_type = field.type_\n use_type = original_type\n if lenient_issubclass(original_type, BaseModel):\n original_type = cast(Type[BaseModel], original_type)\n use_type = create_model( # type: ignore\n original_type.__name__,\n __config__=original_type.__config__,\n __validators__=original_type.__validators__,\n )\n for f in original_type.__fields__.values():\n use_type.__fields__[f.name] = f\n new_field = Field(\n name=field.name,\n type_=use_type,\n class_validators={},\n default=None,\n required=False,\n model_config=BaseConfig,\n schema=Schema(None),\n )\n new_field.has_alias = field.has_alias\n new_field.alias = field.alias\n new_field.class_validators = field.class_validators\n new_field.default = field.default\n new_field.required = field.required\n new_field.model_config = field.model_config\n new_field.schema = field.schema\n new_field.allow_none = field.allow_none\n new_field.validate_always = field.validate_always\n if field.sub_fields:\n new_field.sub_fields = [\n create_cloned_field(sub_field) for sub_field in field.sub_fields\n ]\n if field.key_field:\n new_field.key_field = create_cloned_field(field.key_field)\n new_field.validators = field.validators\n new_field.whole_pre_validators = field.whole_pre_validators\n new_field.whole_post_validators = field.whole_post_validators\n new_field.parse_json = field.parse_json\n new_field.shape = field.shape\n new_field._populate_validators()\n return new_field\n\n\ndef generate_operation_id_for_path(*, name: str, path: str, method: str) -> str:\n operation_id = name + path\n operation_id = operation_id.replace(\"{\", \"_\").replace(\"}\", \"_\").replace(\"/\", \"_\")\n operation_id = operation_id + \"_\" + method.lower()\n return operation_id\n", "path": "fastapi/utils.py"}]}
1858
179
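The fastapi entry hinges on one detail of pydantic dataclasses: the decorator attaches the generated model to the class as `__pydantic_model__`, so `create_cloned_field` can unwrap it before its `BaseModel` check. A condensed sketch of that unwrapping step, with an illustrative function name:

```python
from dataclasses import is_dataclass

def unwrap_pydantic_dataclass(tp):
    # pydantic.dataclasses.dataclass stores the generated BaseModel here
    if is_dataclass(tp) and hasattr(tp, "__pydantic_model__"):
        return tp.__pydantic_model__
    return tp
```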
gh_patches_debug_3231
rasdani/github-patches
git_diff
scikit-image__scikit-image-2784
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Deprecate and remove `skimage.novice` `skimage.novice` was built by and for Software Carpentry. I've recently learned that it is no longer being used, so I don't see much use for it remaining in the package. If someone feels excited about turning it into a new package, please do so. </issue> <code> [start of skimage/novice/__init__.py] 1 """ 2 skimage.novice 3 ============== 4 A special Python image submodule for beginners. 5 6 Description 7 ----------- 8 ``skimage.novice`` provides a simple image manipulation interface for 9 beginners. It allows for easy loading, manipulating, and saving of image 10 files. 11 12 This module is primarily intended for teaching and differs significantly from 13 the normal, array-oriented image functions used by scikit-image. 14 15 .. note:: 16 17 This module uses the Cartesian coordinate system, where the origin is at 18 the lower-left corner instead of the upper-right and the order is x, y 19 instead of row, column. 20 21 22 Examples 23 -------- 24 We can create a Picture object open opening an image file: 25 26 >>> from skimage import novice 27 >>> from skimage import data 28 >>> picture = novice.open(data.data_dir + '/chelsea.png') 29 30 We can display pictures (after running this command, close the window to access the prompt again): 31 32 >>> picture.show() # doctest: +SKIP 33 34 Pictures know their format: 35 36 >>> picture.format 37 'png' 38 39 ... and where they came from: 40 41 >>> picture.path.endswith('chelsea.png') 42 True 43 44 ... and their size: 45 46 >>> picture.size 47 (451, 300) 48 >>> picture.width 49 451 50 51 As a reminder, we can preview the picture with our earlier command: 52 53 >>> picture.show() # doctest: +SKIP 54 55 Changing `size` resizes the picture. 56 57 >>> picture.size = (45, 30) 58 59 We can preview the changes we made to the picture with the ``compare`` command: 60 61 >>> picture.compare() # doctest: +SKIP 62 63 You can iterate over pixels, which have RGB values between 0 and 255, 64 and know their location in the picture. 65 66 >>> for pixel in picture: 67 ... if (pixel.red > 128) and (pixel.x < picture.width): 68 ... pixel.red = pixel.red / 2 69 70 Pictures know if they've been modified from the original file 71 72 >>> picture.modified 73 True 74 >>> print(picture.path) 75 None 76 77 Pictures can be indexed like arrays 78 79 >>> picture[0:20, 0:20] = (0, 0, 0) 80 81 Saving the picture updates the path attribute, format, and modified state. 82 83 >>> picture.save('save-demo.jpg') 84 >>> picture.path.endswith('save-demo.jpg') 85 True 86 >>> picture.format 87 'jpeg' 88 >>> picture.modified 89 False 90 91 An image can also be restored to its original state after modification: 92 93 >>> picture[0:20, 0:20] = (0, 0, 0) 94 >>> picture.compare() # doctest: +SKIP 95 >>> picture.reset() 96 >>> picture.compare() # doctest: +SKIP 97 98 """ 99 from ._novice import Picture, open, colors, color_dict 100 101 102 __all__ = ['Picture', 'open', 'colors', 'color_dict'] 103 [end of skimage/novice/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/skimage/novice/__init__.py b/skimage/novice/__init__.py --- a/skimage/novice/__init__.py +++ b/skimage/novice/__init__.py @@ -96,7 +96,11 @@ >>> picture.compare() # doctest: +SKIP """ +import warnings from ._novice import Picture, open, colors, color_dict +warnings.warn("This module was deprecated in version 0.14. " + "It will be removed in 0.16.") + __all__ = ['Picture', 'open', 'colors', 'color_dict']
{"golden_diff": "diff --git a/skimage/novice/__init__.py b/skimage/novice/__init__.py\n--- a/skimage/novice/__init__.py\n+++ b/skimage/novice/__init__.py\n@@ -96,7 +96,11 @@\n >>> picture.compare() # doctest: +SKIP\n \n \"\"\"\n+import warnings\n from ._novice import Picture, open, colors, color_dict\n \n \n+warnings.warn(\"This module was deprecated in version 0.14. \"\n+ \"It will be removed in 0.16.\")\n+\n __all__ = ['Picture', 'open', 'colors', 'color_dict']\n", "issue": "Deprecate and remove `skimage.novice`\n`skimage.novice` was built by and for Software Carpentry. I've recently learned that it is no longer being used, so I don't see much use for it remaining in the package.\r\n\r\nIf someone feels excited about turning it into a new package, please do so.\n", "before_files": [{"content": "\"\"\"\nskimage.novice\n==============\nA special Python image submodule for beginners.\n\nDescription\n-----------\n``skimage.novice`` provides a simple image manipulation interface for\nbeginners. It allows for easy loading, manipulating, and saving of image\nfiles.\n\nThis module is primarily intended for teaching and differs significantly from\nthe normal, array-oriented image functions used by scikit-image.\n\n.. note::\n\n This module uses the Cartesian coordinate system, where the origin is at\n the lower-left corner instead of the upper-right and the order is x, y\n instead of row, column.\n\n\nExamples\n--------\nWe can create a Picture object open opening an image file:\n\n>>> from skimage import novice\n>>> from skimage import data\n>>> picture = novice.open(data.data_dir + '/chelsea.png')\n\nWe can display pictures (after running this command, close the window to access the prompt again):\n\n>>> picture.show() # doctest: +SKIP\n\nPictures know their format:\n\n>>> picture.format\n'png'\n\n... and where they came from:\n\n>>> picture.path.endswith('chelsea.png')\nTrue\n\n... and their size:\n\n>>> picture.size\n(451, 300)\n>>> picture.width\n451\n\nAs a reminder, we can preview the picture with our earlier command:\n\n>>> picture.show() # doctest: +SKIP\n\nChanging `size` resizes the picture.\n\n>>> picture.size = (45, 30)\n\nWe can preview the changes we made to the picture with the ``compare`` command:\n\n>>> picture.compare() # doctest: +SKIP\n\nYou can iterate over pixels, which have RGB values between 0 and 255,\nand know their location in the picture.\n\n>>> for pixel in picture:\n... if (pixel.red > 128) and (pixel.x < picture.width):\n... pixel.red = pixel.red / 2\n\nPictures know if they've been modified from the original file\n\n>>> picture.modified\nTrue\n>>> print(picture.path)\nNone\n\nPictures can be indexed like arrays\n\n>>> picture[0:20, 0:20] = (0, 0, 0)\n\nSaving the picture updates the path attribute, format, and modified state.\n\n>>> picture.save('save-demo.jpg')\n>>> picture.path.endswith('save-demo.jpg')\nTrue\n>>> picture.format\n'jpeg'\n>>> picture.modified\nFalse\n\nAn image can also be restored to its original state after modification:\n\n>>> picture[0:20, 0:20] = (0, 0, 0)\n>>> picture.compare() # doctest: +SKIP\n>>> picture.reset()\n>>> picture.compare() # doctest: +SKIP\n\n\"\"\"\nfrom ._novice import Picture, open, colors, color_dict\n\n\n__all__ = ['Picture', 'open', 'colors', 'color_dict']\n", "path": "skimage/novice/__init__.py"}]}
1439
147
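The scikit-image entry deprecates a whole subpackage rather than fixing a bug; the accepted diff simply emits a warning when the module is imported. The pattern, reduced to its core:

```python
import warnings

# Runs once, at import time of the deprecated subpackage.
warnings.warn("This module was deprecated in version 0.14. "
              "It will be removed in 0.16.")
```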
gh_patches_debug_21859
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-contrib-1540
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add readthedocs documentation for pika instrumentation Part of [1491](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/1491) </issue> <code> [start of instrumentation/opentelemetry-instrumentation-pika/src/opentelemetry/instrumentation/pika/__init__.py] 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 """ 15 Instrument `pika` to trace RabbitMQ applications. 16 17 Usage 18 ----- 19 20 * Start broker backend 21 22 .. code-block:: python 23 24 docker run -p 5672:5672 rabbitmq 25 26 * Run instrumented task 27 28 .. code-block:: python 29 30 import pika 31 from opentelemetry.instrumentation.pika import PikaInstrumentor 32 33 PikaInstrumentor().instrument() 34 35 connection = pika.BlockingConnection(pika.URLParameters('amqp://localhost')) 36 channel = connection.channel() 37 channel.queue_declare(queue='hello') 38 channel.basic_publish(exchange='', routing_key='hello', body=b'Hello World!') 39 40 * PikaInstrumentor also supports instrumentation of a single channel 41 42 .. code-block:: python 43 44 import pika 45 from opentelemetry.instrumentation.pika import PikaInstrumentor 46 47 connection = pika.BlockingConnection(pika.URLParameters('amqp://localhost')) 48 channel = connection.channel() 49 channel.queue_declare(queue='hello') 50 51 pika_instrumentation = PikaInstrumentor() 52 pika_instrumentation.instrument_channel(channel=channel) 53 54 55 channel.basic_publish(exchange='', routing_key='hello', body=b'Hello World!') 56 57 pika_instrumentation.uninstrument_channel(channel=channel) 58 59 * PikaInstrumentor also supports instrumentation without creating an object, and receiving a tracer_provider 60 61 .. code-block:: python 62 63 PikaInstrumentor.instrument_channel(channel, tracer_provider=tracer_provider) 64 65 * PikaInstrumentor also supports instrumenting with hooks that will be called when producing or consuming a message. 66 The hooks should be of type `Callable[[Span, bytes, BasicProperties], None]` 67 where the first parameter is the span, the second parameter is the message body 68 and the third parameter is the message properties 69 70 .. 
code-block:: python 71 72 def publish_hook(span: Span, body: bytes, properties: BasicProperties): 73 span.set_attribute("messaging.payload", body.decode()) 74 75 def consume_hook(span: Span, body: bytes, properties: BasicProperties): 76 span.set_attribute("messaging.id", properties.message_id) 77 78 PikaInstrumentor.instrument_channel(channel, publish_hook=publish_hook, consume_hook=consume_hook) 79 80 API 81 --- 82 """ 83 # pylint: disable=import-error 84 85 from .pika_instrumentor import PikaInstrumentor 86 from .version import __version__ 87 88 __all__ = ["PikaInstrumentor", "__version__"] 89 [end of instrumentation/opentelemetry-instrumentation-pika/src/opentelemetry/instrumentation/pika/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/instrumentation/opentelemetry-instrumentation-pika/src/opentelemetry/instrumentation/pika/__init__.py b/instrumentation/opentelemetry-instrumentation-pika/src/opentelemetry/instrumentation/pika/__init__.py --- a/instrumentation/opentelemetry-instrumentation-pika/src/opentelemetry/instrumentation/pika/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-pika/src/opentelemetry/instrumentation/pika/__init__.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """ -Instrument `pika` to trace RabbitMQ applications. +Instrument pika to trace RabbitMQ applications. Usage ----- @@ -63,7 +63,7 @@ PikaInstrumentor.instrument_channel(channel, tracer_provider=tracer_provider) * PikaInstrumentor also supports instrumenting with hooks that will be called when producing or consuming a message. - The hooks should be of type `Callable[[Span, bytes, BasicProperties], None]` + The hooks should be of type "Callable[[Span, bytes, BasicProperties], None]" where the first parameter is the span, the second parameter is the message body and the third parameter is the message properties
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-pika/src/opentelemetry/instrumentation/pika/__init__.py b/instrumentation/opentelemetry-instrumentation-pika/src/opentelemetry/instrumentation/pika/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-pika/src/opentelemetry/instrumentation/pika/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-pika/src/opentelemetry/instrumentation/pika/__init__.py\n@@ -12,7 +12,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \"\"\"\n-Instrument `pika` to trace RabbitMQ applications.\n+Instrument pika to trace RabbitMQ applications.\n \n Usage\n -----\n@@ -63,7 +63,7 @@\n PikaInstrumentor.instrument_channel(channel, tracer_provider=tracer_provider)\n \n * PikaInstrumentor also supports instrumenting with hooks that will be called when producing or consuming a message.\n- The hooks should be of type `Callable[[Span, bytes, BasicProperties], None]`\n+ The hooks should be of type \"Callable[[Span, bytes, BasicProperties], None]\"\n where the first parameter is the span, the second parameter is the message body\n and the third parameter is the message properties\n", "issue": "Add readthedocs documentation for pika instrumentation\nPart of [1491](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/1491)\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nInstrument `pika` to trace RabbitMQ applications.\n\nUsage\n-----\n\n* Start broker backend\n\n.. code-block:: python\n\n docker run -p 5672:5672 rabbitmq\n\n* Run instrumented task\n\n.. code-block:: python\n\n import pika\n from opentelemetry.instrumentation.pika import PikaInstrumentor\n\n PikaInstrumentor().instrument()\n\n connection = pika.BlockingConnection(pika.URLParameters('amqp://localhost'))\n channel = connection.channel()\n channel.queue_declare(queue='hello')\n channel.basic_publish(exchange='', routing_key='hello', body=b'Hello World!')\n\n* PikaInstrumentor also supports instrumentation of a single channel\n\n.. code-block:: python\n\n import pika\n from opentelemetry.instrumentation.pika import PikaInstrumentor\n\n connection = pika.BlockingConnection(pika.URLParameters('amqp://localhost'))\n channel = connection.channel()\n channel.queue_declare(queue='hello')\n\n pika_instrumentation = PikaInstrumentor()\n pika_instrumentation.instrument_channel(channel=channel)\n\n\n channel.basic_publish(exchange='', routing_key='hello', body=b'Hello World!')\n\n pika_instrumentation.uninstrument_channel(channel=channel)\n\n* PikaInstrumentor also supports instrumentation without creating an object, and receiving a tracer_provider\n\n.. 
code-block:: python\n\n PikaInstrumentor.instrument_channel(channel, tracer_provider=tracer_provider)\n\n* PikaInstrumentor also supports instrumenting with hooks that will be called when producing or consuming a message.\n The hooks should be of type `Callable[[Span, bytes, BasicProperties], None]`\n where the first parameter is the span, the second parameter is the message body\n and the third parameter is the message properties\n\n.. code-block:: python\n\n def publish_hook(span: Span, body: bytes, properties: BasicProperties):\n span.set_attribute(\"messaging.payload\", body.decode())\n\n def consume_hook(span: Span, body: bytes, properties: BasicProperties):\n span.set_attribute(\"messaging.id\", properties.message_id)\n\n PikaInstrumentor.instrument_channel(channel, publish_hook=publish_hook, consume_hook=consume_hook)\n\nAPI\n---\n\"\"\"\n# pylint: disable=import-error\n\nfrom .pika_instrumentor import PikaInstrumentor\nfrom .version import __version__\n\n__all__ = [\"PikaInstrumentor\", \"__version__\"]\n", "path": "instrumentation/opentelemetry-instrumentation-pika/src/opentelemetry/instrumentation/pika/__init__.py"}]}
1,435
284
gh_patches_debug_15296
rasdani/github-patches
git_diff
litestar-org__litestar-1748
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> StaticFilesConfig and virtual directories I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems. https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32 </issue> <code> [start of litestar/contrib/pydantic.py] 1 from __future__ import annotations 2 3 from typing import TYPE_CHECKING, Generic, TypeVar 4 5 from pydantic import BaseModel 6 7 from litestar.dto.factory.abc import AbstractDTOFactory 8 from litestar.dto.factory.field import DTO_FIELD_META_KEY, DTOField 9 from litestar.dto.factory.types import FieldDefinition 10 from litestar.dto.factory.utils import get_model_type_hints 11 from litestar.types.empty import Empty 12 from litestar.utils.helpers import get_fully_qualified_class_name 13 14 if TYPE_CHECKING: 15 from typing import Any, ClassVar, Collection, Generator 16 17 from pydantic.fields import ModelField 18 19 from litestar.typing import ParsedType 20 21 __all__ = ("PydanticDTO",) 22 23 T = TypeVar("T", bound="BaseModel | Collection[BaseModel]") 24 25 26 class PydanticDTO(AbstractDTOFactory[T], Generic[T]): 27 """Support for domain modelling with Pydantic.""" 28 29 __slots__ = () 30 31 model_type: ClassVar[type[BaseModel]] 32 33 @classmethod 34 def generate_field_definitions(cls, model_type: type[BaseModel]) -> Generator[FieldDefinition, None, None]: 35 for key, parsed_type in get_model_type_hints(model_type).items(): 36 model_field = model_type.__fields__[key] 37 38 dto_field: DTOField | None = model_field.field_info.extra.get(DTO_FIELD_META_KEY) 39 40 def determine_default(_parsed_type: ParsedType, _model_field: ModelField) -> Any: 41 if ( 42 _model_field.default is Ellipsis 43 or _model_field.default_factory is not None 44 or (_model_field.default is None and not _parsed_type.is_optional) 45 ): 46 return Empty 47 48 return _model_field.default 49 50 field_def = FieldDefinition( 51 name=key, 52 default=determine_default(parsed_type, model_field), 53 parsed_type=parsed_type, 54 default_factory=model_field.default_factory or Empty, 55 dto_field=dto_field, 56 unique_model_name=get_fully_qualified_class_name(model_type), 57 ) 58 59 yield field_def 60 61 @classmethod 62 def detect_nested_field(cls, parsed_type: ParsedType) -> bool: 63 return parsed_type.is_subclass_of(BaseModel) 64 [end of litestar/contrib/pydantic.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/litestar/contrib/pydantic.py b/litestar/contrib/pydantic.py --- a/litestar/contrib/pydantic.py +++ b/litestar/contrib/pydantic.py @@ -32,9 +32,10 @@ @classmethod def generate_field_definitions(cls, model_type: type[BaseModel]) -> Generator[FieldDefinition, None, None]: - for key, parsed_type in get_model_type_hints(model_type).items(): + model_parsed_types = get_model_type_hints(model_type) + for key, model_field in model_type.__fields__.items(): + parsed_type = model_parsed_types[key] model_field = model_type.__fields__[key] - dto_field: DTOField | None = model_field.field_info.extra.get(DTO_FIELD_META_KEY) def determine_default(_parsed_type: ParsedType, _model_field: ModelField) -> Any:
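The issue text above argues that validating `directories` as `DirectoryPath` is too strict, because packaged resources need not exist as real directories on disk (for example when the package ships as a zip). A minimal sketch of reading packaged data through the standard-library `importlib.resources` API, which is the stdlib counterpart of the `importlib_resources` backport mentioned in the issue; the package and resource names in the commented call are placeholders, and the exact `FileSystemProtocol` hooks the framework expects are not shown here.

```python
from importlib.resources import files


def read_packaged_file(package: str, *parts: str) -> bytes:
    # Works whether the package is an on-disk directory or lives inside a zip/wheel,
    # which is exactly the case a local-filesystem existence check would reject.
    traversable = files(package)
    for part in parts:
        traversable = traversable / part
    return traversable.read_bytes()


# Hypothetical usage; "my_package" and the resource path are placeholder names.
# data = read_packaged_file("my_package", "static", "index.html")
```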
{"golden_diff": "diff --git a/litestar/contrib/pydantic.py b/litestar/contrib/pydantic.py\n--- a/litestar/contrib/pydantic.py\n+++ b/litestar/contrib/pydantic.py\n@@ -32,9 +32,10 @@\n \n @classmethod\n def generate_field_definitions(cls, model_type: type[BaseModel]) -> Generator[FieldDefinition, None, None]:\n- for key, parsed_type in get_model_type_hints(model_type).items():\n+ model_parsed_types = get_model_type_hints(model_type)\n+ for key, model_field in model_type.__fields__.items():\n+ parsed_type = model_parsed_types[key]\n model_field = model_type.__fields__[key]\n-\n dto_field: DTOField | None = model_field.field_info.extra.get(DTO_FIELD_META_KEY)\n \n def determine_default(_parsed_type: ParsedType, _model_field: ModelField) -> Any:\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Generic, TypeVar\n\nfrom pydantic import BaseModel\n\nfrom litestar.dto.factory.abc import AbstractDTOFactory\nfrom litestar.dto.factory.field import DTO_FIELD_META_KEY, DTOField\nfrom litestar.dto.factory.types import FieldDefinition\nfrom litestar.dto.factory.utils import get_model_type_hints\nfrom litestar.types.empty import Empty\nfrom litestar.utils.helpers import get_fully_qualified_class_name\n\nif TYPE_CHECKING:\n from typing import Any, ClassVar, Collection, Generator\n\n from pydantic.fields import ModelField\n\n from litestar.typing import ParsedType\n\n__all__ = (\"PydanticDTO\",)\n\nT = TypeVar(\"T\", bound=\"BaseModel | Collection[BaseModel]\")\n\n\nclass PydanticDTO(AbstractDTOFactory[T], Generic[T]):\n \"\"\"Support for domain modelling with Pydantic.\"\"\"\n\n __slots__ = ()\n\n model_type: ClassVar[type[BaseModel]]\n\n @classmethod\n def generate_field_definitions(cls, model_type: type[BaseModel]) -> Generator[FieldDefinition, None, None]:\n for key, parsed_type in get_model_type_hints(model_type).items():\n model_field = model_type.__fields__[key]\n\n dto_field: DTOField | None = model_field.field_info.extra.get(DTO_FIELD_META_KEY)\n\n def determine_default(_parsed_type: ParsedType, _model_field: ModelField) -> Any:\n if (\n _model_field.default is Ellipsis\n or _model_field.default_factory is not None\n or (_model_field.default is None and not _parsed_type.is_optional)\n ):\n return Empty\n\n return _model_field.default\n\n field_def = FieldDefinition(\n name=key,\n default=determine_default(parsed_type, model_field),\n parsed_type=parsed_type,\n default_factory=model_field.default_factory or Empty,\n dto_field=dto_field,\n unique_model_name=get_fully_qualified_class_name(model_type),\n )\n\n yield field_def\n\n @classmethod\n def detect_nested_field(cls, parsed_type: ParsedType) -> bool:\n return parsed_type.is_subclass_of(BaseModel)\n", "path": "litestar/contrib/pydantic.py"}]}
1,304
205
gh_patches_debug_34571
rasdani/github-patches
git_diff
iterative__dvc-5080
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> config: add --list to list all variables and their values Something that would be handy for us during debugging and for users. Same as `git config --list`. Useful to avoid confusion such as in https://github.com/iterative/dvc/issues/4970 </issue> <code> [start of dvc/command/config.py] 1 import argparse 2 import logging 3 4 from dvc.command.base import CmdBaseNoRepo, append_doc_link 5 from dvc.config import Config, ConfigError 6 7 logger = logging.getLogger(__name__) 8 9 10 class CmdConfig(CmdBaseNoRepo): 11 def __init__(self, args): 12 super().__init__(args) 13 14 self.config = Config(validate=False) 15 16 def run(self): 17 section, opt = self.args.name.lower().strip().split(".", 1) 18 19 if self.args.value is None and not self.args.unset: 20 conf = self.config.load_one(self.args.level) 21 self._check(conf, section, opt) 22 logger.info(conf[section][opt]) 23 return 0 24 25 with self.config.edit(self.args.level) as conf: 26 if self.args.unset: 27 self._check(conf, section, opt) 28 del conf[section][opt] 29 else: 30 self._check(conf, section) 31 conf[section][opt] = self.args.value 32 33 if self.args.name == "cache.type": 34 logger.warning( 35 "You have changed the 'cache.type' option. This doesn't update" 36 " any existing workspace file links, but it can be done with:" 37 "\n dvc checkout --relink" 38 ) 39 40 return 0 41 42 def _check(self, conf, section, opt=None): 43 if section not in conf: 44 msg = "section {} doesn't exist" 45 raise ConfigError(msg.format(self.args.name)) 46 47 if opt and opt not in conf[section]: 48 msg = "option {} doesn't exist" 49 raise ConfigError(msg.format(self.args.name)) 50 51 52 parent_config_parser = argparse.ArgumentParser(add_help=False) 53 level_group = parent_config_parser.add_mutually_exclusive_group() 54 level_group.add_argument( 55 "--global", 56 dest="level", 57 action="store_const", 58 const="global", 59 help="Use global config.", 60 ) 61 level_group.add_argument( 62 "--system", 63 dest="level", 64 action="store_const", 65 const="system", 66 help="Use system config.", 67 ) 68 level_group.add_argument( 69 "--local", 70 dest="level", 71 action="store_const", 72 const="local", 73 help="Use local config.", 74 ) 75 parent_config_parser.set_defaults(level="repo") 76 77 78 def add_parser(subparsers, parent_parser): 79 CONFIG_HELP = "Get or set config options." 80 81 config_parser = subparsers.add_parser( 82 "config", 83 parents=[parent_config_parser, parent_parser], 84 description=append_doc_link(CONFIG_HELP, "config"), 85 help=CONFIG_HELP, 86 formatter_class=argparse.RawDescriptionHelpFormatter, 87 ) 88 config_parser.add_argument( 89 "-u", 90 "--unset", 91 default=False, 92 action="store_true", 93 help="Unset option.", 94 ) 95 config_parser.add_argument("name", help="Option name.") 96 config_parser.add_argument("value", nargs="?", help="Option value.") 97 config_parser.set_defaults(func=CmdConfig) 98 [end of dvc/command/config.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dvc/command/config.py b/dvc/command/config.py --- a/dvc/command/config.py +++ b/dvc/command/config.py @@ -3,6 +3,7 @@ from dvc.command.base import CmdBaseNoRepo, append_doc_link from dvc.config import Config, ConfigError +from dvc.utils.flatten import flatten logger = logging.getLogger(__name__) @@ -14,6 +15,22 @@ self.config = Config(validate=False) def run(self): + if self.args.list: + if any((self.args.name, self.args.value, self.args.unset)): + logger.error( + "-l/--list can't be used together with any of these " + "options: -u/--unset, name, value" + ) + return 1 + + conf = self.config.load_one(self.args.level) + logger.info("\n".join(self._format_config(conf))) + return 0 + + if self.args.name is None: + logger.error("name argument is required") + return 1 + section, opt = self.args.name.lower().strip().split(".", 1) if self.args.value is None and not self.args.unset: @@ -48,6 +65,11 @@ msg = "option {} doesn't exist" raise ConfigError(msg.format(self.args.name)) + @staticmethod + def _format_config(config): + for key, value in flatten(config).items(): + yield f"{key}={value}" + parent_config_parser = argparse.ArgumentParser(add_help=False) level_group = parent_config_parser.add_mutually_exclusive_group() @@ -92,6 +114,13 @@ action="store_true", help="Unset option.", ) - config_parser.add_argument("name", help="Option name.") + config_parser.add_argument("name", nargs="?", help="Option name.") config_parser.add_argument("value", nargs="?", help="Option value.") + config_parser.add_argument( + "-l", + "--list", + default=False, + action="store_true", + help="list all defined config values", + ) config_parser.set_defaults(func=CmdConfig)
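The patch above prints the loaded config as `key=value` lines after flattening the nested dict. A rough standalone sketch of that flattening step is below; dvc's actual `dvc.utils.flatten.flatten` helper may differ in naming and separator details.

```python
def flatten(d, parent_key=""):
    """Collapse a nested dict into dotted keys, e.g. {'remote': {'url': ...}} -> 'remote.url'."""
    items = {}
    for key, value in d.items():
        full_key = f"{parent_key}.{key}" if parent_key else key
        if isinstance(value, dict):
            items.update(flatten(value, full_key))
        else:
            items[full_key] = value
    return items


conf = {"core": {"remote": "myremote"}, "remote": {"myremote": {"url": "s3://bucket"}}}
for key, value in flatten(conf).items():
    print(f"{key}={value}")
# core.remote=myremote
# remote.myremote.url=s3://bucket
```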
{"golden_diff": "diff --git a/dvc/command/config.py b/dvc/command/config.py\n--- a/dvc/command/config.py\n+++ b/dvc/command/config.py\n@@ -3,6 +3,7 @@\n \n from dvc.command.base import CmdBaseNoRepo, append_doc_link\n from dvc.config import Config, ConfigError\n+from dvc.utils.flatten import flatten\n \n logger = logging.getLogger(__name__)\n \n@@ -14,6 +15,22 @@\n self.config = Config(validate=False)\n \n def run(self):\n+ if self.args.list:\n+ if any((self.args.name, self.args.value, self.args.unset)):\n+ logger.error(\n+ \"-l/--list can't be used together with any of these \"\n+ \"options: -u/--unset, name, value\"\n+ )\n+ return 1\n+\n+ conf = self.config.load_one(self.args.level)\n+ logger.info(\"\\n\".join(self._format_config(conf)))\n+ return 0\n+\n+ if self.args.name is None:\n+ logger.error(\"name argument is required\")\n+ return 1\n+\n section, opt = self.args.name.lower().strip().split(\".\", 1)\n \n if self.args.value is None and not self.args.unset:\n@@ -48,6 +65,11 @@\n msg = \"option {} doesn't exist\"\n raise ConfigError(msg.format(self.args.name))\n \n+ @staticmethod\n+ def _format_config(config):\n+ for key, value in flatten(config).items():\n+ yield f\"{key}={value}\"\n+\n \n parent_config_parser = argparse.ArgumentParser(add_help=False)\n level_group = parent_config_parser.add_mutually_exclusive_group()\n@@ -92,6 +114,13 @@\n action=\"store_true\",\n help=\"Unset option.\",\n )\n- config_parser.add_argument(\"name\", help=\"Option name.\")\n+ config_parser.add_argument(\"name\", nargs=\"?\", help=\"Option name.\")\n config_parser.add_argument(\"value\", nargs=\"?\", help=\"Option value.\")\n+ config_parser.add_argument(\n+ \"-l\",\n+ \"--list\",\n+ default=False,\n+ action=\"store_true\",\n+ help=\"list all defined config values\",\n+ )\n config_parser.set_defaults(func=CmdConfig)\n", "issue": "config: add --list to list all variables and their values\nSomething that would be handy for us during debugging and for users. Same as `git config --list`. Useful to avoid confusion such as in https://github.com/iterative/dvc/issues/4970\n", "before_files": [{"content": "import argparse\nimport logging\n\nfrom dvc.command.base import CmdBaseNoRepo, append_doc_link\nfrom dvc.config import Config, ConfigError\n\nlogger = logging.getLogger(__name__)\n\n\nclass CmdConfig(CmdBaseNoRepo):\n def __init__(self, args):\n super().__init__(args)\n\n self.config = Config(validate=False)\n\n def run(self):\n section, opt = self.args.name.lower().strip().split(\".\", 1)\n\n if self.args.value is None and not self.args.unset:\n conf = self.config.load_one(self.args.level)\n self._check(conf, section, opt)\n logger.info(conf[section][opt])\n return 0\n\n with self.config.edit(self.args.level) as conf:\n if self.args.unset:\n self._check(conf, section, opt)\n del conf[section][opt]\n else:\n self._check(conf, section)\n conf[section][opt] = self.args.value\n\n if self.args.name == \"cache.type\":\n logger.warning(\n \"You have changed the 'cache.type' option. 
This doesn't update\"\n \" any existing workspace file links, but it can be done with:\"\n \"\\n dvc checkout --relink\"\n )\n\n return 0\n\n def _check(self, conf, section, opt=None):\n if section not in conf:\n msg = \"section {} doesn't exist\"\n raise ConfigError(msg.format(self.args.name))\n\n if opt and opt not in conf[section]:\n msg = \"option {} doesn't exist\"\n raise ConfigError(msg.format(self.args.name))\n\n\nparent_config_parser = argparse.ArgumentParser(add_help=False)\nlevel_group = parent_config_parser.add_mutually_exclusive_group()\nlevel_group.add_argument(\n \"--global\",\n dest=\"level\",\n action=\"store_const\",\n const=\"global\",\n help=\"Use global config.\",\n)\nlevel_group.add_argument(\n \"--system\",\n dest=\"level\",\n action=\"store_const\",\n const=\"system\",\n help=\"Use system config.\",\n)\nlevel_group.add_argument(\n \"--local\",\n dest=\"level\",\n action=\"store_const\",\n const=\"local\",\n help=\"Use local config.\",\n)\nparent_config_parser.set_defaults(level=\"repo\")\n\n\ndef add_parser(subparsers, parent_parser):\n CONFIG_HELP = \"Get or set config options.\"\n\n config_parser = subparsers.add_parser(\n \"config\",\n parents=[parent_config_parser, parent_parser],\n description=append_doc_link(CONFIG_HELP, \"config\"),\n help=CONFIG_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n config_parser.add_argument(\n \"-u\",\n \"--unset\",\n default=False,\n action=\"store_true\",\n help=\"Unset option.\",\n )\n config_parser.add_argument(\"name\", help=\"Option name.\")\n config_parser.add_argument(\"value\", nargs=\"?\", help=\"Option value.\")\n config_parser.set_defaults(func=CmdConfig)\n", "path": "dvc/command/config.py"}]}
1,407
496
gh_patches_debug_2257
rasdani/github-patches
git_diff
apluslms__a-plus-204
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Numbers should be safe characters in file names If the attachment for the "Exercise with attachment" is named for example "2.zip", the name for that file will be just ".zip" in the database because numbers are not considered as safe characters: https://github.com/Aalto-LeTech/a-plus/blob/a86bf/lib/helpers.py#L62 </issue> <code> [start of lib/helpers.py] 1 from django.conf import settings 2 from random import choice 3 from PIL import Image 4 import string 5 import urllib 6 7 8 def extract_form_errors(form): 9 """ 10 Extracts Django form errors to a list of error messages. 11 """ 12 errors = [] 13 for field in form.errors: 14 for err in form.errors[field]: 15 errors.append("%s: %s" % (field, err)) 16 return errors 17 18 19 def get_random_string(length=32): 20 """ 21 This function creates a random string with a given length. 22 The strings consist of upper and lower case letters and numbers. 23 24 @param length: the length of the randomized string, defaults to 32 25 @return: a random string containing lower and upper case letters and digits 26 """ 27 28 # Use all letters and numbers in the identifier 29 choices = string.ascii_letters + string.digits 30 31 return ''.join([choice(choices) for _ in range(length)]) 32 33 34 def query_dict_to_list_of_tuples(query_dict): 35 """ 36 This helper function creates a list of tuples with the values 37 from a QueryDict object. In a QueryDict the same key can have 38 several values, which is not possible with a typical dict nor a JSON 39 object. The resulting list will be similar to [(key1, value1), (key2, value2)]. 40 41 @param query_dict: a QueryDict object 42 @return: a list of tuples with the same keys and values as in the given QueryDict 43 """ 44 list_of_tuples = [] 45 for key in query_dict: 46 for val in query_dict.getlist(key): 47 list_of_tuples.append((key, val)) 48 return list_of_tuples 49 50 51 def update_url_params(url, params): 52 delimiter = "&" if "?" in url else "?" 53 return url + delimiter + urllib.parse.urlencode(params) 54 55 56 def has_same_domain(url1, url2): 57 uri1 = urllib.parse.urlparse(url1) 58 uri2 = urllib.parse.urlparse(url2) 59 return uri1.netloc == uri2.netloc 60 61 62 FILENAME_CHARS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ._-" 63 64 def safe_file_name(name): 65 safename = "".join(c for c in name if c in FILENAME_CHARS) 66 if safename[0] == "-": 67 return "_" + safename[1:80] 68 return safename[:80] 69 70 71 def resize_image(path, max_size): 72 image = Image.open(path) 73 image.thumbnail(max_size, Image.ANTIALIAS) 74 image.save(path) 75 76 77 def roman_numeral(number): 78 numbers = [1000,900,500,400,100,90,50,40,10,9,5,4,1]; 79 letters = ["M","CM","D","CD","C","XC","L","XL","X","IX","V","IV","I"]; 80 roman = "" 81 for i in range(len(numbers)): 82 while number >= numbers[i]: 83 roman += letters[i] 84 number -= numbers[i] 85 return roman 86 87 88 def settings_text(request, key): 89 def get(name): 90 if hasattr(settings, name): 91 return getattr(settings, name) 92 return None 93 return get('{}_{}'.format(key, request.LANGUAGE_CODE.upper())) or get(key) 94 [end of lib/helpers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lib/helpers.py b/lib/helpers.py --- a/lib/helpers.py +++ b/lib/helpers.py @@ -59,7 +59,7 @@ return uri1.netloc == uri2.netloc -FILENAME_CHARS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ._-" +FILENAME_CHARS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ._-0123456789" def safe_file_name(name): safename = "".join(c for c in name if c in FILENAME_CHARS)
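To make the reported behaviour concrete, here is a standalone re-implementation of the whitelist filtering (not imported from the project) showing how "2.zip" collapses to ".zip" without digits in the allowed set and survives once digits are added:

```python
OLD_CHARS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ._-"
NEW_CHARS = OLD_CHARS + "0123456789"


def filter_name(name, allowed):
    # Same idea as safe_file_name: keep only whitelisted characters.
    return "".join(c for c in name if c in allowed)


print(filter_name("2.zip", OLD_CHARS))  # '.zip'  -- the digit is dropped
print(filter_name("2.zip", NEW_CHARS))  # '2.zip' -- digits are now safe
```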
{"golden_diff": "diff --git a/lib/helpers.py b/lib/helpers.py\n--- a/lib/helpers.py\n+++ b/lib/helpers.py\n@@ -59,7 +59,7 @@\n return uri1.netloc == uri2.netloc\n \n \n-FILENAME_CHARS = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ._-\"\n+FILENAME_CHARS = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ._-0123456789\"\n \n def safe_file_name(name):\n safename = \"\".join(c for c in name if c in FILENAME_CHARS)\n", "issue": "Numbers should be safe characters in file names\nIf the attachment for the \"Exercise with attachment\" is named for example \"2.zip\", the name for that file will be just \".zip\" in the database because numbers are not considered as safe characters:\n\nhttps://github.com/Aalto-LeTech/a-plus/blob/a86bf/lib/helpers.py#L62\n\n", "before_files": [{"content": "from django.conf import settings\nfrom random import choice\nfrom PIL import Image\nimport string\nimport urllib\n\n\ndef extract_form_errors(form):\n \"\"\"\n Extracts Django form errors to a list of error messages.\n \"\"\"\n errors = []\n for field in form.errors:\n for err in form.errors[field]:\n errors.append(\"%s: %s\" % (field, err))\n return errors\n\n\ndef get_random_string(length=32):\n \"\"\"\n This function creates a random string with a given length.\n The strings consist of upper and lower case letters and numbers.\n\n @param length: the length of the randomized string, defaults to 32\n @return: a random string containing lower and upper case letters and digits\n \"\"\"\n\n # Use all letters and numbers in the identifier\n choices = string.ascii_letters + string.digits\n\n return ''.join([choice(choices) for _ in range(length)])\n\n\ndef query_dict_to_list_of_tuples(query_dict):\n \"\"\"\n This helper function creates a list of tuples with the values\n from a QueryDict object. In a QueryDict the same key can have\n several values, which is not possible with a typical dict nor a JSON\n object. The resulting list will be similar to [(key1, value1), (key2, value2)].\n\n @param query_dict: a QueryDict object\n @return: a list of tuples with the same keys and values as in the given QueryDict\n \"\"\"\n list_of_tuples = []\n for key in query_dict:\n for val in query_dict.getlist(key):\n list_of_tuples.append((key, val))\n return list_of_tuples\n\n\ndef update_url_params(url, params):\n delimiter = \"&\" if \"?\" in url else \"?\"\n return url + delimiter + urllib.parse.urlencode(params)\n\n\ndef has_same_domain(url1, url2):\n uri1 = urllib.parse.urlparse(url1)\n uri2 = urllib.parse.urlparse(url2)\n return uri1.netloc == uri2.netloc\n\n\nFILENAME_CHARS = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ._-\"\n\ndef safe_file_name(name):\n safename = \"\".join(c for c in name if c in FILENAME_CHARS)\n if safename[0] == \"-\":\n return \"_\" + safename[1:80]\n return safename[:80]\n\n\ndef resize_image(path, max_size):\n image = Image.open(path)\n image.thumbnail(max_size, Image.ANTIALIAS)\n image.save(path)\n\n\ndef roman_numeral(number):\n numbers = [1000,900,500,400,100,90,50,40,10,9,5,4,1];\n letters = [\"M\",\"CM\",\"D\",\"CD\",\"C\",\"XC\",\"L\",\"XL\",\"X\",\"IX\",\"V\",\"IV\",\"I\"];\n roman = \"\"\n for i in range(len(numbers)):\n while number >= numbers[i]:\n roman += letters[i]\n number -= numbers[i]\n return roman\n\n\ndef settings_text(request, key):\n def get(name):\n if hasattr(settings, name):\n return getattr(settings, name)\n return None\n return get('{}_{}'.format(key, request.LANGUAGE_CODE.upper())) or get(key)\n", "path": "lib/helpers.py"}]}
1,491
105
gh_patches_debug_10343
rasdani/github-patches
git_diff
scrapy__scrapy-3671
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Copying items not work correctly with list values > > item = Item() > > item['some_field'] = [] > > > > c_item = Item(item) > > c_item['some_field'].append(1) > > > > print item['some_field'], c_item['some_field'] > > [1] [1] </issue> <code> [start of scrapy/item.py] 1 """ 2 Scrapy Item 3 4 See documentation in docs/topics/item.rst 5 """ 6 7 from pprint import pformat 8 from collections import MutableMapping 9 10 from abc import ABCMeta 11 import six 12 13 from scrapy.utils.trackref import object_ref 14 15 16 class BaseItem(object_ref): 17 """Base class for all scraped items.""" 18 pass 19 20 21 class Field(dict): 22 """Container of field metadata""" 23 24 25 class ItemMeta(ABCMeta): 26 27 def __new__(mcs, class_name, bases, attrs): 28 classcell = attrs.pop('__classcell__', None) 29 new_bases = tuple(base._class for base in bases if hasattr(base, '_class')) 30 _class = super(ItemMeta, mcs).__new__(mcs, 'x_' + class_name, new_bases, attrs) 31 32 fields = getattr(_class, 'fields', {}) 33 new_attrs = {} 34 for n in dir(_class): 35 v = getattr(_class, n) 36 if isinstance(v, Field): 37 fields[n] = v 38 elif n in attrs: 39 new_attrs[n] = attrs[n] 40 41 new_attrs['fields'] = fields 42 new_attrs['_class'] = _class 43 if classcell is not None: 44 new_attrs['__classcell__'] = classcell 45 return super(ItemMeta, mcs).__new__(mcs, class_name, bases, new_attrs) 46 47 48 class DictItem(MutableMapping, BaseItem): 49 50 fields = {} 51 52 def __init__(self, *args, **kwargs): 53 self._values = {} 54 if args or kwargs: # avoid creating dict for most common case 55 for k, v in six.iteritems(dict(*args, **kwargs)): 56 self[k] = v 57 58 def __getitem__(self, key): 59 return self._values[key] 60 61 def __setitem__(self, key, value): 62 if key in self.fields: 63 self._values[key] = value 64 else: 65 raise KeyError("%s does not support field: %s" % 66 (self.__class__.__name__, key)) 67 68 def __delitem__(self, key): 69 del self._values[key] 70 71 def __getattr__(self, name): 72 if name in self.fields: 73 raise AttributeError("Use item[%r] to get field value" % name) 74 raise AttributeError(name) 75 76 def __setattr__(self, name, value): 77 if not name.startswith('_'): 78 raise AttributeError("Use item[%r] = %r to set field value" % 79 (name, value)) 80 super(DictItem, self).__setattr__(name, value) 81 82 def __len__(self): 83 return len(self._values) 84 85 def __iter__(self): 86 return iter(self._values) 87 88 __hash__ = BaseItem.__hash__ 89 90 def keys(self): 91 return self._values.keys() 92 93 def __repr__(self): 94 return pformat(dict(self)) 95 96 def copy(self): 97 return self.__class__(self) 98 99 100 @six.add_metaclass(ItemMeta) 101 class Item(DictItem): 102 pass 103 [end of scrapy/item.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scrapy/item.py b/scrapy/item.py --- a/scrapy/item.py +++ b/scrapy/item.py @@ -6,6 +6,7 @@ from pprint import pformat from collections import MutableMapping +from copy import deepcopy from abc import ABCMeta import six @@ -96,6 +97,13 @@ def copy(self): return self.__class__(self) + def deepcopy(self): + """Return a `deep copy`_ of this item. + + .. _deep copy: https://docs.python.org/library/copy.html#copy.deepcopy + """ + return deepcopy(self) + @six.add_metaclass(ItemMeta) class Item(DictItem):
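Because `Item(item)` behaves like a shallow dict copy, the sharing reported in the issue is ordinary Python reference semantics: the new item holds the same list object. A plain-dict sketch of the difference the added `deepcopy()` method addresses:

```python
from copy import deepcopy

original = {"some_field": []}

shallow = dict(original)          # new dict, but the inner list object is shared
shallow["some_field"].append(1)
print(original["some_field"])     # [1] -- mutation is visible through the original

deep = deepcopy(original)         # the inner list is cloned as well
deep["some_field"].append(2)
print(original["some_field"])     # still [1]
print(deep["some_field"])         # [1, 2]
```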
{"golden_diff": "diff --git a/scrapy/item.py b/scrapy/item.py\n--- a/scrapy/item.py\n+++ b/scrapy/item.py\n@@ -6,6 +6,7 @@\n \n from pprint import pformat\n from collections import MutableMapping\n+from copy import deepcopy\n \n from abc import ABCMeta\n import six\n@@ -96,6 +97,13 @@\n def copy(self):\n return self.__class__(self)\n \n+ def deepcopy(self):\n+ \"\"\"Return a `deep copy`_ of this item.\n+\n+ .. _deep copy: https://docs.python.org/library/copy.html#copy.deepcopy\n+ \"\"\"\n+ return deepcopy(self)\n+\n \n @six.add_metaclass(ItemMeta)\n class Item(DictItem):\n", "issue": "Copying items not work correctly with list values\n> > item = Item()\n> > item['some_field'] = []\n> > \n> > c_item = Item(item)\n> > c_item['some_field'].append(1)\n> > \n> > print item['some_field'], c_item['some_field']\n> > [1] [1]\n\n", "before_files": [{"content": "\"\"\"\nScrapy Item\n\nSee documentation in docs/topics/item.rst\n\"\"\"\n\nfrom pprint import pformat\nfrom collections import MutableMapping\n\nfrom abc import ABCMeta\nimport six\n\nfrom scrapy.utils.trackref import object_ref\n\n\nclass BaseItem(object_ref):\n \"\"\"Base class for all scraped items.\"\"\"\n pass\n\n\nclass Field(dict):\n \"\"\"Container of field metadata\"\"\"\n\n\nclass ItemMeta(ABCMeta):\n\n def __new__(mcs, class_name, bases, attrs):\n classcell = attrs.pop('__classcell__', None)\n new_bases = tuple(base._class for base in bases if hasattr(base, '_class'))\n _class = super(ItemMeta, mcs).__new__(mcs, 'x_' + class_name, new_bases, attrs)\n\n fields = getattr(_class, 'fields', {})\n new_attrs = {}\n for n in dir(_class):\n v = getattr(_class, n)\n if isinstance(v, Field):\n fields[n] = v\n elif n in attrs:\n new_attrs[n] = attrs[n]\n\n new_attrs['fields'] = fields\n new_attrs['_class'] = _class\n if classcell is not None:\n new_attrs['__classcell__'] = classcell\n return super(ItemMeta, mcs).__new__(mcs, class_name, bases, new_attrs)\n\n\nclass DictItem(MutableMapping, BaseItem):\n\n fields = {}\n\n def __init__(self, *args, **kwargs):\n self._values = {}\n if args or kwargs: # avoid creating dict for most common case\n for k, v in six.iteritems(dict(*args, **kwargs)):\n self[k] = v\n\n def __getitem__(self, key):\n return self._values[key]\n\n def __setitem__(self, key, value):\n if key in self.fields:\n self._values[key] = value\n else:\n raise KeyError(\"%s does not support field: %s\" %\n (self.__class__.__name__, key))\n\n def __delitem__(self, key):\n del self._values[key]\n\n def __getattr__(self, name):\n if name in self.fields:\n raise AttributeError(\"Use item[%r] to get field value\" % name)\n raise AttributeError(name)\n\n def __setattr__(self, name, value):\n if not name.startswith('_'):\n raise AttributeError(\"Use item[%r] = %r to set field value\" %\n (name, value))\n super(DictItem, self).__setattr__(name, value)\n\n def __len__(self):\n return len(self._values)\n\n def __iter__(self):\n return iter(self._values)\n\n __hash__ = BaseItem.__hash__\n\n def keys(self):\n return self._values.keys()\n\n def __repr__(self):\n return pformat(dict(self))\n\n def copy(self):\n return self.__class__(self)\n\n\[email protected]_metaclass(ItemMeta)\nclass Item(DictItem):\n pass\n", "path": "scrapy/item.py"}]}
1,461
160
gh_patches_debug_24337
rasdani/github-patches
git_diff
joke2k__faker-258
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Faker should provide company VAT numbers It would be nice if faker could provide company VAT numbers. More information about VAT codes is in Wikipedia: https://en.wikipedia.org/wiki/VAT_identification_number </issue> <code> [start of faker/providers/company/fi_FI/__init__.py] 1 from __future__ import unicode_literals 2 from .. import Provider as CompanyProvider 3 4 5 class Provider(CompanyProvider): 6 formats = ( 7 '{{last_name}} {{company_suffix}}', 8 '{{last_name}} {{last_name}} {{company_suffix}}', 9 '{{last_name}} {{last_name}} {{company_suffix}}', 10 '{{last_name}}' 11 ) 12 13 company_suffixes = ( 14 'As Oy', 'Tmi', 'Oy', 'Oyj', 'Ky', 'Osk', 'ry' 15 ) 16 [end of faker/providers/company/fi_FI/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/faker/providers/company/fi_FI/__init__.py b/faker/providers/company/fi_FI/__init__.py --- a/faker/providers/company/fi_FI/__init__.py +++ b/faker/providers/company/fi_FI/__init__.py @@ -13,3 +13,41 @@ company_suffixes = ( 'As Oy', 'Tmi', 'Oy', 'Oyj', 'Ky', 'Osk', 'ry' ) + + def company_business_id(self): + """ + Returns Finnish company Business Identity Code (y-tunnus). + Format is 8 digits - e.g. FI99999999,[8] last digit is a check + digit utilizing MOD 11-2. The first digit is zero for some old + organizations. This function provides current codes starting with + non-zero. + """ + def calculate_checksum(number): + """Calculate the checksum using mod 11,2 method""" + factors = [7, 9, 10, 5, 8, 4, 2] + sum = 0 + for x, y in zip(number, factors): + sum = sum + int(x) * y + if sum % 11 == 0: + return '0' + else: + return str(11 - sum % 11) + + first_digit = str(self.random_digit_not_null()) + body = first_digit + self.bothify(self.random_element(('######',))) + cs = calculate_checksum(body) + return (body + '-' + str(cs)) + + def company_vat(self): + """ + Returns Finnish VAT identification number (Arvonlisaveronumero). + This can be calculated from company business identity code by + adding prefix "FI" and removing dash before checksum. + """ + def convert_to_vat(business_id): + """ + Convert business id to VATIN + """ + return 'FI' + business_id.replace('-', '') + + return convert_to_vat(self.company_business_id())
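The check digit in the patch above is plain mod 11-2 arithmetic, so generated ids can be verified independently. Below is a small validator mirroring the same weights; note that the official Finnish scheme treats a weighted-sum remainder of 1 as unassigned (there is no single check digit for it), a corner case the generator in the patch does not special-case.

```python
FACTORS = [7, 9, 10, 5, 8, 4, 2]


def is_valid_business_id(business_id: str) -> bool:
    body, check = business_id.split("-")
    total = sum(int(digit) * factor for digit, factor in zip(body, FACTORS))
    remainder = total % 11
    if remainder == 1:
        return False  # no check digit is defined for this remainder
    expected = 0 if remainder == 0 else 11 - remainder
    return int(check) == expected


print(is_valid_business_id("1572860-0"))  # True: 7+45+70+10+64+24+0 = 220, divisible by 11
```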
{"golden_diff": "diff --git a/faker/providers/company/fi_FI/__init__.py b/faker/providers/company/fi_FI/__init__.py\n--- a/faker/providers/company/fi_FI/__init__.py\n+++ b/faker/providers/company/fi_FI/__init__.py\n@@ -13,3 +13,41 @@\n company_suffixes = (\n 'As Oy', 'Tmi', 'Oy', 'Oyj', 'Ky', 'Osk', 'ry'\n )\n+\n+ def company_business_id(self):\n+ \"\"\"\n+ Returns Finnish company Business Identity Code (y-tunnus).\n+ Format is 8 digits - e.g. FI99999999,[8] last digit is a check\n+ digit utilizing MOD 11-2. The first digit is zero for some old\n+ organizations. This function provides current codes starting with\n+ non-zero.\n+ \"\"\"\n+ def calculate_checksum(number):\n+ \"\"\"Calculate the checksum using mod 11,2 method\"\"\"\n+ factors = [7, 9, 10, 5, 8, 4, 2]\n+ sum = 0\n+ for x, y in zip(number, factors):\n+ sum = sum + int(x) * y\n+ if sum % 11 == 0:\n+ return '0'\n+ else:\n+ return str(11 - sum % 11)\n+\n+ first_digit = str(self.random_digit_not_null())\n+ body = first_digit + self.bothify(self.random_element(('######',)))\n+ cs = calculate_checksum(body)\n+ return (body + '-' + str(cs))\n+\n+ def company_vat(self):\n+ \"\"\"\n+ Returns Finnish VAT identification number (Arvonlisaveronumero).\n+ This can be calculated from company business identity code by\n+ adding prefix \"FI\" and removing dash before checksum.\n+ \"\"\"\n+ def convert_to_vat(business_id):\n+ \"\"\"\n+ Convert business id to VATIN\n+ \"\"\"\n+ return 'FI' + business_id.replace('-', '')\n+\n+ return convert_to_vat(self.company_business_id())\n", "issue": "Faker should provide company VAT numbers\nIt would be nice if faker could provide company VAT numbers. More information about VAT codes is in Wikipedia: https://en.wikipedia.org/wiki/VAT_identification_number\n\n", "before_files": [{"content": "from __future__ import unicode_literals\nfrom .. import Provider as CompanyProvider\n\n\nclass Provider(CompanyProvider):\n formats = (\n '{{last_name}} {{company_suffix}}',\n '{{last_name}} {{last_name}} {{company_suffix}}',\n '{{last_name}} {{last_name}} {{company_suffix}}',\n '{{last_name}}'\n )\n\n company_suffixes = (\n 'As Oy', 'Tmi', 'Oy', 'Oyj', 'Ky', 'Osk', 'ry'\n )\n", "path": "faker/providers/company/fi_FI/__init__.py"}]}
724
479
gh_patches_debug_61685
rasdani/github-patches
git_diff
joke2k__faker-1432
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> gmail.co.uk isn't a valid free email domain in the UK * Faker version: 6.6.2 * OS: macOS 11.2.3 When generating a free email address, I got a result with the domain `gmail.co.uk`. From the source code, this list of free UK email domains was copied from the PHP version of Faker, which is now archived. According to [this Google support thread](https://support.google.com/mail/thread/4572636?hl=en) (albeit not necessarily from someone with the authority to speak on behalf of Google), there is no such domain. ### Steps to reproduce 1. Configure Faker with the `en_UK` locale. 1. Generate free emails by calling `fake.free_email()` repeatedly 1. Observe that some of them end in `gmail.co.uk` ### Expected behavior Email addresses should not have `gmail.co.uk` as a domain. ### Actual behavior As a replacement, maybe include Hotmail's successor, `outlook.com`? It's not UK specific, but I don't know anything about the state of free UK email providers. gmail.co.uk isn't a valid free email domain in the UK * Faker version: 6.6.2 * OS: macOS 11.2.3 When generating a free email address, I got a result with the domain `gmail.co.uk`. From the source code, this list of free UK email domains was copied from the PHP version of Faker, which is now archived. According to [this Google support thread](https://support.google.com/mail/thread/4572636?hl=en) (albeit not necessarily from someone with the authority to speak on behalf of Google), there is no such domain. ### Steps to reproduce 1. Configure Faker with the `en_UK` locale. 1. Generate free emails by calling `fake.free_email()` repeatedly 1. Observe that some of them end in `gmail.co.uk` ### Expected behavior Email addresses should not have `gmail.co.uk` as a domain. ### Actual behavior As a replacement, maybe include Hotmail's successor, `outlook.com`? It's not UK specific, but I don't know anything about the state of free UK email providers. </issue> <code> [start of faker/providers/internet/en_GB/__init__.py] 1 from .. import Provider as InternetProvider 2 3 4 class Provider(InternetProvider): 5 # Data taken from 6 # https://github.com/fzaninotto/Faker/blob/master/src/Faker/Provider/en_GB/Internet.php 7 8 free_email_domains = ( 9 'gmail.com', 10 'yahoo.com', 11 'hotmail.com', 12 'gmail.co.uk', 13 'yahoo.co.uk', 14 'hotmail.co.uk', 15 ) 16 17 tlds = ('com', 'com', 'com', 'com', 'com', 'com', 'biz', 'info', 'net', 'org', 'co.uk') 18 [end of faker/providers/internet/en_GB/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/faker/providers/internet/en_GB/__init__.py b/faker/providers/internet/en_GB/__init__.py --- a/faker/providers/internet/en_GB/__init__.py +++ b/faker/providers/internet/en_GB/__init__.py @@ -9,9 +9,9 @@ 'gmail.com', 'yahoo.com', 'hotmail.com', - 'gmail.co.uk', 'yahoo.co.uk', 'hotmail.co.uk', + 'outlook.com', ) tlds = ('com', 'com', 'com', 'com', 'com', 'com', 'biz', 'info', 'net', 'org', 'co.uk')
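A quick sanity check of the fix: the provider touched by the diff lives under the `en_GB` locale (the issue text mentions `en_UK`, but `en_GB` is the locale string the provider path uses), and repeated sampling should never produce the removed domain.

```python
from faker import Faker

fake = Faker("en_GB")
emails = [fake.free_email() for _ in range(500)]

# After the patch, gmail.co.uk is no longer in free_email_domains.
assert not any(email.endswith("@gmail.co.uk") for email in emails)
print(emails[:3])
```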
{"golden_diff": "diff --git a/faker/providers/internet/en_GB/__init__.py b/faker/providers/internet/en_GB/__init__.py\n--- a/faker/providers/internet/en_GB/__init__.py\n+++ b/faker/providers/internet/en_GB/__init__.py\n@@ -9,9 +9,9 @@\n 'gmail.com',\n 'yahoo.com',\n 'hotmail.com',\n- 'gmail.co.uk',\n 'yahoo.co.uk',\n 'hotmail.co.uk',\n+ 'outlook.com',\n )\n \n tlds = ('com', 'com', 'com', 'com', 'com', 'com', 'biz', 'info', 'net', 'org', 'co.uk')\n", "issue": "gmail.co.uk isn't a valid free email domain in the UK\n* Faker version: 6.6.2\r\n* OS: macOS 11.2.3\r\n\r\nWhen generating a free email address, I got a result with the domain `gmail.co.uk`. From the source code, this list of free UK email domains was copied from the PHP version of Faker, which is now archived. \r\n\r\nAccording to [this Google support thread](https://support.google.com/mail/thread/4572636?hl=en) (albeit not necessarily from someone with the authority to speak on behalf of Google), there is no such domain.\r\n\r\n### Steps to reproduce\r\n\r\n1. Configure Faker with the `en_UK` locale.\r\n1. Generate free emails by calling `fake.free_email()` repeatedly\r\n1. Observe that some of them end in `gmail.co.uk`\r\n\r\n### Expected behavior\r\nEmail addresses should not have `gmail.co.uk` as a domain.\r\n\r\n### Actual behavior\r\nAs a replacement, maybe include Hotmail's successor, `outlook.com`? It's not UK specific, but I don't know anything about the state of free UK email providers.\ngmail.co.uk isn't a valid free email domain in the UK\n* Faker version: 6.6.2\r\n* OS: macOS 11.2.3\r\n\r\nWhen generating a free email address, I got a result with the domain `gmail.co.uk`. From the source code, this list of free UK email domains was copied from the PHP version of Faker, which is now archived. \r\n\r\nAccording to [this Google support thread](https://support.google.com/mail/thread/4572636?hl=en) (albeit not necessarily from someone with the authority to speak on behalf of Google), there is no such domain.\r\n\r\n### Steps to reproduce\r\n\r\n1. Configure Faker with the `en_UK` locale.\r\n1. Generate free emails by calling `fake.free_email()` repeatedly\r\n1. Observe that some of them end in `gmail.co.uk`\r\n\r\n### Expected behavior\r\nEmail addresses should not have `gmail.co.uk` as a domain.\r\n\r\n### Actual behavior\r\nAs a replacement, maybe include Hotmail's successor, `outlook.com`? It's not UK specific, but I don't know anything about the state of free UK email providers.\n", "before_files": [{"content": "from .. import Provider as InternetProvider\n\n\nclass Provider(InternetProvider):\n # Data taken from\n # https://github.com/fzaninotto/Faker/blob/master/src/Faker/Provider/en_GB/Internet.php\n\n free_email_domains = (\n 'gmail.com',\n 'yahoo.com',\n 'hotmail.com',\n 'gmail.co.uk',\n 'yahoo.co.uk',\n 'hotmail.co.uk',\n )\n\n tlds = ('com', 'com', 'com', 'com', 'com', 'com', 'biz', 'info', 'net', 'org', 'co.uk')\n", "path": "faker/providers/internet/en_GB/__init__.py"}]}
1,185
149
gh_patches_debug_60838
rasdani/github-patches
git_diff
docarray__docarray-60
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> fix: fix tags type after pydantic model </issue> <code> [start of docarray/document/pydantic_model.py] 1 from typing import Optional, List, Dict, Any, TYPE_CHECKING, Union 2 3 from pydantic import BaseModel, validator 4 5 from ..math.ndarray import to_list 6 7 if TYPE_CHECKING: 8 from ..types import ArrayType 9 10 _ProtoValueType = Optional[Union[str, bool, float]] 11 _StructValueType = Union[ 12 _ProtoValueType, List[_ProtoValueType], Dict[str, _ProtoValueType] 13 ] 14 15 16 def _convert_ndarray_to_list(v: 'ArrayType'): 17 if v is not None: 18 return to_list(v) 19 20 21 class PydanticDocument(BaseModel): 22 id: str 23 parent_id: Optional[str] 24 granularity: Optional[int] 25 adjacency: Optional[int] 26 blob: Optional[bytes] 27 tensor: Optional[Any] 28 mime_type: Optional[str] 29 text: Optional[str] 30 weight: Optional[float] 31 uri: Optional[str] 32 tags: Optional[Dict[str, '_StructValueType']] 33 offset: Optional[float] 34 location: Optional[List[float]] 35 embedding: Optional[Any] 36 modality: Optional[str] 37 evaluations: Optional[Dict[str, Dict[str, '_StructValueType']]] 38 scores: Optional[Dict[str, Dict[str, '_StructValueType']]] 39 chunks: Optional[List['PydanticDocument']] 40 matches: Optional[List['PydanticDocument']] 41 42 _tensor2list = validator('tensor', allow_reuse=True)(_convert_ndarray_to_list) 43 _embedding2list = validator('embedding', allow_reuse=True)(_convert_ndarray_to_list) 44 45 46 PydanticDocument.update_forward_refs() 47 48 PydanticDocumentArray = List[PydanticDocument] 49 [end of docarray/document/pydantic_model.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docarray/document/pydantic_model.py b/docarray/document/pydantic_model.py --- a/docarray/document/pydantic_model.py +++ b/docarray/document/pydantic_model.py @@ -7,7 +7,8 @@ if TYPE_CHECKING: from ..types import ArrayType -_ProtoValueType = Optional[Union[str, bool, float]] +# this order must be preserved: https://pydantic-docs.helpmanual.io/usage/types/#unions +_ProtoValueType = Optional[Union[bool, float, str]] _StructValueType = Union[ _ProtoValueType, List[_ProtoValueType], Dict[str, _ProtoValueType] ]
{"golden_diff": "diff --git a/docarray/document/pydantic_model.py b/docarray/document/pydantic_model.py\n--- a/docarray/document/pydantic_model.py\n+++ b/docarray/document/pydantic_model.py\n@@ -7,7 +7,8 @@\n if TYPE_CHECKING:\n from ..types import ArrayType\n \n-_ProtoValueType = Optional[Union[str, bool, float]]\n+# this order must be preserved: https://pydantic-docs.helpmanual.io/usage/types/#unions\n+_ProtoValueType = Optional[Union[bool, float, str]]\n _StructValueType = Union[\n _ProtoValueType, List[_ProtoValueType], Dict[str, _ProtoValueType]\n ]\n", "issue": "fix: fix tags type after pydantic model\n\n", "before_files": [{"content": "from typing import Optional, List, Dict, Any, TYPE_CHECKING, Union\n\nfrom pydantic import BaseModel, validator\n\nfrom ..math.ndarray import to_list\n\nif TYPE_CHECKING:\n from ..types import ArrayType\n\n_ProtoValueType = Optional[Union[str, bool, float]]\n_StructValueType = Union[\n _ProtoValueType, List[_ProtoValueType], Dict[str, _ProtoValueType]\n]\n\n\ndef _convert_ndarray_to_list(v: 'ArrayType'):\n if v is not None:\n return to_list(v)\n\n\nclass PydanticDocument(BaseModel):\n id: str\n parent_id: Optional[str]\n granularity: Optional[int]\n adjacency: Optional[int]\n blob: Optional[bytes]\n tensor: Optional[Any]\n mime_type: Optional[str]\n text: Optional[str]\n weight: Optional[float]\n uri: Optional[str]\n tags: Optional[Dict[str, '_StructValueType']]\n offset: Optional[float]\n location: Optional[List[float]]\n embedding: Optional[Any]\n modality: Optional[str]\n evaluations: Optional[Dict[str, Dict[str, '_StructValueType']]]\n scores: Optional[Dict[str, Dict[str, '_StructValueType']]]\n chunks: Optional[List['PydanticDocument']]\n matches: Optional[List['PydanticDocument']]\n\n _tensor2list = validator('tensor', allow_reuse=True)(_convert_ndarray_to_list)\n _embedding2list = validator('embedding', allow_reuse=True)(_convert_ndarray_to_list)\n\n\nPydanticDocument.update_forward_refs()\n\nPydanticDocumentArray = List[PydanticDocument]\n", "path": "docarray/document/pydantic_model.py"}]}
986
143
gh_patches_debug_32437
rasdani/github-patches
git_diff
safe-global__safe-config-service-292
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Allow querying `ChainInfo` by `shortName` @rmeissner add use case </issue> <code> [start of src/chains/urls.py] 1 from django.urls import path 2 3 from chains.views import ChainsDetailView, ChainsListView 4 5 app_name = "chains" 6 7 urlpatterns = [ 8 path("", ChainsListView.as_view(), name="list"), 9 path("<pk>/", ChainsDetailView.as_view(), name="detail"), 10 ] 11 [end of src/chains/urls.py] [start of src/chains/views.py] 1 from rest_framework import filters 2 from rest_framework.generics import ListAPIView, RetrieveAPIView 3 from rest_framework.pagination import LimitOffsetPagination 4 5 from .models import Chain 6 from .serializers import ChainSerializer 7 8 9 class ChainsListView(ListAPIView): 10 serializer_class = ChainSerializer 11 pagination_class = LimitOffsetPagination 12 pagination_class.max_limit = 100 13 pagination_class.default_limit = 10 14 queryset = Chain.objects.all() 15 filter_backends = [filters.OrderingFilter] 16 ordering_fields = ["relevance", "name"] 17 ordering = [ 18 "relevance", 19 "name", 20 ] 21 22 23 class ChainsDetailView(RetrieveAPIView): 24 serializer_class = ChainSerializer 25 queryset = Chain.objects.all() 26 [end of src/chains/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/chains/urls.py b/src/chains/urls.py --- a/src/chains/urls.py +++ b/src/chains/urls.py @@ -1,10 +1,15 @@ from django.urls import path -from chains.views import ChainsDetailView, ChainsListView +from chains.views import ChainsDetailView, ChainsDetailViewByShortName, ChainsListView app_name = "chains" urlpatterns = [ path("", ChainsListView.as_view(), name="list"), - path("<pk>/", ChainsDetailView.as_view(), name="detail"), + path("<int:pk>/", ChainsDetailView.as_view(), name="detail"), + path( + "<str:short_name>/", + ChainsDetailViewByShortName.as_view(), + name="detail_by_short_name", + ), ] diff --git a/src/chains/views.py b/src/chains/views.py --- a/src/chains/views.py +++ b/src/chains/views.py @@ -1,6 +1,11 @@ +from typing import Any + +from drf_yasg.utils import swagger_auto_schema from rest_framework import filters from rest_framework.generics import ListAPIView, RetrieveAPIView from rest_framework.pagination import LimitOffsetPagination +from rest_framework.request import Request +from rest_framework.response import Response from .models import Chain from .serializers import ChainSerializer @@ -23,3 +28,22 @@ class ChainsDetailView(RetrieveAPIView): serializer_class = ChainSerializer queryset = Chain.objects.all() + + @swagger_auto_schema( + operation_id="Get chain by id" + ) # type: ignore[misc] # Untyped decorator makes function "get" untyped + def get(self, request: Request, *args: Any, **kwargs: Any) -> Response: + return super().get(request, *args, **kwargs) + + +class ChainsDetailViewByShortName(RetrieveAPIView): + lookup_field = "short_name" + serializer_class = ChainSerializer + queryset = Chain.objects.all() + + @swagger_auto_schema( + operation_id="Get chain by shortName", + operation_description="Warning: `shortNames` may contain characters that need to be URL encoded (i.e.: whitespaces)", # noqa E501 + ) # type: ignore[misc] # Untyped decorator makes function "get" untyped + def get(self, request: Request, *args: Any, **kwargs: Any) -> Response: + return super().get(request, *args, **kwargs)
{"golden_diff": "diff --git a/src/chains/urls.py b/src/chains/urls.py\n--- a/src/chains/urls.py\n+++ b/src/chains/urls.py\n@@ -1,10 +1,15 @@\n from django.urls import path\n \n-from chains.views import ChainsDetailView, ChainsListView\n+from chains.views import ChainsDetailView, ChainsDetailViewByShortName, ChainsListView\n \n app_name = \"chains\"\n \n urlpatterns = [\n path(\"\", ChainsListView.as_view(), name=\"list\"),\n- path(\"<pk>/\", ChainsDetailView.as_view(), name=\"detail\"),\n+ path(\"<int:pk>/\", ChainsDetailView.as_view(), name=\"detail\"),\n+ path(\n+ \"<str:short_name>/\",\n+ ChainsDetailViewByShortName.as_view(),\n+ name=\"detail_by_short_name\",\n+ ),\n ]\ndiff --git a/src/chains/views.py b/src/chains/views.py\n--- a/src/chains/views.py\n+++ b/src/chains/views.py\n@@ -1,6 +1,11 @@\n+from typing import Any\n+\n+from drf_yasg.utils import swagger_auto_schema\n from rest_framework import filters\n from rest_framework.generics import ListAPIView, RetrieveAPIView\n from rest_framework.pagination import LimitOffsetPagination\n+from rest_framework.request import Request\n+from rest_framework.response import Response\n \n from .models import Chain\n from .serializers import ChainSerializer\n@@ -23,3 +28,22 @@\n class ChainsDetailView(RetrieveAPIView):\n serializer_class = ChainSerializer\n queryset = Chain.objects.all()\n+\n+ @swagger_auto_schema(\n+ operation_id=\"Get chain by id\"\n+ ) # type: ignore[misc] # Untyped decorator makes function \"get\" untyped\n+ def get(self, request: Request, *args: Any, **kwargs: Any) -> Response:\n+ return super().get(request, *args, **kwargs)\n+\n+\n+class ChainsDetailViewByShortName(RetrieveAPIView):\n+ lookup_field = \"short_name\"\n+ serializer_class = ChainSerializer\n+ queryset = Chain.objects.all()\n+\n+ @swagger_auto_schema(\n+ operation_id=\"Get chain by shortName\",\n+ operation_description=\"Warning: `shortNames` may contain characters that need to be URL encoded (i.e.: whitespaces)\", # noqa E501\n+ ) # type: ignore[misc] # Untyped decorator makes function \"get\" untyped\n+ def get(self, request: Request, *args: Any, **kwargs: Any) -> Response:\n+ return super().get(request, *args, **kwargs)\n", "issue": "Allow querying `ChainInfo` by `shortName`\n@rmeissner add use case\n", "before_files": [{"content": "from django.urls import path\n\nfrom chains.views import ChainsDetailView, ChainsListView\n\napp_name = \"chains\"\n\nurlpatterns = [\n path(\"\", ChainsListView.as_view(), name=\"list\"),\n path(\"<pk>/\", ChainsDetailView.as_view(), name=\"detail\"),\n]\n", "path": "src/chains/urls.py"}, {"content": "from rest_framework import filters\nfrom rest_framework.generics import ListAPIView, RetrieveAPIView\nfrom rest_framework.pagination import LimitOffsetPagination\n\nfrom .models import Chain\nfrom .serializers import ChainSerializer\n\n\nclass ChainsListView(ListAPIView):\n serializer_class = ChainSerializer\n pagination_class = LimitOffsetPagination\n pagination_class.max_limit = 100\n pagination_class.default_limit = 10\n queryset = Chain.objects.all()\n filter_backends = [filters.OrderingFilter]\n ordering_fields = [\"relevance\", \"name\"]\n ordering = [\n \"relevance\",\n \"name\",\n ]\n\n\nclass ChainsDetailView(RetrieveAPIView):\n serializer_class = ChainSerializer\n queryset = Chain.objects.all()\n", "path": "src/chains/views.py"}]}
841
559
gh_patches_debug_2542
rasdani/github-patches
git_diff
CTFd__CTFd-1508
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Access media library from Challenge UI Accessing the media library from the challenge UI is a useful idea if you're using images in the challenge interface. Saves some clicks. </issue> <code> [start of CTFd/admin/pages.py] 1 from flask import render_template, request 2 3 from CTFd.admin import admin 4 from CTFd.models import Pages 5 from CTFd.schemas.pages import PageSchema 6 from CTFd.utils import markdown 7 from CTFd.utils.config.pages import build_html 8 from CTFd.utils.decorators import admins_only 9 10 11 @admin.route("/admin/pages") 12 @admins_only 13 def pages_listing(): 14 pages = Pages.query.all() 15 return render_template("admin/pages.html", pages=pages) 16 17 18 @admin.route("/admin/pages/new") 19 @admins_only 20 def pages_new(): 21 return render_template("admin/editor.html") 22 23 24 @admin.route("/admin/pages/preview", methods=["POST"]) 25 @admins_only 26 def pages_preview(): 27 data = request.form.to_dict() 28 schema = PageSchema() 29 page = schema.load(data) 30 return render_template("page.html", content=build_html(page.data["content"])) 31 32 33 @admin.route("/admin/pages/<int:page_id>") 34 @admins_only 35 def pages_detail(page_id): 36 page = Pages.query.filter_by(id=page_id).first_or_404() 37 page_op = request.args.get("operation") 38 39 if request.method == "GET" and page_op == "preview": 40 return render_template("page.html", content=markdown(page.content)) 41 42 if request.method == "GET" and page_op == "create": 43 return render_template("admin/editor.html") 44 45 return render_template("admin/editor.html", page=page) 46 [end of CTFd/admin/pages.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/CTFd/admin/pages.py b/CTFd/admin/pages.py --- a/CTFd/admin/pages.py +++ b/CTFd/admin/pages.py @@ -27,7 +27,7 @@ data = request.form.to_dict() schema = PageSchema() page = schema.load(data) - return render_template("page.html", content=build_html(page.data["content"])) + return render_template("page.html", content=build_html(page.data.content)) @admin.route("/admin/pages/<int:page_id>")
{"golden_diff": "diff --git a/CTFd/admin/pages.py b/CTFd/admin/pages.py\n--- a/CTFd/admin/pages.py\n+++ b/CTFd/admin/pages.py\n@@ -27,7 +27,7 @@\n data = request.form.to_dict()\n schema = PageSchema()\n page = schema.load(data)\n- return render_template(\"page.html\", content=build_html(page.data[\"content\"]))\n+ return render_template(\"page.html\", content=build_html(page.data.content))\n \n \n @admin.route(\"/admin/pages/<int:page_id>\")\n", "issue": "Access media library from Challenge UI\nAccessing the media library from the challenge UI is a useful idea if you're using images in the challenge interface. Saves some clicks. \n", "before_files": [{"content": "from flask import render_template, request\n\nfrom CTFd.admin import admin\nfrom CTFd.models import Pages\nfrom CTFd.schemas.pages import PageSchema\nfrom CTFd.utils import markdown\nfrom CTFd.utils.config.pages import build_html\nfrom CTFd.utils.decorators import admins_only\n\n\[email protected](\"/admin/pages\")\n@admins_only\ndef pages_listing():\n pages = Pages.query.all()\n return render_template(\"admin/pages.html\", pages=pages)\n\n\[email protected](\"/admin/pages/new\")\n@admins_only\ndef pages_new():\n return render_template(\"admin/editor.html\")\n\n\[email protected](\"/admin/pages/preview\", methods=[\"POST\"])\n@admins_only\ndef pages_preview():\n data = request.form.to_dict()\n schema = PageSchema()\n page = schema.load(data)\n return render_template(\"page.html\", content=build_html(page.data[\"content\"]))\n\n\[email protected](\"/admin/pages/<int:page_id>\")\n@admins_only\ndef pages_detail(page_id):\n page = Pages.query.filter_by(id=page_id).first_or_404()\n page_op = request.args.get(\"operation\")\n\n if request.method == \"GET\" and page_op == \"preview\":\n return render_template(\"page.html\", content=markdown(page.content))\n\n if request.method == \"GET\" and page_op == \"create\":\n return render_template(\"admin/editor.html\")\n\n return render_template(\"admin/editor.html\", page=page)\n", "path": "CTFd/admin/pages.py"}]}
968
118
gh_patches_debug_16361
rasdani/github-patches
git_diff
benoitc__gunicorn-3083
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Version 21.0.1 or higher breaks with python 3.5 I am currently running an older application with Python 3.5, and I encountered an error when attempting to update the Gunicorn version to the latest one: ``` NameError: name 'ModuleNotFoundError' is not defined ``` After some investigation, I realized that the 'ModuleNotFoundError' is only available for Python 3.6 or higher. My suspicion is that this bug was introduced in [this commit](https://github.com/benoitc/gunicorn/commit/f628dd9730f965b1917397ea1846c68844b1fe7a). As a result, it seems that Gunicorn is not fully compatible with Python 3.5 </issue> <code> [start of setup.py] 1 # -*- coding: utf-8 - 2 # 3 # This file is part of gunicorn released under the MIT license. 4 # See the NOTICE for more information. 5 6 import os 7 import sys 8 9 from setuptools import setup, find_packages 10 from setuptools.command.test import test as TestCommand 11 12 from gunicorn import __version__ 13 14 15 CLASSIFIERS = [ 16 'Development Status :: 5 - Production/Stable', 17 'Environment :: Other Environment', 18 'Intended Audience :: Developers', 19 'License :: OSI Approved :: MIT License', 20 'Operating System :: MacOS :: MacOS X', 21 'Operating System :: POSIX', 22 'Programming Language :: Python', 23 'Programming Language :: Python :: 3', 24 'Programming Language :: Python :: 3.5', 25 'Programming Language :: Python :: 3.6', 26 'Programming Language :: Python :: 3.7', 27 'Programming Language :: Python :: 3.8', 28 'Programming Language :: Python :: 3.9', 29 'Programming Language :: Python :: 3.10', 30 'Programming Language :: Python :: 3.11', 31 'Programming Language :: Python :: 3 :: Only', 32 'Programming Language :: Python :: Implementation :: CPython', 33 'Programming Language :: Python :: Implementation :: PyPy', 34 'Topic :: Internet', 35 'Topic :: Utilities', 36 'Topic :: Software Development :: Libraries :: Python Modules', 37 'Topic :: Internet :: WWW/HTTP', 38 'Topic :: Internet :: WWW/HTTP :: WSGI', 39 'Topic :: Internet :: WWW/HTTP :: WSGI :: Server', 40 'Topic :: Internet :: WWW/HTTP :: Dynamic Content'] 41 42 # read long description 43 with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as f: 44 long_description = f.read() 45 46 # read dev requirements 47 fname = os.path.join(os.path.dirname(__file__), 'requirements_test.txt') 48 with open(fname) as f: 49 tests_require = [l.strip() for l in f.readlines()] 50 51 class PyTestCommand(TestCommand): 52 user_options = [ 53 ("cov", None, "measure coverage") 54 ] 55 56 def initialize_options(self): 57 TestCommand.initialize_options(self) 58 self.cov = None 59 60 def finalize_options(self): 61 TestCommand.finalize_options(self) 62 self.test_args = ['tests'] 63 if self.cov: 64 self.test_args += ['--cov', 'gunicorn'] 65 self.test_suite = True 66 67 def run_tests(self): 68 import pytest 69 errno = pytest.main(self.test_args) 70 sys.exit(errno) 71 72 73 install_requires = [ 74 'importlib_metadata; python_version<"3.8"', 75 'packaging', 76 ] 77 78 extras_require = { 79 'gevent': ['gevent>=1.4.0'], 80 'eventlet': ['eventlet>=0.24.1'], 81 'tornado': ['tornado>=0.2'], 82 'gthread': [], 83 'setproctitle': ['setproctitle'], 84 } 85 86 setup( 87 name='gunicorn', 88 version=__version__, 89 90 description='WSGI HTTP Server for UNIX', 91 long_description=long_description, 92 author='Benoit Chesneau', 93 author_email='[email protected]', 94 license='MIT', 95 url='https://gunicorn.org', 96 
project_urls={ 97 'Documentation': 'https://docs.gunicorn.org', 98 'Homepage': 'https://gunicorn.org', 99 'Issue tracker': 'https://github.com/benoitc/gunicorn/issues', 100 'Source code': 'https://github.com/benoitc/gunicorn', 101 }, 102 103 python_requires='>=3.5', 104 install_requires=install_requires, 105 classifiers=CLASSIFIERS, 106 zip_safe=False, 107 packages=find_packages(exclude=['examples', 'tests']), 108 include_package_data=True, 109 110 tests_require=tests_require, 111 cmdclass={'test': PyTestCommand}, 112 113 entry_points=""" 114 [console_scripts] 115 gunicorn=gunicorn.app.wsgiapp:run 116 117 [paste.server_runner] 118 main=gunicorn.app.pasterapp:serve 119 """, 120 extras_require=extras_require, 121 ) 122 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -21,8 +21,6 @@ 'Operating System :: POSIX', 'Programming Language :: Python', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.5', - 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', @@ -100,7 +98,7 @@ 'Source code': 'https://github.com/benoitc/gunicorn', }, - python_requires='>=3.5', + python_requires='>=3.7', install_requires=install_requires, classifiers=CLASSIFIERS, zip_safe=False,
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -21,8 +21,6 @@\n 'Operating System :: POSIX',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n- 'Programming Language :: Python :: 3.5',\n- 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n@@ -100,7 +98,7 @@\n 'Source code': 'https://github.com/benoitc/gunicorn',\n },\n \n- python_requires='>=3.5',\n+ python_requires='>=3.7',\n install_requires=install_requires,\n classifiers=CLASSIFIERS,\n zip_safe=False,\n", "issue": "Version 21.0.1 or higher breaks with python 3.5\nI am currently running an older application with Python 3.5, and I encountered an error when attempting to update the Gunicorn version to the latest one:\r\n\r\n```\r\nNameError: name 'ModuleNotFoundError' is not defined\r\n```\r\n\r\nAfter some investigation, I realized that the 'ModuleNotFoundError' is only available for Python 3.6 or higher. My suspicion is that this bug was introduced in [this commit](https://github.com/benoitc/gunicorn/commit/f628dd9730f965b1917397ea1846c68844b1fe7a).\r\n\r\nAs a result, it seems that Gunicorn is not fully compatible with Python 3.5\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\nfrom setuptools.command.test import test as TestCommand\n\nfrom gunicorn import __version__\n\n\nCLASSIFIERS = [\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Programming Language :: Python :: 3.11',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Internet',\n 'Topic :: Utilities',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: WSGI',\n 'Topic :: Internet :: WWW/HTTP :: WSGI :: Server',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content']\n\n# read long description\nwith open(os.path.join(os.path.dirname(__file__), 'README.rst')) as f:\n long_description = f.read()\n\n# read dev requirements\nfname = os.path.join(os.path.dirname(__file__), 'requirements_test.txt')\nwith open(fname) as f:\n tests_require = [l.strip() for l in f.readlines()]\n\nclass PyTestCommand(TestCommand):\n user_options = [\n (\"cov\", None, \"measure coverage\")\n ]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.cov = None\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = ['tests']\n if self.cov:\n self.test_args += ['--cov', 'gunicorn']\n self.test_suite = True\n\n def run_tests(self):\n import pytest\n errno = pytest.main(self.test_args)\n sys.exit(errno)\n\n\ninstall_requires = [\n 
'importlib_metadata; python_version<\"3.8\"',\n 'packaging',\n]\n\nextras_require = {\n 'gevent': ['gevent>=1.4.0'],\n 'eventlet': ['eventlet>=0.24.1'],\n 'tornado': ['tornado>=0.2'],\n 'gthread': [],\n 'setproctitle': ['setproctitle'],\n}\n\nsetup(\n name='gunicorn',\n version=__version__,\n\n description='WSGI HTTP Server for UNIX',\n long_description=long_description,\n author='Benoit Chesneau',\n author_email='[email protected]',\n license='MIT',\n url='https://gunicorn.org',\n project_urls={\n 'Documentation': 'https://docs.gunicorn.org',\n 'Homepage': 'https://gunicorn.org',\n 'Issue tracker': 'https://github.com/benoitc/gunicorn/issues',\n 'Source code': 'https://github.com/benoitc/gunicorn',\n },\n\n python_requires='>=3.5',\n install_requires=install_requires,\n classifiers=CLASSIFIERS,\n zip_safe=False,\n packages=find_packages(exclude=['examples', 'tests']),\n include_package_data=True,\n\n tests_require=tests_require,\n cmdclass={'test': PyTestCommand},\n\n entry_points=\"\"\"\n [console_scripts]\n gunicorn=gunicorn.app.wsgiapp:run\n\n [paste.server_runner]\n main=gunicorn.app.pasterapp:serve\n \"\"\",\n extras_require=extras_require,\n)\n", "path": "setup.py"}]}
1,842
191
gh_patches_debug_5581
rasdani/github-patches
git_diff
ansible-collections__community.general-6644
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> The DependencyMixin is being deprecated. ### Summary When I use the module to blacklist a kernel module, a warning appears. ### Issue Type Bug Report ### Component Name kernel_blacklist ### Ansible Version ```console (paste below) $ansible --version ansible [core 2.15.0] config file = None configured module search path = ['/home/bandura/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/bandura/.local/lib/python3.9/site-packages/ansible ansible collection location = /home/bandura/.ansible/collections:/usr/share/ansible/collections executable location = /home/bandura/.local/bin/ansible python version = 3.9.2 (default, Feb 28 2021, 17:03:44) [GCC 10.2.1 20210110] (/usr/bin/python3) jinja version = 3.1.2 libyaml = True ``` ### Community.general Version ```console (paste below) $ansible-galaxy collection list community.general # /home/bandura/.local/lib/python3.9/site-packages/ansible_collections Collection Version ----------------- ------- community.general 7.0.1 ``` ### Configuration ```console (paste below) $ansible-config dump --only-changed CONFIG_FILE() = None ``` ### OS / Environment Debian 11-based ### Steps to Reproduce <!--- Paste example playbooks or commands between quotes below --> ```yaml (paste below) - name: Blacklist old kernel module become: true community.general.kernel_blacklist: name: x_tables ``` ### Expected Results No warning ### Actual Results Works, but with warning. ```console (paste below) TASK [install-nftables : Blacklist old IPv4 kernel module] ********************* [DEPRECATION WARNING]: The DependencyMixin is being deprecated. Modules should use community.general.plugins.module_utils.deps instead. This feature will be removed from community.general in version 9.0.0. Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg. ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct </issue> <code> [start of plugins/module_utils/mh/mixins/deps.py] 1 # -*- coding: utf-8 -*- 2 # (c) 2020, Alexei Znamensky <[email protected]> 3 # Copyright (c) 2020, Ansible Project 4 # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) 5 # SPDX-License-Identifier: BSD-2-Clause 6 7 from __future__ import absolute_import, division, print_function 8 __metaclass__ = type 9 10 import traceback 11 12 from ansible_collections.community.general.plugins.module_utils.mh.base import ModuleHelperBase 13 from ansible_collections.community.general.plugins.module_utils.mh.deco import module_fails_on_exception 14 15 16 class DependencyCtxMgr(object): 17 def __init__(self, name, msg=None): 18 self.name = name 19 self.msg = msg 20 self.has_it = False 21 self.exc_type = None 22 self.exc_val = None 23 self.exc_tb = None 24 25 def __enter__(self): 26 return self 27 28 def __exit__(self, exc_type, exc_val, exc_tb): 29 self.has_it = exc_type is None 30 self.exc_type = exc_type 31 self.exc_val = exc_val 32 self.exc_tb = exc_tb 33 return not self.has_it 34 35 @property 36 def text(self): 37 return self.msg or str(self.exc_val) 38 39 40 class DependencyMixin(ModuleHelperBase): 41 """ 42 THIS CLASS IS BEING DEPRECATED. 43 See the deprecation notice in ``DependencyMixin.fail_on_missing_deps()`` below. 44 45 Mixin for mapping module options to running a CLI command with its arguments. 
46 """ 47 _dependencies = [] 48 49 @classmethod 50 def dependency(cls, name, msg): 51 cls._dependencies.append(DependencyCtxMgr(name, msg)) 52 return cls._dependencies[-1] 53 54 def fail_on_missing_deps(self): 55 self.module.deprecate( 56 'The DependencyMixin is being deprecated. ' 57 'Modules should use community.general.plugins.module_utils.deps instead.', 58 version='9.0.0', 59 collection_name='community.general', 60 ) 61 for d in self._dependencies: 62 if not d.has_it: 63 self.module.fail_json(changed=False, 64 exception="\n".join(traceback.format_exception(d.exc_type, d.exc_val, d.exc_tb)), 65 msg=d.text, 66 **self.output) 67 68 @module_fails_on_exception 69 def run(self): 70 self.fail_on_missing_deps() 71 super(DependencyMixin, self).run() 72 [end of plugins/module_utils/mh/mixins/deps.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/plugins/module_utils/mh/mixins/deps.py b/plugins/module_utils/mh/mixins/deps.py --- a/plugins/module_utils/mh/mixins/deps.py +++ b/plugins/module_utils/mh/mixins/deps.py @@ -52,6 +52,8 @@ return cls._dependencies[-1] def fail_on_missing_deps(self): + if not self._dependencies: + return self.module.deprecate( 'The DependencyMixin is being deprecated. ' 'Modules should use community.general.plugins.module_utils.deps instead.',
{"golden_diff": "diff --git a/plugins/module_utils/mh/mixins/deps.py b/plugins/module_utils/mh/mixins/deps.py\n--- a/plugins/module_utils/mh/mixins/deps.py\n+++ b/plugins/module_utils/mh/mixins/deps.py\n@@ -52,6 +52,8 @@\n return cls._dependencies[-1]\n \n def fail_on_missing_deps(self):\n+ if not self._dependencies:\n+ return\n self.module.deprecate(\n 'The DependencyMixin is being deprecated. '\n 'Modules should use community.general.plugins.module_utils.deps instead.',\n", "issue": "The DependencyMixin is being deprecated.\n### Summary\r\n\r\nWhen I use the module to blacklist a kernel module, a warning appears.\r\n\r\n### Issue Type\r\n\r\nBug Report\r\n\r\n### Component Name\r\n\r\nkernel_blacklist\r\n\r\n### Ansible Version\r\n\r\n```console (paste below)\r\n$ansible --version\r\nansible [core 2.15.0]\r\n config file = None\r\n configured module search path = ['/home/bandura/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /home/bandura/.local/lib/python3.9/site-packages/ansible\r\n ansible collection location = /home/bandura/.ansible/collections:/usr/share/ansible/collections\r\n executable location = /home/bandura/.local/bin/ansible\r\n python version = 3.9.2 (default, Feb 28 2021, 17:03:44) [GCC 10.2.1 20210110] (/usr/bin/python3)\r\n jinja version = 3.1.2\r\n libyaml = True\r\n```\r\n\r\n\r\n### Community.general Version\r\n\r\n```console (paste below)\r\n$ansible-galaxy collection list community.general\r\n\r\n# /home/bandura/.local/lib/python3.9/site-packages/ansible_collections\r\nCollection Version\r\n----------------- -------\r\ncommunity.general 7.0.1\r\n```\r\n\r\n\r\n### Configuration\r\n\r\n```console (paste below)\r\n$ansible-config dump --only-changed\r\nCONFIG_FILE() = None\r\n```\r\n\r\n\r\n### OS / Environment\r\n\r\nDebian 11-based\r\n\r\n### Steps to Reproduce\r\n\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml (paste below)\r\n- name: Blacklist old kernel module\r\n become: true\r\n community.general.kernel_blacklist:\r\n name: x_tables\r\n```\r\n\r\n\r\n### Expected Results\r\n\r\nNo warning\r\n\r\n### Actual Results\r\n\r\nWorks, but with warning.\r\n```console (paste below)\r\nTASK [install-nftables : Blacklist old IPv4 kernel module] *********************\r\n[DEPRECATION WARNING]: The DependencyMixin is being deprecated. Modules should \r\nuse community.general.plugins.module_utils.deps instead. This feature will be \r\nremoved from community.general in version 9.0.0. 
Deprecation warnings can be \r\ndisabled by setting deprecation_warnings=False in ansible.cfg.\r\n```\r\n\r\n\r\n### Code of Conduct\r\n\r\n- [X] I agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# (c) 2020, Alexei Znamensky <[email protected]>\n# Copyright (c) 2020, Ansible Project\n# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause)\n# SPDX-License-Identifier: BSD-2-Clause\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nimport traceback\n\nfrom ansible_collections.community.general.plugins.module_utils.mh.base import ModuleHelperBase\nfrom ansible_collections.community.general.plugins.module_utils.mh.deco import module_fails_on_exception\n\n\nclass DependencyCtxMgr(object):\n def __init__(self, name, msg=None):\n self.name = name\n self.msg = msg\n self.has_it = False\n self.exc_type = None\n self.exc_val = None\n self.exc_tb = None\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.has_it = exc_type is None\n self.exc_type = exc_type\n self.exc_val = exc_val\n self.exc_tb = exc_tb\n return not self.has_it\n\n @property\n def text(self):\n return self.msg or str(self.exc_val)\n\n\nclass DependencyMixin(ModuleHelperBase):\n \"\"\"\n THIS CLASS IS BEING DEPRECATED.\n See the deprecation notice in ``DependencyMixin.fail_on_missing_deps()`` below.\n\n Mixin for mapping module options to running a CLI command with its arguments.\n \"\"\"\n _dependencies = []\n\n @classmethod\n def dependency(cls, name, msg):\n cls._dependencies.append(DependencyCtxMgr(name, msg))\n return cls._dependencies[-1]\n\n def fail_on_missing_deps(self):\n self.module.deprecate(\n 'The DependencyMixin is being deprecated. '\n 'Modules should use community.general.plugins.module_utils.deps instead.',\n version='9.0.0',\n collection_name='community.general',\n )\n for d in self._dependencies:\n if not d.has_it:\n self.module.fail_json(changed=False,\n exception=\"\\n\".join(traceback.format_exception(d.exc_type, d.exc_val, d.exc_tb)),\n msg=d.text,\n **self.output)\n\n @module_fails_on_exception\n def run(self):\n self.fail_on_missing_deps()\n super(DependencyMixin, self).run()\n", "path": "plugins/module_utils/mh/mixins/deps.py"}]}
1,728
129
gh_patches_debug_24855
rasdani/github-patches
git_diff
TheAlgorithms__Python-9182
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> sorts/random_normal_distribution_quicksort.py has no tests ### Repository commit 3 ### Python version (python --version) Python 3.11.5 ### Dependencies version (pip freeze) Numpy ### Expected behavior Tests. ### Actual behavior No tests. </issue> <code> [start of sorts/random_normal_distribution_quicksort.py] 1 from random import randint 2 from tempfile import TemporaryFile 3 4 import numpy as np 5 6 7 def _in_place_quick_sort(a, start, end): 8 count = 0 9 if start < end: 10 pivot = randint(start, end) 11 temp = a[end] 12 a[end] = a[pivot] 13 a[pivot] = temp 14 15 p, count = _in_place_partition(a, start, end) 16 count += _in_place_quick_sort(a, start, p - 1) 17 count += _in_place_quick_sort(a, p + 1, end) 18 return count 19 20 21 def _in_place_partition(a, start, end): 22 count = 0 23 pivot = randint(start, end) 24 temp = a[end] 25 a[end] = a[pivot] 26 a[pivot] = temp 27 new_pivot_index = start - 1 28 for index in range(start, end): 29 count += 1 30 if a[index] < a[end]: # check if current val is less than pivot value 31 new_pivot_index = new_pivot_index + 1 32 temp = a[new_pivot_index] 33 a[new_pivot_index] = a[index] 34 a[index] = temp 35 36 temp = a[new_pivot_index + 1] 37 a[new_pivot_index + 1] = a[end] 38 a[end] = temp 39 return new_pivot_index + 1, count 40 41 42 outfile = TemporaryFile() 43 p = 100 # 1000 elements are to be sorted 44 45 46 mu, sigma = 0, 1 # mean and standard deviation 47 X = np.random.normal(mu, sigma, p) 48 np.save(outfile, X) 49 print("The array is") 50 print(X) 51 52 53 outfile.seek(0) # using the same array 54 M = np.load(outfile) 55 r = len(M) - 1 56 z = _in_place_quick_sort(M, 0, r) 57 58 print( 59 "No of Comparisons for 100 elements selected from a standard normal distribution" 60 "is :" 61 ) 62 print(z) 63 [end of sorts/random_normal_distribution_quicksort.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sorts/random_normal_distribution_quicksort.py b/sorts/random_normal_distribution_quicksort.py deleted file mode 100644 --- a/sorts/random_normal_distribution_quicksort.py +++ /dev/null @@ -1,62 +0,0 @@ -from random import randint -from tempfile import TemporaryFile - -import numpy as np - - -def _in_place_quick_sort(a, start, end): - count = 0 - if start < end: - pivot = randint(start, end) - temp = a[end] - a[end] = a[pivot] - a[pivot] = temp - - p, count = _in_place_partition(a, start, end) - count += _in_place_quick_sort(a, start, p - 1) - count += _in_place_quick_sort(a, p + 1, end) - return count - - -def _in_place_partition(a, start, end): - count = 0 - pivot = randint(start, end) - temp = a[end] - a[end] = a[pivot] - a[pivot] = temp - new_pivot_index = start - 1 - for index in range(start, end): - count += 1 - if a[index] < a[end]: # check if current val is less than pivot value - new_pivot_index = new_pivot_index + 1 - temp = a[new_pivot_index] - a[new_pivot_index] = a[index] - a[index] = temp - - temp = a[new_pivot_index + 1] - a[new_pivot_index + 1] = a[end] - a[end] = temp - return new_pivot_index + 1, count - - -outfile = TemporaryFile() -p = 100 # 1000 elements are to be sorted - - -mu, sigma = 0, 1 # mean and standard deviation -X = np.random.normal(mu, sigma, p) -np.save(outfile, X) -print("The array is") -print(X) - - -outfile.seek(0) # using the same array -M = np.load(outfile) -r = len(M) - 1 -z = _in_place_quick_sort(M, 0, r) - -print( - "No of Comparisons for 100 elements selected from a standard normal distribution" - "is :" -) -print(z)
{"golden_diff": "diff --git a/sorts/random_normal_distribution_quicksort.py b/sorts/random_normal_distribution_quicksort.py\ndeleted file mode 100644\n--- a/sorts/random_normal_distribution_quicksort.py\n+++ /dev/null\n@@ -1,62 +0,0 @@\n-from random import randint\n-from tempfile import TemporaryFile\n-\n-import numpy as np\n-\n-\n-def _in_place_quick_sort(a, start, end):\n- count = 0\n- if start < end:\n- pivot = randint(start, end)\n- temp = a[end]\n- a[end] = a[pivot]\n- a[pivot] = temp\n-\n- p, count = _in_place_partition(a, start, end)\n- count += _in_place_quick_sort(a, start, p - 1)\n- count += _in_place_quick_sort(a, p + 1, end)\n- return count\n-\n-\n-def _in_place_partition(a, start, end):\n- count = 0\n- pivot = randint(start, end)\n- temp = a[end]\n- a[end] = a[pivot]\n- a[pivot] = temp\n- new_pivot_index = start - 1\n- for index in range(start, end):\n- count += 1\n- if a[index] < a[end]: # check if current val is less than pivot value\n- new_pivot_index = new_pivot_index + 1\n- temp = a[new_pivot_index]\n- a[new_pivot_index] = a[index]\n- a[index] = temp\n-\n- temp = a[new_pivot_index + 1]\n- a[new_pivot_index + 1] = a[end]\n- a[end] = temp\n- return new_pivot_index + 1, count\n-\n-\n-outfile = TemporaryFile()\n-p = 100 # 1000 elements are to be sorted\n-\n-\n-mu, sigma = 0, 1 # mean and standard deviation\n-X = np.random.normal(mu, sigma, p)\n-np.save(outfile, X)\n-print(\"The array is\")\n-print(X)\n-\n-\n-outfile.seek(0) # using the same array\n-M = np.load(outfile)\n-r = len(M) - 1\n-z = _in_place_quick_sort(M, 0, r)\n-\n-print(\n- \"No of Comparisons for 100 elements selected from a standard normal distribution\"\n- \"is :\"\n-)\n-print(z)\n", "issue": "sorts/random_normal_distribution_quicksort.py has no tests\n### Repository commit\n\n3\n\n### Python version (python --version)\n\nPython 3.11.5\n\n### Dependencies version (pip freeze)\n\nNumpy\n\n### Expected behavior\n\nTests.\n\n### Actual behavior\n\nNo tests.\n", "before_files": [{"content": "from random import randint\nfrom tempfile import TemporaryFile\n\nimport numpy as np\n\n\ndef _in_place_quick_sort(a, start, end):\n count = 0\n if start < end:\n pivot = randint(start, end)\n temp = a[end]\n a[end] = a[pivot]\n a[pivot] = temp\n\n p, count = _in_place_partition(a, start, end)\n count += _in_place_quick_sort(a, start, p - 1)\n count += _in_place_quick_sort(a, p + 1, end)\n return count\n\n\ndef _in_place_partition(a, start, end):\n count = 0\n pivot = randint(start, end)\n temp = a[end]\n a[end] = a[pivot]\n a[pivot] = temp\n new_pivot_index = start - 1\n for index in range(start, end):\n count += 1\n if a[index] < a[end]: # check if current val is less than pivot value\n new_pivot_index = new_pivot_index + 1\n temp = a[new_pivot_index]\n a[new_pivot_index] = a[index]\n a[index] = temp\n\n temp = a[new_pivot_index + 1]\n a[new_pivot_index + 1] = a[end]\n a[end] = temp\n return new_pivot_index + 1, count\n\n\noutfile = TemporaryFile()\np = 100 # 1000 elements are to be sorted\n\n\nmu, sigma = 0, 1 # mean and standard deviation\nX = np.random.normal(mu, sigma, p)\nnp.save(outfile, X)\nprint(\"The array is\")\nprint(X)\n\n\noutfile.seek(0) # using the same array\nM = np.load(outfile)\nr = len(M) - 1\nz = _in_place_quick_sort(M, 0, r)\n\nprint(\n \"No of Comparisons for 100 elements selected from a standard normal distribution\"\n \"is :\"\n)\nprint(z)\n", "path": "sorts/random_normal_distribution_quicksort.py"}]}
1,174
554
gh_patches_debug_4174
rasdani/github-patches
git_diff
aimhubio__aim-2577
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Detection of Jax Arrays Breaks on Jax=0.4.* The detection for jax arrays here https://github.com/aimhubio/aim/blob/285a09fefbf25d418e00bc305e9a1357d6343d3c/aim/sdk/num_utils.py#L66 is broken in Jax 0.4.* since `DeviceArray` was renamed to `Array`. The simple fix to support the prior/current version would be to check for either `DeviceArray` or `Array` </issue> <code> [start of aim/sdk/num_utils.py] 1 def get_inst_type_str(inst): 2 """ 3 Get instance type and class type full names 4 """ 5 obj_name = obj_module = obj_cls_name = obj_cls_module = '' 6 7 if hasattr(inst, '__name__'): 8 obj_name = inst.__name__ 9 if hasattr(inst, '__module__'): 10 obj_module = inst.__module__ 11 if hasattr(inst, '__class__'): 12 if hasattr(inst.__class__, '__name__'): 13 obj_cls_name = inst.__class__.__name__ 14 if hasattr(inst.__class__, '__module__'): 15 obj_cls_module = inst.__class__.__module__ 16 17 obj_full = '{}.{}'.format(obj_name, obj_module) 18 obj_cls_full = '{}.{}'.format(obj_cls_name, obj_cls_module) 19 20 return obj_full, obj_cls_full 21 22 23 def get_inst_base_types(inst): 24 """ 25 Get instance and it's base classes types 26 """ 27 bases_types = [] 28 for b in inst.__class__.__bases__: 29 b_type, b_cls_type = get_inst_type_str(b) 30 bases_types.append(b_type) 31 bases_types.append(b_cls_type) 32 return bases_types 33 34 35 def inst_has_typename(inst, types): 36 """ 37 Return `True` if the instance is created from class 38 which has base that matches passed `types` 39 """ 40 inst_type, inst_cls_type = get_inst_type_str(inst) 41 inst_types = [inst_type, inst_cls_type] + get_inst_base_types(inst) 42 43 for i in inst_types: 44 found = True 45 for t in types: 46 if i.find(t) == -1: 47 found = False 48 break 49 if found: 50 return True 51 52 return False 53 54 55 def is_pytorch_tensor(inst): 56 """ 57 Check whether `inst` is instance of pytorch tensor 58 """ 59 return inst_has_typename(inst, ['torch', 'Tensor']) 60 61 62 def is_tf_tensor(inst): 63 return inst_has_typename(inst, ['tensorflow', 'Tensor']) 64 65 66 def is_jax_device_array(inst): 67 """ 68 Check whether `inst` is instance of jax device array 69 """ 70 return inst_has_typename(inst, ['jaxlib', 'xla_extension', 'DeviceArray']) 71 72 73 def is_numpy_array(inst): 74 """ 75 Check whether `inst` is instance of numpy array 76 """ 77 return inst_has_typename(inst, ['numpy', 'ndarray']) 78 79 80 def is_numpy_number(inst): 81 """ 82 Check whether `inst` is numpy number 83 """ 84 85 return inst_has_typename(inst, ['numpy']) 86 87 88 def is_py_number(value): 89 return isinstance(value, (int, float)) 90 91 92 def is_number(value): 93 """ 94 Checks if the given value is a number 95 """ 96 if is_py_number(value): 97 return True 98 99 if is_numpy_array(value): 100 return True 101 102 if is_numpy_number(value): 103 return True 104 105 if is_jax_device_array(value): 106 return True 107 108 if is_pytorch_tensor(value): 109 return True 110 111 if is_tf_tensor(value): 112 return True 113 114 return False 115 116 117 def convert_to_py_number(value) -> object: 118 """ 119 Converts numpy objects or tensors to python number types 120 """ 121 if isinstance(value, int): 122 return int(value) 123 124 if isinstance(value, float): 125 return float(value) 126 127 if is_numpy_array(value): 128 return value.item() 129 130 if is_numpy_number(value): 131 return value.item() 132 133 if is_jax_device_array(value): 134 return value.item() 135 136 if 
is_pytorch_tensor(value): 137 return value.item() 138 139 if is_tf_tensor(value): 140 return value.numpy().item() 141 142 raise ValueError('not a number') 143 [end of aim/sdk/num_utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/aim/sdk/num_utils.py b/aim/sdk/num_utils.py --- a/aim/sdk/num_utils.py +++ b/aim/sdk/num_utils.py @@ -67,7 +67,11 @@ """ Check whether `inst` is instance of jax device array """ - return inst_has_typename(inst, ['jaxlib', 'xla_extension', 'DeviceArray']) + if inst_has_typename(inst, ['jaxlib', 'xla_extension', 'Array']): + return True + if inst_has_typename(inst, ['jaxlib', 'xla_extension', 'DeviceArray']): + return True + return False def is_numpy_array(inst):
{"golden_diff": "diff --git a/aim/sdk/num_utils.py b/aim/sdk/num_utils.py\n--- a/aim/sdk/num_utils.py\n+++ b/aim/sdk/num_utils.py\n@@ -67,7 +67,11 @@\n \"\"\"\n Check whether `inst` is instance of jax device array\n \"\"\"\n- return inst_has_typename(inst, ['jaxlib', 'xla_extension', 'DeviceArray'])\n+ if inst_has_typename(inst, ['jaxlib', 'xla_extension', 'Array']):\n+ return True\n+ if inst_has_typename(inst, ['jaxlib', 'xla_extension', 'DeviceArray']):\n+ return True\n+ return False\n \n \n def is_numpy_array(inst):\n", "issue": "Detection of Jax Arrays Breaks on Jax=0.4.*\nThe detection for jax arrays here https://github.com/aimhubio/aim/blob/285a09fefbf25d418e00bc305e9a1357d6343d3c/aim/sdk/num_utils.py#L66 is broken in Jax 0.4.* since `DeviceArray` was renamed to `Array`. The simple fix to support the prior/current version would be to check for either `DeviceArray` or `Array`\n", "before_files": [{"content": "def get_inst_type_str(inst):\n \"\"\"\n Get instance type and class type full names\n \"\"\"\n obj_name = obj_module = obj_cls_name = obj_cls_module = ''\n\n if hasattr(inst, '__name__'):\n obj_name = inst.__name__\n if hasattr(inst, '__module__'):\n obj_module = inst.__module__\n if hasattr(inst, '__class__'):\n if hasattr(inst.__class__, '__name__'):\n obj_cls_name = inst.__class__.__name__\n if hasattr(inst.__class__, '__module__'):\n obj_cls_module = inst.__class__.__module__\n\n obj_full = '{}.{}'.format(obj_name, obj_module)\n obj_cls_full = '{}.{}'.format(obj_cls_name, obj_cls_module)\n\n return obj_full, obj_cls_full\n\n\ndef get_inst_base_types(inst):\n \"\"\"\n Get instance and it's base classes types\n \"\"\"\n bases_types = []\n for b in inst.__class__.__bases__:\n b_type, b_cls_type = get_inst_type_str(b)\n bases_types.append(b_type)\n bases_types.append(b_cls_type)\n return bases_types\n\n\ndef inst_has_typename(inst, types):\n \"\"\"\n Return `True` if the instance is created from class\n which has base that matches passed `types`\n \"\"\"\n inst_type, inst_cls_type = get_inst_type_str(inst)\n inst_types = [inst_type, inst_cls_type] + get_inst_base_types(inst)\n\n for i in inst_types:\n found = True\n for t in types:\n if i.find(t) == -1:\n found = False\n break\n if found:\n return True\n\n return False\n\n\ndef is_pytorch_tensor(inst):\n \"\"\"\n Check whether `inst` is instance of pytorch tensor\n \"\"\"\n return inst_has_typename(inst, ['torch', 'Tensor'])\n\n\ndef is_tf_tensor(inst):\n return inst_has_typename(inst, ['tensorflow', 'Tensor'])\n\n\ndef is_jax_device_array(inst):\n \"\"\"\n Check whether `inst` is instance of jax device array\n \"\"\"\n return inst_has_typename(inst, ['jaxlib', 'xla_extension', 'DeviceArray'])\n\n\ndef is_numpy_array(inst):\n \"\"\"\n Check whether `inst` is instance of numpy array\n \"\"\"\n return inst_has_typename(inst, ['numpy', 'ndarray'])\n\n\ndef is_numpy_number(inst):\n \"\"\"\n Check whether `inst` is numpy number\n \"\"\"\n\n return inst_has_typename(inst, ['numpy'])\n\n\ndef is_py_number(value):\n return isinstance(value, (int, float))\n\n\ndef is_number(value):\n \"\"\"\n Checks if the given value is a number\n \"\"\"\n if is_py_number(value):\n return True\n\n if is_numpy_array(value):\n return True\n\n if is_numpy_number(value):\n return True\n\n if is_jax_device_array(value):\n return True\n\n if is_pytorch_tensor(value):\n return True\n\n if is_tf_tensor(value):\n return True\n\n return False\n\n\ndef convert_to_py_number(value) -> object:\n \"\"\"\n Converts numpy objects or tensors to python number types\n 
\"\"\"\n if isinstance(value, int):\n return int(value)\n\n if isinstance(value, float):\n return float(value)\n\n if is_numpy_array(value):\n return value.item()\n\n if is_numpy_number(value):\n return value.item()\n\n if is_jax_device_array(value):\n return value.item()\n\n if is_pytorch_tensor(value):\n return value.item()\n\n if is_tf_tensor(value):\n return value.numpy().item()\n\n raise ValueError('not a number')\n", "path": "aim/sdk/num_utils.py"}]}
1,778
159
gh_patches_debug_683
rasdani/github-patches
git_diff
pallets__werkzeug-1726
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Pytest fails due to missing dependency Reproduction: Activate virtualenv and execute `pytest` Expected result: Tests are run Actual result: ```(env) :~/git/werkzeug[master ?]🙂 pytest ========================= test session starts ========================== platform darwin -- Python 3.6.8, pytest-5.3.2, py-1.8.0, pluggy-0.13.0 rootdir: /Users/latham/git/werkzeug, inifile: setup.cfg, testpaths: tests plugins: mock-1.11.2, cov-2.8.1 collected 563 items / 1 error / 562 selected ================================ ERRORS ================================ _________________ ERROR collecting tests/test_debug.py _________________ tests/test_debug.py:372: in <module> @pytest.mark.timeout(2) ../../Library/Python/3.6/lib/python/site-packages/_pytest/mark/structures.py:327: in __getattr__ PytestUnknownMarkWarning, E pytest.PytestUnknownMarkWarning: Unknown pytest.mark.timeout - is this a typo? You can register custom marks to avoid this warning - for details, see https://docs.pytest.org/en/latest/mark.html !!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!! =========================== 1 error in 1.60s ===========================``` </issue> <code> [start of setup.py] 1 import io 2 import re 3 4 from setuptools import find_packages 5 from setuptools import setup 6 7 with io.open("README.rst", "rt", encoding="utf8") as f: 8 readme = f.read() 9 10 with io.open("src/werkzeug/__init__.py", "rt", encoding="utf8") as f: 11 version = re.search(r'__version__ = "(.*?)"', f.read(), re.M).group(1) 12 13 setup( 14 name="Werkzeug", 15 version=version, 16 url="https://palletsprojects.com/p/werkzeug/", 17 project_urls={ 18 "Documentation": "https://werkzeug.palletsprojects.com/", 19 "Code": "https://github.com/pallets/werkzeug", 20 "Issue tracker": "https://github.com/pallets/werkzeug/issues", 21 }, 22 license="BSD-3-Clause", 23 author="Armin Ronacher", 24 author_email="[email protected]", 25 maintainer="Pallets", 26 maintainer_email="[email protected]", 27 description="The comprehensive WSGI web application library.", 28 long_description=readme, 29 classifiers=[ 30 "Development Status :: 5 - Production/Stable", 31 "Environment :: Web Environment", 32 "Intended Audience :: Developers", 33 "License :: OSI Approved :: BSD License", 34 "Operating System :: OS Independent", 35 "Programming Language :: Python", 36 "Programming Language :: Python :: 2", 37 "Programming Language :: Python :: 2.7", 38 "Programming Language :: Python :: 3", 39 "Programming Language :: Python :: 3.5", 40 "Programming Language :: Python :: 3.6", 41 "Programming Language :: Python :: 3.7", 42 "Programming Language :: Python :: 3.8", 43 "Programming Language :: Python :: Implementation :: CPython", 44 "Programming Language :: Python :: Implementation :: PyPy", 45 "Topic :: Internet :: WWW/HTTP :: Dynamic Content", 46 "Topic :: Internet :: WWW/HTTP :: WSGI", 47 "Topic :: Internet :: WWW/HTTP :: WSGI :: Application", 48 "Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware", 49 "Topic :: Software Development :: Libraries :: Application Frameworks", 50 "Topic :: Software Development :: Libraries :: Python Modules", 51 ], 52 packages=find_packages("src"), 53 package_dir={"": "src"}, 54 include_package_data=True, 55 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*", 56 extras_require={ 57 "watchdog": ["watchdog"], 58 "dev": [ 59 "pytest", 60 "coverage", 61 "tox", 62 "sphinx", 63 "pallets-sphinx-themes", 64 
"sphinx-issues", 65 ], 66 }, 67 ) 68 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -57,6 +57,7 @@ "watchdog": ["watchdog"], "dev": [ "pytest", + "pytest-timeout", "coverage", "tox", "sphinx",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -57,6 +57,7 @@\n \"watchdog\": [\"watchdog\"],\n \"dev\": [\n \"pytest\",\n+ \"pytest-timeout\",\n \"coverage\",\n \"tox\",\n \"sphinx\",\n", "issue": "Pytest fails due to missing dependency\nReproduction:\r\n\r\nActivate virtualenv and execute `pytest` \r\n\r\nExpected result:\r\nTests are run\r\n\r\nActual result:\r\n```(env) :~/git/werkzeug[master ?]\ud83d\ude42 pytest\r\n========================= test session starts ==========================\r\nplatform darwin -- Python 3.6.8, pytest-5.3.2, py-1.8.0, pluggy-0.13.0\r\nrootdir: /Users/latham/git/werkzeug, inifile: setup.cfg, testpaths: tests\r\nplugins: mock-1.11.2, cov-2.8.1\r\ncollected 563 items / 1 error / 562 selected \r\n\r\n================================ ERRORS ================================\r\n_________________ ERROR collecting tests/test_debug.py _________________\r\ntests/test_debug.py:372: in <module>\r\n @pytest.mark.timeout(2)\r\n../../Library/Python/3.6/lib/python/site-packages/_pytest/mark/structures.py:327: in __getattr__\r\n PytestUnknownMarkWarning,\r\nE pytest.PytestUnknownMarkWarning: Unknown pytest.mark.timeout - is this a typo? You can register custom marks to avoid this warning - for details, see https://docs.pytest.org/en/latest/mark.html\r\n!!!!!!!!!!!!!!!! Interrupted: 1 error during collection !!!!!!!!!!!!!!!!\r\n=========================== 1 error in 1.60s ===========================```\r\n\n", "before_files": [{"content": "import io\nimport re\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nwith io.open(\"README.rst\", \"rt\", encoding=\"utf8\") as f:\n readme = f.read()\n\nwith io.open(\"src/werkzeug/__init__.py\", \"rt\", encoding=\"utf8\") as f:\n version = re.search(r'__version__ = \"(.*?)\"', f.read(), re.M).group(1)\n\nsetup(\n name=\"Werkzeug\",\n version=version,\n url=\"https://palletsprojects.com/p/werkzeug/\",\n project_urls={\n \"Documentation\": \"https://werkzeug.palletsprojects.com/\",\n \"Code\": \"https://github.com/pallets/werkzeug\",\n \"Issue tracker\": \"https://github.com/pallets/werkzeug/issues\",\n },\n license=\"BSD-3-Clause\",\n author=\"Armin Ronacher\",\n author_email=\"[email protected]\",\n maintainer=\"Pallets\",\n maintainer_email=\"[email protected]\",\n description=\"The comprehensive WSGI web application library.\",\n long_description=readme,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Internet :: WWW/HTTP :: Dynamic Content\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware\",\n \"Topic :: Software Development :: Libraries :: Application Frameworks\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n packages=find_packages(\"src\"),\n package_dir={\"\": 
\"src\"},\n include_package_data=True,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*\",\n extras_require={\n \"watchdog\": [\"watchdog\"],\n \"dev\": [\n \"pytest\",\n \"coverage\",\n \"tox\",\n \"sphinx\",\n \"pallets-sphinx-themes\",\n \"sphinx-issues\",\n ],\n },\n)\n", "path": "setup.py"}]}
1,572
72
gh_patches_debug_2224
rasdani/github-patches
git_diff
google__TensorNetwork-608
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Some doc links to github yield error The "Edit on GitHub" links in the top right of some doc pages yield 404 errors when followed, for example [tn.Node](https://tensornetwork.readthedocs.io/en/latest/stubs/tensornetwork.Node.html) and [tn.contractors.optimal](https://tensornetwork.readthedocs.io/en/latest/stubs/tensornetwork.contractors.optimal.html#tensornetwork.contractors.optimal). The links at the top of list pages work, for example [common functions](https://tensornetwork.readthedocs.io/en/latest/network.html) and [contractors](https://tensornetwork.readthedocs.io/en/latest/contractors.html). Possibly those are meant to be links to edit the doc pages themselves, rather than the source code? </issue> <code> [start of docs/conf.py] 1 # Configuration file for the Sphinx documentation builder. 2 # 3 # This file only contains a selection of the most common options. For a full 4 # list see the documentation: 5 # http://www.sphinx-doc.org/en/master/config 6 7 # -- Path setup -------------------------------------------------------------- 8 9 # If extensions (or modules to document with autodoc) are in another directory, 10 # add these directories to sys.path here. If the directory is relative to the 11 # documentation root, use os.path.abspath to make it absolute, like shown here. 12 # 13 import os 14 import sys 15 sys.path.insert(0, os.path.abspath('../')) 16 17 # -- Project information ----------------------------------------------------- 18 19 project = 'TensorNetwork' 20 copyright = '2019, The TensorNetwork Authors' 21 author = 'The TensorNetwork Authors' 22 23 # -- General configuration --------------------------------------------------- 24 25 # Add any Sphinx extension module names here, as strings. They can be 26 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 27 # ones. 28 extensions = [ 29 'sphinx.ext.autodoc', 30 'sphinx.ext.napoleon', 31 'sphinx.ext.autosummary', 32 ] 33 34 # Add any paths that contain templates here, relative to this directory. 35 templates_path = ['_templates'] 36 37 # The master toctree document. 38 master_doc = 'index' 39 40 # List of patterns, relative to source directory, that match files and 41 # directories to ignore when looking for source files. 42 # This pattern also affects html_static_path and html_extra_path. 43 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] 44 45 autosummary_generate = True 46 napolean_use_rtype = False 47 # -- Options for HTML output ------------------------------------------------- 48 49 # The theme to use for HTML and HTML Help pages. See the documentation for 50 # a list of builtin themes. 51 # 52 html_theme = 'sphinx_rtd_theme' 53 html_theme_options = { 54 'logo_only': True, 55 } 56 html_logo = '_static/tensornetwork_logo.jpg' 57 master_doc = 'index' 58 default_role = 'py:obj' 59 autodoc_default_flags = ['members'] 60 autosummary_generate = True 61 [end of docs/conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -43,6 +43,7 @@ exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] autosummary_generate = True +autosummary_generate_overwrite = False napolean_use_rtype = False # -- Options for HTML output -------------------------------------------------
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -43,6 +43,7 @@\n exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n \n autosummary_generate = True\n+autosummary_generate_overwrite = False\n napolean_use_rtype = False\n # -- Options for HTML output -------------------------------------------------\n", "issue": "Some doc links to github yield error\nThe \"Edit on GitHub\" links in the top right of some doc pages yield 404 errors when followed, for example [tn.Node](https://tensornetwork.readthedocs.io/en/latest/stubs/tensornetwork.Node.html) and [tn.contractors.optimal](https://tensornetwork.readthedocs.io/en/latest/stubs/tensornetwork.contractors.optimal.html#tensornetwork.contractors.optimal).\r\n\r\nThe links at the top of list pages work, for example [common functions](https://tensornetwork.readthedocs.io/en/latest/network.html) and [contractors](https://tensornetwork.readthedocs.io/en/latest/contractors.html).\r\n\r\nPossibly those are meant to be links to edit the doc pages themselves, rather than the source code?\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath('../'))\n\n# -- Project information -----------------------------------------------------\n\nproject = 'TensorNetwork'\ncopyright = '2019, The TensorNetwork Authors'\nauthor = 'The TensorNetwork Authors'\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.autosummary',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\nautosummary_generate = True\nnapolean_use_rtype = False\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\nhtml_theme_options = {\n 'logo_only': True,\n}\nhtml_logo = '_static/tensornetwork_logo.jpg'\nmaster_doc = 'index'\ndefault_role = 'py:obj'\nautodoc_default_flags = ['members']\nautosummary_generate = True\n", "path": "docs/conf.py"}]}
1,263
87
gh_patches_debug_1019
rasdani/github-patches
git_diff
sunpy__sunpy-2561
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Document the CI DOCUMENT ALL THE THINGS </issue> <code> [start of sunpy/net/jsoc/attrs.py] 1 from __future__ import absolute_import 2 3 from sunpy.net.attr import AttrWalker, AttrAnd, AttrOr 4 from sunpy.net.vso.attrs import _VSOSimpleAttr 5 from sunpy.net.vso.attrs import Time, Wavelength 6 7 8 __all__ = ['Series', 'Protocol', 'Notify', 'Compression', 'Segment'] 9 10 11 class Series(_VSOSimpleAttr): 12 """ 13 The JSOC Series to Download. 14 15 See `this<http://jsoc.stanford.edu/JsocSeries_DataProducts_map.html>_` 16 for a list of series'. 17 """ 18 pass 19 20 21 class Segment(_VSOSimpleAttr): 22 """ 23 Segments choose which files to download when there are more than 24 one present for each record e.g. 'image' 25 """ 26 pass 27 28 29 class Protocol(_VSOSimpleAttr): 30 """ 31 The type of download to request one of 32 ("FITS", "JPEG", "MPG", "MP4", or "as-is"). 33 Only FITS is supported, the others will require extra keywords. 34 """ 35 pass 36 37 38 class Notify(_VSOSimpleAttr): 39 """ 40 An email address to get a notification to when JSOC has staged your request 41 """ 42 43 def __init__(self, value): 44 super(Notify, self).__init__(value) 45 if value.find('@') == -1: 46 raise ValueError("Notify attribute must contain an '@' symbol " 47 "to be a valid email address") 48 self.value = value 49 50 51 class Compression(_VSOSimpleAttr): 52 """ 53 Compression format for requested files. 54 55 'rice' or None, download FITS files with RICE compression. 56 """ 57 pass 58 59 60 walker = AttrWalker() 61 62 63 @walker.add_creator(AttrAnd, _VSOSimpleAttr, Time) 64 def _create(wlk, query): 65 66 map_ = {} 67 wlk.apply(query, map_) 68 return [map_] 69 70 71 @walker.add_applier(AttrAnd) 72 def _apply(wlk, query, imap): 73 74 for iattr in query.attrs: 75 wlk.apply(iattr, imap) 76 77 78 @walker.add_applier(_VSOSimpleAttr) 79 def _apply1(wlk, query, imap): 80 81 imap[query.__class__.__name__.lower()] = query.value 82 83 84 @walker.add_applier(Time) 85 def _apply2(wlk, query, imap): 86 imap['start_time'] = query.start 87 imap['end_time'] = query.end 88 89 90 @walker.add_applier(Wavelength) 91 def _apply_wave(wlk, query, imap): 92 if query.min != query.max: 93 raise ValueError( 94 "For JSOC queries Wavelength.min must equal Wavelength.max") 95 96 imap[query.__class__.__name__.lower()] = query.min 97 98 99 @walker.add_creator(AttrOr) 100 def _create1(wlk, query): 101 102 qblocks = [] 103 for iattr in query.attrs: 104 qblocks.extend(wlk.create(iattr)) 105 106 return qblocks 107 [end of sunpy/net/jsoc/attrs.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sunpy/net/jsoc/attrs.py b/sunpy/net/jsoc/attrs.py --- a/sunpy/net/jsoc/attrs.py +++ b/sunpy/net/jsoc/attrs.py @@ -12,8 +12,7 @@ """ The JSOC Series to Download. - See `this<http://jsoc.stanford.edu/JsocSeries_DataProducts_map.html>_` - for a list of series'. + This is the list of `Series <http://jsoc.stanford.edu/JsocSeries_DataProducts_map.html>_`. """ pass
{"golden_diff": "diff --git a/sunpy/net/jsoc/attrs.py b/sunpy/net/jsoc/attrs.py\n--- a/sunpy/net/jsoc/attrs.py\n+++ b/sunpy/net/jsoc/attrs.py\n@@ -12,8 +12,7 @@\n \"\"\"\n The JSOC Series to Download.\n \n- See `this<http://jsoc.stanford.edu/JsocSeries_DataProducts_map.html>_`\n- for a list of series'.\n+ This is the list of `Series <http://jsoc.stanford.edu/JsocSeries_DataProducts_map.html>_`.\n \"\"\"\n pass\n", "issue": "Document the CI\nDOCUMENT ALL THE THINGS\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom sunpy.net.attr import AttrWalker, AttrAnd, AttrOr\nfrom sunpy.net.vso.attrs import _VSOSimpleAttr\nfrom sunpy.net.vso.attrs import Time, Wavelength\n\n\n__all__ = ['Series', 'Protocol', 'Notify', 'Compression', 'Segment']\n\n\nclass Series(_VSOSimpleAttr):\n \"\"\"\n The JSOC Series to Download.\n\n See `this<http://jsoc.stanford.edu/JsocSeries_DataProducts_map.html>_`\n for a list of series'.\n \"\"\"\n pass\n\n\nclass Segment(_VSOSimpleAttr):\n \"\"\"\n Segments choose which files to download when there are more than\n one present for each record e.g. 'image'\n \"\"\"\n pass\n\n\nclass Protocol(_VSOSimpleAttr):\n \"\"\"\n The type of download to request one of\n (\"FITS\", \"JPEG\", \"MPG\", \"MP4\", or \"as-is\").\n Only FITS is supported, the others will require extra keywords.\n \"\"\"\n pass\n\n\nclass Notify(_VSOSimpleAttr):\n \"\"\"\n An email address to get a notification to when JSOC has staged your request\n \"\"\"\n\n def __init__(self, value):\n super(Notify, self).__init__(value)\n if value.find('@') == -1:\n raise ValueError(\"Notify attribute must contain an '@' symbol \"\n \"to be a valid email address\")\n self.value = value\n\n\nclass Compression(_VSOSimpleAttr):\n \"\"\"\n Compression format for requested files.\n\n 'rice' or None, download FITS files with RICE compression.\n \"\"\"\n pass\n\n\nwalker = AttrWalker()\n\n\[email protected]_creator(AttrAnd, _VSOSimpleAttr, Time)\ndef _create(wlk, query):\n\n map_ = {}\n wlk.apply(query, map_)\n return [map_]\n\n\[email protected]_applier(AttrAnd)\ndef _apply(wlk, query, imap):\n\n for iattr in query.attrs:\n wlk.apply(iattr, imap)\n\n\[email protected]_applier(_VSOSimpleAttr)\ndef _apply1(wlk, query, imap):\n\n imap[query.__class__.__name__.lower()] = query.value\n\n\[email protected]_applier(Time)\ndef _apply2(wlk, query, imap):\n imap['start_time'] = query.start\n imap['end_time'] = query.end\n\n\[email protected]_applier(Wavelength)\ndef _apply_wave(wlk, query, imap):\n if query.min != query.max:\n raise ValueError(\n \"For JSOC queries Wavelength.min must equal Wavelength.max\")\n\n imap[query.__class__.__name__.lower()] = query.min\n\n\[email protected]_creator(AttrOr)\ndef _create1(wlk, query):\n\n qblocks = []\n for iattr in query.attrs:\n qblocks.extend(wlk.create(iattr))\n\n return qblocks\n", "path": "sunpy/net/jsoc/attrs.py"}]}
1,413
135
gh_patches_debug_21587
rasdani/github-patches
git_diff
plotly__dash-744
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> merge pytest fixtures the scope of the issue is to merge and restructure the pytest fixtures in https://github.com/plotly/pytest-dash into dash. It might include few integration tests rewrite with the new fixtures and python state `assert`, but all tests rewrite is out of the scope of this issue. Note: all fixtures will be available for pytest as plugin thanks to the `pytest11` entry point by `pip install dash`. </issue> <code> [start of setup.py] 1 import io 2 from setuptools import setup, find_packages 3 4 main_ns = {} 5 exec(open('dash/version.py').read(), main_ns) # pylint: disable=exec-used 6 7 setup( 8 name='dash', 9 version=main_ns['__version__'], 10 author='chris p', 11 author_email='[email protected]', 12 packages=find_packages(exclude=['tests*']), 13 include_package_data=True, 14 license='MIT', 15 description=('A Python framework for building reactive web-apps. ' 16 'Developed by Plotly.'), 17 long_description=io.open('README.md', encoding='utf-8').read(), 18 long_description_content_type='text/markdown', 19 install_requires=[ 20 'Flask>=0.12', 21 'flask-compress', 22 'plotly', 23 'dash_renderer==0.24.0', 24 'dash-core-components==0.48.0', 25 'dash-html-components==0.16.0', 26 'dash-table==3.7.0' 27 ], 28 entry_points={ 29 'console_scripts': [ 30 'dash-generate-components =' 31 ' dash.development.component_generator:cli' 32 ] 33 }, 34 url='https://plot.ly/dash', 35 classifiers=[ 36 'Development Status :: 5 - Production/Stable', 37 'Environment :: Web Environment', 38 'Framework :: Flask', 39 'Intended Audience :: Developers', 40 'Intended Audience :: Education', 41 'Intended Audience :: Financial and Insurance Industry', 42 'Intended Audience :: Healthcare Industry', 43 'Intended Audience :: Manufacturing', 44 'Intended Audience :: Science/Research', 45 'License :: OSI Approved :: MIT License', 46 'Programming Language :: Python', 47 'Programming Language :: Python :: 2', 48 'Programming Language :: Python :: 2.7', 49 'Programming Language :: Python :: 3', 50 'Programming Language :: Python :: 3.3', 51 'Programming Language :: Python :: 3.4', 52 'Programming Language :: Python :: 3.5', 53 'Programming Language :: Python :: 3.6', 54 'Programming Language :: Python :: 3.7', 55 'Topic :: Database :: Front-Ends', 56 'Topic :: Office/Business :: Financial :: Spreadsheet', 57 'Topic :: Scientific/Engineering :: Visualization', 58 'Topic :: Software Development :: Libraries :: Application Frameworks', 59 'Topic :: Software Development :: Widget Sets' 60 ] 61 ) 62 [end of setup.py] [start of dash-renderer/version.py] 1 __version__ = '0.23.0' 2 [end of dash-renderer/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dash-renderer/version.py b/dash-renderer/version.py --- a/dash-renderer/version.py +++ b/dash-renderer/version.py @@ -1 +1 @@ -__version__ = '0.23.0' +__version__ = '0.24.0' diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -12,8 +12,10 @@ packages=find_packages(exclude=['tests*']), include_package_data=True, license='MIT', - description=('A Python framework for building reactive web-apps. ' - 'Developed by Plotly.'), + description=( + 'A Python framework for building reactive web-apps. ' + 'Developed by Plotly.' + ), long_description=io.open('README.md', encoding='utf-8').read(), long_description_content_type='text/markdown', install_requires=[ @@ -29,7 +31,10 @@ 'console_scripts': [ 'dash-generate-components =' ' dash.development.component_generator:cli' - ] + ], + 'pytest11': [ + 'dash = dash.testing.plugin' + ], }, url='https://plot.ly/dash', classifiers=[
{"golden_diff": "diff --git a/dash-renderer/version.py b/dash-renderer/version.py\n--- a/dash-renderer/version.py\n+++ b/dash-renderer/version.py\n@@ -1 +1 @@\n-__version__ = '0.23.0'\n+__version__ = '0.24.0'\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -12,8 +12,10 @@\n packages=find_packages(exclude=['tests*']),\n include_package_data=True,\n license='MIT',\n- description=('A Python framework for building reactive web-apps. '\n- 'Developed by Plotly.'),\n+ description=(\n+ 'A Python framework for building reactive web-apps. '\n+ 'Developed by Plotly.'\n+ ),\n long_description=io.open('README.md', encoding='utf-8').read(),\n long_description_content_type='text/markdown',\n install_requires=[\n@@ -29,7 +31,10 @@\n 'console_scripts': [\n 'dash-generate-components ='\n ' dash.development.component_generator:cli'\n- ]\n+ ],\n+ 'pytest11': [\n+ 'dash = dash.testing.plugin'\n+ ],\n },\n url='https://plot.ly/dash',\n classifiers=[\n", "issue": "merge pytest fixtures\nthe scope of the issue is to merge and restructure the pytest fixtures in https://github.com/plotly/pytest-dash into dash. It might include few integration tests rewrite with the new fixtures and python state `assert`, but all tests rewrite is out of the scope of this issue. \r\n\r\nNote: all fixtures will be available for pytest as plugin thanks to the `pytest11` entry point by `pip install dash`. \n", "before_files": [{"content": "import io\nfrom setuptools import setup, find_packages\n\nmain_ns = {}\nexec(open('dash/version.py').read(), main_ns) # pylint: disable=exec-used\n\nsetup(\n name='dash',\n version=main_ns['__version__'],\n author='chris p',\n author_email='[email protected]',\n packages=find_packages(exclude=['tests*']),\n include_package_data=True,\n license='MIT',\n description=('A Python framework for building reactive web-apps. '\n 'Developed by Plotly.'),\n long_description=io.open('README.md', encoding='utf-8').read(),\n long_description_content_type='text/markdown',\n install_requires=[\n 'Flask>=0.12',\n 'flask-compress',\n 'plotly',\n 'dash_renderer==0.24.0',\n 'dash-core-components==0.48.0',\n 'dash-html-components==0.16.0',\n 'dash-table==3.7.0'\n ],\n entry_points={\n 'console_scripts': [\n 'dash-generate-components ='\n ' dash.development.component_generator:cli'\n ]\n },\n url='https://plot.ly/dash',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Framework :: Flask',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Financial and Insurance Industry',\n 'Intended Audience :: Healthcare Industry',\n 'Intended Audience :: Manufacturing',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Database :: Front-Ends',\n 'Topic :: Office/Business :: Financial :: Spreadsheet',\n 'Topic :: Scientific/Engineering :: Visualization',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Widget Sets'\n ]\n)\n", "path": "setup.py"}, {"content": "__version__ = '0.23.0'\n", "path": "dash-renderer/version.py"}]}
1,280
287
gh_patches_debug_23701
rasdani/github-patches
git_diff
praw-dev__praw-905
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Simplify emoji code This pull requests contains the squashed changes from #894 (thanks @bakonydraco!) and provides some simplifications so that we have a base set of features to work with and can subsequently make additions or modifications as necessary. </issue> <code> [start of praw/models/reddit/emoji.py] 1 """Provide the Emoji class.""" 2 import os 3 4 from ...const import API_PATH 5 from ...exceptions import ClientException 6 from .base import RedditBase 7 8 9 class Emoji(RedditBase): 10 """An individual Emoji object.""" 11 12 __hash__ = RedditBase.__hash__ 13 STR_FIELD = 'name' 14 15 def __init__(self, reddit, subreddit, name, _data=None): 16 """Construct an instance of the Emoji object.""" 17 self.name = name 18 self.subreddit = subreddit 19 super(Emoji, self).__init__(reddit, _data) 20 21 def _fetch(self): 22 for emoji in self.subreddit.emoji: 23 if emoji.name.lower() == self.name.lower(): 24 self.__dict__.update(emoji.__dict__) 25 self._fetched = True 26 return 27 raise ClientException('/r/{} does not have the emoji {}' 28 .format(self.subreddit, self.name)) 29 30 def delete(self): 31 """Delete an emoji from this subreddit by Emoji. 32 33 To delete ``'test'`` as an emoji on the subreddit ``'praw_test'`` try: 34 35 .. code:: python 36 37 reddit.subreddit('praw_test').emoji['test'].delete() 38 39 """ 40 url = API_PATH['emoji_delete'].format( 41 emoji_name=self.name, subreddit=self.subreddit) 42 self._reddit.request('DELETE', url) 43 44 45 class SubredditEmoji(RedditBase): 46 """Provides a set of functions to a Subreddit for emoji.""" 47 48 __hash__ = RedditBase.__hash__ 49 50 def __getitem__(self, name): 51 """Lazily return the Emoji for the subreddit named ``name``. 52 53 :param name: The name of the emoji 54 55 This method is to be used to fetch a specific emoji url, like so: 56 57 .. code:: python 58 59 emoji = reddit.subreddit('praw_test').emoji['test'] 60 print(emoji) 61 62 """ 63 return Emoji(self._reddit, self.subreddit, name) 64 65 def __init__(self, subreddit): 66 """Create a SubredditEmoji instance. 67 68 :param subreddit: The subreddit whose emoji are affected. 69 70 """ 71 self.subreddit = subreddit 72 super(SubredditEmoji, self).__init__(subreddit._reddit, None) 73 74 def __iter__(self): 75 """Return a list of Emoji for the subreddit. 76 77 This method is to be used to discover all emoji for a subreddit: 78 79 .. code:: python 80 81 for emoji in reddit.subreddit('praw_test').emoji: 82 print(emoji) 83 84 """ 85 response = self.subreddit._reddit.get( 86 API_PATH['emoji_list'].format(subreddit=self.subreddit)) 87 for emoji_name, emoji_data in \ 88 response[self.subreddit.fullname].items(): 89 yield Emoji(self._reddit, self.subreddit, emoji_name, 90 _data=emoji_data) 91 92 def add(self, name, image_path): 93 """Add an emoji to this subreddit. 94 95 :param name: The name of the emoji 96 :param image_path: A path to a jpeg or png image. 97 :returns: The Emoji added. 98 99 To add ``'test'`` to the subreddit ``'praw_test'`` try: 100 101 .. 
code:: python 102 103 reddit.subreddit('praw_test').emoji.add('test','test.png') 104 105 """ 106 data = {'filepath': os.path.basename(image_path), 107 'mimetype': 'image/jpeg'} 108 if image_path.lower().endswith('.png'): 109 data['mimetype'] = 'image/png' 110 url = API_PATH['emoji_lease'].format(subreddit=self.subreddit) 111 112 # until we learn otherwise, assume this request always succeeds 113 upload_lease = self._reddit.post(url, data=data)['s3UploadLease'] 114 upload_data = {item['name']: item['value'] 115 for item in upload_lease['fields']} 116 upload_url = 'https:{}'.format(upload_lease['action']) 117 118 with open(image_path, 'rb') as image: 119 response = self._reddit._core._requestor._http.post( 120 upload_url, data=upload_data, files={'file': image}) 121 response.raise_for_status() 122 123 url = API_PATH['emoji_upload'].format( 124 subreddit=self.subreddit) 125 self._reddit.post(url, 126 data={'name': name, 's3_key': upload_data['key']}) 127 return Emoji(self._reddit, self.subreddit, name) 128 [end of praw/models/reddit/emoji.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/praw/models/reddit/emoji.py b/praw/models/reddit/emoji.py --- a/praw/models/reddit/emoji.py +++ b/praw/models/reddit/emoji.py @@ -9,9 +9,20 @@ class Emoji(RedditBase): """An individual Emoji object.""" - __hash__ = RedditBase.__hash__ STR_FIELD = 'name' + def __eq__(self, other): + """Return whether the other instance equals the current.""" + if isinstance(other, str): + return other == str(self) + return (isinstance(other, self.__class__) and + str(self) == str(other) and other.subreddit == self.subreddit) + + def __hash__(self): + """Return the hash of the current instance.""" + return (hash(self.__class__.__name__) ^ hash(str(self)) ^ + hash(self.subreddit)) + def __init__(self, reddit, subreddit, name, _data=None): """Construct an instance of the Emoji object.""" self.name = name @@ -20,7 +31,7 @@ def _fetch(self): for emoji in self.subreddit.emoji: - if emoji.name.lower() == self.name.lower(): + if emoji.name == self.name: self.__dict__.update(emoji.__dict__) self._fetched = True return
{"golden_diff": "diff --git a/praw/models/reddit/emoji.py b/praw/models/reddit/emoji.py\n--- a/praw/models/reddit/emoji.py\n+++ b/praw/models/reddit/emoji.py\n@@ -9,9 +9,20 @@\n class Emoji(RedditBase):\n \"\"\"An individual Emoji object.\"\"\"\n \n- __hash__ = RedditBase.__hash__\n STR_FIELD = 'name'\n \n+ def __eq__(self, other):\n+ \"\"\"Return whether the other instance equals the current.\"\"\"\n+ if isinstance(other, str):\n+ return other == str(self)\n+ return (isinstance(other, self.__class__) and\n+ str(self) == str(other) and other.subreddit == self.subreddit)\n+\n+ def __hash__(self):\n+ \"\"\"Return the hash of the current instance.\"\"\"\n+ return (hash(self.__class__.__name__) ^ hash(str(self)) ^\n+ hash(self.subreddit))\n+\n def __init__(self, reddit, subreddit, name, _data=None):\n \"\"\"Construct an instance of the Emoji object.\"\"\"\n self.name = name\n@@ -20,7 +31,7 @@\n \n def _fetch(self):\n for emoji in self.subreddit.emoji:\n- if emoji.name.lower() == self.name.lower():\n+ if emoji.name == self.name:\n self.__dict__.update(emoji.__dict__)\n self._fetched = True\n return\n", "issue": "Simplify emoji code\nThis pull requests contains the squashed changes from #894 (thanks\n@bakonydraco!) and provides some simplifications so that we have a base set of\nfeatures to work with and can subsequently make additions or modifications as\nnecessary.\n", "before_files": [{"content": "\"\"\"Provide the Emoji class.\"\"\"\nimport os\n\nfrom ...const import API_PATH\nfrom ...exceptions import ClientException\nfrom .base import RedditBase\n\n\nclass Emoji(RedditBase):\n \"\"\"An individual Emoji object.\"\"\"\n\n __hash__ = RedditBase.__hash__\n STR_FIELD = 'name'\n\n def __init__(self, reddit, subreddit, name, _data=None):\n \"\"\"Construct an instance of the Emoji object.\"\"\"\n self.name = name\n self.subreddit = subreddit\n super(Emoji, self).__init__(reddit, _data)\n\n def _fetch(self):\n for emoji in self.subreddit.emoji:\n if emoji.name.lower() == self.name.lower():\n self.__dict__.update(emoji.__dict__)\n self._fetched = True\n return\n raise ClientException('/r/{} does not have the emoji {}'\n .format(self.subreddit, self.name))\n\n def delete(self):\n \"\"\"Delete an emoji from this subreddit by Emoji.\n\n To delete ``'test'`` as an emoji on the subreddit ``'praw_test'`` try:\n\n .. code:: python\n\n reddit.subreddit('praw_test').emoji['test'].delete()\n\n \"\"\"\n url = API_PATH['emoji_delete'].format(\n emoji_name=self.name, subreddit=self.subreddit)\n self._reddit.request('DELETE', url)\n\n\nclass SubredditEmoji(RedditBase):\n \"\"\"Provides a set of functions to a Subreddit for emoji.\"\"\"\n\n __hash__ = RedditBase.__hash__\n\n def __getitem__(self, name):\n \"\"\"Lazily return the Emoji for the subreddit named ``name``.\n\n :param name: The name of the emoji\n\n This method is to be used to fetch a specific emoji url, like so:\n\n .. code:: python\n\n emoji = reddit.subreddit('praw_test').emoji['test']\n print(emoji)\n\n \"\"\"\n return Emoji(self._reddit, self.subreddit, name)\n\n def __init__(self, subreddit):\n \"\"\"Create a SubredditEmoji instance.\n\n :param subreddit: The subreddit whose emoji are affected.\n\n \"\"\"\n self.subreddit = subreddit\n super(SubredditEmoji, self).__init__(subreddit._reddit, None)\n\n def __iter__(self):\n \"\"\"Return a list of Emoji for the subreddit.\n\n This method is to be used to discover all emoji for a subreddit:\n\n .. 
code:: python\n\n for emoji in reddit.subreddit('praw_test').emoji:\n print(emoji)\n\n \"\"\"\n response = self.subreddit._reddit.get(\n API_PATH['emoji_list'].format(subreddit=self.subreddit))\n for emoji_name, emoji_data in \\\n response[self.subreddit.fullname].items():\n yield Emoji(self._reddit, self.subreddit, emoji_name,\n _data=emoji_data)\n\n def add(self, name, image_path):\n \"\"\"Add an emoji to this subreddit.\n\n :param name: The name of the emoji\n :param image_path: A path to a jpeg or png image.\n :returns: The Emoji added.\n\n To add ``'test'`` to the subreddit ``'praw_test'`` try:\n\n .. code:: python\n\n reddit.subreddit('praw_test').emoji.add('test','test.png')\n\n \"\"\"\n data = {'filepath': os.path.basename(image_path),\n 'mimetype': 'image/jpeg'}\n if image_path.lower().endswith('.png'):\n data['mimetype'] = 'image/png'\n url = API_PATH['emoji_lease'].format(subreddit=self.subreddit)\n\n # until we learn otherwise, assume this request always succeeds\n upload_lease = self._reddit.post(url, data=data)['s3UploadLease']\n upload_data = {item['name']: item['value']\n for item in upload_lease['fields']}\n upload_url = 'https:{}'.format(upload_lease['action'])\n\n with open(image_path, 'rb') as image:\n response = self._reddit._core._requestor._http.post(\n upload_url, data=upload_data, files={'file': image})\n response.raise_for_status()\n\n url = API_PATH['emoji_upload'].format(\n subreddit=self.subreddit)\n self._reddit.post(url,\n data={'name': name, 's3_key': upload_data['key']})\n return Emoji(self._reddit, self.subreddit, name)\n", "path": "praw/models/reddit/emoji.py"}]}
1,831
310
gh_patches_debug_20203
rasdani/github-patches
git_diff
lightly-ai__lightly-583
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Rewrite Docs with Low-Level Building Blocks: Python API # Rewrite Docs with Low-Level Building Blocks: Main Concepts The [Python API](https://docs.lightly.ai/lightly.html) pages still contain mid- and high-level building blocks. - [x] Remove the documentation for the mid-level building blocks - [x] Remove the documentation for the high-level building blocks Related to #493. </issue> <code> [start of lightly/__init__.py] 1 """Lightly is a computer vision framework for self-supervised learning. 2 3 With Lightly you can train deep learning models using 4 self-supervision. This means, that you don't require 5 any labels to train a model. Lightly has been built 6 to help you understand and work with large unlabeled datasets. 7 It is built on top of PyTorch and therefore fully compatible 8 with other frameworks such as Fast.ai. 9 10 The framework is structured into the following modules: 11 12 - **api**: 13 14 The lightly.api module handles communication with the Lightly web-app. 15 16 - **cli**: 17 18 The lightly.cli module provides a command-line interface for training 19 self-supervised models and embedding images. Furthermore, the command-line 20 tool can be used to upload and download images from/to the Lightly web-app. 21 22 - **core**: 23 24 The lightly.core module offers one-liners for simple self-supervised learning. 25 26 - **data**: 27 28 The lightly.data module provides a dataset wrapper and collate functions. The 29 collate functions are in charge of the data augmentations which are crucial for 30 self-supervised learning. 31 32 - **embedding**: 33 34 The lightly.embedding module combines the self-supervised models with a dataloader, 35 optimizer, and loss function to provide a simple pytorch-lightning trainable. 36 37 - **loss**: 38 39 The lightly.loss module contains implementations of popular self-supervised training 40 loss functions. 41 42 - **models**: 43 44 The lightly.models module holds the implementation of the ResNet as well as self- 45 supervised methods. Currently implements: 46 47 - SimCLR 48 49 - MoCo 50 51 - SimSiam 52 53 - Barlow Twins 54 55 - BYOL 56 57 - NNCLR 58 59 - **transforms**: 60 61 The lightly.transforms module implements custom data transforms. Currently implements: 62 63 - Gaussian Blur 64 65 - Random Rotation 66 67 - Random Solarization 68 69 - **utils**: 70 71 The lightly.utils package provides global utility methods. 72 The io module contains utility to save and load embeddings in a format which is 73 understood by the Lightly library. 74 75 """ 76 77 # Copyright (c) 2020. Lightly AG and its affiliates. 78 # All Rights Reserved 79 80 __name__ = 'lightly' 81 __version__ = '1.1.22' 82 83 import os 84 85 try: 86 # See (https://github.com/PyTorchLightning/pytorch-lightning) 87 # This variable is injected in the __builtins__ by the build 88 # process. It used to enable importing subpackages of skimage when 89 # the binaries are not built 90 __LIGHTLY_SETUP__ 91 except NameError: 92 __LIGHTLY_SETUP__ = False 93 94 95 if __LIGHTLY_SETUP__: 96 # setting up lightly 97 msg = f'Partial import of {__name__}=={__version__} during build process.' 
98 print(msg) 99 else: 100 # see if prefetch_generator is available 101 try: 102 import prefetch_generator 103 except ImportError: 104 _prefetch_generator_available = False 105 else: 106 _prefetch_generator_available = True 107 108 def _is_prefetch_generator_available(): 109 return _prefetch_generator_available 110 111 from lightly.core import * 112 from lightly import active_learning 113 from lightly import api 114 from lightly import data 115 from lightly import embedding 116 from lightly import loss 117 from lightly import models 118 from lightly import openapi_generated 119 from lightly import transforms 120 from lightly import utils 121 122 from lightly.api.version_checking import do_version_check 123 124 if os.getenv('LIGHTLY_DID_VERSION_CHECK', 'False') == 'False': 125 os.environ['LIGHTLY_DID_VERSION_CHECK'] = 'True' 126 127 try: 128 do_version_check(current_version=__version__) 129 except Exception as e: 130 pass 131 132 133 134 [end of lightly/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lightly/__init__.py b/lightly/__init__.py --- a/lightly/__init__.py +++ b/lightly/__init__.py @@ -29,11 +29,6 @@ collate functions are in charge of the data augmentations which are crucial for self-supervised learning. -- **embedding**: - - The lightly.embedding module combines the self-supervised models with a dataloader, - optimizer, and loss function to provide a simple pytorch-lightning trainable. - - **loss**: The lightly.loss module contains implementations of popular self-supervised training @@ -41,20 +36,22 @@ - **models**: - The lightly.models module holds the implementation of the ResNet as well as self- - supervised methods. Currently implements: - - - SimCLR - - - MoCo - - - SimSiam + The lightly.models module holds the implementation of the ResNet as well as heads + for self-supervised methods. It currently implements the heads of: - Barlow Twins - BYOL - + + - MoCo + - NNCLR + + - SimCLR + + - SimSiam + + - SwaV - **transforms**:
{"golden_diff": "diff --git a/lightly/__init__.py b/lightly/__init__.py\n--- a/lightly/__init__.py\n+++ b/lightly/__init__.py\n@@ -29,11 +29,6 @@\n collate functions are in charge of the data augmentations which are crucial for\n self-supervised learning.\n \n-- **embedding**:\n-\n- The lightly.embedding module combines the self-supervised models with a dataloader,\n- optimizer, and loss function to provide a simple pytorch-lightning trainable.\n-\n - **loss**:\n \n The lightly.loss module contains implementations of popular self-supervised training\n@@ -41,20 +36,22 @@\n \n - **models**:\n \n- The lightly.models module holds the implementation of the ResNet as well as self-\n- supervised methods. Currently implements:\n-\n- - SimCLR\n-\n- - MoCo\n-\n- - SimSiam\n+ The lightly.models module holds the implementation of the ResNet as well as heads\n+ for self-supervised methods. It currently implements the heads of:\n \n - Barlow Twins\n \n - BYOL\n-\n+ \n+ - MoCo\n+ \n - NNCLR\n+ \n+ - SimCLR\n+ \n+ - SimSiam\n+ \n+ - SwaV\n \n - **transforms**:\n", "issue": "Rewrite Docs with Low-Level Building Blocks: Python API\n# Rewrite Docs with Low-Level Building Blocks: Main Concepts\r\n\r\nThe [Python API](https://docs.lightly.ai/lightly.html) pages still contain mid- and high-level building blocks.\r\n\r\n- [x] Remove the documentation for the mid-level building blocks\r\n- [x] Remove the documentation for the high-level building blocks\r\n\r\nRelated to #493.\n", "before_files": [{"content": "\"\"\"Lightly is a computer vision framework for self-supervised learning.\n\nWith Lightly you can train deep learning models using\nself-supervision. This means, that you don't require\nany labels to train a model. Lightly has been built\nto help you understand and work with large unlabeled datasets.\nIt is built on top of PyTorch and therefore fully compatible \nwith other frameworks such as Fast.ai.\n\nThe framework is structured into the following modules:\n\n- **api**: \n\n The lightly.api module handles communication with the Lightly web-app.\n\n- **cli**:\n\n The lightly.cli module provides a command-line interface for training \n self-supervised models and embedding images. Furthermore, the command-line\n tool can be used to upload and download images from/to the Lightly web-app.\n\n- **core**:\n\n The lightly.core module offers one-liners for simple self-supervised learning.\n\n- **data**:\n\n The lightly.data module provides a dataset wrapper and collate functions. The\n collate functions are in charge of the data augmentations which are crucial for\n self-supervised learning.\n\n- **embedding**:\n\n The lightly.embedding module combines the self-supervised models with a dataloader,\n optimizer, and loss function to provide a simple pytorch-lightning trainable.\n\n- **loss**:\n\n The lightly.loss module contains implementations of popular self-supervised training\n loss functions.\n\n- **models**:\n\n The lightly.models module holds the implementation of the ResNet as well as self-\n supervised methods. Currently implements:\n\n - SimCLR\n\n - MoCo\n\n - SimSiam\n\n - Barlow Twins\n\n - BYOL\n\n - NNCLR\n\n- **transforms**:\n\n The lightly.transforms module implements custom data transforms. 
Currently implements:\n\n - Gaussian Blur\n\n - Random Rotation\n\n - Random Solarization\n\n- **utils**:\n\n The lightly.utils package provides global utility methods.\n The io module contains utility to save and load embeddings in a format which is\n understood by the Lightly library.\n\n\"\"\"\n\n# Copyright (c) 2020. Lightly AG and its affiliates.\n# All Rights Reserved\n\n__name__ = 'lightly'\n__version__ = '1.1.22'\n\nimport os\n\ntry:\n # See (https://github.com/PyTorchLightning/pytorch-lightning)\n # This variable is injected in the __builtins__ by the build\n # process. It used to enable importing subpackages of skimage when\n # the binaries are not built\n __LIGHTLY_SETUP__\nexcept NameError:\n __LIGHTLY_SETUP__ = False\n\n\nif __LIGHTLY_SETUP__:\n # setting up lightly\n msg = f'Partial import of {__name__}=={__version__} during build process.' \n print(msg)\nelse:\n # see if prefetch_generator is available\n try:\n import prefetch_generator\n except ImportError:\n _prefetch_generator_available = False\n else:\n _prefetch_generator_available = True\n\n def _is_prefetch_generator_available():\n return _prefetch_generator_available\n\n from lightly.core import *\n from lightly import active_learning\n from lightly import api\n from lightly import data\n from lightly import embedding\n from lightly import loss\n from lightly import models\n from lightly import openapi_generated\n from lightly import transforms\n from lightly import utils\n \n from lightly.api.version_checking import do_version_check\n\n if os.getenv('LIGHTLY_DID_VERSION_CHECK', 'False') == 'False':\n os.environ['LIGHTLY_DID_VERSION_CHECK'] = 'True'\n\n try:\n do_version_check(current_version=__version__)\n except Exception as e:\n pass\n\n\n\n", "path": "lightly/__init__.py"}]}
1,742
297
gh_patches_debug_40041
rasdani/github-patches
git_diff
conan-io__conan-center-index-6496
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [request] celero/2.8.0 ### Package Details * Package Name/Version: **celero/2.8.0** * Changelog: **https://github.com/DigitalInBlue/Celero/releases/tag/v2.8.0** The above mentioned version is newly released by the upstream project and not yet available as a recipe. Please add this version. </issue> <code> [start of recipes/celero/all/conanfile.py] 1 import os 2 3 from conans import ConanFile, CMake, tools 4 from conans.errors import ConanInvalidConfiguration 5 6 required_conan_version = ">=1.28.0" 7 8 class CeleroConan(ConanFile): 9 name = "celero" 10 description = "C++ Benchmarking Library" 11 license = "Apache-2.0" 12 topics = ("conan", "celero", "benchmark", "benchmark-tests", "measurements", "microbenchmarks") 13 homepage = "https://github.com/DigitalInBlue/Celero" 14 url = "https://github.com/conan-io/conan-center-index" 15 exports_sources = ["CMakeLists.txt", "patches/**"] 16 generators = "cmake" 17 settings = "os", "arch", "compiler", "build_type" 18 options = {"shared": [True, False], "fPIC": [True, False]} 19 default_options = {"shared": False, "fPIC": True} 20 21 _cmake = None 22 23 @property 24 def _source_subfolder(self): 25 return "source_subfolder" 26 27 def config_options(self): 28 if self.settings.os == "Windows": 29 del self.options.fPIC 30 31 @property 32 def _compilers_minimum_version(self): 33 return { 34 "gcc": "6", 35 "Visual Studio": "14", 36 "clang": "3.4", 37 "apple-clang": "5.1", 38 } 39 40 def configure(self): 41 if self.options.shared: 42 del self.options.fPIC 43 if self.settings.compiler.cppstd: 44 tools.check_min_cppstd(self, 14) 45 minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False) 46 if not minimum_version: 47 self.output.warn("celero requires C++14. Your compiler is unknown. 
Assuming it supports C++14.") 48 elif tools.Version(self.settings.compiler.version) < minimum_version: 49 raise ConanInvalidConfiguration("celero requires C++14, which your compiler does not support.") 50 51 def source(self): 52 tools.get(**self.conan_data["sources"][self.version]) 53 os.rename("Celero-" + self.version, self._source_subfolder) 54 55 def _configure_cmake(self): 56 if self._cmake: 57 return self._cmake 58 self._cmake = CMake(self) 59 self._cmake.definitions["CELERO_COMPILE_DYNAMIC_LIBRARIES"] = self.options.shared 60 self._cmake.definitions["CELERO_COMPILE_PIC"] = self.options.get_safe("fPIC", True) 61 self._cmake.definitions["CELERO_ENABLE_EXPERIMENTS"] = False 62 self._cmake.definitions["CELERO_ENABLE_FOLDERS"] = False 63 self._cmake.definitions["CELERO_ENABLE_TESTS"] = False 64 self._cmake.definitions["CELERO_TREAT_WARNINGS_AS_ERRORS"] = False 65 self._cmake.configure() 66 return self._cmake 67 68 def build(self): 69 for patch in self.conan_data.get("patches", {}).get(self.version, []): 70 tools.patch(**patch) 71 cmake = self._configure_cmake() 72 cmake.build() 73 74 def package(self): 75 self.copy("license.txt", dst="licenses", src=self._source_subfolder) 76 cmake = self._configure_cmake() 77 cmake.install() 78 tools.rmdir(os.path.join(self.package_folder, "share")) 79 80 def package_info(self): 81 # FIXME: official CMake target is exported without namespace 82 self.cpp_info.filenames["cmake_find_package"] = "Celero" 83 self.cpp_info.filenames["cmake_find_package_multi"] = "Celero" 84 self.cpp_info.names["cmake_find_package"] = "celero" 85 self.cpp_info.names["cmake_find_package_multi"] = "celero" 86 self.cpp_info.libs = tools.collect_libs(self) 87 if not self.options.shared: 88 self.cpp_info.defines = ["CELERO_STATIC"] 89 if self.settings.os == "Linux": 90 self.cpp_info.system_libs = ["pthread"] 91 elif self.settings.os == "Windows": 92 self.cpp_info.system_libs = ["powrprof", "psapi"] 93 [end of recipes/celero/all/conanfile.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/recipes/celero/all/conanfile.py b/recipes/celero/all/conanfile.py --- a/recipes/celero/all/conanfile.py +++ b/recipes/celero/all/conanfile.py @@ -1,9 +1,9 @@ import os - +import textwrap from conans import ConanFile, CMake, tools from conans.errors import ConanInvalidConfiguration -required_conan_version = ">=1.28.0" +required_conan_version = ">=1.33.0" class CeleroConan(ConanFile): name = "celero" @@ -49,8 +49,7 @@ raise ConanInvalidConfiguration("celero requires C++14, which your compiler does not support.") def source(self): - tools.get(**self.conan_data["sources"][self.version]) - os.rename("Celero-" + self.version, self._source_subfolder) + tools.get(**self.conan_data["sources"][self.version], strip_root=True, destination=self._source_subfolder) def _configure_cmake(self): if self._cmake: @@ -76,17 +75,44 @@ cmake = self._configure_cmake() cmake.install() tools.rmdir(os.path.join(self.package_folder, "share")) + self._create_cmake_module_alias_targets( + os.path.join(self.package_folder, self._module_file_rel_path), + {"celero": "celero::celero"} + ) + + @staticmethod + def _create_cmake_module_alias_targets(module_file, targets): + content = "" + for alias, aliased in targets.items(): + content += textwrap.dedent("""\ + if(TARGET {aliased} AND NOT TARGET {alias}) + add_library({alias} INTERFACE IMPORTED) + set_property(TARGET {alias} PROPERTY INTERFACE_LINK_LIBRARIES {aliased}) + endif() + """.format(alias=alias, aliased=aliased)) + tools.save(module_file, content) + + @property + def _module_subfolder(self): + return os.path.join("lib", "cmake") + + @property + def _module_file_rel_path(self): + return os.path.join(self._module_subfolder, + "conan-official-{}-targets.cmake".format(self.name)) def package_info(self): - # FIXME: official CMake target is exported without namespace self.cpp_info.filenames["cmake_find_package"] = "Celero" self.cpp_info.filenames["cmake_find_package_multi"] = "Celero" self.cpp_info.names["cmake_find_package"] = "celero" self.cpp_info.names["cmake_find_package_multi"] = "celero" self.cpp_info.libs = tools.collect_libs(self) + self.cpp_info.builddirs.append(self._module_subfolder) + self.cpp_info.build_modules["cmake_find_package"] = [self._module_file_rel_path] + self.cpp_info.build_modules["cmake_find_package_multi"] = [self._module_file_rel_path] if not self.options.shared: self.cpp_info.defines = ["CELERO_STATIC"] - if self.settings.os == "Linux": + if self.settings.os in ("FreeBSD", "Linux"): self.cpp_info.system_libs = ["pthread"] elif self.settings.os == "Windows": self.cpp_info.system_libs = ["powrprof", "psapi"]
{"golden_diff": "diff --git a/recipes/celero/all/conanfile.py b/recipes/celero/all/conanfile.py\n--- a/recipes/celero/all/conanfile.py\n+++ b/recipes/celero/all/conanfile.py\n@@ -1,9 +1,9 @@\n import os\n-\n+import textwrap\n from conans import ConanFile, CMake, tools\n from conans.errors import ConanInvalidConfiguration\n \n-required_conan_version = \">=1.28.0\"\n+required_conan_version = \">=1.33.0\"\n \n class CeleroConan(ConanFile):\n name = \"celero\"\n@@ -49,8 +49,7 @@\n raise ConanInvalidConfiguration(\"celero requires C++14, which your compiler does not support.\")\n \n def source(self):\n- tools.get(**self.conan_data[\"sources\"][self.version])\n- os.rename(\"Celero-\" + self.version, self._source_subfolder)\n+ tools.get(**self.conan_data[\"sources\"][self.version], strip_root=True, destination=self._source_subfolder)\n \n def _configure_cmake(self):\n if self._cmake:\n@@ -76,17 +75,44 @@\n cmake = self._configure_cmake()\n cmake.install()\n tools.rmdir(os.path.join(self.package_folder, \"share\"))\n+ self._create_cmake_module_alias_targets(\n+ os.path.join(self.package_folder, self._module_file_rel_path),\n+ {\"celero\": \"celero::celero\"}\n+ )\n+\n+ @staticmethod\n+ def _create_cmake_module_alias_targets(module_file, targets):\n+ content = \"\"\n+ for alias, aliased in targets.items():\n+ content += textwrap.dedent(\"\"\"\\\n+ if(TARGET {aliased} AND NOT TARGET {alias})\n+ add_library({alias} INTERFACE IMPORTED)\n+ set_property(TARGET {alias} PROPERTY INTERFACE_LINK_LIBRARIES {aliased})\n+ endif()\n+ \"\"\".format(alias=alias, aliased=aliased))\n+ tools.save(module_file, content)\n+\n+ @property\n+ def _module_subfolder(self):\n+ return os.path.join(\"lib\", \"cmake\")\n+\n+ @property\n+ def _module_file_rel_path(self):\n+ return os.path.join(self._module_subfolder,\n+ \"conan-official-{}-targets.cmake\".format(self.name))\n \n def package_info(self):\n- # FIXME: official CMake target is exported without namespace\n self.cpp_info.filenames[\"cmake_find_package\"] = \"Celero\"\n self.cpp_info.filenames[\"cmake_find_package_multi\"] = \"Celero\"\n self.cpp_info.names[\"cmake_find_package\"] = \"celero\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"celero\"\n self.cpp_info.libs = tools.collect_libs(self)\n+ self.cpp_info.builddirs.append(self._module_subfolder)\n+ self.cpp_info.build_modules[\"cmake_find_package\"] = [self._module_file_rel_path]\n+ self.cpp_info.build_modules[\"cmake_find_package_multi\"] = [self._module_file_rel_path]\n if not self.options.shared:\n self.cpp_info.defines = [\"CELERO_STATIC\"]\n- if self.settings.os == \"Linux\":\n+ if self.settings.os in (\"FreeBSD\", \"Linux\"):\n self.cpp_info.system_libs = [\"pthread\"]\n elif self.settings.os == \"Windows\":\n self.cpp_info.system_libs = [\"powrprof\", \"psapi\"]\n", "issue": "[request] celero/2.8.0\n### Package Details\r\n * Package Name/Version: **celero/2.8.0**\r\n * Changelog: **https://github.com/DigitalInBlue/Celero/releases/tag/v2.8.0**\r\n\r\nThe above mentioned version is newly released by the upstream project and not yet available as a recipe. 
Please add this version.\r\n\n", "before_files": [{"content": "import os\n\nfrom conans import ConanFile, CMake, tools\nfrom conans.errors import ConanInvalidConfiguration\n\nrequired_conan_version = \">=1.28.0\"\n\nclass CeleroConan(ConanFile):\n name = \"celero\"\n description = \"C++ Benchmarking Library\"\n license = \"Apache-2.0\"\n topics = (\"conan\", \"celero\", \"benchmark\", \"benchmark-tests\", \"measurements\", \"microbenchmarks\")\n homepage = \"https://github.com/DigitalInBlue/Celero\"\n url = \"https://github.com/conan-io/conan-center-index\"\n exports_sources = [\"CMakeLists.txt\", \"patches/**\"]\n generators = \"cmake\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\"shared\": [True, False], \"fPIC\": [True, False]}\n default_options = {\"shared\": False, \"fPIC\": True}\n\n _cmake = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n @property\n def _compilers_minimum_version(self):\n return {\n \"gcc\": \"6\",\n \"Visual Studio\": \"14\",\n \"clang\": \"3.4\",\n \"apple-clang\": \"5.1\",\n }\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n if self.settings.compiler.cppstd:\n tools.check_min_cppstd(self, 14)\n minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)\n if not minimum_version:\n self.output.warn(\"celero requires C++14. Your compiler is unknown. Assuming it supports C++14.\")\n elif tools.Version(self.settings.compiler.version) < minimum_version:\n raise ConanInvalidConfiguration(\"celero requires C++14, which your compiler does not support.\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n os.rename(\"Celero-\" + self.version, self._source_subfolder)\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n self._cmake.definitions[\"CELERO_COMPILE_DYNAMIC_LIBRARIES\"] = self.options.shared\n self._cmake.definitions[\"CELERO_COMPILE_PIC\"] = self.options.get_safe(\"fPIC\", True)\n self._cmake.definitions[\"CELERO_ENABLE_EXPERIMENTS\"] = False\n self._cmake.definitions[\"CELERO_ENABLE_FOLDERS\"] = False\n self._cmake.definitions[\"CELERO_ENABLE_TESTS\"] = False\n self._cmake.definitions[\"CELERO_TREAT_WARNINGS_AS_ERRORS\"] = False\n self._cmake.configure()\n return self._cmake\n\n def build(self):\n for patch in self.conan_data.get(\"patches\", {}).get(self.version, []):\n tools.patch(**patch)\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(\"license.txt\", dst=\"licenses\", src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n tools.rmdir(os.path.join(self.package_folder, \"share\"))\n\n def package_info(self):\n # FIXME: official CMake target is exported without namespace\n self.cpp_info.filenames[\"cmake_find_package\"] = \"Celero\"\n self.cpp_info.filenames[\"cmake_find_package_multi\"] = \"Celero\"\n self.cpp_info.names[\"cmake_find_package\"] = \"celero\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"celero\"\n self.cpp_info.libs = tools.collect_libs(self)\n if not self.options.shared:\n self.cpp_info.defines = [\"CELERO_STATIC\"]\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs = [\"pthread\"]\n elif self.settings.os == \"Windows\":\n self.cpp_info.system_libs = [\"powrprof\", \"psapi\"]\n", "path": "recipes/celero/all/conanfile.py"}]}
1,707
774
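The record above centers on a Conan recipe change that writes a small CMake module aliasing the legacy `celero` target to the namespaced `celero::celero`. The sketch below is illustrative only and not taken from the repository: it reproduces just that rendering step in plain Python so the generated CMake text can be inspected on its own. The target names come from the record's diff; everything else is assumed.

```python
import textwrap


def render_alias_module(targets):
    """Render CMake that maps legacy target names onto namespaced ones."""
    content = ""
    for alias, aliased in targets.items():
        content += textwrap.dedent("""\
            if(TARGET {aliased} AND NOT TARGET {alias})
                add_library({alias} INTERFACE IMPORTED)
                set_property(TARGET {alias} PROPERTY INTERFACE_LINK_LIBRARIES {aliased})
            endif()
        """).format(alias=alias, aliased=aliased)
    return content


print(render_alias_module({"celero": "celero::celero"}))
```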
gh_patches_debug_1916
rasdani/github-patches
git_diff
safe-global__safe-config-service-23
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Set port numbers in docker compose via environment variables To provide more flexibility when setting up the ports for a given environment, we should not use static ports in `docker-compose`. Instead those ports should be extracted to the `.env` file. </issue> <code> [start of src/config/gunicorn.py] 1 import multiprocessing 2 import os 3 from distutils.util import strtobool 4 5 bind = f"0.0.0.0:{os.getenv('PORT', '8000')}" 6 accesslog = "-" 7 8 workers = int(os.getenv("WEB_CONCURRENCY", multiprocessing.cpu_count() * 2)) 9 threads = int(os.getenv("PYTHON_MAX_THREADS", 1)) 10 11 reload = bool(strtobool(os.getenv("WEB_RELOAD", "false"))) 12 [end of src/config/gunicorn.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/config/gunicorn.py b/src/config/gunicorn.py --- a/src/config/gunicorn.py +++ b/src/config/gunicorn.py @@ -2,7 +2,7 @@ import os from distutils.util import strtobool -bind = f"0.0.0.0:{os.getenv('PORT', '8000')}" +bind = f"0.0.0.0:{os.getenv('GUNICORN_BIND_PORT', '8000')}" accesslog = "-" workers = int(os.getenv("WEB_CONCURRENCY", multiprocessing.cpu_count() * 2))
{"golden_diff": "diff --git a/src/config/gunicorn.py b/src/config/gunicorn.py\n--- a/src/config/gunicorn.py\n+++ b/src/config/gunicorn.py\n@@ -2,7 +2,7 @@\n import os\n from distutils.util import strtobool\n \n-bind = f\"0.0.0.0:{os.getenv('PORT', '8000')}\"\n+bind = f\"0.0.0.0:{os.getenv('GUNICORN_BIND_PORT', '8000')}\"\n accesslog = \"-\"\n \n workers = int(os.getenv(\"WEB_CONCURRENCY\", multiprocessing.cpu_count() * 2))\n", "issue": "Set port numbers in docker compose via environment variables\nTo provide more flexibility when setting up the ports for a given environment, we should not use static ports in `docker-compose`. Instead those ports should be extracted to the `.env` file.\n", "before_files": [{"content": "import multiprocessing\nimport os\nfrom distutils.util import strtobool\n\nbind = f\"0.0.0.0:{os.getenv('PORT', '8000')}\"\naccesslog = \"-\"\n\nworkers = int(os.getenv(\"WEB_CONCURRENCY\", multiprocessing.cpu_count() * 2))\nthreads = int(os.getenv(\"PYTHON_MAX_THREADS\", 1))\n\nreload = bool(strtobool(os.getenv(\"WEB_RELOAD\", \"false\")))\n", "path": "src/config/gunicorn.py"}]}
694
133
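The gunicorn record replaces the hard-coded `PORT` lookup with `GUNICORN_BIND_PORT` so docker-compose can feed the value from a `.env` file. As a small usage sketch (my own illustration, not code from that repository), the helper below shows one way an entrypoint could resolve and sanity-check such a variable before handing it to gunicorn; the range check is an added assumption, only the variable name is taken from the diff.

```python
import os


def resolve_bind_address(default_port=8000):
    """Build a gunicorn bind string from GUNICORN_BIND_PORT, with a fallback."""
    raw = os.getenv("GUNICORN_BIND_PORT", str(default_port))
    port = int(raw)  # fail fast on a malformed value
    if not 0 < port < 65536:
        raise ValueError(f"GUNICORN_BIND_PORT out of range: {port}")
    return f"0.0.0.0:{port}"


print(resolve_bind_address())              # 0.0.0.0:8000
os.environ["GUNICORN_BIND_PORT"] = "9000"
print(resolve_bind_address())              # 0.0.0.0:9000
```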
gh_patches_debug_35252
rasdani/github-patches
git_diff
pytorch__text-1525
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add a `max_words` argument to `build_vocab_from_iterator` ## 🚀 Feature <!-- A clear and concise description of the feature proposal --> [Link to the docs](https://pytorch.org/text/stable/vocab.html?highlight=build%20vocab#torchtext.vocab.build_vocab_from_iterator) I believe it would be beneficial to limit the number of words you want in your vocabulary with an argument like `max_words`, e.g.: ``` vocab = build_vocab_from_iterator(yield_tokens_batch(file_path), specials=["<unk>"], max_words=50000) ``` **Motivation** <!-- Please outline the motivation for the proposal. Is your feature request related to a problem? e.g., I'm always frustrated when [...]. If this is related to another GitHub issue, please link here too --> This allows a controllable-sized `nn.Embedding`, with rare words being mapped to `<unk>`. Otherwise, it would not be practical to use `build_vocab_from_iterator` for larger datasets. **Alternatives** <!-- A clear and concise description of any alternative solutions or features you've considered, if any. --> Keras and Huggingface's tokenizers would be viable alternatives, but do not nicely integrate with the torchtext ecosystem. </issue> <code> [start of torchtext/vocab/vocab_factory.py] 1 from .vocab import Vocab 2 from typing import Dict, Iterable, Optional, List 3 from collections import Counter, OrderedDict 4 from torchtext._torchtext import ( 5 Vocab as VocabPybind, 6 ) 7 8 9 def vocab(ordered_dict: Dict, min_freq: int = 1, 10 specials: Optional[List[str]] = None, 11 special_first: bool = True) -> Vocab: 12 r"""Factory method for creating a vocab object which maps tokens to indices. 13 14 Note that the ordering in which key value pairs were inserted in the `ordered_dict` will be respected when building the vocab. 15 Therefore if sorting by token frequency is important to the user, the `ordered_dict` should be created in a way to reflect this. 16 17 Args: 18 ordered_dict: Ordered Dictionary mapping tokens to their corresponding occurance frequencies. 19 min_freq: The minimum frequency needed to include a token in the vocabulary. 20 specials: Special symbols to add. The order of supplied tokens will be preserved. 21 special_first: Indicates whether to insert symbols at the beginning or at the end. 
22 23 Returns: 24 torchtext.vocab.Vocab: A `Vocab` object 25 26 Examples: 27 >>> from torchtext.vocab import vocab 28 >>> from collections import Counter, OrderedDict 29 >>> counter = Counter(["a", "a", "b", "b", "b"]) 30 >>> sorted_by_freq_tuples = sorted(counter.items(), key=lambda x: x[1], reverse=True) 31 >>> ordered_dict = OrderedDict(sorted_by_freq_tuples) 32 >>> v1 = vocab(ordered_dict) 33 >>> print(v1['a']) #prints 1 34 >>> print(v1['out of vocab']) #raise RuntimeError since default index is not set 35 >>> tokens = ['e', 'd', 'c', 'b', 'a'] 36 >>> #adding <unk> token and default index 37 >>> unk_token = '<unk>' 38 >>> default_index = -1 39 >>> v2 = vocab(OrderedDict([(token, 1) for token in tokens]), specials=[unk_token]) 40 >>> v2.set_default_index(default_index) 41 >>> print(v2['<unk>']) #prints 0 42 >>> print(v2['out of vocab']) #prints -1 43 >>> #make default index same as index of unk_token 44 >>> v2.set_default_index(v2[unk_token]) 45 >>> v2['out of vocab'] is v2[unk_token] #prints True 46 """ 47 specials = specials or [] 48 for token in specials: 49 ordered_dict.pop(token, None) 50 51 tokens = [] 52 for token, freq in ordered_dict.items(): 53 if freq >= min_freq: 54 tokens.append(token) 55 56 if special_first: 57 tokens[0:0] = specials 58 else: 59 tokens.extend(specials) 60 61 return Vocab(VocabPybind(tokens, None)) 62 63 64 def build_vocab_from_iterator(iterator: Iterable, min_freq: int = 1, specials: Optional[List[str]] = None, special_first: bool = True) -> Vocab: 65 """ 66 Build a Vocab from an iterator. 67 68 Args: 69 iterator: Iterator used to build Vocab. Must yield list or iterator of tokens. 70 min_freq: The minimum frequency needed to include a token in the vocabulary. 71 specials: Special symbols to add. The order of supplied tokens will be preserved. 72 special_first: Indicates whether to insert symbols at the beginning or at the end. 73 74 75 Returns: 76 torchtext.vocab.Vocab: A `Vocab` object 77 78 Examples: 79 >>> #generating vocab from text file 80 >>> import io 81 >>> from torchtext.vocab import build_vocab_from_iterator 82 >>> def yield_tokens(file_path): 83 >>> with io.open(file_path, encoding = 'utf-8') as f: 84 >>> for line in f: 85 >>> yield line.strip().split() 86 >>> vocab = build_vocab_from_iterator(yield_tokens_batch(file_path), specials=["<unk>"]) 87 """ 88 89 counter = Counter() 90 for tokens in iterator: 91 counter.update(tokens) 92 93 sorted_by_freq_tuples = sorted(counter.items(), key=lambda x: x[0]) 94 sorted_by_freq_tuples.sort(key=lambda x: x[1], reverse=True) 95 ordered_dict = OrderedDict(sorted_by_freq_tuples) 96 97 word_vocab = vocab(ordered_dict, min_freq=min_freq, specials=specials or [], 98 special_first=special_first) 99 return word_vocab 100 [end of torchtext/vocab/vocab_factory.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/torchtext/vocab/vocab_factory.py b/torchtext/vocab/vocab_factory.py --- a/torchtext/vocab/vocab_factory.py +++ b/torchtext/vocab/vocab_factory.py @@ -49,6 +49,7 @@ ordered_dict.pop(token, None) tokens = [] + # Save room for special tokens for token, freq in ordered_dict.items(): if freq >= min_freq: tokens.append(token) @@ -61,7 +62,7 @@ return Vocab(VocabPybind(tokens, None)) -def build_vocab_from_iterator(iterator: Iterable, min_freq: int = 1, specials: Optional[List[str]] = None, special_first: bool = True) -> Vocab: +def build_vocab_from_iterator(iterator: Iterable, min_freq: int = 1, specials: Optional[List[str]] = None, special_first: bool = True, max_tokens: Optional[int] = None) -> Vocab: """ Build a Vocab from an iterator. @@ -70,6 +71,7 @@ min_freq: The minimum frequency needed to include a token in the vocabulary. specials: Special symbols to add. The order of supplied tokens will be preserved. special_first: Indicates whether to insert symbols at the beginning or at the end. + max_tokens: If provided, creates the vocab from the `max_tokens - len(specials)` most frequent tokens. Returns: @@ -90,10 +92,16 @@ for tokens in iterator: counter.update(tokens) - sorted_by_freq_tuples = sorted(counter.items(), key=lambda x: x[0]) - sorted_by_freq_tuples.sort(key=lambda x: x[1], reverse=True) - ordered_dict = OrderedDict(sorted_by_freq_tuples) + specials = specials or [] + + # First sort by descending frequency, then lexicographically + sorted_by_freq_tuples = sorted(counter.items(), key=lambda x: (-x[1], x[0])) + + if max_tokens is None: + ordered_dict = OrderedDict(sorted_by_freq_tuples) + else: + assert len(specials) < max_tokens, "len(specials) >= max_tokens, so the vocab will be entirely special tokens." + ordered_dict = OrderedDict(sorted_by_freq_tuples[:max_tokens - len(specials)]) - word_vocab = vocab(ordered_dict, min_freq=min_freq, specials=specials or [], - special_first=special_first) + word_vocab = vocab(ordered_dict, min_freq=min_freq, specials=specials, special_first=special_first) return word_vocab
{"golden_diff": "diff --git a/torchtext/vocab/vocab_factory.py b/torchtext/vocab/vocab_factory.py\n--- a/torchtext/vocab/vocab_factory.py\n+++ b/torchtext/vocab/vocab_factory.py\n@@ -49,6 +49,7 @@\n ordered_dict.pop(token, None)\n \n tokens = []\n+ # Save room for special tokens\n for token, freq in ordered_dict.items():\n if freq >= min_freq:\n tokens.append(token)\n@@ -61,7 +62,7 @@\n return Vocab(VocabPybind(tokens, None))\n \n \n-def build_vocab_from_iterator(iterator: Iterable, min_freq: int = 1, specials: Optional[List[str]] = None, special_first: bool = True) -> Vocab:\n+def build_vocab_from_iterator(iterator: Iterable, min_freq: int = 1, specials: Optional[List[str]] = None, special_first: bool = True, max_tokens: Optional[int] = None) -> Vocab:\n \"\"\"\n Build a Vocab from an iterator.\n \n@@ -70,6 +71,7 @@\n min_freq: The minimum frequency needed to include a token in the vocabulary.\n specials: Special symbols to add. The order of supplied tokens will be preserved.\n special_first: Indicates whether to insert symbols at the beginning or at the end.\n+ max_tokens: If provided, creates the vocab from the `max_tokens - len(specials)` most frequent tokens.\n \n \n Returns:\n@@ -90,10 +92,16 @@\n for tokens in iterator:\n counter.update(tokens)\n \n- sorted_by_freq_tuples = sorted(counter.items(), key=lambda x: x[0])\n- sorted_by_freq_tuples.sort(key=lambda x: x[1], reverse=True)\n- ordered_dict = OrderedDict(sorted_by_freq_tuples)\n+ specials = specials or []\n+\n+ # First sort by descending frequency, then lexicographically\n+ sorted_by_freq_tuples = sorted(counter.items(), key=lambda x: (-x[1], x[0]))\n+\n+ if max_tokens is None:\n+ ordered_dict = OrderedDict(sorted_by_freq_tuples)\n+ else:\n+ assert len(specials) < max_tokens, \"len(specials) >= max_tokens, so the vocab will be entirely special tokens.\"\n+ ordered_dict = OrderedDict(sorted_by_freq_tuples[:max_tokens - len(specials)])\n \n- word_vocab = vocab(ordered_dict, min_freq=min_freq, specials=specials or [],\n- special_first=special_first)\n+ word_vocab = vocab(ordered_dict, min_freq=min_freq, specials=specials, special_first=special_first)\n return word_vocab\n", "issue": "Add a `max_words` argument to `build_vocab_from_iterator`\n## \ud83d\ude80 Feature\r\n<!-- A clear and concise description of the feature proposal -->\r\n\r\n[Link to the docs](https://pytorch.org/text/stable/vocab.html?highlight=build%20vocab#torchtext.vocab.build_vocab_from_iterator)\r\n\r\nI believe it would be beneficial to limit the number of words you want in your vocabulary with an argument like `max_words`, e.g.:\r\n```\r\nvocab = build_vocab_from_iterator(yield_tokens_batch(file_path), specials=[\"<unk>\"], max_words=50000)\r\n```\r\n\r\n**Motivation**\r\n\r\n<!-- Please outline the motivation for the proposal. Is your feature request related to a problem? e.g., I'm always frustrated when [...]. If this is related to another GitHub issue, please link here too -->\r\n\r\n\r\nThis allows a controllable-sized `nn.Embedding`, with rare words being mapped to `<unk>`. Otherwise, it would not be practical to use `build_vocab_from_iterator` for larger datasets.\r\n\r\n\r\n**Alternatives**\r\n\r\n<!-- A clear and concise description of any alternative solutions or features you've considered, if any. 
-->\r\n\r\nKeras and Huggingface's tokenizers would be viable alternatives, but do not nicely integrate with the torchtext ecosystem.\r\n\r\n\n", "before_files": [{"content": "from .vocab import Vocab\nfrom typing import Dict, Iterable, Optional, List\nfrom collections import Counter, OrderedDict\nfrom torchtext._torchtext import (\n Vocab as VocabPybind,\n)\n\n\ndef vocab(ordered_dict: Dict, min_freq: int = 1,\n specials: Optional[List[str]] = None,\n special_first: bool = True) -> Vocab:\n r\"\"\"Factory method for creating a vocab object which maps tokens to indices.\n\n Note that the ordering in which key value pairs were inserted in the `ordered_dict` will be respected when building the vocab.\n Therefore if sorting by token frequency is important to the user, the `ordered_dict` should be created in a way to reflect this.\n\n Args:\n ordered_dict: Ordered Dictionary mapping tokens to their corresponding occurance frequencies.\n min_freq: The minimum frequency needed to include a token in the vocabulary.\n specials: Special symbols to add. The order of supplied tokens will be preserved.\n special_first: Indicates whether to insert symbols at the beginning or at the end.\n\n Returns:\n torchtext.vocab.Vocab: A `Vocab` object\n\n Examples:\n >>> from torchtext.vocab import vocab\n >>> from collections import Counter, OrderedDict\n >>> counter = Counter([\"a\", \"a\", \"b\", \"b\", \"b\"])\n >>> sorted_by_freq_tuples = sorted(counter.items(), key=lambda x: x[1], reverse=True)\n >>> ordered_dict = OrderedDict(sorted_by_freq_tuples)\n >>> v1 = vocab(ordered_dict)\n >>> print(v1['a']) #prints 1\n >>> print(v1['out of vocab']) #raise RuntimeError since default index is not set\n >>> tokens = ['e', 'd', 'c', 'b', 'a']\n >>> #adding <unk> token and default index\n >>> unk_token = '<unk>'\n >>> default_index = -1\n >>> v2 = vocab(OrderedDict([(token, 1) for token in tokens]), specials=[unk_token])\n >>> v2.set_default_index(default_index)\n >>> print(v2['<unk>']) #prints 0\n >>> print(v2['out of vocab']) #prints -1\n >>> #make default index same as index of unk_token\n >>> v2.set_default_index(v2[unk_token])\n >>> v2['out of vocab'] is v2[unk_token] #prints True\n \"\"\"\n specials = specials or []\n for token in specials:\n ordered_dict.pop(token, None)\n\n tokens = []\n for token, freq in ordered_dict.items():\n if freq >= min_freq:\n tokens.append(token)\n\n if special_first:\n tokens[0:0] = specials\n else:\n tokens.extend(specials)\n\n return Vocab(VocabPybind(tokens, None))\n\n\ndef build_vocab_from_iterator(iterator: Iterable, min_freq: int = 1, specials: Optional[List[str]] = None, special_first: bool = True) -> Vocab:\n \"\"\"\n Build a Vocab from an iterator.\n\n Args:\n iterator: Iterator used to build Vocab. Must yield list or iterator of tokens.\n min_freq: The minimum frequency needed to include a token in the vocabulary.\n specials: Special symbols to add. 
The order of supplied tokens will be preserved.\n special_first: Indicates whether to insert symbols at the beginning or at the end.\n\n\n Returns:\n torchtext.vocab.Vocab: A `Vocab` object\n\n Examples:\n >>> #generating vocab from text file\n >>> import io\n >>> from torchtext.vocab import build_vocab_from_iterator\n >>> def yield_tokens(file_path):\n >>> with io.open(file_path, encoding = 'utf-8') as f:\n >>> for line in f:\n >>> yield line.strip().split()\n >>> vocab = build_vocab_from_iterator(yield_tokens_batch(file_path), specials=[\"<unk>\"])\n \"\"\"\n\n counter = Counter()\n for tokens in iterator:\n counter.update(tokens)\n\n sorted_by_freq_tuples = sorted(counter.items(), key=lambda x: x[0])\n sorted_by_freq_tuples.sort(key=lambda x: x[1], reverse=True)\n ordered_dict = OrderedDict(sorted_by_freq_tuples)\n\n word_vocab = vocab(ordered_dict, min_freq=min_freq, specials=specials or [],\n special_first=special_first)\n return word_vocab\n", "path": "torchtext/vocab/vocab_factory.py"}]}
1,941
573
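The torchtext record adds a `max_tokens` cut-off to `build_vocab_from_iterator`. The snippet below is a dependency-free sketch of the same truncation rule (sort by descending frequency with lexicographic tie-breaking, then reserve room for specials); it mirrors the golden diff's logic but is not the library code, and the toy corpus is invented.

```python
from collections import Counter, OrderedDict


def truncate_vocab(token_iter, specials, max_tokens):
    """Keep the (max_tokens - len(specials)) most frequent tokens."""
    counter = Counter()
    for tokens in token_iter:
        counter.update(tokens)
    # descending frequency first, ties broken lexicographically
    sorted_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
    assert len(specials) < max_tokens, "vocab would be all special tokens"
    return OrderedDict(sorted_pairs[: max_tokens - len(specials)])


corpus = [["the", "cat", "sat"], ["the", "dog", "sat"], ["the", "cat"]]
print(truncate_vocab(corpus, specials=["<unk>"], max_tokens=3))
# OrderedDict([('the', 3), ('cat', 2)])
```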
gh_patches_debug_30437
rasdani/github-patches
git_diff
rotki__rotki-4261
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Docker container's /tmp doesn't get automatically cleaned ## Problem Definition PyInstaller extracts the files in /tmp every time the backend starts In the docker container /tmp is never cleaned which results in an ever-increasing size on every application restart ## TODO - [ ] Add /tmp cleanup on start </issue> <code> [start of packaging/docker/entrypoint.py] 1 #!/usr/bin/python3 2 import json 3 import logging 4 import os 5 import subprocess 6 import time 7 from pathlib import Path 8 from typing import Dict, Optional, Any, List 9 10 logger = logging.getLogger('monitor') 11 logging.basicConfig(level=logging.DEBUG) 12 13 DEFAULT_LOG_LEVEL = 'critical' 14 15 16 def load_config_from_file() -> Optional[Dict[str, Any]]: 17 config_file = Path('/config/rotki_config.json') 18 if not config_file.exists(): 19 logger.info('no config file provided') 20 return None 21 22 with open(config_file) as file: 23 try: 24 data = json.load(file) 25 return data 26 except json.JSONDecodeError as e: 27 logger.error(e) 28 return None 29 30 31 def load_config_from_env() -> Dict[str, Any]: 32 loglevel = os.environ.get('LOGLEVEL') 33 logfromothermodules = os.environ.get('LOGFROMOTHERMODDULES') 34 sleep_secs = os.environ.get('SLEEP_SECS') 35 max_size_in_mb_all_logs = os.environ.get('MAX_SIZE_IN_MB_ALL_LOGS') 36 max_logfiles_num = os.environ.get('MAX_LOGFILES_NUM') 37 38 return { 39 'loglevel': loglevel, 40 'logfromothermodules': logfromothermodules, 41 'sleep_secs': sleep_secs, 42 'max_logfiles_num': max_logfiles_num, 43 'max_size_in_mb_all_logs': max_size_in_mb_all_logs, 44 } 45 46 47 def load_config() -> List[str]: 48 env_config = load_config_from_env() 49 file_config = load_config_from_file() 50 51 logger.info('loading config from env') 52 53 loglevel = env_config.get('loglevel') 54 log_from_other_modules = env_config.get('logfromothermodules') 55 sleep_secs = env_config.get('sleep_secs') 56 max_logfiles_num = env_config.get('max_logfiles_num') 57 max_size_in_mb_all_logs = env_config.get('max_size_in_mb_all_logs') 58 59 if file_config is not None: 60 logger.info('loading config from file') 61 62 if file_config.get('loglevel') is not None: 63 loglevel = file_config.get('loglevel') 64 65 if file_config.get('logfromothermodules') is not None: 66 log_from_other_modules = file_config.get('logfromothermodules') 67 68 if file_config.get('sleep-secs') is not None: 69 sleep_secs = file_config.get('sleep-secs') 70 71 if file_config.get('max_logfiles_num') is not None: 72 max_logfiles_num = file_config.get('max_logfiles_num') 73 74 if file_config.get('max_size_in_mb_all_logs') is not None: 75 max_size_in_mb_all_logs = file_config.get('max_size_in_mb_all_logs') 76 77 args = [ 78 '--data-dir', 79 '/data', 80 '--logfile', 81 '/logs/rotki.log', 82 '--loglevel', 83 loglevel if loglevel is not None else DEFAULT_LOG_LEVEL, 84 ] 85 86 if log_from_other_modules is True: 87 args.append('--logfromothermodules') 88 89 if sleep_secs is not None: 90 args.append('--sleep-secs') 91 args.append(str(sleep_secs)) 92 93 if max_logfiles_num is not None: 94 args.append('--max-logfiles-num') 95 args.append(str(max_logfiles_num)) 96 97 if max_size_in_mb_all_logs is not None: 98 args.append('--max-size-in-mb-all-logs') 99 args.append(str(max_size_in_mb_all_logs)) 100 101 return args 102 103 104 base_args = [ 105 '/usr/sbin/rotki', 106 '--rest-api-port', 107 '4242', 108 '--websockets-api-port', 109 '4243', 110 '--api-cors', 111 
'http://localhost:*/*,app://.', 112 '--api-host', 113 '0.0.0.0', 114 ] 115 116 config_args = load_config() 117 cmd = base_args + config_args 118 119 logger.info('starting rotki backend') 120 121 rotki = subprocess.Popen(cmd) 122 123 if rotki.returncode == 1: 124 logger.error('Failed to start rotki') 125 exit(1) 126 127 logger.info('starting nginx') 128 129 nginx = subprocess.Popen('nginx -g "daemon off;"', shell=True) 130 131 if nginx.returncode == 1: 132 logger.error('Failed to start nginx') 133 exit(1) 134 135 while True: 136 time.sleep(60) 137 138 if rotki.poll() is not None: 139 logger.error('rotki has terminated exiting') 140 exit(1) 141 142 if nginx.poll() is not None: 143 logger.error('nginx was not running') 144 exit(1) 145 146 logger.info('OK: processes still running') 147 [end of packaging/docker/entrypoint.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/packaging/docker/entrypoint.py b/packaging/docker/entrypoint.py --- a/packaging/docker/entrypoint.py +++ b/packaging/docker/entrypoint.py @@ -2,10 +2,12 @@ import json import logging import os +import shutil import subprocess import time +from datetime import datetime, timedelta from pathlib import Path -from typing import Dict, Optional, Any, List +from typing import Any, Dict, List, Optional logger = logging.getLogger('monitor') logging.basicConfig(level=logging.DEBUG) @@ -13,6 +15,41 @@ DEFAULT_LOG_LEVEL = 'critical' +def can_delete(file: Path, cutoff: int) -> bool: + return int(os.stat(file).st_mtime) <= cutoff or file.name.startswith('_MEI') + + +def cleanup_tmp() -> None: + logger.info('Preparing to cleanup tmp directory') + tmp_dir = Path('/tmp/').glob('*') + cache_cutoff = datetime.today() - timedelta(hours=6) + cutoff_epoch = int(cache_cutoff.strftime("%s")) + to_delete = filter(lambda x: can_delete(x, cutoff_epoch), tmp_dir) + + deleted = 0 + skipped = 0 + + for item in to_delete: + path = Path(item) + if path.is_file(): + try: + path.unlink() + deleted += 1 + continue + except PermissionError: + skipped += 1 + continue + + try: + shutil.rmtree(item) + deleted += 1 + except OSError: + skipped += 1 + continue + + logger.info(f'Deleted {deleted} files or directories, skipped {skipped} from /tmp') + + def load_config_from_file() -> Optional[Dict[str, Any]]: config_file = Path('/config/rotki_config.json') if not config_file.exists(): @@ -101,6 +138,8 @@ return args +cleanup_tmp() + base_args = [ '/usr/sbin/rotki', '--rest-api-port',
{"golden_diff": "diff --git a/packaging/docker/entrypoint.py b/packaging/docker/entrypoint.py\n--- a/packaging/docker/entrypoint.py\n+++ b/packaging/docker/entrypoint.py\n@@ -2,10 +2,12 @@\n import json\n import logging\n import os\n+import shutil\n import subprocess\n import time\n+from datetime import datetime, timedelta\n from pathlib import Path\n-from typing import Dict, Optional, Any, List\n+from typing import Any, Dict, List, Optional\n \n logger = logging.getLogger('monitor')\n logging.basicConfig(level=logging.DEBUG)\n@@ -13,6 +15,41 @@\n DEFAULT_LOG_LEVEL = 'critical'\n \n \n+def can_delete(file: Path, cutoff: int) -> bool:\n+ return int(os.stat(file).st_mtime) <= cutoff or file.name.startswith('_MEI')\n+\n+\n+def cleanup_tmp() -> None:\n+ logger.info('Preparing to cleanup tmp directory')\n+ tmp_dir = Path('/tmp/').glob('*')\n+ cache_cutoff = datetime.today() - timedelta(hours=6)\n+ cutoff_epoch = int(cache_cutoff.strftime(\"%s\"))\n+ to_delete = filter(lambda x: can_delete(x, cutoff_epoch), tmp_dir)\n+\n+ deleted = 0\n+ skipped = 0\n+\n+ for item in to_delete:\n+ path = Path(item)\n+ if path.is_file():\n+ try:\n+ path.unlink()\n+ deleted += 1\n+ continue\n+ except PermissionError:\n+ skipped += 1\n+ continue\n+\n+ try:\n+ shutil.rmtree(item)\n+ deleted += 1\n+ except OSError:\n+ skipped += 1\n+ continue\n+\n+ logger.info(f'Deleted {deleted} files or directories, skipped {skipped} from /tmp')\n+\n+\n def load_config_from_file() -> Optional[Dict[str, Any]]:\n config_file = Path('/config/rotki_config.json')\n if not config_file.exists():\n@@ -101,6 +138,8 @@\n return args\n \n \n+cleanup_tmp()\n+\n base_args = [\n '/usr/sbin/rotki',\n '--rest-api-port',\n", "issue": "Docker container's /tmp doesn't get automatically cleaned\n## Problem Definition\r\n\r\nPyInstaller extracts the files in /tmp every time the backend starts\r\nIn the docker container /tmp is never cleaned which results in an ever-increasing size on every application restart\r\n\r\n## TODO\r\n\r\n- [ ] Add /tmp cleanup on start\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/python3\nimport json\nimport logging\nimport os\nimport subprocess\nimport time\nfrom pathlib import Path\nfrom typing import Dict, Optional, Any, List\n\nlogger = logging.getLogger('monitor')\nlogging.basicConfig(level=logging.DEBUG)\n\nDEFAULT_LOG_LEVEL = 'critical'\n\n\ndef load_config_from_file() -> Optional[Dict[str, Any]]:\n config_file = Path('/config/rotki_config.json')\n if not config_file.exists():\n logger.info('no config file provided')\n return None\n\n with open(config_file) as file:\n try:\n data = json.load(file)\n return data\n except json.JSONDecodeError as e:\n logger.error(e)\n return None\n\n\ndef load_config_from_env() -> Dict[str, Any]:\n loglevel = os.environ.get('LOGLEVEL')\n logfromothermodules = os.environ.get('LOGFROMOTHERMODDULES')\n sleep_secs = os.environ.get('SLEEP_SECS')\n max_size_in_mb_all_logs = os.environ.get('MAX_SIZE_IN_MB_ALL_LOGS')\n max_logfiles_num = os.environ.get('MAX_LOGFILES_NUM')\n\n return {\n 'loglevel': loglevel,\n 'logfromothermodules': logfromothermodules,\n 'sleep_secs': sleep_secs,\n 'max_logfiles_num': max_logfiles_num,\n 'max_size_in_mb_all_logs': max_size_in_mb_all_logs,\n }\n\n\ndef load_config() -> List[str]:\n env_config = load_config_from_env()\n file_config = load_config_from_file()\n\n logger.info('loading config from env')\n\n loglevel = env_config.get('loglevel')\n log_from_other_modules = env_config.get('logfromothermodules')\n sleep_secs = env_config.get('sleep_secs')\n 
max_logfiles_num = env_config.get('max_logfiles_num')\n max_size_in_mb_all_logs = env_config.get('max_size_in_mb_all_logs')\n\n if file_config is not None:\n logger.info('loading config from file')\n\n if file_config.get('loglevel') is not None:\n loglevel = file_config.get('loglevel')\n\n if file_config.get('logfromothermodules') is not None:\n log_from_other_modules = file_config.get('logfromothermodules')\n\n if file_config.get('sleep-secs') is not None:\n sleep_secs = file_config.get('sleep-secs')\n\n if file_config.get('max_logfiles_num') is not None:\n max_logfiles_num = file_config.get('max_logfiles_num')\n\n if file_config.get('max_size_in_mb_all_logs') is not None:\n max_size_in_mb_all_logs = file_config.get('max_size_in_mb_all_logs')\n\n args = [\n '--data-dir',\n '/data',\n '--logfile',\n '/logs/rotki.log',\n '--loglevel',\n loglevel if loglevel is not None else DEFAULT_LOG_LEVEL,\n ]\n\n if log_from_other_modules is True:\n args.append('--logfromothermodules')\n\n if sleep_secs is not None:\n args.append('--sleep-secs')\n args.append(str(sleep_secs))\n\n if max_logfiles_num is not None:\n args.append('--max-logfiles-num')\n args.append(str(max_logfiles_num))\n\n if max_size_in_mb_all_logs is not None:\n args.append('--max-size-in-mb-all-logs')\n args.append(str(max_size_in_mb_all_logs))\n\n return args\n\n\nbase_args = [\n '/usr/sbin/rotki',\n '--rest-api-port',\n '4242',\n '--websockets-api-port',\n '4243',\n '--api-cors',\n 'http://localhost:*/*,app://.',\n '--api-host',\n '0.0.0.0',\n]\n\nconfig_args = load_config()\ncmd = base_args + config_args\n\nlogger.info('starting rotki backend')\n\nrotki = subprocess.Popen(cmd)\n\nif rotki.returncode == 1:\n logger.error('Failed to start rotki')\n exit(1)\n\nlogger.info('starting nginx')\n\nnginx = subprocess.Popen('nginx -g \"daemon off;\"', shell=True)\n\nif nginx.returncode == 1:\n logger.error('Failed to start nginx')\n exit(1)\n\nwhile True:\n time.sleep(60)\n\n if rotki.poll() is not None:\n logger.error('rotki has terminated exiting')\n exit(1)\n\n if nginx.poll() is not None:\n logger.error('nginx was not running')\n exit(1)\n\n logger.info('OK: processes still running')\n", "path": "packaging/docker/entrypoint.py"}]}
1,933
473
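The rotki record adds a `/tmp` sweep so stale PyInstaller extraction directories (`_MEI*`) do not accumulate across container restarts. Below is a simplified, standalone sketch of that age-based cleanup; it is illustrative rather than the repository's code and uses `time.time()` instead of the diff's `strftime("%s")` cutoff.

```python
import shutil
import time
from pathlib import Path


def cleanup_stale(tmp_dir="/tmp", max_age_hours=6):
    """Remove entries older than the cutoff, plus PyInstaller _MEI* leftovers."""
    cutoff = time.time() - max_age_hours * 3600
    for entry in Path(tmp_dir).glob("*"):
        stale = entry.stat().st_mtime <= cutoff or entry.name.startswith("_MEI")
        if not stale:
            continue
        try:
            if entry.is_dir():
                shutil.rmtree(entry)
            else:
                entry.unlink()
        except OSError:
            pass  # skip entries that are in use or not removable
```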
gh_patches_debug_4839
rasdani/github-patches
git_diff
getsentry__sentry-python-261
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Half installed AioHttpIntegration causes aiohttp to crash If I call: ```python sentry_sdk.integrations.setup_integrations( [sentry_sdk.integrations.aiohttp.AioHttpIntegration()]) ``` after `sentry_sdk.init()` the `_handle` method of `aiohttp.web.Application` gets replaced but the integration does not get registered in the client. This causes the replaced `_handle` ro run into a codepath where there as a `await` missing. This gives an exception in every request: ``` ERROR:aiohttp.server:Unhandled exception Traceback (most recent call last): File "/usr/local/lib/python3.7/site-packages/aiohttp/web_protocol.py", line 447, in start await resp.prepare(request) AttributeError: 'coroutine' object has no attribute 'prepare' /usr/local/lib/python3.7/site-packages/xxx/base.py:151: RuntimeWarning: coroutine 'Application._handle' was never awaited self._loop.run_forever() ``` This will not get logged to sentry at all, because the `aiohttp.server` logger gets ignored by (half-)installing the integration (see #259). </issue> <code> [start of sentry_sdk/integrations/aiohttp.py] 1 import sys 2 import weakref 3 4 from sentry_sdk._compat import reraise 5 from sentry_sdk.hub import Hub 6 from sentry_sdk.integrations import Integration 7 from sentry_sdk.integrations.logging import ignore_logger 8 from sentry_sdk.integrations._wsgi_common import _filter_headers 9 from sentry_sdk.utils import capture_internal_exceptions, event_from_exception 10 11 import asyncio 12 from aiohttp.web import Application, HTTPException 13 14 15 class AioHttpIntegration(Integration): 16 identifier = "aiohttp" 17 18 @staticmethod 19 def setup_once(): 20 if sys.version_info < (3, 7): 21 # We better have contextvars or we're going to leak state between 22 # requests. 23 raise RuntimeError( 24 "The aiohttp integration for Sentry requires Python 3.7+" 25 ) 26 27 ignore_logger("aiohttp.server") 28 29 old_handle = Application._handle 30 31 async def sentry_app_handle(self, request, *args, **kwargs): 32 async def inner(): 33 hub = Hub.current 34 if hub.get_integration(AioHttpIntegration) is None: 35 return old_handle(self, request, *args, **kwargs) 36 37 weak_request = weakref.ref(request) 38 39 with Hub(Hub.current) as hub: 40 with hub.configure_scope() as scope: 41 scope.add_event_processor(_make_request_processor(weak_request)) 42 43 try: 44 response = await old_handle(self, request) 45 except HTTPException: 46 raise 47 except Exception: 48 reraise(*_capture_exception(hub)) 49 50 return response 51 52 return await asyncio.create_task(inner()) 53 54 Application._handle = sentry_app_handle 55 56 57 def _make_request_processor(weak_request): 58 def aiohttp_processor(event, hint): 59 request = weak_request() 60 if request is None: 61 return event 62 63 with capture_internal_exceptions(): 64 # TODO: Figure out what to do with request body. Methods on request 65 # are async, but event processors are not. 
66 67 request_info = event.setdefault("request", {}) 68 69 request_info["url"] = "%s://%s%s" % ( 70 request.scheme, 71 request.host, 72 request.path, 73 ) 74 75 request_info["query_string"] = request.query_string 76 request_info["method"] = request.method 77 request_info["env"] = {"REMOTE_ADDR": request.remote} 78 request_info["headers"] = _filter_headers(dict(request.headers)) 79 80 return event 81 82 return aiohttp_processor 83 84 85 def _capture_exception(hub): 86 exc_info = sys.exc_info() 87 event, hint = event_from_exception( 88 exc_info, 89 client_options=hub.client.options, 90 mechanism={"type": "aiohttp", "handled": False}, 91 ) 92 hub.capture_event(event, hint=hint) 93 return exc_info 94 [end of sentry_sdk/integrations/aiohttp.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sentry_sdk/integrations/aiohttp.py b/sentry_sdk/integrations/aiohttp.py --- a/sentry_sdk/integrations/aiohttp.py +++ b/sentry_sdk/integrations/aiohttp.py @@ -32,7 +32,7 @@ async def inner(): hub = Hub.current if hub.get_integration(AioHttpIntegration) is None: - return old_handle(self, request, *args, **kwargs) + return await old_handle(self, request, *args, **kwargs) weak_request = weakref.ref(request)
{"golden_diff": "diff --git a/sentry_sdk/integrations/aiohttp.py b/sentry_sdk/integrations/aiohttp.py\n--- a/sentry_sdk/integrations/aiohttp.py\n+++ b/sentry_sdk/integrations/aiohttp.py\n@@ -32,7 +32,7 @@\n async def inner():\n hub = Hub.current\n if hub.get_integration(AioHttpIntegration) is None:\n- return old_handle(self, request, *args, **kwargs)\n+ return await old_handle(self, request, *args, **kwargs)\n \n weak_request = weakref.ref(request)\n", "issue": "Half installed AioHttpIntegration causes aiohttp to crash\nIf I call:\r\n```python\r\nsentry_sdk.integrations.setup_integrations(\r\n [sentry_sdk.integrations.aiohttp.AioHttpIntegration()])\r\n```\r\nafter `sentry_sdk.init()` the `_handle` method of `aiohttp.web.Application` gets replaced but the integration does not get registered in the client. This causes the replaced `_handle` ro run into a codepath where there as a `await` missing. This gives an exception in every request:\r\n```\r\nERROR:aiohttp.server:Unhandled exception \r\nTraceback (most recent call last): \r\n File \"/usr/local/lib/python3.7/site-packages/aiohttp/web_protocol.py\", line 447, in start \r\n await resp.prepare(request) \r\nAttributeError: 'coroutine' object has no attribute 'prepare' \r\n/usr/local/lib/python3.7/site-packages/xxx/base.py:151: RuntimeWarning: coroutine 'Application._handle' was never awaited \r\n self._loop.run_forever() \r\n```\r\n\r\nThis will not get logged to sentry at all, because the `aiohttp.server` logger gets ignored by (half-)installing the integration (see #259).\n", "before_files": [{"content": "import sys\nimport weakref\n\nfrom sentry_sdk._compat import reraise\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.integrations import Integration\nfrom sentry_sdk.integrations.logging import ignore_logger\nfrom sentry_sdk.integrations._wsgi_common import _filter_headers\nfrom sentry_sdk.utils import capture_internal_exceptions, event_from_exception\n\nimport asyncio\nfrom aiohttp.web import Application, HTTPException\n\n\nclass AioHttpIntegration(Integration):\n identifier = \"aiohttp\"\n\n @staticmethod\n def setup_once():\n if sys.version_info < (3, 7):\n # We better have contextvars or we're going to leak state between\n # requests.\n raise RuntimeError(\n \"The aiohttp integration for Sentry requires Python 3.7+\"\n )\n\n ignore_logger(\"aiohttp.server\")\n\n old_handle = Application._handle\n\n async def sentry_app_handle(self, request, *args, **kwargs):\n async def inner():\n hub = Hub.current\n if hub.get_integration(AioHttpIntegration) is None:\n return old_handle(self, request, *args, **kwargs)\n\n weak_request = weakref.ref(request)\n\n with Hub(Hub.current) as hub:\n with hub.configure_scope() as scope:\n scope.add_event_processor(_make_request_processor(weak_request))\n\n try:\n response = await old_handle(self, request)\n except HTTPException:\n raise\n except Exception:\n reraise(*_capture_exception(hub))\n\n return response\n\n return await asyncio.create_task(inner())\n\n Application._handle = sentry_app_handle\n\n\ndef _make_request_processor(weak_request):\n def aiohttp_processor(event, hint):\n request = weak_request()\n if request is None:\n return event\n\n with capture_internal_exceptions():\n # TODO: Figure out what to do with request body. 
Methods on request\n # are async, but event processors are not.\n\n request_info = event.setdefault(\"request\", {})\n\n request_info[\"url\"] = \"%s://%s%s\" % (\n request.scheme,\n request.host,\n request.path,\n )\n\n request_info[\"query_string\"] = request.query_string\n request_info[\"method\"] = request.method\n request_info[\"env\"] = {\"REMOTE_ADDR\": request.remote}\n request_info[\"headers\"] = _filter_headers(dict(request.headers))\n\n return event\n\n return aiohttp_processor\n\n\ndef _capture_exception(hub):\n exc_info = sys.exc_info()\n event, hint = event_from_exception(\n exc_info,\n client_options=hub.client.options,\n mechanism={\"type\": \"aiohttp\", \"handled\": False},\n )\n hub.capture_event(event, hint=hint)\n return exc_info\n", "path": "sentry_sdk/integrations/aiohttp.py"}]}
1,599
130
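The aiohttp record fixes a missing `await`: when the integration is not registered, the wrapper hands back an un-awaited coroutine, so aiohttp later calls `.prepare()` on a coroutine object instead of a response. This small self-contained demo (neither sentry-sdk nor aiohttp code) shows the failure mode and the one-word fix.

```python
import asyncio


async def old_handle(request):
    return f"response for {request}"


async def broken_wrapper(request):
    # the bug: the coroutine is returned without being awaited
    return old_handle(request)


async def fixed_wrapper(request):
    return await old_handle(request)


async def main():
    result = await broken_wrapper("req")
    print(type(result))        # <class 'coroutine'>, which has no .prepare()
    result.close()             # close it so this demo does not warn
    print(await fixed_wrapper("req"))  # response for req


asyncio.run(main())
```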
gh_patches_debug_744
rasdani/github-patches
git_diff
LMFDB__lmfdb-5795
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Half integeral weight page visible on prod https://www.lmfdb.org/ModularForm/GL2/Q/holomorphic/half/ should redirect to beta, but it doesn't since the whitelist thinks it's inside CMFs. </issue> <code> [start of lmfdb/half_integral_weight_forms/__init__.py] 1 # -*- coding: utf-8 -*- 2 3 from lmfdb.app import app 4 from lmfdb.logger import make_logger 5 from flask import Blueprint 6 7 hiwf_page = Blueprint("hiwf", __name__, template_folder='templates', static_folder="static") 8 hiwf_logger = make_logger(hiwf_page) 9 10 11 @hiwf_page.context_processor 12 def body_class(): 13 return {'body_class': 'hiwf'} 14 15 from . import half_integral_form 16 assert half_integral_form 17 18 app.register_blueprint(hiwf_page, url_prefix="/ModularForm/GL2/Q/holomorphic/half") 19 [end of lmfdb/half_integral_weight_forms/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lmfdb/half_integral_weight_forms/__init__.py b/lmfdb/half_integral_weight_forms/__init__.py --- a/lmfdb/half_integral_weight_forms/__init__.py +++ b/lmfdb/half_integral_weight_forms/__init__.py @@ -15,4 +15,4 @@ from . import half_integral_form assert half_integral_form -app.register_blueprint(hiwf_page, url_prefix="/ModularForm/GL2/Q/holomorphic/half") +app.register_blueprint(hiwf_page, url_prefix="/ModularForm/GL2/Q/holomorphic_half")
{"golden_diff": "diff --git a/lmfdb/half_integral_weight_forms/__init__.py b/lmfdb/half_integral_weight_forms/__init__.py\n--- a/lmfdb/half_integral_weight_forms/__init__.py\n+++ b/lmfdb/half_integral_weight_forms/__init__.py\n@@ -15,4 +15,4 @@\n from . import half_integral_form\n assert half_integral_form\n \n-app.register_blueprint(hiwf_page, url_prefix=\"/ModularForm/GL2/Q/holomorphic/half\")\n+app.register_blueprint(hiwf_page, url_prefix=\"/ModularForm/GL2/Q/holomorphic_half\")\n", "issue": "Half integeral weight page visible on prod\nhttps://www.lmfdb.org/ModularForm/GL2/Q/holomorphic/half/ should redirect to beta, but it doesn't since the whitelist thinks it's inside CMFs.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom lmfdb.app import app\nfrom lmfdb.logger import make_logger\nfrom flask import Blueprint\n\nhiwf_page = Blueprint(\"hiwf\", __name__, template_folder='templates', static_folder=\"static\")\nhiwf_logger = make_logger(hiwf_page)\n\n\n@hiwf_page.context_processor\ndef body_class():\n return {'body_class': 'hiwf'}\n\nfrom . import half_integral_form\nassert half_integral_form\n\napp.register_blueprint(hiwf_page, url_prefix=\"/ModularForm/GL2/Q/holomorphic/half\")\n", "path": "lmfdb/half_integral_weight_forms/__init__.py"}]}
752
131
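The lmfdb record moves the half-integral-weight blueprint from `/ModularForm/GL2/Q/holomorphic/half` to `/ModularForm/GL2/Q/holomorphic_half` so it no longer sits under the classical-modular-forms prefix. Assuming the production whitelist is essentially a prefix check, as the issue implies, the toy check below illustrates why the old URL was treated as a CMF page while the new one is not; the function is hypothetical, not lmfdb code.

```python
CMF_PREFIX = "/ModularForm/GL2/Q/holomorphic/"


def looks_like_cmf(path):
    # hypothetical stand-in for the prod/beta whitelist test
    return path.startswith(CMF_PREFIX)


print(looks_like_cmf("/ModularForm/GL2/Q/holomorphic/half/"))   # True: kept on prod
print(looks_like_cmf("/ModularForm/GL2/Q/holomorphic_half/"))   # False: redirected to beta
```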
gh_patches_debug_29078
rasdani/github-patches
git_diff
mindee__doctr-848
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [datasets] Targets are modified inplace ### Bug description **Targets** are being changed when iterating over some dataset more than one time. The reason is storing targets in self.data, and changing them in the `__getitem__` ***in place*** using `pre_transforms`, etc. ```python # _AbstractDataset def __getitem__( self, index: int ) -> Tuple[Any, Any]: # Read image img, target = self._read_sample(index) # Pre-transforms (format conversion at run-time etc.) if self._pre_transforms is not None: img, target = self._pre_transforms(img, target) if self.img_transforms is not None: # typing issue cf. https://github.com/python/mypy/issues/5485 img = self.img_transforms(img) # type: ignore[call-arg] if self.sample_transforms is not None: img, target = self.sample_transforms(img, target) return img, target ``` This can be fixed by copying target in the `_read_sample` ```python # AbstractDataset def _read_sample(self, index: int) -> Tuple[tf.Tensor, Any]: img_name, target = self.data[index] # Read image img = read_img_as_tensor(os.path.join(self.root, img_name), dtype=tf.float32) return img, target ``` **OR** returning a copy of the target in all transform methods. ```python def convert_target_to_relative(img: ImageTensor, target: Dict[str, Any]) -> Tuple[ImageTensor, Dict[str, Any]]: target['boxes'] = convert_to_relative_coords(target['boxes'], get_img_shape(img)) return img, target ``` ### Code snippet to reproduce the bug ```python def process_image(train_example): img, target = train_example img_numpy = img.numpy() * 255 for example in target['boxes']: print(example) unnormalized_example = [int(example[0]*img.shape[1]), int(example[1]*img.shape[0]), int(example[2]*img.shape[1]), int(example[3]*img.shape[0])] cv2.rectangle(img=img_numpy, pt1=(unnormalized_example[0], unnormalized_example[1]), pt2=(unnormalized_example[2], unnormalized_example[3]), color=(0, 0, 255), thickness=2) return img_numpy train_set = SROIE(train=True, download=True) for i in range(2): for j, example in enumerate(train_set): if j == 0: print(f"{i} ____") img_n = process_image(example) ``` P.S. Sorry for not a pretty code style. This snippet is just for an example :) ### Error traceback ~changed target box coordinates ### Environment . </issue> <code> [start of doctr/datasets/datasets/tensorflow.py] 1 # Copyright (C) 2021-2022, Mindee. 2 3 # This program is licensed under the Apache License version 2. 4 # See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details. 5 6 import os 7 from typing import Any, List, Tuple 8 9 import tensorflow as tf 10 11 from doctr.io import read_img_as_tensor 12 13 from .base import _AbstractDataset, _VisionDataset 14 15 __all__ = ['AbstractDataset', 'VisionDataset'] 16 17 18 class AbstractDataset(_AbstractDataset): 19 20 def _read_sample(self, index: int) -> Tuple[tf.Tensor, Any]: 21 img_name, target = self.data[index] 22 # Read image 23 img = read_img_as_tensor(os.path.join(self.root, img_name), dtype=tf.float32) 24 25 return img, target 26 27 @staticmethod 28 def collate_fn(samples: List[Tuple[tf.Tensor, Any]]) -> Tuple[tf.Tensor, List[Any]]: 29 30 images, targets = zip(*samples) 31 images = tf.stack(images, axis=0) 32 33 return images, list(targets) 34 35 36 class VisionDataset(AbstractDataset, _VisionDataset): 37 pass 38 [end of doctr/datasets/datasets/tensorflow.py] [start of doctr/datasets/datasets/pytorch.py] 1 # Copyright (C) 2021-2022, Mindee. 
2 3 # This program is licensed under the Apache License version 2. 4 # See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details. 5 6 import os 7 from typing import Any, List, Tuple 8 9 import torch 10 11 from doctr.io import read_img_as_tensor 12 13 from .base import _AbstractDataset, _VisionDataset 14 15 __all__ = ['AbstractDataset', 'VisionDataset'] 16 17 18 class AbstractDataset(_AbstractDataset): 19 20 def _read_sample(self, index: int) -> Tuple[torch.Tensor, Any]: 21 img_name, target = self.data[index] 22 # Read image 23 img = read_img_as_tensor(os.path.join(self.root, img_name), dtype=torch.float32) 24 25 return img, target 26 27 @staticmethod 28 def collate_fn(samples: List[Tuple[torch.Tensor, Any]]) -> Tuple[torch.Tensor, List[Any]]: 29 30 images, targets = zip(*samples) 31 images = torch.stack(images, dim=0) 32 33 return images, list(targets) 34 35 36 class VisionDataset(AbstractDataset, _VisionDataset): 37 pass 38 [end of doctr/datasets/datasets/pytorch.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/doctr/datasets/datasets/pytorch.py b/doctr/datasets/datasets/pytorch.py --- a/doctr/datasets/datasets/pytorch.py +++ b/doctr/datasets/datasets/pytorch.py @@ -4,6 +4,7 @@ # See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details. import os +from copy import deepcopy from typing import Any, List, Tuple import torch @@ -22,7 +23,7 @@ # Read image img = read_img_as_tensor(os.path.join(self.root, img_name), dtype=torch.float32) - return img, target + return img, deepcopy(target) @staticmethod def collate_fn(samples: List[Tuple[torch.Tensor, Any]]) -> Tuple[torch.Tensor, List[Any]]: diff --git a/doctr/datasets/datasets/tensorflow.py b/doctr/datasets/datasets/tensorflow.py --- a/doctr/datasets/datasets/tensorflow.py +++ b/doctr/datasets/datasets/tensorflow.py @@ -4,6 +4,7 @@ # See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details. import os +from copy import deepcopy from typing import Any, List, Tuple import tensorflow as tf @@ -22,7 +23,7 @@ # Read image img = read_img_as_tensor(os.path.join(self.root, img_name), dtype=tf.float32) - return img, target + return img, deepcopy(target) @staticmethod def collate_fn(samples: List[Tuple[tf.Tensor, Any]]) -> Tuple[tf.Tensor, List[Any]]:
{"golden_diff": "diff --git a/doctr/datasets/datasets/pytorch.py b/doctr/datasets/datasets/pytorch.py\n--- a/doctr/datasets/datasets/pytorch.py\n+++ b/doctr/datasets/datasets/pytorch.py\n@@ -4,6 +4,7 @@\n # See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n \n import os\n+from copy import deepcopy\n from typing import Any, List, Tuple\n \n import torch\n@@ -22,7 +23,7 @@\n # Read image\n img = read_img_as_tensor(os.path.join(self.root, img_name), dtype=torch.float32)\n \n- return img, target\n+ return img, deepcopy(target)\n \n @staticmethod\n def collate_fn(samples: List[Tuple[torch.Tensor, Any]]) -> Tuple[torch.Tensor, List[Any]]:\ndiff --git a/doctr/datasets/datasets/tensorflow.py b/doctr/datasets/datasets/tensorflow.py\n--- a/doctr/datasets/datasets/tensorflow.py\n+++ b/doctr/datasets/datasets/tensorflow.py\n@@ -4,6 +4,7 @@\n # See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n \n import os\n+from copy import deepcopy\n from typing import Any, List, Tuple\n \n import tensorflow as tf\n@@ -22,7 +23,7 @@\n # Read image\n img = read_img_as_tensor(os.path.join(self.root, img_name), dtype=tf.float32)\n \n- return img, target\n+ return img, deepcopy(target)\n \n @staticmethod\n def collate_fn(samples: List[Tuple[tf.Tensor, Any]]) -> Tuple[tf.Tensor, List[Any]]:\n", "issue": "[datasets] Targets are modified inplace\n### Bug description\n\n**Targets** are being changed when iterating over some dataset more than one time.\r\nThe reason is storing targets in self.data, and changing them in the `__getitem__` ***in place*** using `pre_transforms`, etc.\r\n```python\r\n# _AbstractDataset\r\ndef __getitem__(\r\n self,\r\n index: int\r\n ) -> Tuple[Any, Any]:\r\n\r\n # Read image\r\n img, target = self._read_sample(index)\r\n # Pre-transforms (format conversion at run-time etc.)\r\n if self._pre_transforms is not None:\r\n img, target = self._pre_transforms(img, target)\r\n\r\n if self.img_transforms is not None:\r\n # typing issue cf. 
https://github.com/python/mypy/issues/5485\r\n img = self.img_transforms(img) # type: ignore[call-arg]\r\n\r\n if self.sample_transforms is not None:\r\n img, target = self.sample_transforms(img, target)\r\n\r\n return img, target\r\n```\r\n\r\nThis can be fixed by copying target in the `_read_sample` \r\n```python\r\n# AbstractDataset\r\ndef _read_sample(self, index: int) -> Tuple[tf.Tensor, Any]:\r\n img_name, target = self.data[index]\r\n # Read image\r\n img = read_img_as_tensor(os.path.join(self.root, img_name), dtype=tf.float32)\r\n\r\n return img, target\r\n```\r\n\r\n**OR** returning a copy of the target in all transform methods.\r\n```python\r\ndef convert_target_to_relative(img: ImageTensor, target: Dict[str, Any]) -> Tuple[ImageTensor, Dict[str, Any]]:\r\n\r\n target['boxes'] = convert_to_relative_coords(target['boxes'], get_img_shape(img))\r\n return img, target\r\n```\r\n\n\n### Code snippet to reproduce the bug\n\n```python\r\ndef process_image(train_example):\r\n img, target = train_example\r\n img_numpy = img.numpy() * 255\r\n for example in target['boxes']:\r\n print(example)\r\n unnormalized_example = [int(example[0]*img.shape[1]), int(example[1]*img.shape[0]),\r\n int(example[2]*img.shape[1]), int(example[3]*img.shape[0])]\r\n cv2.rectangle(img=img_numpy,\r\n pt1=(unnormalized_example[0], unnormalized_example[1]),\r\n pt2=(unnormalized_example[2], unnormalized_example[3]),\r\n color=(0, 0, 255), thickness=2)\r\n return img_numpy \r\n\r\n\r\ntrain_set = SROIE(train=True, download=True)\r\n\r\nfor i in range(2):\r\n for j, example in enumerate(train_set):\r\n if j == 0: \r\n print(f\"{i} ____\")\r\n img_n = process_image(example)\r\n```\r\n\r\nP.S. Sorry for not a pretty code style. This snippet is just for an example :) \n\n### Error traceback\n\n~changed target box coordinates\n\n### Environment\n\n.\n", "before_files": [{"content": "# Copyright (C) 2021-2022, Mindee.\n\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nimport os\nfrom typing import Any, List, Tuple\n\nimport tensorflow as tf\n\nfrom doctr.io import read_img_as_tensor\n\nfrom .base import _AbstractDataset, _VisionDataset\n\n__all__ = ['AbstractDataset', 'VisionDataset']\n\n\nclass AbstractDataset(_AbstractDataset):\n\n def _read_sample(self, index: int) -> Tuple[tf.Tensor, Any]:\n img_name, target = self.data[index]\n # Read image\n img = read_img_as_tensor(os.path.join(self.root, img_name), dtype=tf.float32)\n\n return img, target\n\n @staticmethod\n def collate_fn(samples: List[Tuple[tf.Tensor, Any]]) -> Tuple[tf.Tensor, List[Any]]:\n\n images, targets = zip(*samples)\n images = tf.stack(images, axis=0)\n\n return images, list(targets)\n\n\nclass VisionDataset(AbstractDataset, _VisionDataset):\n pass\n", "path": "doctr/datasets/datasets/tensorflow.py"}, {"content": "# Copyright (C) 2021-2022, Mindee.\n\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nimport os\nfrom typing import Any, List, Tuple\n\nimport torch\n\nfrom doctr.io import read_img_as_tensor\n\nfrom .base import _AbstractDataset, _VisionDataset\n\n__all__ = ['AbstractDataset', 'VisionDataset']\n\n\nclass AbstractDataset(_AbstractDataset):\n\n def _read_sample(self, index: int) -> Tuple[torch.Tensor, Any]:\n img_name, target = self.data[index]\n # Read image\n img = read_img_as_tensor(os.path.join(self.root, 
img_name), dtype=torch.float32)\n\n return img, target\n\n @staticmethod\n def collate_fn(samples: List[Tuple[torch.Tensor, Any]]) -> Tuple[torch.Tensor, List[Any]]:\n\n images, targets = zip(*samples)\n images = torch.stack(images, dim=0)\n\n return images, list(targets)\n\n\nclass VisionDataset(AbstractDataset, _VisionDataset):\n pass\n", "path": "doctr/datasets/datasets/pytorch.py"}]}
1,855
387
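A note on the doctr row above: the root cause is aliasing. Every `__getitem__` call hands the transform pipeline a reference to the same target dict kept in `self.data`, so an in-place normalisation is applied again on the next epoch; the accepted patch deep-copies the target in `_read_sample`. Below is a minimal, framework-free sketch of the aliasing bug and the copy-based fix (the names and box values are illustrative, not doctr's actual API):

```python
from copy import deepcopy

def to_relative(target, width, height):
    # In-place "pre-transform": mutates the dict it is given.
    target["boxes"] = [(x0 / width, y0 / height, x1 / width, y1 / height)
                       for x0, y0, x1, y1 in target["boxes"]]
    return target

data = [{"boxes": [(10, 20, 30, 40)]}]  # stored once, like self.data

def read_sample(index, copy_target):
    target = data[index]
    return deepcopy(target) if copy_target else target

for _ in range(2):                       # two "epochs" without the copy
    to_relative(read_sample(0, copy_target=False), width=100, height=100)
print(data[0]["boxes"])                  # boxes got normalised twice: stored data corrupted

data = [{"boxes": [(10, 20, 30, 40)]}]
for _ in range(2):                       # two "epochs" with the copy
    to_relative(read_sample(0, copy_target=True), width=100, height=100)
print(data[0]["boxes"])                  # [(10, 20, 30, 40)]: stored target left intact
```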
gh_patches_debug_24256
rasdani/github-patches
git_diff
mlcommons__GaNDLF-228
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add center cropping **Is your feature request related to a problem? Please describe.** We do not have any mechanism to perform cropping, which is important for certain DL training problems. **Describe the solution you'd like** Expose the [cropping functionality in TorchIO](https://torchio.readthedocs.io/transforms/preprocessing.html?highlight=crop#torchio.transforms.Crop) as a preprocessing mechanism. **Describe alternatives you've considered** N.A. **Additional context** Requested by @Geeks-Sid for SBU-TIL. </issue> <code> [start of GANDLF/data/preprocessing/__init__.py] 1 from .crop_zero_planes import CropExternalZeroplanes 2 from .non_zero_normalize import NonZeroNormalizeOnMaskedRegion 3 from .threshold_and_clip import ( 4 threshold_transform, 5 clip_transform, 6 ) 7 from .normalize_rgb import ( 8 normalize_by_val_transform, 9 normalize_imagenet_transform, 10 normalize_standardize_transform, 11 normalize_div_by_255_transform, 12 ) 13 14 from torchio.transforms import ( 15 ZNormalization, 16 ToCanonical, 17 ) 18 19 20 def positive_voxel_mask(image): 21 return image > 0 22 23 24 def nonzero_voxel_mask(image): 25 return image != 0 26 27 28 def to_canonical_transform(parameters): 29 return ToCanonical() 30 31 32 # defining dict for pre-processing - key is the string and the value is the transform object 33 global_preprocessing_dict = { 34 "to_canonical": to_canonical_transform, 35 "threshold": threshold_transform, 36 "clip": clip_transform, 37 "clamp": clip_transform, 38 "crop_external_zero_planes": CropExternalZeroplanes, 39 "normalize_by_val": normalize_by_val_transform, 40 "normalize_imagenet": normalize_imagenet_transform, 41 "normalize_standardize": normalize_standardize_transform, 42 "normalize_div_by_255": normalize_div_by_255_transform, 43 "normalize": ZNormalization(), 44 "normalize_positive": ZNormalization(masking_method=positive_voxel_mask), 45 "normalize_nonZero": ZNormalization(masking_method=nonzero_voxel_mask), 46 "normalize_nonZero_masked": NonZeroNormalizeOnMaskedRegion(), 47 } 48 [end of GANDLF/data/preprocessing/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/GANDLF/data/preprocessing/__init__.py b/GANDLF/data/preprocessing/__init__.py --- a/GANDLF/data/preprocessing/__init__.py +++ b/GANDLF/data/preprocessing/__init__.py @@ -14,6 +14,8 @@ from torchio.transforms import ( ZNormalization, ToCanonical, + Crop, + CropOrPad, ) @@ -29,6 +31,14 @@ return ToCanonical() +def crop_transform(patch_size): + return Crop(patch_size) + + +def centercrop_transform(patch_size): + return CropOrPad(target_shape=patch_size) + + # defining dict for pre-processing - key is the string and the value is the transform object global_preprocessing_dict = { "to_canonical": to_canonical_transform, @@ -36,6 +46,8 @@ "clip": clip_transform, "clamp": clip_transform, "crop_external_zero_planes": CropExternalZeroplanes, + "crop": crop_transform, + "centercrop": centercrop_transform, "normalize_by_val": normalize_by_val_transform, "normalize_imagenet": normalize_imagenet_transform, "normalize_standardize": normalize_standardize_transform,
{"golden_diff": "diff --git a/GANDLF/data/preprocessing/__init__.py b/GANDLF/data/preprocessing/__init__.py\n--- a/GANDLF/data/preprocessing/__init__.py\n+++ b/GANDLF/data/preprocessing/__init__.py\n@@ -14,6 +14,8 @@\n from torchio.transforms import (\n ZNormalization,\n ToCanonical,\n+ Crop,\n+ CropOrPad,\n )\n \n \n@@ -29,6 +31,14 @@\n return ToCanonical()\n \n \n+def crop_transform(patch_size):\n+ return Crop(patch_size)\n+\n+\n+def centercrop_transform(patch_size):\n+ return CropOrPad(target_shape=patch_size)\n+\n+\n # defining dict for pre-processing - key is the string and the value is the transform object\n global_preprocessing_dict = {\n \"to_canonical\": to_canonical_transform,\n@@ -36,6 +46,8 @@\n \"clip\": clip_transform,\n \"clamp\": clip_transform,\n \"crop_external_zero_planes\": CropExternalZeroplanes,\n+ \"crop\": crop_transform,\n+ \"centercrop\": centercrop_transform,\n \"normalize_by_val\": normalize_by_val_transform,\n \"normalize_imagenet\": normalize_imagenet_transform,\n \"normalize_standardize\": normalize_standardize_transform,\n", "issue": "Add center cropping\n**Is your feature request related to a problem? Please describe.**\r\nWe do not have any mechanism to perform cropping, which is important for certain DL training problems.\r\n\r\n**Describe the solution you'd like**\r\nExpose the [cropping functionality in TorchIO](https://torchio.readthedocs.io/transforms/preprocessing.html?highlight=crop#torchio.transforms.Crop) as a preprocessing mechanism.\r\n\r\n**Describe alternatives you've considered**\r\nN.A.\r\n\r\n**Additional context**\r\nRequested by @Geeks-Sid for SBU-TIL.\r\n\n", "before_files": [{"content": "from .crop_zero_planes import CropExternalZeroplanes\nfrom .non_zero_normalize import NonZeroNormalizeOnMaskedRegion\nfrom .threshold_and_clip import (\n threshold_transform,\n clip_transform,\n)\nfrom .normalize_rgb import (\n normalize_by_val_transform,\n normalize_imagenet_transform,\n normalize_standardize_transform,\n normalize_div_by_255_transform,\n)\n\nfrom torchio.transforms import (\n ZNormalization,\n ToCanonical,\n)\n\n\ndef positive_voxel_mask(image):\n return image > 0\n\n\ndef nonzero_voxel_mask(image):\n return image != 0\n\n\ndef to_canonical_transform(parameters):\n return ToCanonical()\n\n\n# defining dict for pre-processing - key is the string and the value is the transform object\nglobal_preprocessing_dict = {\n \"to_canonical\": to_canonical_transform,\n \"threshold\": threshold_transform,\n \"clip\": clip_transform,\n \"clamp\": clip_transform,\n \"crop_external_zero_planes\": CropExternalZeroplanes,\n \"normalize_by_val\": normalize_by_val_transform,\n \"normalize_imagenet\": normalize_imagenet_transform,\n \"normalize_standardize\": normalize_standardize_transform,\n \"normalize_div_by_255\": normalize_div_by_255_transform,\n \"normalize\": ZNormalization(),\n \"normalize_positive\": ZNormalization(masking_method=positive_voxel_mask),\n \"normalize_nonZero\": ZNormalization(masking_method=nonzero_voxel_mask),\n \"normalize_nonZero_masked\": NonZeroNormalizeOnMaskedRegion(),\n}\n", "path": "GANDLF/data/preprocessing/__init__.py"}]}
1,072
278
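A note on the GaNDLF row above: the patch only needs to map two new preprocessing keys onto existing TorchIO transforms, `Crop` (trim a fixed number of voxels per border) and `CropOrPad` (centre-crop or pad to an explicit shape). The following is a short usage sketch of those two transforms, assuming TorchIO is installed; the shapes are made up for illustration:

```python
import torch
import torchio as tio

subject = tio.Subject(image=tio.ScalarImage(tensor=torch.rand(1, 64, 64, 64)))

# Crop(8) removes 8 voxels from every border, so the 64^3 volume becomes 48^3.
cropped = tio.Crop(8)(subject)

# CropOrPad centre-crops (or pads) to the requested target shape; this is the
# behaviour a "centercrop" preprocessing key would expose.
centered = tio.CropOrPad(target_shape=(32, 32, 32))(subject)

print(cropped["image"].shape)    # (1, 48, 48, 48)
print(centered["image"].shape)   # (1, 32, 32, 32)
```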
gh_patches_debug_31454
rasdani/github-patches
git_diff
zulip__zulip-18885
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Permissions and warning for custom emoji overriding unicode emoji Only administrators/owners should be able to override unicode emoji 1. If an administrator attempts to override a unicode emoji with a custom emoji, they should get a warning. #16937 attempts to fix this, but it is currently not working in production. We should also shorten the warning message and avoid referring to "unicode" to avoid confusing non-technical users: >**Override built-in emoji?** > Uploading a custom emoji with the name **<name>** will override the built-in **<name>** emoji. Continue? 2. If a non-administrator attempts to override an emoji, show an error in the same style as the error for overriding custom emoji (screenshot below). Text: "Failed: An emoji with this name already exists. Only administrators can override built-in emoji." Error for overriding custom emoji: <img width="531" alt="Screen Shot 2021-06-15 at 2 30 38 PM" src="https://user-images.githubusercontent.com/2090066/122126418-915e9880-cde6-11eb-86f6-0a4338478739.png"> Related issue: #18269 [Related CZO thread](https://chat.zulip.org/#narrow/stream/2-general/topic/ok.20emoji) </issue> <code> [start of zerver/views/realm_emoji.py] 1 from django.conf import settings 2 from django.http import HttpRequest, HttpResponse 3 from django.utils.translation import gettext as _ 4 5 from zerver.decorator import require_member_or_admin 6 from zerver.lib.actions import check_add_realm_emoji, do_remove_realm_emoji 7 from zerver.lib.emoji import check_emoji_admin, check_valid_emoji_name 8 from zerver.lib.request import REQ, JsonableError, has_request_variables 9 from zerver.lib.response import json_success 10 from zerver.models import RealmEmoji, UserProfile 11 12 13 def list_emoji(request: HttpRequest, user_profile: UserProfile) -> HttpResponse: 14 15 # We don't call check_emoji_admin here because the list of realm 16 # emoji is public. 
17 return json_success({"emoji": user_profile.realm.get_emoji()}) 18 19 20 @require_member_or_admin 21 @has_request_variables 22 def upload_emoji( 23 request: HttpRequest, user_profile: UserProfile, emoji_name: str = REQ(path_only=True) 24 ) -> HttpResponse: 25 emoji_name = emoji_name.strip().replace(" ", "_") 26 check_valid_emoji_name(emoji_name) 27 check_emoji_admin(user_profile) 28 if RealmEmoji.objects.filter( 29 realm=user_profile.realm, name=emoji_name, deactivated=False 30 ).exists(): 31 raise JsonableError(_("A custom emoji with this name already exists.")) 32 if len(request.FILES) != 1: 33 raise JsonableError(_("You must upload exactly one file.")) 34 emoji_file = list(request.FILES.values())[0] 35 if (settings.MAX_EMOJI_FILE_SIZE_MIB * 1024 * 1024) < emoji_file.size: 36 raise JsonableError( 37 _("Uploaded file is larger than the allowed limit of {} MiB").format( 38 settings.MAX_EMOJI_FILE_SIZE_MIB, 39 ) 40 ) 41 42 realm_emoji = check_add_realm_emoji(user_profile.realm, emoji_name, user_profile, emoji_file) 43 if realm_emoji is None: 44 raise JsonableError(_("Image file upload failed.")) 45 return json_success() 46 47 48 def delete_emoji(request: HttpRequest, user_profile: UserProfile, emoji_name: str) -> HttpResponse: 49 if not RealmEmoji.objects.filter( 50 realm=user_profile.realm, name=emoji_name, deactivated=False 51 ).exists(): 52 raise JsonableError(_("Emoji '{}' does not exist").format(emoji_name)) 53 check_emoji_admin(user_profile, emoji_name) 54 do_remove_realm_emoji(user_profile.realm, emoji_name) 55 return json_success() 56 [end of zerver/views/realm_emoji.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/zerver/views/realm_emoji.py b/zerver/views/realm_emoji.py --- a/zerver/views/realm_emoji.py +++ b/zerver/views/realm_emoji.py @@ -4,7 +4,7 @@ from zerver.decorator import require_member_or_admin from zerver.lib.actions import check_add_realm_emoji, do_remove_realm_emoji -from zerver.lib.emoji import check_emoji_admin, check_valid_emoji_name +from zerver.lib.emoji import check_emoji_admin, check_valid_emoji_name, name_to_codepoint from zerver.lib.request import REQ, JsonableError, has_request_variables from zerver.lib.response import json_success from zerver.models import RealmEmoji, UserProfile @@ -23,6 +23,7 @@ request: HttpRequest, user_profile: UserProfile, emoji_name: str = REQ(path_only=True) ) -> HttpResponse: emoji_name = emoji_name.strip().replace(" ", "_") + valid_built_in_emoji = name_to_codepoint.keys() check_valid_emoji_name(emoji_name) check_emoji_admin(user_profile) if RealmEmoji.objects.filter( @@ -31,6 +32,9 @@ raise JsonableError(_("A custom emoji with this name already exists.")) if len(request.FILES) != 1: raise JsonableError(_("You must upload exactly one file.")) + if emoji_name in valid_built_in_emoji: + if not user_profile.is_realm_admin: + raise JsonableError(_("Only administrators can override built-in emoji.")) emoji_file = list(request.FILES.values())[0] if (settings.MAX_EMOJI_FILE_SIZE_MIB * 1024 * 1024) < emoji_file.size: raise JsonableError(
{"golden_diff": "diff --git a/zerver/views/realm_emoji.py b/zerver/views/realm_emoji.py\n--- a/zerver/views/realm_emoji.py\n+++ b/zerver/views/realm_emoji.py\n@@ -4,7 +4,7 @@\n \n from zerver.decorator import require_member_or_admin\n from zerver.lib.actions import check_add_realm_emoji, do_remove_realm_emoji\n-from zerver.lib.emoji import check_emoji_admin, check_valid_emoji_name\n+from zerver.lib.emoji import check_emoji_admin, check_valid_emoji_name, name_to_codepoint\n from zerver.lib.request import REQ, JsonableError, has_request_variables\n from zerver.lib.response import json_success\n from zerver.models import RealmEmoji, UserProfile\n@@ -23,6 +23,7 @@\n request: HttpRequest, user_profile: UserProfile, emoji_name: str = REQ(path_only=True)\n ) -> HttpResponse:\n emoji_name = emoji_name.strip().replace(\" \", \"_\")\n+ valid_built_in_emoji = name_to_codepoint.keys()\n check_valid_emoji_name(emoji_name)\n check_emoji_admin(user_profile)\n if RealmEmoji.objects.filter(\n@@ -31,6 +32,9 @@\n raise JsonableError(_(\"A custom emoji with this name already exists.\"))\n if len(request.FILES) != 1:\n raise JsonableError(_(\"You must upload exactly one file.\"))\n+ if emoji_name in valid_built_in_emoji:\n+ if not user_profile.is_realm_admin:\n+ raise JsonableError(_(\"Only administrators can override built-in emoji.\"))\n emoji_file = list(request.FILES.values())[0]\n if (settings.MAX_EMOJI_FILE_SIZE_MIB * 1024 * 1024) < emoji_file.size:\n raise JsonableError(\n", "issue": "Permissions and warning for custom emoji overriding unicode emoji\nOnly administrators/owners should be able to override unicode emoji\r\n\r\n1. If an administrator attempts to override a unicode emoji with a custom emoji, they should get a warning. #16937 attempts to fix this, but it is currently not working in production.\r\n\r\nWe should also shorten the warning message and avoid referring to \"unicode\" to avoid confusing non-technical users:\r\n>**Override built-in emoji?**\r\n> Uploading a custom emoji with the name **<name>** will override the built-in **<name>** emoji. Continue?\r\n\r\n2. If a non-administrator attempts to override an emoji, show an error in the same style as the error for overriding custom emoji (screenshot below). Text: \"Failed: An emoji with this name already exists. 
Only administrators can override built-in emoji.\"\r\n\r\nError for overriding custom emoji:\r\n<img width=\"531\" alt=\"Screen Shot 2021-06-15 at 2 30 38 PM\" src=\"https://user-images.githubusercontent.com/2090066/122126418-915e9880-cde6-11eb-86f6-0a4338478739.png\">\r\n\r\nRelated issue: #18269\r\n[Related CZO thread](https://chat.zulip.org/#narrow/stream/2-general/topic/ok.20emoji)\r\n\n", "before_files": [{"content": "from django.conf import settings\nfrom django.http import HttpRequest, HttpResponse\nfrom django.utils.translation import gettext as _\n\nfrom zerver.decorator import require_member_or_admin\nfrom zerver.lib.actions import check_add_realm_emoji, do_remove_realm_emoji\nfrom zerver.lib.emoji import check_emoji_admin, check_valid_emoji_name\nfrom zerver.lib.request import REQ, JsonableError, has_request_variables\nfrom zerver.lib.response import json_success\nfrom zerver.models import RealmEmoji, UserProfile\n\n\ndef list_emoji(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:\n\n # We don't call check_emoji_admin here because the list of realm\n # emoji is public.\n return json_success({\"emoji\": user_profile.realm.get_emoji()})\n\n\n@require_member_or_admin\n@has_request_variables\ndef upload_emoji(\n request: HttpRequest, user_profile: UserProfile, emoji_name: str = REQ(path_only=True)\n) -> HttpResponse:\n emoji_name = emoji_name.strip().replace(\" \", \"_\")\n check_valid_emoji_name(emoji_name)\n check_emoji_admin(user_profile)\n if RealmEmoji.objects.filter(\n realm=user_profile.realm, name=emoji_name, deactivated=False\n ).exists():\n raise JsonableError(_(\"A custom emoji with this name already exists.\"))\n if len(request.FILES) != 1:\n raise JsonableError(_(\"You must upload exactly one file.\"))\n emoji_file = list(request.FILES.values())[0]\n if (settings.MAX_EMOJI_FILE_SIZE_MIB * 1024 * 1024) < emoji_file.size:\n raise JsonableError(\n _(\"Uploaded file is larger than the allowed limit of {} MiB\").format(\n settings.MAX_EMOJI_FILE_SIZE_MIB,\n )\n )\n\n realm_emoji = check_add_realm_emoji(user_profile.realm, emoji_name, user_profile, emoji_file)\n if realm_emoji is None:\n raise JsonableError(_(\"Image file upload failed.\"))\n return json_success()\n\n\ndef delete_emoji(request: HttpRequest, user_profile: UserProfile, emoji_name: str) -> HttpResponse:\n if not RealmEmoji.objects.filter(\n realm=user_profile.realm, name=emoji_name, deactivated=False\n ).exists():\n raise JsonableError(_(\"Emoji '{}' does not exist\").format(emoji_name))\n check_emoji_admin(user_profile, emoji_name)\n do_remove_realm_emoji(user_profile.realm, emoji_name)\n return json_success()\n", "path": "zerver/views/realm_emoji.py"}]}
1,489
392
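A note on the Zulip row above: the server-side rule has two tiers, an unconditional conflict with existing custom emoji and an admin-only override of built-in names. A framework-free sketch of that rule follows (the name sets are placeholders standing in for active `RealmEmoji` rows and `name_to_codepoint`):

```python
BUILT_IN = {"smile", "octopus"}      # stand-in for name_to_codepoint.keys()
CUSTOM = {"zulip"}                   # stand-in for active RealmEmoji names

def check_emoji_upload(name, is_realm_admin):
    if name in CUSTOM:
        raise ValueError("A custom emoji with this name already exists.")
    if name in BUILT_IN and not is_realm_admin:
        raise ValueError("Only administrators can override built-in emoji.")
    return "ok"

assert check_emoji_upload("party_parrot", is_realm_admin=False) == "ok"
assert check_emoji_upload("smile", is_realm_admin=True) == "ok"   # admin override allowed
```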
gh_patches_debug_14158
rasdani/github-patches
git_diff
Zeroto521__my-data-toolkit-834
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> BUG: `geocentroid` coordiantes should divide distance <!-- Thanks for contributing a pull request! Please follow these standard acronyms to start the commit message: - ENH: enhancement - BUG: bug fix - DOC: documentation - TYP: type annotations - TST: addition or modification of tests - MAINT: maintenance commit (refactoring, typos, etc.) - BLD: change related to building - REL: related to releasing - API: an (incompatible) API change - DEP: deprecate something, or remove a deprecated object - DEV: development tool or utility - REV: revert an earlier commit - PERF: performance improvement - BOT: always commit via a bot - CI: related to CI or CD - CLN: Code cleanup --> - [x] closes #832 - [x] whatsnew entry ```latex \left\{\begin{matrix} d_i &=& D(P(\bar{x}_n, \bar{y}_n), P(x_i,y_i)) \\ \bar{x}_0 &=& \frac{\sum w_i x_i}{\sum w_i} \\ \bar{y}_0 &=& \frac{\sum w_i y_i}{\sum w_i} \\ \bar{x}_{n+1} &=& \frac{\sum w_i x_i / d_i}{\sum w_i / d_i} \\ \bar{y}_{n+1} &=& \frac{\sum w_i y_i / d_i}{\sum w_i / d_i} \\ \end{matrix}\right. ``` </issue> <code> [start of dtoolkit/geoaccessor/geoseries/geocentroid.py] 1 import geopandas as gpd 2 import numpy as np 3 import pandas as pd 4 from shapely import Point 5 6 from dtoolkit.geoaccessor.geoseries.geodistance import geodistance 7 from dtoolkit.geoaccessor.geoseries.xy import xy 8 from dtoolkit.geoaccessor.register import register_geoseries_method 9 10 11 @register_geoseries_method 12 def geocentroid( 13 s: gpd.GeoSeries, 14 /, 15 weights: pd.Series = None, 16 max_iter: int = 300, 17 tol: float = 1e-5, 18 ) -> Point: 19 r""" 20 Return the centroid of all points via the center of gravity method. 21 22 .. math:: 23 24 \left\{\begin{matrix} 25 d_i &=& D(P(\bar{x}_n, \bar{y}_n), P(x_i, y_i)) \\ 26 \bar{x}_0 &=& \frac{\sum w_i x_i}{\sum w_i} \\ 27 \bar{y}_0 &=& \frac{\sum w_i y_i}{\sum w_i} \\ 28 \bar{x}_{n+1} &=& \frac{\sum w_i x_i / d_i}{\sum w_i / d_i} \\ 29 \bar{y}_{n+1} &=& \frac{\sum w_i y_i / d_i}{\sum w_i / d_i} \\ 30 \end{matrix}\right. 31 32 Parameters 33 ---------- 34 weights : Hashable or 1d array-like, optional 35 - None : All weights will be set to 1. 36 - Hashable : Only for DataFrame, the column name. 37 - 1d array-like : The weights of each point. 38 39 max_iter : int, default 300 40 Maximum number of iterations to perform. 41 42 tol : float, default 1e-5 43 Tolerance for convergence. 44 45 Returns 46 ------- 47 Point 48 49 See Also 50 -------- 51 geopandas.GeoSeries.centroid 52 dtoolkit.geoaccessor.geoseries.geocentroid 53 dtoolkit.geoaccessor.geodataframe.geocentroid 54 55 Examples 56 -------- 57 >>> import dtoolkit.geoaccessor 58 >>> import geopandas as gpd 59 >>> from shapely import Point 60 >>> df = gpd.GeoDataFrame( 61 ... { 62 ... "weights": [1, 2, 3], 63 ... "geometry": [Point(100, 32), Point(120, 50), Point(122, 55)], 64 ... }, 65 ... crs=4326, 66 ... ) 67 >>> df 68 weights geometry 69 0 1 POINT (100.00000 32.00000) 70 1 2 POINT (120.00000 50.00000) 71 2 3 POINT (122.00000 55.00000) 72 >>> df.geocentroid() 73 <POINT (120 50)> 74 75 Set weights for each point. 
76 77 >>> df.geocentroid("weights") 78 <POINT (121.999 54.998)> 79 >>> df.geocentroid([1, 2, 3]) 80 <POINT (121.999 54.998)> 81 """ 82 83 weights = np.asarray(weights) if weights is not None else 1 84 coord = xy(s) 85 X = coord.mean() 86 for _ in range(max_iter): 87 dis = geodistance(s, Point(*X.tolist())).rdiv(1).mul(weights, axis=0) 88 Xt = coord.mul(dis, axis=0).sum() / dis.sum() 89 90 if ((X - Xt).abs() <= tol).all(): 91 X = Xt 92 break 93 94 X = Xt 95 96 return Point(*X.tolist()) 97 [end of dtoolkit/geoaccessor/geoseries/geocentroid.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dtoolkit/geoaccessor/geoseries/geocentroid.py b/dtoolkit/geoaccessor/geoseries/geocentroid.py --- a/dtoolkit/geoaccessor/geoseries/geocentroid.py +++ b/dtoolkit/geoaccessor/geoseries/geocentroid.py @@ -75,14 +75,14 @@ Set weights for each point. >>> df.geocentroid("weights") - <POINT (121.999 54.998)> + <POINT (121.999 54.999)> >>> df.geocentroid([1, 2, 3]) - <POINT (121.999 54.998)> + <POINT (121.999 54.999)> """ weights = np.asarray(weights) if weights is not None else 1 coord = xy(s) - X = coord.mean() + X = coord.mul(weights, axis=0).mean() for _ in range(max_iter): dis = geodistance(s, Point(*X.tolist())).rdiv(1).mul(weights, axis=0) Xt = coord.mul(dis, axis=0).sum() / dis.sum()
{"golden_diff": "diff --git a/dtoolkit/geoaccessor/geoseries/geocentroid.py b/dtoolkit/geoaccessor/geoseries/geocentroid.py\n--- a/dtoolkit/geoaccessor/geoseries/geocentroid.py\n+++ b/dtoolkit/geoaccessor/geoseries/geocentroid.py\n@@ -75,14 +75,14 @@\n Set weights for each point.\n \n >>> df.geocentroid(\"weights\")\n- <POINT (121.999 54.998)>\n+ <POINT (121.999 54.999)>\n >>> df.geocentroid([1, 2, 3])\n- <POINT (121.999 54.998)>\n+ <POINT (121.999 54.999)>\n \"\"\"\n \n weights = np.asarray(weights) if weights is not None else 1\n coord = xy(s)\n- X = coord.mean()\n+ X = coord.mul(weights, axis=0).mean()\n for _ in range(max_iter):\n dis = geodistance(s, Point(*X.tolist())).rdiv(1).mul(weights, axis=0)\n Xt = coord.mul(dis, axis=0).sum() / dis.sum()\n", "issue": "BUG: `geocentroid` coordiantes should divide distance\n<!--\r\nThanks for contributing a pull request!\r\n\r\nPlease follow these standard acronyms to start the commit message:\r\n\r\n- ENH: enhancement\r\n- BUG: bug fix\r\n- DOC: documentation\r\n- TYP: type annotations\r\n- TST: addition or modification of tests\r\n- MAINT: maintenance commit (refactoring, typos, etc.)\r\n- BLD: change related to building\r\n- REL: related to releasing\r\n- API: an (incompatible) API change\r\n- DEP: deprecate something, or remove a deprecated object\r\n- DEV: development tool or utility\r\n- REV: revert an earlier commit\r\n- PERF: performance improvement\r\n- BOT: always commit via a bot\r\n- CI: related to CI or CD\r\n- CLN: Code cleanup\r\n-->\r\n\r\n- [x] closes #832\r\n- [x] whatsnew entry\r\n\r\n```latex\r\n \\left\\{\\begin{matrix}\r\n d_i &=& D(P(\\bar{x}_n, \\bar{y}_n), P(x_i,y_i)) \\\\\r\n \\bar{x}_0 &=& \\frac{\\sum w_i x_i}{\\sum w_i} \\\\\r\n \\bar{y}_0 &=& \\frac{\\sum w_i y_i}{\\sum w_i} \\\\\r\n \\bar{x}_{n+1} &=& \\frac{\\sum w_i x_i / d_i}{\\sum w_i / d_i} \\\\\r\n \\bar{y}_{n+1} &=& \\frac{\\sum w_i y_i / d_i}{\\sum w_i / d_i} \\\\\r\n \\end{matrix}\\right.\r\n```\n", "before_files": [{"content": "import geopandas as gpd\nimport numpy as np\nimport pandas as pd\nfrom shapely import Point\n\nfrom dtoolkit.geoaccessor.geoseries.geodistance import geodistance\nfrom dtoolkit.geoaccessor.geoseries.xy import xy\nfrom dtoolkit.geoaccessor.register import register_geoseries_method\n\n\n@register_geoseries_method\ndef geocentroid(\n s: gpd.GeoSeries,\n /,\n weights: pd.Series = None,\n max_iter: int = 300,\n tol: float = 1e-5,\n) -> Point:\n r\"\"\"\n Return the centroid of all points via the center of gravity method.\n\n .. 
math::\n\n \\left\\{\\begin{matrix}\n d_i &=& D(P(\\bar{x}_n, \\bar{y}_n), P(x_i, y_i)) \\\\\n \\bar{x}_0 &=& \\frac{\\sum w_i x_i}{\\sum w_i} \\\\\n \\bar{y}_0 &=& \\frac{\\sum w_i y_i}{\\sum w_i} \\\\\n \\bar{x}_{n+1} &=& \\frac{\\sum w_i x_i / d_i}{\\sum w_i / d_i} \\\\\n \\bar{y}_{n+1} &=& \\frac{\\sum w_i y_i / d_i}{\\sum w_i / d_i} \\\\\n \\end{matrix}\\right.\n\n Parameters\n ----------\n weights : Hashable or 1d array-like, optional\n - None : All weights will be set to 1.\n - Hashable : Only for DataFrame, the column name.\n - 1d array-like : The weights of each point.\n\n max_iter : int, default 300\n Maximum number of iterations to perform.\n\n tol : float, default 1e-5\n Tolerance for convergence.\n\n Returns\n -------\n Point\n\n See Also\n --------\n geopandas.GeoSeries.centroid\n dtoolkit.geoaccessor.geoseries.geocentroid\n dtoolkit.geoaccessor.geodataframe.geocentroid\n\n Examples\n --------\n >>> import dtoolkit.geoaccessor\n >>> import geopandas as gpd\n >>> from shapely import Point\n >>> df = gpd.GeoDataFrame(\n ... {\n ... \"weights\": [1, 2, 3],\n ... \"geometry\": [Point(100, 32), Point(120, 50), Point(122, 55)],\n ... },\n ... crs=4326,\n ... )\n >>> df\n weights geometry\n 0 1 POINT (100.00000 32.00000)\n 1 2 POINT (120.00000 50.00000)\n 2 3 POINT (122.00000 55.00000)\n >>> df.geocentroid()\n <POINT (120 50)>\n\n Set weights for each point.\n\n >>> df.geocentroid(\"weights\")\n <POINT (121.999 54.998)>\n >>> df.geocentroid([1, 2, 3])\n <POINT (121.999 54.998)>\n \"\"\"\n\n weights = np.asarray(weights) if weights is not None else 1\n coord = xy(s)\n X = coord.mean()\n for _ in range(max_iter):\n dis = geodistance(s, Point(*X.tolist())).rdiv(1).mul(weights, axis=0)\n Xt = coord.mul(dis, axis=0).sum() / dis.sum()\n\n if ((X - Xt).abs() <= tol).all():\n X = Xt\n break\n\n X = Xt\n\n return Point(*X.tolist())\n", "path": "dtoolkit/geoaccessor/geoseries/geocentroid.py"}]}
1,998
297
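A note on the geocentroid row above: the displayed system is an iteratively re-weighted centroid (start from the weighted mean, then keep re-averaging the coordinates with weights w_i / d_i until the move is below `tol`). Below is a small NumPy sketch of the same iteration, using plain Euclidean distance in place of the accessor's geodesic distance, purely to illustrate the update rule:

```python
import numpy as np

def weighted_centroid(points, weights, max_iter=300, tol=1e-5):
    pts = np.asarray(points, dtype=float)        # shape (n, 2)
    w = np.asarray(weights, dtype=float)         # shape (n,)
    center = (pts * w[:, None]).sum(axis=0) / w.sum()   # weighted-mean starting point
    for _ in range(max_iter):
        d = np.linalg.norm(pts - center, axis=1)
        d = np.where(d == 0, 1e-12, d)           # avoid division by zero at a data point
        wd = w / d
        new_center = (pts * wd[:, None]).sum(axis=0) / wd.sum()
        done = np.all(np.abs(new_center - center) <= tol)
        center = new_center
        if done:
            break
    return center

print(weighted_centroid([(100, 32), (120, 50), (122, 55)], [1, 2, 3]))
```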
gh_patches_debug_19943
rasdani/github-patches
git_diff
fossasia__open-event-server-2937
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Adding user image to profile and saving it results in error page After user uploads an image to the profile page and updates/saves the profile an error page shows up. ![screenshot from 2016-12-31 15-36-41](https://cloud.githubusercontent.com/assets/1583873/21577975/160aba80-cf6f-11e6-999a-d42a21e9561b.png) </issue> <code> [start of app/views/users/profile.py] 1 from uuid import uuid4 2 3 from flask import Blueprint 4 from flask import render_template 5 from flask import request, url_for, redirect, flash, jsonify 6 from flask.ext import login 7 from markupsafe import Markup 8 9 from app.helpers.auth import AuthManager 10 from app.helpers.data import DataManager, get_facebook_auth, get_instagram_auth, get_twitter_auth_url, save_to_db, get_google_auth 11 from app.helpers.data_getter import DataGetter 12 from app.helpers.helpers import uploaded_file 13 from app.helpers.oauth import FbOAuth, InstagramOAuth, OAuth 14 from app.helpers.storage import upload, UPLOAD_PATHS 15 16 profile = Blueprint('profile', __name__, url_prefix='/profile') 17 18 19 @profile.route('/') 20 def index_view(): 21 if not AuthManager.is_verified_user(): 22 flash(Markup("Your account is unverified. " 23 "Please verify by clicking on the confirmation link that has been emailed to you." 24 '<br>Did not get the email? Please <a href="/resend_email/" class="alert-link"> ' 25 'click here to resend the confirmation.</a>')) 26 profile = DataGetter.get_user(login.current_user.id) 27 return render_template('gentelella/admin/profile/index.html', 28 profile=profile) 29 30 31 @profile.route('/edit/', methods=('GET', 'POST')) 32 @profile.route('/edit/<user_id>', methods=('GET', 'POST')) 33 def edit_view(user_id=None): 34 admin = None 35 if not user_id: 36 user_id = login.current_user.id 37 else: 38 admin = True 39 if request.method == 'POST': 40 DataManager.update_user(request.form, int(user_id)) 41 if admin: 42 return redirect(url_for('sadmin_users.details_view', user_id=user_id)) 43 return redirect(url_for('.index_view')) 44 return redirect(url_for('.index_view')) 45 46 47 @profile.route('/fb_connect', methods=('GET', 'POST')) 48 def connect_facebook(): 49 facebook = get_facebook_auth() 50 fb_auth_url, state = facebook.authorization_url(FbOAuth.get_auth_uri(), access_type='offline') 51 return redirect(fb_auth_url) 52 53 54 @profile.route('/tw_connect', methods=('GET', 'POST')) 55 def connect_twitter(): 56 twitter_auth_url, __ = get_twitter_auth_url() 57 return redirect('https://api.twitter.com/oauth/authenticate?' 
+ twitter_auth_url) 58 59 @profile.route('/instagram_connect', methods=('GET', 'POST')) 60 def connect_instagram(): 61 instagram = get_instagram_auth() 62 instagram_auth_url, state = instagram.authorization_url(InstagramOAuth.get_auth_uri(), access_type='offline') 63 return redirect(instagram_auth_url) 64 65 @profile.route('/<int:user_id>/editfiles/bgimage', methods=('POST', 'DELETE')) 66 def bgimage_upload(user_id): 67 if request.method == 'POST': 68 background_image = request.form['bgimage'] 69 if background_image: 70 background_file = uploaded_file(file_content=background_image) 71 background_url = upload( 72 background_file, 73 UPLOAD_PATHS['user']['avatar'].format( 74 user_id=user_id 75 )) 76 return jsonify({'status': 'ok', 'background_url': background_url}) 77 else: 78 return jsonify({'status': 'no bgimage'}) 79 elif request.method == 'DELETE': 80 profile = DataGetter.get_user(int(user_id)) 81 profile.avatar_uploaded = '' 82 save_to_db(profile) 83 return jsonify({'status': 'ok'}) 84 85 86 @profile.route('/create/files/bgimage', methods=('POST',)) 87 def create_event_bgimage_upload(): 88 if request.method == 'POST': 89 background_image = request.form['bgimage'] 90 if background_image: 91 background_file = uploaded_file(file_content=background_image) 92 background_url = upload( 93 background_file, 94 UPLOAD_PATHS['temp']['event'].format(uuid=uuid4()) 95 ) 96 return jsonify({'status': 'ok', 'background_url': background_url}) 97 else: 98 return jsonify({'status': 'no bgimage'}) 99 [end of app/views/users/profile.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/app/views/users/profile.py b/app/views/users/profile.py --- a/app/views/users/profile.py +++ b/app/views/users/profile.py @@ -11,7 +11,7 @@ from app.helpers.data_getter import DataGetter from app.helpers.helpers import uploaded_file from app.helpers.oauth import FbOAuth, InstagramOAuth, OAuth -from app.helpers.storage import upload, UPLOAD_PATHS +from app.helpers.storage import upload, UPLOAD_PATHS, upload_local profile = Blueprint('profile', __name__, url_prefix='/profile') @@ -89,7 +89,7 @@ background_image = request.form['bgimage'] if background_image: background_file = uploaded_file(file_content=background_image) - background_url = upload( + background_url = upload_local( background_file, UPLOAD_PATHS['temp']['event'].format(uuid=uuid4()) )
{"golden_diff": "diff --git a/app/views/users/profile.py b/app/views/users/profile.py\n--- a/app/views/users/profile.py\n+++ b/app/views/users/profile.py\n@@ -11,7 +11,7 @@\n from app.helpers.data_getter import DataGetter\n from app.helpers.helpers import uploaded_file\n from app.helpers.oauth import FbOAuth, InstagramOAuth, OAuth\n-from app.helpers.storage import upload, UPLOAD_PATHS\n+from app.helpers.storage import upload, UPLOAD_PATHS, upload_local\n \n profile = Blueprint('profile', __name__, url_prefix='/profile')\n \n@@ -89,7 +89,7 @@\n background_image = request.form['bgimage']\n if background_image:\n background_file = uploaded_file(file_content=background_image)\n- background_url = upload(\n+ background_url = upload_local(\n background_file,\n UPLOAD_PATHS['temp']['event'].format(uuid=uuid4())\n )\n", "issue": "Adding user image to profile and saving it results in error page\nAfter user uploads an image to the profile page and updates/saves the profile an error page shows up.\r\n\r\n![screenshot from 2016-12-31 15-36-41](https://cloud.githubusercontent.com/assets/1583873/21577975/160aba80-cf6f-11e6-999a-d42a21e9561b.png)\r\n\n", "before_files": [{"content": "from uuid import uuid4\n\nfrom flask import Blueprint\nfrom flask import render_template\nfrom flask import request, url_for, redirect, flash, jsonify\nfrom flask.ext import login\nfrom markupsafe import Markup\n\nfrom app.helpers.auth import AuthManager\nfrom app.helpers.data import DataManager, get_facebook_auth, get_instagram_auth, get_twitter_auth_url, save_to_db, get_google_auth\nfrom app.helpers.data_getter import DataGetter\nfrom app.helpers.helpers import uploaded_file\nfrom app.helpers.oauth import FbOAuth, InstagramOAuth, OAuth\nfrom app.helpers.storage import upload, UPLOAD_PATHS\n\nprofile = Blueprint('profile', __name__, url_prefix='/profile')\n\n\[email protected]('/')\ndef index_view():\n if not AuthManager.is_verified_user():\n flash(Markup(\"Your account is unverified. \"\n \"Please verify by clicking on the confirmation link that has been emailed to you.\"\n '<br>Did not get the email? Please <a href=\"/resend_email/\" class=\"alert-link\"> '\n 'click here to resend the confirmation.</a>'))\n profile = DataGetter.get_user(login.current_user.id)\n return render_template('gentelella/admin/profile/index.html',\n profile=profile)\n\n\[email protected]('/edit/', methods=('GET', 'POST'))\[email protected]('/edit/<user_id>', methods=('GET', 'POST'))\ndef edit_view(user_id=None):\n admin = None\n if not user_id:\n user_id = login.current_user.id\n else:\n admin = True\n if request.method == 'POST':\n DataManager.update_user(request.form, int(user_id))\n if admin:\n return redirect(url_for('sadmin_users.details_view', user_id=user_id))\n return redirect(url_for('.index_view'))\n return redirect(url_for('.index_view'))\n\n\[email protected]('/fb_connect', methods=('GET', 'POST'))\ndef connect_facebook():\n facebook = get_facebook_auth()\n fb_auth_url, state = facebook.authorization_url(FbOAuth.get_auth_uri(), access_type='offline')\n return redirect(fb_auth_url)\n\n\[email protected]('/tw_connect', methods=('GET', 'POST'))\ndef connect_twitter():\n twitter_auth_url, __ = get_twitter_auth_url()\n return redirect('https://api.twitter.com/oauth/authenticate?' 
+ twitter_auth_url)\n\[email protected]('/instagram_connect', methods=('GET', 'POST'))\ndef connect_instagram():\n instagram = get_instagram_auth()\n instagram_auth_url, state = instagram.authorization_url(InstagramOAuth.get_auth_uri(), access_type='offline')\n return redirect(instagram_auth_url)\n\[email protected]('/<int:user_id>/editfiles/bgimage', methods=('POST', 'DELETE'))\ndef bgimage_upload(user_id):\n if request.method == 'POST':\n background_image = request.form['bgimage']\n if background_image:\n background_file = uploaded_file(file_content=background_image)\n background_url = upload(\n background_file,\n UPLOAD_PATHS['user']['avatar'].format(\n user_id=user_id\n ))\n return jsonify({'status': 'ok', 'background_url': background_url})\n else:\n return jsonify({'status': 'no bgimage'})\n elif request.method == 'DELETE':\n profile = DataGetter.get_user(int(user_id))\n profile.avatar_uploaded = ''\n save_to_db(profile)\n return jsonify({'status': 'ok'})\n\n\[email protected]('/create/files/bgimage', methods=('POST',))\ndef create_event_bgimage_upload():\n if request.method == 'POST':\n background_image = request.form['bgimage']\n if background_image:\n background_file = uploaded_file(file_content=background_image)\n background_url = upload(\n background_file,\n UPLOAD_PATHS['temp']['event'].format(uuid=uuid4())\n )\n return jsonify({'status': 'ok', 'background_url': background_url})\n else:\n return jsonify({'status': 'no bgimage'})\n", "path": "app/views/users/profile.py"}]}
1,668
193
gh_patches_debug_3624
rasdani/github-patches
git_diff
aws-cloudformation__cfn-lint-912
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> pkg_resources (setuptools) requirement not declared in setup.py *cfn-lint version:* 0.20.1 *Description of issue.* While trying to package cfn-lint for conda-forge, I ran into the issue that pkg_resources is [imported in a few places](https://github.com/aws-cloudformation/cfn-python-lint/search?q=pkg_resources&unscoped_q=pkg_resources) but that this requirement (setuptools) is not specified in setup.py https://github.com/aws-cloudformation/cfn-python-lint/blob/master/setup.py#L75-L82 Is setuptools desired to be a run time requirement? If so, install_requires should probably list it. </issue> <code> [start of setup.py] 1 """ 2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 4 Permission is hereby granted, free of charge, to any person obtaining a copy of this 5 software and associated documentation files (the "Software"), to deal in the Software 6 without restriction, including without limitation the rights to use, copy, modify, 7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to 8 permit persons to whom the Software is furnished to do so. 9 10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, 11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 16 """ 17 import codecs 18 import re 19 from setuptools import find_packages 20 from setuptools import setup 21 22 23 def get_version(filename): 24 with codecs.open(filename, 'r', 'utf-8') as fp: 25 contents = fp.read() 26 return re.search(r"__version__ = ['\"]([^'\"]+)['\"]", contents).group(1) 27 28 29 version = get_version('src/cfnlint/version.py') 30 31 32 with open('README.md') as f: 33 readme = f.read() 34 35 setup( 36 name='cfn-lint', 37 version=version, 38 description=('checks cloudformation for practices and behaviour \ 39 that could potentially be improved'), 40 long_description=readme, 41 long_description_content_type="text/markdown", 42 keywords='aws, lint', 43 author='kddejong', 44 author_email='[email protected]', 45 url='https://github.com/aws-cloudformation/cfn-python-lint', 46 package_dir={'': 'src'}, 47 package_data={'cfnlint': [ 48 'data/CloudSpecs/*.json', 49 'data/AdditionalSpecs/*.json', 50 'data/Serverless/*.json', 51 'data/ExtendedSpecs/all/*.json', 52 'data/ExtendedSpecs/ap-northeast-1/*.json', 53 'data/ExtendedSpecs/ap-northeast-2/*.json', 54 'data/ExtendedSpecs/ap-northeast-3/*.json', 55 'data/ExtendedSpecs/ap-south-1/*.json', 56 'data/ExtendedSpecs/ap-southeast-1/*.json', 57 'data/ExtendedSpecs/ap-southeast-2/*.json', 58 'data/ExtendedSpecs/ca-central-1/*.json', 59 'data/ExtendedSpecs/eu-central-1/*.json', 60 'data/ExtendedSpecs/eu-north-1/*.json', 61 'data/ExtendedSpecs/eu-west-1/*.json', 62 'data/ExtendedSpecs/eu-west-2/*.json', 63 'data/ExtendedSpecs/eu-west-3/*.json', 64 'data/ExtendedSpecs/sa-east-1/*.json', 65 'data/ExtendedSpecs/us-east-1/*.json', 66 'data/ExtendedSpecs/us-east-2/*.json', 67 'data/ExtendedSpecs/us-gov-east-1/*.json', 68 'data/ExtendedSpecs/us-gov-west-1/*.json', 69 'data/ExtendedSpecs/us-west-1/*.json', 70 'data/ExtendedSpecs/us-west-2/*.json', 71 
'data/CfnLintCli/config/schema.json' 72 ]}, 73 packages=find_packages('src'), 74 zip_safe=False, 75 install_requires=[ 76 'pyyaml', 77 'six~=1.11', 78 'requests>=2.15.0,<=2.21.0', 79 'aws-sam-translator>=1.10.0', 80 'jsonpatch', 81 'jsonschema~=2.6', 82 'pathlib2>=2.3.0;python_version<"3.4"' 83 ], 84 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*', 85 entry_points={ 86 'console_scripts': [ 87 'cfn-lint = cfnlint.__main__:main' 88 ] 89 }, 90 license='MIT no attribution', 91 test_suite="unittest", 92 classifiers=[ 93 'Development Status :: 5 - Production/Stable', 94 'Intended Audience :: Developers', 95 'License :: OSI Approved :: MIT License', 96 'Natural Language :: English', 97 'Operating System :: OS Independent', 98 'Programming Language :: Python :: 2', 99 'Programming Language :: Python :: 2.7', 100 'Programming Language :: Python :: 3', 101 'Programming Language :: Python :: 3.4', 102 'Programming Language :: Python :: 3.5', 103 'Programming Language :: Python :: 3.6', 104 'Programming Language :: Python :: 3.7', 105 ], 106 ) 107 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -79,7 +79,8 @@ 'aws-sam-translator>=1.10.0', 'jsonpatch', 'jsonschema~=2.6', - 'pathlib2>=2.3.0;python_version<"3.4"' + 'pathlib2>=2.3.0;python_version<"3.4"', + 'setuptools', ], python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*', entry_points={
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -79,7 +79,8 @@\n 'aws-sam-translator>=1.10.0',\n 'jsonpatch',\n 'jsonschema~=2.6',\n- 'pathlib2>=2.3.0;python_version<\"3.4\"'\n+ 'pathlib2>=2.3.0;python_version<\"3.4\"',\n+ 'setuptools',\n ],\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n entry_points={\n", "issue": "pkg_resources (setuptools) requirement not declared in setup.py\n*cfn-lint version:* 0.20.1\r\n\r\n*Description of issue.*\r\nWhile trying to package cfn-lint for conda-forge, I ran into the issue that pkg_resources is [imported in a few places](https://github.com/aws-cloudformation/cfn-python-lint/search?q=pkg_resources&unscoped_q=pkg_resources) but that this requirement (setuptools) is not specified in setup.py https://github.com/aws-cloudformation/cfn-python-lint/blob/master/setup.py#L75-L82\r\n\r\nIs setuptools desired to be a run time requirement? If so, install_requires should probably list it. \n", "before_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport codecs\nimport re\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\ndef get_version(filename):\n with codecs.open(filename, 'r', 'utf-8') as fp:\n contents = fp.read()\n return re.search(r\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", contents).group(1)\n\n\nversion = get_version('src/cfnlint/version.py')\n\n\nwith open('README.md') as f:\n readme = f.read()\n\nsetup(\n name='cfn-lint',\n version=version,\n description=('checks cloudformation for practices and behaviour \\\n that could potentially be improved'),\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n keywords='aws, lint',\n author='kddejong',\n author_email='[email protected]',\n url='https://github.com/aws-cloudformation/cfn-python-lint',\n package_dir={'': 'src'},\n package_data={'cfnlint': [\n 'data/CloudSpecs/*.json',\n 'data/AdditionalSpecs/*.json',\n 'data/Serverless/*.json',\n 'data/ExtendedSpecs/all/*.json',\n 'data/ExtendedSpecs/ap-northeast-1/*.json',\n 'data/ExtendedSpecs/ap-northeast-2/*.json',\n 'data/ExtendedSpecs/ap-northeast-3/*.json',\n 'data/ExtendedSpecs/ap-south-1/*.json',\n 'data/ExtendedSpecs/ap-southeast-1/*.json',\n 'data/ExtendedSpecs/ap-southeast-2/*.json',\n 'data/ExtendedSpecs/ca-central-1/*.json',\n 'data/ExtendedSpecs/eu-central-1/*.json',\n 'data/ExtendedSpecs/eu-north-1/*.json',\n 'data/ExtendedSpecs/eu-west-1/*.json',\n 'data/ExtendedSpecs/eu-west-2/*.json',\n 'data/ExtendedSpecs/eu-west-3/*.json',\n 'data/ExtendedSpecs/sa-east-1/*.json',\n 'data/ExtendedSpecs/us-east-1/*.json',\n 
'data/ExtendedSpecs/us-east-2/*.json',\n 'data/ExtendedSpecs/us-gov-east-1/*.json',\n 'data/ExtendedSpecs/us-gov-west-1/*.json',\n 'data/ExtendedSpecs/us-west-1/*.json',\n 'data/ExtendedSpecs/us-west-2/*.json',\n 'data/CfnLintCli/config/schema.json'\n ]},\n packages=find_packages('src'),\n zip_safe=False,\n install_requires=[\n 'pyyaml',\n 'six~=1.11',\n 'requests>=2.15.0,<=2.21.0',\n 'aws-sam-translator>=1.10.0',\n 'jsonpatch',\n 'jsonschema~=2.6',\n 'pathlib2>=2.3.0;python_version<\"3.4\"'\n ],\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n entry_points={\n 'console_scripts': [\n 'cfn-lint = cfnlint.__main__:main'\n ]\n },\n license='MIT no attribution',\n test_suite=\"unittest\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ],\n)\n", "path": "setup.py"}]}
1,953
144
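Illustrative note on the cfn-lint record above: the fix adds `setuptools` to `install_requires` because `pkg_resources` ships with the setuptools distribution. The sketch below is not part of the dataset record; it only demonstrates the runtime dependency using standard `pkg_resources` calls, and the printed version is simply whatever setuptools happens to be installed locally.

```python
# pkg_resources is provided by the setuptools distribution, so any runtime
# import of it makes setuptools a runtime requirement that setup.py must declare.
import pkg_resources

dist = pkg_resources.get_distribution("setuptools")
print(dist.project_name, dist.version)  # e.g. "setuptools 68.2.2", depends on the environment
```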
gh_patches_debug_12804
rasdani/github-patches
git_diff
pypa__virtualenv-2088
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Use the builtin plugin classes unless another plugin class is specifically asked for. **What's the problem this feature will solve?** I have a private plugin for virtualenv where I add an additional discovery class. This discovery class adds a new argument to the command line parser that is required but only when the discovery class is chosen. However I'm seeing an issue where using virtualenv via the command line as normal is now asking for this argument. The reason seems to be that virtualenv is picking a default discovery class but in a non-deterministic way and sometimes the additional discovery class is chosen as the default discovery class and so the argument is required. The default class is chosen depending on which entry point is discovered first. I believe entry points give no guarantees about order of discovery. The order of entry points discovery seems to change in different installs of virtualenv and the plugin, rather than changing in the same environment between different invocations of virtualenv. I believe the problem will be the same for creators, seeders, and activators as well. **Describe the solution you'd like** I would expect the builtin discovery class to be chosen as the default discovery class unless explicitly set otherwise. **Alternative Solutions** These classes could have a priority set at the class level. The builtin classes would have a priority set such that a plugin class could opt to set it's priority lower or higher than the builtins. virtualenv would then order these classes by their priority. Classes would be allowed to have the same priority with the understanding that the order of classes with the same priority value would be non-deterministic. </issue> <code> [start of src/virtualenv/run/plugin/discovery.py] 1 from __future__ import absolute_import, unicode_literals 2 3 from .base import PluginLoader 4 5 6 class Discovery(PluginLoader): 7 """""" 8 9 10 def get_discover(parser, args): 11 discover_types = Discovery.entry_points_for("virtualenv.discovery") 12 discovery_parser = parser.add_argument_group( 13 title="discovery", 14 description="discover and provide a target interpreter", 15 ) 16 discovery_parser.add_argument( 17 "--discovery", 18 choices=_get_default_discovery(discover_types), 19 default=next(i for i in discover_types.keys()), 20 required=False, 21 help="interpreter discovery method", 22 ) 23 options, _ = parser.parse_known_args(args) 24 discover_class = discover_types[options.discovery] 25 discover_class.add_parser_arguments(discovery_parser) 26 options, _ = parser.parse_known_args(args, namespace=options) 27 discover = discover_class(options) 28 return discover 29 30 31 def _get_default_discovery(discover_types): 32 return list(discover_types.keys()) 33 [end of src/virtualenv/run/plugin/discovery.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/virtualenv/run/plugin/discovery.py b/src/virtualenv/run/plugin/discovery.py --- a/src/virtualenv/run/plugin/discovery.py +++ b/src/virtualenv/run/plugin/discovery.py @@ -13,10 +13,13 @@ title="discovery", description="discover and provide a target interpreter", ) + choices = _get_default_discovery(discover_types) + # prefer the builtin if present, otherwise fallback to first defined type + choices = sorted(choices, key=lambda a: 0 if a == "builtin" else 1) discovery_parser.add_argument( "--discovery", - choices=_get_default_discovery(discover_types), - default=next(i for i in discover_types.keys()), + choices=choices, + default=next(iter(choices)), required=False, help="interpreter discovery method", )
{"golden_diff": "diff --git a/src/virtualenv/run/plugin/discovery.py b/src/virtualenv/run/plugin/discovery.py\n--- a/src/virtualenv/run/plugin/discovery.py\n+++ b/src/virtualenv/run/plugin/discovery.py\n@@ -13,10 +13,13 @@\n title=\"discovery\",\n description=\"discover and provide a target interpreter\",\n )\n+ choices = _get_default_discovery(discover_types)\n+ # prefer the builtin if present, otherwise fallback to first defined type\n+ choices = sorted(choices, key=lambda a: 0 if a == \"builtin\" else 1)\n discovery_parser.add_argument(\n \"--discovery\",\n- choices=_get_default_discovery(discover_types),\n- default=next(i for i in discover_types.keys()),\n+ choices=choices,\n+ default=next(iter(choices)),\n required=False,\n help=\"interpreter discovery method\",\n )\n", "issue": "Use the builtin plugin classes unless another plugin class is specifically asked for.\n**What's the problem this feature will solve?**\r\n\r\nI have a private plugin for virtualenv where I add an additional discovery class. This discovery class adds a new argument to the command line parser that is required but only when the discovery class is chosen. However I'm seeing an issue where using virtualenv via the command line as normal is now asking for this argument. The reason seems to be that virtualenv is picking a default discovery class but in a non-deterministic way and sometimes the additional discovery class is chosen as the default discovery class and so the argument is required. The default class is chosen depending on which entry point is discovered first. I believe entry points give no guarantees about order of discovery.\r\n\r\nThe order of entry points discovery seems to change in different installs of virtualenv and the plugin, rather than changing in the same environment between different invocations of virtualenv.\r\n\r\nI believe the problem will be the same for creators, seeders, and activators as well.\r\n\r\n**Describe the solution you'd like**\r\n\r\nI would expect the builtin discovery class to be chosen as the default discovery class unless explicitly set otherwise.\r\n\r\n**Alternative Solutions**\r\n\r\nThese classes could have a priority set at the class level. The builtin classes would have a priority set such that a plugin class could opt to set it's priority lower or higher than the builtins. virtualenv would then order these classes by their priority. 
Classes would be allowed to have the same priority with the understanding that the order of classes with the same priority value would be non-deterministic.\r\n\n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nfrom .base import PluginLoader\n\n\nclass Discovery(PluginLoader):\n \"\"\"\"\"\"\n\n\ndef get_discover(parser, args):\n discover_types = Discovery.entry_points_for(\"virtualenv.discovery\")\n discovery_parser = parser.add_argument_group(\n title=\"discovery\",\n description=\"discover and provide a target interpreter\",\n )\n discovery_parser.add_argument(\n \"--discovery\",\n choices=_get_default_discovery(discover_types),\n default=next(i for i in discover_types.keys()),\n required=False,\n help=\"interpreter discovery method\",\n )\n options, _ = parser.parse_known_args(args)\n discover_class = discover_types[options.discovery]\n discover_class.add_parser_arguments(discovery_parser)\n options, _ = parser.parse_known_args(args, namespace=options)\n discover = discover_class(options)\n return discover\n\n\ndef _get_default_discovery(discover_types):\n return list(discover_types.keys())\n", "path": "src/virtualenv/run/plugin/discovery.py"}]}
1,134
196
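A small standalone sketch of the ordering behaviour introduced by the patch in the virtualenv record above. The plugin names are made up for illustration; the point is that Python's stable sort keys `"builtin"` to the front while every other discovered entry keeps its original relative order, so the builtin discovery stays the default unless explicitly overridden.

```python
# Hypothetical discovery entry-point names; only "builtin" is taken from the patch.
discovered = ["corp-custom-discovery", "builtin", "another-plugin"]

choices = sorted(discovered, key=lambda name: 0 if name == "builtin" else 1)
default = next(iter(choices))

print(choices)  # ['builtin', 'corp-custom-discovery', 'another-plugin']
print(default)  # 'builtin' -> the builtin discovery remains the default choice
```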
gh_patches_debug_6429
rasdani/github-patches
git_diff
saleor__saleor-6833
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bug in validate_price_precision ### What I'm trying to achieve Get sane validation obviously. ### Steps to reproduce the problem 1. Try to create a voucher with the minimum order amount set to `2000` 2. It will throw an error. ### What I expected to happen It shouldn't throw an error. ### Observation Upon normalizing it converts the zeros to exponents. ```python def validate_price_precision(value: Optional["Decimal"], currency: str = None): """Validate if price amount does not have too many decimal places. Price amount can't have more decimal places than currency allows to. Works only with decimal created from a string. """ # check no needed when there is no value if not value: return currency_fraction = get_currency_fraction(currency or settings.DEFAULT_CURRENCY) value = value.normalize() if abs(value.as_tuple().exponent) > currency_fraction: raise ValidationError( f"Value cannot have more than {currency_fraction} decimal places." ) ``` should be: ```python def validate_price_precision(value: Optional["Decimal"], currency: str = None): """Validate if price amount does not have too many decimal places. Price amount can't have more decimal places than currency allows to. Works only with decimal created from a string. """ # check no needed when there is no value if not value: return currency_fraction = get_currency_fraction(currency or settings.DEFAULT_CURRENCY) value = value.normalize() exp = value.as_tuple().exponent if exp < 0 and abs(value.as_tuple().exponent) > currency_fraction: raise ValidationError( f"Value cannot have more than {currency_fraction} decimal places." ) ``` So that it doesn't misinterpret zeros from the right as values after decimal places. </issue> <code> [start of saleor/graphql/core/validators.py] 1 from typing import TYPE_CHECKING, Optional 2 3 from django.conf import settings 4 from django.core.exceptions import ValidationError 5 from django_prices.utils.formatting import get_currency_fraction 6 from graphql.error import GraphQLError 7 8 if TYPE_CHECKING: 9 from decimal import Decimal 10 11 12 def validate_one_of_args_is_in_query(*args): 13 # split args into a list with 2-element tuples: 14 # [(arg1_name, arg1_value), (arg2_name, arg2_value), ...] 15 splitted_args = [args[i : i + 2] for i in range(0, len(args), 2)] # noqa: E203 16 # filter trueish values from each tuple 17 filter_args = list(filter(lambda item: bool(item[1]) is True, splitted_args)) 18 19 if len(filter_args) > 1: 20 rest_args = ", ".join([f"'{item[0]}'" for item in filter_args[1:]]) 21 raise GraphQLError( 22 f"Argument '{filter_args[0][0]}' cannot be combined with {rest_args}" 23 ) 24 25 if not filter_args: 26 required_args = ", ".join([f"'{item[0]}'" for item in splitted_args]) 27 raise GraphQLError(f"At least one of arguments is required: {required_args}.") 28 29 30 def validate_price_precision(value: Optional["Decimal"], currency: str = None): 31 """Validate if price amount does not have too many decimal places. 32 33 Price amount can't have more decimal places than currency allow to. 34 Works only with decimal created from a string. 35 """ 36 37 # check no needed when there is no value 38 if not value: 39 return 40 41 currency_fraction = get_currency_fraction(currency or settings.DEFAULT_CURRENCY) 42 value = value.normalize() 43 if abs(value.as_tuple().exponent) > currency_fraction: 44 raise ValidationError( 45 f"Value cannot have more than {currency_fraction} decimal places." 
46 ) 47 [end of saleor/graphql/core/validators.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/saleor/graphql/core/validators.py b/saleor/graphql/core/validators.py --- a/saleor/graphql/core/validators.py +++ b/saleor/graphql/core/validators.py @@ -40,7 +40,7 @@ currency_fraction = get_currency_fraction(currency or settings.DEFAULT_CURRENCY) value = value.normalize() - if abs(value.as_tuple().exponent) > currency_fraction: + if value.as_tuple().exponent < -currency_fraction: raise ValidationError( f"Value cannot have more than {currency_fraction} decimal places." )
{"golden_diff": "diff --git a/saleor/graphql/core/validators.py b/saleor/graphql/core/validators.py\n--- a/saleor/graphql/core/validators.py\n+++ b/saleor/graphql/core/validators.py\n@@ -40,7 +40,7 @@\n \n currency_fraction = get_currency_fraction(currency or settings.DEFAULT_CURRENCY)\n value = value.normalize()\n- if abs(value.as_tuple().exponent) > currency_fraction:\n+ if value.as_tuple().exponent < -currency_fraction:\n raise ValidationError(\n f\"Value cannot have more than {currency_fraction} decimal places.\"\n )\n", "issue": "Bug in validate_price_precision\n### What I'm trying to achieve\r\nGet sane validation obviously.\r\n\r\n### Steps to reproduce the problem\r\n1. Try to create a voucher with the minimum order amount set to `2000`\r\n2. It will throw an error.\r\n\r\n### What I expected to happen\r\nIt shouldn't throw an error.\r\n\r\n### Observation\r\nUpon normalizing it converts the zeros to exponents.\r\n\r\n```python\r\ndef validate_price_precision(value: Optional[\"Decimal\"], currency: str = None):\r\n \"\"\"Validate if price amount does not have too many decimal places.\r\n\r\n Price amount can't have more decimal places than currency allows to.\r\n Works only with decimal created from a string.\r\n \"\"\"\r\n\r\n # check no needed when there is no value\r\n if not value:\r\n return\r\n\r\n currency_fraction = get_currency_fraction(currency or settings.DEFAULT_CURRENCY)\r\n value = value.normalize()\r\n if abs(value.as_tuple().exponent) > currency_fraction:\r\n raise ValidationError(\r\n f\"Value cannot have more than {currency_fraction} decimal places.\"\r\n )\r\n```\r\nshould be:\r\n\r\n```python\r\ndef validate_price_precision(value: Optional[\"Decimal\"], currency: str = None):\r\n \"\"\"Validate if price amount does not have too many decimal places.\r\n\r\n Price amount can't have more decimal places than currency allows to.\r\n Works only with decimal created from a string.\r\n \"\"\"\r\n\r\n # check no needed when there is no value\r\n if not value:\r\n return\r\n\r\n currency_fraction = get_currency_fraction(currency or settings.DEFAULT_CURRENCY)\r\n value = value.normalize()\r\n exp = value.as_tuple().exponent\r\n if exp < 0 and abs(value.as_tuple().exponent) > currency_fraction:\r\n raise ValidationError(\r\n f\"Value cannot have more than {currency_fraction} decimal places.\"\r\n )\r\n```\r\nSo that it doesn't misinterpret zeros from the right as values after decimal places.\r\n\n", "before_files": [{"content": "from typing import TYPE_CHECKING, Optional\n\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django_prices.utils.formatting import get_currency_fraction\nfrom graphql.error import GraphQLError\n\nif TYPE_CHECKING:\n from decimal import Decimal\n\n\ndef validate_one_of_args_is_in_query(*args):\n # split args into a list with 2-element tuples:\n # [(arg1_name, arg1_value), (arg2_name, arg2_value), ...]\n splitted_args = [args[i : i + 2] for i in range(0, len(args), 2)] # noqa: E203\n # filter trueish values from each tuple\n filter_args = list(filter(lambda item: bool(item[1]) is True, splitted_args))\n\n if len(filter_args) > 1:\n rest_args = \", \".join([f\"'{item[0]}'\" for item in filter_args[1:]])\n raise GraphQLError(\n f\"Argument '{filter_args[0][0]}' cannot be combined with {rest_args}\"\n )\n\n if not filter_args:\n required_args = \", \".join([f\"'{item[0]}'\" for item in splitted_args])\n raise GraphQLError(f\"At least one of arguments is required: {required_args}.\")\n\n\ndef 
validate_price_precision(value: Optional[\"Decimal\"], currency: str = None):\n \"\"\"Validate if price amount does not have too many decimal places.\n\n Price amount can't have more decimal places than currency allow to.\n Works only with decimal created from a string.\n \"\"\"\n\n # check no needed when there is no value\n if not value:\n return\n\n currency_fraction = get_currency_fraction(currency or settings.DEFAULT_CURRENCY)\n value = value.normalize()\n if abs(value.as_tuple().exponent) > currency_fraction:\n raise ValidationError(\n f\"Value cannot have more than {currency_fraction} decimal places.\"\n )\n", "path": "saleor/graphql/core/validators.py"}]}
1,431
128
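A short self-contained sketch of the `Decimal` behaviour behind the saleor record above, assuming a two-decimal currency such as USD (an assumption; the real check reads the fraction from the currency). It shows why `normalize()` turns trailing zeros into a positive exponent, which the original `abs()` comparison misread as extra decimal places, and why the patched sign-aware comparison only rejects genuinely too-precise amounts.

```python
from decimal import Decimal

CURRENCY_FRACTION = 2  # assumed two-decimal currency, e.g. USD

for raw in ("2000", "19.99", "19.999"):
    value = Decimal(raw).normalize()
    exponent = value.as_tuple().exponent
    old_rejects = abs(exponent) > CURRENCY_FRACTION   # original check
    new_rejects = exponent < -CURRENCY_FRACTION       # patched check
    print(raw, value, exponent, "old rejects:", old_rejects, "new rejects:", new_rejects)

# 2000   -> 2E+3,   exponent  3: old rejects (false positive), new accepts
# 19.99  -> 19.99,  exponent -2: both accept
# 19.999 -> 19.999, exponent -3: both reject (genuinely too many decimal places)
```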
gh_patches_debug_20161
rasdani/github-patches
git_diff
bridgecrewio__checkov-3938
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> CKV_AZURE_144 passes on defaults **Describe the issue** If it is related to an existing check, please note the relevant check ID. Also, explain the logic for this addition / change. The check CKV_AZURE_144 passes if the property "public_network_access_enabled" is not explicitly set since it assumes that it defaults to false. This seems to not be the case at least for AzureRM < 3.0.0. Right now we have publicly accessible Workspaces for which the check passes since the property is not set. **Examples** Please share an example code sample (in the IaC of your choice) + the expected outcomes. The Module Code: <img width="567" alt="image" src="https://user-images.githubusercontent.com/34415231/203775024-77d6bc7c-dbec-4e8c-8639-42aa67136a3d.png"> The actual Workspace: <img width="1182" alt="image" src="https://user-images.githubusercontent.com/34415231/203775161-91611475-5a27-4435-81a8-a40c7430061d.png"> Since the defaults seem to be subject to change the check should probably fail if the property is not set. </issue> <code> [start of checkov/terraform/checks/resource/azure/MLPublicAccess.py] 1 from __future__ import annotations 2 3 from typing import Any 4 5 from checkov.common.models.enums import CheckCategories 6 from checkov.terraform.checks.resource.base_resource_negative_value_check import BaseResourceNegativeValueCheck 7 8 9 class MLPublicAccess(BaseResourceNegativeValueCheck): 10 def __init__(self) -> None: 11 # This is the full description of your check 12 description = "Ensure that Public Access is disabled for Machine Learning Workspace" 13 14 # This is the Unique ID for your check 15 id = "CKV_AZURE_144" 16 17 # These are the terraform objects supported by this check (ex: aws_iam_policy_document) 18 supported_resources = ('azurerm_machine_learning_workspace',) 19 20 # Valid CheckCategories are defined in checkov/common/models/enums.py 21 categories = (CheckCategories.NETWORKING,) 22 super().__init__(name=description, id=id, categories=categories, supported_resources=supported_resources) 23 24 def get_inspected_key(self) -> str: 25 return "public_network_access_enabled" 26 27 def get_forbidden_values(self) -> list[Any]: 28 return [True] 29 30 31 check = MLPublicAccess() 32 [end of checkov/terraform/checks/resource/azure/MLPublicAccess.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/checkov/terraform/checks/resource/azure/MLPublicAccess.py b/checkov/terraform/checks/resource/azure/MLPublicAccess.py --- a/checkov/terraform/checks/resource/azure/MLPublicAccess.py +++ b/checkov/terraform/checks/resource/azure/MLPublicAccess.py @@ -2,7 +2,7 @@ from typing import Any -from checkov.common.models.enums import CheckCategories +from checkov.common.models.enums import CheckCategories, CheckResult from checkov.terraform.checks.resource.base_resource_negative_value_check import BaseResourceNegativeValueCheck @@ -19,7 +19,8 @@ # Valid CheckCategories are defined in checkov/common/models/enums.py categories = (CheckCategories.NETWORKING,) - super().__init__(name=description, id=id, categories=categories, supported_resources=supported_resources) + super().__init__(name=description, id=id, categories=categories, + supported_resources=supported_resources, missing_attribute_result=CheckResult.FAILED) def get_inspected_key(self) -> str: return "public_network_access_enabled"
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/azure/MLPublicAccess.py b/checkov/terraform/checks/resource/azure/MLPublicAccess.py\n--- a/checkov/terraform/checks/resource/azure/MLPublicAccess.py\n+++ b/checkov/terraform/checks/resource/azure/MLPublicAccess.py\n@@ -2,7 +2,7 @@\n \n from typing import Any\n \n-from checkov.common.models.enums import CheckCategories\n+from checkov.common.models.enums import CheckCategories, CheckResult\n from checkov.terraform.checks.resource.base_resource_negative_value_check import BaseResourceNegativeValueCheck\n \n \n@@ -19,7 +19,8 @@\n \n # Valid CheckCategories are defined in checkov/common/models/enums.py\n categories = (CheckCategories.NETWORKING,)\n- super().__init__(name=description, id=id, categories=categories, supported_resources=supported_resources)\n+ super().__init__(name=description, id=id, categories=categories,\n+ supported_resources=supported_resources, missing_attribute_result=CheckResult.FAILED)\n \n def get_inspected_key(self) -> str:\n return \"public_network_access_enabled\"\n", "issue": "CKV_AZURE_144 passes on defaults\n**Describe the issue**\r\nIf it is related to an existing check, please note the relevant check ID.\r\nAlso, explain the logic for this addition / change.\r\n\r\nThe check CKV_AZURE_144 passes if the property \"public_network_access_enabled\" is not explicitly set since it assumes that it defaults to false. This seems to not be the case at least for AzureRM < 3.0.0. Right now we have publicly accessible Workspaces for which the check passes since the property is not set.\r\n\r\n**Examples**\r\nPlease share an example code sample (in the IaC of your choice) + the expected outcomes.\r\n\r\nThe Module Code:\r\n\r\n<img width=\"567\" alt=\"image\" src=\"https://user-images.githubusercontent.com/34415231/203775024-77d6bc7c-dbec-4e8c-8639-42aa67136a3d.png\">\r\n\r\nThe actual Workspace:\r\n<img width=\"1182\" alt=\"image\" src=\"https://user-images.githubusercontent.com/34415231/203775161-91611475-5a27-4435-81a8-a40c7430061d.png\">\r\n\r\nSince the defaults seem to be subject to change the check should probably fail if the property is not set.\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any\n\nfrom checkov.common.models.enums import CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_negative_value_check import BaseResourceNegativeValueCheck\n\n\nclass MLPublicAccess(BaseResourceNegativeValueCheck):\n def __init__(self) -> None:\n # This is the full description of your check\n description = \"Ensure that Public Access is disabled for Machine Learning Workspace\"\n\n # This is the Unique ID for your check\n id = \"CKV_AZURE_144\"\n\n # These are the terraform objects supported by this check (ex: aws_iam_policy_document)\n supported_resources = ('azurerm_machine_learning_workspace',)\n\n # Valid CheckCategories are defined in checkov/common/models/enums.py\n categories = (CheckCategories.NETWORKING,)\n super().__init__(name=description, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self) -> str:\n return \"public_network_access_enabled\"\n\n def get_forbidden_values(self) -> list[Any]:\n return [True]\n\n\ncheck = MLPublicAccess()\n", "path": "checkov/terraform/checks/resource/azure/MLPublicAccess.py"}]}
1,179
247
gh_patches_debug_7991
rasdani/github-patches
git_diff
biolab__orange3-2093
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Fitter preprocessors <!-- This is an issue template. Please fill in the relevant details in the sections below. --> ##### Orange version <!-- From menu _Help→About→Version_ or code `Orange.version.full_version` --> 3.4.0 ##### Expected behavior Learners use preprocessors. ##### Actual behavior Learners extending the Fitter base class do not use preprocessors. ##### Steps to reproduce the behavior Use a learner on e.g. hearth_disease data set ##### Additional info (worksheets, data, screenshots, ...) </issue> <code> [start of Orange/modelling/base.py] 1 from Orange.base import Learner, Model 2 3 4 class FitterMeta(type): 5 """Ensure that each subclass of the `Fitter` class overrides the `__fits__` 6 attribute with a valid value.""" 7 def __new__(mcs, name, bases, attrs): 8 # Check that a fitter implementation defines a valid `__fits__` 9 if any(cls.__name__ == 'Fitter' for cls in bases): 10 fits = attrs.get('__fits__') 11 assert isinstance(fits, dict), '__fits__ must be dict instance' 12 assert fits.get('classification') and fits.get('regression'), \ 13 ('`__fits__` property does not define classification ' 14 'or regression learner. Use a simple learner if you don\'t ' 15 'need the functionality provided by Fitter.') 16 return super().__new__(mcs, name, bases, attrs) 17 18 19 class Fitter(Learner, metaclass=FitterMeta): 20 """Handle multiple types of target variable with one learner. 21 22 Subclasses of this class serve as a sort of dispatcher. When subclassing, 23 we provide a `dict` which contain actual learner classes that handle 24 appropriate data types. The fitter can then be used on any data and will 25 delegate the work to the appropriate learner. 26 27 If the learners that handle each data type require different parameters, 28 you should pass in all the possible parameters to the fitter. The fitter 29 will then determine which parameters have to be passed to individual 30 learners. 31 32 """ 33 __fits__ = None 34 __returns__ = Model 35 36 # Constants to indicate what kind of problem we're dealing with 37 CLASSIFICATION, REGRESSION = 'classification', 'regression' 38 39 def __init__(self, preprocessors=None, **kwargs): 40 super().__init__(preprocessors=preprocessors) 41 self.kwargs = kwargs 42 # Make sure to pass preprocessor params to individual learners 43 self.kwargs['preprocessors'] = preprocessors 44 self.__learners = {self.CLASSIFICATION: None, self.REGRESSION: None} 45 46 def _fit_model(self, data): 47 if data.domain.has_discrete_class: 48 learner = self.get_learner(self.CLASSIFICATION) 49 else: 50 learner = self.get_learner(self.REGRESSION) 51 52 if type(self).fit is Learner.fit: 53 return learner.fit_storage(data) 54 else: 55 X, Y, W = data.X, data.Y, data.W if data.has_weights() else None 56 return learner.fit(X, Y, W) 57 58 def get_learner(self, problem_type): 59 """Get the learner for a given problem type. 60 61 Returns 62 ------- 63 Learner 64 The appropriate learner for the given problem type. 
65 66 """ 67 # Prevent trying to access the learner when problem type is None 68 if problem_type not in self.__fits__: 69 raise TypeError("No learner to handle '{}'".format(problem_type)) 70 if self.__learners[problem_type] is None: 71 learner = self.__fits__[problem_type](**self.__kwargs(problem_type)) 72 learner.use_default_preprocessors = self.use_default_preprocessors 73 self.__learners[problem_type] = learner 74 return self.__learners[problem_type] 75 76 def __kwargs(self, problem_type): 77 learner_kwargs = set( 78 self.__fits__[problem_type].__init__.__code__.co_varnames[1:]) 79 changed_kwargs = self._change_kwargs(self.kwargs, problem_type) 80 return {k: v for k, v in changed_kwargs.items() if k in learner_kwargs} 81 82 def _change_kwargs(self, kwargs, problem_type): 83 """Handle the kwargs to be passed to the learner before they are used. 84 85 In some cases we need to manipulate the kwargs that will be passed to 86 the learner, e.g. SGD takes a `loss` parameter in both the regression 87 and classification learners, but the learner widget cannot 88 differentiate between these two, so it passes classification and 89 regression loss parameters individually. The appropriate one must be 90 renamed into `loss` before passed to the actual learner instance. This 91 is done here. 92 93 """ 94 return kwargs 95 96 @property 97 def supports_weights(self): 98 """The fitter supports weights if both the classification and 99 regression learners support weights.""" 100 return ( 101 hasattr(self.get_learner(self.CLASSIFICATION), 'supports_weights') 102 and self.get_learner(self.CLASSIFICATION).supports_weights) and ( 103 hasattr(self.get_learner(self.REGRESSION), 'supports_weights') 104 and self.get_learner(self.REGRESSION).supports_weights) 105 106 @property 107 def params(self): 108 raise TypeError( 109 'A fitter does not have its own params. If you need to access ' 110 'learner params, please use the `get_params` method.') 111 112 def get_params(self, problem_type): 113 """Access the specific learner params of a given learner.""" 114 return self.get_learner(problem_type).params 115 [end of Orange/modelling/base.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/Orange/modelling/base.py b/Orange/modelling/base.py --- a/Orange/modelling/base.py +++ b/Orange/modelling/base.py @@ -55,6 +55,12 @@ X, Y, W = data.X, data.Y, data.W if data.has_weights() else None return learner.fit(X, Y, W) + def preprocess(self, data): + if data.domain.has_discrete_class: + return self.get_learner(self.CLASSIFICATION).preprocess(data) + else: + return self.get_learner(self.REGRESSION).preprocess(data) + def get_learner(self, problem_type): """Get the learner for a given problem type.
{"golden_diff": "diff --git a/Orange/modelling/base.py b/Orange/modelling/base.py\n--- a/Orange/modelling/base.py\n+++ b/Orange/modelling/base.py\n@@ -55,6 +55,12 @@\n X, Y, W = data.X, data.Y, data.W if data.has_weights() else None\n return learner.fit(X, Y, W)\n \n+ def preprocess(self, data):\n+ if data.domain.has_discrete_class:\n+ return self.get_learner(self.CLASSIFICATION).preprocess(data)\n+ else:\n+ return self.get_learner(self.REGRESSION).preprocess(data)\n+\n def get_learner(self, problem_type):\n \"\"\"Get the learner for a given problem type.\n", "issue": "Fitter preprocessors\n<!--\r\nThis is an issue template. Please fill in the relevant details in the\r\nsections below.\r\n-->\r\n\r\n##### Orange version\r\n<!-- From menu _Help\u2192About\u2192Version_ or code `Orange.version.full_version` -->\r\n3.4.0\r\n\r\n##### Expected behavior\r\nLearners use preprocessors.\r\n\r\n\r\n##### Actual behavior\r\nLearners extending the Fitter base class do not use preprocessors.\r\n\r\n\r\n##### Steps to reproduce the behavior\r\nUse a learner on e.g. hearth_disease data set\r\n\r\n\r\n##### Additional info (worksheets, data, screenshots, ...)\r\n\r\n\r\n\n", "before_files": [{"content": "from Orange.base import Learner, Model\n\n\nclass FitterMeta(type):\n \"\"\"Ensure that each subclass of the `Fitter` class overrides the `__fits__`\n attribute with a valid value.\"\"\"\n def __new__(mcs, name, bases, attrs):\n # Check that a fitter implementation defines a valid `__fits__`\n if any(cls.__name__ == 'Fitter' for cls in bases):\n fits = attrs.get('__fits__')\n assert isinstance(fits, dict), '__fits__ must be dict instance'\n assert fits.get('classification') and fits.get('regression'), \\\n ('`__fits__` property does not define classification '\n 'or regression learner. Use a simple learner if you don\\'t '\n 'need the functionality provided by Fitter.')\n return super().__new__(mcs, name, bases, attrs)\n\n\nclass Fitter(Learner, metaclass=FitterMeta):\n \"\"\"Handle multiple types of target variable with one learner.\n\n Subclasses of this class serve as a sort of dispatcher. When subclassing,\n we provide a `dict` which contain actual learner classes that handle\n appropriate data types. The fitter can then be used on any data and will\n delegate the work to the appropriate learner.\n\n If the learners that handle each data type require different parameters,\n you should pass in all the possible parameters to the fitter. 
The fitter\n will then determine which parameters have to be passed to individual\n learners.\n\n \"\"\"\n __fits__ = None\n __returns__ = Model\n\n # Constants to indicate what kind of problem we're dealing with\n CLASSIFICATION, REGRESSION = 'classification', 'regression'\n\n def __init__(self, preprocessors=None, **kwargs):\n super().__init__(preprocessors=preprocessors)\n self.kwargs = kwargs\n # Make sure to pass preprocessor params to individual learners\n self.kwargs['preprocessors'] = preprocessors\n self.__learners = {self.CLASSIFICATION: None, self.REGRESSION: None}\n\n def _fit_model(self, data):\n if data.domain.has_discrete_class:\n learner = self.get_learner(self.CLASSIFICATION)\n else:\n learner = self.get_learner(self.REGRESSION)\n\n if type(self).fit is Learner.fit:\n return learner.fit_storage(data)\n else:\n X, Y, W = data.X, data.Y, data.W if data.has_weights() else None\n return learner.fit(X, Y, W)\n\n def get_learner(self, problem_type):\n \"\"\"Get the learner for a given problem type.\n\n Returns\n -------\n Learner\n The appropriate learner for the given problem type.\n\n \"\"\"\n # Prevent trying to access the learner when problem type is None\n if problem_type not in self.__fits__:\n raise TypeError(\"No learner to handle '{}'\".format(problem_type))\n if self.__learners[problem_type] is None:\n learner = self.__fits__[problem_type](**self.__kwargs(problem_type))\n learner.use_default_preprocessors = self.use_default_preprocessors\n self.__learners[problem_type] = learner\n return self.__learners[problem_type]\n\n def __kwargs(self, problem_type):\n learner_kwargs = set(\n self.__fits__[problem_type].__init__.__code__.co_varnames[1:])\n changed_kwargs = self._change_kwargs(self.kwargs, problem_type)\n return {k: v for k, v in changed_kwargs.items() if k in learner_kwargs}\n\n def _change_kwargs(self, kwargs, problem_type):\n \"\"\"Handle the kwargs to be passed to the learner before they are used.\n\n In some cases we need to manipulate the kwargs that will be passed to\n the learner, e.g. SGD takes a `loss` parameter in both the regression\n and classification learners, but the learner widget cannot\n differentiate between these two, so it passes classification and\n regression loss parameters individually. The appropriate one must be\n renamed into `loss` before passed to the actual learner instance. This\n is done here.\n\n \"\"\"\n return kwargs\n\n @property\n def supports_weights(self):\n \"\"\"The fitter supports weights if both the classification and\n regression learners support weights.\"\"\"\n return (\n hasattr(self.get_learner(self.CLASSIFICATION), 'supports_weights')\n and self.get_learner(self.CLASSIFICATION).supports_weights) and (\n hasattr(self.get_learner(self.REGRESSION), 'supports_weights')\n and self.get_learner(self.REGRESSION).supports_weights)\n\n @property\n def params(self):\n raise TypeError(\n 'A fitter does not have its own params. If you need to access '\n 'learner params, please use the `get_params` method.')\n\n def get_params(self, problem_type):\n \"\"\"Access the specific learner params of a given learner.\"\"\"\n return self.get_learner(problem_type).params\n", "path": "Orange/modelling/base.py"}]}
1,952
158
gh_patches_debug_17346
rasdani/github-patches
git_diff
bridgecrewio__checkov-373
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> CKV_AWS_4 is an invalid check **Describe the bug** CKV_AWS_4 for terraform resource `aws_ebs_snapshot` is invalid. There is not an argument for encryption. Please remove this check. **To Reproduce** Steps to reproduce the behavior: 1. create tf file ckv_aws_4_test.tf: ``` resource "aws_ebs_volume" "example" { availability_zone = "us-west-2a" encrypted = true size = 40 tags = { Name = "HelloWorld" } } resource "aws_ebs_snapshot" "example_snapshot" { volume_id = "${aws_ebs_volume.example.id}" tags = { Name = "HelloWorld_snap" } } ``` 2. Run cli command 'checkov -f ckv_aws_4_test.tf' 3. Failed when should have passed **Expected behavior** Passing check **Screenshots** ![image](https://user-images.githubusercontent.com/15948264/85431230-dd199180-b546-11ea-824f-b763481a3bfe.png) **Desktop (please complete the following information):** - OS: [MacOS] - Checkov Version [1.0.391] **Additional context** - [link to resource doc](https://www.terraform.io/docs/providers/aws/r/ebs_snapshot.html) As you can see, there is not an argument for encryption listed. Only a computed artifact named encryption. ![image](https://user-images.githubusercontent.com/15948264/85432245-4c43b580-b548-11ea-8b5f-9b663364bb52.png) - [TF SourceCode shows encryption as being computed](https://github.com/terraform-providers/terraform-provider-aws/blob/master/aws/resource_aws_ebs_snapshot.go) ![image](https://user-images.githubusercontent.com/15948264/85432117-1d2d4400-b548-11ea-9378-2dcb88c98cb2.png) - The docs from AWS explain that snapshots that are taken from encrypted volumes are automatically encrypted. [link](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-creating-snapshot.html) False positive for CKV_AWS_17 Checkov `1.0.391` will fail CKV_AWS_17 for a Terraform file defining any value for `publicly_accessible`, even false, because the check is for any value rather the actual security goal which should be a test for true: https://github.com/bridgecrewio/checkov/blob/b906298b4a26135b7ee6b58f1aa4c54fc04ead20/checkov/terraform/checks/resource/aws/RDSPubliclyAccessible.py It should probably also say “RDS instance” rather than “RDS bucket” </issue> <code> [start of checkov/terraform/checks/resource/aws/RDSPubliclyAccessible.py] 1 from checkov.common.models.consts import ANY_VALUE 2 from checkov.common.models.enums import CheckCategories 3 from checkov.terraform.checks.resource.base_resource_negative_value_check import BaseResourceNegativeValueCheck 4 5 6 class RDSPubliclyAccessible(BaseResourceNegativeValueCheck): 7 def __init__(self): 8 name = "Ensure all data stored in the RDS bucket is not public accessible" 9 id = "CKV_AWS_17" 10 supported_resources = ['aws_db_instance', 'aws_rds_cluster_instance'] 11 categories = [CheckCategories.NETWORKING] 12 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) 13 14 def get_inspected_key(self): 15 return 'publicly_accessible' 16 17 def get_forbidden_values(self): 18 return [ANY_VALUE] 19 20 21 check = RDSPubliclyAccessible() 22 [end of checkov/terraform/checks/resource/aws/RDSPubliclyAccessible.py] [start of checkov/terraform/checks/resource/aws/EBSSnapshotEncryption.py] 1 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck 2 from checkov.common.models.enums import CheckCategories 3 4 5 class EBSSnapshotEncryption(BaseResourceValueCheck): 6 def __init__(self): 7 name = "Ensure all 
data stored in the EBS Snapshot is securely encrypted" 8 id = "CKV_AWS_4" 9 supported_resources = ['aws_ebs_snapshot'] 10 categories = [CheckCategories.ENCRYPTION] 11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) 12 13 def get_inspected_key(self): 14 return "encrypted" 15 16 17 check = EBSSnapshotEncryption() 18 [end of checkov/terraform/checks/resource/aws/EBSSnapshotEncryption.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/checkov/terraform/checks/resource/aws/EBSSnapshotEncryption.py b/checkov/terraform/checks/resource/aws/EBSSnapshotEncryption.py deleted file mode 100644 --- a/checkov/terraform/checks/resource/aws/EBSSnapshotEncryption.py +++ /dev/null @@ -1,17 +0,0 @@ -from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck -from checkov.common.models.enums import CheckCategories - - -class EBSSnapshotEncryption(BaseResourceValueCheck): - def __init__(self): - name = "Ensure all data stored in the EBS Snapshot is securely encrypted" - id = "CKV_AWS_4" - supported_resources = ['aws_ebs_snapshot'] - categories = [CheckCategories.ENCRYPTION] - super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) - - def get_inspected_key(self): - return "encrypted" - - -check = EBSSnapshotEncryption() diff --git a/checkov/terraform/checks/resource/aws/RDSPubliclyAccessible.py b/checkov/terraform/checks/resource/aws/RDSPubliclyAccessible.py --- a/checkov/terraform/checks/resource/aws/RDSPubliclyAccessible.py +++ b/checkov/terraform/checks/resource/aws/RDSPubliclyAccessible.py @@ -15,7 +15,7 @@ return 'publicly_accessible' def get_forbidden_values(self): - return [ANY_VALUE] + return [True] check = RDSPubliclyAccessible()
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/EBSSnapshotEncryption.py b/checkov/terraform/checks/resource/aws/EBSSnapshotEncryption.py\ndeleted file mode 100644\n--- a/checkov/terraform/checks/resource/aws/EBSSnapshotEncryption.py\n+++ /dev/null\n@@ -1,17 +0,0 @@\n-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n-from checkov.common.models.enums import CheckCategories\n-\n-\n-class EBSSnapshotEncryption(BaseResourceValueCheck):\n- def __init__(self):\n- name = \"Ensure all data stored in the EBS Snapshot is securely encrypted\"\n- id = \"CKV_AWS_4\"\n- supported_resources = ['aws_ebs_snapshot']\n- categories = [CheckCategories.ENCRYPTION]\n- super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n-\n- def get_inspected_key(self):\n- return \"encrypted\"\n-\n-\n-check = EBSSnapshotEncryption()\ndiff --git a/checkov/terraform/checks/resource/aws/RDSPubliclyAccessible.py b/checkov/terraform/checks/resource/aws/RDSPubliclyAccessible.py\n--- a/checkov/terraform/checks/resource/aws/RDSPubliclyAccessible.py\n+++ b/checkov/terraform/checks/resource/aws/RDSPubliclyAccessible.py\n@@ -15,7 +15,7 @@\n return 'publicly_accessible'\n \n def get_forbidden_values(self):\n- return [ANY_VALUE]\n+ return [True]\n \n \n check = RDSPubliclyAccessible()\n", "issue": "CKV_AWS_4 is an invalid check\n**Describe the bug**\r\nCKV_AWS_4 for terraform resource `aws_ebs_snapshot` is invalid. There is not an argument for encryption. Please remove this check.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. create tf file ckv_aws_4_test.tf:\r\n```\r\nresource \"aws_ebs_volume\" \"example\" {\r\n availability_zone = \"us-west-2a\"\r\n encrypted = true\r\n size = 40\r\n\r\n tags = {\r\n Name = \"HelloWorld\"\r\n }\r\n}\r\n\r\nresource \"aws_ebs_snapshot\" \"example_snapshot\" {\r\n volume_id = \"${aws_ebs_volume.example.id}\"\r\n\r\n tags = {\r\n Name = \"HelloWorld_snap\"\r\n }\r\n}\r\n```\r\n\r\n2. Run cli command 'checkov -f ckv_aws_4_test.tf'\r\n3. Failed when should have passed\r\n\r\n**Expected behavior**\r\nPassing check\r\n\r\n**Screenshots**\r\n![image](https://user-images.githubusercontent.com/15948264/85431230-dd199180-b546-11ea-824f-b763481a3bfe.png)\r\n\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: [MacOS]\r\n - Checkov Version [1.0.391]\r\n\r\n**Additional context**\r\n- [link to resource doc](https://www.terraform.io/docs/providers/aws/r/ebs_snapshot.html)\r\nAs you can see, there is not an argument for encryption listed. Only a computed artifact named encryption.\r\n![image](https://user-images.githubusercontent.com/15948264/85432245-4c43b580-b548-11ea-8b5f-9b663364bb52.png)\r\n\r\n- [TF SourceCode shows encryption as being computed](https://github.com/terraform-providers/terraform-provider-aws/blob/master/aws/resource_aws_ebs_snapshot.go)\r\n![image](https://user-images.githubusercontent.com/15948264/85432117-1d2d4400-b548-11ea-9378-2dcb88c98cb2.png)\r\n\r\n- The docs from AWS explain that snapshots that are taken from encrypted volumes are automatically encrypted. 
[link](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-creating-snapshot.html) \r\n\nFalse positive for CKV_AWS_17\nCheckov `1.0.391` will fail CKV_AWS_17 for a Terraform file defining any value for `publicly_accessible`, even false, because the check is for any value rather the actual security goal which should be a test for true:\r\n\r\nhttps://github.com/bridgecrewio/checkov/blob/b906298b4a26135b7ee6b58f1aa4c54fc04ead20/checkov/terraform/checks/resource/aws/RDSPubliclyAccessible.py\r\n\r\nIt should probably also say \u201cRDS instance\u201d rather than \u201cRDS bucket\u201d\n", "before_files": [{"content": "from checkov.common.models.consts import ANY_VALUE\nfrom checkov.common.models.enums import CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_negative_value_check import BaseResourceNegativeValueCheck\n\n\nclass RDSPubliclyAccessible(BaseResourceNegativeValueCheck):\n def __init__(self):\n name = \"Ensure all data stored in the RDS bucket is not public accessible\"\n id = \"CKV_AWS_17\"\n supported_resources = ['aws_db_instance', 'aws_rds_cluster_instance']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return 'publicly_accessible'\n\n def get_forbidden_values(self):\n return [ANY_VALUE]\n\n\ncheck = RDSPubliclyAccessible()\n", "path": "checkov/terraform/checks/resource/aws/RDSPubliclyAccessible.py"}, {"content": "from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\nfrom checkov.common.models.enums import CheckCategories\n\n\nclass EBSSnapshotEncryption(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure all data stored in the EBS Snapshot is securely encrypted\"\n id = \"CKV_AWS_4\"\n supported_resources = ['aws_ebs_snapshot']\n categories = [CheckCategories.ENCRYPTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return \"encrypted\"\n\n\ncheck = EBSSnapshotEncryption()\n", "path": "checkov/terraform/checks/resource/aws/EBSSnapshotEncryption.py"}]}
1,665
344
gh_patches_debug_3717
rasdani/github-patches
git_diff
mdn__kuma-5972
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> AttributeError: 'NoneType' object has no attribute 'sites' **Summary** _What is the problem?_ I was installing `Kuma` on my computer. When I run the command `docker-compose exec web ./manage.py configure_github_social`, console desk show the Error `AttributeError: 'NoneType' object has no attribute 'sites'`. **Steps To Reproduce (STR)** _How can we reproduce the problem?_ 1. Get in [https://kuma.readthedocs.io/en/latest/installation.html#load-the-sample-database](https://kuma.readthedocs.io/en/latest/installation.html#load-the-sample-database) 2. Find the step **Enable GitHub authentication (optional)** 3. At that step I run `docker-compose exec web ./manage.py configure_github_social`, and error occured. **Actual behavior** _What actually happened?_ I checked the code and found that in file `kuma/attachments/management/commands/configure_github_social.py` line 75, the variable `social_app` is None. Was I got something wrong? </issue> <code> [start of kuma/attachments/management/commands/configure_github_social.py] 1 import fileinput 2 import os 3 import sys 4 5 from allauth.socialaccount.models import SocialApp 6 from django.conf import settings 7 from django.contrib.sites.models import Site 8 from django.core.management.base import BaseCommand 9 10 try: 11 input = raw_input 12 except NameError: 13 # Python3's input behaves like raw_input 14 # TODO: Delete this block when we've migrated 15 pass 16 17 LOCALHOST = 'localhost:8000' 18 MDN_LOCALHOST = 'mdn.localhost' 19 20 OVERWRITE_PROMPT = 'There\'s already a SocialApp for GitHub, if you want to overwrite it type "yes":' 21 GITHUB_INFO = ( 22 'Visit https://github.com/settings/developers and click "New OAuth App"\n' 23 'Set "Homepage URL" to "http://mdn.localhost:8000/" and Authorization callback URL to ' + 24 '"http://mdn.localhost:8000/users/github/login/callback/" respectively' 25 ) 26 ENV_INFO = 'Putting SITE_ID and DOMAIN into .env' 27 HOSTS_INFO = ( 28 'Make sure your hosts file contains these lines:\n' 29 '127.0.0.1 localhost demos mdn.localhost beta.mdn.localhost wiki.mdn.localhost\n' 30 '::1 mdn.localhost beta.mdn.localhost wiki.mdn.localhost' 31 ) 32 33 34 def overwrite_or_create_env_vars(env_vars): 35 file_path = os.path.join(os.getcwd(), '.env') 36 37 for line in fileinput.input(file_path, inplace=True): 38 key = line.strip().split('=')[0] 39 if key not in env_vars: 40 sys.stdout.write(line) 41 42 with open(file_path, 'a') as file: 43 file.write('\n') 44 for key, value in env_vars.items(): 45 file.write(key + '=' + str(value) + '\n') 46 47 48 class Command(BaseCommand): 49 help = 'Configure Kuma for Sign-In with GitHub' 50 51 def handle(self, **options): 52 print('\n') 53 54 social_app = SocialApp.objects.filter(provider='github').first() 55 if social_app is not None and input(OVERWRITE_PROMPT) == 'yes': 56 print('\n') 57 58 print(GITHUB_INFO) 59 client_id = input('Client ID: ').strip() 60 client_secret = input('Client Secret: ').strip() 61 62 social_app, created = SocialApp.objects.update_or_create( 63 provider='github', 64 defaults={ 65 'name': 'MDN Development', 66 'client_id': client_id, 67 'secret': client_secret 68 } 69 ) 70 71 site, created = Site.objects.update_or_create( 72 domain=LOCALHOST, 73 defaults={'name': LOCALHOST} 74 ) 75 social_app.sites.add(site) 76 77 print('\n') 78 79 print(ENV_INFO) 80 overwrite_or_create_env_vars( 81 {'SITE_ID': site.id, 'DOMAIN': MDN_LOCALHOST} if site.id != settings.SITE_ID else 
82 {'DOMAIN': MDN_LOCALHOST}) 83 84 print(HOSTS_INFO) 85 [end of kuma/attachments/management/commands/configure_github_social.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kuma/attachments/management/commands/configure_github_social.py b/kuma/attachments/management/commands/configure_github_social.py --- a/kuma/attachments/management/commands/configure_github_social.py +++ b/kuma/attachments/management/commands/configure_github_social.py @@ -52,7 +52,7 @@ print('\n') social_app = SocialApp.objects.filter(provider='github').first() - if social_app is not None and input(OVERWRITE_PROMPT) == 'yes': + if social_app is None or input(OVERWRITE_PROMPT) == 'yes': print('\n') print(GITHUB_INFO)
{"golden_diff": "diff --git a/kuma/attachments/management/commands/configure_github_social.py b/kuma/attachments/management/commands/configure_github_social.py\n--- a/kuma/attachments/management/commands/configure_github_social.py\n+++ b/kuma/attachments/management/commands/configure_github_social.py\n@@ -52,7 +52,7 @@\n print('\\n')\n \n social_app = SocialApp.objects.filter(provider='github').first()\n- if social_app is not None and input(OVERWRITE_PROMPT) == 'yes':\n+ if social_app is None or input(OVERWRITE_PROMPT) == 'yes':\n print('\\n')\n \n print(GITHUB_INFO)\n", "issue": "AttributeError: 'NoneType' object has no attribute 'sites'\n**Summary**\r\n_What is the problem?_\r\nI was installing `Kuma` on my computer.\r\nWhen I run the command `docker-compose exec web ./manage.py configure_github_social`, console desk show the Error `AttributeError: 'NoneType' object has no attribute 'sites'`.\r\n\r\n**Steps To Reproduce (STR)**\r\n_How can we reproduce the problem?_\r\n\r\n1. Get in [https://kuma.readthedocs.io/en/latest/installation.html#load-the-sample-database](https://kuma.readthedocs.io/en/latest/installation.html#load-the-sample-database)\r\n2. Find the step **Enable GitHub authentication (optional)**\r\n3. At that step I run `docker-compose exec web ./manage.py configure_github_social`, and error occured.\r\n\r\n\r\n**Actual behavior**\r\n_What actually happened?_\r\nI checked the code and found that in file `kuma/attachments/management/commands/configure_github_social.py` line 75, the variable `social_app` is None. Was I got something wrong? \n", "before_files": [{"content": "import fileinput\nimport os\nimport sys\n\nfrom allauth.socialaccount.models import SocialApp\nfrom django.conf import settings\nfrom django.contrib.sites.models import Site\nfrom django.core.management.base import BaseCommand\n\ntry:\n input = raw_input\nexcept NameError:\n # Python3's input behaves like raw_input\n # TODO: Delete this block when we've migrated\n pass\n\nLOCALHOST = 'localhost:8000'\nMDN_LOCALHOST = 'mdn.localhost'\n\nOVERWRITE_PROMPT = 'There\\'s already a SocialApp for GitHub, if you want to overwrite it type \"yes\":'\nGITHUB_INFO = (\n 'Visit https://github.com/settings/developers and click \"New OAuth App\"\\n'\n 'Set \"Homepage URL\" to \"http://mdn.localhost:8000/\" and Authorization callback URL to ' +\n '\"http://mdn.localhost:8000/users/github/login/callback/\" respectively'\n)\nENV_INFO = 'Putting SITE_ID and DOMAIN into .env'\nHOSTS_INFO = (\n 'Make sure your hosts file contains these lines:\\n'\n '127.0.0.1 localhost demos mdn.localhost beta.mdn.localhost wiki.mdn.localhost\\n'\n '::1 mdn.localhost beta.mdn.localhost wiki.mdn.localhost'\n)\n\n\ndef overwrite_or_create_env_vars(env_vars):\n file_path = os.path.join(os.getcwd(), '.env')\n\n for line in fileinput.input(file_path, inplace=True):\n key = line.strip().split('=')[0]\n if key not in env_vars:\n sys.stdout.write(line)\n\n with open(file_path, 'a') as file:\n file.write('\\n')\n for key, value in env_vars.items():\n file.write(key + '=' + str(value) + '\\n')\n\n\nclass Command(BaseCommand):\n help = 'Configure Kuma for Sign-In with GitHub'\n\n def handle(self, **options):\n print('\\n')\n\n social_app = SocialApp.objects.filter(provider='github').first()\n if social_app is not None and input(OVERWRITE_PROMPT) == 'yes':\n print('\\n')\n\n print(GITHUB_INFO)\n client_id = input('Client ID: ').strip()\n client_secret = input('Client Secret: ').strip()\n\n social_app, created = SocialApp.objects.update_or_create(\n 
provider='github',\n defaults={\n 'name': 'MDN Development',\n 'client_id': client_id,\n 'secret': client_secret\n }\n )\n\n site, created = Site.objects.update_or_create(\n domain=LOCALHOST,\n defaults={'name': LOCALHOST}\n )\n social_app.sites.add(site)\n\n print('\\n')\n\n print(ENV_INFO)\n overwrite_or_create_env_vars(\n {'SITE_ID': site.id, 'DOMAIN': MDN_LOCALHOST} if site.id != settings.SITE_ID else\n {'DOMAIN': MDN_LOCALHOST})\n\n print(HOSTS_INFO)\n", "path": "kuma/attachments/management/commands/configure_github_social.py"}]}
1,594
152
gh_patches_debug_61258
rasdani/github-patches
git_diff
microsoft__torchgeo-80
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Determine minimum supported dependency versions Before releasing, we should determine the minimum supported version of each dependency. We should also consider a test with this version just to make sure it doesn't change. </issue> <code> [start of docs/conf.py] 1 # Configuration file for the Sphinx documentation builder. 2 # 3 # This file only contains a selection of the most common options. For a full 4 # list see the documentation: 5 # https://www.sphinx-doc.org/en/master/usage/configuration.html 6 7 # -- Path setup -------------------------------------------------------------- 8 9 import os 10 import sys 11 12 import pytorch_sphinx_theme 13 14 # If extensions (or modules to document with autodoc) are in another directory, 15 # add these directories to sys.path here. If the directory is relative to the 16 # documentation root, use os.path.abspath to make it absolute, like shown here. 17 sys.path.insert(0, os.path.abspath("..")) 18 19 import torchgeo # noqa: E402 20 21 # -- Project information ----------------------------------------------------- 22 23 project = "torchgeo" 24 copyright = "2021, Microsoft Corporation" 25 author = "Adam J. Stewart" 26 version = ".".join(torchgeo.__version__.split(".")[:2]) 27 release = torchgeo.__version__ 28 29 30 # -- General configuration --------------------------------------------------- 31 32 # Add any Sphinx extension module names here, as strings. They can be 33 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 34 # ones. 35 extensions = [ 36 "sphinx.ext.autodoc", 37 "sphinx.ext.autosectionlabel", 38 "sphinx.ext.intersphinx", 39 "sphinx.ext.napoleon", 40 "sphinx.ext.todo", 41 "sphinx.ext.viewcode", 42 ] 43 44 # List of patterns, relative to source directory, that match files and 45 # directories to ignore when looking for source files. 46 # This pattern also affects html_static_path and html_extra_path. 47 exclude_patterns = ["_build"] 48 49 nitpicky = True 50 nitpick_ignore = [ 51 # https://github.com/sphinx-doc/sphinx/issues/8127 52 ("py:class", ".."), 53 # TODO: can't figure out why this isn't found 54 ("py:class", "LightningDataModule"), 55 ] 56 57 58 # -- Options for HTML output ------------------------------------------------- 59 60 # The theme to use for HTML and HTML Help pages. See the documentation for 61 # a list of builtin themes. 62 html_theme = "pytorch_sphinx_theme" 63 html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] 64 65 # Theme options are theme-specific and customize the look and feel of a theme 66 # further. For a list of options available for each theme, see the 67 # documentation. 
68 html_theme_options = { 69 "collapse_navigation": False, 70 "display_version": True, 71 "logo_only": True, 72 "pytorch_project": "docs", 73 "navigation_with_keys": True, 74 "analytics_id": "UA-117752657-2", 75 } 76 77 # -- Extension configuration ------------------------------------------------- 78 79 # sphinx.ext.autodoc 80 autodoc_default_options = { 81 "members": True, 82 "special-members": True, 83 "show-inheritance": True, 84 } 85 autodoc_member_order = "bysource" 86 autodoc_typehints = "description" 87 88 # sphinx.ext.intersphinx 89 intersphinx_mapping = { 90 "python": ("https://docs.python.org/3", None), 91 "pytorch-lightning": ("https://pytorch-lightning.readthedocs.io/en/latest/", None), 92 "rasterio": ("https://rasterio.readthedocs.io/en/latest/", None), 93 "torch": ("https://pytorch.org/docs/stable", None), 94 } 95 [end of docs/conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -46,6 +46,10 @@ # This pattern also affects html_static_path and html_extra_path. exclude_patterns = ["_build"] +# Sphinx 3.0+ required for: +# autodoc_typehints = "description" +needs_sphinx = "3.0" + nitpicky = True nitpick_ignore = [ # https://github.com/sphinx-doc/sphinx/issues/8127
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -46,6 +46,10 @@\n # This pattern also affects html_static_path and html_extra_path.\n exclude_patterns = [\"_build\"]\n \n+# Sphinx 3.0+ required for:\n+# autodoc_typehints = \"description\"\n+needs_sphinx = \"3.0\"\n+\n nitpicky = True\n nitpick_ignore = [\n # https://github.com/sphinx-doc/sphinx/issues/8127\n", "issue": "Determine minimum supported dependency versions\nBefore releasing, we should determine the minimum supported version of each dependency. We should also consider a test with this version just to make sure it doesn't change.\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\nimport os\nimport sys\n\nimport pytorch_sphinx_theme\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath(\"..\"))\n\nimport torchgeo # noqa: E402\n\n# -- Project information -----------------------------------------------------\n\nproject = \"torchgeo\"\ncopyright = \"2021, Microsoft Corporation\"\nauthor = \"Adam J. Stewart\"\nversion = \".\".join(torchgeo.__version__.split(\".\")[:2])\nrelease = torchgeo.__version__\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosectionlabel\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.viewcode\",\n]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\"]\n\nnitpicky = True\nnitpick_ignore = [\n # https://github.com/sphinx-doc/sphinx/issues/8127\n (\"py:class\", \"..\"),\n # TODO: can't figure out why this isn't found\n (\"py:class\", \"LightningDataModule\"),\n]\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = \"pytorch_sphinx_theme\"\nhtml_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. 
For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n \"collapse_navigation\": False,\n \"display_version\": True,\n \"logo_only\": True,\n \"pytorch_project\": \"docs\",\n \"navigation_with_keys\": True,\n \"analytics_id\": \"UA-117752657-2\",\n}\n\n# -- Extension configuration -------------------------------------------------\n\n# sphinx.ext.autodoc\nautodoc_default_options = {\n \"members\": True,\n \"special-members\": True,\n \"show-inheritance\": True,\n}\nautodoc_member_order = \"bysource\"\nautodoc_typehints = \"description\"\n\n# sphinx.ext.intersphinx\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3\", None),\n \"pytorch-lightning\": (\"https://pytorch-lightning.readthedocs.io/en/latest/\", None),\n \"rasterio\": (\"https://rasterio.readthedocs.io/en/latest/\", None),\n \"torch\": (\"https://pytorch.org/docs/stable\", None),\n}\n", "path": "docs/conf.py"}]}
1,481
119
gh_patches_debug_556
rasdani/github-patches
git_diff
pex-tool__pex-804
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Release 2.0.2 On the docket: + [x] Add a test of pypi index rendering. (#799) + [x] Fix `iter_compatible_interpreters` path biasing. (#798) + [x] Fix current platform handling. #801 </issue> <code> [start of pex/version.py] 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = '2.0.1' 5 [end of pex/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = '2.0.1' +__version__ = '2.0.2'
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = '2.0.1'\n+__version__ = '2.0.2'\n", "issue": "Release 2.0.2\nOn the docket:\r\n\r\n+ [x] Add a test of pypi index rendering. (#799)\r\n+ [x] Fix `iter_compatible_interpreters` path biasing. (#798)\r\n+ [x] Fix current platform handling. #801\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '2.0.1'\n", "path": "pex/version.py"}]}
649
95
gh_patches_debug_28102
rasdani/github-patches
git_diff
streamlink__streamlink-2428
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Goodgame plugin not worked. <!-- Thanks for reporting a plugin issue! USE THE TEMPLATE. Otherwise your plugin issue may be rejected. First, see the contribution guidelines: https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink Also check the list of open and closed plugin issues: https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22 Please see the text preview to avoid unnecessary formatting errors. --> ## Plugin Issue <!-- Replace [ ] with [x] in order to check the box --> - [x] This is a plugin issue and I have read the contribution guidelines. ### Description <!-- Explain the plugin issue as thoroughly as you can. --> It looks like the plugin can no longer open streams. ### Reproduction steps / Explicit stream URLs to test https://goodgame.ru/channel/Miker/#autoplay <!-- How can we reproduce this? Please note the exact steps below using the list format supplied. If you need more steps please add them. --> 1. ... 2. ... 3. ... ### Log output <!-- TEXT LOG OUTPUT IS REQUIRED for a plugin issue! Use the `--loglevel debug` parameter and avoid using parameters which suppress log output. https://streamlink.github.io/cli.html#cmdoption-l Make sure to **remove usernames and passwords** You can copy the output to https://gist.github.com/ or paste it below. --> ``` REPLACE THIS TEXT WITH THE LOG OUTPUT ``` c:\>streamlink --loglevel debug https://goodgame.ru/channel/Miker/#autoplay best [cli][debug] OS: Windows 7 [cli][debug] Python: 3.6.6 [cli][debug] Streamlink: 1.1.1 [cli][debug] Requests(2.21.0), Socks(1.6.7), Websocket(0.56.0) [cli][info] Found matching plugin goodgame for URL https://goodgame.ru/channel/Miker/#autoplay Traceback (most recent call last): File "runpy.py", line 193, in _run_module_as_main File "runpy.py", line 85, in _run_code File "C:\Program Files (x86)\Streamlink\bin\streamlink.exe\__main__.py", line 18, in <module> File "C:\Program Files (x86)\Streamlink\pkgs\streamlink_cli\main.py", line 1033, in main handle_url() File "C:\Program Files (x86)\Streamlink\pkgs\streamlink_cli\main.py", line 577, in handle_url streams = fetch_streams(plugin) File "C:\Program Files (x86)\Streamlink\pkgs\streamlink_cli\main.py", line 457, in fetch_streams sorting_excludes=args.stream_sorting_excludes) File "C:\Program Files (x86)\Streamlink\pkgs\streamlink\plugin\plugin.py", line 317, in streams ostreams = self._get_streams() File "C:\Program Files (x86)\Streamlink\pkgs\streamlink\plugins\goodgame.py", line 49, in _get_str eams **channel_info) File "logging\__init__.py", line 1295, in debug TypeError: _log() got an unexpected keyword argument 'id' ### Additional comments, screenshots, etc. [Love Streamlink? Please consider supporting our collective. 
Thanks!](https://opencollective.com/streamlink/donate) </issue> <code> [start of src/streamlink/plugins/goodgame.py] 1 import re 2 3 from streamlink.plugin import Plugin 4 from streamlink.stream import HLSStream 5 from streamlink.utils import parse_json 6 7 HLS_URL_FORMAT = "https://hls.goodgame.ru/hls/{0}{1}.m3u8" 8 QUALITIES = { 9 "1080p": "", 10 "720p": "_720", 11 "480p": "_480", 12 "240p": "_240" 13 } 14 15 _url_re = re.compile(r"https?://(?:www\.)?goodgame.ru/channel/(?P<user>[^/]+)") 16 _apidata_re = re.compile(r'''(?P<quote>["']?)channel(?P=quote)\s*:\s*(?P<data>{.*?})\s*,''') 17 _ddos_re = re.compile(r'document.cookie="(__DDOS_[^;]+)') 18 19 20 class GoodGame(Plugin): 21 @classmethod 22 def can_handle_url(cls, url): 23 return _url_re.match(url) 24 25 def _check_stream(self, url): 26 res = self.session.http.get(url, acceptable_status=(200, 404)) 27 if res.status_code == 200: 28 return True 29 30 def _get_streams(self): 31 headers = { 32 "Referer": self.url 33 } 34 res = self.session.http.get(self.url, headers=headers) 35 36 match = _ddos_re.search(res.text) 37 if match: 38 self.logger.debug("Anti-DDOS bypass...") 39 headers["Cookie"] = match.group(1) 40 res = self.session.http.get(self.url, headers=headers) 41 42 match = _apidata_re.search(res.text) 43 channel_info = match and parse_json(match.group("data")) 44 if not channel_info: 45 self.logger.error("Could not find channel info") 46 return 47 48 self.logger.debug("Found channel info: channelkey={channelkey} pid={streamkey} online={status}", 49 **channel_info) 50 if not channel_info['status']: 51 self.logger.debug("Channel appears to be offline") 52 53 streams = {} 54 for name, url_suffix in QUALITIES.items(): 55 url = HLS_URL_FORMAT.format(channel_info['streamkey'], url_suffix) 56 if not self._check_stream(url): 57 continue 58 59 streams[name] = HLSStream(self.session, url) 60 61 return streams 62 63 64 __plugin__ = GoodGame 65 [end of src/streamlink/plugins/goodgame.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/streamlink/plugins/goodgame.py b/src/streamlink/plugins/goodgame.py --- a/src/streamlink/plugins/goodgame.py +++ b/src/streamlink/plugins/goodgame.py @@ -1,9 +1,12 @@ import re +import logging from streamlink.plugin import Plugin from streamlink.stream import HLSStream from streamlink.utils import parse_json +log = logging.getLogger(__name__) + HLS_URL_FORMAT = "https://hls.goodgame.ru/hls/{0}{1}.m3u8" QUALITIES = { "1080p": "", @@ -35,7 +38,7 @@ match = _ddos_re.search(res.text) if match: - self.logger.debug("Anti-DDOS bypass...") + log.debug("Anti-DDOS bypass...") headers["Cookie"] = match.group(1) res = self.session.http.get(self.url, headers=headers) @@ -45,10 +48,9 @@ self.logger.error("Could not find channel info") return - self.logger.debug("Found channel info: channelkey={channelkey} pid={streamkey} online={status}", - **channel_info) + log.debug("Found channel info: id={id} channelkey={channelkey} pid={streamkey} online={status}".format(**channel_info)) if not channel_info['status']: - self.logger.debug("Channel appears to be offline") + log.debug("Channel appears to be offline") streams = {} for name, url_suffix in QUALITIES.items():
{"golden_diff": "diff --git a/src/streamlink/plugins/goodgame.py b/src/streamlink/plugins/goodgame.py\n--- a/src/streamlink/plugins/goodgame.py\n+++ b/src/streamlink/plugins/goodgame.py\n@@ -1,9 +1,12 @@\n import re\n+import logging\n \n from streamlink.plugin import Plugin\n from streamlink.stream import HLSStream\n from streamlink.utils import parse_json\n \n+log = logging.getLogger(__name__)\n+\n HLS_URL_FORMAT = \"https://hls.goodgame.ru/hls/{0}{1}.m3u8\"\n QUALITIES = {\n \"1080p\": \"\",\n@@ -35,7 +38,7 @@\n \n match = _ddos_re.search(res.text)\n if match:\n- self.logger.debug(\"Anti-DDOS bypass...\")\n+ log.debug(\"Anti-DDOS bypass...\")\n headers[\"Cookie\"] = match.group(1)\n res = self.session.http.get(self.url, headers=headers)\n \n@@ -45,10 +48,9 @@\n self.logger.error(\"Could not find channel info\")\n return\n \n- self.logger.debug(\"Found channel info: channelkey={channelkey} pid={streamkey} online={status}\",\n- **channel_info)\n+ log.debug(\"Found channel info: id={id} channelkey={channelkey} pid={streamkey} online={status}\".format(**channel_info))\n if not channel_info['status']:\n- self.logger.debug(\"Channel appears to be offline\")\n+ log.debug(\"Channel appears to be offline\")\n \n streams = {}\n for name, url_suffix in QUALITIES.items():\n", "issue": "Goodgame plugin not worked.\n<!--\r\nThanks for reporting a plugin issue!\r\nUSE THE TEMPLATE. Otherwise your plugin issue may be rejected.\r\n\r\nFirst, see the contribution guidelines:\r\nhttps://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink\r\n\r\nAlso check the list of open and closed plugin issues:\r\nhttps://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22\r\n\r\nPlease see the text preview to avoid unnecessary formatting errors.\r\n-->\r\n\r\n\r\n## Plugin Issue\r\n\r\n<!-- Replace [ ] with [x] in order to check the box -->\r\n- [x] This is a plugin issue and I have read the contribution guidelines.\r\n\r\n\r\n### Description\r\n\r\n<!-- Explain the plugin issue as thoroughly as you can. -->\r\nIt looks like the plugin can no longer open streams.\r\n\r\n### Reproduction steps / Explicit stream URLs to test\r\nhttps://goodgame.ru/channel/Miker/#autoplay\r\n<!-- How can we reproduce this? Please note the exact steps below using the list format supplied. If you need more steps please add them. -->\r\n\r\n1. ...\r\n2. ...\r\n3. 
...\r\n\r\n\r\n### Log output\r\n\r\n<!--\r\nTEXT LOG OUTPUT IS REQUIRED for a plugin issue!\r\nUse the `--loglevel debug` parameter and avoid using parameters which suppress log output.\r\nhttps://streamlink.github.io/cli.html#cmdoption-l\r\n\r\nMake sure to **remove usernames and passwords**\r\nYou can copy the output to https://gist.github.com/ or paste it below.\r\n-->\r\n\r\n```\r\nREPLACE THIS TEXT WITH THE LOG OUTPUT\r\n```\r\nc:\\>streamlink --loglevel debug https://goodgame.ru/channel/Miker/#autoplay best\r\n[cli][debug] OS: Windows 7\r\n[cli][debug] Python: 3.6.6\r\n[cli][debug] Streamlink: 1.1.1\r\n[cli][debug] Requests(2.21.0), Socks(1.6.7), Websocket(0.56.0)\r\n[cli][info] Found matching plugin goodgame for URL https://goodgame.ru/channel/Miker/#autoplay\r\nTraceback (most recent call last):\r\n File \"runpy.py\", line 193, in _run_module_as_main\r\n File \"runpy.py\", line 85, in _run_code\r\n File \"C:\\Program Files (x86)\\Streamlink\\bin\\streamlink.exe\\__main__.py\", line 18, in <module>\r\n File \"C:\\Program Files (x86)\\Streamlink\\pkgs\\streamlink_cli\\main.py\", line 1033, in main\r\n handle_url()\r\n File \"C:\\Program Files (x86)\\Streamlink\\pkgs\\streamlink_cli\\main.py\", line 577, in handle_url\r\n streams = fetch_streams(plugin)\r\n File \"C:\\Program Files (x86)\\Streamlink\\pkgs\\streamlink_cli\\main.py\", line 457, in fetch_streams\r\n sorting_excludes=args.stream_sorting_excludes)\r\n File \"C:\\Program Files (x86)\\Streamlink\\pkgs\\streamlink\\plugin\\plugin.py\", line 317, in streams\r\n ostreams = self._get_streams()\r\n File \"C:\\Program Files (x86)\\Streamlink\\pkgs\\streamlink\\plugins\\goodgame.py\", line 49, in _get_str\r\neams\r\n **channel_info)\r\n File \"logging\\__init__.py\", line 1295, in debug\r\nTypeError: _log() got an unexpected keyword argument 'id'\r\n\r\n### Additional comments, screenshots, etc.\r\n\r\n\r\n\r\n[Love Streamlink? Please consider supporting our collective. 
Thanks!](https://opencollective.com/streamlink/donate)\r\n\n", "before_files": [{"content": "import re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.stream import HLSStream\nfrom streamlink.utils import parse_json\n\nHLS_URL_FORMAT = \"https://hls.goodgame.ru/hls/{0}{1}.m3u8\"\nQUALITIES = {\n \"1080p\": \"\",\n \"720p\": \"_720\",\n \"480p\": \"_480\",\n \"240p\": \"_240\"\n}\n\n_url_re = re.compile(r\"https?://(?:www\\.)?goodgame.ru/channel/(?P<user>[^/]+)\")\n_apidata_re = re.compile(r'''(?P<quote>[\"']?)channel(?P=quote)\\s*:\\s*(?P<data>{.*?})\\s*,''')\n_ddos_re = re.compile(r'document.cookie=\"(__DDOS_[^;]+)')\n\n\nclass GoodGame(Plugin):\n @classmethod\n def can_handle_url(cls, url):\n return _url_re.match(url)\n\n def _check_stream(self, url):\n res = self.session.http.get(url, acceptable_status=(200, 404))\n if res.status_code == 200:\n return True\n\n def _get_streams(self):\n headers = {\n \"Referer\": self.url\n }\n res = self.session.http.get(self.url, headers=headers)\n\n match = _ddos_re.search(res.text)\n if match:\n self.logger.debug(\"Anti-DDOS bypass...\")\n headers[\"Cookie\"] = match.group(1)\n res = self.session.http.get(self.url, headers=headers)\n\n match = _apidata_re.search(res.text)\n channel_info = match and parse_json(match.group(\"data\"))\n if not channel_info:\n self.logger.error(\"Could not find channel info\")\n return\n\n self.logger.debug(\"Found channel info: channelkey={channelkey} pid={streamkey} online={status}\",\n **channel_info)\n if not channel_info['status']:\n self.logger.debug(\"Channel appears to be offline\")\n\n streams = {}\n for name, url_suffix in QUALITIES.items():\n url = HLS_URL_FORMAT.format(channel_info['streamkey'], url_suffix)\n if not self._check_stream(url):\n continue\n\n streams[name] = HLSStream(self.session, url)\n\n return streams\n\n\n__plugin__ = GoodGame\n", "path": "src/streamlink/plugins/goodgame.py"}]}
1,987
344
gh_patches_debug_8349
rasdani/github-patches
git_diff
pre-commit__pre-commit-206
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Windows: Large number of files causes `xargs: ... Bad file number` Originally here: https://github.com/pre-commit/pre-commit-hooks/issues/41 </issue> <code> [start of pre_commit/languages/helpers.py] 1 from __future__ import unicode_literals 2 3 import pipes 4 5 6 def file_args_to_stdin(file_args): 7 return '\0'.join(list(file_args) + ['']) 8 9 10 def run_hook(env, hook, file_args): 11 quoted_args = [pipes.quote(arg) for arg in hook['args']] 12 return env.run( 13 ' '.join(['xargs', '-0', hook['entry']] + quoted_args), 14 stdin=file_args_to_stdin(file_args), 15 retcode=None, 16 ) 17 18 19 class Environment(object): 20 def __init__(self, repo_cmd_runner): 21 self.repo_cmd_runner = repo_cmd_runner 22 23 @property 24 def env_prefix(self): 25 """env_prefix is a value that is prefixed to the command that is run. 26 27 Usually this is to source a virtualenv, etc. 28 29 Commands basically end up looking like: 30 31 bash -c '{env_prefix} {cmd}' 32 33 so you'll often want to end your prefix with && 34 """ 35 raise NotImplementedError 36 37 def run(self, cmd, **kwargs): 38 """Returns (returncode, stdout, stderr).""" 39 return self.repo_cmd_runner.run( 40 ['bash', '-c', ' '.join([self.env_prefix, cmd])], **kwargs 41 ) 42 [end of pre_commit/languages/helpers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pre_commit/languages/helpers.py b/pre_commit/languages/helpers.py --- a/pre_commit/languages/helpers.py +++ b/pre_commit/languages/helpers.py @@ -10,7 +10,9 @@ def run_hook(env, hook, file_args): quoted_args = [pipes.quote(arg) for arg in hook['args']] return env.run( - ' '.join(['xargs', '-0', hook['entry']] + quoted_args), + # Use -s 4000 (slightly less than posix mandated minimum) + # This is to prevent "xargs: ... Bad file number" on windows + ' '.join(['xargs', '-0', '-s4000', hook['entry']] + quoted_args), stdin=file_args_to_stdin(file_args), retcode=None, )
{"golden_diff": "diff --git a/pre_commit/languages/helpers.py b/pre_commit/languages/helpers.py\n--- a/pre_commit/languages/helpers.py\n+++ b/pre_commit/languages/helpers.py\n@@ -10,7 +10,9 @@\n def run_hook(env, hook, file_args):\n quoted_args = [pipes.quote(arg) for arg in hook['args']]\n return env.run(\n- ' '.join(['xargs', '-0', hook['entry']] + quoted_args),\n+ # Use -s 4000 (slightly less than posix mandated minimum)\n+ # This is to prevent \"xargs: ... Bad file number\" on windows\n+ ' '.join(['xargs', '-0', '-s4000', hook['entry']] + quoted_args),\n stdin=file_args_to_stdin(file_args),\n retcode=None,\n )\n", "issue": "Windows: Large number of files causes `xargs: ... Bad file number`\nOriginally here: https://github.com/pre-commit/pre-commit-hooks/issues/41\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport pipes\n\n\ndef file_args_to_stdin(file_args):\n return '\\0'.join(list(file_args) + [''])\n\n\ndef run_hook(env, hook, file_args):\n quoted_args = [pipes.quote(arg) for arg in hook['args']]\n return env.run(\n ' '.join(['xargs', '-0', hook['entry']] + quoted_args),\n stdin=file_args_to_stdin(file_args),\n retcode=None,\n )\n\n\nclass Environment(object):\n def __init__(self, repo_cmd_runner):\n self.repo_cmd_runner = repo_cmd_runner\n\n @property\n def env_prefix(self):\n \"\"\"env_prefix is a value that is prefixed to the command that is run.\n\n Usually this is to source a virtualenv, etc.\n\n Commands basically end up looking like:\n\n bash -c '{env_prefix} {cmd}'\n\n so you'll often want to end your prefix with &&\n \"\"\"\n raise NotImplementedError\n\n def run(self, cmd, **kwargs):\n \"\"\"Returns (returncode, stdout, stderr).\"\"\"\n return self.repo_cmd_runner.run(\n ['bash', '-c', ' '.join([self.env_prefix, cmd])], **kwargs\n )\n", "path": "pre_commit/languages/helpers.py"}]}
913
183
gh_patches_debug_8
rasdani/github-patches
git_diff
kivy__python-for-android-2797
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Python exception when using colorlog due to incomplete IO implementation in sys.stderr I am attempting to run a program which uses `TTYColoredFormatter` from [colorlog](https://pypi.org/project/colorlog/). This class formats log messages, adding ANSI escape codes _only_ if the stream it is writing to returns `True` for `stream.isatty()`. Unfortunately, python-for-android's bootstrap code replaces sys.stderr and sys.stdout with a custom `LogFile` object: https://github.com/kivy/python-for-android/blob/53d77fc26c9e37eb6ce05f8899f4dae8334842b1/pythonforandroid/bootstraps/common/build/jni/application/src/start.c#L226-L242 This object doesn't implement `isatty()` (or much else, for that matter). As a result, the program raises an exception: ``` 03-03 13:32:56.222 5806 5891 I python : Traceback (most recent call last): 03-03 13:32:56.222 5806 5891 I python : File "/home/jenkins/workspace/kolibri-installer-android-pr/src/main.py", line 3, in <module> 03-03 13:32:56.222 5806 5891 I python : File "/home/jenkins/workspace/kolibri-installer-android-pr/src/kolibri_android/main_activity/__main__.py", line 7, in main 03-03 13:32:56.222 5806 5891 I python : File "/home/jenkins/workspace/kolibri-installer-android-pr/src/kolibri_android/main_activity/activity.py", line 19, in <module> 03-03 13:32:56.222 5806 5891 I python : File "/home/jenkins/workspace/kolibri-installer-android-pr/src/kolibri_android/kolibri_utils.py", line 13, in <module> 03-03 13:32:56.223 5806 5891 I python : File "/home/jenkins/workspace/kolibri-installer-android-pr/src/kolibri_android/android_whitenoise.py", line 11, in <module> 03-03 13:32:56.223 5806 5891 I python : File "/home/jenkins/workspace/kolibri-installer-android-pr/src/kolibri/__init__.py", line 10, in <module> 03-03 13:32:56.223 5806 5891 I python : File "/home/jenkins/workspace/kolibri-installer-android-pr/src/kolibri/utils/env.py", line 29, in <module> 03-03 13:32:56.223 5806 5891 I python : File "/home/jenkins/workspace/kolibri-installer-android-pr/src/kolibri/dist/colorlog/colorlog.py", line 203, in __init__ 03-03 13:32:56.223 5806 5891 I python : AttributeError: 'LogFile' object has no attribute 'isatty' ``` (For reference, we're using colorlog v3.2.0, so the code raising the exception looks like this: https://github.com/borntyping/python-colorlog/blob/v3.2.0/colorlog/colorlog.py#L191-L211). Service don t start anymore, as smallIconName extra is now mandatory https://github.com/kivy/python-for-android/blob/8cb497dd89e402478011df61f4690b963a0c96da/pythonforandroid/bootstraps/common/build/src/main/java/org/kivy/android/PythonService.java#L116 ```java.lang.NullPointerException: Attempt to invoke virtual method 'boolean java.lang.String.equals(java.lang.Object)' on a null object reference``` We could test if null before. </issue> <code> [start of pythonforandroid/__init__.py] 1 __version__ = '2023.02.10' 2 [end of pythonforandroid/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pythonforandroid/__init__.py b/pythonforandroid/__init__.py --- a/pythonforandroid/__init__.py +++ b/pythonforandroid/__init__.py @@ -1 +1 @@ -__version__ = '2023.02.10' +__version__ = '2023.05.21'
{"golden_diff": "diff --git a/pythonforandroid/__init__.py b/pythonforandroid/__init__.py\n--- a/pythonforandroid/__init__.py\n+++ b/pythonforandroid/__init__.py\n@@ -1 +1 @@\n-__version__ = '2023.02.10'\n+__version__ = '2023.05.21'\n", "issue": "Python exception when using colorlog due to incomplete IO implementation in sys.stderr\nI am attempting to run a program which uses `TTYColoredFormatter` from [colorlog](https://pypi.org/project/colorlog/). This class formats log messages, adding ANSI escape codes _only_ if the stream it is writing to returns `True` for `stream.isatty()`.\r\n\r\nUnfortunately, python-for-android's bootstrap code replaces sys.stderr and sys.stdout with a custom `LogFile` object: https://github.com/kivy/python-for-android/blob/53d77fc26c9e37eb6ce05f8899f4dae8334842b1/pythonforandroid/bootstraps/common/build/jni/application/src/start.c#L226-L242\r\n\r\nThis object doesn't implement `isatty()` (or much else, for that matter). As a result, the program raises an exception:\r\n\r\n```\r\n03-03 13:32:56.222 5806 5891 I python : Traceback (most recent call last):\r\n03-03 13:32:56.222 5806 5891 I python : File \"/home/jenkins/workspace/kolibri-installer-android-pr/src/main.py\", line 3, in <module>\r\n03-03 13:32:56.222 5806 5891 I python : File \"/home/jenkins/workspace/kolibri-installer-android-pr/src/kolibri_android/main_activity/__main__.py\", line 7, in main\r\n03-03 13:32:56.222 5806 5891 I python : File \"/home/jenkins/workspace/kolibri-installer-android-pr/src/kolibri_android/main_activity/activity.py\", line 19, in <module>\r\n03-03 13:32:56.222 5806 5891 I python : File \"/home/jenkins/workspace/kolibri-installer-android-pr/src/kolibri_android/kolibri_utils.py\", line 13, in <module>\r\n03-03 13:32:56.223 5806 5891 I python : File \"/home/jenkins/workspace/kolibri-installer-android-pr/src/kolibri_android/android_whitenoise.py\", line 11, in <module>\r\n03-03 13:32:56.223 5806 5891 I python : File \"/home/jenkins/workspace/kolibri-installer-android-pr/src/kolibri/__init__.py\", line 10, in <module>\r\n03-03 13:32:56.223 5806 5891 I python : File \"/home/jenkins/workspace/kolibri-installer-android-pr/src/kolibri/utils/env.py\", line 29, in <module>\r\n03-03 13:32:56.223 5806 5891 I python : File \"/home/jenkins/workspace/kolibri-installer-android-pr/src/kolibri/dist/colorlog/colorlog.py\", line 203, in __init__\r\n03-03 13:32:56.223 5806 5891 I python : AttributeError: 'LogFile' object has no attribute 'isatty'\r\n```\r\n\r\n(For reference, we're using colorlog v3.2.0, so the code raising the exception looks like this: https://github.com/borntyping/python-colorlog/blob/v3.2.0/colorlog/colorlog.py#L191-L211).\nService don t start anymore, as smallIconName extra is now mandatory\nhttps://github.com/kivy/python-for-android/blob/8cb497dd89e402478011df61f4690b963a0c96da/pythonforandroid/bootstraps/common/build/src/main/java/org/kivy/android/PythonService.java#L116\r\n\r\n```java.lang.NullPointerException: Attempt to invoke virtual method 'boolean java.lang.String.equals(java.lang.Object)' on a null object reference```\r\n\r\nWe could test if null before.\n", "before_files": [{"content": "__version__ = '2023.02.10'\n", "path": "pythonforandroid/__init__.py"}]}
1,560
81
gh_patches_debug_13152
rasdani/github-patches
git_diff
netbox-community__netbox-16013
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Unable to reference object id in site using REST API ### Deployment Type Self-hosted ### NetBox Version v4.0.0 ### Python Version 3.10 ### Steps to Reproduce 1. Create a tenant named "Test Tenant". Make a note of the tenant's id (in my case it's 7) 2. Create a site using REST API ``` curl -s -X POST \ -H "Authorization: Token 0123456789abcdef0123456789abcdef01234567" \ -H "Content-Type: application/json" \ http://localhost:32768/api/dcim/sites/ \ --data '{"name": "Test site 1", "slug": "test-site-1", "tenant": 7}' | jq '.' ``` ### Expected Behavior The site is created in and tenant is set to Test tenant. ### Observed Behavior ``` { "tenant": { "non_field_errors": [ "Invalid data. Expected a dictionary, but got int." ] } } ``` The same API calls work as expected in NetBox 3.7. </issue> <code> [start of netbox/dcim/api/serializers_/sites.py] 1 from rest_framework import serializers 2 from timezone_field.rest_framework import TimeZoneSerializerField 3 4 from dcim.choices import * 5 from dcim.models import Location, Region, Site, SiteGroup 6 from ipam.api.serializers_.asns import ASNSerializer 7 from ipam.models import ASN 8 from netbox.api.fields import ChoiceField, RelatedObjectCountField, SerializedPKRelatedField 9 from netbox.api.serializers import NestedGroupModelSerializer, NetBoxModelSerializer 10 from tenancy.api.serializers_.tenants import TenantSerializer 11 from ..nested_serializers import * 12 13 __all__ = ( 14 'LocationSerializer', 15 'RegionSerializer', 16 'SiteGroupSerializer', 17 'SiteSerializer', 18 ) 19 20 21 class RegionSerializer(NestedGroupModelSerializer): 22 url = serializers.HyperlinkedIdentityField(view_name='dcim-api:region-detail') 23 parent = NestedRegionSerializer(required=False, allow_null=True, default=None) 24 site_count = serializers.IntegerField(read_only=True) 25 26 class Meta: 27 model = Region 28 fields = [ 29 'id', 'url', 'display', 'name', 'slug', 'parent', 'description', 'tags', 'custom_fields', 'created', 30 'last_updated', 'site_count', '_depth', 31 ] 32 brief_fields = ('id', 'url', 'display', 'name', 'slug', 'description', 'site_count', '_depth') 33 34 35 class SiteGroupSerializer(NestedGroupModelSerializer): 36 url = serializers.HyperlinkedIdentityField(view_name='dcim-api:sitegroup-detail') 37 parent = NestedSiteGroupSerializer(required=False, allow_null=True, default=None) 38 site_count = serializers.IntegerField(read_only=True) 39 40 class Meta: 41 model = SiteGroup 42 fields = [ 43 'id', 'url', 'display', 'name', 'slug', 'parent', 'description', 'tags', 'custom_fields', 'created', 44 'last_updated', 'site_count', '_depth', 45 ] 46 brief_fields = ('id', 'url', 'display', 'name', 'slug', 'description', 'site_count', '_depth') 47 48 49 class SiteSerializer(NetBoxModelSerializer): 50 url = serializers.HyperlinkedIdentityField(view_name='dcim-api:site-detail') 51 status = ChoiceField(choices=SiteStatusChoices, required=False) 52 region = RegionSerializer(nested=True, required=False, allow_null=True) 53 group = SiteGroupSerializer(nested=True, required=False, allow_null=True) 54 tenant = TenantSerializer(required=False, allow_null=True) 55 time_zone = TimeZoneSerializerField(required=False, allow_null=True) 56 asns = SerializedPKRelatedField( 57 queryset=ASN.objects.all(), 58 serializer=ASNSerializer, 59 nested=True, 60 required=False, 61 many=True 62 ) 63 64 # Related object counts 65 circuit_count = 
RelatedObjectCountField('circuit_terminations') 66 device_count = RelatedObjectCountField('devices') 67 prefix_count = RelatedObjectCountField('prefixes') 68 rack_count = RelatedObjectCountField('racks') 69 vlan_count = RelatedObjectCountField('vlans') 70 virtualmachine_count = RelatedObjectCountField('virtual_machines') 71 72 class Meta: 73 model = Site 74 fields = [ 75 'id', 'url', 'display', 'name', 'slug', 'status', 'region', 'group', 'tenant', 'facility', 'time_zone', 76 'description', 'physical_address', 'shipping_address', 'latitude', 'longitude', 'comments', 'asns', 'tags', 77 'custom_fields', 'created', 'last_updated', 'circuit_count', 'device_count', 'prefix_count', 'rack_count', 78 'virtualmachine_count', 'vlan_count', 79 ] 80 brief_fields = ('id', 'url', 'display', 'name', 'description', 'slug') 81 82 83 class LocationSerializer(NestedGroupModelSerializer): 84 url = serializers.HyperlinkedIdentityField(view_name='dcim-api:location-detail') 85 site = SiteSerializer(nested=True) 86 parent = NestedLocationSerializer(required=False, allow_null=True, default=None) 87 status = ChoiceField(choices=LocationStatusChoices, required=False) 88 tenant = TenantSerializer(nested=True, required=False, allow_null=True) 89 rack_count = serializers.IntegerField(read_only=True) 90 device_count = serializers.IntegerField(read_only=True) 91 92 class Meta: 93 model = Location 94 fields = [ 95 'id', 'url', 'display', 'name', 'slug', 'site', 'parent', 'status', 'tenant', 'facility', 'description', 96 'tags', 'custom_fields', 'created', 'last_updated', 'rack_count', 'device_count', '_depth', 97 ] 98 brief_fields = ('id', 'url', 'display', 'name', 'slug', 'description', 'rack_count', '_depth') 99 [end of netbox/dcim/api/serializers_/sites.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/netbox/dcim/api/serializers_/sites.py b/netbox/dcim/api/serializers_/sites.py --- a/netbox/dcim/api/serializers_/sites.py +++ b/netbox/dcim/api/serializers_/sites.py @@ -51,7 +51,7 @@ status = ChoiceField(choices=SiteStatusChoices, required=False) region = RegionSerializer(nested=True, required=False, allow_null=True) group = SiteGroupSerializer(nested=True, required=False, allow_null=True) - tenant = TenantSerializer(required=False, allow_null=True) + tenant = TenantSerializer(nested=True, required=False, allow_null=True) time_zone = TimeZoneSerializerField(required=False, allow_null=True) asns = SerializedPKRelatedField( queryset=ASN.objects.all(),
{"golden_diff": "diff --git a/netbox/dcim/api/serializers_/sites.py b/netbox/dcim/api/serializers_/sites.py\n--- a/netbox/dcim/api/serializers_/sites.py\n+++ b/netbox/dcim/api/serializers_/sites.py\n@@ -51,7 +51,7 @@\n status = ChoiceField(choices=SiteStatusChoices, required=False)\n region = RegionSerializer(nested=True, required=False, allow_null=True)\n group = SiteGroupSerializer(nested=True, required=False, allow_null=True)\n- tenant = TenantSerializer(required=False, allow_null=True)\n+ tenant = TenantSerializer(nested=True, required=False, allow_null=True)\n time_zone = TimeZoneSerializerField(required=False, allow_null=True)\n asns = SerializedPKRelatedField(\n queryset=ASN.objects.all(),\n", "issue": "Unable to reference object id in site using REST API \n### Deployment Type\r\n\r\nSelf-hosted\r\n\r\n### NetBox Version\r\n\r\nv4.0.0\r\n\r\n### Python Version\r\n\r\n3.10\r\n\r\n### Steps to Reproduce\r\n\r\n1. Create a tenant named \"Test Tenant\". Make a note of the tenant's id (in my case it's 7)\r\n2. Create a site using REST API\r\n```\r\ncurl -s -X POST \\\r\n-H \"Authorization: Token 0123456789abcdef0123456789abcdef01234567\" \\\r\n-H \"Content-Type: application/json\" \\\r\nhttp://localhost:32768/api/dcim/sites/ \\\r\n--data '{\"name\": \"Test site 1\", \"slug\": \"test-site-1\", \"tenant\": 7}' | jq '.'\r\n```\r\n\r\n### Expected Behavior\r\n\r\nThe site is created in and tenant is set to Test tenant.\r\n\r\n### Observed Behavior\r\n\r\n```\r\n{\r\n \"tenant\": {\r\n \"non_field_errors\": [\r\n \"Invalid data. Expected a dictionary, but got int.\"\r\n ]\r\n }\r\n}\r\n```\r\n\r\nThe same API calls work as expected in NetBox 3.7.\n", "before_files": [{"content": "from rest_framework import serializers\nfrom timezone_field.rest_framework import TimeZoneSerializerField\n\nfrom dcim.choices import *\nfrom dcim.models import Location, Region, Site, SiteGroup\nfrom ipam.api.serializers_.asns import ASNSerializer\nfrom ipam.models import ASN\nfrom netbox.api.fields import ChoiceField, RelatedObjectCountField, SerializedPKRelatedField\nfrom netbox.api.serializers import NestedGroupModelSerializer, NetBoxModelSerializer\nfrom tenancy.api.serializers_.tenants import TenantSerializer\nfrom ..nested_serializers import *\n\n__all__ = (\n 'LocationSerializer',\n 'RegionSerializer',\n 'SiteGroupSerializer',\n 'SiteSerializer',\n)\n\n\nclass RegionSerializer(NestedGroupModelSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='dcim-api:region-detail')\n parent = NestedRegionSerializer(required=False, allow_null=True, default=None)\n site_count = serializers.IntegerField(read_only=True)\n\n class Meta:\n model = Region\n fields = [\n 'id', 'url', 'display', 'name', 'slug', 'parent', 'description', 'tags', 'custom_fields', 'created',\n 'last_updated', 'site_count', '_depth',\n ]\n brief_fields = ('id', 'url', 'display', 'name', 'slug', 'description', 'site_count', '_depth')\n\n\nclass SiteGroupSerializer(NestedGroupModelSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='dcim-api:sitegroup-detail')\n parent = NestedSiteGroupSerializer(required=False, allow_null=True, default=None)\n site_count = serializers.IntegerField(read_only=True)\n\n class Meta:\n model = SiteGroup\n fields = [\n 'id', 'url', 'display', 'name', 'slug', 'parent', 'description', 'tags', 'custom_fields', 'created',\n 'last_updated', 'site_count', '_depth',\n ]\n brief_fields = ('id', 'url', 'display', 'name', 'slug', 'description', 'site_count', '_depth')\n\n\nclass 
SiteSerializer(NetBoxModelSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='dcim-api:site-detail')\n status = ChoiceField(choices=SiteStatusChoices, required=False)\n region = RegionSerializer(nested=True, required=False, allow_null=True)\n group = SiteGroupSerializer(nested=True, required=False, allow_null=True)\n tenant = TenantSerializer(required=False, allow_null=True)\n time_zone = TimeZoneSerializerField(required=False, allow_null=True)\n asns = SerializedPKRelatedField(\n queryset=ASN.objects.all(),\n serializer=ASNSerializer,\n nested=True,\n required=False,\n many=True\n )\n\n # Related object counts\n circuit_count = RelatedObjectCountField('circuit_terminations')\n device_count = RelatedObjectCountField('devices')\n prefix_count = RelatedObjectCountField('prefixes')\n rack_count = RelatedObjectCountField('racks')\n vlan_count = RelatedObjectCountField('vlans')\n virtualmachine_count = RelatedObjectCountField('virtual_machines')\n\n class Meta:\n model = Site\n fields = [\n 'id', 'url', 'display', 'name', 'slug', 'status', 'region', 'group', 'tenant', 'facility', 'time_zone',\n 'description', 'physical_address', 'shipping_address', 'latitude', 'longitude', 'comments', 'asns', 'tags',\n 'custom_fields', 'created', 'last_updated', 'circuit_count', 'device_count', 'prefix_count', 'rack_count',\n 'virtualmachine_count', 'vlan_count',\n ]\n brief_fields = ('id', 'url', 'display', 'name', 'description', 'slug')\n\n\nclass LocationSerializer(NestedGroupModelSerializer):\n url = serializers.HyperlinkedIdentityField(view_name='dcim-api:location-detail')\n site = SiteSerializer(nested=True)\n parent = NestedLocationSerializer(required=False, allow_null=True, default=None)\n status = ChoiceField(choices=LocationStatusChoices, required=False)\n tenant = TenantSerializer(nested=True, required=False, allow_null=True)\n rack_count = serializers.IntegerField(read_only=True)\n device_count = serializers.IntegerField(read_only=True)\n\n class Meta:\n model = Location\n fields = [\n 'id', 'url', 'display', 'name', 'slug', 'site', 'parent', 'status', 'tenant', 'facility', 'description',\n 'tags', 'custom_fields', 'created', 'last_updated', 'rack_count', 'device_count', '_depth',\n ]\n brief_fields = ('id', 'url', 'display', 'name', 'slug', 'description', 'rack_count', '_depth')\n", "path": "netbox/dcim/api/serializers_/sites.py"}]}
2,003
176
gh_patches_debug_17100
rasdani/github-patches
git_diff
qtile__qtile-4109
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Ampersand in window name return an error ### The issue: ## Qtile version 0.22.1 ## Issue Ampersands in window name return an error with the WindowTabs widget ``` Traceback (most recent call last): File "/usr/lib/python3.10/site-packages/libqtile/hook.py", line 404, in fire i(*args, **kwargs) File "/usr/lib/python3.10/site-packages/libqtile/widget/windowtabs.py", line 82, in update self.text = self.separator.join(names) File "/usr/lib/python3.10/site-packages/libqtile/widget/base.py", line 483, in text self.layout.text = self.formatted_text File "/usr/lib/python3.10/site-packages/libqtile/drawer.py", line 72, in text attrlist, value, accel_char = pangocffi.parse_markup(value) File "/usr/lib/python3.10/site-packages/libqtile/pangocffi.py", line 186, in parse_markup raise Exception("parse_markup() failed for %s" % value) Exception: parse_markup() failed for b'<b>Search \xc2\xb7 & \xe2\x80\x94 Mozilla Firefox</b>' ``` The same goes for the Mpris2 widget ``` 2023-01-07 17:07:22,656 ERROR libqtile loop.py:_handle_exception():L63 parse_markup() failed for b'Fireman & Dancer - Royal Republic' NoneType: None 2023-01-07 17:07:22,656 ERROR libqtile loop.py:_handle_exception():L63 parse_markup() failed for b'Fireman & Dancer - Royal Republic' NoneType: None ```` Found a similar issue [#1685](https://github.com/qtile/qtile/issues/1685) but for the WindowName widget ### Required: - [X] I have searched past issues to see if this bug has already been reported. </issue> <code> [start of libqtile/widget/windowtabs.py] 1 # Copyright (c) 2012-2013 Craig Barnes 2 # Copyright (c) 2012 roger 3 # Copyright (c) 2012, 2014 Tycho Andersen 4 # Copyright (c) 2014 Sean Vig 5 # Copyright (c) 2014 Adi Sieker 6 # 7 # Permission is hereby granted, free of charge, to any person obtaining a copy 8 # of this software and associated documentation files (the "Software"), to deal 9 # in the Software without restriction, including without limitation the rights 10 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 11 # copies of the Software, and to permit persons to whom the Software is 12 # furnished to do so, subject to the following conditions: 13 # 14 # The above copyright notice and this permission notice shall be included in 15 # all copies or substantial portions of the Software. 16 # 17 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 20 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 22 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 23 # SOFTWARE. 24 25 from libqtile import bar, hook 26 from libqtile.log_utils import logger 27 from libqtile.widget import base 28 29 30 class WindowTabs(base._TextBox): 31 """ 32 Displays the name of each window in the current group. 33 Contrary to TaskList this is not an interactive widget. 34 The window that currently has focus is highlighted. 35 """ 36 37 defaults = [ 38 ("separator", " | ", "Task separator text."), 39 ("selected", ("<b>", "</b>"), "Selected task indicator"), 40 ( 41 "parse_text", 42 None, 43 "Function to parse and modify window names. " 44 "e.g. 
function in config that removes excess " 45 "strings from window name: " 46 "def my_func(text)" 47 ' for string in [" - Chromium", " - Firefox"]:' 48 ' text = text.replace(string, "")' 49 " return text" 50 "then set option parse_text=my_func", 51 ), 52 ] 53 54 def __init__(self, **config): 55 width = config.pop("width", bar.STRETCH) 56 base._TextBox.__init__(self, width=width, **config) 57 self.add_defaults(WindowTabs.defaults) 58 if not isinstance(self.selected, (tuple, list)): 59 self.selected = (self.selected, self.selected) 60 61 def _configure(self, qtile, bar): 62 base._TextBox._configure(self, qtile, bar) 63 hook.subscribe.client_name_updated(self.update) 64 hook.subscribe.focus_change(self.update) 65 hook.subscribe.float_change(self.update) 66 self.add_callbacks({"Button1": self.bar.screen.group.next_window}) 67 68 def update(self, *args): 69 names = [] 70 for w in self.bar.screen.group.windows: 71 state = "" 72 if w.maximized: 73 state = "[] " 74 elif w.minimized: 75 state = "_ " 76 elif w.floating: 77 state = "V " 78 task = "%s%s" % (state, w.name if w and w.name else " ") 79 if w is self.bar.screen.group.current_window: 80 task = task.join(self.selected) 81 names.append(task) 82 self.text = self.separator.join(names) 83 if callable(self.parse_text): 84 try: 85 self.text = self.parse_text(self.text) 86 except: # noqa: E722 87 logger.exception("parse_text function failed:") 88 self.bar.draw() 89 [end of libqtile/widget/windowtabs.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/libqtile/widget/windowtabs.py b/libqtile/widget/windowtabs.py --- a/libqtile/widget/windowtabs.py +++ b/libqtile/widget/windowtabs.py @@ -22,7 +22,7 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. -from libqtile import bar, hook +from libqtile import bar, hook, pangocffi from libqtile.log_utils import logger from libqtile.widget import base @@ -76,6 +76,7 @@ elif w.floating: state = "V " task = "%s%s" % (state, w.name if w and w.name else " ") + task = pangocffi.markup_escape_text(task) if w is self.bar.screen.group.current_window: task = task.join(self.selected) names.append(task)
{"golden_diff": "diff --git a/libqtile/widget/windowtabs.py b/libqtile/widget/windowtabs.py\n--- a/libqtile/widget/windowtabs.py\n+++ b/libqtile/widget/windowtabs.py\n@@ -22,7 +22,7 @@\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n \n-from libqtile import bar, hook\n+from libqtile import bar, hook, pangocffi\n from libqtile.log_utils import logger\n from libqtile.widget import base\n \n@@ -76,6 +76,7 @@\n elif w.floating:\n state = \"V \"\n task = \"%s%s\" % (state, w.name if w and w.name else \" \")\n+ task = pangocffi.markup_escape_text(task)\n if w is self.bar.screen.group.current_window:\n task = task.join(self.selected)\n names.append(task)\n", "issue": "Ampersand in window name return an error\n### The issue:\n\n## Qtile version\r\n0.22.1\r\n\r\n## Issue\r\nAmpersands in window name return an error with the WindowTabs widget\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.10/site-packages/libqtile/hook.py\", line 404, in fire\r\n i(*args, **kwargs)\r\n File \"/usr/lib/python3.10/site-packages/libqtile/widget/windowtabs.py\", line 82, in update\r\n self.text = self.separator.join(names)\r\n File \"/usr/lib/python3.10/site-packages/libqtile/widget/base.py\", line 483, in text\r\n self.layout.text = self.formatted_text\r\n File \"/usr/lib/python3.10/site-packages/libqtile/drawer.py\", line 72, in text\r\n attrlist, value, accel_char = pangocffi.parse_markup(value)\r\n File \"/usr/lib/python3.10/site-packages/libqtile/pangocffi.py\", line 186, in parse_markup\r\n raise Exception(\"parse_markup() failed for %s\" % value)\r\nException: parse_markup() failed for b'<b>Search \\xc2\\xb7 & \\xe2\\x80\\x94 Mozilla Firefox</b>'\r\n```\r\n\r\nThe same goes for the Mpris2 widget\r\n```\r\n2023-01-07 17:07:22,656 ERROR libqtile loop.py:_handle_exception():L63 parse_markup() failed for b'Fireman & Dancer - Royal Republic'\r\nNoneType: None\r\n2023-01-07 17:07:22,656 ERROR libqtile loop.py:_handle_exception():L63 parse_markup() failed for b'Fireman & Dancer - Royal Republic'\r\nNoneType: None\r\n````\r\n\r\nFound a similar issue [#1685](https://github.com/qtile/qtile/issues/1685) but for the WindowName widget\n\n### Required:\n\n- [X] I have searched past issues to see if this bug has already been reported.\n", "before_files": [{"content": "# Copyright (c) 2012-2013 Craig Barnes\n# Copyright (c) 2012 roger\n# Copyright (c) 2012, 2014 Tycho Andersen\n# Copyright (c) 2014 Sean Vig\n# Copyright (c) 2014 Adi Sieker\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom libqtile import bar, hook\nfrom libqtile.log_utils import logger\nfrom libqtile.widget import base\n\n\nclass WindowTabs(base._TextBox):\n \"\"\"\n Displays the name of each window in the current group.\n Contrary to TaskList this is not an interactive widget.\n The window that currently has focus is highlighted.\n \"\"\"\n\n defaults = [\n (\"separator\", \" | \", \"Task separator text.\"),\n (\"selected\", (\"<b>\", \"</b>\"), \"Selected task indicator\"),\n (\n \"parse_text\",\n None,\n \"Function to parse and modify window names. \"\n \"e.g. function in config that removes excess \"\n \"strings from window name: \"\n \"def my_func(text)\"\n ' for string in [\" - Chromium\", \" - Firefox\"]:'\n ' text = text.replace(string, \"\")'\n \" return text\"\n \"then set option parse_text=my_func\",\n ),\n ]\n\n def __init__(self, **config):\n width = config.pop(\"width\", bar.STRETCH)\n base._TextBox.__init__(self, width=width, **config)\n self.add_defaults(WindowTabs.defaults)\n if not isinstance(self.selected, (tuple, list)):\n self.selected = (self.selected, self.selected)\n\n def _configure(self, qtile, bar):\n base._TextBox._configure(self, qtile, bar)\n hook.subscribe.client_name_updated(self.update)\n hook.subscribe.focus_change(self.update)\n hook.subscribe.float_change(self.update)\n self.add_callbacks({\"Button1\": self.bar.screen.group.next_window})\n\n def update(self, *args):\n names = []\n for w in self.bar.screen.group.windows:\n state = \"\"\n if w.maximized:\n state = \"[] \"\n elif w.minimized:\n state = \"_ \"\n elif w.floating:\n state = \"V \"\n task = \"%s%s\" % (state, w.name if w and w.name else \" \")\n if w is self.bar.screen.group.current_window:\n task = task.join(self.selected)\n names.append(task)\n self.text = self.separator.join(names)\n if callable(self.parse_text):\n try:\n self.text = self.parse_text(self.text)\n except: # noqa: E722\n logger.exception(\"parse_text function failed:\")\n self.bar.draw()\n", "path": "libqtile/widget/windowtabs.py"}]}
1,982
197
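A note on the qtile record above: the accepted patch routes every window title through `pangocffi.markup_escape_text` before the `<b>…</b>` markers are applied, so literal `&`, `<` and `>` in titles can no longer break `parse_markup()`. The sketch below shows the same idea in plain Python — `xml.sax.saxutils.escape` is used only as a stand-in for qtile's pango escaping helper, and `format_task` is a hypothetical name, not a function from the qtile code base.

```python
from xml.sax.saxutils import escape  # stand-in for libqtile.pangocffi.markup_escape_text


def format_task(name: str, selected: bool, markers=("<b>", "</b>")) -> str:
    """Escape a window title before it is embedded in pango markup."""
    task = escape(name or " ")      # '&' -> '&amp;', '<' -> '&lt;', '>' -> '&gt;'
    if selected:
        task = task.join(markers)   # wrap the escaped text; the widget's own tags stay raw
    return task


# The title from the issue no longer trips the markup parser once escaped:
print(format_task("Fireman & Dancer - Royal Republic", selected=True))
```

The ordering detail visible in the golden diff matters: escaping happens before the selected-window markers are joined in, so the widget's `<b>` tags survive while user-controlled text is neutralised.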
gh_patches_debug_26708
rasdani/github-patches
git_diff
python-telegram-bot__python-telegram-bot-3552
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG] telegram.ext._utils.stack.was_called_by gives incorrect result on 64-bit machine ### Steps to Reproduce 1. Set up virtualenv using `python3 -m venv` on a 64-bit machine. 2. Initialize an `application` object using `telegram.ext.ApplicationBuilder`: ``` python from telegram.ext import ApplicationBuilder def main() -> None: application = ApplicationBuilder().token("TOKEN").build() if __name__ == "__main__": main() ``` 4. Run the bot in virtualenv and it will give a warning messgae like ```PTBUserWarning: `Application` instances should be built via the `ApplicationBuilder`.``` ### Expected behaviour The warning message shouldn't be given since `ApplicationBuilder` is being used. ### Actual behaviour ``` bash $ python test.py ../venv/lib64/python3.11/site-packages/telegram/ext/_applicationbuilder.py:292: PTBUserWarning: `Application` instances should be built via the `ApplicationBuilder`. ] = DefaultValue.get_value( # pylint: disable=not-callable ``` ### Operating System Fedora Linux 37 (Server Edition) ### Version of Python, python-telegram-bot & dependencies ```shell python-telegram-bot 20.0 Bot API 6.4 Python 3.11.1 (main, Dec 7 2022, 00:00:00) [GCC 12.2.1 20221121 (Red Hat 12.2.1-4)] ``` ### Relevant log output _No response_ ### Additional Context I believe this is caused by comparing a resolved path with an unresolved path [here](https://github.com/python-telegram-bot/python-telegram-bot/blob/master/telegram/ext/_application.py#L273). In my case, it finds `../venv/lib/python3.11/site-packages/telegram/ext/_applicationbuilder.py` not equal to `../venv/lib64/python3.11/site-packages/telegram/ext/_applicationbuilder.py`, the directory `lib64` being a symlink to `lib`. A quick (maybe not final) fix is to modify [stack.py](https://github.com/python-telegram-bot/python-telegram-bot/blob/master/telegram/ext/_utils/stack.py) so that `was_called_by` always resolves paths from frame: ``` python while frame.f_back: frame = frame.f_back if Path(frame.f_code.co_filename).resolve() == caller: return True ``` I have tested it and the warning no longer appears. </issue> <code> [start of telegram/ext/_utils/stack.py] 1 #!/usr/bin/env python 2 # 3 # A library that provides a Python interface to the Telegram Bot API 4 # Copyright (C) 2015-2023 5 # Leandro Toledo de Souza <[email protected]> 6 # 7 # This program is free software: you can redistribute it and/or modify 8 # it under the terms of the GNU Lesser Public License as published by 9 # the Free Software Foundation, either version 3 of the License, or 10 # (at your option) any later version. 11 # 12 # This program is distributed in the hope that it will be useful, 13 # but WITHOUT ANY WARRANTY; without even the implied warranty of 14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 # GNU Lesser Public License for more details. 16 # 17 # You should have received a copy of the GNU Lesser Public License 18 # along with this program. If not, see [http://www.gnu.org/licenses/]. 19 """This module contains helper functions related to inspecting the program stack. 20 21 .. versionadded:: 20.0 22 23 Warning: 24 Contents of this module are intended to be used internally by the library and *not* by the 25 user. Changes to this module are not considered breaking changes and may not be documented in 26 the changelog. 
27 """ 28 from pathlib import Path 29 from types import FrameType 30 from typing import Optional 31 32 33 def was_called_by(frame: Optional[FrameType], caller: Path) -> bool: 34 """Checks if the passed frame was called by the specified file. 35 36 Example: 37 .. code:: pycon 38 39 >>> was_called_by(inspect.currentframe(), Path(__file__)) 40 True 41 42 Arguments: 43 frame (:obj:`FrameType`): The frame - usually the return value of 44 ``inspect.currentframe()``. If :obj:`None` is passed, the return value will be 45 :obj:`False`. 46 caller (:obj:`pathlib.Path`): File that should be the caller. 47 48 Returns: 49 :obj:`bool`: Whether the frame was called by the specified file. 50 """ 51 if frame is None: 52 return False 53 54 # https://stackoverflow.com/a/57712700/10606962 55 if Path(frame.f_code.co_filename) == caller: 56 return True 57 while frame.f_back: 58 frame = frame.f_back 59 if Path(frame.f_code.co_filename) == caller: 60 return True 61 return False 62 [end of telegram/ext/_utils/stack.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/telegram/ext/_utils/stack.py b/telegram/ext/_utils/stack.py --- a/telegram/ext/_utils/stack.py +++ b/telegram/ext/_utils/stack.py @@ -25,10 +25,13 @@ user. Changes to this module are not considered breaking changes and may not be documented in the changelog. """ +import logging from pathlib import Path from types import FrameType from typing import Optional +_logger = logging.getLogger(__name__) + def was_called_by(frame: Optional[FrameType], caller: Path) -> bool: """Checks if the passed frame was called by the specified file. @@ -51,11 +54,22 @@ if frame is None: return False + try: + return _was_called_by(frame, caller) + except Exception as exc: + _logger.debug( + "Failed to check if frame was called by `caller`. Assuming that it was not.", + exc_info=exc, + ) + return False + + +def _was_called_by(frame: FrameType, caller: Path) -> bool: # https://stackoverflow.com/a/57712700/10606962 - if Path(frame.f_code.co_filename) == caller: + if Path(frame.f_code.co_filename).resolve() == caller: return True while frame.f_back: frame = frame.f_back - if Path(frame.f_code.co_filename) == caller: + if Path(frame.f_code.co_filename).resolve() == caller: return True return False
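The patch above does two things: it resolves each frame's filename before comparing it to `caller`, and it wraps the stack walk in a try/except so unexpected resolution errors degrade to `False` instead of crashing. The sketch below isolates just the comparison fix; it is a simplified stand-alone version, not the library code itself.

```python
import inspect
from pathlib import Path


def was_called_by(frame, caller: Path) -> bool:
    """Walk the stack, comparing *resolved* paths so lib64 -> lib symlinks still match."""
    while frame is not None:
        if Path(frame.f_code.co_filename).resolve() == caller:
            return True
        frame = frame.f_back
    return False


# The caller path should be resolved once up front, mirroring the symlink scenario
# from the issue (venv/lib64/... resolving to venv/lib/...).
print(was_called_by(inspect.currentframe(), Path(__file__).resolve()))
```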
{"golden_diff": "diff --git a/telegram/ext/_utils/stack.py b/telegram/ext/_utils/stack.py\n--- a/telegram/ext/_utils/stack.py\n+++ b/telegram/ext/_utils/stack.py\n@@ -25,10 +25,13 @@\n user. Changes to this module are not considered breaking changes and may not be documented in\n the changelog.\n \"\"\"\n+import logging\n from pathlib import Path\n from types import FrameType\n from typing import Optional\n \n+_logger = logging.getLogger(__name__)\n+\n \n def was_called_by(frame: Optional[FrameType], caller: Path) -> bool:\n \"\"\"Checks if the passed frame was called by the specified file.\n@@ -51,11 +54,22 @@\n if frame is None:\n return False\n \n+ try:\n+ return _was_called_by(frame, caller)\n+ except Exception as exc:\n+ _logger.debug(\n+ \"Failed to check if frame was called by `caller`. Assuming that it was not.\",\n+ exc_info=exc,\n+ )\n+ return False\n+\n+\n+def _was_called_by(frame: FrameType, caller: Path) -> bool:\n # https://stackoverflow.com/a/57712700/10606962\n- if Path(frame.f_code.co_filename) == caller:\n+ if Path(frame.f_code.co_filename).resolve() == caller:\n return True\n while frame.f_back:\n frame = frame.f_back\n- if Path(frame.f_code.co_filename) == caller:\n+ if Path(frame.f_code.co_filename).resolve() == caller:\n return True\n return False\n", "issue": "[BUG] telegram.ext._utils.stack.was_called_by gives incorrect result on 64-bit machine\n### Steps to Reproduce\n\n1. Set up virtualenv using `python3 -m venv` on a 64-bit machine.\r\n2. Initialize an `application` object using `telegram.ext.ApplicationBuilder`:\r\n``` python\r\nfrom telegram.ext import ApplicationBuilder\r\ndef main() -> None:\r\n application = ApplicationBuilder().token(\"TOKEN\").build()\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n```\r\n4. Run the bot in virtualenv and it will give a warning messgae like ```PTBUserWarning: `Application` instances should be built via the `ApplicationBuilder`.```\r\n\n\n### Expected behaviour\n\nThe warning message shouldn't be given since `ApplicationBuilder` is being used.\n\n### Actual behaviour\n\n``` bash\r\n$ python test.py \r\n../venv/lib64/python3.11/site-packages/telegram/ext/_applicationbuilder.py:292: \r\nPTBUserWarning: `Application` instances should be built via the `ApplicationBuilder`.\r\n ] = DefaultValue.get_value( # pylint: disable=not-callable\r\n```\r\n\r\n\n\n### Operating System\n\nFedora Linux 37 (Server Edition)\n\n### Version of Python, python-telegram-bot & dependencies\n\n```shell\npython-telegram-bot 20.0\r\nBot API 6.4\r\nPython 3.11.1 (main, Dec 7 2022, 00:00:00) [GCC 12.2.1 20221121 (Red Hat 12.2.1-4)]\n```\n\n\n### Relevant log output\n\n_No response_\n\n### Additional Context\n\nI believe this is caused by comparing a resolved path with an unresolved path [here](https://github.com/python-telegram-bot/python-telegram-bot/blob/master/telegram/ext/_application.py#L273). 
\r\n\r\nIn my case, it finds `../venv/lib/python3.11/site-packages/telegram/ext/_applicationbuilder.py` not equal to `../venv/lib64/python3.11/site-packages/telegram/ext/_applicationbuilder.py`, the directory `lib64` being a symlink to `lib`.\r\n\r\nA quick (maybe not final) fix is to modify [stack.py](https://github.com/python-telegram-bot/python-telegram-bot/blob/master/telegram/ext/_utils/stack.py) so that `was_called_by` always resolves paths from frame:\r\n``` python\r\n while frame.f_back:\r\n frame = frame.f_back\r\n if Path(frame.f_code.co_filename).resolve() == caller:\r\n return True\r\n```\r\nI have tested it and the warning no longer appears.\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2023\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains helper functions related to inspecting the program stack.\n\n.. versionadded:: 20.0\n\nWarning:\n Contents of this module are intended to be used internally by the library and *not* by the\n user. Changes to this module are not considered breaking changes and may not be documented in\n the changelog.\n\"\"\"\nfrom pathlib import Path\nfrom types import FrameType\nfrom typing import Optional\n\n\ndef was_called_by(frame: Optional[FrameType], caller: Path) -> bool:\n \"\"\"Checks if the passed frame was called by the specified file.\n\n Example:\n .. code:: pycon\n\n >>> was_called_by(inspect.currentframe(), Path(__file__))\n True\n\n Arguments:\n frame (:obj:`FrameType`): The frame - usually the return value of\n ``inspect.currentframe()``. If :obj:`None` is passed, the return value will be\n :obj:`False`.\n caller (:obj:`pathlib.Path`): File that should be the caller.\n\n Returns:\n :obj:`bool`: Whether the frame was called by the specified file.\n \"\"\"\n if frame is None:\n return False\n\n # https://stackoverflow.com/a/57712700/10606962\n if Path(frame.f_code.co_filename) == caller:\n return True\n while frame.f_back:\n frame = frame.f_back\n if Path(frame.f_code.co_filename) == caller:\n return True\n return False\n", "path": "telegram/ext/_utils/stack.py"}]}
1,754
360
gh_patches_debug_8234
rasdani/github-patches
git_diff
easybuilders__easybuild-framework-2914
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Error "_set_blas_variables: blas_lib not set" in EasyBuild 3.9.1 I am getting the following error when I am trying to build LAMMPS with EasyBuild 3.9.1. For an extended dry run, the following is included in the logs: ``` WARNING: ignoring error '_set_blas_variables: BLAS_LIB not set' ``` Using EasyBuild 3.8.1 the build succeeds. The eb recipe is this https://github.com/eth-cscs/production/blob/master/easybuild/easyconfigs/l/LAMMPS/LAMMPS-22Aug2018-CrayGNU-18.08.eb, </issue> <code> [start of easybuild/toolchains/linalg/libsci.py] 1 ## 2 # Copyright 2014-2019 Ghent University 3 # 4 # This file is part of EasyBuild, 5 # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en), 6 # with support of Ghent University (http://ugent.be/hpc), 7 # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be), 8 # Flemish Research Foundation (FWO) (http://www.fwo.be/en) 9 # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). 10 # 11 # https://github.com/easybuilders/easybuild 12 # 13 # EasyBuild is free software: you can redistribute it and/or modify 14 # it under the terms of the GNU General Public License as published by 15 # the Free Software Foundation v2. 16 # 17 # EasyBuild is distributed in the hope that it will be useful, 18 # but WITHOUT ANY WARRANTY; without even the implied warranty of 19 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 # GNU General Public License for more details. 21 # 22 # You should have received a copy of the GNU General Public License 23 # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>. 24 ## 25 """ 26 Support for Cray's LibSci library, which provides BLAS/LAPACK support. 27 cfr. https://www.nersc.gov/users/software/programming-libraries/math-libraries/libsci/ 28 29 :author: Petar Forai (IMP/IMBA, Austria) 30 :author: Kenneth Hoste (Ghent University) 31 """ 32 import os 33 34 from easybuild.tools.build_log import EasyBuildError 35 from easybuild.tools.toolchain.linalg import LinAlg 36 37 38 CRAY_LIBSCI_MODULE_NAME = 'cray-libsci' 39 TC_CONSTANT_CRAY_LIBSCI = 'CrayLibSci' 40 41 42 class LibSci(LinAlg): 43 """Support for Cray's LibSci library, which provides BLAS/LAPACK support.""" 44 # BLAS/LAPACK support 45 # via cray-libsci module, which gets loaded via the PrgEnv module 46 # see https://www.nersc.gov/users/software/programming-libraries/math-libraries/libsci/ 47 BLAS_MODULE_NAME = [CRAY_LIBSCI_MODULE_NAME] 48 49 # no need to specify libraries, compiler driver takes care of linking the right libraries 50 # FIXME: need to revisit this, on numpy we ended up with a serial BLAS through the wrapper. 
51 BLAS_LIB = [] 52 BLAS_LIB_MT = [] 53 BLAS_FAMILY = TC_CONSTANT_CRAY_LIBSCI 54 55 LAPACK_MODULE_NAME = [CRAY_LIBSCI_MODULE_NAME] 56 LAPACK_IS_BLAS = True 57 LAPACK_FAMILY = TC_CONSTANT_CRAY_LIBSCI 58 59 BLACS_MODULE_NAME = [] 60 SCALAPACK_MODULE_NAME = [] 61 62 def _get_software_root(self, name): 63 """Get install prefix for specified software name; special treatment for Cray modules.""" 64 if name == 'cray-libsci': 65 # Cray-provided LibSci module 66 env_var = 'CRAY_LIBSCI_PREFIX_DIR' 67 root = os.getenv(env_var, None) 68 if root is None: 69 raise EasyBuildError("Failed to determine install prefix for %s via $%s", name, env_var) 70 else: 71 self.log.debug("Obtained install prefix for %s via $%s: %s", name, env_var, root) 72 else: 73 root = super(LibSci, self)._get_software_root(name) 74 75 return root 76 77 def _set_blacs_variables(self): 78 """Skip setting BLACS related variables""" 79 pass 80 81 def _set_scalapack_variables(self): 82 """Skip setting ScaLAPACK related variables""" 83 pass 84 85 def definition(self): 86 """ 87 Filter BLAS module from toolchain definition. 88 The cray-libsci module is loaded indirectly (and versionless) via the PrgEnv module, 89 and thus is not a direct toolchain component. 90 """ 91 tc_def = super(LibSci, self).definition() 92 tc_def['BLAS'] = [] 93 tc_def['LAPACK'] = [] 94 return tc_def 95 [end of easybuild/toolchains/linalg/libsci.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/easybuild/toolchains/linalg/libsci.py b/easybuild/toolchains/linalg/libsci.py --- a/easybuild/toolchains/linalg/libsci.py +++ b/easybuild/toolchains/linalg/libsci.py @@ -48,8 +48,8 @@ # no need to specify libraries, compiler driver takes care of linking the right libraries # FIXME: need to revisit this, on numpy we ended up with a serial BLAS through the wrapper. - BLAS_LIB = [] - BLAS_LIB_MT = [] + BLAS_LIB = [''] + BLAS_LIB_MT = [''] BLAS_FAMILY = TC_CONSTANT_CRAY_LIBSCI LAPACK_MODULE_NAME = [CRAY_LIBSCI_MODULE_NAME]
{"golden_diff": "diff --git a/easybuild/toolchains/linalg/libsci.py b/easybuild/toolchains/linalg/libsci.py\n--- a/easybuild/toolchains/linalg/libsci.py\n+++ b/easybuild/toolchains/linalg/libsci.py\n@@ -48,8 +48,8 @@\n \n # no need to specify libraries, compiler driver takes care of linking the right libraries\n # FIXME: need to revisit this, on numpy we ended up with a serial BLAS through the wrapper.\n- BLAS_LIB = []\n- BLAS_LIB_MT = []\n+ BLAS_LIB = ['']\n+ BLAS_LIB_MT = ['']\n BLAS_FAMILY = TC_CONSTANT_CRAY_LIBSCI\n \n LAPACK_MODULE_NAME = [CRAY_LIBSCI_MODULE_NAME]\n", "issue": "Error \"_set_blas_variables: blas_lib not set\" in EasyBuild 3.9.1\nI am getting the following error when I am trying to build LAMMPS with EasyBuild 3.9.1. \r\nFor an extended dry run, the following is included in the logs:\r\n```\r\nWARNING: ignoring error '_set_blas_variables: BLAS_LIB not set'\r\n```\r\n\r\nUsing EasyBuild 3.8.1 the build succeeds. The eb recipe is this https://github.com/eth-cscs/production/blob/master/easybuild/easyconfigs/l/LAMMPS/LAMMPS-22Aug2018-CrayGNU-18.08.eb,\n", "before_files": [{"content": "##\n# Copyright 2014-2019 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nSupport for Cray's LibSci library, which provides BLAS/LAPACK support.\ncfr. 
https://www.nersc.gov/users/software/programming-libraries/math-libraries/libsci/\n\n:author: Petar Forai (IMP/IMBA, Austria)\n:author: Kenneth Hoste (Ghent University)\n\"\"\"\nimport os\n\nfrom easybuild.tools.build_log import EasyBuildError\nfrom easybuild.tools.toolchain.linalg import LinAlg\n\n\nCRAY_LIBSCI_MODULE_NAME = 'cray-libsci'\nTC_CONSTANT_CRAY_LIBSCI = 'CrayLibSci'\n\n\nclass LibSci(LinAlg):\n \"\"\"Support for Cray's LibSci library, which provides BLAS/LAPACK support.\"\"\"\n # BLAS/LAPACK support\n # via cray-libsci module, which gets loaded via the PrgEnv module\n # see https://www.nersc.gov/users/software/programming-libraries/math-libraries/libsci/\n BLAS_MODULE_NAME = [CRAY_LIBSCI_MODULE_NAME]\n\n # no need to specify libraries, compiler driver takes care of linking the right libraries\n # FIXME: need to revisit this, on numpy we ended up with a serial BLAS through the wrapper.\n BLAS_LIB = []\n BLAS_LIB_MT = []\n BLAS_FAMILY = TC_CONSTANT_CRAY_LIBSCI\n\n LAPACK_MODULE_NAME = [CRAY_LIBSCI_MODULE_NAME]\n LAPACK_IS_BLAS = True\n LAPACK_FAMILY = TC_CONSTANT_CRAY_LIBSCI\n\n BLACS_MODULE_NAME = []\n SCALAPACK_MODULE_NAME = []\n\n def _get_software_root(self, name):\n \"\"\"Get install prefix for specified software name; special treatment for Cray modules.\"\"\"\n if name == 'cray-libsci':\n # Cray-provided LibSci module\n env_var = 'CRAY_LIBSCI_PREFIX_DIR'\n root = os.getenv(env_var, None)\n if root is None:\n raise EasyBuildError(\"Failed to determine install prefix for %s via $%s\", name, env_var)\n else:\n self.log.debug(\"Obtained install prefix for %s via $%s: %s\", name, env_var, root)\n else:\n root = super(LibSci, self)._get_software_root(name)\n\n return root\n\n def _set_blacs_variables(self):\n \"\"\"Skip setting BLACS related variables\"\"\"\n pass\n\n def _set_scalapack_variables(self):\n \"\"\"Skip setting ScaLAPACK related variables\"\"\"\n pass\n\n def definition(self):\n \"\"\"\n Filter BLAS module from toolchain definition.\n The cray-libsci module is loaded indirectly (and versionless) via the PrgEnv module,\n and thus is not a direct toolchain component.\n \"\"\"\n tc_def = super(LibSci, self).definition()\n tc_def['BLAS'] = []\n tc_def['LAPACK'] = []\n return tc_def\n", "path": "easybuild/toolchains/linalg/libsci.py"}]}
1,771
165
gh_patches_debug_7724
rasdani/github-patches
git_diff
CTFd__CTFd-1315
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> unlocks api does not check already unlocked hints There is not check in the unlocks api for already unlocked hints in the file [unlocks.py](https://github.com/CTFd/CTFd/blob/master/CTFd/api/v1/unlocks.py) It is possible to unlock multiple times the same hint by just calling the api. </issue> <code> [start of CTFd/api/v1/unlocks.py] 1 from flask import request 2 from flask_restplus import Namespace, Resource 3 4 from CTFd.cache import clear_standings 5 from CTFd.models import Unlocks, db, get_class_by_tablename 6 from CTFd.schemas.awards import AwardSchema 7 from CTFd.schemas.unlocks import UnlockSchema 8 from CTFd.utils.decorators import ( 9 admins_only, 10 authed_only, 11 during_ctf_time_only, 12 require_verified_emails, 13 ) 14 from CTFd.utils.user import get_current_user 15 16 unlocks_namespace = Namespace("unlocks", description="Endpoint to retrieve Unlocks") 17 18 19 @unlocks_namespace.route("") 20 class UnlockList(Resource): 21 @admins_only 22 def get(self): 23 hints = Unlocks.query.all() 24 schema = UnlockSchema() 25 response = schema.dump(hints) 26 27 if response.errors: 28 return {"success": False, "errors": response.errors}, 400 29 30 return {"success": True, "data": response.data} 31 32 @during_ctf_time_only 33 @require_verified_emails 34 @authed_only 35 def post(self): 36 req = request.get_json() 37 user = get_current_user() 38 39 req["user_id"] = user.id 40 req["team_id"] = user.team_id 41 42 Model = get_class_by_tablename(req["type"]) 43 target = Model.query.filter_by(id=req["target"]).first_or_404() 44 45 if target.cost > user.score: 46 return ( 47 { 48 "success": False, 49 "errors": { 50 "score": "You do not have enough points to unlock this hint" 51 }, 52 }, 53 400, 54 ) 55 56 schema = UnlockSchema() 57 response = schema.load(req, session=db.session) 58 59 if response.errors: 60 return {"success": False, "errors": response.errors}, 400 61 62 db.session.add(response.data) 63 64 award_schema = AwardSchema() 65 award = { 66 "user_id": user.id, 67 "team_id": user.team_id, 68 "name": target.name, 69 "description": target.description, 70 "value": (-target.cost), 71 "category": target.category, 72 } 73 74 award = award_schema.load(award) 75 db.session.add(award.data) 76 db.session.commit() 77 clear_standings() 78 79 response = schema.dump(response.data) 80 81 return {"success": True, "data": response.data} 82 [end of CTFd/api/v1/unlocks.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/CTFd/api/v1/unlocks.py b/CTFd/api/v1/unlocks.py --- a/CTFd/api/v1/unlocks.py +++ b/CTFd/api/v1/unlocks.py @@ -59,6 +59,16 @@ if response.errors: return {"success": False, "errors": response.errors}, 400 + existing = Unlocks.query.filter_by(**req).first() + if existing: + return ( + { + "success": False, + "errors": {"target": "You've already unlocked this this target"}, + }, + 400, + ) + db.session.add(response.data) award_schema = AwardSchema()
{"golden_diff": "diff --git a/CTFd/api/v1/unlocks.py b/CTFd/api/v1/unlocks.py\n--- a/CTFd/api/v1/unlocks.py\n+++ b/CTFd/api/v1/unlocks.py\n@@ -59,6 +59,16 @@\n if response.errors:\n return {\"success\": False, \"errors\": response.errors}, 400\n \n+ existing = Unlocks.query.filter_by(**req).first()\n+ if existing:\n+ return (\n+ {\n+ \"success\": False,\n+ \"errors\": {\"target\": \"You've already unlocked this this target\"},\n+ },\n+ 400,\n+ )\n+\n db.session.add(response.data)\n \n award_schema = AwardSchema()\n", "issue": "unlocks api does not check already unlocked hints \nThere is not check in the unlocks api for already unlocked hints in the file [unlocks.py](https://github.com/CTFd/CTFd/blob/master/CTFd/api/v1/unlocks.py)\r\n\r\nIt is possible to unlock multiple times the same hint by just calling the api.\n", "before_files": [{"content": "from flask import request\nfrom flask_restplus import Namespace, Resource\n\nfrom CTFd.cache import clear_standings\nfrom CTFd.models import Unlocks, db, get_class_by_tablename\nfrom CTFd.schemas.awards import AwardSchema\nfrom CTFd.schemas.unlocks import UnlockSchema\nfrom CTFd.utils.decorators import (\n admins_only,\n authed_only,\n during_ctf_time_only,\n require_verified_emails,\n)\nfrom CTFd.utils.user import get_current_user\n\nunlocks_namespace = Namespace(\"unlocks\", description=\"Endpoint to retrieve Unlocks\")\n\n\n@unlocks_namespace.route(\"\")\nclass UnlockList(Resource):\n @admins_only\n def get(self):\n hints = Unlocks.query.all()\n schema = UnlockSchema()\n response = schema.dump(hints)\n\n if response.errors:\n return {\"success\": False, \"errors\": response.errors}, 400\n\n return {\"success\": True, \"data\": response.data}\n\n @during_ctf_time_only\n @require_verified_emails\n @authed_only\n def post(self):\n req = request.get_json()\n user = get_current_user()\n\n req[\"user_id\"] = user.id\n req[\"team_id\"] = user.team_id\n\n Model = get_class_by_tablename(req[\"type\"])\n target = Model.query.filter_by(id=req[\"target\"]).first_or_404()\n\n if target.cost > user.score:\n return (\n {\n \"success\": False,\n \"errors\": {\n \"score\": \"You do not have enough points to unlock this hint\"\n },\n },\n 400,\n )\n\n schema = UnlockSchema()\n response = schema.load(req, session=db.session)\n\n if response.errors:\n return {\"success\": False, \"errors\": response.errors}, 400\n\n db.session.add(response.data)\n\n award_schema = AwardSchema()\n award = {\n \"user_id\": user.id,\n \"team_id\": user.team_id,\n \"name\": target.name,\n \"description\": target.description,\n \"value\": (-target.cost),\n \"category\": target.category,\n }\n\n award = award_schema.load(award)\n db.session.add(award.data)\n db.session.commit()\n clear_standings()\n\n response = schema.dump(response.data)\n\n return {\"success\": True, \"data\": response.data}\n", "path": "CTFd/api/v1/unlocks.py"}]}
1,282
164
gh_patches_debug_25011
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-3306
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Spider alnatura_de is broken During the global build at 2021-08-25-14-42-15, spider **alnatura_de** failed with **134 features** and **5 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-08-25-14-42-15/logs/alnatura_de.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-25-14-42-15/output/alnatura_de.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-25-14-42-15/output/alnatura_de.geojson)) </issue> <code> [start of locations/spiders/alnatura_de.py] 1 import scrapy 2 import re 3 import json 4 5 from locations.items import GeojsonPointItem 6 from locations.hours import OpeningHours 7 8 9 DAY_MAPPING = { 10 1: 'Mo', 2: 'Tu', 3: 'We', 4: 'Th', 5: 'Fr', 6: 'Sa', 7: 'Su', 11 'Mo': 1, 'Tu': 2, 'We': 3, 'Th': 4, 'Fr': 5, 'Sa': 6, 'Su': 7 12 } 13 14 15 class AlnaturaSpider(scrapy.Spider): 16 name = "alnatura_de" 17 allowed_domains = ["www.alnatura.de"] 18 start_urls = ( 19 'https://www.alnatura.de/api/sitecore/stores/FindStoresforMap?' 20 'ElementsPerPage=10000&lat=50.99820058296841' 21 '&lng=7.811966062500009&radius=1483' 22 '&Tradepartner=Alnatura%20Super%20Natur%20Markt', 23 ) 24 25 def parse_hours(self, store_hours): 26 opening_hours = OpeningHours() 27 match = re.match(r'(.+?)-(.+?) +(\d.*?)-(.+?) Uhr', store_hours) 28 if match: 29 from_day = match.group(1).strip() 30 to_day = match.group(2).strip() 31 from_time = match.group(3).strip() 32 to_time = match.group(4).strip() 33 34 fhours = int(float(from_time)) 35 fminutes = (float(from_time) * 60) % 60 36 fmt_from_time = "%d:%02d" % (fhours, fminutes) 37 thours = int(float(to_time)) 38 tminutes = (float(to_time) * 60) % 60 39 fmt_to_time = "%d:%02d" % (thours, tminutes) 40 41 for day in range(DAY_MAPPING[from_day], DAY_MAPPING[to_day] + 1): 42 opening_hours.add_range( 43 day=DAY_MAPPING[day], 44 open_time=fmt_from_time, 45 close_time=fmt_to_time, 46 time_format='%H:%M' 47 ) 48 49 return opening_hours.as_opening_hours() 50 51 def parse_stores(self, response): 52 store = json.loads(response.text) 53 store = store['Payload'] 54 55 properties = { 56 'lat': response.meta.get('lat'), 57 'lon': response.meta.get('lng'), 58 'name': store['StoreName'], 59 'street': store['Street'], 60 'city': store['City'], 61 'postcode': store['PostalCode'], 62 'phone': store['Tel'], 63 'country': store['Country'], 64 'ref': response.meta.get('id'), 65 } 66 67 if store['OpeningTime']: 68 hours = self.parse_hours(store.get('OpeningTime')) 69 if hours: 70 properties["opening_hours"] = hours 71 72 yield GeojsonPointItem(**properties) 73 74 def parse(self, response): 75 data = json.loads(response.text) 76 77 for stores in data['Payload']: 78 yield scrapy.Request( 79 f"https://www.alnatura.de/api/sitecore/stores/StoreDetails" 80 f"?storeid={stores['Id']}", 81 callback=self.parse_stores, 82 meta={ 83 'lat': stores['Lat'].replace(',', '.'), 84 'lng': stores['Lng'].replace(',', '.'), 85 'id': stores['Id'], 86 } 87 ) 88 [end of locations/spiders/alnatura_de.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/locations/spiders/alnatura_de.py b/locations/spiders/alnatura_de.py --- a/locations/spiders/alnatura_de.py +++ b/locations/spiders/alnatura_de.py @@ -28,8 +28,8 @@ if match: from_day = match.group(1).strip() to_day = match.group(2).strip() - from_time = match.group(3).strip() - to_time = match.group(4).strip() + from_time = match.group(3).strip().replace(':','.') + to_time = match.group(4).strip().replace(':','.') fhours = int(float(from_time)) fminutes = (float(from_time) * 60) % 60 @@ -38,13 +38,13 @@ tminutes = (float(to_time) * 60) % 60 fmt_to_time = "%d:%02d" % (thours, tminutes) - for day in range(DAY_MAPPING[from_day], DAY_MAPPING[to_day] + 1): - opening_hours.add_range( - day=DAY_MAPPING[day], - open_time=fmt_from_time, - close_time=fmt_to_time, - time_format='%H:%M' - ) + for day in range(DAY_MAPPING[from_day], DAY_MAPPING[to_day] + 1): + opening_hours.add_range( + day=DAY_MAPPING[day], + open_time=fmt_from_time, + close_time=fmt_to_time, + time_format='%H:%M' + ) return opening_hours.as_opening_hours()
{"golden_diff": "diff --git a/locations/spiders/alnatura_de.py b/locations/spiders/alnatura_de.py\n--- a/locations/spiders/alnatura_de.py\n+++ b/locations/spiders/alnatura_de.py\n@@ -28,8 +28,8 @@\n if match:\n from_day = match.group(1).strip()\n to_day = match.group(2).strip()\n- from_time = match.group(3).strip()\n- to_time = match.group(4).strip()\n+ from_time = match.group(3).strip().replace(':','.')\n+ to_time = match.group(4).strip().replace(':','.')\n \n fhours = int(float(from_time))\n fminutes = (float(from_time) * 60) % 60\n@@ -38,13 +38,13 @@\n tminutes = (float(to_time) * 60) % 60\n fmt_to_time = \"%d:%02d\" % (thours, tminutes)\n \n- for day in range(DAY_MAPPING[from_day], DAY_MAPPING[to_day] + 1):\n- opening_hours.add_range(\n- day=DAY_MAPPING[day],\n- open_time=fmt_from_time,\n- close_time=fmt_to_time,\n- time_format='%H:%M'\n- )\n+ for day in range(DAY_MAPPING[from_day], DAY_MAPPING[to_day] + 1):\n+ opening_hours.add_range(\n+ day=DAY_MAPPING[day],\n+ open_time=fmt_from_time,\n+ close_time=fmt_to_time,\n+ time_format='%H:%M'\n+ )\n \n return opening_hours.as_opening_hours()\n", "issue": "Spider alnatura_de is broken\nDuring the global build at 2021-08-25-14-42-15, spider **alnatura_de** failed with **134 features** and **5 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-08-25-14-42-15/logs/alnatura_de.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-25-14-42-15/output/alnatura_de.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-25-14-42-15/output/alnatura_de.geojson))\n", "before_files": [{"content": "import scrapy\nimport re\nimport json\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nDAY_MAPPING = {\n 1: 'Mo', 2: 'Tu', 3: 'We', 4: 'Th', 5: 'Fr', 6: 'Sa', 7: 'Su',\n 'Mo': 1, 'Tu': 2, 'We': 3, 'Th': 4, 'Fr': 5, 'Sa': 6, 'Su': 7\n}\n\n\nclass AlnaturaSpider(scrapy.Spider):\n name = \"alnatura_de\"\n allowed_domains = [\"www.alnatura.de\"]\n start_urls = (\n 'https://www.alnatura.de/api/sitecore/stores/FindStoresforMap?'\n 'ElementsPerPage=10000&lat=50.99820058296841'\n '&lng=7.811966062500009&radius=1483'\n '&Tradepartner=Alnatura%20Super%20Natur%20Markt',\n )\n\n def parse_hours(self, store_hours):\n opening_hours = OpeningHours()\n match = re.match(r'(.+?)-(.+?) +(\\d.*?)-(.+?) 
Uhr', store_hours)\n if match:\n from_day = match.group(1).strip()\n to_day = match.group(2).strip()\n from_time = match.group(3).strip()\n to_time = match.group(4).strip()\n\n fhours = int(float(from_time))\n fminutes = (float(from_time) * 60) % 60\n fmt_from_time = \"%d:%02d\" % (fhours, fminutes)\n thours = int(float(to_time))\n tminutes = (float(to_time) * 60) % 60\n fmt_to_time = \"%d:%02d\" % (thours, tminutes)\n\n for day in range(DAY_MAPPING[from_day], DAY_MAPPING[to_day] + 1):\n opening_hours.add_range(\n day=DAY_MAPPING[day],\n open_time=fmt_from_time,\n close_time=fmt_to_time,\n time_format='%H:%M'\n )\n\n return opening_hours.as_opening_hours()\n\n def parse_stores(self, response):\n store = json.loads(response.text)\n store = store['Payload']\n\n properties = {\n 'lat': response.meta.get('lat'),\n 'lon': response.meta.get('lng'),\n 'name': store['StoreName'],\n 'street': store['Street'],\n 'city': store['City'],\n 'postcode': store['PostalCode'],\n 'phone': store['Tel'],\n 'country': store['Country'],\n 'ref': response.meta.get('id'),\n }\n\n if store['OpeningTime']:\n hours = self.parse_hours(store.get('OpeningTime'))\n if hours:\n properties[\"opening_hours\"] = hours\n\n yield GeojsonPointItem(**properties)\n\n def parse(self, response):\n data = json.loads(response.text)\n\n for stores in data['Payload']:\n yield scrapy.Request(\n f\"https://www.alnatura.de/api/sitecore/stores/StoreDetails\"\n f\"?storeid={stores['Id']}\",\n callback=self.parse_stores,\n meta={\n 'lat': stores['Lat'].replace(',', '.'),\n 'lng': stores['Lng'].replace(',', '.'),\n 'id': stores['Id'],\n }\n )\n", "path": "locations/spiders/alnatura_de.py"}]}
1,685
370
gh_patches_debug_2533
rasdani/github-patches
git_diff
pulp__pulpcore-3646
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> The validation of input parameters for the repair endpoint is omitted ``` curl -X POST -H 'Content-Type: application/json' -H 'Authorization: Basic YWRtaW46cGFzc3dvcmQ=' -d '[]' http://localhost:5001/pulp/api/v3/repair/ ``` ``` pulp [804a07335b9f4417ad0c71dde478634e]: django.request:ERROR: Internal Server Error: /pulp/api/v3/repair/ Traceback (most recent call last): File "/usr/local/lib/python3.8/site-packages/django/core/handlers/exception.py", line 47, in inner response = get_response(request) File "/usr/local/lib/python3.8/site-packages/django/core/handlers/base.py", line 181, in _get_response response = wrapped_callback(request, *callback_args, **callback_kwargs) File "/usr/local/lib/python3.8/site-packages/django/views/decorators/csrf.py", line 54, in wrapped_view return view_func(*args, **kwargs) File "/usr/local/lib/python3.8/site-packages/django/views/generic/base.py", line 70, in view return self.dispatch(request, *args, **kwargs) File "/usr/local/lib/python3.8/site-packages/rest_framework/views.py", line 509, in dispatch response = self.handle_exception(exc) File "/usr/local/lib/python3.8/site-packages/rest_framework/views.py", line 469, in handle_exception self.raise_uncaught_exception(exc) File "/usr/local/lib/python3.8/site-packages/rest_framework/views.py", line 480, in raise_uncaught_exception raise exc File "/usr/local/lib/python3.8/site-packages/rest_framework/views.py", line 506, in dispatch response = handler(request, *args, **kwargs) File "/src/pulpcore/pulpcore/app/views/repair.py", line 27, in post verify_checksums = serializer.validated_data["verify_checksums"] KeyError: 'verify_checksums' ``` </issue> <code> [start of pulpcore/app/views/repair.py] 1 from drf_spectacular.utils import extend_schema 2 from rest_framework.views import APIView 3 4 from pulpcore.app.response import OperationPostponedResponse 5 from pulpcore.app.serializers import AsyncOperationResponseSerializer, RepairSerializer 6 from pulpcore.app.tasks import repair_all_artifacts 7 from pulpcore.tasking.tasks import dispatch 8 9 10 class RepairView(APIView): 11 @extend_schema( 12 description=( 13 "Trigger an asynchronous task that checks for missing " 14 "or corrupted artifacts, and attempts to redownload them." 15 ), 16 summary="Repair Artifact Storage", 17 request=RepairSerializer, 18 responses={202: AsyncOperationResponseSerializer}, 19 ) 20 def post(self, request): 21 """ 22 Repair artifacts. 23 """ 24 serializer = RepairSerializer(data=request.data) 25 serializer.is_valid() 26 27 verify_checksums = serializer.validated_data["verify_checksums"] 28 29 task = dispatch(repair_all_artifacts, args=[verify_checksums]) 30 31 return OperationPostponedResponse(task, request) 32 [end of pulpcore/app/views/repair.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pulpcore/app/views/repair.py b/pulpcore/app/views/repair.py --- a/pulpcore/app/views/repair.py +++ b/pulpcore/app/views/repair.py @@ -22,7 +22,7 @@ Repair artifacts. """ serializer = RepairSerializer(data=request.data) - serializer.is_valid() + serializer.is_valid(raise_exception=True) verify_checksums = serializer.validated_data["verify_checksums"]
{"golden_diff": "diff --git a/pulpcore/app/views/repair.py b/pulpcore/app/views/repair.py\n--- a/pulpcore/app/views/repair.py\n+++ b/pulpcore/app/views/repair.py\n@@ -22,7 +22,7 @@\n Repair artifacts.\n \"\"\"\n serializer = RepairSerializer(data=request.data)\n- serializer.is_valid()\n+ serializer.is_valid(raise_exception=True)\n \n verify_checksums = serializer.validated_data[\"verify_checksums\"]\n", "issue": "The validation of input parameters for the repair endpoint is omitted\n```\r\ncurl -X POST -H 'Content-Type: application/json' -H 'Authorization: Basic YWRtaW46cGFzc3dvcmQ=' -d '[]' http://localhost:5001/pulp/api/v3/repair/\r\n```\r\n\r\n```\r\npulp [804a07335b9f4417ad0c71dde478634e]: django.request:ERROR: Internal Server Error: /pulp/api/v3/repair/\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.8/site-packages/django/core/handlers/exception.py\", line 47, in inner\r\n response = get_response(request)\r\n File \"/usr/local/lib/python3.8/site-packages/django/core/handlers/base.py\", line 181, in _get_response\r\n response = wrapped_callback(request, *callback_args, **callback_kwargs)\r\n File \"/usr/local/lib/python3.8/site-packages/django/views/decorators/csrf.py\", line 54, in wrapped_view\r\n return view_func(*args, **kwargs)\r\n File \"/usr/local/lib/python3.8/site-packages/django/views/generic/base.py\", line 70, in view\r\n return self.dispatch(request, *args, **kwargs)\r\n File \"/usr/local/lib/python3.8/site-packages/rest_framework/views.py\", line 509, in dispatch\r\n response = self.handle_exception(exc)\r\n File \"/usr/local/lib/python3.8/site-packages/rest_framework/views.py\", line 469, in handle_exception\r\n self.raise_uncaught_exception(exc)\r\n File \"/usr/local/lib/python3.8/site-packages/rest_framework/views.py\", line 480, in raise_uncaught_exception\r\n raise exc\r\n File \"/usr/local/lib/python3.8/site-packages/rest_framework/views.py\", line 506, in dispatch\r\n response = handler(request, *args, **kwargs)\r\n File \"/src/pulpcore/pulpcore/app/views/repair.py\", line 27, in post\r\n verify_checksums = serializer.validated_data[\"verify_checksums\"]\r\nKeyError: 'verify_checksums'\r\n```\n", "before_files": [{"content": "from drf_spectacular.utils import extend_schema\nfrom rest_framework.views import APIView\n\nfrom pulpcore.app.response import OperationPostponedResponse\nfrom pulpcore.app.serializers import AsyncOperationResponseSerializer, RepairSerializer\nfrom pulpcore.app.tasks import repair_all_artifacts\nfrom pulpcore.tasking.tasks import dispatch\n\n\nclass RepairView(APIView):\n @extend_schema(\n description=(\n \"Trigger an asynchronous task that checks for missing \"\n \"or corrupted artifacts, and attempts to redownload them.\"\n ),\n summary=\"Repair Artifact Storage\",\n request=RepairSerializer,\n responses={202: AsyncOperationResponseSerializer},\n )\n def post(self, request):\n \"\"\"\n Repair artifacts.\n \"\"\"\n serializer = RepairSerializer(data=request.data)\n serializer.is_valid()\n\n verify_checksums = serializer.validated_data[\"verify_checksums\"]\n\n task = dispatch(repair_all_artifacts, args=[verify_checksums])\n\n return OperationPostponedResponse(task, request)\n", "path": "pulpcore/app/views/repair.py"}]}
1,288
103
gh_patches_debug_35264
rasdani/github-patches
git_diff
networkx__networkx-1098
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> doc build broken From a clean checkout I ran `python setup.py install` and then I attempted to build a local copy of the docs via `make html` in `doc` and got the following error: ``` (py2k-base)tcaswell@tcaswellpc1:~/other_source/networkx/doc$ make html mkdir -p build ./make_gallery.py atlas.pyTraceback (most recent call last): File "./make_gallery.py", line 57, in <module> execfile(example) File "atlas.py", line 59, in <module> G=atlas6() File "atlas.py", line 25, in atlas6 Atlas=nx.graph_atlas_g()[0:208] # 208 AttributeError: 'module' object has no attribute 'graph_atlas_g' make: *** [build/generate-stamp] Error 1 ``` </issue> <code> [start of examples/drawing/atlas.py] 1 #!/usr/bin/env python 2 """ 3 Atlas of all graphs of 6 nodes or less. 4 5 """ 6 __author__ = """Aric Hagberg ([email protected])""" 7 # Copyright (C) 2004 by 8 # Aric Hagberg <[email protected]> 9 # Dan Schult <[email protected]> 10 # Pieter Swart <[email protected]> 11 # All rights reserved. 12 # BSD license. 13 14 import networkx as nx 15 #from networkx import * 16 #from networkx.generators.atlas import * 17 from networkx.algorithms.isomorphism.isomorph import graph_could_be_isomorphic as isomorphic 18 import random 19 20 def atlas6(): 21 """ Return the atlas of all connected graphs of 6 nodes or less. 22 Attempt to check for isomorphisms and remove. 23 """ 24 25 Atlas=nx.graph_atlas_g()[0:208] # 208 26 # remove isolated nodes, only connected graphs are left 27 U=nx.Graph() # graph for union of all graphs in atlas 28 for G in Atlas: 29 zerodegree=[n for n in G if G.degree(n)==0] 30 for n in zerodegree: 31 G.remove_node(n) 32 U=nx.disjoint_union(U,G) 33 34 # list of graphs of all connected components 35 C=nx.connected_component_subgraphs(U) 36 37 UU=nx.Graph() 38 # do quick isomorphic-like check, not a true isomorphism checker 39 nlist=[] # list of nonisomorphic graphs 40 for G in C: 41 # check against all nonisomorphic graphs so far 42 if not iso(G,nlist): 43 nlist.append(G) 44 UU=nx.disjoint_union(UU,G) # union the nonisomorphic graphs 45 return UU 46 47 def iso(G1, glist): 48 """Quick and dirty nonisomorphism checker used to check isomorphisms.""" 49 for G2 in glist: 50 if isomorphic(G1,G2): 51 return True 52 return False 53 54 55 if __name__ == '__main__': 56 57 import networkx as nx 58 59 G=atlas6() 60 61 print("graph has %d nodes with %d edges"\ 62 %(nx.number_of_nodes(G),nx.number_of_edges(G))) 63 print(nx.number_connected_components(G),"connected components") 64 65 66 try: 67 from networkx import graphviz_layout 68 except ImportError: 69 raise ImportError("This example needs Graphviz and either PyGraphviz or Pydot") 70 71 import matplotlib.pyplot as plt 72 plt.figure(1,figsize=(8,8)) 73 # layout graphs with positions using graphviz neato 74 pos=nx.graphviz_layout(G,prog="neato") 75 # color nodes the same in each connected subgraph 76 C=nx.connected_component_subgraphs(G) 77 for g in C: 78 c=[random.random()]*nx.number_of_nodes(g) # random color... 79 nx.draw(g, 80 pos, 81 node_size=40, 82 node_color=c, 83 vmin=0.0, 84 vmax=1.0, 85 with_labels=False 86 ) 87 plt.savefig("atlas.png",dpi=75) 88 [end of examples/drawing/atlas.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/drawing/atlas.py b/examples/drawing/atlas.py deleted file mode 100644 --- a/examples/drawing/atlas.py +++ /dev/null @@ -1,87 +0,0 @@ -#!/usr/bin/env python -""" -Atlas of all graphs of 6 nodes or less. - -""" -__author__ = """Aric Hagberg ([email protected])""" -# Copyright (C) 2004 by -# Aric Hagberg <[email protected]> -# Dan Schult <[email protected]> -# Pieter Swart <[email protected]> -# All rights reserved. -# BSD license. - -import networkx as nx -#from networkx import * -#from networkx.generators.atlas import * -from networkx.algorithms.isomorphism.isomorph import graph_could_be_isomorphic as isomorphic -import random - -def atlas6(): - """ Return the atlas of all connected graphs of 6 nodes or less. - Attempt to check for isomorphisms and remove. - """ - - Atlas=nx.graph_atlas_g()[0:208] # 208 - # remove isolated nodes, only connected graphs are left - U=nx.Graph() # graph for union of all graphs in atlas - for G in Atlas: - zerodegree=[n for n in G if G.degree(n)==0] - for n in zerodegree: - G.remove_node(n) - U=nx.disjoint_union(U,G) - - # list of graphs of all connected components - C=nx.connected_component_subgraphs(U) - - UU=nx.Graph() - # do quick isomorphic-like check, not a true isomorphism checker - nlist=[] # list of nonisomorphic graphs - for G in C: - # check against all nonisomorphic graphs so far - if not iso(G,nlist): - nlist.append(G) - UU=nx.disjoint_union(UU,G) # union the nonisomorphic graphs - return UU - -def iso(G1, glist): - """Quick and dirty nonisomorphism checker used to check isomorphisms.""" - for G2 in glist: - if isomorphic(G1,G2): - return True - return False - - -if __name__ == '__main__': - - import networkx as nx - - G=atlas6() - - print("graph has %d nodes with %d edges"\ - %(nx.number_of_nodes(G),nx.number_of_edges(G))) - print(nx.number_connected_components(G),"connected components") - - - try: - from networkx import graphviz_layout - except ImportError: - raise ImportError("This example needs Graphviz and either PyGraphviz or Pydot") - - import matplotlib.pyplot as plt - plt.figure(1,figsize=(8,8)) - # layout graphs with positions using graphviz neato - pos=nx.graphviz_layout(G,prog="neato") - # color nodes the same in each connected subgraph - C=nx.connected_component_subgraphs(G) - for g in C: - c=[random.random()]*nx.number_of_nodes(g) # random color... - nx.draw(g, - pos, - node_size=40, - node_color=c, - vmin=0.0, - vmax=1.0, - with_labels=False - ) - plt.savefig("atlas.png",dpi=75) diff --git a/examples/drawing/atlas.py b/examples/drawing/atlas.py new file mode 120000 --- /dev/null +++ b/examples/drawing/atlas.py @@ -0,0 +1 @@ +../graph/atlas.py \ No newline at end of file
{"golden_diff": "diff --git a/examples/drawing/atlas.py b/examples/drawing/atlas.py\ndeleted file mode 100644\n--- a/examples/drawing/atlas.py\n+++ /dev/null\n@@ -1,87 +0,0 @@\n-#!/usr/bin/env python\n-\"\"\"\n-Atlas of all graphs of 6 nodes or less.\n-\n-\"\"\"\n-__author__ = \"\"\"Aric Hagberg ([email protected])\"\"\"\n-# Copyright (C) 2004 by \n-# Aric Hagberg <[email protected]>\n-# Dan Schult <[email protected]>\n-# Pieter Swart <[email protected]>\n-# All rights reserved.\n-# BSD license.\n-\n-import networkx as nx\n-#from networkx import *\n-#from networkx.generators.atlas import *\n-from networkx.algorithms.isomorphism.isomorph import graph_could_be_isomorphic as isomorphic\n-import random\n-\n-def atlas6():\n- \"\"\" Return the atlas of all connected graphs of 6 nodes or less.\n- Attempt to check for isomorphisms and remove.\n- \"\"\"\n-\n- Atlas=nx.graph_atlas_g()[0:208] # 208\n- # remove isolated nodes, only connected graphs are left\n- U=nx.Graph() # graph for union of all graphs in atlas\n- for G in Atlas: \n- zerodegree=[n for n in G if G.degree(n)==0]\n- for n in zerodegree:\n- G.remove_node(n)\n- U=nx.disjoint_union(U,G)\n-\n- # list of graphs of all connected components \n- C=nx.connected_component_subgraphs(U) \n- \n- UU=nx.Graph() \n- # do quick isomorphic-like check, not a true isomorphism checker \n- nlist=[] # list of nonisomorphic graphs\n- for G in C:\n- # check against all nonisomorphic graphs so far\n- if not iso(G,nlist):\n- nlist.append(G)\n- UU=nx.disjoint_union(UU,G) # union the nonisomorphic graphs \n- return UU \n-\n-def iso(G1, glist):\n- \"\"\"Quick and dirty nonisomorphism checker used to check isomorphisms.\"\"\"\n- for G2 in glist:\n- if isomorphic(G1,G2):\n- return True\n- return False \n-\n-\n-if __name__ == '__main__':\n-\n- import networkx as nx\n-\n- G=atlas6()\n-\n- print(\"graph has %d nodes with %d edges\"\\\n- %(nx.number_of_nodes(G),nx.number_of_edges(G)))\n- print(nx.number_connected_components(G),\"connected components\")\n-\n-\n- try:\n- from networkx import graphviz_layout\n- except ImportError:\n- raise ImportError(\"This example needs Graphviz and either PyGraphviz or Pydot\")\n-\n- import matplotlib.pyplot as plt\n- plt.figure(1,figsize=(8,8))\n- # layout graphs with positions using graphviz neato\n- pos=nx.graphviz_layout(G,prog=\"neato\")\n- # color nodes the same in each connected subgraph\n- C=nx.connected_component_subgraphs(G)\n- for g in C:\n- c=[random.random()]*nx.number_of_nodes(g) # random color...\n- nx.draw(g,\n- pos,\n- node_size=40,\n- node_color=c,\n- vmin=0.0,\n- vmax=1.0,\n- with_labels=False\n- )\n- plt.savefig(\"atlas.png\",dpi=75) \ndiff --git a/examples/drawing/atlas.py b/examples/drawing/atlas.py\nnew file mode 120000\n--- /dev/null\n+++ b/examples/drawing/atlas.py\n@@ -0,0 +1 @@\n+../graph/atlas.py\n\\ No newline at end of file\n", "issue": "doc build broken\nFrom a clean checkout I ran `python setup.py install` and then I attempted to build a local copy of the docs via `make html` in `doc` and got the following error:\n\n```\n(py2k-base)tcaswell@tcaswellpc1:~/other_source/networkx/doc$ make html\nmkdir -p build\n./make_gallery.py \natlas.pyTraceback (most recent call last):\n File \"./make_gallery.py\", line 57, in <module>\n execfile(example)\n File \"atlas.py\", line 59, in <module>\n G=atlas6()\n File \"atlas.py\", line 25, in atlas6\n Atlas=nx.graph_atlas_g()[0:208] # 208\nAttributeError: 'module' object has no attribute 'graph_atlas_g'\nmake: *** [build/generate-stamp] Error 1\n```\n\n", "before_files": 
[{"content": "#!/usr/bin/env python\n\"\"\"\nAtlas of all graphs of 6 nodes or less.\n\n\"\"\"\n__author__ = \"\"\"Aric Hagberg ([email protected])\"\"\"\n# Copyright (C) 2004 by \n# Aric Hagberg <[email protected]>\n# Dan Schult <[email protected]>\n# Pieter Swart <[email protected]>\n# All rights reserved.\n# BSD license.\n\nimport networkx as nx\n#from networkx import *\n#from networkx.generators.atlas import *\nfrom networkx.algorithms.isomorphism.isomorph import graph_could_be_isomorphic as isomorphic\nimport random\n\ndef atlas6():\n \"\"\" Return the atlas of all connected graphs of 6 nodes or less.\n Attempt to check for isomorphisms and remove.\n \"\"\"\n\n Atlas=nx.graph_atlas_g()[0:208] # 208\n # remove isolated nodes, only connected graphs are left\n U=nx.Graph() # graph for union of all graphs in atlas\n for G in Atlas: \n zerodegree=[n for n in G if G.degree(n)==0]\n for n in zerodegree:\n G.remove_node(n)\n U=nx.disjoint_union(U,G)\n\n # list of graphs of all connected components \n C=nx.connected_component_subgraphs(U) \n \n UU=nx.Graph() \n # do quick isomorphic-like check, not a true isomorphism checker \n nlist=[] # list of nonisomorphic graphs\n for G in C:\n # check against all nonisomorphic graphs so far\n if not iso(G,nlist):\n nlist.append(G)\n UU=nx.disjoint_union(UU,G) # union the nonisomorphic graphs \n return UU \n\ndef iso(G1, glist):\n \"\"\"Quick and dirty nonisomorphism checker used to check isomorphisms.\"\"\"\n for G2 in glist:\n if isomorphic(G1,G2):\n return True\n return False \n\n\nif __name__ == '__main__':\n\n import networkx as nx\n\n G=atlas6()\n\n print(\"graph has %d nodes with %d edges\"\\\n %(nx.number_of_nodes(G),nx.number_of_edges(G)))\n print(nx.number_connected_components(G),\"connected components\")\n\n\n try:\n from networkx import graphviz_layout\n except ImportError:\n raise ImportError(\"This example needs Graphviz and either PyGraphviz or Pydot\")\n\n import matplotlib.pyplot as plt\n plt.figure(1,figsize=(8,8))\n # layout graphs with positions using graphviz neato\n pos=nx.graphviz_layout(G,prog=\"neato\")\n # color nodes the same in each connected subgraph\n C=nx.connected_component_subgraphs(G)\n for g in C:\n c=[random.random()]*nx.number_of_nodes(g) # random color...\n nx.draw(g,\n pos,\n node_size=40,\n node_color=c,\n vmin=0.0,\n vmax=1.0,\n with_labels=False\n )\n plt.savefig(\"atlas.png\",dpi=75) \n", "path": "examples/drawing/atlas.py"}]}
1,614
892
gh_patches_debug_25942
rasdani/github-patches
git_diff
kivy__python-for-android-1383
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Unit test recipes (reportlab to begin with) The test suite is currently running full integration tests for a bunch of recipes. While integration tests are good, we cannot currently use them for all recipes because they run for too long. However having unit tests for all recipes should be feasible and may still cover some issues like https://github.com/kivy/python-for-android/pull/1357#issuecomment-423614116. Unit tests were recently enabled the following pull request https://github.com/kivy/python-for-android/pull/1379. So the idea is to increase the coverage start from reportlab recipe as a use case. </issue> <code> [start of pythonforandroid/recipes/reportlab/__init__.py] 1 import os, sh 2 from pythonforandroid.recipe import CompiledComponentsPythonRecipe 3 from pythonforandroid.util import (current_directory, ensure_dir) 4 from pythonforandroid.logger import (info, shprint) 5 6 7 class ReportLabRecipe(CompiledComponentsPythonRecipe): 8 version = 'c088826211ca' 9 url = 'https://bitbucket.org/rptlab/reportlab/get/{version}.tar.gz' 10 depends = [('python2', 'python3crystax'), 'freetype'] 11 12 def prebuild_arch(self, arch): 13 if not self.is_patched(arch): 14 super(ReportLabRecipe, self).prebuild_arch(arch) 15 self.apply_patch('patches/fix-setup.patch', arch.arch) 16 recipe_dir = self.get_build_dir(arch.arch) 17 shprint(sh.touch, os.path.join(recipe_dir, '.patched')) 18 ft = self.get_recipe('freetype', self.ctx) 19 ft_dir = ft.get_build_dir(arch.arch) 20 ft_lib_dir = os.environ.get('_FT_LIB_', os.path.join(ft_dir, 'objs', '.libs')) 21 ft_inc_dir = os.environ.get('_FT_INC_', os.path.join(ft_dir, 'include')) 22 tmp_dir = os.path.normpath(os.path.join(recipe_dir, "..", "..", "tmp")) 23 info('reportlab recipe: recipe_dir={}'.format(recipe_dir)) 24 info('reportlab recipe: tmp_dir={}'.format(tmp_dir)) 25 info('reportlab recipe: ft_dir={}'.format(ft_dir)) 26 info('reportlab recipe: ft_lib_dir={}'.format(ft_lib_dir)) 27 info('reportlab recipe: ft_inc_dir={}'.format(ft_inc_dir)) 28 with current_directory(recipe_dir): 29 sh.ls('-lathr') 30 ensure_dir(tmp_dir) 31 pfbfile = os.path.join(tmp_dir, "pfbfer-20070710.zip") 32 if not os.path.isfile(pfbfile): 33 sh.wget("http://www.reportlab.com/ftp/pfbfer-20070710.zip", "-O", pfbfile) 34 sh.unzip("-u", "-d", os.path.join(recipe_dir, "src", "reportlab", "fonts"), pfbfile) 35 if os.path.isfile("setup.py"): 36 with open('setup.py', 'rb') as f: 37 text = f.read().replace('_FT_LIB_', ft_lib_dir).replace('_FT_INC_', ft_inc_dir) 38 with open('setup.py', 'wb') as f: 39 f.write(text) 40 41 42 recipe = ReportLabRecipe() 43 [end of pythonforandroid/recipes/reportlab/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pythonforandroid/recipes/reportlab/__init__.py b/pythonforandroid/recipes/reportlab/__init__.py --- a/pythonforandroid/recipes/reportlab/__init__.py +++ b/pythonforandroid/recipes/reportlab/__init__.py @@ -26,16 +26,15 @@ info('reportlab recipe: ft_lib_dir={}'.format(ft_lib_dir)) info('reportlab recipe: ft_inc_dir={}'.format(ft_inc_dir)) with current_directory(recipe_dir): - sh.ls('-lathr') ensure_dir(tmp_dir) pfbfile = os.path.join(tmp_dir, "pfbfer-20070710.zip") if not os.path.isfile(pfbfile): sh.wget("http://www.reportlab.com/ftp/pfbfer-20070710.zip", "-O", pfbfile) sh.unzip("-u", "-d", os.path.join(recipe_dir, "src", "reportlab", "fonts"), pfbfile) if os.path.isfile("setup.py"): - with open('setup.py', 'rb') as f: + with open('setup.py', 'r') as f: text = f.read().replace('_FT_LIB_', ft_lib_dir).replace('_FT_INC_', ft_inc_dir) - with open('setup.py', 'wb') as f: + with open('setup.py', 'w') as f: f.write(text)
{"golden_diff": "diff --git a/pythonforandroid/recipes/reportlab/__init__.py b/pythonforandroid/recipes/reportlab/__init__.py\n--- a/pythonforandroid/recipes/reportlab/__init__.py\n+++ b/pythonforandroid/recipes/reportlab/__init__.py\n@@ -26,16 +26,15 @@\n info('reportlab recipe: ft_lib_dir={}'.format(ft_lib_dir))\n info('reportlab recipe: ft_inc_dir={}'.format(ft_inc_dir))\n with current_directory(recipe_dir):\n- sh.ls('-lathr')\n ensure_dir(tmp_dir)\n pfbfile = os.path.join(tmp_dir, \"pfbfer-20070710.zip\")\n if not os.path.isfile(pfbfile):\n sh.wget(\"http://www.reportlab.com/ftp/pfbfer-20070710.zip\", \"-O\", pfbfile)\n sh.unzip(\"-u\", \"-d\", os.path.join(recipe_dir, \"src\", \"reportlab\", \"fonts\"), pfbfile)\n if os.path.isfile(\"setup.py\"):\n- with open('setup.py', 'rb') as f:\n+ with open('setup.py', 'r') as f:\n text = f.read().replace('_FT_LIB_', ft_lib_dir).replace('_FT_INC_', ft_inc_dir)\n- with open('setup.py', 'wb') as f:\n+ with open('setup.py', 'w') as f:\n f.write(text)\n", "issue": "Unit test recipes (reportlab to begin with)\nThe test suite is currently running full integration tests for a bunch of recipes.\r\nWhile integration tests are good, we cannot currently use them for all recipes because they run for too long.\r\nHowever having unit tests for all recipes should be feasible and may still cover some issues like https://github.com/kivy/python-for-android/pull/1357#issuecomment-423614116.\r\nUnit tests were recently enabled the following pull request https://github.com/kivy/python-for-android/pull/1379. So the idea is to increase the coverage start from reportlab recipe as a use case.\n", "before_files": [{"content": "import os, sh\nfrom pythonforandroid.recipe import CompiledComponentsPythonRecipe\nfrom pythonforandroid.util import (current_directory, ensure_dir)\nfrom pythonforandroid.logger import (info, shprint)\n\n\nclass ReportLabRecipe(CompiledComponentsPythonRecipe):\n version = 'c088826211ca'\n url = 'https://bitbucket.org/rptlab/reportlab/get/{version}.tar.gz'\n depends = [('python2', 'python3crystax'), 'freetype']\n\n def prebuild_arch(self, arch):\n if not self.is_patched(arch):\n super(ReportLabRecipe, self).prebuild_arch(arch)\n self.apply_patch('patches/fix-setup.patch', arch.arch)\n recipe_dir = self.get_build_dir(arch.arch)\n shprint(sh.touch, os.path.join(recipe_dir, '.patched'))\n ft = self.get_recipe('freetype', self.ctx)\n ft_dir = ft.get_build_dir(arch.arch)\n ft_lib_dir = os.environ.get('_FT_LIB_', os.path.join(ft_dir, 'objs', '.libs'))\n ft_inc_dir = os.environ.get('_FT_INC_', os.path.join(ft_dir, 'include'))\n tmp_dir = os.path.normpath(os.path.join(recipe_dir, \"..\", \"..\", \"tmp\"))\n info('reportlab recipe: recipe_dir={}'.format(recipe_dir))\n info('reportlab recipe: tmp_dir={}'.format(tmp_dir))\n info('reportlab recipe: ft_dir={}'.format(ft_dir))\n info('reportlab recipe: ft_lib_dir={}'.format(ft_lib_dir))\n info('reportlab recipe: ft_inc_dir={}'.format(ft_inc_dir))\n with current_directory(recipe_dir):\n sh.ls('-lathr')\n ensure_dir(tmp_dir)\n pfbfile = os.path.join(tmp_dir, \"pfbfer-20070710.zip\")\n if not os.path.isfile(pfbfile):\n sh.wget(\"http://www.reportlab.com/ftp/pfbfer-20070710.zip\", \"-O\", pfbfile)\n sh.unzip(\"-u\", \"-d\", os.path.join(recipe_dir, \"src\", \"reportlab\", \"fonts\"), pfbfile)\n if os.path.isfile(\"setup.py\"):\n with open('setup.py', 'rb') as f:\n text = f.read().replace('_FT_LIB_', ft_lib_dir).replace('_FT_INC_', ft_inc_dir)\n with open('setup.py', 'wb') as f:\n f.write(text)\n\n\nrecipe = 
ReportLabRecipe()\n", "path": "pythonforandroid/recipes/reportlab/__init__.py"}]}
1,312
321
gh_patches_debug_21032
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-3344
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Spider ingles is broken During the global build at 2021-07-07-14-42-19, spider **ingles** failed with **0 features** and **189 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-07-07-14-42-19/logs/ingles.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-07-07-14-42-19/output/ingles.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-07-07-14-42-19/output/ingles.geojson)) </issue> <code> [start of locations/spiders/ingles.py] 1 # -*- coding: utf-8 2 import scrapy 3 import re 4 5 from locations.items import GeojsonPointItem 6 from locations.hours import OpeningHours 7 8 URL = 'https://www.ingles-markets.com/storelocate/storelocator.php?address=' 9 10 STORE_STATES = ["Alabama", "Georgia", "North%20Carolina", "South%20Carolina", "Tennessee", "Virginia"] 11 12 DAYS = ["Mo", "Tu", "We", "Th", "Fr", "Sa", "Su"] 13 14 class ingles(scrapy.Spider): 15 name = "ingles" 16 item_attributes = { 'brand': "Ingles" } 17 allowed_domains = ["www.ingles-markets.com"] 18 19 def start_requests(self): 20 for state in STORE_STATES: 21 yield scrapy.Request(URL + state, callback=self.parse) 22 23 def parse_hours(self, hours): 24 opening_hours = OpeningHours() 25 26 for day in DAYS: 27 open_time, close_time = hours.split('to') 28 opening_hours.add_range(day=day, open_time=("".join(open_time).strip()), close_time=("".join(close_time).strip()), time_format="%H:%M%p") 29 30 return opening_hours.as_opening_hours() 31 32 def parse_store(self, response): 33 34 properties = { 35 'ref': response.meta["ref"], 36 'name': response.meta["name"], 37 'addr_full': response.meta["addr_full"], 38 'city': response.meta["city"], 39 'state': response.meta["state"], 40 'postcode': re.search(r'(\d{5})',response.xpath("/html/body/fieldset/div[2]/span[2]/strong/text()").get()).group(), 41 'phone': response.xpath("/html/body/fieldset/div[2]/a/text()").get(), 42 'lat': response.meta["lat"], 43 'lon': response.meta["lon"], 44 'website': response.url, 45 } 46 47 hours = self.parse_hours(" ".join(response.xpath("/html/body/fieldset/div[2]/text()")[2].getall()).strip()) 48 if hours: 49 properties["opening_hours"] = hours 50 51 yield GeojsonPointItem(**properties) 52 53 def parse(self, response): 54 for store in response.xpath('//markers/marker'): 55 ids =store.xpath('./@id').extract_first(), 56 name = store.xpath('./@name').get() 57 addr = store.xpath('./@address').get() 58 city = store.xpath('./@city').get() 59 state = store.xpath('./@state').get() 60 lats = store.xpath('./@lat').get() 61 longs = store.xpath('./@lng').get() 62 63 for id in ids: 64 yield scrapy.Request( 65 'https://www.ingles-markets.com/storelocate/storeinfo.php?storenum=' + id, 66 callback=self.parse_store, 67 meta={ 68 'ref': id, 69 'name': name, 70 'addr_full': addr, 71 'city': city, 72 'state': state, 73 'lat': lats, 74 'lon': longs 75 } 76 ) 77 [end of locations/spiders/ingles.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/locations/spiders/ingles.py b/locations/spiders/ingles.py --- a/locations/spiders/ingles.py +++ b/locations/spiders/ingles.py @@ -37,14 +37,14 @@ 'addr_full': response.meta["addr_full"], 'city': response.meta["city"], 'state': response.meta["state"], - 'postcode': re.search(r'(\d{5})',response.xpath("/html/body/fieldset/div[2]/span[2]/strong/text()").get()).group(), + 'postcode': re.search(r'(\d{5})',response.xpath("/html/body/div[2]/span[2]/strong/text()").get()).group(), 'phone': response.xpath("/html/body/fieldset/div[2]/a/text()").get(), 'lat': response.meta["lat"], 'lon': response.meta["lon"], 'website': response.url, } - hours = self.parse_hours(" ".join(response.xpath("/html/body/fieldset/div[2]/text()")[2].getall()).strip()) + hours = self.parse_hours(" ".join(response.xpath("/html/body/fieldset/div[2]/text()")[1].getall()).strip()) if hours: properties["opening_hours"] = hours
{"golden_diff": "diff --git a/locations/spiders/ingles.py b/locations/spiders/ingles.py\n--- a/locations/spiders/ingles.py\n+++ b/locations/spiders/ingles.py\n@@ -37,14 +37,14 @@\n 'addr_full': response.meta[\"addr_full\"],\n 'city': response.meta[\"city\"],\n 'state': response.meta[\"state\"],\n- 'postcode': re.search(r'(\\d{5})',response.xpath(\"/html/body/fieldset/div[2]/span[2]/strong/text()\").get()).group(),\n+ 'postcode': re.search(r'(\\d{5})',response.xpath(\"/html/body/div[2]/span[2]/strong/text()\").get()).group(),\n 'phone': response.xpath(\"/html/body/fieldset/div[2]/a/text()\").get(),\n 'lat': response.meta[\"lat\"],\n 'lon': response.meta[\"lon\"],\n 'website': response.url,\n }\n \n- hours = self.parse_hours(\" \".join(response.xpath(\"/html/body/fieldset/div[2]/text()\")[2].getall()).strip())\n+ hours = self.parse_hours(\" \".join(response.xpath(\"/html/body/fieldset/div[2]/text()\")[1].getall()).strip())\n if hours:\n properties[\"opening_hours\"] = hours\n", "issue": "Spider ingles is broken\nDuring the global build at 2021-07-07-14-42-19, spider **ingles** failed with **0 features** and **189 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-07-07-14-42-19/logs/ingles.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-07-07-14-42-19/output/ingles.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-07-07-14-42-19/output/ingles.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8\nimport scrapy\nimport re\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\nURL = 'https://www.ingles-markets.com/storelocate/storelocator.php?address='\n\nSTORE_STATES = [\"Alabama\", \"Georgia\", \"North%20Carolina\", \"South%20Carolina\", \"Tennessee\", \"Virginia\"]\n\nDAYS = [\"Mo\", \"Tu\", \"We\", \"Th\", \"Fr\", \"Sa\", \"Su\"]\n\nclass ingles(scrapy.Spider):\n name = \"ingles\"\n item_attributes = { 'brand': \"Ingles\" }\n allowed_domains = [\"www.ingles-markets.com\"]\n\n def start_requests(self):\n for state in STORE_STATES:\n yield scrapy.Request(URL + state, callback=self.parse)\n\n def parse_hours(self, hours):\n opening_hours = OpeningHours()\n\n for day in DAYS:\n open_time, close_time = hours.split('to')\n opening_hours.add_range(day=day, open_time=(\"\".join(open_time).strip()), close_time=(\"\".join(close_time).strip()), time_format=\"%H:%M%p\")\n\n return opening_hours.as_opening_hours()\n\n def parse_store(self, response):\n\n properties = {\n 'ref': response.meta[\"ref\"],\n 'name': response.meta[\"name\"],\n 'addr_full': response.meta[\"addr_full\"],\n 'city': response.meta[\"city\"],\n 'state': response.meta[\"state\"],\n 'postcode': re.search(r'(\\d{5})',response.xpath(\"/html/body/fieldset/div[2]/span[2]/strong/text()\").get()).group(),\n 'phone': response.xpath(\"/html/body/fieldset/div[2]/a/text()\").get(),\n 'lat': response.meta[\"lat\"],\n 'lon': response.meta[\"lon\"],\n 'website': response.url,\n }\n\n hours = self.parse_hours(\" \".join(response.xpath(\"/html/body/fieldset/div[2]/text()\")[2].getall()).strip())\n if hours:\n properties[\"opening_hours\"] = hours\n\n yield GeojsonPointItem(**properties)\n\n def parse(self, response):\n for store in response.xpath('//markers/marker'):\n ids =store.xpath('./@id').extract_first(),\n name = store.xpath('./@name').get()\n addr = store.xpath('./@address').get()\n city = store.xpath('./@city').get()\n state = store.xpath('./@state').get()\n lats = store.xpath('./@lat').get()\n longs = 
store.xpath('./@lng').get()\n\n for id in ids:\n yield scrapy.Request(\n 'https://www.ingles-markets.com/storelocate/storeinfo.php?storenum=' + id,\n callback=self.parse_store,\n meta={\n 'ref': id,\n 'name': name,\n 'addr_full': addr,\n 'city': city,\n 'state': state,\n 'lat': lats,\n 'lon': longs\n }\n )\n", "path": "locations/spiders/ingles.py"}]}
1,512
282
gh_patches_debug_14372
rasdani/github-patches
git_diff
googleapis__python-bigquery-52
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> BigQuery: Document the use of the timeout parameter in samples After adding the new `timeout` parameter to various public methods (#9987), we should demonstrate its usage in the code samples. Users should be aware of this new feature, and should probably use it by default to avoid sporadic weird issues related to a method "getting stuck" at the transport layer. </issue> <code> [start of samples/create_dataset.py] 1 # Copyright 2019 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 16 def create_dataset(dataset_id): 17 18 # [START bigquery_create_dataset] 19 from google.cloud import bigquery 20 21 # Construct a BigQuery client object. 22 client = bigquery.Client() 23 24 # TODO(developer): Set dataset_id to the ID of the dataset to create. 25 # dataset_id = "{}.your_dataset".format(client.project) 26 27 # Construct a full Dataset object to send to the API. 28 dataset = bigquery.Dataset(dataset_id) 29 30 # TODO(developer): Specify the geographic location where the dataset should reside. 31 dataset.location = "US" 32 33 # Send the dataset to the API for creation. 34 # Raises google.api_core.exceptions.Conflict if the Dataset already 35 # exists within the project. 36 dataset = client.create_dataset(dataset) # Make an API request. 37 print("Created dataset {}.{}".format(client.project, dataset.dataset_id)) 38 # [END bigquery_create_dataset] 39 [end of samples/create_dataset.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/samples/create_dataset.py b/samples/create_dataset.py --- a/samples/create_dataset.py +++ b/samples/create_dataset.py @@ -30,9 +30,9 @@ # TODO(developer): Specify the geographic location where the dataset should reside. dataset.location = "US" - # Send the dataset to the API for creation. + # Send the dataset to the API for creation, with an explicit timeout. # Raises google.api_core.exceptions.Conflict if the Dataset already # exists within the project. - dataset = client.create_dataset(dataset) # Make an API request. + dataset = client.create_dataset(dataset, timeout=30) # Make an API request. print("Created dataset {}.{}".format(client.project, dataset.dataset_id)) # [END bigquery_create_dataset]
{"golden_diff": "diff --git a/samples/create_dataset.py b/samples/create_dataset.py\n--- a/samples/create_dataset.py\n+++ b/samples/create_dataset.py\n@@ -30,9 +30,9 @@\n # TODO(developer): Specify the geographic location where the dataset should reside.\n dataset.location = \"US\"\n \n- # Send the dataset to the API for creation.\n+ # Send the dataset to the API for creation, with an explicit timeout.\n # Raises google.api_core.exceptions.Conflict if the Dataset already\n # exists within the project.\n- dataset = client.create_dataset(dataset) # Make an API request.\n+ dataset = client.create_dataset(dataset, timeout=30) # Make an API request.\n print(\"Created dataset {}.{}\".format(client.project, dataset.dataset_id))\n # [END bigquery_create_dataset]\n", "issue": "BigQuery: Document the use of the timeout parameter in samples\nAfter adding the new `timeout` parameter to various public methods (#9987), we should demonstrate its usage in the code samples.\r\n\r\nUsers should be aware of this new feature, and should probably use it by default to avoid sporadic weird issues related to a method \"getting stuck\" at the transport layer.\n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ndef create_dataset(dataset_id):\n\n # [START bigquery_create_dataset]\n from google.cloud import bigquery\n\n # Construct a BigQuery client object.\n client = bigquery.Client()\n\n # TODO(developer): Set dataset_id to the ID of the dataset to create.\n # dataset_id = \"{}.your_dataset\".format(client.project)\n\n # Construct a full Dataset object to send to the API.\n dataset = bigquery.Dataset(dataset_id)\n\n # TODO(developer): Specify the geographic location where the dataset should reside.\n dataset.location = \"US\"\n\n # Send the dataset to the API for creation.\n # Raises google.api_core.exceptions.Conflict if the Dataset already\n # exists within the project.\n dataset = client.create_dataset(dataset) # Make an API request.\n print(\"Created dataset {}.{}\".format(client.project, dataset.dataset_id))\n # [END bigquery_create_dataset]\n", "path": "samples/create_dataset.py"}]}
1,005
181
gh_patches_debug_21580
rasdani/github-patches
git_diff
cupy__cupy-6172
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Incorrect output of `cupy.logaddexp()` For this case, mathematically we should get `inf`, but CuPy returns `nan`: ```python >>> np.logaddexp(np.inf, np.inf) inf >>> >>> cp.logaddexp(np.inf, np.inf) array(nan) ``` The reason is `in0-in1` gives `nan` when both are `inf`, and it propagates all the way out: https://github.com/cupy/cupy/blob/4469fae998df33c72ff40ef954cb08b8f0004b18/cupy/_math/explog.py#L73 </issue> <code> [start of cupy/_math/explog.py] 1 from cupy import _core 2 from cupy._math import ufunc 3 4 5 exp = ufunc.create_math_ufunc( 6 'exp', 1, 'cupy_exp', 7 '''Elementwise exponential function. 8 9 .. seealso:: :data:`numpy.exp` 10 11 ''') 12 13 14 expm1 = ufunc.create_math_ufunc( 15 'expm1', 1, 'cupy_expm1', 16 '''Computes ``exp(x) - 1`` elementwise. 17 18 .. seealso:: :data:`numpy.expm1` 19 20 ''') 21 22 23 exp2 = _core.create_ufunc( 24 'cupy_exp2', 25 ('e->e', 'f->f', 'd->d', 'F->F', 'D->D'), 26 'out0 = pow(in0_type(2), in0)', 27 doc='''Elementwise exponentiation with base 2. 28 29 .. seealso:: :data:`numpy.exp2` 30 31 ''') 32 33 34 log = ufunc.create_math_ufunc( 35 'log', 1, 'cupy_log', 36 '''Elementwise natural logarithm function. 37 38 .. seealso:: :data:`numpy.log` 39 40 ''') 41 42 43 log10 = ufunc.create_math_ufunc( 44 'log10', 1, 'cupy_log10', 45 '''Elementwise common logarithm function. 46 47 .. seealso:: :data:`numpy.log10` 48 49 ''') 50 51 52 log2 = ufunc.create_math_ufunc( 53 'log2', 1, 'cupy_log2', 54 '''Elementwise binary logarithm function. 55 56 .. seealso:: :data:`numpy.log2` 57 58 ''') 59 60 61 log1p = ufunc.create_math_ufunc( 62 'log1p', 1, 'cupy_log1p', 63 '''Computes ``log(1 + x)`` elementwise. 64 65 .. seealso:: :data:`numpy.log1p` 66 67 ''') 68 69 70 logaddexp = _core.create_ufunc( 71 'cupy_logaddexp', 72 ('ee->e', 'ff->f', 'dd->d'), 73 'out0 = fmax(in0, in1) + log1p(exp(-fabs(in0 - in1)))', 74 doc='''Computes ``log(exp(x1) + exp(x2))`` elementwise. 75 76 .. seealso:: :data:`numpy.logaddexp` 77 78 ''') 79 80 81 logaddexp2 = _core.create_ufunc( 82 'cupy_logaddexp2', 83 ('ee->e', 'ff->f', 'dd->d'), 84 'out0 = fmax(in0, in1) + log2(1 + exp2(-fabs(in0 - in1)))', 85 doc='''Computes ``log2(exp2(x1) + exp2(x2))`` elementwise. 86 87 .. seealso:: :data:`numpy.logaddexp2` 88 89 ''') 90 [end of cupy/_math/explog.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cupy/_math/explog.py b/cupy/_math/explog.py --- a/cupy/_math/explog.py +++ b/cupy/_math/explog.py @@ -70,7 +70,14 @@ logaddexp = _core.create_ufunc( 'cupy_logaddexp', ('ee->e', 'ff->f', 'dd->d'), - 'out0 = fmax(in0, in1) + log1p(exp(-fabs(in0 - in1)))', + ''' + if (in0 == in1) { + /* Handles infinities of the same sign */ + out0 = in0 + log(2.0); + } else { + out0 = fmax(in0, in1) + log1p(exp(-fabs(in0 - in1))); + } + ''', doc='''Computes ``log(exp(x1) + exp(x2))`` elementwise. .. seealso:: :data:`numpy.logaddexp` @@ -81,7 +88,14 @@ logaddexp2 = _core.create_ufunc( 'cupy_logaddexp2', ('ee->e', 'ff->f', 'dd->d'), - 'out0 = fmax(in0, in1) + log2(1 + exp2(-fabs(in0 - in1)))', + ''' + if (in0 == in1) { + /* Handles infinities of the same sign */ + out0 = in0 + 1.0; + } else { + out0 = fmax(in0, in1) + log2(1 + exp2(-fabs(in0 - in1))); + } + ''', doc='''Computes ``log2(exp2(x1) + exp2(x2))`` elementwise. .. seealso:: :data:`numpy.logaddexp2`
{"golden_diff": "diff --git a/cupy/_math/explog.py b/cupy/_math/explog.py\n--- a/cupy/_math/explog.py\n+++ b/cupy/_math/explog.py\n@@ -70,7 +70,14 @@\n logaddexp = _core.create_ufunc(\n 'cupy_logaddexp',\n ('ee->e', 'ff->f', 'dd->d'),\n- 'out0 = fmax(in0, in1) + log1p(exp(-fabs(in0 - in1)))',\n+ '''\n+ if (in0 == in1) {\n+ /* Handles infinities of the same sign */\n+ out0 = in0 + log(2.0);\n+ } else {\n+ out0 = fmax(in0, in1) + log1p(exp(-fabs(in0 - in1)));\n+ }\n+ ''',\n doc='''Computes ``log(exp(x1) + exp(x2))`` elementwise.\n \n .. seealso:: :data:`numpy.logaddexp`\n@@ -81,7 +88,14 @@\n logaddexp2 = _core.create_ufunc(\n 'cupy_logaddexp2',\n ('ee->e', 'ff->f', 'dd->d'),\n- 'out0 = fmax(in0, in1) + log2(1 + exp2(-fabs(in0 - in1)))',\n+ '''\n+ if (in0 == in1) {\n+ /* Handles infinities of the same sign */\n+ out0 = in0 + 1.0;\n+ } else {\n+ out0 = fmax(in0, in1) + log2(1 + exp2(-fabs(in0 - in1)));\n+ }\n+ ''',\n doc='''Computes ``log2(exp2(x1) + exp2(x2))`` elementwise.\n \n .. seealso:: :data:`numpy.logaddexp2`\n", "issue": "Incorrect output of `cupy.logaddexp()`\nFor this case, mathematically we should get `inf`, but CuPy returns `nan`:\r\n```python\r\n>>> np.logaddexp(np.inf, np.inf)\r\ninf\r\n>>>\r\n>>> cp.logaddexp(np.inf, np.inf)\r\narray(nan)\r\n\r\n```\r\nThe reason is `in0-in1` gives `nan` when both are `inf`, and it propagates all the way out:\r\nhttps://github.com/cupy/cupy/blob/4469fae998df33c72ff40ef954cb08b8f0004b18/cupy/_math/explog.py#L73\r\n\r\n\n", "before_files": [{"content": "from cupy import _core\nfrom cupy._math import ufunc\n\n\nexp = ufunc.create_math_ufunc(\n 'exp', 1, 'cupy_exp',\n '''Elementwise exponential function.\n\n .. seealso:: :data:`numpy.exp`\n\n ''')\n\n\nexpm1 = ufunc.create_math_ufunc(\n 'expm1', 1, 'cupy_expm1',\n '''Computes ``exp(x) - 1`` elementwise.\n\n .. seealso:: :data:`numpy.expm1`\n\n ''')\n\n\nexp2 = _core.create_ufunc(\n 'cupy_exp2',\n ('e->e', 'f->f', 'd->d', 'F->F', 'D->D'),\n 'out0 = pow(in0_type(2), in0)',\n doc='''Elementwise exponentiation with base 2.\n\n .. seealso:: :data:`numpy.exp2`\n\n ''')\n\n\nlog = ufunc.create_math_ufunc(\n 'log', 1, 'cupy_log',\n '''Elementwise natural logarithm function.\n\n .. seealso:: :data:`numpy.log`\n\n ''')\n\n\nlog10 = ufunc.create_math_ufunc(\n 'log10', 1, 'cupy_log10',\n '''Elementwise common logarithm function.\n\n .. seealso:: :data:`numpy.log10`\n\n ''')\n\n\nlog2 = ufunc.create_math_ufunc(\n 'log2', 1, 'cupy_log2',\n '''Elementwise binary logarithm function.\n\n .. seealso:: :data:`numpy.log2`\n\n ''')\n\n\nlog1p = ufunc.create_math_ufunc(\n 'log1p', 1, 'cupy_log1p',\n '''Computes ``log(1 + x)`` elementwise.\n\n .. seealso:: :data:`numpy.log1p`\n\n ''')\n\n\nlogaddexp = _core.create_ufunc(\n 'cupy_logaddexp',\n ('ee->e', 'ff->f', 'dd->d'),\n 'out0 = fmax(in0, in1) + log1p(exp(-fabs(in0 - in1)))',\n doc='''Computes ``log(exp(x1) + exp(x2))`` elementwise.\n\n .. seealso:: :data:`numpy.logaddexp`\n\n ''')\n\n\nlogaddexp2 = _core.create_ufunc(\n 'cupy_logaddexp2',\n ('ee->e', 'ff->f', 'dd->d'),\n 'out0 = fmax(in0, in1) + log2(1 + exp2(-fabs(in0 - in1)))',\n doc='''Computes ``log2(exp2(x1) + exp2(x2))`` elementwise.\n\n .. seealso:: :data:`numpy.logaddexp2`\n\n ''')\n", "path": "cupy/_math/explog.py"}]}
1,518
430
gh_patches_debug_13269
rasdani/github-patches
git_diff
mars-project__mars-558
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG] Cannot import new_client **Describe the bug** When trying to import ``new_client`` from ``mars.actors`` in Python 2.7 in Linux, a ValueError is raised: ``` Traceback (most recent call last): File "<stdin>", line 1, in <module> File "mars/actors/core.pyx", line 147, in mars.actors.core.new_client cpdef object new_client(object parallel=None, str backend='gevent'): File "mars/actors/core.pyx", line 151, in mars.actors.core.new_client from .pool.gevent_pool import ActorClient File "mars/actors/pool/gevent_pool.pyx", line 38, in init mars.actors.pool.gevent_pool from ...lib import gipc File "mars/lib/gipc.pyx", line 1159, in init mars.lib.gipc __exec("""def _reraise(tp, value, tb=None): File "mars/lib/gipc.pyx", line 1150, in mars.lib.gipc.__exec frame = sys._getframe(1) ValueError: call stack is not deep enough ``` **To Reproduce** ```python >>> from mars.actors import new_client >>> client = new_client() ``` </issue> <code> [start of mars/actors/__init__.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 # Copyright 1999-2018 Alibaba Group Holding Ltd. 4 # 5 # Licensed under the Apache License, Version 2.0 (the "License"); 6 # you may not use this file except in compliance with the License. 7 # You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, software 12 # distributed under the License is distributed on an "AS IS" BASIS, 13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 # See the License for the specific language governing permissions and 15 # limitations under the License. 16 17 18 from .core import create_actor_pool, Actor, FunctionActor, new_client, \ 19 register_actor_implementation, unregister_actor_implementation 20 from .errors import ActorPoolNotStarted, ActorNotExist, ActorAlreadyExist 21 from .distributor import Distributor 22 [end of mars/actors/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mars/actors/__init__.py b/mars/actors/__init__.py --- a/mars/actors/__init__.py +++ b/mars/actors/__init__.py @@ -14,8 +14,14 @@ # See the License for the specific language governing permissions and # limitations under the License. - from .core import create_actor_pool, Actor, FunctionActor, new_client, \ register_actor_implementation, unregister_actor_implementation from .errors import ActorPoolNotStarted, ActorNotExist, ActorAlreadyExist from .distributor import Distributor + +# import gipc first to avoid stack issue of `call stack is not deep enough` +try: + from ..lib import gipc + del gipc +except ImportError: # pragma: no cover + pass
{"golden_diff": "diff --git a/mars/actors/__init__.py b/mars/actors/__init__.py\n--- a/mars/actors/__init__.py\n+++ b/mars/actors/__init__.py\n@@ -14,8 +14,14 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-\n from .core import create_actor_pool, Actor, FunctionActor, new_client, \\\n register_actor_implementation, unregister_actor_implementation\n from .errors import ActorPoolNotStarted, ActorNotExist, ActorAlreadyExist\n from .distributor import Distributor\n+\n+# import gipc first to avoid stack issue of `call stack is not deep enough`\n+try:\n+ from ..lib import gipc\n+ del gipc\n+except ImportError: # pragma: no cover\n+ pass\n", "issue": "[BUG] Cannot import new_client\n**Describe the bug**\r\nWhen trying to import ``new_client`` from ``mars.actors`` in Python 2.7 in Linux, a ValueError is raised:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"mars/actors/core.pyx\", line 147, in mars.actors.core.new_client\r\n cpdef object new_client(object parallel=None, str backend='gevent'):\r\n File \"mars/actors/core.pyx\", line 151, in mars.actors.core.new_client\r\n from .pool.gevent_pool import ActorClient\r\n File \"mars/actors/pool/gevent_pool.pyx\", line 38, in init mars.actors.pool.gevent_pool\r\n from ...lib import gipc\r\n File \"mars/lib/gipc.pyx\", line 1159, in init mars.lib.gipc\r\n __exec(\"\"\"def _reraise(tp, value, tb=None):\r\n File \"mars/lib/gipc.pyx\", line 1150, in mars.lib.gipc.__exec\r\n frame = sys._getframe(1)\r\nValueError: call stack is not deep enough\r\n```\r\n\r\n**To Reproduce**\r\n```python\r\n>>> from mars.actors import new_client\r\n>>> client = new_client()\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom .core import create_actor_pool, Actor, FunctionActor, new_client, \\\n register_actor_implementation, unregister_actor_implementation\nfrom .errors import ActorPoolNotStarted, ActorNotExist, ActorAlreadyExist\nfrom .distributor import Distributor\n", "path": "mars/actors/__init__.py"}]}
1,069
182
gh_patches_debug_19304
rasdani/github-patches
git_diff
optuna__optuna-3398
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Improve the `Re-use the best values` tutorial page ### What is an issue? There are some minor issues on the Re-use the best values tutorial page. ## Description - [x] On the [Re-use the best values](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/010_reuse_best_trial.html) page, We should update the title as `re-use the best parameters` instead of `re-use the best values`. As the best value means the best objective value in most of the Optuna references. - [ ] Mention `Study.best_trials` on the page for multi-objective optimization. Context: https://github.com/optuna/optuna/pull/3396#pullrequestreview-917028874 </issue> <code> [start of tutorial/20_recipes/010_reuse_best_trial.py] 1 """ 2 .. _reuse_best_trial: 3 4 Re-use the best values 5 ========================== 6 7 In some cases, you may want to re-evaluate the objective function with the best 8 hyperparameters again after the hyperparameter optimization. 9 10 For example, 11 12 - You have found good hyperparameters with Optuna and want to run a similar `objective` function using the best hyperparameters found so far to further analyze the results, or 13 - You have optimized with Optuna using a partial dataset to reduce training time. After the hyperparameter tuning, you want to train the model using the whole dataset with the best hyperparameter values found. 14 15 :class:`~optuna.study.Study.best_trial` provides an interface to re-evaluate the objective function with the current best hyperparameter values. 16 17 This tutorial shows an example of how to re-run a different `objective` function with the current best values, like the first example above. 18 19 20 Investigating the best model further 21 ------------------------------------- 22 23 Let's consider a classical supervised classification problem with Optuna as follows: 24 """ 25 26 from sklearn import metrics 27 from sklearn.datasets import make_classification 28 from sklearn.linear_model import LogisticRegression 29 from sklearn.model_selection import train_test_split 30 31 32 import optuna 33 34 35 def objective(trial): 36 X, y = make_classification(n_features=10, random_state=1) 37 X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1) 38 39 C = trial.suggest_float("C", 1e-7, 10.0, log=True) 40 41 clf = LogisticRegression(C=C) 42 clf.fit(X_train, y_train) 43 44 return clf.score(X_test, y_test) 45 46 47 study = optuna.create_study(direction="maximize") 48 study.optimize(objective, n_trials=10) 49 50 print(study.best_trial.value) # Show the best value. 51 52 ################################################################################################### 53 # Suppose after the hyperparameter optimization, you want to calculate other evaluation metrics 54 # such as recall, precision, and f1-score on the same dataset. 55 # You can define another objective function that shares most of the ``objective`` 56 # function to reproduce the model with the best hyperparameters. 
57 58 59 def detailed_objective(trial): 60 # Use same code objective to reproduce the best model 61 X, y = make_classification(n_features=10, random_state=1) 62 X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1) 63 64 C = trial.suggest_float("C", 1e-7, 10.0, log=True) 65 66 clf = LogisticRegression(C=C) 67 clf.fit(X_train, y_train) 68 69 # calculate more evaluation metrics 70 pred = clf.predict(X_test) 71 72 acc = metrics.accuracy_score(pred, y_test) 73 recall = metrics.recall_score(pred, y_test) 74 precision = metrics.precision_score(pred, y_test) 75 f1 = metrics.f1_score(pred, y_test) 76 77 return acc, f1, recall, precision 78 79 80 ################################################################################################### 81 # Pass ``study.best_trial`` as the argument of ``detailed_objective``. 82 83 detailed_objective(study.best_trial) # calculate acc, f1, recall, and precision 84 85 ################################################################################################### 86 # The difference between :class:`~optuna.study.Study.best_trial` and ordinal trials 87 # ---------------------------------------------------------------------------------- 88 # 89 # This uses :class:`~optuna.study.Study.best_trial`, which returns the `best_trial` as a 90 # :class:`~optuna.trial.FrozenTrial`. 91 # The :class:`~optuna.trial.FrozenTrial` is different from an active trial 92 # and behaves differently from :class:`~optuna.trial.Trial` in some situations. 93 # For example, pruning does not work because :class:`~optuna.trial.FrozenTrial.should_prune` 94 # always returns ``False``. 95 [end of tutorial/20_recipes/010_reuse_best_trial.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/tutorial/20_recipes/010_reuse_best_trial.py b/tutorial/20_recipes/010_reuse_best_trial.py --- a/tutorial/20_recipes/010_reuse_best_trial.py +++ b/tutorial/20_recipes/010_reuse_best_trial.py @@ -1,8 +1,8 @@ """ .. _reuse_best_trial: -Re-use the best values -========================== +Re-use the best trial +====================== In some cases, you may want to re-evaluate the objective function with the best hyperparameters again after the hyperparameter optimization. @@ -92,3 +92,8 @@ # and behaves differently from :class:`~optuna.trial.Trial` in some situations. # For example, pruning does not work because :class:`~optuna.trial.FrozenTrial.should_prune` # always returns ``False``. +# +# .. note:: +# For multi-objective optimization as demonstrated by :ref:`multi_objective`, +# :attr:`~optuna.study.Study.best_trials` returns a list of :class:`~optuna.trial.FrozenTrial` +# on Pareto front. So we can re-use each trial in the list by the similar way above.
{"golden_diff": "diff --git a/tutorial/20_recipes/010_reuse_best_trial.py b/tutorial/20_recipes/010_reuse_best_trial.py\n--- a/tutorial/20_recipes/010_reuse_best_trial.py\n+++ b/tutorial/20_recipes/010_reuse_best_trial.py\n@@ -1,8 +1,8 @@\n \"\"\"\n .. _reuse_best_trial:\n \n-Re-use the best values\n-==========================\n+Re-use the best trial\n+======================\n \n In some cases, you may want to re-evaluate the objective function with the best\n hyperparameters again after the hyperparameter optimization.\n@@ -92,3 +92,8 @@\n # and behaves differently from :class:`~optuna.trial.Trial` in some situations.\n # For example, pruning does not work because :class:`~optuna.trial.FrozenTrial.should_prune`\n # always returns ``False``.\n+#\n+# .. note::\n+# For multi-objective optimization as demonstrated by :ref:`multi_objective`,\n+# :attr:`~optuna.study.Study.best_trials` returns a list of :class:`~optuna.trial.FrozenTrial`\n+# on Pareto front. So we can re-use each trial in the list by the similar way above.\n", "issue": "Improve the `Re-use the best values` tutorial page\n### What is an issue?\r\n\r\nThere are some minor issues on the Re-use the best values tutorial page.\r\n\r\n## Description\r\n- [x] On the [Re-use the best values](https://optuna.readthedocs.io/en/stable/tutorial/20_recipes/010_reuse_best_trial.html) page, We should update the title as `re-use the best parameters` instead of `re-use the best values`. As the best value means the best objective value in most of the Optuna references.\r\n- [ ] Mention `Study.best_trials` on the page for multi-objective optimization.\r\n\r\n\r\nContext: https://github.com/optuna/optuna/pull/3396#pullrequestreview-917028874\n", "before_files": [{"content": "\"\"\"\n.. _reuse_best_trial:\n\nRe-use the best values\n==========================\n\nIn some cases, you may want to re-evaluate the objective function with the best\nhyperparameters again after the hyperparameter optimization.\n\nFor example,\n\n- You have found good hyperparameters with Optuna and want to run a similar `objective` function using the best hyperparameters found so far to further analyze the results, or\n- You have optimized with Optuna using a partial dataset to reduce training time. 
After the hyperparameter tuning, you want to train the model using the whole dataset with the best hyperparameter values found.\n\n:class:`~optuna.study.Study.best_trial` provides an interface to re-evaluate the objective function with the current best hyperparameter values.\n\nThis tutorial shows an example of how to re-run a different `objective` function with the current best values, like the first example above.\n\n\nInvestigating the best model further\n-------------------------------------\n\nLet's consider a classical supervised classification problem with Optuna as follows:\n\"\"\"\n\nfrom sklearn import metrics\nfrom sklearn.datasets import make_classification\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\n\n\nimport optuna\n\n\ndef objective(trial):\n X, y = make_classification(n_features=10, random_state=1)\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)\n\n C = trial.suggest_float(\"C\", 1e-7, 10.0, log=True)\n\n clf = LogisticRegression(C=C)\n clf.fit(X_train, y_train)\n\n return clf.score(X_test, y_test)\n\n\nstudy = optuna.create_study(direction=\"maximize\")\nstudy.optimize(objective, n_trials=10)\n\nprint(study.best_trial.value) # Show the best value.\n\n###################################################################################################\n# Suppose after the hyperparameter optimization, you want to calculate other evaluation metrics\n# such as recall, precision, and f1-score on the same dataset.\n# You can define another objective function that shares most of the ``objective``\n# function to reproduce the model with the best hyperparameters.\n\n\ndef detailed_objective(trial):\n # Use same code objective to reproduce the best model\n X, y = make_classification(n_features=10, random_state=1)\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)\n\n C = trial.suggest_float(\"C\", 1e-7, 10.0, log=True)\n\n clf = LogisticRegression(C=C)\n clf.fit(X_train, y_train)\n\n # calculate more evaluation metrics\n pred = clf.predict(X_test)\n\n acc = metrics.accuracy_score(pred, y_test)\n recall = metrics.recall_score(pred, y_test)\n precision = metrics.precision_score(pred, y_test)\n f1 = metrics.f1_score(pred, y_test)\n\n return acc, f1, recall, precision\n\n\n###################################################################################################\n# Pass ``study.best_trial`` as the argument of ``detailed_objective``.\n\ndetailed_objective(study.best_trial) # calculate acc, f1, recall, and precision\n\n###################################################################################################\n# The difference between :class:`~optuna.study.Study.best_trial` and ordinal trials\n# ----------------------------------------------------------------------------------\n#\n# This uses :class:`~optuna.study.Study.best_trial`, which returns the `best_trial` as a\n# :class:`~optuna.trial.FrozenTrial`.\n# The :class:`~optuna.trial.FrozenTrial` is different from an active trial\n# and behaves differently from :class:`~optuna.trial.Trial` in some situations.\n# For example, pruning does not work because :class:`~optuna.trial.FrozenTrial.should_prune`\n# always returns ``False``.\n", "path": "tutorial/20_recipes/010_reuse_best_trial.py"}]}
1,722
273
gh_patches_debug_4749
rasdani/github-patches
git_diff
carpentries__amy-2358
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Server Error when editing community roles Reported by @ErinBecker at `/workshops/person/<id>/edit/#communityroles` and `/communityroles/role/<another id>/edit/` in production. Reproduced on test AMY: ``` TypeError at /communityroles/role/25/edit/ the JSON object must be str, bytes or bytearray, not list Error during template rendering In template /webapps/test-amy.carpentries.org/repo/amy/templates/bootstrap4/field.html, error at line 39 {% crispy_field field %} ``` Also encountered locally when creating a community role and immediately editing it: ``` Exception Value: | 'NoneType' object is not iterable /home/eli/amy/amy/communityroles/fields.py, line 20, in get_context ``` </issue> <code> [start of amy/communityroles/fields.py] 1 import json 2 import logging 3 4 from django import forms 5 from django.http import QueryDict 6 from django.utils.datastructures import MultiValueDict 7 8 logger = logging.getLogger("amy") 9 10 11 class CustomKeysWidget(forms.TextInput): 12 template_name = "widgets/custom_keys_widget.html" 13 14 def __init__(self, *args, **kwargs): 15 self.subwidget_form = kwargs.pop("subwidget_form", forms.TextInput) 16 super().__init__(*args, **kwargs) 17 18 def apply_labels(self, labels: list[str]) -> None: 19 self.labels = labels[:] 20 21 def get_context(self, name: str, value: str, attrs: dict): 22 value_deserialized = json.loads(value) 23 try: 24 value_deserialized_dict = dict(value_deserialized) 25 except (ValueError, TypeError) as e: 26 logger.debug( 27 f"Failed to load custom key values {value_deserialized} to dict: {e}." 28 ) 29 logger.debug("Proceeding without custom key values...") 30 value_deserialized_dict = {} 31 default_values = dict([(label, "") for label in self.labels]) 32 context_value = default_values | value_deserialized_dict 33 34 context = super().get_context(name, context_value, attrs) 35 final_attrs = context["widget"]["attrs"] 36 id_ = context["widget"]["attrs"].get("id") 37 38 subwidgets = [] 39 for index, (label, value) in enumerate(context_value.items()): 40 widget_attrs = final_attrs.copy() 41 if id_: 42 widget_attrs["id"] = "{id_}_{index}".format(id_=id_, index=index) 43 44 widget = self.subwidget_form() 45 subwidget_context = widget.get_context(name, value, widget_attrs)["widget"] 46 subwidgets.append(subwidget_context | {"label": label}) 47 48 context["widget"]["subwidgets"] = subwidgets 49 return context 50 51 def value_from_datadict( 52 self, data: QueryDict, files: MultiValueDict, name: str 53 ) -> list[tuple[str, str]]: 54 """Prepare structure stored in database. The structure is tied to 55 `CommunityRole.custom_keys` expected format: 56 [ 57 (label1, value1), 58 (label2, value2), 59 ... 60 ] 61 """ 62 try: 63 values = data.getlist(name) 64 except AttributeError: 65 values = data.get(name, []) 66 return list(zip(self.labels, values)) 67 68 def value_omitted_from_data( 69 self, data: QueryDict, files: MultiValueDict, name: str 70 ) -> bool: 71 return False 72 73 74 class CustomKeysJSONField(forms.JSONField): 75 def __init__(self, **kwargs): 76 kwargs.setdefault("widget", CustomKeysWidget) 77 super().__init__(**kwargs) 78 79 def apply_labels(self, labels: list[str]) -> None: 80 self.labels = labels[:] 81 self.widget.apply_labels(self.labels) 82 [end of amy/communityroles/fields.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/amy/communityroles/fields.py b/amy/communityroles/fields.py --- a/amy/communityroles/fields.py +++ b/amy/communityroles/fields.py @@ -63,7 +63,7 @@ values = data.getlist(name) except AttributeError: values = data.get(name, []) - return list(zip(self.labels, values)) + return json.dumps(list(zip(self.labels, values))) def value_omitted_from_data( self, data: QueryDict, files: MultiValueDict, name: str
{"golden_diff": "diff --git a/amy/communityroles/fields.py b/amy/communityroles/fields.py\n--- a/amy/communityroles/fields.py\n+++ b/amy/communityroles/fields.py\n@@ -63,7 +63,7 @@\n values = data.getlist(name)\n except AttributeError:\n values = data.get(name, [])\n- return list(zip(self.labels, values))\n+ return json.dumps(list(zip(self.labels, values)))\n \n def value_omitted_from_data(\n self, data: QueryDict, files: MultiValueDict, name: str\n", "issue": "Server Error when editing community roles\nReported by @ErinBecker at `/workshops/person/<id>/edit/#communityroles` and `/communityroles/role/<another id>/edit/` in production.\r\n\r\nReproduced on test AMY:\r\n```\r\nTypeError at /communityroles/role/25/edit/\r\nthe JSON object must be str, bytes or bytearray, not list\r\nError during template rendering\r\nIn template /webapps/test-amy.carpentries.org/repo/amy/templates/bootstrap4/field.html, error at line 39\r\n\r\n{% crispy_field field %}\r\n```\r\n\r\nAlso encountered locally when creating a community role and immediately editing it:\r\n```\r\nException Value: | 'NoneType' object is not iterable\r\n/home/eli/amy/amy/communityroles/fields.py, line 20, in get_context\r\n\r\n```\n", "before_files": [{"content": "import json\nimport logging\n\nfrom django import forms\nfrom django.http import QueryDict\nfrom django.utils.datastructures import MultiValueDict\n\nlogger = logging.getLogger(\"amy\")\n\n\nclass CustomKeysWidget(forms.TextInput):\n template_name = \"widgets/custom_keys_widget.html\"\n\n def __init__(self, *args, **kwargs):\n self.subwidget_form = kwargs.pop(\"subwidget_form\", forms.TextInput)\n super().__init__(*args, **kwargs)\n\n def apply_labels(self, labels: list[str]) -> None:\n self.labels = labels[:]\n\n def get_context(self, name: str, value: str, attrs: dict):\n value_deserialized = json.loads(value)\n try:\n value_deserialized_dict = dict(value_deserialized)\n except (ValueError, TypeError) as e:\n logger.debug(\n f\"Failed to load custom key values {value_deserialized} to dict: {e}.\"\n )\n logger.debug(\"Proceeding without custom key values...\")\n value_deserialized_dict = {}\n default_values = dict([(label, \"\") for label in self.labels])\n context_value = default_values | value_deserialized_dict\n\n context = super().get_context(name, context_value, attrs)\n final_attrs = context[\"widget\"][\"attrs\"]\n id_ = context[\"widget\"][\"attrs\"].get(\"id\")\n\n subwidgets = []\n for index, (label, value) in enumerate(context_value.items()):\n widget_attrs = final_attrs.copy()\n if id_:\n widget_attrs[\"id\"] = \"{id_}_{index}\".format(id_=id_, index=index)\n\n widget = self.subwidget_form()\n subwidget_context = widget.get_context(name, value, widget_attrs)[\"widget\"]\n subwidgets.append(subwidget_context | {\"label\": label})\n\n context[\"widget\"][\"subwidgets\"] = subwidgets\n return context\n\n def value_from_datadict(\n self, data: QueryDict, files: MultiValueDict, name: str\n ) -> list[tuple[str, str]]:\n \"\"\"Prepare structure stored in database. 
The structure is tied to\n `CommunityRole.custom_keys` expected format:\n [\n (label1, value1),\n (label2, value2),\n ...\n ]\n \"\"\"\n try:\n values = data.getlist(name)\n except AttributeError:\n values = data.get(name, [])\n return list(zip(self.labels, values))\n\n def value_omitted_from_data(\n self, data: QueryDict, files: MultiValueDict, name: str\n ) -> bool:\n return False\n\n\nclass CustomKeysJSONField(forms.JSONField):\n def __init__(self, **kwargs):\n kwargs.setdefault(\"widget\", CustomKeysWidget)\n super().__init__(**kwargs)\n\n def apply_labels(self, labels: list[str]) -> None:\n self.labels = labels[:]\n self.widget.apply_labels(self.labels)\n", "path": "amy/communityroles/fields.py"}]}
1,483
123
gh_patches_debug_31340
rasdani/github-patches
git_diff
rucio__rucio-5215
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bug in the filter_thread_work algorithm for Postgres backend Motivation ---------- The partitioning algorithm sometimes fails with `Details: (psycopg2.errors.NumericValueOutOfRange) integer out of range` for Postgres backend, e.g. : ``` rucio=> select abs(('x'||md5(requests.id::text))::bit(64)::int) from requests limit 1; ERROR: integer out of range rucio=> select abs(('x'||md5(requests.id::text))::bit(64)::bigint) from requests limit 1; abs --------------------- 9096650785057471382 (1 row) ``` </issue> <code> [start of lib/rucio/db/sqla/__init__.py] 1 # Copyright European Organization for Nuclear Research (CERN) 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # You may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 6 7 from sqlalchemy.sql.expression import bindparam, text 8 9 10 def filter_thread_work(session, query, total_threads, thread_id, hash_variable=None): 11 """ Filters a query to partition thread workloads based on the thread id and total number of threads """ 12 if thread_id is not None and total_threads is not None and (total_threads - 1) > 0: 13 if session.bind.dialect.name == 'oracle': 14 bindparams = [bindparam('thread_id', thread_id), bindparam('total_threads', total_threads - 1)] 15 if not hash_variable: 16 query = query.filter(text('ORA_HASH(id, :total_threads) = :thread_id').bindparams(*bindparams)) 17 else: 18 query = query.filter(text('ORA_HASH(%s, :total_threads) = :thread_id' % (hash_variable)).bindparams(*bindparams)) 19 elif session.bind.dialect.name == 'mysql': 20 if not hash_variable: 21 query = query.filter(text('mod(md5(id), %s) = %s' % (total_threads, thread_id))) 22 else: 23 query = query.filter(text('mod(md5(%s), %s) = %s' % (hash_variable, total_threads, thread_id))) 24 elif session.bind.dialect.name == 'postgresql': 25 if not hash_variable: 26 query = query.filter(text('mod(abs((\'x\'||md5(id::text))::bit(32)::int), %s) = %s' % (total_threads, thread_id))) 27 else: 28 query = query.filter(text('mod(abs((\'x\'||md5(%s::text))::bit(32)::int), %s) = %s' % (hash_variable, total_threads, thread_id))) 29 return query 30 [end of lib/rucio/db/sqla/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lib/rucio/db/sqla/__init__.py b/lib/rucio/db/sqla/__init__.py --- a/lib/rucio/db/sqla/__init__.py +++ b/lib/rucio/db/sqla/__init__.py @@ -1,8 +1,24 @@ -# Copyright European Organization for Nuclear Research (CERN) +# -*- coding: utf-8 -*- +# Copyright 2013-2022 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); -# You may not use this file except in compliance with the License. -# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Authors: +# - Vincent Garonne <[email protected]>, 2013-2015 +# - Brandon White <[email protected]>, 2019 +# - Martin Barisits <[email protected]>, 2020 +# - Radu Carpa <[email protected]>, 2021 +# - Cedric Serfon <[email protected]>, 2022 from sqlalchemy.sql.expression import bindparam, text @@ -23,7 +39,7 @@ query = query.filter(text('mod(md5(%s), %s) = %s' % (hash_variable, total_threads, thread_id))) elif session.bind.dialect.name == 'postgresql': if not hash_variable: - query = query.filter(text('mod(abs((\'x\'||md5(id::text))::bit(32)::int), %s) = %s' % (total_threads, thread_id))) + query = query.filter(text('mod(abs((\'x\'||md5(id::text))::bit(32)::bigint), %s) = %s' % (total_threads, thread_id))) else: - query = query.filter(text('mod(abs((\'x\'||md5(%s::text))::bit(32)::int), %s) = %s' % (hash_variable, total_threads, thread_id))) + query = query.filter(text('mod(abs((\'x\'||md5(%s::text))::bit(32)::bigint), %s) = %s' % (hash_variable, total_threads, thread_id))) return query
{"golden_diff": "diff --git a/lib/rucio/db/sqla/__init__.py b/lib/rucio/db/sqla/__init__.py\n--- a/lib/rucio/db/sqla/__init__.py\n+++ b/lib/rucio/db/sqla/__init__.py\n@@ -1,8 +1,24 @@\n-# Copyright European Organization for Nuclear Research (CERN)\n+# -*- coding: utf-8 -*-\n+# Copyright 2013-2022 CERN\n #\n # Licensed under the Apache License, Version 2.0 (the \"License\");\n-# You may not use this file except in compliance with the License.\n-# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+#\n+# Authors:\n+# - Vincent Garonne <[email protected]>, 2013-2015\n+# - Brandon White <[email protected]>, 2019\n+# - Martin Barisits <[email protected]>, 2020\n+# - Radu Carpa <[email protected]>, 2021\n+# - Cedric Serfon <[email protected]>, 2022\n \n from sqlalchemy.sql.expression import bindparam, text\n \n@@ -23,7 +39,7 @@\n query = query.filter(text('mod(md5(%s), %s) = %s' % (hash_variable, total_threads, thread_id)))\n elif session.bind.dialect.name == 'postgresql':\n if not hash_variable:\n- query = query.filter(text('mod(abs((\\'x\\'||md5(id::text))::bit(32)::int), %s) = %s' % (total_threads, thread_id)))\n+ query = query.filter(text('mod(abs((\\'x\\'||md5(id::text))::bit(32)::bigint), %s) = %s' % (total_threads, thread_id)))\n else:\n- query = query.filter(text('mod(abs((\\'x\\'||md5(%s::text))::bit(32)::int), %s) = %s' % (hash_variable, total_threads, thread_id)))\n+ query = query.filter(text('mod(abs((\\'x\\'||md5(%s::text))::bit(32)::bigint), %s) = %s' % (hash_variable, total_threads, thread_id)))\n return query\n", "issue": "Bug in the filter_thread_work algorithm for Postgres backend\nMotivation\r\n----------\r\nThe partitioning algorithm sometimes fails with `Details: (psycopg2.errors.NumericValueOutOfRange) integer out of range` for Postgres backend, e.g. 
: \r\n```\r\nrucio=> select abs(('x'||md5(requests.id::text))::bit(64)::int) from requests limit 1;\r\nERROR: integer out of range\r\nrucio=> select abs(('x'||md5(requests.id::text))::bit(64)::bigint) from requests limit 1;\r\n abs \r\n---------------------\r\n 9096650785057471382\r\n(1 row)\r\n```\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright European Organization for Nuclear Research (CERN)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# You may not use this file except in compliance with the License.\n# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n\nfrom sqlalchemy.sql.expression import bindparam, text\n\n\ndef filter_thread_work(session, query, total_threads, thread_id, hash_variable=None):\n \"\"\" Filters a query to partition thread workloads based on the thread id and total number of threads \"\"\"\n if thread_id is not None and total_threads is not None and (total_threads - 1) > 0:\n if session.bind.dialect.name == 'oracle':\n bindparams = [bindparam('thread_id', thread_id), bindparam('total_threads', total_threads - 1)]\n if not hash_variable:\n query = query.filter(text('ORA_HASH(id, :total_threads) = :thread_id').bindparams(*bindparams))\n else:\n query = query.filter(text('ORA_HASH(%s, :total_threads) = :thread_id' % (hash_variable)).bindparams(*bindparams))\n elif session.bind.dialect.name == 'mysql':\n if not hash_variable:\n query = query.filter(text('mod(md5(id), %s) = %s' % (total_threads, thread_id)))\n else:\n query = query.filter(text('mod(md5(%s), %s) = %s' % (hash_variable, total_threads, thread_id)))\n elif session.bind.dialect.name == 'postgresql':\n if not hash_variable:\n query = query.filter(text('mod(abs((\\'x\\'||md5(id::text))::bit(32)::int), %s) = %s' % (total_threads, thread_id)))\n else:\n query = query.filter(text('mod(abs((\\'x\\'||md5(%s::text))::bit(32)::int), %s) = %s' % (hash_variable, total_threads, thread_id)))\n return query\n", "path": "lib/rucio/db/sqla/__init__.py"}]}
1,193
637
gh_patches_debug_20349
rasdani/github-patches
git_diff
iterative__dvc-9797
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> shell completion issue with dvc 3.11.0 release build # Bug Report 👋 while trying to upgrade dvc to the latest release, I ran into the shell completion failure ``` Successfully installed dvc-3.11.0 ERROR: unexpected error - unsupported operand type(s) for %: 'tuple' and 'dict' Having any troubles? Hit us up at https://dvc.org/support, we are always happy to help! Error: Failure while executing; `\{\"SHELL\"=\>\"zsh\"\} /opt/homebrew/Cellar/dvc/3.11.0/bin/dvc completion -s zsh` exited with 255. Here's the output: ``` relates to https://github.com/Homebrew/homebrew-core/pull/138416 </issue> <code> [start of dvc/commands/commit.py] 1 import argparse 2 import logging 3 4 from dvc.cli import completion 5 from dvc.cli.command import CmdBase 6 from dvc.cli.utils import append_doc_link 7 8 logger = logging.getLogger(__name__) 9 10 11 class CmdCommit(CmdBase): 12 def run(self): 13 from dvc.exceptions import DvcException 14 15 if not self.args.targets: 16 self.args.targets = [None] 17 18 for target in self.args.targets: 19 try: 20 self.repo.commit( 21 target, 22 with_deps=self.args.with_deps, 23 recursive=self.args.recursive, 24 force=self.args.force, 25 ) 26 except DvcException: 27 logger.exception("failed to commit%s", (" " + target) if target else "") 28 return 1 29 return 0 30 31 32 def add_parser(subparsers, parent_parser): 33 COMMIT_HELP = ( 34 "Record changes to files or directories tracked by DVC" 35 " by storing the current versions in the cache." 36 ) 37 38 commit_parser = subparsers.add_parser( 39 "commit", 40 parents=[parent_parser], 41 description=append_doc_link(COMMIT_HELP, "commit"), 42 help=COMMIT_HELP, 43 formatter_class=argparse.RawDescriptionHelpFormatter, 44 ) 45 commit_parser.add_argument( 46 "-f", 47 "--force", 48 action="store_true", 49 default=False, 50 help=( 51 "Commit data even if hash values for dependencies or", 52 "outputs did not change.", 53 ), 54 ) 55 commit_parser.add_argument( 56 "-d", 57 "--with-deps", 58 action="store_true", 59 default=False, 60 help="Commit all dependencies of the specified target.", 61 ) 62 commit_parser.add_argument( 63 "-R", 64 "--recursive", 65 action="store_true", 66 default=False, 67 help="Commit cache for subdirectories of the specified directory.", 68 ) 69 commit_parser.add_argument( 70 "targets", 71 nargs="*", 72 help=( 73 "Limit command scope to these tracked files/directories, " 74 ".dvc files and stage names." 75 ), 76 ).complete = completion.DVCFILES_AND_STAGE 77 commit_parser.set_defaults(func=CmdCommit) 78 [end of dvc/commands/commit.py] [start of dvc/commands/completion.py] 1 import argparse 2 import logging 3 4 from dvc.cli.command import CmdBaseNoRepo 5 from dvc.cli.completion import PREAMBLE 6 from dvc.cli.utils import append_doc_link 7 from dvc.ui import ui 8 9 logger = logging.getLogger(__name__) 10 11 12 class CmdCompletion(CmdBaseNoRepo): 13 def run(self): 14 import shtab 15 16 shell = self.args.shell 17 parser = self.args.parser 18 script = shtab.complete(parser, shell=shell, preamble=PREAMBLE) # nosec B604 19 ui.write(script, force=True) 20 return 0 21 22 23 def add_parser(subparsers, parent_parser): 24 COMPLETION_HELP = "Generate shell tab completion." 25 COMPLETION_DESCRIPTION = "Prints out shell tab completion scripts." 
26 completion_parser = subparsers.add_parser( 27 "completion", 28 parents=[parent_parser], 29 description=append_doc_link(COMPLETION_DESCRIPTION, "completion"), 30 help=COMPLETION_HELP, 31 formatter_class=argparse.RawDescriptionHelpFormatter, 32 ) 33 completion_parser.add_argument( 34 "-s", 35 "--shell", 36 help="Shell syntax for completions.", 37 default="bash", 38 choices=["bash", "zsh"], 39 ) 40 completion_parser.set_defaults(func=CmdCompletion) 41 [end of dvc/commands/completion.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dvc/commands/commit.py b/dvc/commands/commit.py --- a/dvc/commands/commit.py +++ b/dvc/commands/commit.py @@ -48,8 +48,8 @@ action="store_true", default=False, help=( - "Commit data even if hash values for dependencies or", - "outputs did not change.", + "Commit data even if hash values for dependencies or " + "outputs did not change." ), ) commit_parser.add_argument( diff --git a/dvc/commands/completion.py b/dvc/commands/completion.py --- a/dvc/commands/completion.py +++ b/dvc/commands/completion.py @@ -9,6 +9,9 @@ logger = logging.getLogger(__name__) +SUPPORTED_SHELLS = ["bash", "zsh"] + + class CmdCompletion(CmdBaseNoRepo): def run(self): import shtab @@ -35,6 +38,6 @@ "--shell", help="Shell syntax for completions.", default="bash", - choices=["bash", "zsh"], + choices=SUPPORTED_SHELLS, ) completion_parser.set_defaults(func=CmdCompletion)
{"golden_diff": "diff --git a/dvc/commands/commit.py b/dvc/commands/commit.py\n--- a/dvc/commands/commit.py\n+++ b/dvc/commands/commit.py\n@@ -48,8 +48,8 @@\n action=\"store_true\",\n default=False,\n help=(\n- \"Commit data even if hash values for dependencies or\",\n- \"outputs did not change.\",\n+ \"Commit data even if hash values for dependencies or \"\n+ \"outputs did not change.\"\n ),\n )\n commit_parser.add_argument(\ndiff --git a/dvc/commands/completion.py b/dvc/commands/completion.py\n--- a/dvc/commands/completion.py\n+++ b/dvc/commands/completion.py\n@@ -9,6 +9,9 @@\n logger = logging.getLogger(__name__)\n \n \n+SUPPORTED_SHELLS = [\"bash\", \"zsh\"]\n+\n+\n class CmdCompletion(CmdBaseNoRepo):\n def run(self):\n import shtab\n@@ -35,6 +38,6 @@\n \"--shell\",\n help=\"Shell syntax for completions.\",\n default=\"bash\",\n- choices=[\"bash\", \"zsh\"],\n+ choices=SUPPORTED_SHELLS,\n )\n completion_parser.set_defaults(func=CmdCompletion)\n", "issue": "shell completion issue with dvc 3.11.0 release build\n# Bug Report\r\n\r\n\ud83d\udc4b while trying to upgrade dvc to the latest release, I ran into the shell completion failure\r\n\r\n```\r\nSuccessfully installed dvc-3.11.0\r\n\u001b[31mERROR\u001b[39m: unexpected error - unsupported operand type(s) for %: 'tuple' and 'dict'\r\n\r\n\u001b[33mHaving any troubles?\u001b[0m Hit us up at \u001b[34mhttps://dvc.org/support\u001b[0m, we are always happy to help!\r\n\u001b[31mError:\u001b[0m Failure while executing; `\\{\\\"SHELL\\\"=\\>\\\"zsh\\\"\\} /opt/homebrew/Cellar/dvc/3.11.0/bin/dvc completion -s zsh` exited with 255. Here's the output:\r\n```\r\n\r\nrelates to https://github.com/Homebrew/homebrew-core/pull/138416\n", "before_files": [{"content": "import argparse\nimport logging\n\nfrom dvc.cli import completion\nfrom dvc.cli.command import CmdBase\nfrom dvc.cli.utils import append_doc_link\n\nlogger = logging.getLogger(__name__)\n\n\nclass CmdCommit(CmdBase):\n def run(self):\n from dvc.exceptions import DvcException\n\n if not self.args.targets:\n self.args.targets = [None]\n\n for target in self.args.targets:\n try:\n self.repo.commit(\n target,\n with_deps=self.args.with_deps,\n recursive=self.args.recursive,\n force=self.args.force,\n )\n except DvcException:\n logger.exception(\"failed to commit%s\", (\" \" + target) if target else \"\")\n return 1\n return 0\n\n\ndef add_parser(subparsers, parent_parser):\n COMMIT_HELP = (\n \"Record changes to files or directories tracked by DVC\"\n \" by storing the current versions in the cache.\"\n )\n\n commit_parser = subparsers.add_parser(\n \"commit\",\n parents=[parent_parser],\n description=append_doc_link(COMMIT_HELP, \"commit\"),\n help=COMMIT_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n commit_parser.add_argument(\n \"-f\",\n \"--force\",\n action=\"store_true\",\n default=False,\n help=(\n \"Commit data even if hash values for dependencies or\",\n \"outputs did not change.\",\n ),\n )\n commit_parser.add_argument(\n \"-d\",\n \"--with-deps\",\n action=\"store_true\",\n default=False,\n help=\"Commit all dependencies of the specified target.\",\n )\n commit_parser.add_argument(\n \"-R\",\n \"--recursive\",\n action=\"store_true\",\n default=False,\n help=\"Commit cache for subdirectories of the specified directory.\",\n )\n commit_parser.add_argument(\n \"targets\",\n nargs=\"*\",\n help=(\n \"Limit command scope to these tracked files/directories, \"\n \".dvc files and stage names.\"\n ),\n ).complete = completion.DVCFILES_AND_STAGE\n 
commit_parser.set_defaults(func=CmdCommit)\n", "path": "dvc/commands/commit.py"}, {"content": "import argparse\nimport logging\n\nfrom dvc.cli.command import CmdBaseNoRepo\nfrom dvc.cli.completion import PREAMBLE\nfrom dvc.cli.utils import append_doc_link\nfrom dvc.ui import ui\n\nlogger = logging.getLogger(__name__)\n\n\nclass CmdCompletion(CmdBaseNoRepo):\n def run(self):\n import shtab\n\n shell = self.args.shell\n parser = self.args.parser\n script = shtab.complete(parser, shell=shell, preamble=PREAMBLE) # nosec B604\n ui.write(script, force=True)\n return 0\n\n\ndef add_parser(subparsers, parent_parser):\n COMPLETION_HELP = \"Generate shell tab completion.\"\n COMPLETION_DESCRIPTION = \"Prints out shell tab completion scripts.\"\n completion_parser = subparsers.add_parser(\n \"completion\",\n parents=[parent_parser],\n description=append_doc_link(COMPLETION_DESCRIPTION, \"completion\"),\n help=COMPLETION_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n completion_parser.add_argument(\n \"-s\",\n \"--shell\",\n help=\"Shell syntax for completions.\",\n default=\"bash\",\n choices=[\"bash\", \"zsh\"],\n )\n completion_parser.set_defaults(func=CmdCompletion)\n", "path": "dvc/commands/completion.py"}]}
1,703
272
gh_patches_debug_108
rasdani/github-patches
git_diff
codespell-project__codespell-3157
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Exit status is always 0 when started as a Python module When started as `python -m codespell_lib` codespell always exits with 0. Is it intentional? </issue> <code> [start of codespell_lib/__main__.py] 1 from ._codespell import _script_main 2 3 if __name__ == "__main__": 4 _script_main() 5 [end of codespell_lib/__main__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/codespell_lib/__main__.py b/codespell_lib/__main__.py --- a/codespell_lib/__main__.py +++ b/codespell_lib/__main__.py @@ -1,4 +1,6 @@ +import sys + from ._codespell import _script_main if __name__ == "__main__": - _script_main() + sys.exit(_script_main())
{"golden_diff": "diff --git a/codespell_lib/__main__.py b/codespell_lib/__main__.py\n--- a/codespell_lib/__main__.py\n+++ b/codespell_lib/__main__.py\n@@ -1,4 +1,6 @@\n+import sys\n+\n from ._codespell import _script_main\n \n if __name__ == \"__main__\":\n- _script_main()\n+ sys.exit(_script_main())\n", "issue": "Exit status is always 0 when started as a Python module\nWhen started as `python -m codespell_lib` codespell always exits with 0.\r\n\r\nIs it intentional?\r\n\n", "before_files": [{"content": "from ._codespell import _script_main\n\nif __name__ == \"__main__\":\n _script_main()\n", "path": "codespell_lib/__main__.py"}]}
606
91
gh_patches_debug_10489
rasdani/github-patches
git_diff
aws-cloudformation__cfn-lint-2691
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> E2506 prevents using SourceSecurityGroupId for non-vpc security groups https://github.com/aws-cloudformation/cfn-lint/blob/4a7af2bd53a9ad1ccaba3a509437c53102ade522/src/cfnlint/rules/resources/ectwo/SecurityGroupIngress.py#L33-L40 I couldn't see any reason in the [cloudformation reference](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-security-group-rule-1.html#cfn-ec2-security-group-rule-sourcesecuritygroupid) that this wouldn't be valid, and I was able successfully create the following stack, where SecurityGroupB seems to be correctly restricted to traffic from SecurityGroupA. I _think_ that this rule is incorrect, unless I'm missing something ```yaml AWSTemplateFormatVersion: '2010-09-09' Resources: SecurityGroupA: Type: AWS::EC2::SecurityGroup Properties: GroupDescription: Group A SecurityGroupIngress: - FromPort: 80 ToPort: 80 IpProtocol: tcp CidrIp: 0.0.0.0/0 SecurityGroupB: Type: AWS::EC2::SecurityGroup Properties: GroupDescription: Group B SecurityGroupIngress: - FromPort: 80 ToPort: 80 IpProtocol: tcp SourceSecurityGroupId: Fn::GetAtt: - SecurityGroupA - GroupId ``` </issue> <code> [start of src/cfnlint/rules/resources/ectwo/SecurityGroupIngress.py] 1 """ 2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 SPDX-License-Identifier: MIT-0 4 """ 5 from cfnlint.rules import CloudFormationLintRule, RuleMatch 6 7 8 class SecurityGroupIngress(CloudFormationLintRule): 9 """Check if EC2 Security Group Ingress Properties""" 10 11 id = "E2506" 12 shortdesc = "Resource EC2 Security Group Ingress Properties" 13 description = ( 14 "See if EC2 Security Group Ingress Properties are set correctly. 
" 15 'Check that "SourceSecurityGroupId" or "SourceSecurityGroupName" are ' 16 " are exclusive and using the type of Ref or GetAtt " 17 ) 18 source_url = "https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-security-group-ingress.html" 19 tags = ["resources", "ec2", "securitygroup"] 20 21 def check_ingress_rule(self, vpc_id, properties, path): 22 """Check ingress rule""" 23 24 matches = [] 25 if vpc_id: 26 # Check that SourceSecurityGroupName isn't specified 27 if properties.get("SourceSecurityGroupName", None): 28 path_error = path[:] + ["SourceSecurityGroupName"] 29 message = ( 30 "SourceSecurityGroupName shouldn't be specified for " 31 "Vpc Security Group at {0}" 32 ) 33 matches.append( 34 RuleMatch( 35 path_error, message.format("/".join(map(str, path_error))) 36 ) 37 ) 38 39 else: 40 if properties.get("SourceSecurityGroupId", None): 41 path_error = path[:] + ["SourceSecurityGroupId"] 42 message = ( 43 "SourceSecurityGroupId shouldn't be specified for " 44 "Non-Vpc Security Group at {0}" 45 ) 46 matches.append( 47 RuleMatch( 48 path_error, message.format("/".join(map(str, path_error))) 49 ) 50 ) 51 52 return matches 53 54 def match(self, cfn): 55 """Check EC2 Security Group Ingress Resource Parameters""" 56 57 matches = [] 58 59 resources = cfn.get_resources(resource_type="AWS::EC2::SecurityGroup") 60 for resource_name, resource_object in resources.items(): 61 properties = resource_object.get("Properties", {}) 62 if properties: 63 vpc_id = properties.get("VpcId", None) 64 ingress_rules = properties.get("SecurityGroupIngress") 65 if isinstance(ingress_rules, list): 66 for index, ingress_rule in enumerate(ingress_rules): 67 path = [ 68 "Resources", 69 resource_name, 70 "Properties", 71 "SecurityGroupIngress", 72 index, 73 ] 74 matches.extend( 75 self.check_ingress_rule( 76 vpc_id=vpc_id, properties=ingress_rule, path=path 77 ) 78 ) 79 80 resources = None 81 resources = cfn.get_resources(resource_type="AWS::EC2::SecurityGroupIngress") 82 for resource_name, resource_object in resources.items(): 83 properties = resource_object.get("Properties", {}) 84 group_id = properties.get("GroupId", None) 85 path = ["Resources", resource_name, "Properties"] 86 if group_id: 87 vpc_id = "vpc-1234567" 88 else: 89 vpc_id = None 90 91 if properties: 92 path = ["Resources", resource_name, "Properties"] 93 matches.extend( 94 self.check_ingress_rule( 95 vpc_id=vpc_id, properties=properties, path=path 96 ) 97 ) 98 return matches 99 [end of src/cfnlint/rules/resources/ectwo/SecurityGroupIngress.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/cfnlint/rules/resources/ectwo/SecurityGroupIngress.py b/src/cfnlint/rules/resources/ectwo/SecurityGroupIngress.py --- a/src/cfnlint/rules/resources/ectwo/SecurityGroupIngress.py +++ b/src/cfnlint/rules/resources/ectwo/SecurityGroupIngress.py @@ -36,19 +36,6 @@ ) ) - else: - if properties.get("SourceSecurityGroupId", None): - path_error = path[:] + ["SourceSecurityGroupId"] - message = ( - "SourceSecurityGroupId shouldn't be specified for " - "Non-Vpc Security Group at {0}" - ) - matches.append( - RuleMatch( - path_error, message.format("/".join(map(str, path_error))) - ) - ) - return matches def match(self, cfn):
{"golden_diff": "diff --git a/src/cfnlint/rules/resources/ectwo/SecurityGroupIngress.py b/src/cfnlint/rules/resources/ectwo/SecurityGroupIngress.py\n--- a/src/cfnlint/rules/resources/ectwo/SecurityGroupIngress.py\n+++ b/src/cfnlint/rules/resources/ectwo/SecurityGroupIngress.py\n@@ -36,19 +36,6 @@\n )\n )\n \n- else:\n- if properties.get(\"SourceSecurityGroupId\", None):\n- path_error = path[:] + [\"SourceSecurityGroupId\"]\n- message = (\n- \"SourceSecurityGroupId shouldn't be specified for \"\n- \"Non-Vpc Security Group at {0}\"\n- )\n- matches.append(\n- RuleMatch(\n- path_error, message.format(\"/\".join(map(str, path_error)))\n- )\n- )\n-\n return matches\n \n def match(self, cfn):\n", "issue": "E2506 prevents using SourceSecurityGroupId for non-vpc security groups\nhttps://github.com/aws-cloudformation/cfn-lint/blob/4a7af2bd53a9ad1ccaba3a509437c53102ade522/src/cfnlint/rules/resources/ectwo/SecurityGroupIngress.py#L33-L40\r\n\r\nI couldn't see any reason in the [cloudformation reference](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-security-group-rule-1.html#cfn-ec2-security-group-rule-sourcesecuritygroupid) that this wouldn't be valid, and I was able successfully create the following stack, where SecurityGroupB seems to be correctly restricted to traffic from SecurityGroupA. I _think_ that this rule is incorrect, unless I'm missing something\r\n\r\n```yaml\r\nAWSTemplateFormatVersion: '2010-09-09'\r\nResources:\r\n SecurityGroupA:\r\n Type: AWS::EC2::SecurityGroup\r\n Properties:\r\n GroupDescription: Group A\r\n SecurityGroupIngress:\r\n - FromPort: 80\r\n ToPort: 80\r\n IpProtocol: tcp\r\n CidrIp: 0.0.0.0/0\r\n\r\n SecurityGroupB:\r\n Type: AWS::EC2::SecurityGroup\r\n Properties:\r\n GroupDescription: Group B\r\n SecurityGroupIngress:\r\n - FromPort: 80\r\n ToPort: 80\r\n IpProtocol: tcp\r\n SourceSecurityGroupId:\r\n Fn::GetAtt:\r\n - SecurityGroupA\r\n - GroupId\r\n```\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass SecurityGroupIngress(CloudFormationLintRule):\n \"\"\"Check if EC2 Security Group Ingress Properties\"\"\"\n\n id = \"E2506\"\n shortdesc = \"Resource EC2 Security Group Ingress Properties\"\n description = (\n \"See if EC2 Security Group Ingress Properties are set correctly. 
\"\n 'Check that \"SourceSecurityGroupId\" or \"SourceSecurityGroupName\" are '\n \" are exclusive and using the type of Ref or GetAtt \"\n )\n source_url = \"https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-security-group-ingress.html\"\n tags = [\"resources\", \"ec2\", \"securitygroup\"]\n\n def check_ingress_rule(self, vpc_id, properties, path):\n \"\"\"Check ingress rule\"\"\"\n\n matches = []\n if vpc_id:\n # Check that SourceSecurityGroupName isn't specified\n if properties.get(\"SourceSecurityGroupName\", None):\n path_error = path[:] + [\"SourceSecurityGroupName\"]\n message = (\n \"SourceSecurityGroupName shouldn't be specified for \"\n \"Vpc Security Group at {0}\"\n )\n matches.append(\n RuleMatch(\n path_error, message.format(\"/\".join(map(str, path_error)))\n )\n )\n\n else:\n if properties.get(\"SourceSecurityGroupId\", None):\n path_error = path[:] + [\"SourceSecurityGroupId\"]\n message = (\n \"SourceSecurityGroupId shouldn't be specified for \"\n \"Non-Vpc Security Group at {0}\"\n )\n matches.append(\n RuleMatch(\n path_error, message.format(\"/\".join(map(str, path_error)))\n )\n )\n\n return matches\n\n def match(self, cfn):\n \"\"\"Check EC2 Security Group Ingress Resource Parameters\"\"\"\n\n matches = []\n\n resources = cfn.get_resources(resource_type=\"AWS::EC2::SecurityGroup\")\n for resource_name, resource_object in resources.items():\n properties = resource_object.get(\"Properties\", {})\n if properties:\n vpc_id = properties.get(\"VpcId\", None)\n ingress_rules = properties.get(\"SecurityGroupIngress\")\n if isinstance(ingress_rules, list):\n for index, ingress_rule in enumerate(ingress_rules):\n path = [\n \"Resources\",\n resource_name,\n \"Properties\",\n \"SecurityGroupIngress\",\n index,\n ]\n matches.extend(\n self.check_ingress_rule(\n vpc_id=vpc_id, properties=ingress_rule, path=path\n )\n )\n\n resources = None\n resources = cfn.get_resources(resource_type=\"AWS::EC2::SecurityGroupIngress\")\n for resource_name, resource_object in resources.items():\n properties = resource_object.get(\"Properties\", {})\n group_id = properties.get(\"GroupId\", None)\n path = [\"Resources\", resource_name, \"Properties\"]\n if group_id:\n vpc_id = \"vpc-1234567\"\n else:\n vpc_id = None\n\n if properties:\n path = [\"Resources\", resource_name, \"Properties\"]\n matches.extend(\n self.check_ingress_rule(\n vpc_id=vpc_id, properties=properties, path=path\n )\n )\n return matches\n", "path": "src/cfnlint/rules/resources/ectwo/SecurityGroupIngress.py"}]}
1,828
197
gh_patches_debug_37811
rasdani/github-patches
git_diff
encode__uvicorn-755
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> lifespan.shutdown.failed message is not implemented there is no use that is made of the "lifespan.shutdown.failed" in the `async def send`. Is that an omission ? Readin the spec we should deal with it, not sure how yet, any ideas ? _Originally posted by @euri10 in https://github.com/encode/uvicorn/pull/751#issuecomment-674366666_ </issue> <code> [start of uvicorn/lifespan/on.py] 1 import asyncio 2 import logging 3 from asyncio import Queue 4 5 from uvicorn import Config 6 from uvicorn._types import LifespanReceiveMessage, LifespanScope, LifespanSendMessage 7 8 STATE_TRANSITION_ERROR = "Got invalid state transition on lifespan protocol." 9 10 11 class LifespanOn: 12 def __init__(self, config: Config) -> None: 13 if not config.loaded: 14 config.load() 15 16 self.config = config 17 self.logger = logging.getLogger("uvicorn.error") 18 self.startup_event = asyncio.Event() 19 self.shutdown_event = asyncio.Event() 20 self.receive_queue: "Queue[LifespanReceiveMessage]" = asyncio.Queue() 21 self.error_occured = False 22 self.startup_failed = False 23 self.should_exit = False 24 25 async def startup(self) -> None: 26 self.logger.info("Waiting for application startup.") 27 28 loop = asyncio.get_event_loop() 29 loop.create_task(self.main()) 30 31 await self.receive_queue.put({"type": "lifespan.startup"}) 32 await self.startup_event.wait() 33 34 if self.startup_failed or (self.error_occured and self.config.lifespan == "on"): 35 self.logger.error("Application startup failed. Exiting.") 36 self.should_exit = True 37 else: 38 self.logger.info("Application startup complete.") 39 40 async def shutdown(self) -> None: 41 if self.error_occured: 42 return 43 self.logger.info("Waiting for application shutdown.") 44 await self.receive_queue.put({"type": "lifespan.shutdown"}) 45 await self.shutdown_event.wait() 46 self.logger.info("Application shutdown complete.") 47 48 async def main(self) -> None: 49 try: 50 app = self.config.loaded_app 51 scope: LifespanScope = { 52 "type": "lifespan", 53 "asgi": {"version": self.config.asgi_version, "spec_version": "2.0"}, 54 } 55 await app(scope, self.receive, self.send) 56 except BaseException as exc: 57 self.asgi = None 58 self.error_occured = True 59 if self.startup_failed: 60 return 61 if self.config.lifespan == "auto": 62 msg = "ASGI 'lifespan' protocol appears unsupported." 
63 self.logger.info(msg) 64 else: 65 msg = "Exception in 'lifespan' protocol\n" 66 self.logger.error(msg, exc_info=exc) 67 finally: 68 self.startup_event.set() 69 self.shutdown_event.set() 70 71 async def send(self, message: LifespanSendMessage) -> None: 72 assert message["type"] in ( 73 "lifespan.startup.complete", 74 "lifespan.startup.failed", 75 "lifespan.shutdown.complete", 76 ) 77 78 if message["type"] == "lifespan.startup.complete": 79 assert not self.startup_event.is_set(), STATE_TRANSITION_ERROR 80 assert not self.shutdown_event.is_set(), STATE_TRANSITION_ERROR 81 self.startup_event.set() 82 83 elif message["type"] == "lifespan.startup.failed": 84 assert not self.startup_event.is_set(), STATE_TRANSITION_ERROR 85 assert not self.shutdown_event.is_set(), STATE_TRANSITION_ERROR 86 self.startup_event.set() 87 self.startup_failed = True 88 if message.get("message"): 89 self.logger.error(message["message"]) 90 91 elif message["type"] == "lifespan.shutdown.complete": 92 assert self.startup_event.is_set(), STATE_TRANSITION_ERROR 93 assert not self.shutdown_event.is_set(), STATE_TRANSITION_ERROR 94 self.shutdown_event.set() 95 96 async def receive(self) -> LifespanReceiveMessage: 97 return await self.receive_queue.get() 98 [end of uvicorn/lifespan/on.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/uvicorn/lifespan/on.py b/uvicorn/lifespan/on.py --- a/uvicorn/lifespan/on.py +++ b/uvicorn/lifespan/on.py @@ -20,6 +20,7 @@ self.receive_queue: "Queue[LifespanReceiveMessage]" = asyncio.Queue() self.error_occured = False self.startup_failed = False + self.shutdown_failed = False self.should_exit = False async def startup(self) -> None: @@ -43,7 +44,14 @@ self.logger.info("Waiting for application shutdown.") await self.receive_queue.put({"type": "lifespan.shutdown"}) await self.shutdown_event.wait() - self.logger.info("Application shutdown complete.") + + if self.shutdown_failed or ( + self.error_occured and self.config.lifespan == "on" + ): + self.logger.error("Application shutdown failed. Exiting.") + self.should_exit = True + else: + self.logger.info("Application shutdown complete.") async def main(self) -> None: try: @@ -56,7 +64,7 @@ except BaseException as exc: self.asgi = None self.error_occured = True - if self.startup_failed: + if self.startup_failed or self.shutdown_failed: return if self.config.lifespan == "auto": msg = "ASGI 'lifespan' protocol appears unsupported." @@ -73,6 +81,7 @@ "lifespan.startup.complete", "lifespan.startup.failed", "lifespan.shutdown.complete", + "lifespan.shutdown.failed", ) if message["type"] == "lifespan.startup.complete": @@ -93,5 +102,13 @@ assert not self.shutdown_event.is_set(), STATE_TRANSITION_ERROR self.shutdown_event.set() + elif message["type"] == "lifespan.shutdown.failed": + assert self.startup_event.is_set(), STATE_TRANSITION_ERROR + assert not self.shutdown_event.is_set(), STATE_TRANSITION_ERROR + self.shutdown_event.set() + self.shutdown_failed = True + if message.get("message"): + self.logger.error(message["message"]) + async def receive(self) -> LifespanReceiveMessage: return await self.receive_queue.get()
{"golden_diff": "diff --git a/uvicorn/lifespan/on.py b/uvicorn/lifespan/on.py\n--- a/uvicorn/lifespan/on.py\n+++ b/uvicorn/lifespan/on.py\n@@ -20,6 +20,7 @@\n self.receive_queue: \"Queue[LifespanReceiveMessage]\" = asyncio.Queue()\n self.error_occured = False\n self.startup_failed = False\n+ self.shutdown_failed = False\n self.should_exit = False\n \n async def startup(self) -> None:\n@@ -43,7 +44,14 @@\n self.logger.info(\"Waiting for application shutdown.\")\n await self.receive_queue.put({\"type\": \"lifespan.shutdown\"})\n await self.shutdown_event.wait()\n- self.logger.info(\"Application shutdown complete.\")\n+\n+ if self.shutdown_failed or (\n+ self.error_occured and self.config.lifespan == \"on\"\n+ ):\n+ self.logger.error(\"Application shutdown failed. Exiting.\")\n+ self.should_exit = True\n+ else:\n+ self.logger.info(\"Application shutdown complete.\")\n \n async def main(self) -> None:\n try:\n@@ -56,7 +64,7 @@\n except BaseException as exc:\n self.asgi = None\n self.error_occured = True\n- if self.startup_failed:\n+ if self.startup_failed or self.shutdown_failed:\n return\n if self.config.lifespan == \"auto\":\n msg = \"ASGI 'lifespan' protocol appears unsupported.\"\n@@ -73,6 +81,7 @@\n \"lifespan.startup.complete\",\n \"lifespan.startup.failed\",\n \"lifespan.shutdown.complete\",\n+ \"lifespan.shutdown.failed\",\n )\n \n if message[\"type\"] == \"lifespan.startup.complete\":\n@@ -93,5 +102,13 @@\n assert not self.shutdown_event.is_set(), STATE_TRANSITION_ERROR\n self.shutdown_event.set()\n \n+ elif message[\"type\"] == \"lifespan.shutdown.failed\":\n+ assert self.startup_event.is_set(), STATE_TRANSITION_ERROR\n+ assert not self.shutdown_event.is_set(), STATE_TRANSITION_ERROR\n+ self.shutdown_event.set()\n+ self.shutdown_failed = True\n+ if message.get(\"message\"):\n+ self.logger.error(message[\"message\"])\n+\n async def receive(self) -> LifespanReceiveMessage:\n return await self.receive_queue.get()\n", "issue": "lifespan.shutdown.failed message is not implemented\nthere is no use that is made of the \"lifespan.shutdown.failed\" in the `async def send`. Is that an omission ? Readin the spec we should deal with it, not sure how yet, any ideas ?\r\n\r\n_Originally posted by @euri10 in https://github.com/encode/uvicorn/pull/751#issuecomment-674366666_\n", "before_files": [{"content": "import asyncio\nimport logging\nfrom asyncio import Queue\n\nfrom uvicorn import Config\nfrom uvicorn._types import LifespanReceiveMessage, LifespanScope, LifespanSendMessage\n\nSTATE_TRANSITION_ERROR = \"Got invalid state transition on lifespan protocol.\"\n\n\nclass LifespanOn:\n def __init__(self, config: Config) -> None:\n if not config.loaded:\n config.load()\n\n self.config = config\n self.logger = logging.getLogger(\"uvicorn.error\")\n self.startup_event = asyncio.Event()\n self.shutdown_event = asyncio.Event()\n self.receive_queue: \"Queue[LifespanReceiveMessage]\" = asyncio.Queue()\n self.error_occured = False\n self.startup_failed = False\n self.should_exit = False\n\n async def startup(self) -> None:\n self.logger.info(\"Waiting for application startup.\")\n\n loop = asyncio.get_event_loop()\n loop.create_task(self.main())\n\n await self.receive_queue.put({\"type\": \"lifespan.startup\"})\n await self.startup_event.wait()\n\n if self.startup_failed or (self.error_occured and self.config.lifespan == \"on\"):\n self.logger.error(\"Application startup failed. 
Exiting.\")\n self.should_exit = True\n else:\n self.logger.info(\"Application startup complete.\")\n\n async def shutdown(self) -> None:\n if self.error_occured:\n return\n self.logger.info(\"Waiting for application shutdown.\")\n await self.receive_queue.put({\"type\": \"lifespan.shutdown\"})\n await self.shutdown_event.wait()\n self.logger.info(\"Application shutdown complete.\")\n\n async def main(self) -> None:\n try:\n app = self.config.loaded_app\n scope: LifespanScope = {\n \"type\": \"lifespan\",\n \"asgi\": {\"version\": self.config.asgi_version, \"spec_version\": \"2.0\"},\n }\n await app(scope, self.receive, self.send)\n except BaseException as exc:\n self.asgi = None\n self.error_occured = True\n if self.startup_failed:\n return\n if self.config.lifespan == \"auto\":\n msg = \"ASGI 'lifespan' protocol appears unsupported.\"\n self.logger.info(msg)\n else:\n msg = \"Exception in 'lifespan' protocol\\n\"\n self.logger.error(msg, exc_info=exc)\n finally:\n self.startup_event.set()\n self.shutdown_event.set()\n\n async def send(self, message: LifespanSendMessage) -> None:\n assert message[\"type\"] in (\n \"lifespan.startup.complete\",\n \"lifespan.startup.failed\",\n \"lifespan.shutdown.complete\",\n )\n\n if message[\"type\"] == \"lifespan.startup.complete\":\n assert not self.startup_event.is_set(), STATE_TRANSITION_ERROR\n assert not self.shutdown_event.is_set(), STATE_TRANSITION_ERROR\n self.startup_event.set()\n\n elif message[\"type\"] == \"lifespan.startup.failed\":\n assert not self.startup_event.is_set(), STATE_TRANSITION_ERROR\n assert not self.shutdown_event.is_set(), STATE_TRANSITION_ERROR\n self.startup_event.set()\n self.startup_failed = True\n if message.get(\"message\"):\n self.logger.error(message[\"message\"])\n\n elif message[\"type\"] == \"lifespan.shutdown.complete\":\n assert self.startup_event.is_set(), STATE_TRANSITION_ERROR\n assert not self.shutdown_event.is_set(), STATE_TRANSITION_ERROR\n self.shutdown_event.set()\n\n async def receive(self) -> LifespanReceiveMessage:\n return await self.receive_queue.get()\n", "path": "uvicorn/lifespan/on.py"}]}
1,594
526
gh_patches_debug_66282
rasdani/github-patches
git_diff
python-poetry__poetry-235
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> "poetry remove" case-sensitivity (qol) ```bash $ poetry add pyyaml Using version ^3.12 for PyYAML Updating dependencies Resolving dependencies... Package operations: 1 install, 0 updates, 0 removals Writing lock file - Installing pyyaml (3.12) $ poetry remove pyyaml [KeyError] remove [-D|--dev] [--dry-run] [--] <packages> (<packages>)... $ poetry remove PyYAML Updating dependencies Resolving dependencies... Package operations: 0 installs, 0 updates, 1 removal Writing lock file - Removing pyyaml (3.12) ``` Not urgent but sending a hint such as "Dependencies are case sensitive." would have been really helpful. </issue> <code> [start of poetry/console/commands/remove.py] 1 from .venv_command import VenvCommand 2 3 4 class RemoveCommand(VenvCommand): 5 """ 6 Removes a package from the project dependencies. 7 8 remove 9 { packages* : Packages that should be removed. } 10 {--D|dev : Removes a package from the development dependencies. } 11 {--dry-run : Outputs the operations but will not execute anything 12 (implicitly enables --verbose). } 13 """ 14 15 help = """The <info>remove</info> command removes a package from the current 16 list of installed packages 17 18 <info>poetry remove</info>""" 19 20 _loggers = ["poetry.repositories.pypi_repository"] 21 22 def handle(self): 23 from poetry.installation import Installer 24 25 packages = self.argument("packages") 26 is_dev = self.option("dev") 27 28 original_content = self.poetry.file.read() 29 content = self.poetry.file.read() 30 poetry_content = content["tool"]["poetry"] 31 section = "dependencies" 32 if is_dev: 33 section = "dev-dependencies" 34 35 # Deleting entries 36 requirements = {} 37 for name in packages: 38 found = False 39 for key in poetry_content[section]: 40 if key.lower() == name.lower(): 41 found = True 42 requirements[name] = poetry_content[section][name] 43 break 44 45 if not found: 46 raise ValueError("Package {} not found".format(name)) 47 48 for key in requirements: 49 del poetry_content[section][key] 50 51 # Write the new content back 52 self.poetry.file.write(content) 53 54 # Update packages 55 self.reset_poetry() 56 57 installer = Installer( 58 self.output, 59 self.venv, 60 self.poetry.package, 61 self.poetry.locker, 62 self.poetry.pool, 63 ) 64 65 installer.dry_run(self.option("dry-run")) 66 installer.update(True) 67 installer.whitelist(requirements) 68 69 try: 70 status = installer.run() 71 except Exception: 72 self.poetry.file.write(original_content) 73 74 raise 75 76 if status != 0 or self.option("dry-run"): 77 # Revert changes 78 if not self.option("dry-run"): 79 self.error( 80 "\n" 81 "Removal failed, reverting pyproject.toml " 82 "to its original content." 83 ) 84 85 self.poetry.file.write(original_content) 86 87 return status 88 [end of poetry/console/commands/remove.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/poetry/console/commands/remove.py b/poetry/console/commands/remove.py --- a/poetry/console/commands/remove.py +++ b/poetry/console/commands/remove.py @@ -39,7 +39,7 @@ for key in poetry_content[section]: if key.lower() == name.lower(): found = True - requirements[name] = poetry_content[section][name] + requirements[key] = poetry_content[section][key] break if not found:
{"golden_diff": "diff --git a/poetry/console/commands/remove.py b/poetry/console/commands/remove.py\n--- a/poetry/console/commands/remove.py\n+++ b/poetry/console/commands/remove.py\n@@ -39,7 +39,7 @@\n for key in poetry_content[section]:\n if key.lower() == name.lower():\n found = True\n- requirements[name] = poetry_content[section][name]\n+ requirements[key] = poetry_content[section][key]\n break\n \n if not found:\n", "issue": "\"poetry remove\" case-sensitivity (qol)\n```bash\r\n$ poetry add pyyaml\r\nUsing version ^3.12 for PyYAML\r\n\r\nUpdating dependencies\r\nResolving dependencies...\r\n\r\n\r\nPackage operations: 1 install, 0 updates, 0 removals\r\n\r\nWriting lock file\r\n\r\n - Installing pyyaml (3.12)\r\n$ poetry remove pyyaml\r\n\r\n[KeyError]\r\n\r\nremove [-D|--dev] [--dry-run] [--] <packages> (<packages>)...\r\n\r\n$ poetry remove PyYAML\r\nUpdating dependencies\r\nResolving dependencies...\r\n\r\n\r\nPackage operations: 0 installs, 0 updates, 1 removal\r\n\r\nWriting lock file\r\n\r\n - Removing pyyaml (3.12)\r\n```\r\n\r\nNot urgent but sending a hint such as \"Dependencies are case sensitive.\" would have been really helpful.\n", "before_files": [{"content": "from .venv_command import VenvCommand\n\n\nclass RemoveCommand(VenvCommand):\n \"\"\"\n Removes a package from the project dependencies.\n\n remove\n { packages* : Packages that should be removed. }\n {--D|dev : Removes a package from the development dependencies. }\n {--dry-run : Outputs the operations but will not execute anything\n (implicitly enables --verbose). }\n \"\"\"\n\n help = \"\"\"The <info>remove</info> command removes a package from the current\nlist of installed packages\n\n<info>poetry remove</info>\"\"\"\n\n _loggers = [\"poetry.repositories.pypi_repository\"]\n\n def handle(self):\n from poetry.installation import Installer\n\n packages = self.argument(\"packages\")\n is_dev = self.option(\"dev\")\n\n original_content = self.poetry.file.read()\n content = self.poetry.file.read()\n poetry_content = content[\"tool\"][\"poetry\"]\n section = \"dependencies\"\n if is_dev:\n section = \"dev-dependencies\"\n\n # Deleting entries\n requirements = {}\n for name in packages:\n found = False\n for key in poetry_content[section]:\n if key.lower() == name.lower():\n found = True\n requirements[name] = poetry_content[section][name]\n break\n\n if not found:\n raise ValueError(\"Package {} not found\".format(name))\n\n for key in requirements:\n del poetry_content[section][key]\n\n # Write the new content back\n self.poetry.file.write(content)\n\n # Update packages\n self.reset_poetry()\n\n installer = Installer(\n self.output,\n self.venv,\n self.poetry.package,\n self.poetry.locker,\n self.poetry.pool,\n )\n\n installer.dry_run(self.option(\"dry-run\"))\n installer.update(True)\n installer.whitelist(requirements)\n\n try:\n status = installer.run()\n except Exception:\n self.poetry.file.write(original_content)\n\n raise\n\n if status != 0 or self.option(\"dry-run\"):\n # Revert changes\n if not self.option(\"dry-run\"):\n self.error(\n \"\\n\"\n \"Removal failed, reverting pyproject.toml \"\n \"to its original content.\"\n )\n\n self.poetry.file.write(original_content)\n\n return status\n", "path": "poetry/console/commands/remove.py"}]}
1,375
111
gh_patches_debug_38131
rasdani/github-patches
git_diff
encode__starlette-1648
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Endpoint path parameters type specification compatibility with OAS When specifying a convertor type for a path parameter like so: ``` ... Route('/users/{user_id:int}', user, methods=["GET", "POST"]) ... ``` The OAS schema generated using `SchemaGenerator` interprets the whole portion within `{}` eg. `'user_id:int'` as the required path parameter variable name, which then requires the parameter name in OAS to be `user_id:int` instead of just `user_id`. ![user_idint](https://user-images.githubusercontent.com/26809407/70369654-50312280-18eb-11ea-9089-53743115394c.png) I think the convertor is great in that a GET request to `/users/foo` will result in `404: NOT FOUND`. Of course, another option is to have a `try-except` clause to handle non-integer values within the method... but I was wondering what the community thinks about this and if this even should be supported by the `SchemaGenerator`. </issue> <code> [start of starlette/schemas.py] 1 import inspect 2 import typing 3 4 from starlette.requests import Request 5 from starlette.responses import Response 6 from starlette.routing import BaseRoute, Mount, Route 7 8 try: 9 import yaml 10 except ImportError: # pragma: nocover 11 yaml = None # type: ignore 12 13 14 class OpenAPIResponse(Response): 15 media_type = "application/vnd.oai.openapi" 16 17 def render(self, content: typing.Any) -> bytes: 18 assert yaml is not None, "`pyyaml` must be installed to use OpenAPIResponse." 19 assert isinstance( 20 content, dict 21 ), "The schema passed to OpenAPIResponse should be a dictionary." 22 return yaml.dump(content, default_flow_style=False).encode("utf-8") 23 24 25 class EndpointInfo(typing.NamedTuple): 26 path: str 27 http_method: str 28 func: typing.Callable 29 30 31 class BaseSchemaGenerator: 32 def get_schema(self, routes: typing.List[BaseRoute]) -> dict: 33 raise NotImplementedError() # pragma: no cover 34 35 def get_endpoints( 36 self, routes: typing.List[BaseRoute] 37 ) -> typing.List[EndpointInfo]: 38 """ 39 Given the routes, yields the following information: 40 41 - path 42 eg: /users/ 43 - http_method 44 one of 'get', 'post', 'put', 'patch', 'delete', 'options' 45 - func 46 method ready to extract the docstring 47 """ 48 endpoints_info: list = [] 49 50 for route in routes: 51 if isinstance(route, Mount): 52 routes = route.routes or [] 53 sub_endpoints = [ 54 EndpointInfo( 55 path="".join((route.path, sub_endpoint.path)), 56 http_method=sub_endpoint.http_method, 57 func=sub_endpoint.func, 58 ) 59 for sub_endpoint in self.get_endpoints(routes) 60 ] 61 endpoints_info.extend(sub_endpoints) 62 63 elif not isinstance(route, Route) or not route.include_in_schema: 64 continue 65 66 elif inspect.isfunction(route.endpoint) or inspect.ismethod(route.endpoint): 67 for method in route.methods or ["GET"]: 68 if method == "HEAD": 69 continue 70 endpoints_info.append( 71 EndpointInfo(route.path, method.lower(), route.endpoint) 72 ) 73 else: 74 for method in ["get", "post", "put", "patch", "delete", "options"]: 75 if not hasattr(route.endpoint, method): 76 continue 77 func = getattr(route.endpoint, method) 78 endpoints_info.append( 79 EndpointInfo(route.path, method.lower(), func) 80 ) 81 82 return endpoints_info 83 84 def parse_docstring(self, func_or_method: typing.Callable) -> dict: 85 """ 86 Given a function, parse the docstring as YAML and return a dictionary of info. 
87 """ 88 docstring = func_or_method.__doc__ 89 if not docstring: 90 return {} 91 92 assert yaml is not None, "`pyyaml` must be installed to use parse_docstring." 93 94 # We support having regular docstrings before the schema 95 # definition. Here we return just the schema part from 96 # the docstring. 97 docstring = docstring.split("---")[-1] 98 99 parsed = yaml.safe_load(docstring) 100 101 if not isinstance(parsed, dict): 102 # A regular docstring (not yaml formatted) can return 103 # a simple string here, which wouldn't follow the schema. 104 return {} 105 106 return parsed 107 108 def OpenAPIResponse(self, request: Request) -> Response: 109 routes = request.app.routes 110 schema = self.get_schema(routes=routes) 111 return OpenAPIResponse(schema) 112 113 114 class SchemaGenerator(BaseSchemaGenerator): 115 def __init__(self, base_schema: dict) -> None: 116 self.base_schema = base_schema 117 118 def get_schema(self, routes: typing.List[BaseRoute]) -> dict: 119 schema = dict(self.base_schema) 120 schema.setdefault("paths", {}) 121 endpoints_info = self.get_endpoints(routes) 122 123 for endpoint in endpoints_info: 124 125 parsed = self.parse_docstring(endpoint.func) 126 127 if not parsed: 128 continue 129 130 if endpoint.path not in schema["paths"]: 131 schema["paths"][endpoint.path] = {} 132 133 schema["paths"][endpoint.path][endpoint.http_method] = parsed 134 135 return schema 136 [end of starlette/schemas.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/starlette/schemas.py b/starlette/schemas.py --- a/starlette/schemas.py +++ b/starlette/schemas.py @@ -1,4 +1,5 @@ import inspect +import re import typing from starlette.requests import Request @@ -49,10 +50,11 @@ for route in routes: if isinstance(route, Mount): + path = self._remove_converter(route.path) routes = route.routes or [] sub_endpoints = [ EndpointInfo( - path="".join((route.path, sub_endpoint.path)), + path="".join((path, sub_endpoint.path)), http_method=sub_endpoint.http_method, func=sub_endpoint.func, ) @@ -64,23 +66,32 @@ continue elif inspect.isfunction(route.endpoint) or inspect.ismethod(route.endpoint): + path = self._remove_converter(route.path) for method in route.methods or ["GET"]: if method == "HEAD": continue endpoints_info.append( - EndpointInfo(route.path, method.lower(), route.endpoint) + EndpointInfo(path, method.lower(), route.endpoint) ) else: + path = self._remove_converter(route.path) for method in ["get", "post", "put", "patch", "delete", "options"]: if not hasattr(route.endpoint, method): continue func = getattr(route.endpoint, method) - endpoints_info.append( - EndpointInfo(route.path, method.lower(), func) - ) + endpoints_info.append(EndpointInfo(path, method.lower(), func)) return endpoints_info + def _remove_converter(self, path: str) -> str: + """ + Remove the converter from the path. + For example, a route like this: + Route("/users/{id:int}", endpoint=get_user, methods=["GET"]) + Should be represented as `/users/{id}` in the OpenAPI schema. + """ + return re.sub(r":\w+}", "}", path) + def parse_docstring(self, func_or_method: typing.Callable) -> dict: """ Given a function, parse the docstring as YAML and return a dictionary of info.
{"golden_diff": "diff --git a/starlette/schemas.py b/starlette/schemas.py\n--- a/starlette/schemas.py\n+++ b/starlette/schemas.py\n@@ -1,4 +1,5 @@\n import inspect\n+import re\n import typing\n \n from starlette.requests import Request\n@@ -49,10 +50,11 @@\n \n for route in routes:\n if isinstance(route, Mount):\n+ path = self._remove_converter(route.path)\n routes = route.routes or []\n sub_endpoints = [\n EndpointInfo(\n- path=\"\".join((route.path, sub_endpoint.path)),\n+ path=\"\".join((path, sub_endpoint.path)),\n http_method=sub_endpoint.http_method,\n func=sub_endpoint.func,\n )\n@@ -64,23 +66,32 @@\n continue\n \n elif inspect.isfunction(route.endpoint) or inspect.ismethod(route.endpoint):\n+ path = self._remove_converter(route.path)\n for method in route.methods or [\"GET\"]:\n if method == \"HEAD\":\n continue\n endpoints_info.append(\n- EndpointInfo(route.path, method.lower(), route.endpoint)\n+ EndpointInfo(path, method.lower(), route.endpoint)\n )\n else:\n+ path = self._remove_converter(route.path)\n for method in [\"get\", \"post\", \"put\", \"patch\", \"delete\", \"options\"]:\n if not hasattr(route.endpoint, method):\n continue\n func = getattr(route.endpoint, method)\n- endpoints_info.append(\n- EndpointInfo(route.path, method.lower(), func)\n- )\n+ endpoints_info.append(EndpointInfo(path, method.lower(), func))\n \n return endpoints_info\n \n+ def _remove_converter(self, path: str) -> str:\n+ \"\"\"\n+ Remove the converter from the path.\n+ For example, a route like this:\n+ Route(\"/users/{id:int}\", endpoint=get_user, methods=[\"GET\"])\n+ Should be represented as `/users/{id}` in the OpenAPI schema.\n+ \"\"\"\n+ return re.sub(r\":\\w+}\", \"}\", path)\n+\n def parse_docstring(self, func_or_method: typing.Callable) -> dict:\n \"\"\"\n Given a function, parse the docstring as YAML and return a dictionary of info.\n", "issue": "Endpoint path parameters type specification compatibility with OAS\nWhen specifying a convertor type for a path parameter like so:\r\n```\r\n...\r\nRoute('/users/{user_id:int}', user, methods=[\"GET\", \"POST\"])\r\n...\r\n```\r\n\r\nThe OAS schema generated using `SchemaGenerator` interprets the whole portion within `{}` eg. `'user_id:int'` as the required path parameter variable name, which then requires the parameter name in OAS to be `user_id:int` instead of just `user_id`.\r\n\r\n![user_idint](https://user-images.githubusercontent.com/26809407/70369654-50312280-18eb-11ea-9089-53743115394c.png)\r\n\r\nI think the convertor is great in that a GET request to `/users/foo` will result in `404: NOT FOUND`. Of course, another option is to have a `try-except` clause to handle non-integer values within the method... 
but I was wondering what the community thinks about this and if this even should be supported by the `SchemaGenerator`.\n", "before_files": [{"content": "import inspect\nimport typing\n\nfrom starlette.requests import Request\nfrom starlette.responses import Response\nfrom starlette.routing import BaseRoute, Mount, Route\n\ntry:\n import yaml\nexcept ImportError: # pragma: nocover\n yaml = None # type: ignore\n\n\nclass OpenAPIResponse(Response):\n media_type = \"application/vnd.oai.openapi\"\n\n def render(self, content: typing.Any) -> bytes:\n assert yaml is not None, \"`pyyaml` must be installed to use OpenAPIResponse.\"\n assert isinstance(\n content, dict\n ), \"The schema passed to OpenAPIResponse should be a dictionary.\"\n return yaml.dump(content, default_flow_style=False).encode(\"utf-8\")\n\n\nclass EndpointInfo(typing.NamedTuple):\n path: str\n http_method: str\n func: typing.Callable\n\n\nclass BaseSchemaGenerator:\n def get_schema(self, routes: typing.List[BaseRoute]) -> dict:\n raise NotImplementedError() # pragma: no cover\n\n def get_endpoints(\n self, routes: typing.List[BaseRoute]\n ) -> typing.List[EndpointInfo]:\n \"\"\"\n Given the routes, yields the following information:\n\n - path\n eg: /users/\n - http_method\n one of 'get', 'post', 'put', 'patch', 'delete', 'options'\n - func\n method ready to extract the docstring\n \"\"\"\n endpoints_info: list = []\n\n for route in routes:\n if isinstance(route, Mount):\n routes = route.routes or []\n sub_endpoints = [\n EndpointInfo(\n path=\"\".join((route.path, sub_endpoint.path)),\n http_method=sub_endpoint.http_method,\n func=sub_endpoint.func,\n )\n for sub_endpoint in self.get_endpoints(routes)\n ]\n endpoints_info.extend(sub_endpoints)\n\n elif not isinstance(route, Route) or not route.include_in_schema:\n continue\n\n elif inspect.isfunction(route.endpoint) or inspect.ismethod(route.endpoint):\n for method in route.methods or [\"GET\"]:\n if method == \"HEAD\":\n continue\n endpoints_info.append(\n EndpointInfo(route.path, method.lower(), route.endpoint)\n )\n else:\n for method in [\"get\", \"post\", \"put\", \"patch\", \"delete\", \"options\"]:\n if not hasattr(route.endpoint, method):\n continue\n func = getattr(route.endpoint, method)\n endpoints_info.append(\n EndpointInfo(route.path, method.lower(), func)\n )\n\n return endpoints_info\n\n def parse_docstring(self, func_or_method: typing.Callable) -> dict:\n \"\"\"\n Given a function, parse the docstring as YAML and return a dictionary of info.\n \"\"\"\n docstring = func_or_method.__doc__\n if not docstring:\n return {}\n\n assert yaml is not None, \"`pyyaml` must be installed to use parse_docstring.\"\n\n # We support having regular docstrings before the schema\n # definition. 
Here we return just the schema part from\n # the docstring.\n docstring = docstring.split(\"---\")[-1]\n\n parsed = yaml.safe_load(docstring)\n\n if not isinstance(parsed, dict):\n # A regular docstring (not yaml formatted) can return\n # a simple string here, which wouldn't follow the schema.\n return {}\n\n return parsed\n\n def OpenAPIResponse(self, request: Request) -> Response:\n routes = request.app.routes\n schema = self.get_schema(routes=routes)\n return OpenAPIResponse(schema)\n\n\nclass SchemaGenerator(BaseSchemaGenerator):\n def __init__(self, base_schema: dict) -> None:\n self.base_schema = base_schema\n\n def get_schema(self, routes: typing.List[BaseRoute]) -> dict:\n schema = dict(self.base_schema)\n schema.setdefault(\"paths\", {})\n endpoints_info = self.get_endpoints(routes)\n\n for endpoint in endpoints_info:\n\n parsed = self.parse_docstring(endpoint.func)\n\n if not parsed:\n continue\n\n if endpoint.path not in schema[\"paths\"]:\n schema[\"paths\"][endpoint.path] = {}\n\n schema[\"paths\"][endpoint.path][endpoint.http_method] = parsed\n\n return schema\n", "path": "starlette/schemas.py"}]}
1,987
481
gh_patches_debug_11997
rasdani/github-patches
git_diff
Parsl__parsl-175
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> PathLike behavior for File With the recent changes to the DataFuture behavior and the File based model that we are moving to in general, we want to have File behave as closely to old behavior as possible so that there's least amount of code breakage. This is one such case: Earlier we could do this since the result of a DataFuture was a string > f = open(DataFuture1.result(), 'r') Now the result of a DataFuture is a File, which can't simply be passed to open: > f = open(File, 'r') <-- Fails This can be fixed by making File follow the [os.PathLike](https://docs.python.org/3/library/os.html#os.PathLike) interface </issue> <code> [start of parsl/data_provider/files.py] 1 """Define the File Type. 2 3 The primary purpose of the File object is to track the protocol to be used 4 to transfer the file as well as to give the appropriate filepath depending 5 on where(client-side, remote-side, intermediary-side) the File.filepath is 6 being called from 7 """ 8 9 import os 10 import logging 11 from urllib.parse import urlparse 12 from parsl.data_provider.data_manager import DataManager 13 14 15 logger = logging.getLogger(__name__) 16 17 18 class File(object): 19 """The Parsl File Class. 20 21 This is planned to be a very simple class that simply 22 captures various attributes of a file, and relies on client-side and worker-side 23 systems to enable to appropriate transfer of files. 24 """ 25 26 def __init__(self, url, dman=None, cache=False, caching_dir=".", staging='direct'): 27 """Construct a File object from a url string. 28 29 Args: 30 - url (string) : url string of the file e.g. 31 - 'input.txt' 32 - 'file:///scratch/proj101/input.txt' 33 - 'globus://go#ep1/~/data/input.txt' 34 - 'globus://ddb59aef-6d04-11e5-ba46-22000b92c6ec/home/johndoe/data/input.txt' 35 - dman (DataManager) : data manager 36 """ 37 self.url = url 38 parsed_url = urlparse(self.url) 39 self.scheme = parsed_url.scheme if parsed_url.scheme else 'file' 40 self.netloc = parsed_url.netloc 41 self.path = parsed_url.path 42 self.filename = os.path.basename(self.path) 43 self.dman = dman if dman else DataManager.get_data_manager() 44 self.data_future = {} 45 if self.scheme != 'file': 46 self.dman.add_file(self) 47 48 self.cache = cache 49 self.caching_dir = caching_dir 50 self.staging = staging 51 52 def __str__(self): 53 return self.filepath 54 55 def __repr__(self): 56 return self.__str__() 57 58 @property 59 def filepath(self): 60 """Return the resolved filepath on the side where it is called from. 61 62 The appropriate filepath will be returned when called from within 63 an app running remotely as well as regular python on the client side. 
64 65 Args: 66 - self 67 Returns: 68 - filepath (string) 69 """ 70 if self.scheme == 'globus': 71 if hasattr(self, 'local_path'): 72 return self.local_path 73 74 if 'exec_site' not in globals() or self.staging == 'direct': 75 # Assume local and direct 76 return self.path 77 else: 78 # Return self.path for now 79 return self.path 80 81 def stage_in(self, site=None): 82 """Transport file from the site of origin to local site.""" 83 return self.dman.stage_in(self, site) 84 85 def stage_out(self): 86 """Transport file from local filesystem to origin site.""" 87 return self.dman.stage_out(self) 88 89 def set_data_future(self, df, site=None): 90 self.data_future[site] = df 91 92 def get_data_future(self, site): 93 return self.data_future.get(site) 94 95 96 if __name__ == '__main__': 97 98 x = File('./files.py') 99 [end of parsl/data_provider/files.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/parsl/data_provider/files.py b/parsl/data_provider/files.py --- a/parsl/data_provider/files.py +++ b/parsl/data_provider/files.py @@ -15,7 +15,7 @@ logger = logging.getLogger(__name__) -class File(object): +class File(str): """The Parsl File Class. This is planned to be a very simple class that simply @@ -59,6 +59,9 @@ def __repr__(self): return self.__str__() + def __fspath__(self): + return self.filepath + @property def filepath(self): """Returns the resolved filepath on the side where it is called from.
{"golden_diff": "diff --git a/parsl/data_provider/files.py b/parsl/data_provider/files.py\n--- a/parsl/data_provider/files.py\n+++ b/parsl/data_provider/files.py\n@@ -15,7 +15,7 @@\n logger = logging.getLogger(__name__)\n \n \n-class File(object):\n+class File(str):\n \"\"\"The Parsl File Class.\n \n This is planned to be a very simple class that simply\n@@ -59,6 +59,9 @@\n def __repr__(self):\n return self.__str__()\n \n+ def __fspath__(self):\n+ return self.filepath\n+\n @property\n def filepath(self):\n \"\"\"Returns the resolved filepath on the side where it is called from.\n", "issue": "PathLike behavior for File\nWith the recent changes to the DataFuture behavior and the File based model that we are moving to in general, we want to have File behave as closely to old behavior as possible so that there's least amount of code breakage. This is one such case:\r\n\r\nEarlier we could do this since the result of a DataFuture was a string\r\n> f = open(DataFuture1.result(), 'r') \r\n\r\nNow the result of a DataFuture is a File, which can't simply be passed to open:\r\n> f = open(File, 'r') <-- Fails \r\n\r\nThis can be fixed by making File follow the [os.PathLike](https://docs.python.org/3/library/os.html#os.PathLike) interface \n", "before_files": [{"content": "\"\"\"Define the File Type.\n\nThe primary purpose of the File object is to track the protocol to be used\nto transfer the file as well as to give the appropriate filepath depending\non where(client-side, remote-side, intermediary-side) the File.filepath is\nbeing called from\n\"\"\"\n\nimport os\nimport logging\nfrom urllib.parse import urlparse\nfrom parsl.data_provider.data_manager import DataManager\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass File(object):\n \"\"\"The Parsl File Class.\n\n This is planned to be a very simple class that simply\n captures various attributes of a file, and relies on client-side and worker-side\n systems to enable to appropriate transfer of files.\n \"\"\"\n\n def __init__(self, url, dman=None, cache=False, caching_dir=\".\", staging='direct'):\n \"\"\"Construct a File object from a url string.\n\n Args:\n - url (string) : url string of the file e.g.\n - 'input.txt'\n - 'file:///scratch/proj101/input.txt'\n - 'globus://go#ep1/~/data/input.txt'\n - 'globus://ddb59aef-6d04-11e5-ba46-22000b92c6ec/home/johndoe/data/input.txt'\n - dman (DataManager) : data manager\n \"\"\"\n self.url = url\n parsed_url = urlparse(self.url)\n self.scheme = parsed_url.scheme if parsed_url.scheme else 'file'\n self.netloc = parsed_url.netloc\n self.path = parsed_url.path\n self.filename = os.path.basename(self.path)\n self.dman = dman if dman else DataManager.get_data_manager()\n self.data_future = {}\n if self.scheme != 'file':\n self.dman.add_file(self)\n\n self.cache = cache\n self.caching_dir = caching_dir\n self.staging = staging\n\n def __str__(self):\n return self.filepath\n\n def __repr__(self):\n return self.__str__()\n\n @property\n def filepath(self):\n \"\"\"Return the resolved filepath on the side where it is called from.\n\n The appropriate filepath will be returned when called from within\n an app running remotely as well as regular python on the client side.\n\n Args:\n - self\n Returns:\n - filepath (string)\n \"\"\"\n if self.scheme == 'globus':\n if hasattr(self, 'local_path'):\n return self.local_path\n\n if 'exec_site' not in globals() or self.staging == 'direct':\n # Assume local and direct\n return self.path\n else:\n # Return self.path for now\n return self.path\n\n def stage_in(self, 
site=None):\n \"\"\"Transport file from the site of origin to local site.\"\"\"\n return self.dman.stage_in(self, site)\n\n def stage_out(self):\n \"\"\"Transport file from local filesystem to origin site.\"\"\"\n return self.dman.stage_out(self)\n\n def set_data_future(self, df, site=None):\n self.data_future[site] = df\n\n def get_data_future(self, site):\n return self.data_future.get(site)\n\n\nif __name__ == '__main__':\n\n x = File('./files.py')\n", "path": "parsl/data_provider/files.py"}]}
1,586
159
gh_patches_debug_20087
rasdani/github-patches
git_diff
cloudtools__troposphere-840
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add properties to AWS::Events::Rule Target property [AWS::Events::Rule](http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-rule.html) In the Target property type, use the following properties for input transformation of events and setting Amazon ECS task and Kinesis stream targets. - EcsParameters - InputTransformer - KinesisParameters - RunCommandParameters </issue> <code> [start of troposphere/events.py] 1 # Copyright (c) 2013, Mark Peek <[email protected]> 2 # All rights reserved. 3 # 4 # See LICENSE file for full license. 5 6 from . import AWSObject, AWSProperty 7 8 9 class EcsParameters(AWSProperty): 10 props = { 11 "TaskCount": (int, False), 12 "TaskDefinitionArn": (basestring, True), 13 } 14 15 16 class Target(AWSProperty): 17 props = { 18 'Arn': (basestring, True), 19 "EcsParameters": (EcsParameters, False), 20 'Id': (basestring, True), 21 'Input': (basestring, False), 22 'InputPath': (basestring, False), 23 'RoleArn': (basestring, False), 24 } 25 26 27 class Rule(AWSObject): 28 resource_type = "AWS::Events::Rule" 29 30 props = { 31 32 'Description': (basestring, False), 33 'EventPattern': (dict, False), 34 'Name': (basestring, False), 35 'ScheduleExpression': (basestring, False), 36 'State': (basestring, False), 37 'Targets': ([Target], False), 38 } 39 [end of troposphere/events.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/troposphere/events.py b/troposphere/events.py --- a/troposphere/events.py +++ b/troposphere/events.py @@ -13,6 +13,32 @@ } +class InputTransformer(AWSProperty): + props = { + 'InputPathsMap': (dict, False), + 'InputTemplate': (basestring, True), + } + + +class KinesisParameters(AWSProperty): + props = { + 'PartitionKeyPath': (basestring, True), + } + + +class RunCommandTarget(AWSProperty): + props = { + 'Key': (basestring, True), + 'Values': ([basestring], True), + } + + +class RunCommandParameters(AWSProperty): + props = { + 'RunCommandTargets': ([RunCommandTarget], True), + } + + class Target(AWSProperty): props = { 'Arn': (basestring, True), @@ -20,7 +46,10 @@ 'Id': (basestring, True), 'Input': (basestring, False), 'InputPath': (basestring, False), + 'InputTransformer': (InputTransformer, False), + 'KinesisParameters': (KinesisParameters, False), 'RoleArn': (basestring, False), + 'RunCommandParameters': (RunCommandParameters, False), }
{"golden_diff": "diff --git a/troposphere/events.py b/troposphere/events.py\n--- a/troposphere/events.py\n+++ b/troposphere/events.py\n@@ -13,6 +13,32 @@\n }\n \n \n+class InputTransformer(AWSProperty):\n+ props = {\n+ 'InputPathsMap': (dict, False),\n+ 'InputTemplate': (basestring, True),\n+ }\n+\n+\n+class KinesisParameters(AWSProperty):\n+ props = {\n+ 'PartitionKeyPath': (basestring, True),\n+ }\n+\n+\n+class RunCommandTarget(AWSProperty):\n+ props = {\n+ 'Key': (basestring, True),\n+ 'Values': ([basestring], True),\n+ }\n+\n+\n+class RunCommandParameters(AWSProperty):\n+ props = {\n+ 'RunCommandTargets': ([RunCommandTarget], True),\n+ }\n+\n+\n class Target(AWSProperty):\n props = {\n 'Arn': (basestring, True),\n@@ -20,7 +46,10 @@\n 'Id': (basestring, True),\n 'Input': (basestring, False),\n 'InputPath': (basestring, False),\n+ 'InputTransformer': (InputTransformer, False),\n+ 'KinesisParameters': (KinesisParameters, False),\n 'RoleArn': (basestring, False),\n+ 'RunCommandParameters': (RunCommandParameters, False),\n }\n", "issue": "Add properties to AWS::Events::Rule Target property\n[AWS::Events::Rule](http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-events-rule.html)\r\nIn the Target property type, use the following properties for input transformation of events and setting Amazon ECS task and Kinesis stream targets.\r\n\r\n- EcsParameters\r\n- InputTransformer\r\n- KinesisParameters\r\n- RunCommandParameters\n", "before_files": [{"content": "# Copyright (c) 2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSObject, AWSProperty\n\n\nclass EcsParameters(AWSProperty):\n props = {\n \"TaskCount\": (int, False),\n \"TaskDefinitionArn\": (basestring, True),\n }\n\n\nclass Target(AWSProperty):\n props = {\n 'Arn': (basestring, True),\n \"EcsParameters\": (EcsParameters, False),\n 'Id': (basestring, True),\n 'Input': (basestring, False),\n 'InputPath': (basestring, False),\n 'RoleArn': (basestring, False),\n }\n\n\nclass Rule(AWSObject):\n resource_type = \"AWS::Events::Rule\"\n\n props = {\n\n 'Description': (basestring, False),\n 'EventPattern': (dict, False),\n 'Name': (basestring, False),\n 'ScheduleExpression': (basestring, False),\n 'State': (basestring, False),\n 'Targets': ([Target], False),\n }\n", "path": "troposphere/events.py"}]}
932
314
gh_patches_debug_18570
rasdani/github-patches
git_diff
celery__kombu-794
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> SQS queue name gets mangled Using Celery/Kombu in an environment with _future_ strings support you end up with mangled queue names in SQS. The code that tries to translate invalid SQS queue characters causes the byte string to get converted to a string representation such as b'my_queue_name' and then translated that to 'b_my_queue_name' I am finding this because we are using Apache Airflow in a Python 2.7 environment using SQS queues as the broker. </issue> <code> [start of kombu/utils/encoding.py] 1 # -*- coding: utf-8 -*- 2 """Text encoding utilities. 3 4 Utilities to encode text, and to safely emit text from running 5 applications without crashing from the infamous 6 :exc:`UnicodeDecodeError` exception. 7 """ 8 from __future__ import absolute_import, unicode_literals 9 10 import sys 11 import traceback 12 13 from kombu.five import text_t 14 15 is_py3k = sys.version_info >= (3, 0) 16 17 #: safe_str takes encoding from this file by default. 18 #: :func:`set_default_encoding_file` can used to set the 19 #: default output file. 20 default_encoding_file = None 21 22 23 def set_default_encoding_file(file): 24 """Set file used to get codec information.""" 25 global default_encoding_file 26 default_encoding_file = file 27 28 29 def get_default_encoding_file(): 30 """Get file used to get codec information.""" 31 return default_encoding_file 32 33 34 if sys.platform.startswith('java'): # pragma: no cover 35 36 def default_encoding(file=None): 37 """Get default encoding.""" 38 return 'utf-8' 39 else: 40 41 def default_encoding(file=None): # noqa 42 """Get default encoding.""" 43 file = file or get_default_encoding_file() 44 return getattr(file, 'encoding', None) or sys.getfilesystemencoding() 45 46 if is_py3k: # pragma: no cover 47 48 def str_to_bytes(s): 49 """Convert str to bytes.""" 50 if isinstance(s, str): 51 return s.encode() 52 return s 53 54 def bytes_to_str(s): 55 """Convert bytes to str.""" 56 if isinstance(s, bytes): 57 return s.decode() 58 return s 59 60 def from_utf8(s, *args, **kwargs): 61 """Get str from utf-8 encoding.""" 62 return s 63 64 def ensure_bytes(s): 65 """Ensure s is bytes, not str.""" 66 if not isinstance(s, bytes): 67 return str_to_bytes(s) 68 return s 69 70 def default_encode(obj): 71 """Encode using default encoding.""" 72 return obj 73 74 str_t = str 75 76 else: 77 78 def str_to_bytes(s): # noqa 79 """Convert str to bytes.""" 80 if isinstance(s, unicode): 81 return s.encode() 82 return s 83 84 def bytes_to_str(s): # noqa 85 """Convert bytes to str.""" 86 return s 87 88 def from_utf8(s, *args, **kwargs): # noqa 89 """Convert utf-8 to ASCII.""" 90 return s.encode('utf-8', *args, **kwargs) 91 92 def default_encode(obj, file=None): # noqa 93 """Get default encoding.""" 94 return unicode(obj, default_encoding(file)) 95 96 str_t = unicode 97 ensure_bytes = str_to_bytes 98 99 100 try: 101 bytes_t = bytes 102 except NameError: # pragma: no cover 103 bytes_t = str # noqa 104 105 106 def safe_str(s, errors='replace'): 107 """Safe form of str(), void of unicode errors.""" 108 s = bytes_to_str(s) 109 if not isinstance(s, (text_t, bytes)): 110 return safe_repr(s, errors) 111 return _safe_str(s, errors) 112 113 114 if is_py3k: # pragma: no cover 115 116 def _safe_str(s, errors='replace', file=None): 117 if isinstance(s, str): 118 return s 119 try: 120 return str(s) 121 except Exception as exc: 122 return '<Unrepresentable {0!r}: {1!r} {2!r}>'.format( 123 type(s), exc, 
'\n'.join(traceback.format_stack())) 124 else: 125 def _safe_str(s, errors='replace', file=None): # noqa 126 encoding = default_encoding(file) 127 try: 128 if isinstance(s, unicode): 129 return s.encode(encoding, errors) 130 return unicode(s, encoding, errors) 131 except Exception as exc: 132 return '<Unrepresentable {0!r}: {1!r} {2!r}>'.format( 133 type(s), exc, '\n'.join(traceback.format_stack())) 134 135 136 def safe_repr(o, errors='replace'): 137 """Safe form of repr, void of Unicode errors.""" 138 try: 139 return repr(o) 140 except Exception: 141 return _safe_str(o, errors) 142 [end of kombu/utils/encoding.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kombu/utils/encoding.py b/kombu/utils/encoding.py --- a/kombu/utils/encoding.py +++ b/kombu/utils/encoding.py @@ -122,11 +122,18 @@ return '<Unrepresentable {0!r}: {1!r} {2!r}>'.format( type(s), exc, '\n'.join(traceback.format_stack())) else: + def _ensure_str(s, encoding, errors): + if isinstance(s, bytes): + return s.decode(encoding, errors) + return s + + def _safe_str(s, errors='replace', file=None): # noqa encoding = default_encoding(file) try: if isinstance(s, unicode): - return s.encode(encoding, errors) + return _ensure_str(s.encode(encoding, errors), + encoding, errors) return unicode(s, encoding, errors) except Exception as exc: return '<Unrepresentable {0!r}: {1!r} {2!r}>'.format(
{"golden_diff": "diff --git a/kombu/utils/encoding.py b/kombu/utils/encoding.py\n--- a/kombu/utils/encoding.py\n+++ b/kombu/utils/encoding.py\n@@ -122,11 +122,18 @@\n return '<Unrepresentable {0!r}: {1!r} {2!r}>'.format(\n type(s), exc, '\\n'.join(traceback.format_stack()))\n else:\n+ def _ensure_str(s, encoding, errors):\n+ if isinstance(s, bytes):\n+ return s.decode(encoding, errors)\n+ return s\n+\n+\n def _safe_str(s, errors='replace', file=None): # noqa\n encoding = default_encoding(file)\n try:\n if isinstance(s, unicode):\n- return s.encode(encoding, errors)\n+ return _ensure_str(s.encode(encoding, errors),\n+ encoding, errors)\n return unicode(s, encoding, errors)\n except Exception as exc:\n return '<Unrepresentable {0!r}: {1!r} {2!r}>'.format(\n", "issue": "SQS queue name gets mangled\nUsing Celery/Kombu in an environment with _future_ strings support you end up with mangled queue names in SQS.\r\n\r\nThe code that tries to translate invalid SQS queue characters causes the byte string to get converted to a string representation such as b'my_queue_name' and then translated that to 'b_my_queue_name'\r\n\r\nI am finding this because we are using Apache Airflow in a Python 2.7 environment using SQS queues as the broker.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Text encoding utilities.\n\nUtilities to encode text, and to safely emit text from running\napplications without crashing from the infamous\n:exc:`UnicodeDecodeError` exception.\n\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nimport sys\nimport traceback\n\nfrom kombu.five import text_t\n\nis_py3k = sys.version_info >= (3, 0)\n\n#: safe_str takes encoding from this file by default.\n#: :func:`set_default_encoding_file` can used to set the\n#: default output file.\ndefault_encoding_file = None\n\n\ndef set_default_encoding_file(file):\n \"\"\"Set file used to get codec information.\"\"\"\n global default_encoding_file\n default_encoding_file = file\n\n\ndef get_default_encoding_file():\n \"\"\"Get file used to get codec information.\"\"\"\n return default_encoding_file\n\n\nif sys.platform.startswith('java'): # pragma: no cover\n\n def default_encoding(file=None):\n \"\"\"Get default encoding.\"\"\"\n return 'utf-8'\nelse:\n\n def default_encoding(file=None): # noqa\n \"\"\"Get default encoding.\"\"\"\n file = file or get_default_encoding_file()\n return getattr(file, 'encoding', None) or sys.getfilesystemencoding()\n\nif is_py3k: # pragma: no cover\n\n def str_to_bytes(s):\n \"\"\"Convert str to bytes.\"\"\"\n if isinstance(s, str):\n return s.encode()\n return s\n\n def bytes_to_str(s):\n \"\"\"Convert bytes to str.\"\"\"\n if isinstance(s, bytes):\n return s.decode()\n return s\n\n def from_utf8(s, *args, **kwargs):\n \"\"\"Get str from utf-8 encoding.\"\"\"\n return s\n\n def ensure_bytes(s):\n \"\"\"Ensure s is bytes, not str.\"\"\"\n if not isinstance(s, bytes):\n return str_to_bytes(s)\n return s\n\n def default_encode(obj):\n \"\"\"Encode using default encoding.\"\"\"\n return obj\n\n str_t = str\n\nelse:\n\n def str_to_bytes(s): # noqa\n \"\"\"Convert str to bytes.\"\"\"\n if isinstance(s, unicode):\n return s.encode()\n return s\n\n def bytes_to_str(s): # noqa\n \"\"\"Convert bytes to str.\"\"\"\n return s\n\n def from_utf8(s, *args, **kwargs): # noqa\n \"\"\"Convert utf-8 to ASCII.\"\"\"\n return s.encode('utf-8', *args, **kwargs)\n\n def default_encode(obj, file=None): # noqa\n \"\"\"Get default encoding.\"\"\"\n return unicode(obj, default_encoding(file))\n\n str_t = 
unicode\n ensure_bytes = str_to_bytes\n\n\ntry:\n bytes_t = bytes\nexcept NameError: # pragma: no cover\n bytes_t = str # noqa\n\n\ndef safe_str(s, errors='replace'):\n \"\"\"Safe form of str(), void of unicode errors.\"\"\"\n s = bytes_to_str(s)\n if not isinstance(s, (text_t, bytes)):\n return safe_repr(s, errors)\n return _safe_str(s, errors)\n\n\nif is_py3k: # pragma: no cover\n\n def _safe_str(s, errors='replace', file=None):\n if isinstance(s, str):\n return s\n try:\n return str(s)\n except Exception as exc:\n return '<Unrepresentable {0!r}: {1!r} {2!r}>'.format(\n type(s), exc, '\\n'.join(traceback.format_stack()))\nelse:\n def _safe_str(s, errors='replace', file=None): # noqa\n encoding = default_encoding(file)\n try:\n if isinstance(s, unicode):\n return s.encode(encoding, errors)\n return unicode(s, encoding, errors)\n except Exception as exc:\n return '<Unrepresentable {0!r}: {1!r} {2!r}>'.format(\n type(s), exc, '\\n'.join(traceback.format_stack()))\n\n\ndef safe_repr(o, errors='replace'):\n \"\"\"Safe form of repr, void of Unicode errors.\"\"\"\n try:\n return repr(o)\n except Exception:\n return _safe_str(o, errors)\n", "path": "kombu/utils/encoding.py"}]}
1,863
235
gh_patches_debug_12725
rasdani/github-patches
git_diff
pytorch__vision-3298
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> How to install torchvision to use video_reader backend? I simply installed torchvision from conda (as advertised on pytorch.org). But `torchvision.set_video_backend('video_reader')` prints `video_reader video backend is not available. Please compile torchvision from source and try again`. This should be mentioned in https://pytorch.org/docs/stable/torchvision/index.html#torchvision.set_video_backend and in torchvision README (including if the `video_reader` is temporarily not supported) cc @bjuncek </issue> <code> [start of torchvision/__init__.py] 1 import warnings 2 import os 3 4 from .extension import _HAS_OPS 5 6 from torchvision import models 7 from torchvision import datasets 8 from torchvision import ops 9 from torchvision import transforms 10 from torchvision import utils 11 from torchvision import io 12 13 import torch 14 15 try: 16 from .version import __version__ # noqa: F401 17 except ImportError: 18 pass 19 20 # Check if torchvision is being imported within the root folder 21 if (not _HAS_OPS and os.path.dirname(os.path.realpath(__file__)) == 22 os.path.join(os.path.realpath(os.getcwd()), 'torchvision')): 23 message = ('You are importing torchvision within its own root folder ({}). ' 24 'This is not expected to work and may give errors. Please exit the ' 25 'torchvision project source and relaunch your python interpreter.') 26 warnings.warn(message.format(os.getcwd())) 27 28 _image_backend = 'PIL' 29 30 _video_backend = "pyav" 31 32 33 def set_image_backend(backend): 34 """ 35 Specifies the package used to load images. 36 37 Args: 38 backend (string): Name of the image backend. one of {'PIL', 'accimage'}. 39 The :mod:`accimage` package uses the Intel IPP library. It is 40 generally faster than PIL, but does not support as many operations. 41 """ 42 global _image_backend 43 if backend not in ['PIL', 'accimage']: 44 raise ValueError("Invalid backend '{}'. Options are 'PIL' and 'accimage'" 45 .format(backend)) 46 _image_backend = backend 47 48 49 def get_image_backend(): 50 """ 51 Gets the name of the package used to load images 52 """ 53 return _image_backend 54 55 56 def set_video_backend(backend): 57 """ 58 Specifies the package used to decode videos. 59 60 Args: 61 backend (string): Name of the video backend. one of {'pyav', 'video_reader'}. 62 The :mod:`pyav` package uses the 3rd party PyAv library. It is a Pythonic 63 binding for the FFmpeg libraries. 64 The :mod:`video_reader` package includes a native C++ implementation on 65 top of FFMPEG libraries, and a python API of TorchScript custom operator. 66 It is generally decoding faster than :mod:`pyav`, but perhaps is less robust. 67 """ 68 global _video_backend 69 if backend not in ["pyav", "video_reader"]: 70 raise ValueError( 71 "Invalid video backend '%s'. Options are 'pyav' and 'video_reader'" % backend 72 ) 73 if backend == "video_reader" and not io._HAS_VIDEO_OPT: 74 message = ( 75 "video_reader video backend is not available." 76 " Please compile torchvision from source and try again" 77 ) 78 warnings.warn(message) 79 else: 80 _video_backend = backend 81 82 83 def get_video_backend(): 84 return _video_backend 85 86 87 def _is_tracing(): 88 return torch._C._get_tracing_state() 89 [end of torchvision/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/torchvision/__init__.py b/torchvision/__init__.py --- a/torchvision/__init__.py +++ b/torchvision/__init__.py @@ -64,6 +64,10 @@ The :mod:`video_reader` package includes a native C++ implementation on top of FFMPEG libraries, and a python API of TorchScript custom operator. It is generally decoding faster than :mod:`pyav`, but perhaps is less robust. + + .. note:: + Building with FFMPEG is disabled by default in the latest master. If you want to use the 'video_reader' + backend, please compile torchvision from source. """ global _video_backend if backend not in ["pyav", "video_reader"]:
{"golden_diff": "diff --git a/torchvision/__init__.py b/torchvision/__init__.py\n--- a/torchvision/__init__.py\n+++ b/torchvision/__init__.py\n@@ -64,6 +64,10 @@\n The :mod:`video_reader` package includes a native C++ implementation on\n top of FFMPEG libraries, and a python API of TorchScript custom operator.\n It is generally decoding faster than :mod:`pyav`, but perhaps is less robust.\n+\n+ .. note::\n+ Building with FFMPEG is disabled by default in the latest master. If you want to use the 'video_reader'\n+ backend, please compile torchvision from source.\n \"\"\"\n global _video_backend\n if backend not in [\"pyav\", \"video_reader\"]:\n", "issue": "How to install torchvision to use video_reader backend?\nI simply installed torchvision from conda (as advertised on pytorch.org). But `torchvision.set_video_backend('video_reader')` prints `video_reader video backend is not available. Please compile torchvision from source and try again`. This should be mentioned in https://pytorch.org/docs/stable/torchvision/index.html#torchvision.set_video_backend and in torchvision README (including if the `video_reader` is temporarily not supported)\n\ncc @bjuncek\n", "before_files": [{"content": "import warnings\nimport os\n\nfrom .extension import _HAS_OPS\n\nfrom torchvision import models\nfrom torchvision import datasets\nfrom torchvision import ops\nfrom torchvision import transforms\nfrom torchvision import utils\nfrom torchvision import io\n\nimport torch\n\ntry:\n from .version import __version__ # noqa: F401\nexcept ImportError:\n pass\n\n# Check if torchvision is being imported within the root folder\nif (not _HAS_OPS and os.path.dirname(os.path.realpath(__file__)) ==\n os.path.join(os.path.realpath(os.getcwd()), 'torchvision')):\n message = ('You are importing torchvision within its own root folder ({}). '\n 'This is not expected to work and may give errors. Please exit the '\n 'torchvision project source and relaunch your python interpreter.')\n warnings.warn(message.format(os.getcwd()))\n\n_image_backend = 'PIL'\n\n_video_backend = \"pyav\"\n\n\ndef set_image_backend(backend):\n \"\"\"\n Specifies the package used to load images.\n\n Args:\n backend (string): Name of the image backend. one of {'PIL', 'accimage'}.\n The :mod:`accimage` package uses the Intel IPP library. It is\n generally faster than PIL, but does not support as many operations.\n \"\"\"\n global _image_backend\n if backend not in ['PIL', 'accimage']:\n raise ValueError(\"Invalid backend '{}'. Options are 'PIL' and 'accimage'\"\n .format(backend))\n _image_backend = backend\n\n\ndef get_image_backend():\n \"\"\"\n Gets the name of the package used to load images\n \"\"\"\n return _image_backend\n\n\ndef set_video_backend(backend):\n \"\"\"\n Specifies the package used to decode videos.\n\n Args:\n backend (string): Name of the video backend. one of {'pyav', 'video_reader'}.\n The :mod:`pyav` package uses the 3rd party PyAv library. It is a Pythonic\n binding for the FFmpeg libraries.\n The :mod:`video_reader` package includes a native C++ implementation on\n top of FFMPEG libraries, and a python API of TorchScript custom operator.\n It is generally decoding faster than :mod:`pyav`, but perhaps is less robust.\n \"\"\"\n global _video_backend\n if backend not in [\"pyav\", \"video_reader\"]:\n raise ValueError(\n \"Invalid video backend '%s'. 
Options are 'pyav' and 'video_reader'\" % backend\n )\n if backend == \"video_reader\" and not io._HAS_VIDEO_OPT:\n message = (\n \"video_reader video backend is not available.\"\n \" Please compile torchvision from source and try again\"\n )\n warnings.warn(message)\n else:\n _video_backend = backend\n\n\ndef get_video_backend():\n return _video_backend\n\n\ndef _is_tracing():\n return torch._C._get_tracing_state()\n", "path": "torchvision/__init__.py"}]}
1,435
171
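An illustrative aside on the torchvision record above, not part of the dataset row: a minimal sketch of how calling code might probe which video backend actually took effect, assuming torchvision is installed and behaves like the `__init__.py` quoted in the record (where an unavailable `video_reader` backend only triggers a warning and leaves the previous backend active; newer releases may differ). The `pick_video_backend` helper is hypothetical.

```python
import warnings

import torchvision


def pick_video_backend(preferred: str = "video_reader") -> str:
    """Try the preferred decoding backend, then read back what is active.

    Per the set_video_backend() shown in the record, a missing video_reader
    build warns instead of raising, so get_video_backend() reveals whether
    the request actually took effect.
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")  # silence the "not available" warning
        torchvision.set_video_backend(preferred)
    active = torchvision.get_video_backend()
    if active != preferred:
        torchvision.set_video_backend("pyav")  # explicit, always-available fallback
        active = torchvision.get_video_backend()
    return active


if __name__ == "__main__":
    print("using video backend:", pick_video_backend())
```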
gh_patches_debug_12212
rasdani/github-patches
git_diff
translate__pootle-5681
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Downloading store in a zip file with no revision lacks revision headers not sure if this is a dupe or related to other PO-headers bugs - but it seems like stores with no unit revision dont get headers - this causes re-uploading to fail </issue> <code> [start of pootle/apps/pootle_store/store/serialize.py] 1 # -*- coding: utf-8 -*- 2 # 3 # Copyright (C) Pootle contributors. 4 # 5 # This file is a part of the Pootle project. It is distributed under the GPL3 6 # or later license. See the LICENSE file for a copy of the license and the 7 # AUTHORS file for copyright and authorship information. 8 9 from django.core.cache import caches 10 from django.utils.functional import cached_property 11 12 from pootle.core.delegate import config, serializers 13 14 15 class StoreSerialization(object): 16 """Calls configured deserializers for Store""" 17 18 def __init__(self, store): 19 self.store = store 20 21 @property 22 def project_serializers(self): 23 project = self.store.translation_project.project 24 return ( 25 config.get( 26 project.__class__, 27 instance=project, 28 key="pootle.core.serializers") 29 or []) 30 31 @property 32 def pootle_path(self): 33 return self.store.pootle_path 34 35 @cached_property 36 def max_unit_revision(self): 37 return self.store.data.max_unit_revision 38 39 @cached_property 40 def serializers(self): 41 available_serializers = serializers.gather( 42 self.store.translation_project.project.__class__) 43 found_serializers = [] 44 for serializer in self.project_serializers: 45 found_serializers.append(available_serializers[serializer]) 46 return found_serializers 47 48 def tostring(self): 49 store = self.store.syncer.convert() 50 if hasattr(store, "updateheader"): 51 # FIXME We need those headers on import 52 # However some formats just don't support setting metadata 53 store.updateheader(add=True, X_Pootle_Path=self.pootle_path) 54 store.updateheader(add=True, X_Pootle_Revision=self.max_unit_revision) 55 return str(store) 56 57 def pipeline(self, data): 58 if not self.serializers: 59 return data 60 for serializer in self.serializers: 61 data = serializer(self.store, data).output 62 return data 63 64 def serialize(self): 65 cache = caches["exports"] 66 ret = cache.get( 67 self.pootle_path, 68 version=self.max_unit_revision) 69 if not ret: 70 ret = self.pipeline(self.tostring()) 71 cache.set( 72 self.pootle_path, 73 ret, 74 version=self.max_unit_revision) 75 return ret 76 [end of pootle/apps/pootle_store/store/serialize.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pootle/apps/pootle_store/store/serialize.py b/pootle/apps/pootle_store/store/serialize.py --- a/pootle/apps/pootle_store/store/serialize.py +++ b/pootle/apps/pootle_store/store/serialize.py @@ -50,8 +50,9 @@ if hasattr(store, "updateheader"): # FIXME We need those headers on import # However some formats just don't support setting metadata + max_unit_revision = self.max_unit_revision or 0 store.updateheader(add=True, X_Pootle_Path=self.pootle_path) - store.updateheader(add=True, X_Pootle_Revision=self.max_unit_revision) + store.updateheader(add=True, X_Pootle_Revision=max_unit_revision) return str(store) def pipeline(self, data):
{"golden_diff": "diff --git a/pootle/apps/pootle_store/store/serialize.py b/pootle/apps/pootle_store/store/serialize.py\n--- a/pootle/apps/pootle_store/store/serialize.py\n+++ b/pootle/apps/pootle_store/store/serialize.py\n@@ -50,8 +50,9 @@\n if hasattr(store, \"updateheader\"):\n # FIXME We need those headers on import\n # However some formats just don't support setting metadata\n+ max_unit_revision = self.max_unit_revision or 0\n store.updateheader(add=True, X_Pootle_Path=self.pootle_path)\n- store.updateheader(add=True, X_Pootle_Revision=self.max_unit_revision)\n+ store.updateheader(add=True, X_Pootle_Revision=max_unit_revision)\n return str(store)\n \n def pipeline(self, data):\n", "issue": "Downloading store in a zip file with no revision lacks revision headers\nnot sure if this is a dupe or related to other PO-headers bugs - but it seems like stores with no unit revision dont get headers - this causes re-uploading to fail\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.core.cache import caches\nfrom django.utils.functional import cached_property\n\nfrom pootle.core.delegate import config, serializers\n\n\nclass StoreSerialization(object):\n \"\"\"Calls configured deserializers for Store\"\"\"\n\n def __init__(self, store):\n self.store = store\n\n @property\n def project_serializers(self):\n project = self.store.translation_project.project\n return (\n config.get(\n project.__class__,\n instance=project,\n key=\"pootle.core.serializers\")\n or [])\n\n @property\n def pootle_path(self):\n return self.store.pootle_path\n\n @cached_property\n def max_unit_revision(self):\n return self.store.data.max_unit_revision\n\n @cached_property\n def serializers(self):\n available_serializers = serializers.gather(\n self.store.translation_project.project.__class__)\n found_serializers = []\n for serializer in self.project_serializers:\n found_serializers.append(available_serializers[serializer])\n return found_serializers\n\n def tostring(self):\n store = self.store.syncer.convert()\n if hasattr(store, \"updateheader\"):\n # FIXME We need those headers on import\n # However some formats just don't support setting metadata\n store.updateheader(add=True, X_Pootle_Path=self.pootle_path)\n store.updateheader(add=True, X_Pootle_Revision=self.max_unit_revision)\n return str(store)\n\n def pipeline(self, data):\n if not self.serializers:\n return data\n for serializer in self.serializers:\n data = serializer(self.store, data).output\n return data\n\n def serialize(self):\n cache = caches[\"exports\"]\n ret = cache.get(\n self.pootle_path,\n version=self.max_unit_revision)\n if not ret:\n ret = self.pipeline(self.tostring())\n cache.set(\n self.pootle_path,\n ret,\n version=self.max_unit_revision)\n return ret\n", "path": "pootle/apps/pootle_store/store/serialize.py"}]}
1,232
186
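Aside on the pootle record above: the golden diff coalesces a `None` max unit revision to `0` before writing the `X-Pootle-Revision` header. A self-contained sketch of that defaulting pattern follows; `FakeStore`, `write_pootle_headers` and the sample path are hypothetical stand-ins, not Pootle or translate-toolkit code.

```python
class FakeStore:
    """Hypothetical stand-in for a translate-toolkit store (updateheader only)."""

    def __init__(self):
        self.headers = {}

    def updateheader(self, add=True, **kwargs):
        # Real stores turn keyword arguments such as X_Pootle_Revision into
        # "X-Pootle-Revision" PO header fields; here we just record them.
        for key, value in kwargs.items():
            self.headers[key.replace("_", "-")] = str(value)


def write_pootle_headers(store, pootle_path, max_unit_revision):
    # A store that has never synced a unit reports max_unit_revision as None,
    # and serialising "X-Pootle-Revision: None" is what broke re-uploads.
    # The fix in the golden diff is simply to coalesce None to 0 first.
    revision = max_unit_revision or 0
    store.updateheader(add=True, X_Pootle_Path=pootle_path)
    store.updateheader(add=True, X_Pootle_Revision=revision)
    return store.headers


store = FakeStore()
print(write_pootle_headers(store, "/ru/project/store.po", None))
# -> {'X-Pootle-Path': '/ru/project/store.po', 'X-Pootle-Revision': '0'}
```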
gh_patches_debug_22229
rasdani/github-patches
git_diff
adap__flower-1166
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Unify documentation to use `-` instead of `_` ### Describe what the documentation is missing. Some docs use `_` as the separator and some use `-`, for example: - `https://flower.dev/docs/example_walkthrough_pytorch_mnist.html` - `https://flower.dev/docs/example-mxnet-walk-through.html` This is inconsistent. ### Suggest your improvement. We want to unify all docs to use `-` ("minus") as the only separator. This involves the following TODO's: - [ ] Rename all docs that use `_` (and replace it with `-`) - [ ] For each renamed doc, configure a redirect from the old naming (e.g., `quickstart_pytorch`) to the new page (e.g., `quickstart-pytorch`) using, for example, https://documatt.gitlab.io/sphinx-reredirects/index.html - [ ] Update all references in the docs with the new naming (e.g., in `index.rst`) - [ ] Update all other references with the new naming (e.g., README.md, code examples, website, ...) Last, but not least, this should all be done in a single PR. We should not split, for example, the renaming of a page and the configuration of a redirect into two PRs because it would lead to a deployed version of the docs where users would see a 404 page when they use a link that points to the old naming. </issue> <code> [start of doc/source/conf.py] 1 # Copyright 2020 Adap GmbH. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 # ============================================================================== 15 import os 16 import sys 17 from sphinx.application import ConfigError 18 19 # Configuration file for the Sphinx documentation builder. 20 # 21 # This file only contains a selection of the most common options. For a full 22 # list see the documentation: 23 # https://www.sphinx-doc.org/en/master/usage/configuration.html 24 25 26 # Fixing path issue for autodoc 27 sys.path.insert(0, os.path.abspath('../../src/py')) 28 29 30 # -- Project information ----------------------------------------------------- 31 32 project = u"Flower" 33 copyright = u"2022 Adap GmbH" 34 author = u"The Flower Authors" 35 36 # The full version, including alpha/beta/rc tags 37 release = u"0.19.0" 38 39 40 # -- General configuration --------------------------------------------------- 41 42 # Add any Sphinx extension module names here, as strings. They can be 43 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 44 # ones. 45 extensions = [ 46 "sphinx.ext.napoleon", 47 "sphinx.ext.autodoc", 48 "sphinx.ext.mathjax", 49 "sphinx.ext.viewcode", 50 "sphinx.ext.graphviz", 51 "sphinx_copybutton", 52 "sphinx_panels", 53 "sphinxcontrib.mermaid", 54 ] 55 56 # Add any paths that contain templates here, relative to this directory. 57 templates_path = ["_templates"] 58 59 # List of patterns, relative to source directory, that match files and 60 # directories to ignore when looking for source files. 61 # This pattern also affects html_static_path and html_extra_path. 
62 exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] 63 64 65 # -- Options for HTML output ------------------------------------------------- 66 67 # The theme to use for HTML and HTML Help pages. See the documentation for 68 # a list of builtin themes. 69 # 70 html_theme = "furo" 71 html_title = f"Flower {release}" 72 html_logo = "_static/flower-logo.png" 73 html_favicon = "_static/favicon.ico" 74 html_baseurl = "https://flower.dev/docs/" 75 76 html_theme_options = { 77 # Sphinx Book Theme 78 # https://sphinx-book-theme.readthedocs.io/en/latest/configure.html 79 # "repository_url": "https://github.com/adap/flower", 80 # "repository_branch": "main", 81 # "path_to_docs": "doc/source/", 82 # "home_page_in_toc": True, 83 # "use_repository_button": True, 84 # "use_issues_button": True, 85 # "use_edit_page_button": True, 86 87 # Furo 88 # https://pradyunsg.me/furo/customisation/ 89 # "light_css_variables": { 90 # "color-brand-primary": "#292F36", 91 # "color-brand-content": "#292F36", 92 # "color-admonition-background": "#F2B705", 93 # }, 94 "announcement": "Flower Summit 2022 <a href=\"https://flower.dev/conf/flower-summit-2022/\">register now</a>", 95 } 96 97 # Add any paths that contain custom static files (such as style sheets) here, 98 # relative to this directory. They are copied after the builtin static files, 99 # so a file named "default.css" will overwrite the builtin "default.css". 100 html_static_path = ["_static"] 101 html_css_files = ["custom.css"] 102 [end of doc/source/conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -51,6 +51,7 @@ "sphinx_copybutton", "sphinx_panels", "sphinxcontrib.mermaid", + "sphinx_reredirects" ] # Add any paths that contain templates here, relative to this directory. @@ -61,6 +62,19 @@ # This pattern also affects html_static_path and html_extra_path. exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] +# Sphinx redirects, implemented after the doc filename changes. +# To prevent 404 errors and redirect to the new pages. +redirects = { + "quickstart_mxnet": "quickstart-mxnet.html", + "quickstart_pytorch_lightning": "quickstart-pytorch-lightning.html", + "example_walkthrough_pytorch_mnist": "example-walkthrough-pytorch-mnist.html", + "quickstart_huggingface": "quickstart-huggingface.html", + "quickstart_pytorch": "quickstart-pytorch.html", + "quickstart_tensorflow": "quickstart-tensorflow.html", + "release_process": "release-process.html", + "quickstart_scikitlearn": "quickstart-scikitlearn.html" +} + # -- Options for HTML output -------------------------------------------------
{"golden_diff": "diff --git a/doc/source/conf.py b/doc/source/conf.py\n--- a/doc/source/conf.py\n+++ b/doc/source/conf.py\n@@ -51,6 +51,7 @@\n \"sphinx_copybutton\",\n \"sphinx_panels\",\n \"sphinxcontrib.mermaid\",\n+ \"sphinx_reredirects\"\n ]\n \n # Add any paths that contain templates here, relative to this directory.\n@@ -61,6 +62,19 @@\n # This pattern also affects html_static_path and html_extra_path.\n exclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n \n+# Sphinx redirects, implemented after the doc filename changes.\n+# To prevent 404 errors and redirect to the new pages.\n+redirects = {\n+ \"quickstart_mxnet\": \"quickstart-mxnet.html\",\n+ \"quickstart_pytorch_lightning\": \"quickstart-pytorch-lightning.html\",\n+ \"example_walkthrough_pytorch_mnist\": \"example-walkthrough-pytorch-mnist.html\",\n+ \"quickstart_huggingface\": \"quickstart-huggingface.html\",\n+ \"quickstart_pytorch\": \"quickstart-pytorch.html\",\n+ \"quickstart_tensorflow\": \"quickstart-tensorflow.html\",\n+ \"release_process\": \"release-process.html\",\n+ \"quickstart_scikitlearn\": \"quickstart-scikitlearn.html\"\n+}\n+\n \n # -- Options for HTML output -------------------------------------------------\n", "issue": "Unify documentation to use `-` instead of `_`\n### Describe what the documentation is missing.\n\nSome docs use `_` as the separator and some use `-`, for example:\r\n- `https://flower.dev/docs/example_walkthrough_pytorch_mnist.html`\r\n- `https://flower.dev/docs/example-mxnet-walk-through.html`\r\n\r\nThis is inconsistent.\n\n### Suggest your improvement.\n\nWe want to unify all docs to use `-` (\"minus\") as the only separator. This involves the following TODO's:\r\n\r\n- [ ] Rename all docs that use `_` (and replace it with `-`)\r\n- [ ] For each renamed doc, configure a redirect from the old naming (e.g., `quickstart_pytorch`) to the new page (e.g., `quickstart-pytorch`) using, for example, https://documatt.gitlab.io/sphinx-reredirects/index.html\r\n- [ ] Update all references in the docs with the new naming (e.g., in `index.rst`)\r\n- [ ] Update all other references with the new naming (e.g., README.md, code examples, website, ...)\r\n\r\nLast, but not least, this should all be done in a single PR. We should not split, for example, the renaming of a page and the configuration of a redirect into two PRs because it would lead to a deployed version of the docs where users would see a 404 page when they use a link that points to the old naming.\n", "before_files": [{"content": "# Copyright 2020 Adap GmbH. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport os\nimport sys\nfrom sphinx.application import ConfigError\n\n# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. 
For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n\n# Fixing path issue for autodoc\nsys.path.insert(0, os.path.abspath('../../src/py'))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = u\"Flower\"\ncopyright = u\"2022 Adap GmbH\"\nauthor = u\"The Flower Authors\"\n\n# The full version, including alpha/beta/rc tags\nrelease = u\"0.19.0\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.graphviz\",\n \"sphinx_copybutton\",\n \"sphinx_panels\",\n \"sphinxcontrib.mermaid\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"furo\"\nhtml_title = f\"Flower {release}\"\nhtml_logo = \"_static/flower-logo.png\"\nhtml_favicon = \"_static/favicon.ico\"\nhtml_baseurl = \"https://flower.dev/docs/\"\n\nhtml_theme_options = {\n # Sphinx Book Theme\n # https://sphinx-book-theme.readthedocs.io/en/latest/configure.html\n # \"repository_url\": \"https://github.com/adap/flower\",\n # \"repository_branch\": \"main\",\n # \"path_to_docs\": \"doc/source/\",\n # \"home_page_in_toc\": True,\n # \"use_repository_button\": True,\n # \"use_issues_button\": True,\n # \"use_edit_page_button\": True,\n\n # Furo\n # https://pradyunsg.me/furo/customisation/\n # \"light_css_variables\": {\n # \"color-brand-primary\": \"#292F36\",\n # \"color-brand-content\": \"#292F36\", \n # \"color-admonition-background\": \"#F2B705\",\n # },\n \"announcement\": \"Flower Summit 2022 <a href=\\\"https://flower.dev/conf/flower-summit-2022/\\\">register now</a>\",\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\nhtml_css_files = [\"custom.css\"]\n", "path": "doc/source/conf.py"}]}
1,888
314
gh_patches_debug_636
rasdani/github-patches
git_diff
pex-tool__pex-2143
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Release 2.1.135 On the docket: + [x] Add Support for Pip 23.1.1. #2133 + [x] Introduce pex3 venv inspect. #2135 + [x] Add support for Pip 23.1.2. #2142 + [x] Introduce pex3 venv create. #2140 </issue> <code> [start of pex/version.py] 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = "2.1.134" 5 [end of pex/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = "2.1.134" +__version__ = "2.1.135"
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.134\"\n+__version__ = \"2.1.135\"\n", "issue": "Release 2.1.135\nOn the docket:\r\n+ [x] Add Support for Pip 23.1.1. #2133\r\n+ [x] Introduce pex3 venv inspect. #2135\r\n+ [x] Add support for Pip 23.1.2. #2142\n+ [x] Introduce pex3 venv create. #2140 \r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.134\"\n", "path": "pex/version.py"}]}
683
99
gh_patches_debug_20925
rasdani/github-patches
git_diff
qtile__qtile-2697
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> bring_front_click is bool type but can be string # bring_front_click is bool type but can be “floating_only” qtile check reported error error: config.bring_front_click variable differs from runtime type Literal['floating_only'] as per documentation floating_only is supported but `bring_front_click` variable is declared as bool type. is it ok if I leave it as 'floating_only'? # Qtile version : 1.8.1 </issue> <code> [start of libqtile/confreader.py] 1 # Copyright (c) 2008, Aldo Cortesi <[email protected]> 2 # Copyright (c) 2011, Andrew Grigorev <[email protected]> 3 # 4 # All rights reserved. 5 # 6 # Permission is hereby granted, free of charge, to any person obtaining a copy 7 # of this software and associated documentation files (the "Software"), to deal 8 # in the Software without restriction, including without limitation the rights 9 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 # copies of the Software, and to permit persons to whom the Software is 11 # furnished to do so, subject to the following conditions: 12 # 13 # The above copyright notice and this permission notice shall be included in 14 # all copies or substantial portions of the Software. 15 # 16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 # SOFTWARE. 23 24 from __future__ import annotations 25 26 import importlib 27 import sys 28 from pathlib import Path 29 from typing import TYPE_CHECKING 30 31 from libqtile.backend.x11 import core 32 33 if TYPE_CHECKING: 34 from typing import Any, Dict, List 35 36 from typing_extensions import Literal 37 38 from libqtile.config import Group, Key, Mouse, Rule, Screen 39 from libqtile.layout.base import Layout 40 41 42 class ConfigError(Exception): 43 pass 44 45 46 config_pyi_header = """ 47 from typing import Any, Dict, List 48 from typing_extensions import Literal 49 from libqtile.config import Group, Key, Mouse, Rule, Screen 50 from libqtile.layout.base import Layout 51 52 """ 53 54 55 class Config: 56 # All configuration options 57 keys: List[Key] 58 mouse: List[Mouse] 59 groups: List[Group] 60 dgroups_key_binder: Any 61 dgroups_app_rules: List[Rule] 62 follow_mouse_focus: bool 63 focus_on_window_activation: Literal["focus", "smart", "urgent", "never"] 64 cursor_warp: bool 65 layouts: List[Layout] 66 floating_layout: Layout 67 screens: List[Screen] 68 auto_fullscreen: bool 69 widget_defaults: Dict[str, Any] 70 extension_defaults: Dict[str, Any] 71 bring_front_click: bool 72 reconfigure_screens: bool 73 wmname: str 74 auto_minimize: bool 75 76 def __init__(self, file_path=None, **settings): 77 """Create a Config() object from settings 78 79 Only attributes found in Config.__annotations__ will be added to object. 80 config attribute precedence is 1.) **settings 2.) self 3.) 
default_config 81 """ 82 self.file_path = file_path 83 self.update(**settings) 84 85 def update(self, *, fake_screens=None, **settings): 86 from libqtile.resources import default_config 87 88 if fake_screens: 89 self.fake_screens = fake_screens 90 91 default = vars(default_config) 92 for key in self.__annotations__.keys(): 93 try: 94 value = settings[key] 95 except KeyError: 96 value = getattr(self, key, default[key]) 97 setattr(self, key, value) 98 99 def load(self): 100 if not self.file_path: 101 return 102 103 path = Path(self.file_path) 104 name = path.stem 105 sys.path.insert(0, path.parent.as_posix()) 106 107 if name in sys.modules: 108 config = importlib.reload(sys.modules[name]) 109 else: 110 config = importlib.import_module(name) 111 112 self.update(**vars(config)) 113 114 def validate(self) -> None: 115 """ 116 Validate the configuration against the core. 117 """ 118 valid_keys = core.get_keys() 119 valid_mods = core.get_modifiers() 120 # we explicitly do not want to set self.keys and self.mouse above, 121 # because they are dynamically resolved from the default_config. so we 122 # need to ignore the errors here about missing attributes. 123 for k in self.keys: 124 if k.key not in valid_keys: 125 raise ConfigError("No such key: %s" % k.key) 126 for m in k.modifiers: 127 if m not in valid_mods: 128 raise ConfigError("No such modifier: %s" % m) 129 for ms in self.mouse: 130 for m in ms.modifiers: 131 if m not in valid_mods: 132 raise ConfigError("No such modifier: %s" % m) 133 [end of libqtile/confreader.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/libqtile/confreader.py b/libqtile/confreader.py --- a/libqtile/confreader.py +++ b/libqtile/confreader.py @@ -31,7 +31,7 @@ from libqtile.backend.x11 import core if TYPE_CHECKING: - from typing import Any, Dict, List + from typing import Any, Dict, List, Union from typing_extensions import Literal @@ -44,7 +44,7 @@ config_pyi_header = """ -from typing import Any, Dict, List +from typing import Any, Dict, List, Union from typing_extensions import Literal from libqtile.config import Group, Key, Mouse, Rule, Screen from libqtile.layout.base import Layout @@ -68,7 +68,7 @@ auto_fullscreen: bool widget_defaults: Dict[str, Any] extension_defaults: Dict[str, Any] - bring_front_click: bool + bring_front_click: Union[bool, Literal["floating_only"]] reconfigure_screens: bool wmname: str auto_minimize: bool
{"golden_diff": "diff --git a/libqtile/confreader.py b/libqtile/confreader.py\n--- a/libqtile/confreader.py\n+++ b/libqtile/confreader.py\n@@ -31,7 +31,7 @@\n from libqtile.backend.x11 import core\n \n if TYPE_CHECKING:\n- from typing import Any, Dict, List\n+ from typing import Any, Dict, List, Union\n \n from typing_extensions import Literal\n \n@@ -44,7 +44,7 @@\n \n \n config_pyi_header = \"\"\"\n-from typing import Any, Dict, List\n+from typing import Any, Dict, List, Union\n from typing_extensions import Literal\n from libqtile.config import Group, Key, Mouse, Rule, Screen\n from libqtile.layout.base import Layout\n@@ -68,7 +68,7 @@\n auto_fullscreen: bool\n widget_defaults: Dict[str, Any]\n extension_defaults: Dict[str, Any]\n- bring_front_click: bool\n+ bring_front_click: Union[bool, Literal[\"floating_only\"]]\n reconfigure_screens: bool\n wmname: str\n auto_minimize: bool\n", "issue": "bring_front_click is bool type but can be string\n# bring_front_click is bool type but can be \u201cfloating_only\u201d\r\n\r\nqtile check reported error\r\nerror: config.bring_front_click variable differs from runtime type Literal['floating_only']\r\n\r\nas per documentation floating_only is supported but `bring_front_click` variable is declared as bool type.\r\nis it ok if I leave it as 'floating_only'?\r\n\r\n# Qtile version : 1.8.1\n", "before_files": [{"content": "# Copyright (c) 2008, Aldo Cortesi <[email protected]>\n# Copyright (c) 2011, Andrew Grigorev <[email protected]>\n#\n# All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom __future__ import annotations\n\nimport importlib\nimport sys\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING\n\nfrom libqtile.backend.x11 import core\n\nif TYPE_CHECKING:\n from typing import Any, Dict, List\n\n from typing_extensions import Literal\n\n from libqtile.config import Group, Key, Mouse, Rule, Screen\n from libqtile.layout.base import Layout\n\n\nclass ConfigError(Exception):\n pass\n\n\nconfig_pyi_header = \"\"\"\nfrom typing import Any, Dict, List\nfrom typing_extensions import Literal\nfrom libqtile.config import Group, Key, Mouse, Rule, Screen\nfrom libqtile.layout.base import Layout\n\n\"\"\"\n\n\nclass Config:\n # All configuration options\n keys: List[Key]\n mouse: List[Mouse]\n groups: List[Group]\n dgroups_key_binder: Any\n dgroups_app_rules: List[Rule]\n follow_mouse_focus: bool\n focus_on_window_activation: Literal[\"focus\", \"smart\", \"urgent\", \"never\"]\n cursor_warp: bool\n layouts: List[Layout]\n floating_layout: Layout\n screens: List[Screen]\n auto_fullscreen: bool\n widget_defaults: Dict[str, Any]\n extension_defaults: Dict[str, Any]\n bring_front_click: bool\n reconfigure_screens: bool\n wmname: str\n auto_minimize: bool\n\n def __init__(self, file_path=None, **settings):\n \"\"\"Create a Config() object from settings\n\n Only attributes found in Config.__annotations__ will be added to object.\n config attribute precedence is 1.) **settings 2.) self 3.) default_config\n \"\"\"\n self.file_path = file_path\n self.update(**settings)\n\n def update(self, *, fake_screens=None, **settings):\n from libqtile.resources import default_config\n\n if fake_screens:\n self.fake_screens = fake_screens\n\n default = vars(default_config)\n for key in self.__annotations__.keys():\n try:\n value = settings[key]\n except KeyError:\n value = getattr(self, key, default[key])\n setattr(self, key, value)\n\n def load(self):\n if not self.file_path:\n return\n\n path = Path(self.file_path)\n name = path.stem\n sys.path.insert(0, path.parent.as_posix())\n\n if name in sys.modules:\n config = importlib.reload(sys.modules[name])\n else:\n config = importlib.import_module(name)\n\n self.update(**vars(config))\n\n def validate(self) -> None:\n \"\"\"\n Validate the configuration against the core.\n \"\"\"\n valid_keys = core.get_keys()\n valid_mods = core.get_modifiers()\n # we explicitly do not want to set self.keys and self.mouse above,\n # because they are dynamically resolved from the default_config. so we\n # need to ignore the errors here about missing attributes.\n for k in self.keys:\n if k.key not in valid_keys:\n raise ConfigError(\"No such key: %s\" % k.key)\n for m in k.modifiers:\n if m not in valid_mods:\n raise ConfigError(\"No such modifier: %s\" % m)\n for ms in self.mouse:\n for m in ms.modifiers:\n if m not in valid_mods:\n raise ConfigError(\"No such modifier: %s\" % m)\n", "path": "libqtile/confreader.py"}]}
1,940
248
gh_patches_debug_32787
rasdani/github-patches
git_diff
qtile__qtile-3464
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Chord widget doesn't have default background colour ### The issue: The background colour is set by looking up the chord name in a dictionary. If the name is not in the dictionary then the chord will be displayed with whatever the current background colour is. The widget needs to be updated to restore colours to defaults. ### Required: - [X] I have searched past issues to see if this bug has already been reported. </issue> <code> [start of libqtile/widget/chord.py] 1 # Copyright (c) 2014 Sean Vig 2 # Copyright (c) 2014 roger 3 # Copyright (c) 2014 Adi Sieker 4 # Copyright (c) 2014 Tycho Andersen 5 # 6 # Permission is hereby granted, free of charge, to any person obtaining a copy 7 # of this software and associated documentation files (the "Software"), to deal 8 # in the Software without restriction, including without limitation the rights 9 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 # copies of the Software, and to permit persons to whom the Software is 11 # furnished to do so, subject to the following conditions: 12 # 13 # The above copyright notice and this permission notice shall be included in 14 # all copies or substantial portions of the Software. 15 # 16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 # SOFTWARE. 23 24 from libqtile import bar, hook 25 from libqtile.widget import base 26 27 28 class Chord(base._TextBox): 29 """Display current key chord""" 30 31 defaults = [ 32 ("chords_colors", {}, "colors per chord in form of tuple ('bg', 'fg')."), 33 ( 34 "name_transform", 35 lambda txt: txt, 36 "preprocessor for chord name it is pure function string -> string", 37 ), 38 ] 39 40 def __init__(self, width=bar.CALCULATED, **config): 41 base._TextBox.__init__(self, "", width, **config) 42 self.add_defaults(Chord.defaults) 43 44 def _configure(self, qtile, bar): 45 base._TextBox._configure(self, qtile, bar) 46 self.text = "" 47 self._setup_hooks() 48 49 def _setup_hooks(self): 50 def hook_enter_chord(chord_name): 51 if chord_name is True: 52 self.text = "" 53 return 54 55 self.text = self.name_transform(chord_name) 56 if chord_name in self.chords_colors: 57 (self.background, self.foreground) = self.chords_colors.get(chord_name) 58 59 self.bar.draw() 60 61 hook.subscribe.enter_chord(hook_enter_chord) 62 hook.subscribe.leave_chord(self.clear) 63 64 def clear(self, *args): 65 self.text = "" 66 self.bar.draw() 67 [end of libqtile/widget/chord.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/libqtile/widget/chord.py b/libqtile/widget/chord.py --- a/libqtile/widget/chord.py +++ b/libqtile/widget/chord.py @@ -29,7 +29,13 @@ """Display current key chord""" defaults = [ - ("chords_colors", {}, "colors per chord in form of tuple ('bg', 'fg')."), + ( + "chords_colors", + {}, + "colors per chord in form of tuple {'chord_name': ('bg', 'fg')}. " + "Where a chord name is not in the dictionary, the default ``background`` and ``foreground``" + " values will be used.", + ), ( "name_transform", lambda txt: txt, @@ -43,6 +49,8 @@ def _configure(self, qtile, bar): base._TextBox._configure(self, qtile, bar) + self.default_background = self.background + self.default_foreground = self.foreground self.text = "" self._setup_hooks() @@ -50,17 +58,25 @@ def hook_enter_chord(chord_name): if chord_name is True: self.text = "" + self.reset_colours() return self.text = self.name_transform(chord_name) if chord_name in self.chords_colors: (self.background, self.foreground) = self.chords_colors.get(chord_name) + else: + self.reset_colours() self.bar.draw() hook.subscribe.enter_chord(hook_enter_chord) hook.subscribe.leave_chord(self.clear) + def reset_colours(self): + self.background = self.default_background + self.foreground = self.default_foreground + def clear(self, *args): + self.reset_colours() self.text = "" self.bar.draw()
{"golden_diff": "diff --git a/libqtile/widget/chord.py b/libqtile/widget/chord.py\n--- a/libqtile/widget/chord.py\n+++ b/libqtile/widget/chord.py\n@@ -29,7 +29,13 @@\n \"\"\"Display current key chord\"\"\"\n \n defaults = [\n- (\"chords_colors\", {}, \"colors per chord in form of tuple ('bg', 'fg').\"),\n+ (\n+ \"chords_colors\",\n+ {},\n+ \"colors per chord in form of tuple {'chord_name': ('bg', 'fg')}. \"\n+ \"Where a chord name is not in the dictionary, the default ``background`` and ``foreground``\"\n+ \" values will be used.\",\n+ ),\n (\n \"name_transform\",\n lambda txt: txt,\n@@ -43,6 +49,8 @@\n \n def _configure(self, qtile, bar):\n base._TextBox._configure(self, qtile, bar)\n+ self.default_background = self.background\n+ self.default_foreground = self.foreground\n self.text = \"\"\n self._setup_hooks()\n \n@@ -50,17 +58,25 @@\n def hook_enter_chord(chord_name):\n if chord_name is True:\n self.text = \"\"\n+ self.reset_colours()\n return\n \n self.text = self.name_transform(chord_name)\n if chord_name in self.chords_colors:\n (self.background, self.foreground) = self.chords_colors.get(chord_name)\n+ else:\n+ self.reset_colours()\n \n self.bar.draw()\n \n hook.subscribe.enter_chord(hook_enter_chord)\n hook.subscribe.leave_chord(self.clear)\n \n+ def reset_colours(self):\n+ self.background = self.default_background\n+ self.foreground = self.default_foreground\n+\n def clear(self, *args):\n+ self.reset_colours()\n self.text = \"\"\n self.bar.draw()\n", "issue": "Chord widget doesn't have default background colour\n### The issue:\n\nThe background colour is set by looking up the chord name in a dictionary. If the name is not in the dictionary then the chord will be displayed with whatever the current background colour is.\r\n\r\nThe widget needs to be updated to restore colours to defaults.\n\n### Required:\n\n- [X] I have searched past issues to see if this bug has already been reported.\n", "before_files": [{"content": "# Copyright (c) 2014 Sean Vig\n# Copyright (c) 2014 roger\n# Copyright (c) 2014 Adi Sieker\n# Copyright (c) 2014 Tycho Andersen\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom libqtile import bar, hook\nfrom libqtile.widget import base\n\n\nclass Chord(base._TextBox):\n \"\"\"Display current key chord\"\"\"\n\n defaults = [\n (\"chords_colors\", {}, \"colors per chord in form of tuple ('bg', 'fg').\"),\n (\n \"name_transform\",\n lambda txt: txt,\n \"preprocessor for chord name it is pure function string -> string\",\n ),\n ]\n\n def __init__(self, width=bar.CALCULATED, **config):\n base._TextBox.__init__(self, \"\", width, **config)\n self.add_defaults(Chord.defaults)\n\n def _configure(self, qtile, bar):\n base._TextBox._configure(self, qtile, bar)\n self.text = \"\"\n self._setup_hooks()\n\n def _setup_hooks(self):\n def hook_enter_chord(chord_name):\n if chord_name is True:\n self.text = \"\"\n return\n\n self.text = self.name_transform(chord_name)\n if chord_name in self.chords_colors:\n (self.background, self.foreground) = self.chords_colors.get(chord_name)\n\n self.bar.draw()\n\n hook.subscribe.enter_chord(hook_enter_chord)\n hook.subscribe.leave_chord(self.clear)\n\n def clear(self, *args):\n self.text = \"\"\n self.bar.draw()\n", "path": "libqtile/widget/chord.py"}]}
1,319
421
gh_patches_debug_8458
rasdani/github-patches
git_diff
apache__airflow-8834
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> show_dag (save) feature is not supporting any other orientation except 'LR' <!-- Welcome to Apache Airflow! For a smooth issue process, try to answer the following questions. Don't worry if they're not all applicable; just try to include what you can :-) If you need to include code snippets or logs, please put them in fenced code blocks. If they're super-long, please use the details tag like <details><summary>super-long log</summary> lots of stuff </details> Please delete these comment blocks before submitting the issue. --> <!-- IMPORTANT!!! PLEASE CHECK "SIMILAR TO X EXISTING ISSUES" OPTION IF VISIBLE NEXT TO "SUBMIT NEW ISSUE" BUTTON!!! PLEASE CHECK IF THIS ISSUE HAS BEEN REPORTED PREVIOUSLY USING SEARCH!!! Please complete the next sections or the issue will be closed. This questions are the first thing we need to know to understand the context. --> **Apache Airflow version**: master **What happened**: I have DAG in TB orientation and when I tried to save that DAG graph to png file, it is not honoring the orientation, it is taking in 'LR' orientation only. <!-- (please include exact error messages if you can) --> **What you expected to happen**: The graph in the png file should be similar to the orientation of the DAG. <!-- What do you think went wrong? --> **How to reproduce it**: Change the orientation of the DAG other than 'LR' and try to save the graph. <!--- As minimally and precisely as possible. Keep in mind we do not have access to your cluster or dags. If you are using kubernetes, please attempt to recreate the issue using minikube or kind. ## Install minikube/kind - Minikube https://minikube.sigs.k8s.io/docs/start/ - Kind https://kind.sigs.k8s.io/docs/user/quick-start/ If this is a UI bug, please provide a screenshot of the bug or a link to a youtube video of the bug in action You can include images using the .md sytle of ![alt text](http://url/to/img.png) To record a screencast, mac users can use QuickTime and then create an unlisted youtube video with the resulting .mov file. ---> </issue> <code> [start of airflow/utils/dot_renderer.py] 1 #!/usr/bin/env python 2 # 3 # Licensed to the Apache Software Foundation (ASF) under one 4 # or more contributor license agreements. See the NOTICE file 5 # distributed with this work for additional information 6 # regarding copyright ownership. The ASF licenses this file 7 # to you under the Apache License, Version 2.0 (the 8 # "License"); you may not use this file except in compliance 9 # with the License. You may obtain a copy of the License at 10 # 11 # http://www.apache.org/licenses/LICENSE-2.0 12 # 13 # Unless required by applicable law or agreed to in writing, 14 # software distributed under the License is distributed on an 15 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 16 # KIND, either express or implied. See the License for the 17 # specific language governing permissions and limitations 18 # under the License. 19 """ 20 Renderer DAG (tasks and dependencies) to the graphviz object. 21 """ 22 from typing import List, Optional 23 24 import graphviz 25 26 from airflow.models import TaskInstance 27 from airflow.models.dag import DAG 28 from airflow.utils.state import State 29 30 31 def _refine_color(color: str): 32 """ 33 Converts color in #RGB (12 bits) format to #RRGGBB (32 bits), if it possible. 34 Otherwise, it returns the original value. Graphviz does not support colors in #RGB format. 
35 36 :param color: Text representation of color 37 :return: Refined representation of color 38 """ 39 if len(color) == 4 and color[0] == "#": 40 color_r = color[1] 41 color_g = color[2] 42 color_b = color[3] 43 return "#" + color_r + color_r + color_g + color_g + color_b + color_b 44 return color 45 46 47 def render_dag(dag: DAG, tis: Optional[List[TaskInstance]] = None) -> graphviz.Digraph: 48 """ 49 Renders the DAG object to the DOT object. 50 51 If an task instance list is passed, the nodes will be painted according to task statuses. 52 53 :param dag: DAG that will be rendered. 54 :type dag: airflow.models.dag.DAG 55 :param tis: List of task instances 56 :type tis: Optional[List[TaskInstance]] 57 :return: Graphviz object 58 :rtype: graphviz.Digraph 59 """ 60 dot = graphviz.Digraph(dag.dag_id, graph_attr={"rankdir": "LR", "labelloc": "t", "label": dag.dag_id}) 61 states_by_task_id = None 62 if tis is not None: 63 states_by_task_id = {ti.task_id: ti.state for ti in tis} 64 for task in dag.tasks: 65 node_attrs = { 66 "shape": "rectangle", 67 "style": "filled,rounded", 68 } 69 if states_by_task_id is None: 70 node_attrs.update({ 71 "color": _refine_color(task.ui_fgcolor), 72 "fillcolor": _refine_color(task.ui_color), 73 }) 74 else: 75 state = states_by_task_id.get(task.task_id, State.NONE) 76 node_attrs.update({ 77 "color": State.color_fg(state), 78 "fillcolor": State.color(state), 79 }) 80 dot.node( 81 task.task_id, 82 _attributes=node_attrs, 83 ) 84 for downstream_task_id in task.downstream_task_ids: 85 dot.edge(task.task_id, downstream_task_id) 86 return dot 87 [end of airflow/utils/dot_renderer.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/airflow/utils/dot_renderer.py b/airflow/utils/dot_renderer.py --- a/airflow/utils/dot_renderer.py +++ b/airflow/utils/dot_renderer.py @@ -57,7 +57,8 @@ :return: Graphviz object :rtype: graphviz.Digraph """ - dot = graphviz.Digraph(dag.dag_id, graph_attr={"rankdir": "LR", "labelloc": "t", "label": dag.dag_id}) + dot = graphviz.Digraph(dag.dag_id, graph_attr={"rankdir": dag.orientation if dag.orientation else "LR", + "labelloc": "t", "label": dag.dag_id}) states_by_task_id = None if tis is not None: states_by_task_id = {ti.task_id: ti.state for ti in tis}
{"golden_diff": "diff --git a/airflow/utils/dot_renderer.py b/airflow/utils/dot_renderer.py\n--- a/airflow/utils/dot_renderer.py\n+++ b/airflow/utils/dot_renderer.py\n@@ -57,7 +57,8 @@\n :return: Graphviz object\n :rtype: graphviz.Digraph\n \"\"\"\n- dot = graphviz.Digraph(dag.dag_id, graph_attr={\"rankdir\": \"LR\", \"labelloc\": \"t\", \"label\": dag.dag_id})\n+ dot = graphviz.Digraph(dag.dag_id, graph_attr={\"rankdir\": dag.orientation if dag.orientation else \"LR\",\n+ \"labelloc\": \"t\", \"label\": dag.dag_id})\n states_by_task_id = None\n if tis is not None:\n states_by_task_id = {ti.task_id: ti.state for ti in tis}\n", "issue": "show_dag (save) feature is not supporting any other orientation except 'LR'\n<!--\r\n\r\nWelcome to Apache Airflow! For a smooth issue process, try to answer the following questions.\r\nDon't worry if they're not all applicable; just try to include what you can :-)\r\n\r\nIf you need to include code snippets or logs, please put them in fenced code\r\nblocks. If they're super-long, please use the details tag like\r\n<details><summary>super-long log</summary> lots of stuff </details>\r\n\r\nPlease delete these comment blocks before submitting the issue.\r\n\r\n-->\r\n\r\n<!--\r\n\r\nIMPORTANT!!!\r\n\r\nPLEASE CHECK \"SIMILAR TO X EXISTING ISSUES\" OPTION IF VISIBLE\r\nNEXT TO \"SUBMIT NEW ISSUE\" BUTTON!!!\r\n\r\nPLEASE CHECK IF THIS ISSUE HAS BEEN REPORTED PREVIOUSLY USING SEARCH!!!\r\n\r\nPlease complete the next sections or the issue will be closed.\r\nThis questions are the first thing we need to know to understand the context.\r\n\r\n-->\r\n\r\n**Apache Airflow version**: master\r\n\r\n**What happened**: I have DAG in TB orientation and when I tried to save that DAG graph to png file, it is not honoring the orientation, it is taking in 'LR' orientation only.\r\n\r\n<!-- (please include exact error messages if you can) -->\r\n\r\n**What you expected to happen**: The graph in the png file should be similar to the orientation of the DAG.\r\n\r\n<!-- What do you think went wrong? -->\r\n\r\n**How to reproduce it**: Change the orientation of the DAG other than 'LR' and try to save the graph.\r\n<!---\r\n\r\nAs minimally and precisely as possible. Keep in mind we do not have access to your cluster or dags.\r\n\r\nIf you are using kubernetes, please attempt to recreate the issue using minikube or kind.\r\n\r\n## Install minikube/kind\r\n\r\n- Minikube https://minikube.sigs.k8s.io/docs/start/\r\n- Kind https://kind.sigs.k8s.io/docs/user/quick-start/\r\n\r\nIf this is a UI bug, please provide a screenshot of the bug or a link to a youtube video of the bug in action\r\n\r\nYou can include images using the .md sytle of\r\n![alt text](http://url/to/img.png)\r\n\r\nTo record a screencast, mac users can use QuickTime and then create an unlisted youtube video with the resulting .mov file.\r\n\r\n--->\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"\nRenderer DAG (tasks and dependencies) to the graphviz object.\n\"\"\"\nfrom typing import List, Optional\n\nimport graphviz\n\nfrom airflow.models import TaskInstance\nfrom airflow.models.dag import DAG\nfrom airflow.utils.state import State\n\n\ndef _refine_color(color: str):\n \"\"\"\n Converts color in #RGB (12 bits) format to #RRGGBB (32 bits), if it possible.\n Otherwise, it returns the original value. Graphviz does not support colors in #RGB format.\n\n :param color: Text representation of color\n :return: Refined representation of color\n \"\"\"\n if len(color) == 4 and color[0] == \"#\":\n color_r = color[1]\n color_g = color[2]\n color_b = color[3]\n return \"#\" + color_r + color_r + color_g + color_g + color_b + color_b\n return color\n\n\ndef render_dag(dag: DAG, tis: Optional[List[TaskInstance]] = None) -> graphviz.Digraph:\n \"\"\"\n Renders the DAG object to the DOT object.\n\n If an task instance list is passed, the nodes will be painted according to task statuses.\n\n :param dag: DAG that will be rendered.\n :type dag: airflow.models.dag.DAG\n :param tis: List of task instances\n :type tis: Optional[List[TaskInstance]]\n :return: Graphviz object\n :rtype: graphviz.Digraph\n \"\"\"\n dot = graphviz.Digraph(dag.dag_id, graph_attr={\"rankdir\": \"LR\", \"labelloc\": \"t\", \"label\": dag.dag_id})\n states_by_task_id = None\n if tis is not None:\n states_by_task_id = {ti.task_id: ti.state for ti in tis}\n for task in dag.tasks:\n node_attrs = {\n \"shape\": \"rectangle\",\n \"style\": \"filled,rounded\",\n }\n if states_by_task_id is None:\n node_attrs.update({\n \"color\": _refine_color(task.ui_fgcolor),\n \"fillcolor\": _refine_color(task.ui_color),\n })\n else:\n state = states_by_task_id.get(task.task_id, State.NONE)\n node_attrs.update({\n \"color\": State.color_fg(state),\n \"fillcolor\": State.color(state),\n })\n dot.node(\n task.task_id,\n _attributes=node_attrs,\n )\n for downstream_task_id in task.downstream_task_ids:\n dot.edge(task.task_id, downstream_task_id)\n return dot\n", "path": "airflow/utils/dot_renderer.py"}]}
1,930
195
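For quick reference, a minimal sketch of the orientation handling shown in the verification_info above: the graph's rankdir attribute should come from the DAG itself and fall back to "LR" only when no orientation is set. SimpleDag below is an assumed stand-in for airflow.models.dag.DAG, used purely so the snippet is self-contained; it is not the actual Airflow implementation.

```python
import graphviz  # pip install graphviz


class SimpleDag:
    """Assumed stand-in for airflow.models.dag.DAG (only the fields used here)."""

    def __init__(self, dag_id, orientation=None):
        self.dag_id = dag_id
        self.orientation = orientation  # e.g. "LR", "TB", "RL", "BT"


def render_dag_skeleton(dag):
    # Honor the DAG's own orientation; fall back to "LR" only when it is unset.
    rankdir = dag.orientation if dag.orientation else "LR"
    return graphviz.Digraph(
        dag.dag_id,
        graph_attr={"rankdir": rankdir, "labelloc": "t", "label": dag.dag_id},
    )


dot = render_dag_skeleton(SimpleDag("example_dag", orientation="TB"))
print(dot.source)  # the emitted DOT now carries rankdir=TB instead of the hard-coded LR
```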
gh_patches_debug_37605
rasdani/github-patches
git_diff
deepchecks__deepchecks-455
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Refactor] Identifier Leakage Check should be plotted with plotly and not matplotlib </issue> <code> [start of deepchecks/checks/methodology/identifier_leakage.py] 1 # ---------------------------------------------------------------------------- 2 # Copyright (C) 2021 Deepchecks (https://www.deepchecks.com) 3 # 4 # This file is part of Deepchecks. 5 # Deepchecks is distributed under the terms of the GNU Affero General 6 # Public License (version 3 or later). 7 # You should have received a copy of the GNU Affero General Public License 8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>. 9 # ---------------------------------------------------------------------------- 10 # 11 """module contains Identifier Leakage check.""" 12 from typing import Union, Dict 13 14 import pandas as pd 15 16 import deepchecks.ppscore as pps 17 from deepchecks import Dataset 18 from deepchecks.base.check import CheckResult, SingleDatasetBaseCheck, ConditionResult 19 from deepchecks.utils.plot import create_colorbar_barchart_for_check 20 from deepchecks.utils.strings import format_percent 21 from deepchecks.errors import DeepchecksValueError 22 23 24 __all__ = ['IdentifierLeakage'] 25 26 27 class IdentifierLeakage(SingleDatasetBaseCheck): 28 """Check if identifiers (Index/Date) can be used to predict the label. 29 30 Args: 31 ppscore_params: dictionary containing params to pass to ppscore predictor 32 """ 33 34 def __init__(self, ppscore_params=None): 35 super().__init__() 36 self.ppscore_params = ppscore_params 37 38 def run(self, dataset: Dataset, model=None) -> CheckResult: 39 """Run check. 40 41 Args: 42 dataset(Dataset): any dataset. 43 model: ignored in check (default: None). 44 45 Returns: 46 (CheckResult): 47 value is a dictionary with PPS per feature column. 48 data is a bar graph of the PPS of each feature. 49 50 Raises: 51 DeepchecksValueError: If the object is not a Dataset instance with a label 52 """ 53 return self._identifier_leakage(dataset) 54 55 def _identifier_leakage(self, dataset: Union[pd.DataFrame, Dataset], ppscore_params=None) -> CheckResult: 56 Dataset.validate_dataset(dataset) 57 dataset.validate_label() 58 ppscore_params = ppscore_params or {} 59 60 relevant_columns = list(filter(None, [dataset.datetime_name, dataset.index_name, dataset.label_name])) 61 62 if len(relevant_columns) == 1: 63 raise DeepchecksValueError('Dataset needs to have a date or index column.') 64 65 df_pps = pps.predictors(df=dataset.data[relevant_columns], y=dataset.label_name, random_seed=42, 66 **ppscore_params) 67 df_pps = df_pps.set_index('x', drop=True) 68 s_ppscore = df_pps['ppscore'] 69 70 def plot(): 71 # Create graph: 72 create_colorbar_barchart_for_check(x=s_ppscore.index, y=s_ppscore.values, 73 ylabel='predictive power score (PPS)', 74 xlabel='Identifiers', color_map='gist_heat_r', color_shift_midpoint=0.1, 75 color_label='PPS', check_name=self.__class__.__name__) 76 77 text = ['The PPS represents the ability of a feature to single-handedly predict another feature or label.', 78 'For Identifier columns (Index/Date) PPS should be nearly 0, otherwise date and index have some ' 79 'predictive effect on the label.'] 80 81 return CheckResult(value=s_ppscore.to_dict(), display=[plot, *text]) 82 83 def add_condition_pps_not_greater_than(self, max_pps: float = 0): 84 """Add condition - require columns not to have a greater pps than given max. 
85 86 Args: 87 max_pps (int): Maximum allowed string length outliers ratio. 88 """ 89 def compare_pps(result: Dict): 90 not_passing_columns = [] 91 for column_name in result.keys(): 92 score = result[column_name] 93 if score > max_pps: 94 not_passing_columns.append(column_name) 95 if not_passing_columns: 96 not_passing_str = ', '.join(map(str, not_passing_columns)) 97 return ConditionResult(False, 98 f'Found columns with greater pps than {format_percent(max_pps)}: ' 99 f'{not_passing_str}') 100 else: 101 return ConditionResult(True) 102 103 return self.add_condition( 104 f'Identifier columns do not have a greater pps than {format_percent(max_pps)}', 105 compare_pps) 106 [end of deepchecks/checks/methodology/identifier_leakage.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/deepchecks/checks/methodology/identifier_leakage.py b/deepchecks/checks/methodology/identifier_leakage.py --- a/deepchecks/checks/methodology/identifier_leakage.py +++ b/deepchecks/checks/methodology/identifier_leakage.py @@ -16,10 +16,9 @@ import deepchecks.ppscore as pps from deepchecks import Dataset from deepchecks.base.check import CheckResult, SingleDatasetBaseCheck, ConditionResult -from deepchecks.utils.plot import create_colorbar_barchart_for_check from deepchecks.utils.strings import format_percent from deepchecks.errors import DeepchecksValueError - +import plotly.express as px __all__ = ['IdentifierLeakage'] @@ -67,18 +66,38 @@ df_pps = df_pps.set_index('x', drop=True) s_ppscore = df_pps['ppscore'] - def plot(): - # Create graph: - create_colorbar_barchart_for_check(x=s_ppscore.index, y=s_ppscore.values, - ylabel='predictive power score (PPS)', - xlabel='Identifiers', color_map='gist_heat_r', color_shift_midpoint=0.1, - color_label='PPS', check_name=self.__class__.__name__) + xaxis_layout = dict(title='Identifiers', type='category') + yaxis_layout = dict(fixedrange=True, + range=(0, 1), + title='predictive power score (PPS)') + + red_heavy_colorscale = [ + [0, 'rgb(255, 255, 255)'], # jan + [0.1, 'rgb(255,155,100)'], + [0.2, 'rgb(255, 50, 50)'], + [0.3, 'rgb(200, 0, 0)'], + [1, 'rgb(55, 0, 0)'] + ] + + figure = px.bar(s_ppscore, x=s_ppscore.index, y='ppscore', color='ppscore', + color_continuous_scale=red_heavy_colorscale) + figure.update_layout(width=700, height=400) + figure.update_layout( + dict( + xaxis=xaxis_layout, + yaxis=yaxis_layout, + coloraxis=dict( + cmin=0, + cmax=1 + ) + ) + ) text = ['The PPS represents the ability of a feature to single-handedly predict another feature or label.', 'For Identifier columns (Index/Date) PPS should be nearly 0, otherwise date and index have some ' 'predictive effect on the label.'] - return CheckResult(value=s_ppscore.to_dict(), display=[plot, *text]) + return CheckResult(value=s_ppscore.to_dict(), display=[figure, *text]) def add_condition_pps_not_greater_than(self, max_pps: float = 0): """Add condition - require columns not to have a greater pps than given max.
{"golden_diff": "diff --git a/deepchecks/checks/methodology/identifier_leakage.py b/deepchecks/checks/methodology/identifier_leakage.py\n--- a/deepchecks/checks/methodology/identifier_leakage.py\n+++ b/deepchecks/checks/methodology/identifier_leakage.py\n@@ -16,10 +16,9 @@\n import deepchecks.ppscore as pps\n from deepchecks import Dataset\n from deepchecks.base.check import CheckResult, SingleDatasetBaseCheck, ConditionResult\n-from deepchecks.utils.plot import create_colorbar_barchart_for_check\n from deepchecks.utils.strings import format_percent\n from deepchecks.errors import DeepchecksValueError\n-\n+import plotly.express as px\n \n __all__ = ['IdentifierLeakage']\n \n@@ -67,18 +66,38 @@\n df_pps = df_pps.set_index('x', drop=True)\n s_ppscore = df_pps['ppscore']\n \n- def plot():\n- # Create graph:\n- create_colorbar_barchart_for_check(x=s_ppscore.index, y=s_ppscore.values,\n- ylabel='predictive power score (PPS)',\n- xlabel='Identifiers', color_map='gist_heat_r', color_shift_midpoint=0.1,\n- color_label='PPS', check_name=self.__class__.__name__)\n+ xaxis_layout = dict(title='Identifiers', type='category')\n+ yaxis_layout = dict(fixedrange=True,\n+ range=(0, 1),\n+ title='predictive power score (PPS)')\n+\n+ red_heavy_colorscale = [\n+ [0, 'rgb(255, 255, 255)'], # jan\n+ [0.1, 'rgb(255,155,100)'],\n+ [0.2, 'rgb(255, 50, 50)'],\n+ [0.3, 'rgb(200, 0, 0)'],\n+ [1, 'rgb(55, 0, 0)']\n+ ]\n+\n+ figure = px.bar(s_ppscore, x=s_ppscore.index, y='ppscore', color='ppscore',\n+ color_continuous_scale=red_heavy_colorscale)\n+ figure.update_layout(width=700, height=400)\n+ figure.update_layout(\n+ dict(\n+ xaxis=xaxis_layout,\n+ yaxis=yaxis_layout,\n+ coloraxis=dict(\n+ cmin=0,\n+ cmax=1\n+ )\n+ )\n+ )\n \n text = ['The PPS represents the ability of a feature to single-handedly predict another feature or label.',\n 'For Identifier columns (Index/Date) PPS should be nearly 0, otherwise date and index have some '\n 'predictive effect on the label.']\n \n- return CheckResult(value=s_ppscore.to_dict(), display=[plot, *text])\n+ return CheckResult(value=s_ppscore.to_dict(), display=[figure, *text])\n \n def add_condition_pps_not_greater_than(self, max_pps: float = 0):\n \"\"\"Add condition - require columns not to have a greater pps than given max.\n", "issue": "[Refactor] Identifier Leakage Check should be plotted with plotly and not matplotlib\n\n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. 
If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"module contains Identifier Leakage check.\"\"\"\nfrom typing import Union, Dict\n\nimport pandas as pd\n\nimport deepchecks.ppscore as pps\nfrom deepchecks import Dataset\nfrom deepchecks.base.check import CheckResult, SingleDatasetBaseCheck, ConditionResult\nfrom deepchecks.utils.plot import create_colorbar_barchart_for_check\nfrom deepchecks.utils.strings import format_percent\nfrom deepchecks.errors import DeepchecksValueError\n\n\n__all__ = ['IdentifierLeakage']\n\n\nclass IdentifierLeakage(SingleDatasetBaseCheck):\n \"\"\"Check if identifiers (Index/Date) can be used to predict the label.\n\n Args:\n ppscore_params: dictionary containing params to pass to ppscore predictor\n \"\"\"\n\n def __init__(self, ppscore_params=None):\n super().__init__()\n self.ppscore_params = ppscore_params\n\n def run(self, dataset: Dataset, model=None) -> CheckResult:\n \"\"\"Run check.\n\n Args:\n dataset(Dataset): any dataset.\n model: ignored in check (default: None).\n\n Returns:\n (CheckResult):\n value is a dictionary with PPS per feature column.\n data is a bar graph of the PPS of each feature.\n\n Raises:\n DeepchecksValueError: If the object is not a Dataset instance with a label\n \"\"\"\n return self._identifier_leakage(dataset)\n\n def _identifier_leakage(self, dataset: Union[pd.DataFrame, Dataset], ppscore_params=None) -> CheckResult:\n Dataset.validate_dataset(dataset)\n dataset.validate_label()\n ppscore_params = ppscore_params or {}\n\n relevant_columns = list(filter(None, [dataset.datetime_name, dataset.index_name, dataset.label_name]))\n\n if len(relevant_columns) == 1:\n raise DeepchecksValueError('Dataset needs to have a date or index column.')\n\n df_pps = pps.predictors(df=dataset.data[relevant_columns], y=dataset.label_name, random_seed=42,\n **ppscore_params)\n df_pps = df_pps.set_index('x', drop=True)\n s_ppscore = df_pps['ppscore']\n\n def plot():\n # Create graph:\n create_colorbar_barchart_for_check(x=s_ppscore.index, y=s_ppscore.values,\n ylabel='predictive power score (PPS)',\n xlabel='Identifiers', color_map='gist_heat_r', color_shift_midpoint=0.1,\n color_label='PPS', check_name=self.__class__.__name__)\n\n text = ['The PPS represents the ability of a feature to single-handedly predict another feature or label.',\n 'For Identifier columns (Index/Date) PPS should be nearly 0, otherwise date and index have some '\n 'predictive effect on the label.']\n\n return CheckResult(value=s_ppscore.to_dict(), display=[plot, *text])\n\n def add_condition_pps_not_greater_than(self, max_pps: float = 0):\n \"\"\"Add condition - require columns not to have a greater pps than given max.\n\n Args:\n max_pps (int): Maximum allowed string length outliers ratio.\n \"\"\"\n def compare_pps(result: Dict):\n not_passing_columns = []\n for column_name in result.keys():\n score = result[column_name]\n if score > max_pps:\n not_passing_columns.append(column_name)\n if not_passing_columns:\n not_passing_str = ', '.join(map(str, not_passing_columns))\n return ConditionResult(False,\n f'Found columns with greater pps than {format_percent(max_pps)}: '\n f'{not_passing_str}')\n else:\n return ConditionResult(True)\n\n return self.add_condition(\n f'Identifier columns do not have a greater pps than {format_percent(max_pps)}',\n compare_pps)\n", "path": "deepchecks/checks/methodology/identifier_leakage.py"}]}
1,699
712
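A minimal plotly-express sketch of the bar chart that the golden_diff above swaps in for the matplotlib helper; the sample column names and PPS values are assumptions chosen only to make the snippet runnable, and the colorscale mirrors the one in the record.

```python
import pandas as pd
import plotly.express as px

# Assumed sample data: PPS score per identifier column.
scores = pd.Series({"index_col": 0.02, "date_col": 0.94}, name="ppscore")

red_heavy = [
    [0.0, "rgb(255,255,255)"],
    [0.1, "rgb(255,155,100)"],
    [0.2, "rgb(255,50,50)"],
    [0.3, "rgb(200,0,0)"],
    [1.0, "rgb(55,0,0)"],
]

fig = px.bar(
    scores.reset_index(),  # columns: "index", "ppscore"
    x="index",
    y="ppscore",
    color="ppscore",
    color_continuous_scale=red_heavy,
)
fig.update_layout(
    width=700,
    height=400,
    xaxis=dict(title="Identifiers", type="category"),
    yaxis=dict(range=(0, 1), title="predictive power score (PPS)"),
    coloraxis=dict(cmin=0, cmax=1),
)
fig.show()
```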
gh_patches_debug_29500
rasdani/github-patches
git_diff
kserve__kserve-889
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> SDK can't create and watch inferenceservice at the same time /kind bug **What steps did you take and what happened:** Running the below python SDK code to create and watch the inferenceservice occasionally have race condition on getting the status. ```python KFServing.create(isvc, watch=True, timeout_seconds=120) ``` ```python --------------------------------------------------------------------------- KeyError Traceback (most recent call last) <ipython-input-4-06b315a74aac> in <module> 1 KFServing = KFServingClient() ----> 2 KFServing.create(isvc, watch=True, timeout_seconds=120) ~/.local/lib/python3.6/site-packages/kfserving/api/kf_serving_client.py in create(self, inferenceservice, namespace, watch, timeout_seconds) 114 name=outputs['metadata']['name'], 115 namespace=namespace, --> 116 timeout_seconds=timeout_seconds) 117 else: 118 return outputs ~/.local/lib/python3.6/site-packages/kfserving/api/kf_serving_watch.py in watch(name, namespace, timeout_seconds) 46 continue 47 else: ---> 48 url = isvc['status'].get('url', '') 49 default_traffic = isvc['status'].get('traffic', '') 50 canary_traffic = isvc['status'].get('canaryTraffic', '') KeyError: 'status' ``` **What did you expect to happen:** it should watch the inferenceservice without error out on race condition. **Anything else you would like to add:** This error happens way more often on kfserving SDK 0.3.0+ **Environment:** - Istio Version: 1.1.6 - Knative Version: 1.11.2 - KFServing Version: 0.3.0 - Kubeflow version: 1.0.2 - Kfdef:[k8s_istio/istio_dex/gcp_basic_auth/gcp_iap/aws/aws_cognito/ibm] - Minikube version: - Kubernetes version: (use `kubectl version`): 1.15 - OS (e.g. from `/etc/os-release`): </issue> <code> [start of python/kfserving/kfserving/api/kf_serving_watch.py] 1 # Copyright 2019 The Kubeflow Authors. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 15 from kubernetes import client 16 from kubernetes import watch as k8s_watch 17 from table_logger import TableLogger 18 19 from ..constants import constants 20 from ..utils import utils 21 22 23 def watch(name=None, namespace=None, timeout_seconds=600): 24 """Watch the created or patched InferenceService in the specified namespace""" 25 26 if namespace is None: 27 namespace = utils.get_default_target_namespace() 28 29 tbl = TableLogger( 30 columns='NAME,READY,DEFAULT_TRAFFIC,CANARY_TRAFFIC,URL', 31 colwidth={'NAME': 20, 'READY':10, 'DEFAULT_TRAFFIC':15, 'CANARY_TRAFFIC':15, 'URL': 50}, 32 border=False) 33 34 stream = k8s_watch.Watch().stream( 35 client.CustomObjectsApi().list_namespaced_custom_object, 36 constants.KFSERVING_GROUP, 37 constants.KFSERVING_VERSION, 38 namespace, 39 constants.KFSERVING_PLURAL, 40 timeout_seconds=timeout_seconds) 41 42 for event in stream: 43 isvc = event['object'] 44 isvc_name = isvc['metadata']['name'] 45 if name and name != isvc_name: 46 continue 47 else: 48 url = isvc['status'].get('url', '') 49 default_traffic = isvc['status'].get('traffic', '') 50 canary_traffic = isvc['status'].get('canaryTraffic', '') 51 status = 'Unknown' 52 for condition in isvc['status'].get('conditions', {}): 53 if condition.get('type', '') == 'Ready': 54 status = condition.get('status', 'Unknown') 55 tbl(isvc_name, status, default_traffic, canary_traffic, url) 56 57 if name == isvc_name and status == 'True': 58 break 59 [end of python/kfserving/kfserving/api/kf_serving_watch.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/python/kfserving/kfserving/api/kf_serving_watch.py b/python/kfserving/kfserving/api/kf_serving_watch.py --- a/python/kfserving/kfserving/api/kf_serving_watch.py +++ b/python/kfserving/kfserving/api/kf_serving_watch.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import time from kubernetes import client from kubernetes import watch as k8s_watch from table_logger import TableLogger @@ -45,14 +46,20 @@ if name and name != isvc_name: continue else: - url = isvc['status'].get('url', '') - default_traffic = isvc['status'].get('traffic', '') - canary_traffic = isvc['status'].get('canaryTraffic', '') - status = 'Unknown' - for condition in isvc['status'].get('conditions', {}): - if condition.get('type', '') == 'Ready': - status = condition.get('status', 'Unknown') - tbl(isvc_name, status, default_traffic, canary_traffic, url) + if isvc.get('status', ''): + url = isvc['status'].get('url', '') + default_traffic = isvc['status'].get('traffic', '') + canary_traffic = isvc['status'].get('canaryTraffic', '') + status = 'Unknown' + for condition in isvc['status'].get('conditions', {}): + if condition.get('type', '') == 'Ready': + status = condition.get('status', 'Unknown') + tbl(isvc_name, status, default_traffic, canary_traffic, url) + else: + tbl(isvc_name, 'Unknown', '', '', '') + # Sleep 2 to avoid status section is not generated within a very short time. + time.sleep(2) + continue if name == isvc_name and status == 'True': break
{"golden_diff": "diff --git a/python/kfserving/kfserving/api/kf_serving_watch.py b/python/kfserving/kfserving/api/kf_serving_watch.py\n--- a/python/kfserving/kfserving/api/kf_serving_watch.py\n+++ b/python/kfserving/kfserving/api/kf_serving_watch.py\n@@ -12,6 +12,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import time\n from kubernetes import client\n from kubernetes import watch as k8s_watch\n from table_logger import TableLogger\n@@ -45,14 +46,20 @@\n if name and name != isvc_name:\n continue\n else:\n- url = isvc['status'].get('url', '')\n- default_traffic = isvc['status'].get('traffic', '')\n- canary_traffic = isvc['status'].get('canaryTraffic', '')\n- status = 'Unknown'\n- for condition in isvc['status'].get('conditions', {}):\n- if condition.get('type', '') == 'Ready':\n- status = condition.get('status', 'Unknown')\n- tbl(isvc_name, status, default_traffic, canary_traffic, url)\n+ if isvc.get('status', ''):\n+ url = isvc['status'].get('url', '')\n+ default_traffic = isvc['status'].get('traffic', '')\n+ canary_traffic = isvc['status'].get('canaryTraffic', '')\n+ status = 'Unknown'\n+ for condition in isvc['status'].get('conditions', {}):\n+ if condition.get('type', '') == 'Ready':\n+ status = condition.get('status', 'Unknown')\n+ tbl(isvc_name, status, default_traffic, canary_traffic, url)\n+ else:\n+ tbl(isvc_name, 'Unknown', '', '', '')\n+ # Sleep 2 to avoid status section is not generated within a very short time.\n+ time.sleep(2)\n+ continue\n \n if name == isvc_name and status == 'True':\n break\n", "issue": "SDK can't create and watch inferenceservice at the same time\n/kind bug\r\n\r\n**What steps did you take and what happened:**\r\nRunning the below python SDK code to create and watch the inferenceservice occasionally have race condition on getting the status.\r\n```python\r\nKFServing.create(isvc, watch=True, timeout_seconds=120)\r\n```\r\n\r\n```python\r\n---------------------------------------------------------------------------\r\nKeyError Traceback (most recent call last)\r\n<ipython-input-4-06b315a74aac> in <module>\r\n 1 KFServing = KFServingClient()\r\n----> 2 KFServing.create(isvc, watch=True, timeout_seconds=120)\r\n\r\n~/.local/lib/python3.6/site-packages/kfserving/api/kf_serving_client.py in create(self, inferenceservice, namespace, watch, timeout_seconds)\r\n 114 name=outputs['metadata']['name'],\r\n 115 namespace=namespace,\r\n--> 116 timeout_seconds=timeout_seconds)\r\n 117 else:\r\n 118 return outputs\r\n\r\n~/.local/lib/python3.6/site-packages/kfserving/api/kf_serving_watch.py in watch(name, namespace, timeout_seconds)\r\n 46 continue\r\n 47 else:\r\n---> 48 url = isvc['status'].get('url', '')\r\n 49 default_traffic = isvc['status'].get('traffic', '')\r\n 50 canary_traffic = isvc['status'].get('canaryTraffic', '')\r\n\r\nKeyError: 'status'\r\n```\r\n**What did you expect to happen:**\r\nit should watch the inferenceservice without error out on race condition.\r\n\r\n\r\n**Anything else you would like to add:**\r\nThis error happens way more often on kfserving SDK 0.3.0+\r\n\r\n\r\n**Environment:**\r\n\r\n- Istio Version: 1.1.6\r\n- Knative Version: 1.11.2\r\n- KFServing Version: 0.3.0\r\n- Kubeflow version: 1.0.2\r\n- Kfdef:[k8s_istio/istio_dex/gcp_basic_auth/gcp_iap/aws/aws_cognito/ibm]\r\n- Minikube version:\r\n- Kubernetes version: (use `kubectl version`): 1.15\r\n- OS (e.g. 
from `/etc/os-release`):\r\n\n", "before_files": [{"content": "# Copyright 2019 The Kubeflow Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom kubernetes import client\nfrom kubernetes import watch as k8s_watch\nfrom table_logger import TableLogger\n\nfrom ..constants import constants\nfrom ..utils import utils\n\n\ndef watch(name=None, namespace=None, timeout_seconds=600):\n \"\"\"Watch the created or patched InferenceService in the specified namespace\"\"\"\n\n if namespace is None:\n namespace = utils.get_default_target_namespace()\n\n tbl = TableLogger(\n columns='NAME,READY,DEFAULT_TRAFFIC,CANARY_TRAFFIC,URL',\n colwidth={'NAME': 20, 'READY':10, 'DEFAULT_TRAFFIC':15, 'CANARY_TRAFFIC':15, 'URL': 50},\n border=False)\n\n stream = k8s_watch.Watch().stream(\n client.CustomObjectsApi().list_namespaced_custom_object,\n constants.KFSERVING_GROUP,\n constants.KFSERVING_VERSION,\n namespace,\n constants.KFSERVING_PLURAL,\n timeout_seconds=timeout_seconds)\n\n for event in stream:\n isvc = event['object']\n isvc_name = isvc['metadata']['name']\n if name and name != isvc_name:\n continue\n else:\n url = isvc['status'].get('url', '')\n default_traffic = isvc['status'].get('traffic', '')\n canary_traffic = isvc['status'].get('canaryTraffic', '')\n status = 'Unknown'\n for condition in isvc['status'].get('conditions', {}):\n if condition.get('type', '') == 'Ready':\n status = condition.get('status', 'Unknown')\n tbl(isvc_name, status, default_traffic, canary_traffic, url)\n\n if name == isvc_name and status == 'True':\n break\n", "path": "python/kfserving/kfserving/api/kf_serving_watch.py"}]}
1,702
458
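The essence of the fix above is a defensive lookup: a freshly created InferenceService may not have a status section yet, so the watcher should fall back gracefully instead of indexing isvc['status'] directly. A condensed, self-contained sketch follows; the helper name and sample payloads are assumptions for illustration.

```python
import time


def summarize_inferenceservice(isvc):
    """Return (ready, url); tolerate the race where 'status' is not generated yet."""
    status = isvc.get("status")
    if not status:
        # Status section missing right after creation; wait briefly, caller retries.
        time.sleep(2)
        return "Unknown", ""
    ready = "Unknown"
    for condition in status.get("conditions", []):
        if condition.get("type") == "Ready":
            ready = condition.get("status", "Unknown")
    return ready, status.get("url", "")


# No status yet -> no KeyError, just "Unknown".
print(summarize_inferenceservice({"metadata": {"name": "flowers-sample"}}))
# Status present -> normal readout.
print(summarize_inferenceservice(
    {"status": {"url": "http://flowers.example", "conditions": [{"type": "Ready", "status": "True"}]}}
))
```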
gh_patches_debug_24233
rasdani/github-patches
git_diff
netket__netket-223
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Compiling with USE_LAPACK in v2.0 is broken Reported by @fabienalet </issue> <code> [start of setup.py] 1 import os 2 import platform 3 import re 4 import shlex 5 import subprocess 6 import sys 7 8 from distutils import log 9 from setuptools import setup, Extension 10 from setuptools.command.build_ext import build_ext 11 12 13 # Poor man's command-line options parsing 14 def steal_cmake_flags(args): 15 """ 16 Extracts CMake-related arguments from ``args``. ``args`` is a list of 17 strings usually equal to ``sys.argv``. All arguments of the form 18 ``--cmake-args=...`` are extracted (i.e. removed from ``args``!) and 19 accumulated. If there are no arguments of the specified form, 20 ``NETKET_CMAKE_FLAGS`` environment variable is used instead. 21 """ 22 _ARG_PREFIX = "--cmake-args=" 23 24 def _unquote(x): 25 m = re.match(r"'(.*)'", x) 26 if m: 27 return m.group(1) 28 m = re.match(r'"(.*)"', x) 29 if m: 30 return m.group(1) 31 return x 32 33 stolen_args = [x for x in args if x.startswith(_ARG_PREFIX)] 34 for x in stolen_args: 35 args.remove(x) 36 37 if len(stolen_args) > 0: 38 cmake_args = sum( 39 (shlex.split(_unquote(x[len(_ARG_PREFIX) :])) for x in stolen_args), [] 40 ) 41 else: 42 try: 43 cmake_args = shlex.split(os.environ["NETKET_CMAKE_FLAGS"]) 44 except KeyError: 45 cmake_args = [] 46 return cmake_args 47 48 49 """ 50 A list of arguments to be passed to the configuration step of CMake. 51 """ 52 _CMAKE_FLAGS = steal_cmake_flags(sys.argv) 53 54 55 class CMakeExtension(Extension): 56 def __init__(self, name, sourcedir=""): 57 Extension.__init__(self, name, sources=[]) 58 self.sourcedir = os.path.abspath(sourcedir) 59 60 61 class CMakeBuild(build_ext): 62 """ 63 We extend setuptools to support building extensions with CMake. An extension 64 is built with CMake if it inherits from ``CMakeExtension``. 
65 """ 66 67 def build_extension(self, ext): 68 if isinstance(ext, CMakeExtension): # Building with CMake 69 cwd = os.getcwd() 70 # Create a directory for building out-of-source 71 if not os.path.exists(self.build_temp): 72 os.makedirs(self.build_temp) 73 # lib_dir is the directory, where the shared libraries will be 74 # stored (it will probably be different from the build_temp 75 # directory so that setuptools find the libraries) 76 lib_dir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name))) 77 if not os.path.exists(lib_dir): 78 os.makedirs(lib_dir) 79 # Options to pass to CMake during configuration 80 cmake_args = _CMAKE_FLAGS 81 cmake_args.append( 82 "-DNETKET_PYTHON_VERSION={}.{}.{}".format(*sys.version_info[:3]) 83 ) 84 cmake_args.append("-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={}".format(lib_dir)) 85 86 def _decode(x): 87 if sys.version_info >= (3, 0): 88 return x.decode() 89 else: 90 return x 91 92 # Building 93 os.chdir(self.build_temp) 94 try: 95 # Configuration step 96 output = subprocess.check_output( 97 ["cmake", ext.sourcedir] + cmake_args, stderr=subprocess.STDOUT 98 ) 99 if self.distribution.verbose: 100 log.info(_decode(output)) 101 if not self.distribution.dry_run: 102 # Build step 103 output = subprocess.check_output( 104 ["cmake", "--build", "."], stderr=subprocess.STDOUT 105 ) 106 if self.distribution.verbose: 107 log.info(_decode(output)) 108 except subprocess.CalledProcessError as e: 109 if hasattr(ext, "optional"): 110 if not ext.optional: 111 self.warn(_decode(e.output)) 112 raise 113 self.warn( 114 'building extension "{}" failed:\n{}'.format( 115 ext.name, _decode(e.output) 116 ) 117 ) 118 else: 119 self.warn(_decode(e.output)) 120 raise 121 os.chdir(cwd) 122 else: # Fall back to the default method 123 if sys.version_info >= (3, 0): 124 super().build_extension(ext) 125 else: 126 super(build_ext, self).build_extension(ext) 127 128 129 setup( 130 name="netket", 131 version="2.0b3", 132 author="Giuseppe Carleo et al.", 133 url="http://github.com/netket/netket", 134 author_email="[email protected]", 135 license="Apache 2.0", 136 packages=["netket"], 137 ext_modules=[CMakeExtension("netket._C_netket")], 138 long_description="""NetKet is an open - source project delivering cutting - edge 139 methods for the study of many - body quantum systems with artificial 140 neural networks and machine learning techniques.""", 141 cmdclass=dict(build_ext=CMakeBuild), 142 zip_safe=False, 143 ) 144 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -58,6 +58,29 @@ self.sourcedir = os.path.abspath(sourcedir) +def _have_ninja(): + """ + Returns `True` if the [ninja](https://ninja-build.org/) build system is + available on the system. + """ + with open(os.devnull, "wb") as devnull: + try: + subprocess.check_call("ninja --version".split(), stdout=devnull) + except OSError: + return False + else: + return True + + +def _generator_specified(args): + """ + Returns `True` if `-G` flag was given to CMake. + """ + for _ in filter(lambda f: f.startswith("-G"), args): + return True + return False + + class CMakeBuild(build_ext): """ We extend setuptools to support building extensions with CMake. An extension @@ -82,6 +105,8 @@ "-DNETKET_PYTHON_VERSION={}.{}.{}".format(*sys.version_info[:3]) ) cmake_args.append("-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={}".format(lib_dir)) + if not _generator_specified(cmake_args) and _have_ninja(): + cmake_args.append("-GNinja") def _decode(x): if sys.version_info >= (3, 0):
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -58,6 +58,29 @@\n self.sourcedir = os.path.abspath(sourcedir)\n \n \n+def _have_ninja():\n+ \"\"\"\n+ Returns `True` if the [ninja](https://ninja-build.org/) build system is\n+ available on the system.\n+ \"\"\"\n+ with open(os.devnull, \"wb\") as devnull:\n+ try:\n+ subprocess.check_call(\"ninja --version\".split(), stdout=devnull)\n+ except OSError:\n+ return False\n+ else:\n+ return True\n+\n+\n+def _generator_specified(args):\n+ \"\"\"\n+ Returns `True` if `-G` flag was given to CMake.\n+ \"\"\"\n+ for _ in filter(lambda f: f.startswith(\"-G\"), args):\n+ return True\n+ return False\n+\n+\n class CMakeBuild(build_ext):\n \"\"\"\n We extend setuptools to support building extensions with CMake. An extension\n@@ -82,6 +105,8 @@\n \"-DNETKET_PYTHON_VERSION={}.{}.{}\".format(*sys.version_info[:3])\n )\n cmake_args.append(\"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={}\".format(lib_dir))\n+ if not _generator_specified(cmake_args) and _have_ninja():\n+ cmake_args.append(\"-GNinja\")\n \n def _decode(x):\n if sys.version_info >= (3, 0):\n", "issue": "Compiling with USE_LAPACK in v2.0 is broken \nReported by @fabienalet \n", "before_files": [{"content": "import os\nimport platform\nimport re\nimport shlex\nimport subprocess\nimport sys\n\nfrom distutils import log\nfrom setuptools import setup, Extension\nfrom setuptools.command.build_ext import build_ext\n\n\n# Poor man's command-line options parsing\ndef steal_cmake_flags(args):\n \"\"\"\n Extracts CMake-related arguments from ``args``. ``args`` is a list of\n strings usually equal to ``sys.argv``. All arguments of the form\n ``--cmake-args=...`` are extracted (i.e. removed from ``args``!) and\n accumulated. If there are no arguments of the specified form,\n ``NETKET_CMAKE_FLAGS`` environment variable is used instead.\n \"\"\"\n _ARG_PREFIX = \"--cmake-args=\"\n\n def _unquote(x):\n m = re.match(r\"'(.*)'\", x)\n if m:\n return m.group(1)\n m = re.match(r'\"(.*)\"', x)\n if m:\n return m.group(1)\n return x\n\n stolen_args = [x for x in args if x.startswith(_ARG_PREFIX)]\n for x in stolen_args:\n args.remove(x)\n\n if len(stolen_args) > 0:\n cmake_args = sum(\n (shlex.split(_unquote(x[len(_ARG_PREFIX) :])) for x in stolen_args), []\n )\n else:\n try:\n cmake_args = shlex.split(os.environ[\"NETKET_CMAKE_FLAGS\"])\n except KeyError:\n cmake_args = []\n return cmake_args\n\n\n\"\"\"\nA list of arguments to be passed to the configuration step of CMake.\n\"\"\"\n_CMAKE_FLAGS = steal_cmake_flags(sys.argv)\n\n\nclass CMakeExtension(Extension):\n def __init__(self, name, sourcedir=\"\"):\n Extension.__init__(self, name, sources=[])\n self.sourcedir = os.path.abspath(sourcedir)\n\n\nclass CMakeBuild(build_ext):\n \"\"\"\n We extend setuptools to support building extensions with CMake. 
An extension\n is built with CMake if it inherits from ``CMakeExtension``.\n \"\"\"\n\n def build_extension(self, ext):\n if isinstance(ext, CMakeExtension): # Building with CMake\n cwd = os.getcwd()\n # Create a directory for building out-of-source\n if not os.path.exists(self.build_temp):\n os.makedirs(self.build_temp)\n # lib_dir is the directory, where the shared libraries will be\n # stored (it will probably be different from the build_temp\n # directory so that setuptools find the libraries)\n lib_dir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))\n if not os.path.exists(lib_dir):\n os.makedirs(lib_dir)\n # Options to pass to CMake during configuration\n cmake_args = _CMAKE_FLAGS\n cmake_args.append(\n \"-DNETKET_PYTHON_VERSION={}.{}.{}\".format(*sys.version_info[:3])\n )\n cmake_args.append(\"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={}\".format(lib_dir))\n\n def _decode(x):\n if sys.version_info >= (3, 0):\n return x.decode()\n else:\n return x\n\n # Building\n os.chdir(self.build_temp)\n try:\n # Configuration step\n output = subprocess.check_output(\n [\"cmake\", ext.sourcedir] + cmake_args, stderr=subprocess.STDOUT\n )\n if self.distribution.verbose:\n log.info(_decode(output))\n if not self.distribution.dry_run:\n # Build step\n output = subprocess.check_output(\n [\"cmake\", \"--build\", \".\"], stderr=subprocess.STDOUT\n )\n if self.distribution.verbose:\n log.info(_decode(output))\n except subprocess.CalledProcessError as e:\n if hasattr(ext, \"optional\"):\n if not ext.optional:\n self.warn(_decode(e.output))\n raise\n self.warn(\n 'building extension \"{}\" failed:\\n{}'.format(\n ext.name, _decode(e.output)\n )\n )\n else:\n self.warn(_decode(e.output))\n raise\n os.chdir(cwd)\n else: # Fall back to the default method\n if sys.version_info >= (3, 0):\n super().build_extension(ext)\n else:\n super(build_ext, self).build_extension(ext)\n\n\nsetup(\n name=\"netket\",\n version=\"2.0b3\",\n author=\"Giuseppe Carleo et al.\",\n url=\"http://github.com/netket/netket\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n packages=[\"netket\"],\n ext_modules=[CMakeExtension(\"netket._C_netket\")],\n long_description=\"\"\"NetKet is an open - source project delivering cutting - edge\n methods for the study of many - body quantum systems with artificial\n neural networks and machine learning techniques.\"\"\",\n cmdclass=dict(build_ext=CMakeBuild),\n zip_safe=False,\n)\n", "path": "setup.py"}]}
1,942
330
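The golden_diff above works around the broken build by preferring the Ninja generator when it is available and no -G flag was already passed to CMake. A condensed sketch of those two helpers; the sample cmake_args value is an assumption.

```python
import os
import subprocess


def have_ninja():
    """True if the ninja build tool is on PATH."""
    with open(os.devnull, "wb") as devnull:
        try:
            subprocess.check_call(["ninja", "--version"], stdout=devnull)
        except OSError:
            return False
    return True


def generator_specified(cmake_args):
    """True if a -G flag was already given to CMake."""
    return any(arg.startswith("-G") for arg in cmake_args)


cmake_args = ["-DNETKET_PYTHON_VERSION=3.8.0"]  # assumed example flags
if not generator_specified(cmake_args) and have_ninja():
    cmake_args.append("-GNinja")
print(cmake_args)
```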
gh_patches_debug_34561
rasdani/github-patches
git_diff
freedomofpress__securedrop-2620
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> SecureDrop desktop shortcuts not displayed correctly on Tails 3.3 # Bug The desktop shortcuts for the SecureDrop Source and Journalist interfaces do not show the correct title and logo on Tail 3.3. Tails 3.3 introduces a fix for the `.desktop` file security issue. The fix requires `.desktop` files to have a `trusted` metadata parameter set before the `.desktop` file is parsed and displayed with a custom name and icon. The shortcuts are displayed with a name like `source.desktop` and requires a user to click "Trust and Launch" before the shortcut is displayed with the correct name and icon. Requiring users to click-through this warning could cause warning fatigue and result in unsafe user behavior. ## Steps to reproduce Click a SecureDrop .desktop shortcut on a Journalist Workstation running Tails 3.3. ## Expected Behavior The SecureDrop .onion opens in Tor Browser. ## Actual Behavior The user receives a warning about an untrusted .desktop file. ## Comments The SecureDrop `tails-config` Ansible role should set the Nautilus `metadata::trusted` parameter for both the Source and Journalist .desktop files. </issue> <code> [start of install_files/ansible-base/roles/tails-config/files/securedrop_init.py] 1 #!/usr/bin/python 2 3 import os 4 import sys 5 import subprocess 6 7 8 # check for root 9 if os.geteuid() != 0: 10 sys.exit('You need to run this as root') 11 12 # paths 13 path_torrc_additions = '/home/amnesia/Persistent/.securedrop/torrc_additions' 14 path_torrc_backup = '/etc/tor/torrc.bak' 15 path_torrc = '/etc/tor/torrc' 16 17 # load torrc_additions 18 if os.path.isfile(path_torrc_additions): 19 torrc_additions = open(path_torrc_additions).read() 20 else: 21 sys.exit('Error opening {0} for reading'.format(path_torrc_additions)) 22 23 # load torrc 24 if os.path.isfile(path_torrc_backup): 25 torrc = open(path_torrc_backup).read() 26 else: 27 if os.path.isfile(path_torrc): 28 torrc = open(path_torrc).read() 29 else: 30 sys.exit('Error opening {0} for reading'.format(path_torrc)) 31 32 # save a backup 33 open(path_torrc_backup, 'w').write(torrc) 34 35 # append the additions 36 open(path_torrc, 'w').write(torrc + torrc_additions) 37 38 # reload tor 39 try: 40 subprocess.check_call(['systemctl', 'reload', '[email protected]']) 41 except subprocess.CalledProcessError: 42 sys.exit('Error reloading Tor') 43 44 # Turn off "automatic-decompression" in Nautilus to ensure the original 45 # submission filename is restored (see 46 # https://github.com/freedomofpress/securedrop/issues/1862#issuecomment-311519750). 47 subprocess.call(['/usr/bin/dconf', 'write', 48 '/org/gnome/nautilus/preferences/automatic-decompression', 49 'false']) 50 51 # notify the user 52 subprocess.call(['tails-notify-user', 53 'SecureDrop successfully auto-configured!', 54 'You can now access the Journalist Interface.\nIf you are an admin, you can now SSH to the servers.']) 55 [end of install_files/ansible-base/roles/tails-config/files/securedrop_init.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/install_files/ansible-base/roles/tails-config/files/securedrop_init.py b/install_files/ansible-base/roles/tails-config/files/securedrop_init.py --- a/install_files/ansible-base/roles/tails-config/files/securedrop_init.py +++ b/install_files/ansible-base/roles/tails-config/files/securedrop_init.py @@ -1,6 +1,8 @@ #!/usr/bin/python +import grp import os +import pwd import sys import subprocess @@ -13,6 +15,8 @@ path_torrc_additions = '/home/amnesia/Persistent/.securedrop/torrc_additions' path_torrc_backup = '/etc/tor/torrc.bak' path_torrc = '/etc/tor/torrc' +path_desktop = '/home/amnesia/Desktop/' +path_persistent_desktop = '/lib/live/mount/persistence/TailsData_unlocked/dotfiles/Desktop/' # load torrc_additions if os.path.isfile(path_torrc_additions): @@ -48,7 +52,29 @@ '/org/gnome/nautilus/preferences/automatic-decompression', 'false']) -# notify the user +# Set journalist.desktop and source.desktop links as trusted with Nautilus (see +# https://github.com/freedomofpress/securedrop/issues/2586) +# set euid and env variables to amnesia user +amnesia_gid = grp.getgrnam('amnesia').gr_gid +amnesia_uid = pwd.getpwnam('amnesia').pw_uid +os.setresgid(amnesia_gid, amnesia_gid, -1) +os.setresuid(amnesia_uid, amnesia_uid, -1) +env = os.environ.copy() +env['XDG_RUNTIME_DIR'] = '/run/user/{}'.format(amnesia_uid) +env['XDG_DATA_DIR'] = '/usr/share/gnome:/usr/local/share/:/usr/share/' +env['HOME'] = '/home/amnesia' +env['LOGNAME'] = 'amnesia' +env['DBUS_SESSION_BUS_ADDRESS'] = 'unix:path=/run/user/{}/bus'.format(amnesia_uid) + +# remove existing shortcut, recreate symlink and change metadata attribute to trust .desktop +for shortcut in ['source.desktop', 'journalist.desktop']: + subprocess.call(['rm', path_desktop + shortcut], env=env) + subprocess.call(['ln', '-s', path_persistent_desktop + shortcut, path_desktop + shortcut], env=env) + subprocess.call(['gio', 'set', path_desktop + shortcut, 'metadata::trusted', 'yes'], env=env) + +# reacquire uid0 and notify the user +os.setresuid(0,0,-1) +os.setresgid(0,0,-1) subprocess.call(['tails-notify-user', 'SecureDrop successfully auto-configured!', 'You can now access the Journalist Interface.\nIf you are an admin, you can now SSH to the servers.'])
{"golden_diff": "diff --git a/install_files/ansible-base/roles/tails-config/files/securedrop_init.py b/install_files/ansible-base/roles/tails-config/files/securedrop_init.py\n--- a/install_files/ansible-base/roles/tails-config/files/securedrop_init.py\n+++ b/install_files/ansible-base/roles/tails-config/files/securedrop_init.py\n@@ -1,6 +1,8 @@\n #!/usr/bin/python\n \n+import grp\n import os\n+import pwd\n import sys\n import subprocess\n \n@@ -13,6 +15,8 @@\n path_torrc_additions = '/home/amnesia/Persistent/.securedrop/torrc_additions'\n path_torrc_backup = '/etc/tor/torrc.bak'\n path_torrc = '/etc/tor/torrc'\n+path_desktop = '/home/amnesia/Desktop/'\n+path_persistent_desktop = '/lib/live/mount/persistence/TailsData_unlocked/dotfiles/Desktop/'\n \n # load torrc_additions\n if os.path.isfile(path_torrc_additions):\n@@ -48,7 +52,29 @@\n '/org/gnome/nautilus/preferences/automatic-decompression',\n 'false'])\n \n-# notify the user\n+# Set journalist.desktop and source.desktop links as trusted with Nautilus (see\n+# https://github.com/freedomofpress/securedrop/issues/2586)\n+# set euid and env variables to amnesia user\n+amnesia_gid = grp.getgrnam('amnesia').gr_gid\n+amnesia_uid = pwd.getpwnam('amnesia').pw_uid\n+os.setresgid(amnesia_gid, amnesia_gid, -1)\n+os.setresuid(amnesia_uid, amnesia_uid, -1)\n+env = os.environ.copy()\n+env['XDG_RUNTIME_DIR'] = '/run/user/{}'.format(amnesia_uid)\n+env['XDG_DATA_DIR'] = '/usr/share/gnome:/usr/local/share/:/usr/share/'\n+env['HOME'] = '/home/amnesia'\n+env['LOGNAME'] = 'amnesia'\n+env['DBUS_SESSION_BUS_ADDRESS'] = 'unix:path=/run/user/{}/bus'.format(amnesia_uid)\n+\n+# remove existing shortcut, recreate symlink and change metadata attribute to trust .desktop\n+for shortcut in ['source.desktop', 'journalist.desktop']:\n+ subprocess.call(['rm', path_desktop + shortcut], env=env)\n+ subprocess.call(['ln', '-s', path_persistent_desktop + shortcut, path_desktop + shortcut], env=env)\n+ subprocess.call(['gio', 'set', path_desktop + shortcut, 'metadata::trusted', 'yes'], env=env)\n+\n+# reacquire uid0 and notify the user\n+os.setresuid(0,0,-1)\n+os.setresgid(0,0,-1)\n subprocess.call(['tails-notify-user',\n 'SecureDrop successfully auto-configured!',\n 'You can now access the Journalist Interface.\\nIf you are an admin, you can now SSH to the servers.'])\n", "issue": "SecureDrop desktop shortcuts not displayed correctly on Tails 3.3 \n# Bug\r\n\r\nThe desktop shortcuts for the SecureDrop Source and Journalist interfaces do not show the correct title and logo on Tail 3.3. Tails 3.3 introduces a fix for the `.desktop` file security issue. The fix requires `.desktop` files to have a `trusted` metadata parameter set before the `.desktop` file is parsed and displayed with a custom name and icon.\r\n\r\nThe shortcuts are displayed with a name like `source.desktop` and requires a user to click \"Trust and Launch\" before the shortcut is displayed with the correct name and icon. Requiring users to click-through this warning could cause warning fatigue and result in unsafe user behavior.\r\n\r\n## Steps to reproduce\r\n\r\nClick a SecureDrop .desktop shortcut on a Journalist Workstation running Tails 3.3. 
\r\n\r\n## Expected Behavior\r\n\r\nThe SecureDrop .onion opens in Tor Browser.\r\n\r\n## Actual Behavior\r\n\r\nThe user receives a warning about an untrusted .desktop file.\r\n\r\n## Comments\r\n\r\nThe SecureDrop `tails-config` Ansible role should set the Nautilus `metadata::trusted` parameter for both the Source and Journalist .desktop files.\r\n\n", "before_files": [{"content": "#!/usr/bin/python\n\nimport os\nimport sys\nimport subprocess\n\n\n# check for root\nif os.geteuid() != 0:\n sys.exit('You need to run this as root')\n\n# paths\npath_torrc_additions = '/home/amnesia/Persistent/.securedrop/torrc_additions'\npath_torrc_backup = '/etc/tor/torrc.bak'\npath_torrc = '/etc/tor/torrc'\n\n# load torrc_additions\nif os.path.isfile(path_torrc_additions):\n torrc_additions = open(path_torrc_additions).read()\nelse:\n sys.exit('Error opening {0} for reading'.format(path_torrc_additions))\n\n# load torrc\nif os.path.isfile(path_torrc_backup):\n torrc = open(path_torrc_backup).read()\nelse:\n if os.path.isfile(path_torrc):\n torrc = open(path_torrc).read()\n else:\n sys.exit('Error opening {0} for reading'.format(path_torrc))\n\n # save a backup\n open(path_torrc_backup, 'w').write(torrc)\n\n# append the additions\nopen(path_torrc, 'w').write(torrc + torrc_additions)\n\n# reload tor\ntry:\n subprocess.check_call(['systemctl', 'reload', '[email protected]'])\nexcept subprocess.CalledProcessError:\n sys.exit('Error reloading Tor')\n\n# Turn off \"automatic-decompression\" in Nautilus to ensure the original\n# submission filename is restored (see\n# https://github.com/freedomofpress/securedrop/issues/1862#issuecomment-311519750).\nsubprocess.call(['/usr/bin/dconf', 'write',\n '/org/gnome/nautilus/preferences/automatic-decompression',\n 'false'])\n\n# notify the user\nsubprocess.call(['tails-notify-user',\n 'SecureDrop successfully auto-configured!',\n 'You can now access the Journalist Interface.\\nIf you are an admin, you can now SSH to the servers.'])\n", "path": "install_files/ansible-base/roles/tails-config/files/securedrop_init.py"}]}
1,360
656
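A simplified sketch of the trust-marking step added above: each .desktop shortcut is recreated as a symlink and flagged trusted via gio so GNOME on Tails 3.3 skips the "Trust and Launch" prompt. The uid/gid switch to the amnesia user and the DBus environment setup from the actual patch are omitted here; the paths come from the record, and the snippet is illustrative only.

```python
import subprocess

desktop_dir = "/home/amnesia/Desktop/"
persistent_dir = "/lib/live/mount/persistence/TailsData_unlocked/dotfiles/Desktop/"

for shortcut in ("source.desktop", "journalist.desktop"):
    link = desktop_dir + shortcut
    # Replace the shortcut with a symlink to the persistent copy, then mark it trusted.
    subprocess.call(["rm", "-f", link])
    subprocess.call(["ln", "-s", persistent_dir + shortcut, link])
    subprocess.call(["gio", "set", link, "metadata::trusted", "yes"])
```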
gh_patches_debug_2623
rasdani/github-patches
git_diff
OpenNMT__OpenNMT-py-471
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> tools/embeddings_to_torch.py fails when some word features are included in the preprocessing step When there are some word features appended to each token in the source text, it seems that the `tools/embeddings_to_torch.py` script cannot extract correct vocabulary from the dataset. ``` $ python tools/embeddings_to_torch.py -emb_file /path/to/word.vectors.txt -dict_file dataset.vocab.pt -output dataset.emb Traceback (most recent call last): File "tools/embeddings_to_torch.py", line 94, in <module> main() File "tools/embeddings_to_torch.py", line 62, in main enc_vocab, dec_vocab = get_vocabs(opt.dict_file) File "tools/embeddings_to_torch.py", line 24, in get_vocabs enc_vocab, dec_vocab = [vocab[1] for vocab in vocabs] ValueError: too many values to unpack (expected 2) ``` </issue> <code> [start of tools/embeddings_to_torch.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 from __future__ import print_function 4 from __future__ import division 5 import six 6 import sys 7 import numpy as np 8 import argparse 9 import torch 10 11 parser = argparse.ArgumentParser(description='embeddings_to_torch.py') 12 parser.add_argument('-emb_file', required=True, 13 help="Embeddings from this file") 14 parser.add_argument('-output_file', required=True, 15 help="Output file for the prepared data") 16 parser.add_argument('-dict_file', required=True, 17 help="Dictionary file") 18 parser.add_argument('-verbose', action="store_true", default=False) 19 opt = parser.parse_args() 20 21 22 def get_vocabs(dict_file): 23 vocabs = torch.load(dict_file) 24 enc_vocab, dec_vocab = [vocab[1] for vocab in vocabs] 25 26 print("From: %s" % dict_file) 27 print("\t* source vocab: %d words" % len(enc_vocab)) 28 print("\t* target vocab: %d words" % len(dec_vocab)) 29 30 return enc_vocab, dec_vocab 31 32 33 def get_embeddings(file): 34 embs = dict() 35 for l in open(file, 'rb').readlines(): 36 l_split = l.decode('utf8').strip().split() 37 if len(l_split) == 2: 38 continue 39 embs[l_split[0]] = [float(em) for em in l_split[1:]] 40 print("Got {} embeddings from {}".format(len(embs), file)) 41 42 return embs 43 44 45 def match_embeddings(vocab, emb): 46 dim = len(six.next(six.itervalues(emb))) 47 filtered_embeddings = np.zeros((len(vocab), dim)) 48 count = {"match": 0, "miss": 0} 49 for w, w_id in vocab.stoi.items(): 50 if w in emb: 51 filtered_embeddings[w_id] = emb[w] 52 count['match'] += 1 53 else: 54 if opt.verbose: 55 print(u"not found:\t{}".format(w), file=sys.stderr) 56 count['miss'] += 1 57 58 return torch.Tensor(filtered_embeddings), count 59 60 61 def main(): 62 enc_vocab, dec_vocab = get_vocabs(opt.dict_file) 63 embeddings = get_embeddings(opt.emb_file) 64 65 filtered_enc_embeddings, enc_count = match_embeddings(enc_vocab, 66 embeddings) 67 filtered_dec_embeddings, dec_count = match_embeddings(dec_vocab, 68 embeddings) 69 70 print("\nMatching: ") 71 match_percent = [_['match'] / (_['match'] + _['miss']) * 100 72 for _ in [enc_count, dec_count]] 73 print("\t* enc: %d match, %d missing, (%.2f%%)" % (enc_count['match'], 74 enc_count['miss'], 75 match_percent[0])) 76 print("\t* dec: %d match, %d missing, (%.2f%%)" % (dec_count['match'], 77 dec_count['miss'], 78 match_percent[1])) 79 80 print("\nFiltered embeddings:") 81 print("\t* enc: ", filtered_enc_embeddings.size()) 82 print("\t* dec: ", filtered_dec_embeddings.size()) 83 84 enc_output_file = opt.output_file + ".enc.pt" 85 dec_output_file = opt.output_file + 
".dec.pt" 86 print("\nSaving embedding as:\n\t* enc: %s\n\t* dec: %s" 87 % (enc_output_file, dec_output_file)) 88 torch.save(filtered_enc_embeddings, enc_output_file) 89 torch.save(filtered_dec_embeddings, dec_output_file) 90 print("\nDone.") 91 92 93 if __name__ == "__main__": 94 main() 95 [end of tools/embeddings_to_torch.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/tools/embeddings_to_torch.py b/tools/embeddings_to_torch.py --- a/tools/embeddings_to_torch.py +++ b/tools/embeddings_to_torch.py @@ -21,7 +21,7 @@ def get_vocabs(dict_file): vocabs = torch.load(dict_file) - enc_vocab, dec_vocab = [vocab[1] for vocab in vocabs] + enc_vocab, dec_vocab = vocabs[0][1], vocabs[-1][1] print("From: %s" % dict_file) print("\t* source vocab: %d words" % len(enc_vocab))
{"golden_diff": "diff --git a/tools/embeddings_to_torch.py b/tools/embeddings_to_torch.py\n--- a/tools/embeddings_to_torch.py\n+++ b/tools/embeddings_to_torch.py\n@@ -21,7 +21,7 @@\n \n def get_vocabs(dict_file):\n vocabs = torch.load(dict_file)\n- enc_vocab, dec_vocab = [vocab[1] for vocab in vocabs]\n+ enc_vocab, dec_vocab = vocabs[0][1], vocabs[-1][1]\n \n print(\"From: %s\" % dict_file)\n print(\"\\t* source vocab: %d words\" % len(enc_vocab))\n", "issue": "tools/embeddings_to_torch.py fails when some word features are included in the preprocessing step\nWhen there are some word features appended to each token in the source text, it seems that the `tools/embeddings_to_torch.py` script cannot extract correct vocabulary from the dataset.\r\n```\r\n$ python tools/embeddings_to_torch.py -emb_file /path/to/word.vectors.txt -dict_file dataset.vocab.pt -output dataset.emb\r\nTraceback (most recent call last):\r\n File \"tools/embeddings_to_torch.py\", line 94, in <module>\r\n main()\r\n File \"tools/embeddings_to_torch.py\", line 62, in main\r\n enc_vocab, dec_vocab = get_vocabs(opt.dict_file)\r\n File \"tools/embeddings_to_torch.py\", line 24, in get_vocabs\r\n enc_vocab, dec_vocab = [vocab[1] for vocab in vocabs]\r\nValueError: too many values to unpack (expected 2)\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nfrom __future__ import division\nimport six\nimport sys\nimport numpy as np\nimport argparse\nimport torch\n\nparser = argparse.ArgumentParser(description='embeddings_to_torch.py')\nparser.add_argument('-emb_file', required=True,\n help=\"Embeddings from this file\")\nparser.add_argument('-output_file', required=True,\n help=\"Output file for the prepared data\")\nparser.add_argument('-dict_file', required=True,\n help=\"Dictionary file\")\nparser.add_argument('-verbose', action=\"store_true\", default=False)\nopt = parser.parse_args()\n\n\ndef get_vocabs(dict_file):\n vocabs = torch.load(dict_file)\n enc_vocab, dec_vocab = [vocab[1] for vocab in vocabs]\n\n print(\"From: %s\" % dict_file)\n print(\"\\t* source vocab: %d words\" % len(enc_vocab))\n print(\"\\t* target vocab: %d words\" % len(dec_vocab))\n\n return enc_vocab, dec_vocab\n\n\ndef get_embeddings(file):\n embs = dict()\n for l in open(file, 'rb').readlines():\n l_split = l.decode('utf8').strip().split()\n if len(l_split) == 2:\n continue\n embs[l_split[0]] = [float(em) for em in l_split[1:]]\n print(\"Got {} embeddings from {}\".format(len(embs), file))\n\n return embs\n\n\ndef match_embeddings(vocab, emb):\n dim = len(six.next(six.itervalues(emb)))\n filtered_embeddings = np.zeros((len(vocab), dim))\n count = {\"match\": 0, \"miss\": 0}\n for w, w_id in vocab.stoi.items():\n if w in emb:\n filtered_embeddings[w_id] = emb[w]\n count['match'] += 1\n else:\n if opt.verbose:\n print(u\"not found:\\t{}\".format(w), file=sys.stderr)\n count['miss'] += 1\n\n return torch.Tensor(filtered_embeddings), count\n\n\ndef main():\n enc_vocab, dec_vocab = get_vocabs(opt.dict_file)\n embeddings = get_embeddings(opt.emb_file)\n\n filtered_enc_embeddings, enc_count = match_embeddings(enc_vocab,\n embeddings)\n filtered_dec_embeddings, dec_count = match_embeddings(dec_vocab,\n embeddings)\n\n print(\"\\nMatching: \")\n match_percent = [_['match'] / (_['match'] + _['miss']) * 100\n for _ in [enc_count, dec_count]]\n print(\"\\t* enc: %d match, %d missing, (%.2f%%)\" % (enc_count['match'],\n enc_count['miss'],\n match_percent[0]))\n print(\"\\t* dec: %d match, 
%d missing, (%.2f%%)\" % (dec_count['match'],\n dec_count['miss'],\n match_percent[1]))\n\n print(\"\\nFiltered embeddings:\")\n print(\"\\t* enc: \", filtered_enc_embeddings.size())\n print(\"\\t* dec: \", filtered_dec_embeddings.size())\n\n enc_output_file = opt.output_file + \".enc.pt\"\n dec_output_file = opt.output_file + \".dec.pt\"\n print(\"\\nSaving embedding as:\\n\\t* enc: %s\\n\\t* dec: %s\"\n % (enc_output_file, dec_output_file))\n torch.save(filtered_enc_embeddings, enc_output_file)\n torch.save(filtered_dec_embeddings, dec_output_file)\n print(\"\\nDone.\")\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "tools/embeddings_to_torch.py"}]}
1,711
142
gh_patches_debug_16751
rasdani/github-patches
git_diff
Mailu__Mailu-1871
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Override to use server hostname + IP in outbound headers ## What type of PR? Enhancement ## What does this PR do? Allows users to set the server IP and hostname in the outgoing "Received" header rather than "PUBLIC_HOSTNAME [PUBLIC_IP]". ### Related issue(s) https://github.com/Mailu/Mailu/issues/191 ## Prerequistes Before we can consider review and merge, please make sure the following list is done and checked. If an entry in not applicable, you can check it or remove it from the list. - [x] In case of feature or enhancement: documentation updated accordingly - [ ] Unless it's docs or a minor change: add [changelog](https://mailu.io/master/contributors/guide.html#changelog) entry file. </issue> <code> [start of core/postfix/start.py] 1 #!/usr/bin/python3 2 3 import os 4 import glob 5 import shutil 6 import multiprocessing 7 import logging as log 8 import sys 9 10 from podop import run_server 11 from socrate import system, conf 12 13 log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING")) 14 15 def start_podop(): 16 os.setuid(100) 17 url = "http://" + os.environ["ADMIN_ADDRESS"] + "/internal/postfix/" 18 # TODO: Remove verbosity setting from Podop? 19 run_server(0, "postfix", "/tmp/podop.socket", [ 20 ("transport", "url", url + "transport/§"), 21 ("alias", "url", url + "alias/§"), 22 ("domain", "url", url + "domain/§"), 23 ("mailbox", "url", url + "mailbox/§"), 24 ("recipientmap", "url", url + "recipient/map/§"), 25 ("sendermap", "url", url + "sender/map/§"), 26 ("senderaccess", "url", url + "sender/access/§"), 27 ("senderlogin", "url", url + "sender/login/§") 28 ]) 29 30 def is_valid_postconf_line(line): 31 return not line.startswith("#") \ 32 and not line == '' 33 34 # Actual startup script 35 os.environ["FRONT_ADDRESS"] = system.get_host_address_from_environment("FRONT", "front") 36 os.environ["ADMIN_ADDRESS"] = system.get_host_address_from_environment("ADMIN", "admin") 37 os.environ["ANTISPAM_MILTER_ADDRESS"] = system.get_host_address_from_environment("ANTISPAM_MILTER", "antispam:11332") 38 os.environ["LMTP_ADDRESS"] = system.get_host_address_from_environment("LMTP", "imap:2525") 39 40 for postfix_file in glob.glob("/conf/*.cf"): 41 conf.jinja(postfix_file, os.environ, os.path.join("/etc/postfix", os.path.basename(postfix_file))) 42 43 if os.path.exists("/overrides/postfix.cf"): 44 for line in open("/overrides/postfix.cf").read().strip().split("\n"): 45 if is_valid_postconf_line(line): 46 os.system('postconf -e "{}"'.format(line)) 47 48 if os.path.exists("/overrides/postfix.master"): 49 for line in open("/overrides/postfix.master").read().strip().split("\n"): 50 if is_valid_postconf_line(line): 51 os.system('postconf -Me "{}"'.format(line)) 52 53 for map_file in glob.glob("/overrides/*.map"): 54 destination = os.path.join("/etc/postfix", os.path.basename(map_file)) 55 shutil.copyfile(map_file, destination) 56 os.system("postmap {}".format(destination)) 57 os.remove(destination) 58 59 if "RELAYUSER" in os.environ: 60 path = "/etc/postfix/sasl_passwd" 61 conf.jinja("/conf/sasl_passwd", os.environ, path) 62 os.system("postmap {}".format(path)) 63 64 # Run Podop and Postfix 65 multiprocessing.Process(target=start_podop).start() 66 os.system("/usr/libexec/postfix/post-install meta_directory=/etc/postfix create-missing") 67 # Before starting postfix, we need to check permissions on /queue 68 # in the event that postfix,postdrop id have changed 69 
os.system("postfix set-permissions") 70 os.system("postfix start-fg") 71 [end of core/postfix/start.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/core/postfix/start.py b/core/postfix/start.py --- a/core/postfix/start.py +++ b/core/postfix/start.py @@ -36,6 +36,11 @@ os.environ["ADMIN_ADDRESS"] = system.get_host_address_from_environment("ADMIN", "admin") os.environ["ANTISPAM_MILTER_ADDRESS"] = system.get_host_address_from_environment("ANTISPAM_MILTER", "antispam:11332") os.environ["LMTP_ADDRESS"] = system.get_host_address_from_environment("LMTP", "imap:2525") +os.environ["OUTCLEAN"] = os.environ["HOSTNAMES"].split(",")[0] +try: + os.environ["OUTCLEAN_ADDRESS"] = system.resolve_hostname(os.environ["OUTCLEAN"]) +except: + os.environ["OUTCLEAN_ADDRESS"] = "10.10.10.10" for postfix_file in glob.glob("/conf/*.cf"): conf.jinja(postfix_file, os.environ, os.path.join("/etc/postfix", os.path.basename(postfix_file)))
{"golden_diff": "diff --git a/core/postfix/start.py b/core/postfix/start.py\n--- a/core/postfix/start.py\n+++ b/core/postfix/start.py\n@@ -36,6 +36,11 @@\n os.environ[\"ADMIN_ADDRESS\"] = system.get_host_address_from_environment(\"ADMIN\", \"admin\")\n os.environ[\"ANTISPAM_MILTER_ADDRESS\"] = system.get_host_address_from_environment(\"ANTISPAM_MILTER\", \"antispam:11332\")\n os.environ[\"LMTP_ADDRESS\"] = system.get_host_address_from_environment(\"LMTP\", \"imap:2525\")\n+os.environ[\"OUTCLEAN\"] = os.environ[\"HOSTNAMES\"].split(\",\")[0]\n+try:\n+ os.environ[\"OUTCLEAN_ADDRESS\"] = system.resolve_hostname(os.environ[\"OUTCLEAN\"])\n+except:\n+ os.environ[\"OUTCLEAN_ADDRESS\"] = \"10.10.10.10\"\n \n for postfix_file in glob.glob(\"/conf/*.cf\"):\n conf.jinja(postfix_file, os.environ, os.path.join(\"/etc/postfix\", os.path.basename(postfix_file)))\n", "issue": "Override to use server hostname + IP in outbound headers\n## What type of PR?\r\nEnhancement\r\n\r\n## What does this PR do?\r\nAllows users to set the server IP and hostname in the outgoing \"Received\" header rather than \"PUBLIC_HOSTNAME [PUBLIC_IP]\".\r\n\r\n### Related issue(s)\r\nhttps://github.com/Mailu/Mailu/issues/191\r\n\r\n## Prerequistes\r\nBefore we can consider review and merge, please make sure the following list is done and checked.\r\nIf an entry in not applicable, you can check it or remove it from the list.\r\n\r\n- [x] In case of feature or enhancement: documentation updated accordingly\r\n- [ ] Unless it's docs or a minor change: add [changelog](https://mailu.io/master/contributors/guide.html#changelog) entry file.\r\n\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport glob\nimport shutil\nimport multiprocessing\nimport logging as log\nimport sys\n\nfrom podop import run_server\nfrom socrate import system, conf\n\nlog.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"WARNING\"))\n\ndef start_podop():\n os.setuid(100)\n url = \"http://\" + os.environ[\"ADMIN_ADDRESS\"] + \"/internal/postfix/\"\n # TODO: Remove verbosity setting from Podop?\n run_server(0, \"postfix\", \"/tmp/podop.socket\", [\n\t\t(\"transport\", \"url\", url + \"transport/\u00a7\"),\n\t\t(\"alias\", \"url\", url + \"alias/\u00a7\"),\n\t\t(\"domain\", \"url\", url + \"domain/\u00a7\"),\n (\"mailbox\", \"url\", url + \"mailbox/\u00a7\"),\n (\"recipientmap\", \"url\", url + \"recipient/map/\u00a7\"),\n (\"sendermap\", \"url\", url + \"sender/map/\u00a7\"),\n (\"senderaccess\", \"url\", url + \"sender/access/\u00a7\"),\n (\"senderlogin\", \"url\", url + \"sender/login/\u00a7\")\n ])\n\ndef is_valid_postconf_line(line):\n return not line.startswith(\"#\") \\\n and not line == ''\n\n# Actual startup script\nos.environ[\"FRONT_ADDRESS\"] = system.get_host_address_from_environment(\"FRONT\", \"front\")\nos.environ[\"ADMIN_ADDRESS\"] = system.get_host_address_from_environment(\"ADMIN\", \"admin\")\nos.environ[\"ANTISPAM_MILTER_ADDRESS\"] = system.get_host_address_from_environment(\"ANTISPAM_MILTER\", \"antispam:11332\")\nos.environ[\"LMTP_ADDRESS\"] = system.get_host_address_from_environment(\"LMTP\", \"imap:2525\")\n\nfor postfix_file in glob.glob(\"/conf/*.cf\"):\n conf.jinja(postfix_file, os.environ, os.path.join(\"/etc/postfix\", os.path.basename(postfix_file)))\n\nif os.path.exists(\"/overrides/postfix.cf\"):\n for line in open(\"/overrides/postfix.cf\").read().strip().split(\"\\n\"):\n if is_valid_postconf_line(line):\n os.system('postconf -e \"{}\"'.format(line))\n\nif 
os.path.exists(\"/overrides/postfix.master\"):\n for line in open(\"/overrides/postfix.master\").read().strip().split(\"\\n\"):\n if is_valid_postconf_line(line):\n os.system('postconf -Me \"{}\"'.format(line))\n\nfor map_file in glob.glob(\"/overrides/*.map\"):\n destination = os.path.join(\"/etc/postfix\", os.path.basename(map_file))\n shutil.copyfile(map_file, destination)\n os.system(\"postmap {}\".format(destination))\n os.remove(destination)\n\nif \"RELAYUSER\" in os.environ:\n path = \"/etc/postfix/sasl_passwd\"\n conf.jinja(\"/conf/sasl_passwd\", os.environ, path)\n os.system(\"postmap {}\".format(path))\n\n# Run Podop and Postfix\nmultiprocessing.Process(target=start_podop).start()\nos.system(\"/usr/libexec/postfix/post-install meta_directory=/etc/postfix create-missing\")\n# Before starting postfix, we need to check permissions on /queue\n# in the event that postfix,postdrop id have changed\nos.system(\"postfix set-permissions\")\nos.system(\"postfix start-fg\")\n", "path": "core/postfix/start.py"}]}
1,550
236
gh_patches_debug_869
rasdani/github-patches
git_diff
databricks__koalas-747
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [DO NOT MERGE] Test </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 3 # 4 # Copyright (C) 2019 Databricks, Inc. 5 # 6 # Licensed under the Apache License, Version 2.0 (the "License"); 7 # you may not use this file except in compliance with the License. 8 # You may obtain a copy of the License at 9 # 10 # http://www.apache.org/licenses/LICENSE-2.0 11 # 12 # Unless required by applicable law or agreed to in writing, software 13 # distributed under the License is distributed on an "AS IS" BASIS, 14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 # See the License for the specific language governing permissions and 16 # limitations under the License. 17 # 18 19 import sys 20 from setuptools import setup 21 from os import path 22 23 DESCRIPTION = "Koalas: pandas API on Apache Spark" 24 25 this_directory = path.abspath(path.dirname(__file__)) 26 with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f: 27 LONG_DESCRIPTION = f.read() 28 29 try: 30 exec(open('databricks/koalas/version.py').read()) 31 except IOError: 32 print("Failed to load Koalas version file for packaging. You must be in Koalas root dir.", 33 file=sys.stderr) 34 sys.exit(-1) 35 VERSION = __version__ # noqa 36 37 setup( 38 name='koalas', 39 version=VERSION, 40 packages=['databricks', 'databricks.koalas', 'databricks.koalas.missing', 41 'databricks.koalas.usage_logging'], 42 extras_require={ 43 'spark': ['pyspark>=2.4.0'], 44 'mlflow': ['mlflow>=1.0'], 45 }, 46 python_requires='>=3.5', 47 install_requires=[ 48 'pandas>=0.23', 49 'pyarrow>=0.10', 50 'numpy>=1.14', 51 'matplotlib>=3.0.0', 52 ], 53 maintainer="Databricks", 54 maintainer_email="[email protected]", 55 license='http://www.apache.org/licenses/LICENSE-2.0', 56 url="https://github.com/databricks/koalas", 57 project_urls={ 58 'Bug Tracker': 'https://github.com/databricks/koalas/issues', 59 'Documentation': 'https://koalas.readthedocs.io/', 60 'Source Code': 'https://github.com/databricks/koalas' 61 }, 62 description=DESCRIPTION, 63 long_description=LONG_DESCRIPTION, 64 long_description_content_type='text/markdown', 65 classifiers=[ 66 'Programming Language :: Python :: 3.5', 67 'Programming Language :: Python :: 3.6', 68 'Programming Language :: Python :: 3.7', 69 ], 70 ) 71 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -45,7 +45,7 @@ }, python_requires='>=3.5', install_requires=[ - 'pandas>=0.23', + 'pandas>=0.23.2', 'pyarrow>=0.10', 'numpy>=1.14', 'matplotlib>=3.0.0',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -45,7 +45,7 @@\n },\n python_requires='>=3.5',\n install_requires=[\n- 'pandas>=0.23',\n+ 'pandas>=0.23.2',\n 'pyarrow>=0.10',\n 'numpy>=1.14',\n 'matplotlib>=3.0.0',\n", "issue": "[DO NOT MERGE] Test\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n#\n# Copyright (C) 2019 Databricks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport sys\nfrom setuptools import setup\nfrom os import path\n\nDESCRIPTION = \"Koalas: pandas API on Apache Spark\"\n\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n LONG_DESCRIPTION = f.read()\n\ntry:\n exec(open('databricks/koalas/version.py').read())\nexcept IOError:\n print(\"Failed to load Koalas version file for packaging. You must be in Koalas root dir.\",\n file=sys.stderr)\n sys.exit(-1)\nVERSION = __version__ # noqa\n\nsetup(\n name='koalas',\n version=VERSION,\n packages=['databricks', 'databricks.koalas', 'databricks.koalas.missing',\n 'databricks.koalas.usage_logging'],\n extras_require={\n 'spark': ['pyspark>=2.4.0'],\n 'mlflow': ['mlflow>=1.0'],\n },\n python_requires='>=3.5',\n install_requires=[\n 'pandas>=0.23',\n 'pyarrow>=0.10',\n 'numpy>=1.14',\n 'matplotlib>=3.0.0',\n ],\n maintainer=\"Databricks\",\n maintainer_email=\"[email protected]\",\n license='http://www.apache.org/licenses/LICENSE-2.0',\n url=\"https://github.com/databricks/koalas\",\n project_urls={\n 'Bug Tracker': 'https://github.com/databricks/koalas/issues',\n 'Documentation': 'https://koalas.readthedocs.io/',\n 'Source Code': 'https://github.com/databricks/koalas'\n },\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n classifiers=[\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ],\n)\n", "path": "setup.py"}]}
1,265
101
gh_patches_debug_48468
rasdani/github-patches
git_diff
mathesar-foundation__mathesar-2062
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Sort list of schemas On the Database Page, the schemas appear in random order. ![image](https://user-images.githubusercontent.com/42411/199863005-2cc007c5-3224-489b-86de-80d924010833.png) I think they should be listed alphabetically, perhaps with `public` first. I'm not sure whether it would be better to handle this on the back end or the front end. Any thoughts @mathemancer @rajatvijay? Update: * This issue is supposed to be handled on the backend. * Part of this issue is solved in https://github.com/centerofci/mathesar/pull/2062 * Refer https://github.com/centerofci/mathesar/issues/1897#issuecomment-1470542703 for the current state of the issue. </issue> <code> [start of mathesar/api/db/viewsets/schemas.py] 1 from django_filters import rest_framework as filters 2 from rest_access_policy import AccessViewSetMixin 3 from rest_framework import status, viewsets 4 from rest_framework.decorators import action 5 from rest_framework.mixins import ListModelMixin, RetrieveModelMixin 6 from rest_framework.response import Response 7 8 from mathesar.api.db.permissions.schema import SchemaAccessPolicy 9 from mathesar.api.dj_filters import SchemaFilter 10 from mathesar.api.pagination import DefaultLimitOffsetPagination 11 from mathesar.api.serializers.dependents import DependentSerializer, DependentFilterSerializer 12 from mathesar.api.serializers.schemas import SchemaSerializer 13 from mathesar.models.base import Schema 14 from mathesar.utils.schemas import create_schema_and_object 15 16 17 class SchemaViewSet(AccessViewSetMixin, viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin): 18 serializer_class = SchemaSerializer 19 pagination_class = DefaultLimitOffsetPagination 20 filter_backends = (filters.DjangoFilterBackend,) 21 filterset_class = SchemaFilter 22 access_policy = SchemaAccessPolicy 23 24 def get_queryset(self): 25 qs = Schema.objects.all().order_by('-created_at') 26 return self.access_policy.scope_viewset_queryset(self.request, qs) 27 28 def create(self, request): 29 serializer = SchemaSerializer(data=request.data, context={'request': request}) 30 serializer.is_valid(raise_exception=True) 31 database_name = serializer.validated_data['database'].name 32 schema = create_schema_and_object( 33 serializer.validated_data['name'], 34 database_name, 35 comment=serializer.validated_data.get('description') 36 ) 37 serializer = SchemaSerializer(schema) 38 return Response(serializer.data, status=status.HTTP_201_CREATED) 39 40 def partial_update(self, request, pk=None): 41 serializer = SchemaSerializer( 42 data=request.data, context={'request': request}, partial=True 43 ) 44 serializer.is_valid(raise_exception=True) 45 46 schema = self.get_object() 47 schema.update_sa_schema(serializer.validated_data) 48 49 # Reload the schema to avoid cached properties 50 schema = self.get_object() 51 schema.clear_name_cache() 52 serializer = SchemaSerializer(schema, context={'request': request}) 53 return Response(serializer.data) 54 55 def destroy(self, request, pk=None): 56 schema = self.get_object() 57 schema.delete_sa_schema() 58 return Response(status=status.HTTP_204_NO_CONTENT) 59 60 @action(methods=['get'], detail=True) 61 def dependents(self, request, pk=None): 62 serializer = DependentFilterSerializer(data=request.GET) 63 serializer.is_valid(raise_exception=True) 64 types_exclude = serializer.validated_data['exclude'] 65 66 schema = self.get_object() 67 serializer = 
DependentSerializer(schema.get_dependents(types_exclude), many=True, context={'request': request}) 68 return Response(serializer.data) 69 [end of mathesar/api/db/viewsets/schemas.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mathesar/api/db/viewsets/schemas.py b/mathesar/api/db/viewsets/schemas.py --- a/mathesar/api/db/viewsets/schemas.py +++ b/mathesar/api/db/viewsets/schemas.py @@ -22,7 +22,7 @@ access_policy = SchemaAccessPolicy def get_queryset(self): - qs = Schema.objects.all().order_by('-created_at') + qs = Schema.objects.all().order_by('name') return self.access_policy.scope_viewset_queryset(self.request, qs) def create(self, request):
{"golden_diff": "diff --git a/mathesar/api/db/viewsets/schemas.py b/mathesar/api/db/viewsets/schemas.py\n--- a/mathesar/api/db/viewsets/schemas.py\n+++ b/mathesar/api/db/viewsets/schemas.py\n@@ -22,7 +22,7 @@\n access_policy = SchemaAccessPolicy\n \n def get_queryset(self):\n- qs = Schema.objects.all().order_by('-created_at')\n+ qs = Schema.objects.all().order_by('name')\n return self.access_policy.scope_viewset_queryset(self.request, qs)\n \n def create(self, request):\n", "issue": "Sort list of schemas\nOn the Database Page, the schemas appear in random order.\r\n\r\n![image](https://user-images.githubusercontent.com/42411/199863005-2cc007c5-3224-489b-86de-80d924010833.png)\r\n\r\nI think they should be listed alphabetically, perhaps with `public` first.\r\n\r\nI'm not sure whether it would be better to handle this on the back end or the front end.\r\n\r\nAny thoughts @mathemancer @rajatvijay?\r\n\r\n\r\nUpdate:\r\n* This issue is supposed to be handled on the backend.\r\n* Part of this issue is solved in https://github.com/centerofci/mathesar/pull/2062\r\n* Refer https://github.com/centerofci/mathesar/issues/1897#issuecomment-1470542703 for the current state of the issue.\n", "before_files": [{"content": "from django_filters import rest_framework as filters\nfrom rest_access_policy import AccessViewSetMixin\nfrom rest_framework import status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.mixins import ListModelMixin, RetrieveModelMixin\nfrom rest_framework.response import Response\n\nfrom mathesar.api.db.permissions.schema import SchemaAccessPolicy\nfrom mathesar.api.dj_filters import SchemaFilter\nfrom mathesar.api.pagination import DefaultLimitOffsetPagination\nfrom mathesar.api.serializers.dependents import DependentSerializer, DependentFilterSerializer\nfrom mathesar.api.serializers.schemas import SchemaSerializer\nfrom mathesar.models.base import Schema\nfrom mathesar.utils.schemas import create_schema_and_object\n\n\nclass SchemaViewSet(AccessViewSetMixin, viewsets.GenericViewSet, ListModelMixin, RetrieveModelMixin):\n serializer_class = SchemaSerializer\n pagination_class = DefaultLimitOffsetPagination\n filter_backends = (filters.DjangoFilterBackend,)\n filterset_class = SchemaFilter\n access_policy = SchemaAccessPolicy\n\n def get_queryset(self):\n qs = Schema.objects.all().order_by('-created_at')\n return self.access_policy.scope_viewset_queryset(self.request, qs)\n\n def create(self, request):\n serializer = SchemaSerializer(data=request.data, context={'request': request})\n serializer.is_valid(raise_exception=True)\n database_name = serializer.validated_data['database'].name\n schema = create_schema_and_object(\n serializer.validated_data['name'],\n database_name,\n comment=serializer.validated_data.get('description')\n )\n serializer = SchemaSerializer(schema)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n def partial_update(self, request, pk=None):\n serializer = SchemaSerializer(\n data=request.data, context={'request': request}, partial=True\n )\n serializer.is_valid(raise_exception=True)\n\n schema = self.get_object()\n schema.update_sa_schema(serializer.validated_data)\n\n # Reload the schema to avoid cached properties\n schema = self.get_object()\n schema.clear_name_cache()\n serializer = SchemaSerializer(schema, context={'request': request})\n return Response(serializer.data)\n\n def destroy(self, request, pk=None):\n schema = self.get_object()\n schema.delete_sa_schema()\n return 
Response(status=status.HTTP_204_NO_CONTENT)\n\n @action(methods=['get'], detail=True)\n def dependents(self, request, pk=None):\n serializer = DependentFilterSerializer(data=request.GET)\n serializer.is_valid(raise_exception=True)\n types_exclude = serializer.validated_data['exclude']\n\n schema = self.get_object()\n serializer = DependentSerializer(schema.get_dependents(types_exclude), many=True, context={'request': request})\n return Response(serializer.data)\n", "path": "mathesar/api/db/viewsets/schemas.py"}]}
1,455
124