| problem_id | source | task_type | in_source_id | prompt | golden_diff | verification_info | num_tokens_prompt | num_tokens_diff |
|---|---|---|---|---|---|---|---|---|
| string (18 to 22 chars) | string (1 class) | string (1 class) | string (13 to 58 chars) | string (1.71k to 9.01k chars) | string (151 to 4.94k chars) | string (465 to 11.3k chars) | int64 (557 to 2.05k) | int64 (48 to 1.02k) |
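The schema above maps directly onto the Hugging Face `datasets` API. The sketch below shows one way to load a record and unpack its fields; the Hub dataset name is inferred from the `source` column and the `train` split name is an assumption, so both should be treated as placeholders.

```python
# Minimal loading sketch. Assumptions, not confirmed by the table above:
# the records are hosted on the Hugging Face Hub as "rasdani/github-patches"
# (the value of the `source` column) and exposed as a single "train" split.
import json

from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")
row = ds[0]

print(row["problem_id"])      # e.g. "gh_patches_debug_32092"
print(row["in_source_id"])    # upstream issue id, e.g. "bokeh__bokeh-10229"
print(row["prompt"][:200])    # the task prompt handed to the model

# `verification_info` is a JSON string bundling the golden diff, the issue
# text, and the pre-patch file contents ("before_files").
info = json.loads(row["verification_info"])
print(sorted(info))           # ['before_files', 'golden_diff', 'issue']
```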
gh_patches_debug_32092 | rasdani/github-patches | git_diff | bokeh__bokeh-10229 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Simplify/refactor filters in bokehjs
ref https://github.com/bokeh/bokeh/pull/10084#discussion_r432112063
</issue>
<code>
[start of bokeh/models/filters.py]
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
3 # All rights reserved.
4 #
5 # The full license is in the file LICENSE.txt, distributed with this software.
6 #-----------------------------------------------------------------------------
7
8 #-----------------------------------------------------------------------------
9 # Boilerplate
10 #-----------------------------------------------------------------------------
11 import logging # isort:skip
12 log = logging.getLogger(__name__)
13
14 #-----------------------------------------------------------------------------
15 # Imports
16 #-----------------------------------------------------------------------------
17
18 # Bokeh imports
19 from ..core.properties import AnyRef, Bool, Dict, Either, Int, Seq, String
20 from ..model import Model
21
22 #-----------------------------------------------------------------------------
23 # Globals and constants
24 #-----------------------------------------------------------------------------
25
26 __all__ = (
27 'BooleanFilter',
28 'CustomJSFilter',
29 'Filter',
30 'GroupFilter',
31 'IndexFilter',
32 )
33
34 #-----------------------------------------------------------------------------
35 # General API
36 #-----------------------------------------------------------------------------
37
38 class Filter(Model):
39 ''' A Filter model represents a filtering operation that returns a row-wise subset of
40 data when applied to a ``ColumnDataSource``.
41 '''
42
43 filter = Either(Seq(Int), Seq(Bool), help="""
44 A list that can be either integer indices or booleans representing a row-wise subset of data.
45 """)
46
47 def __init__(self, *args, **kw):
48 if len(args) == 1 and "filter" not in kw:
49 kw["filter"] = args[0]
50
51 super().__init__(**kw)
52
53 class IndexFilter(Filter):
54 ''' An ``IndexFilter`` filters data by returning the subset of data at a given set of indices.
55 '''
56
57 indices = Seq(Int, help="""
58 A list of integer indices representing the subset of data to select.
59 """)
60
61 def __init__(self, *args, **kw):
62 if len(args) == 1 and "indices" not in kw:
63 kw["indices"] = args[0]
64
65 super().__init__(**kw)
66
67 class BooleanFilter(Filter):
68 ''' A ``BooleanFilter`` filters data by returning the subset of data corresponding to indices
69 where the values of the booleans array is True.
70 '''
71
72 booleans = Seq(Bool, help="""
73 A list of booleans indicating which rows of data to select.
74 """)
75
76 def __init__(self, *args, **kw):
77 if len(args) == 1 and "booleans" not in kw:
78 kw["booleans"] = args[0]
79
80 super().__init__(**kw)
81
82 class GroupFilter(Filter):
83 ''' A ``GroupFilter`` represents the rows of a ``ColumnDataSource`` where the values of the categorical
84 column column_name match the group variable.
85 '''
86
87 column_name = String(help="""
88 The name of the column to perform the group filtering operation on.
89 """)
90
91 group = String(help="""
92 The value of the column indicating the rows of data to keep.
93 """)
94
95 def __init__(self, *args, **kw):
96 if len(args) == 2 and "column_name" not in kw and "group" not in kw:
97 kw["column_name"] = args[0]
98 kw["group"] = args[1]
99
100 super().__init__(**kw)
101
102 class CustomJSFilter(Filter):
103 ''' Filter data sources with a custom defined JavaScript function.
104
105 .. warning::
106 The explicit purpose of this Bokeh Model is to embed *raw JavaScript
107 code* for a browser to execute. If any part of the code is derived
108 from untrusted user inputs, then you must take appropriate care to
109 sanitize the user input prior to passing to Bokeh.
110
111 '''
112
113 args = Dict(String, AnyRef, help="""
114 A mapping of names to Python objects. In particular those can be bokeh's models.
115 These objects are made available to the callback's code snippet as the values of
116 named parameters to the callback.
117 """)
118
119 code = String(default="", help="""
120 A snippet of JavaScript code to filter data contained in a columnar data source.
121 The code is made into the body of a function, and all of of the named objects in
122 ``args`` are available as parameters that the code can use. The variable
123 ``source`` will contain the data source that is associated with the ``CDSView`` this
124 filter is added to.
125
126 The code should either return the indices of the subset or an array of booleans
127 to use to subset data source rows.
128
129 Example:
130
131 .. code-block
132
133 code = '''
134 const indices = []
135 for (var i = 0; i <= source.data['some_column'].length; i++) {
136 if (source.data['some_column'][i] == 'some_value') {
137 indices.push(i)
138 }
139 }
140 return indices
141 '''
142
143 """)
144
145 #-----------------------------------------------------------------------------
146 # Dev API
147 #-----------------------------------------------------------------------------
148
149 #-----------------------------------------------------------------------------
150 # Private API
151 #-----------------------------------------------------------------------------
152
153 #-----------------------------------------------------------------------------
154 # Code
155 #-----------------------------------------------------------------------------
156
[end of bokeh/models/filters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> |
diff --git a/bokeh/models/filters.py b/bokeh/models/filters.py
--- a/bokeh/models/filters.py
+++ b/bokeh/models/filters.py
@@ -16,7 +16,8 @@
#-----------------------------------------------------------------------------
# Bokeh imports
-from ..core.properties import AnyRef, Bool, Dict, Either, Int, Seq, String
+from ..core.has_props import abstract
+from ..core.properties import AnyRef, Bool, Dict, Int, Seq, String
from ..model import Model
#-----------------------------------------------------------------------------
@@ -35,21 +36,12 @@
# General API
#-----------------------------------------------------------------------------
+@abstract
class Filter(Model):
''' A Filter model represents a filtering operation that returns a row-wise subset of
data when applied to a ``ColumnDataSource``.
'''
- filter = Either(Seq(Int), Seq(Bool), help="""
- A list that can be either integer indices or booleans representing a row-wise subset of data.
- """)
-
- def __init__(self, *args, **kw):
- if len(args) == 1 and "filter" not in kw:
- kw["filter"] = args[0]
-
- super().__init__(**kw)
-
class IndexFilter(Filter):
''' An ``IndexFilter`` filters data by returning the subset of data at a given set of indices.
'''
@@ -132,7 +124,7 @@
code = '''
const indices = []
- for (var i = 0; i <= source.data['some_column'].length; i++) {
+ for (let i = 0; i <= source.data['some_column'].length; i++) {
if (source.data['some_column'][i] == 'some_value') {
indices.push(i)
}
| {"golden_diff": "diff --git a/bokeh/models/filters.py b/bokeh/models/filters.py\n--- a/bokeh/models/filters.py\n+++ b/bokeh/models/filters.py\n@@ -16,7 +16,8 @@\n #-----------------------------------------------------------------------------\n \n # Bokeh imports\n-from ..core.properties import AnyRef, Bool, Dict, Either, Int, Seq, String\n+from ..core.has_props import abstract\n+from ..core.properties import AnyRef, Bool, Dict, Int, Seq, String\n from ..model import Model\n \n #-----------------------------------------------------------------------------\n@@ -35,21 +36,12 @@\n # General API\n #-----------------------------------------------------------------------------\n \n+@abstract\n class Filter(Model):\n ''' A Filter model represents a filtering operation that returns a row-wise subset of\n data when applied to a ``ColumnDataSource``.\n '''\n \n- filter = Either(Seq(Int), Seq(Bool), help=\"\"\"\n- A list that can be either integer indices or booleans representing a row-wise subset of data.\n- \"\"\")\n-\n- def __init__(self, *args, **kw):\n- if len(args) == 1 and \"filter\" not in kw:\n- kw[\"filter\"] = args[0]\n-\n- super().__init__(**kw)\n-\n class IndexFilter(Filter):\n ''' An ``IndexFilter`` filters data by returning the subset of data at a given set of indices.\n '''\n@@ -132,7 +124,7 @@\n \n code = '''\n const indices = []\n- for (var i = 0; i <= source.data['some_column'].length; i++) {\n+ for (let i = 0; i <= source.data['some_column'].length; i++) {\n if (source.data['some_column'][i] == 'some_value') {\n indices.push(i)\n }\n", "issue": "Simplify/refactor filters in bokehjs\nref https://github.com/bokeh/bokeh/pull/10084#discussion_r432112063\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.\n# All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nimport logging # isort:skip\nlog = logging.getLogger(__name__)\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Bokeh imports\nfrom ..core.properties import AnyRef, Bool, Dict, Either, Int, Seq, String\nfrom ..model import Model\n\n#-----------------------------------------------------------------------------\n# Globals and constants\n#-----------------------------------------------------------------------------\n\n__all__ = (\n 'BooleanFilter',\n 'CustomJSFilter',\n 'Filter',\n 'GroupFilter',\n 'IndexFilter',\n)\n\n#-----------------------------------------------------------------------------\n# General API\n#-----------------------------------------------------------------------------\n\nclass Filter(Model):\n ''' A Filter model represents a filtering operation that returns a row-wise subset of\n data when applied to a ``ColumnDataSource``.\n '''\n\n filter = Either(Seq(Int), Seq(Bool), help=\"\"\"\n A list that can be either integer indices or booleans representing a row-wise subset of data.\n \"\"\")\n\n def __init__(self, *args, **kw):\n if len(args) == 1 and \"filter\" not in kw:\n kw[\"filter\"] = args[0]\n\n super().__init__(**kw)\n\nclass IndexFilter(Filter):\n 
''' An ``IndexFilter`` filters data by returning the subset of data at a given set of indices.\n '''\n\n indices = Seq(Int, help=\"\"\"\n A list of integer indices representing the subset of data to select.\n \"\"\")\n\n def __init__(self, *args, **kw):\n if len(args) == 1 and \"indices\" not in kw:\n kw[\"indices\"] = args[0]\n\n super().__init__(**kw)\n\nclass BooleanFilter(Filter):\n ''' A ``BooleanFilter`` filters data by returning the subset of data corresponding to indices\n where the values of the booleans array is True.\n '''\n\n booleans = Seq(Bool, help=\"\"\"\n A list of booleans indicating which rows of data to select.\n \"\"\")\n\n def __init__(self, *args, **kw):\n if len(args) == 1 and \"booleans\" not in kw:\n kw[\"booleans\"] = args[0]\n\n super().__init__(**kw)\n\nclass GroupFilter(Filter):\n ''' A ``GroupFilter`` represents the rows of a ``ColumnDataSource`` where the values of the categorical\n column column_name match the group variable.\n '''\n\n column_name = String(help=\"\"\"\n The name of the column to perform the group filtering operation on.\n \"\"\")\n\n group = String(help=\"\"\"\n The value of the column indicating the rows of data to keep.\n \"\"\")\n\n def __init__(self, *args, **kw):\n if len(args) == 2 and \"column_name\" not in kw and \"group\" not in kw:\n kw[\"column_name\"] = args[0]\n kw[\"group\"] = args[1]\n\n super().__init__(**kw)\n\nclass CustomJSFilter(Filter):\n ''' Filter data sources with a custom defined JavaScript function.\n\n .. warning::\n The explicit purpose of this Bokeh Model is to embed *raw JavaScript\n code* for a browser to execute. If any part of the code is derived\n from untrusted user inputs, then you must take appropriate care to\n sanitize the user input prior to passing to Bokeh.\n\n '''\n\n args = Dict(String, AnyRef, help=\"\"\"\n A mapping of names to Python objects. In particular those can be bokeh's models.\n These objects are made available to the callback's code snippet as the values of\n named parameters to the callback.\n \"\"\")\n\n code = String(default=\"\", help=\"\"\"\n A snippet of JavaScript code to filter data contained in a columnar data source.\n The code is made into the body of a function, and all of of the named objects in\n ``args`` are available as parameters that the code can use. The variable\n ``source`` will contain the data source that is associated with the ``CDSView`` this\n filter is added to.\n\n The code should either return the indices of the subset or an array of booleans\n to use to subset data source rows.\n\n Example:\n\n .. code-block\n\n code = '''\n const indices = []\n for (var i = 0; i <= source.data['some_column'].length; i++) {\n if (source.data['some_column'][i] == 'some_value') {\n indices.push(i)\n }\n }\n return indices\n '''\n\n \"\"\")\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n", "path": "bokeh/models/filters.py"}]} | 2,001 | 397 |
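The golden diff above makes `Filter` abstract and removes its `filter` property while keeping the positional-argument shims on the concrete subclasses. A minimal usage sketch of those subclasses after the patch, assuming a Bokeh build that includes it:

```python
# Sketch only: exercises the positional-argument __init__ shims kept by the
# patch. Assumes a Bokeh build that already contains the fix.
from bokeh.models import BooleanFilter, CDSView, ColumnDataSource, IndexFilter

source = ColumnDataSource(data=dict(x=[1, 2, 3, 4]))

idx = IndexFilter([0, 2])                         # positional arg -> `indices`
mask = BooleanFilter([True, False, True, False])  # positional arg -> `booleans`

# `Filter` itself is abstract after the patch, so views are built from the
# concrete subclasses.
view = CDSView(source=source, filters=[idx])
```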
gh_patches_debug_15868 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-2479 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PyInstaller should not check for modules specific for other than the current platform
**Python extensions for Windows**
We should not be checking for these modules in os x and linux.
</issue>
<code>
[start of PyInstaller/hooks/pre_safe_import_module/hook-win32com.py]
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2005-2017, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License with exception
5 # for distributing bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #-----------------------------------------------------------------------------
9
10 """
11 PyWin32 package 'win32com' extends it's __path__ attribute with win32comext
12 directory and thus PyInstaller is not able to find modules in it. For example
13 module 'win32com.shell' is in reality 'win32comext.shell'.
14
15 >>> win32com.__path__
16 ['win32com', 'C:\\Python27\\Lib\\site-packages\\win32comext']
17
18 """
19
20
21 import os
22
23 from PyInstaller.utils.hooks import logger, get_module_file_attribute
24
25
26 def pre_safe_import_module(api):
27 win32com_dir = os.path.dirname(get_module_file_attribute('win32com'))
28 comext_dir = os.path.join(os.path.dirname(win32com_dir), 'win32comext')
29 logger.debug('win32com: extending __path__ with dir %r' % comext_dir)
30 # Append the __path__ where PyInstaller will look for 'win32com' modules.'
31 api.append_package_path(comext_dir)
32
[end of PyInstaller/hooks/pre_safe_import_module/hook-win32com.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> |
diff --git a/PyInstaller/hooks/pre_safe_import_module/hook-win32com.py b/PyInstaller/hooks/pre_safe_import_module/hook-win32com.py
--- a/PyInstaller/hooks/pre_safe_import_module/hook-win32com.py
+++ b/PyInstaller/hooks/pre_safe_import_module/hook-win32com.py
@@ -21,11 +21,13 @@
import os
from PyInstaller.utils.hooks import logger, get_module_file_attribute
+from PyInstaller.compat import is_win
def pre_safe_import_module(api):
- win32com_dir = os.path.dirname(get_module_file_attribute('win32com'))
- comext_dir = os.path.join(os.path.dirname(win32com_dir), 'win32comext')
- logger.debug('win32com: extending __path__ with dir %r' % comext_dir)
- # Append the __path__ where PyInstaller will look for 'win32com' modules.'
- api.append_package_path(comext_dir)
+ if is_win:
+ win32com_dir = os.path.dirname(get_module_file_attribute('win32com'))
+ comext_dir = os.path.join(os.path.dirname(win32com_dir), 'win32comext')
+ logger.debug('win32com: extending __path__ with dir %r' % comext_dir)
+ # Append the __path__ where PyInstaller will look for 'win32com' modules.'
+ api.append_package_path(comext_dir)
| {"golden_diff": "diff --git a/PyInstaller/hooks/pre_safe_import_module/hook-win32com.py b/PyInstaller/hooks/pre_safe_import_module/hook-win32com.py\n--- a/PyInstaller/hooks/pre_safe_import_module/hook-win32com.py\n+++ b/PyInstaller/hooks/pre_safe_import_module/hook-win32com.py\n@@ -21,11 +21,13 @@\n import os\n \n from PyInstaller.utils.hooks import logger, get_module_file_attribute\n+from PyInstaller.compat import is_win\n \n \n def pre_safe_import_module(api):\n- win32com_dir = os.path.dirname(get_module_file_attribute('win32com'))\n- comext_dir = os.path.join(os.path.dirname(win32com_dir), 'win32comext')\n- logger.debug('win32com: extending __path__ with dir %r' % comext_dir)\n- # Append the __path__ where PyInstaller will look for 'win32com' modules.'\n- api.append_package_path(comext_dir)\n+ if is_win:\n+ win32com_dir = os.path.dirname(get_module_file_attribute('win32com'))\n+ comext_dir = os.path.join(os.path.dirname(win32com_dir), 'win32comext')\n+ logger.debug('win32com: extending __path__ with dir %r' % comext_dir)\n+ # Append the __path__ where PyInstaller will look for 'win32com' modules.'\n+ api.append_package_path(comext_dir)\n", "issue": "PyInstaller should not check for modules specific for other than the current platform\n**Python extensions for Windows**\r\n\r\nWe should not be checking for these modules in os x and linux. \n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2017, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n\"\"\"\nPyWin32 package 'win32com' extends it's __path__ attribute with win32comext\ndirectory and thus PyInstaller is not able to find modules in it. For example\nmodule 'win32com.shell' is in reality 'win32comext.shell'.\n\n>>> win32com.__path__\n['win32com', 'C:\\\\Python27\\\\Lib\\\\site-packages\\\\win32comext']\n\n\"\"\"\n\n\nimport os\n\nfrom PyInstaller.utils.hooks import logger, get_module_file_attribute\n\n\ndef pre_safe_import_module(api):\n win32com_dir = os.path.dirname(get_module_file_attribute('win32com'))\n comext_dir = os.path.join(os.path.dirname(win32com_dir), 'win32comext')\n logger.debug('win32com: extending __path__ with dir %r' % comext_dir)\n # Append the __path__ where PyInstaller will look for 'win32com' modules.'\n api.append_package_path(comext_dir)\n", "path": "PyInstaller/hooks/pre_safe_import_module/hook-win32com.py"}]} | 934 | 331 |
gh_patches_debug_6606 | rasdani/github-patches | git_diff | ansible__ansible-modules-extras-2669 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
modprobe: Call 'modprobe -r' instead of 'rmmod' for absent?
##### ISSUE TYPE
- Feature Idea
##### COMPONENT NAME
modprobe
##### ANSIBLE VERSION
```
ansible 2.0.1.0
config file =
configured module search path = Default w/o overrides
```
##### CONFIGURATION
N/A
##### OS / ENVIRONMENT
N/A (linux target)
##### SUMMARY
Current implementation of modprobe module uses `rmmod` command to remove kernel module.
https://github.com/ansible/ansible-modules-extras/blob/stable-2.1/system/modprobe.py#L114
Why don't we use `modprobe -r` instead of `rmmod` here?
`modprobe -r` would be better because;
1. It will also unload unused modules
2. Straight forward from module name
##### STEPS TO REPRODUCE
I was trying to unload sb_edac module from my server (since it conflict with some hardware monitoring of server), the module depends on edac_core and edac_core was loaded only for sb_edac.
Before applying playbook, on the target server.
```
server# lsmod | grep edac
sb_edac 28672 0
edac_core 53248 1 sb_edac
```
playbook (snippet)
```
- name: unload edac modules
modprobe:
name: sb_edac
state: absent
```
##### EXPECTED RESULTS
edac_core module unloaded, since it no longer be used.
##### ACTUAL RESULTS
After applying playbook, on the target server.
```
server# lsmod | grep edac
edac_core 53248 0
```
</issue>
<code>
[start of system/modprobe.py]
1 #!/usr/bin/python
2 #coding: utf-8 -*-
3
4 # (c) 2013, David Stygstra <[email protected]>
5 #
6 # This file is part of Ansible
7 #
8 # This module is free software: you can redistribute it and/or modify
9 # it under the terms of the GNU General Public License as published by
10 # the Free Software Foundation, either version 3 of the License, or
11 # (at your option) any later version.
12 #
13 # This software is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU General Public License for more details.
17 #
18 # You should have received a copy of the GNU General Public License
19 # along with this software. If not, see <http://www.gnu.org/licenses/>.
20
21
22 DOCUMENTATION = '''
23 ---
24 module: modprobe
25 short_description: Add or remove kernel modules
26 requirements: []
27 version_added: 1.4
28 author:
29 - "David Stygstra (@stygstra)"
30 - "Julien Dauphant"
31 - "Matt Jeffery"
32 description:
33 - Add or remove kernel modules.
34 options:
35 name:
36 required: true
37 description:
38 - Name of kernel module to manage.
39 state:
40 required: false
41 default: "present"
42 choices: [ present, absent ]
43 description:
44 - Whether the module should be present or absent.
45 params:
46 required: false
47 default: ""
48 version_added: "1.6"
49 description:
50 - Modules parameters.
51 '''
52
53 EXAMPLES = '''
54 # Add the 802.1q module
55 - modprobe: name=8021q state=present
56 # Add the dummy module
57 - modprobe: name=dummy state=present params="numdummies=2"
58 '''
59
60 from ansible.module_utils.basic import *
61 from ansible.module_utils.pycompat24 import get_exception
62 import shlex
63
64
65 def main():
66 module = AnsibleModule(
67 argument_spec={
68 'name': {'required': True},
69 'state': {'default': 'present', 'choices': ['present', 'absent']},
70 'params': {'default': ''},
71 },
72 supports_check_mode=True,
73 )
74 args = {
75 'changed': False,
76 'failed': False,
77 'name': module.params['name'],
78 'state': module.params['state'],
79 'params': module.params['params'],
80 }
81
82 # Check if module is present
83 try:
84 modules = open('/proc/modules')
85 present = False
86 module_name = args['name'].replace('-', '_') + ' '
87 for line in modules:
88 if line.startswith(module_name):
89 present = True
90 break
91 modules.close()
92 except IOError:
93 e = get_exception()
94 module.fail_json(msg=str(e), **args)
95
96 # Check only; don't modify
97 if module.check_mode:
98 if args['state'] == 'present' and not present:
99 changed = True
100 elif args['state'] == 'absent' and present:
101 changed = True
102 else:
103 changed = False
104 module.exit_json(changed=changed)
105
106 # Add/remove module as needed
107 if args['state'] == 'present':
108 if not present:
109 command = [module.get_bin_path('modprobe', True), args['name']]
110 command.extend(shlex.split(args['params']))
111 rc, _, err = module.run_command(command)
112 if rc != 0:
113 module.fail_json(msg=err, **args)
114 args['changed'] = True
115 elif args['state'] == 'absent':
116 if present:
117 rc, _, err = module.run_command([module.get_bin_path('rmmod', True), args['name']])
118 if rc != 0:
119 module.fail_json(msg=err, **args)
120 args['changed'] = True
121
122 module.exit_json(**args)
123
124 main()
125
[end of system/modprobe.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> |
diff --git a/system/modprobe.py b/system/modprobe.py
--- a/system/modprobe.py
+++ b/system/modprobe.py
@@ -114,7 +114,7 @@
args['changed'] = True
elif args['state'] == 'absent':
if present:
- rc, _, err = module.run_command([module.get_bin_path('rmmod', True), args['name']])
+ rc, _, err = module.run_command([module.get_bin_path('modprobe', True), '-r', args['name']])
if rc != 0:
module.fail_json(msg=err, **args)
args['changed'] = True
| {"golden_diff": "diff --git a/system/modprobe.py b/system/modprobe.py\n--- a/system/modprobe.py\n+++ b/system/modprobe.py\n@@ -114,7 +114,7 @@\n args['changed'] = True\n elif args['state'] == 'absent':\n if present:\n- rc, _, err = module.run_command([module.get_bin_path('rmmod', True), args['name']])\n+ rc, _, err = module.run_command([module.get_bin_path('modprobe', True), '-r', args['name']])\n if rc != 0:\n module.fail_json(msg=err, **args)\n args['changed'] = True\n", "issue": "modprobe: Call 'modprobe -r' instasd of 'rmmod' for absent?\n##### ISSUE TYPE\n- Feature Idea\n##### COMPONENT NAME\n\nmodprobe\n##### ANSIBLE VERSION\n\n```\nansible 2.0.1.0\n config file =\n configured module search path = Default w/o overrides\n```\n##### CONFIGURATION\n\nN/A\n##### OS / ENVIRONMENT\n\nN/A (linux target)\n##### SUMMARY\n\nCurrent implementation of modprobe module uses `rmmod` command to remove kernel module.\nhttps://github.com/ansible/ansible-modules-extras/blob/stable-2.1/system/modprobe.py#L114\n\nWhy don't we use `modprobe -r` instead of `rmmod` here?\n\n`modprobe -r` would be better because; \n1. It will also unload unused modules\n2. Straight forward from module name\n##### STEPS TO REPRODUCE\n\nI was trying to unload sb_edac module from my server (since it conflict with some hardware monitoring of server), the module depends on edac_core and edac_core was loaded only for sb_edac.\n\nBefore applying playbook, on the target server.\n\n```\nserver# lsmod | grep edac\nsb_edac 28672 0\nedac_core 53248 1 sb_edac\n```\n\nplaybook (snippet)\n\n```\n- name: unload edac modules\n modprobe:\n name: sb_edac\n state: absent\n```\n##### EXPECTED RESULTS\n\nedac_core module unloaded, since it no longer be used.\n##### ACTUAL RESULTS\n\nAfter applying playbook, on the target server.\n\n```\nserver# lsmod | grep edac\nedac_core 53248 0\n```\n\n", "before_files": [{"content": "#!/usr/bin/python\n#coding: utf-8 -*-\n\n# (c) 2013, David Stygstra <[email protected]>\n#\n# This file is part of Ansible\n#\n# This module is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This software is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this software. 
If not, see <http://www.gnu.org/licenses/>.\n\n\nDOCUMENTATION = '''\n---\nmodule: modprobe\nshort_description: Add or remove kernel modules\nrequirements: []\nversion_added: 1.4\nauthor:\n - \"David Stygstra (@stygstra)\" \n - \"Julien Dauphant\"\n - \"Matt Jeffery\"\ndescription:\n - Add or remove kernel modules.\noptions:\n name:\n required: true\n description:\n - Name of kernel module to manage.\n state:\n required: false\n default: \"present\"\n choices: [ present, absent ]\n description:\n - Whether the module should be present or absent.\n params:\n required: false\n default: \"\"\n version_added: \"1.6\"\n description:\n - Modules parameters.\n'''\n\nEXAMPLES = '''\n# Add the 802.1q module\n- modprobe: name=8021q state=present\n# Add the dummy module\n- modprobe: name=dummy state=present params=\"numdummies=2\"\n'''\n\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.pycompat24 import get_exception\nimport shlex\n\n\ndef main():\n module = AnsibleModule(\n argument_spec={\n 'name': {'required': True},\n 'state': {'default': 'present', 'choices': ['present', 'absent']},\n 'params': {'default': ''},\n },\n supports_check_mode=True,\n )\n args = {\n 'changed': False,\n 'failed': False,\n 'name': module.params['name'],\n 'state': module.params['state'],\n 'params': module.params['params'],\n }\n\n # Check if module is present\n try:\n modules = open('/proc/modules')\n present = False\n module_name = args['name'].replace('-', '_') + ' '\n for line in modules:\n if line.startswith(module_name):\n present = True\n break\n modules.close()\n except IOError:\n e = get_exception()\n module.fail_json(msg=str(e), **args)\n\n # Check only; don't modify\n if module.check_mode:\n if args['state'] == 'present' and not present:\n changed = True\n elif args['state'] == 'absent' and present:\n changed = True\n else:\n changed = False\n module.exit_json(changed=changed)\n\n # Add/remove module as needed\n if args['state'] == 'present':\n if not present:\n command = [module.get_bin_path('modprobe', True), args['name']]\n command.extend(shlex.split(args['params']))\n rc, _, err = module.run_command(command)\n if rc != 0:\n module.fail_json(msg=err, **args)\n args['changed'] = True\n elif args['state'] == 'absent':\n if present:\n rc, _, err = module.run_command([module.get_bin_path('rmmod', True), args['name']])\n if rc != 0:\n module.fail_json(msg=err, **args)\n args['changed'] = True\n\n module.exit_json(**args)\n\nmain()\n", "path": "system/modprobe.py"}]} | 2,044 | 145 |
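The request comes down to dependency bookkeeping: `rmmod` removes only the named module, while `modprobe -r` also unloads dependencies whose reference count drops to zero. The toy Python model below illustrates that difference; it is an illustration only, not kernel or Ansible code:

```python
# Toy model of the behaviour the issue asks for. `loaded` maps each module
# to the modules it depends on; `refcount` counts how many users each has.
loaded = {"sb_edac": ["edac_core"], "edac_core": []}
refcount = {"sb_edac": 0, "edac_core": 1}  # edac_core used only by sb_edac

def modprobe_r(name):
    """Unload `name`, then any dependency whose refcount hits zero."""
    for dep in loaded.pop(name):
        refcount[dep] -= 1
        if refcount[dep] == 0:
            modprobe_r(dep)  # rmmod would stop after the first module

modprobe_r("sb_edac")
print(loaded)  # {} : both modules unloaded, matching the expected result
```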
gh_patches_debug_27118 | rasdani/github-patches | git_diff | google__flax-1570 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PReLU activation implementation
I wanted to gauge interest on adding a PReLU activation. I noticed that `flax.linen.activations` are simply aliasing `jax.nn` activation functions which also doesn't have a PReLU implementation.
To add some background, PReLU is simply Leaky ReLU where the alpha (slope) parameter is trainable and not fixed. This makes it simple to implement as a Module if desired.
Here's an example implementation from another [project](https://github.com/isaaccorley/jax-enhance) of mine.
```python
from functools import partial
from typing import Any, Sequence
import jax.numpy as jnp
import flax.linen as nn
# This is nearly identical to jnp.ones however multiplies the output of jnp.ones by the constant value
def constant(key, shape: Sequence[int], value: Any, dtype: Any = jnp.float32) -> jnp.ndarray:
value = jnp.asarray(value, dtype)
return jnp.ones(shape, dtype) * value
class PReLU(nn.Module):
negative_slope_init: float = 0.01
dtype: Any = jnp.float32
@nn.compact
def __call__(self, x: jnp.ndarray) -> jnp.ndarray:
x = jnp.asarray(x, self.dtype)
negative_slope = self.param(
"negative_slope",
partial(constant, value=self.negative_slope_init, dtype=self.dtype),
(1,)
)
return jnp.where(x >= 0, x, negative_slope * x)
```
</issue>
<code>
[start of flax/linen/__init__.py]
1 # Copyright 2021 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """The Flax Module system."""
16
17
18 # pylint: disable=g-multiple-import
19 # re-export commonly used modules and functions
20 from .activation import (celu, elu, gelu, glu, leaky_relu, log_sigmoid,
21 log_softmax, relu, sigmoid, soft_sign, softmax,
22 softplus, swish, silu, tanh)
23 from .attention import (MultiHeadDotProductAttention, SelfAttention,
24 dot_product_attention, make_attention_mask,
25 make_causal_mask, combine_masks)
26 from ..core import broadcast, DenyList, FrozenDict
27 from .linear import Conv, ConvTranspose, Dense, DenseGeneral, Embed
28 from .module import (Module, compact, nowrap, enable_named_call,
29 disable_named_call, override_named_call, Variable, init,
30 init_with_output, apply, merge_param)
31 from .normalization import BatchNorm, GroupNorm, LayerNorm
32 from .pooling import avg_pool, max_pool
33 from .recurrent import GRUCell, LSTMCell, ConvLSTM, OptimizedLSTMCell
34 from .stochastic import Dropout
35 from .transforms import jit, named_call, checkpoint, remat, remat_scan, scan, vmap, map_variables, vjp, jvp
36 from .initializers import zeros, ones
37
38 # pylint: enable=g-multiple-import
39
[end of flax/linen/__init__.py]
[start of flax/linen/activation.py]
1 # Copyright 2021 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Activation functions.
16 """
17
18 # pylint: disable=unused-import
19 # re-export activation functions from jax.nn
20 from jax.nn import celu
21 from jax.nn import elu
22 from jax.nn import gelu
23 from jax.nn import glu
24 from jax.nn import leaky_relu
25 from jax.nn import log_sigmoid
26 from jax.nn import log_softmax
27 from jax.nn import normalize
28 from jax.nn import relu
29 from jax.nn import sigmoid
30 from jax.nn import soft_sign
31 from jax.nn import softmax
32 from jax.nn import softplus
33 from jax.nn import swish
34 from jax.nn import silu
35 from jax.nn import selu
36 from jax.nn import hard_tanh
37 from jax.nn import relu6
38 from jax.nn import hard_sigmoid
39 from jax.nn import hard_swish
40
41 from jax.numpy import tanh
42 # pylint: enable=unused-import
43
[end of flax/linen/activation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> |
diff --git a/flax/linen/__init__.py b/flax/linen/__init__.py
--- a/flax/linen/__init__.py
+++ b/flax/linen/__init__.py
@@ -19,7 +19,7 @@
# re-export commonly used modules and functions
from .activation import (celu, elu, gelu, glu, leaky_relu, log_sigmoid,
log_softmax, relu, sigmoid, soft_sign, softmax,
- softplus, swish, silu, tanh)
+ softplus, swish, silu, tanh, PReLU)
from .attention import (MultiHeadDotProductAttention, SelfAttention,
dot_product_attention, make_attention_mask,
make_causal_mask, combine_masks)
diff --git a/flax/linen/activation.py b/flax/linen/activation.py
--- a/flax/linen/activation.py
+++ b/flax/linen/activation.py
@@ -40,3 +40,35 @@
from jax.numpy import tanh
# pylint: enable=unused-import
+
+from typing import Any
+
+from flax.linen.module import Module, compact
+import jax.numpy as jnp
+
+
+Array = Any
+
+
+class PReLU(Module):
+ """Parametric Rectified Linear Unit (PReLU) activation function.
+
+ Attributes:
+ negative_slope_init: the value to initialize the negative slope.
+ """
+ negative_slope_init: float = 0.01
+ @compact
+ def __call__(self, inputs: Array) -> Array:
+ """Applies an activation to the inputs.
+
+ Args:
+ inputs: the nd-array to apply the activation function to.
+
+ Returns:
+ The transformed input.
+ """
+ negative_slope = self.param(
+ 'negative_slope',
+ lambda k: jnp.asarray(self.negative_slope_init, jnp.float32)
+ )
+ return jnp.where(inputs >= 0, inputs, jnp.asarray(negative_slope, inputs.dtype) * inputs)
| {"golden_diff": "diff --git a/flax/linen/__init__.py b/flax/linen/__init__.py\n--- a/flax/linen/__init__.py\n+++ b/flax/linen/__init__.py\n@@ -19,7 +19,7 @@\n # re-export commonly used modules and functions\n from .activation import (celu, elu, gelu, glu, leaky_relu, log_sigmoid,\n log_softmax, relu, sigmoid, soft_sign, softmax,\n- softplus, swish, silu, tanh)\n+ softplus, swish, silu, tanh, PReLU)\n from .attention import (MultiHeadDotProductAttention, SelfAttention,\n dot_product_attention, make_attention_mask,\n make_causal_mask, combine_masks)\ndiff --git a/flax/linen/activation.py b/flax/linen/activation.py\n--- a/flax/linen/activation.py\n+++ b/flax/linen/activation.py\n@@ -40,3 +40,35 @@\n \n from jax.numpy import tanh\n # pylint: enable=unused-import\n+\n+from typing import Any\n+\n+from flax.linen.module import Module, compact\n+import jax.numpy as jnp\n+\n+\n+Array = Any\n+\n+\n+class PReLU(Module):\n+ \"\"\"Parametric Rectified Linear Unit (PReLU) activation function.\n+\n+ Attributes:\n+ negative_slope_init: the value to initialize the negative slope.\n+ \"\"\"\n+ negative_slope_init: float = 0.01\n+ @compact\n+ def __call__(self, inputs: Array) -> Array:\n+ \"\"\"Applies an activation to the inputs.\n+\n+ Args:\n+ inputs: the nd-array to apply the activation function to.\n+\n+ Returns:\n+ The transformed input.\n+ \"\"\"\n+ negative_slope = self.param(\n+ 'negative_slope',\n+ lambda k: jnp.asarray(self.negative_slope_init, jnp.float32)\n+ )\n+ return jnp.where(inputs >= 0, inputs, jnp.asarray(negative_slope, inputs.dtype) * inputs)\n", "issue": "PReLU activation implementation\nI wanted to gauge interest on adding a PReLU activation. I noticed that `flax.linen.activations` are simply aliasing `jax.nn` activation functions which also doesn't have a PReLU implementation.\r\n\r\nTo add some background, PReLU is simply Leaky ReLU where the alpha (slope) parameter is trainable and not fixed. This makes it simple to implement as a Module if desired.\r\n\r\nHere's an example implementation from another [project](https://github.com/isaaccorley/jax-enhance) of mine. 
\r\n\r\n```python\r\nfrom functools import partial\r\nfrom typing import Any, Sequence\r\n\r\nimport jax.numpy as jnp\r\nimport flax.linen as nn\r\n\r\n\r\n# This is nearly identical to jnp.ones however multiplies the output of jnp.ones by the constant value\r\ndef constant(key, shape: Sequence[int], value: Any, dtype: Any = jnp.float32) -> jnp.ndarray:\r\n value = jnp.asarray(value, dtype)\r\n return jnp.ones(shape, dtype) * value\r\n\r\n\r\nclass PReLU(nn.Module):\r\n negative_slope_init: float = 0.01\r\n dtype: Any = jnp.float32\r\n\r\n @nn.compact\r\n def __call__(self, x: jnp.ndarray) -> jnp.ndarray:\r\n x = jnp.asarray(x, self.dtype)\r\n negative_slope = self.param(\r\n \"negative_slope\",\r\n partial(constant, value=self.negative_slope_init, dtype=self.dtype),\r\n (1,)\r\n )\r\n return jnp.where(x >= 0, x, negative_slope * x)\r\n```\n", "before_files": [{"content": "# Copyright 2021 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"The Flax Module system.\"\"\"\n\n\n# pylint: disable=g-multiple-import\n# re-export commonly used modules and functions\nfrom .activation import (celu, elu, gelu, glu, leaky_relu, log_sigmoid,\n log_softmax, relu, sigmoid, soft_sign, softmax,\n softplus, swish, silu, tanh)\nfrom .attention import (MultiHeadDotProductAttention, SelfAttention,\n dot_product_attention, make_attention_mask,\n make_causal_mask, combine_masks)\nfrom ..core import broadcast, DenyList, FrozenDict\nfrom .linear import Conv, ConvTranspose, Dense, DenseGeneral, Embed\nfrom .module import (Module, compact, nowrap, enable_named_call,\n disable_named_call, override_named_call, Variable, init,\n init_with_output, apply, merge_param)\nfrom .normalization import BatchNorm, GroupNorm, LayerNorm\nfrom .pooling import avg_pool, max_pool\nfrom .recurrent import GRUCell, LSTMCell, ConvLSTM, OptimizedLSTMCell\nfrom .stochastic import Dropout\nfrom .transforms import jit, named_call, checkpoint, remat, remat_scan, scan, vmap, map_variables, vjp, jvp\nfrom .initializers import zeros, ones\n\n# pylint: enable=g-multiple-import\n", "path": "flax/linen/__init__.py"}, {"content": "# Copyright 2021 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Activation functions.\n\"\"\"\n\n# pylint: disable=unused-import\n# re-export activation functions from jax.nn\nfrom jax.nn import celu\nfrom jax.nn import elu\nfrom jax.nn import gelu\nfrom jax.nn import glu\nfrom jax.nn import leaky_relu\nfrom jax.nn import log_sigmoid\nfrom jax.nn import log_softmax\nfrom 
jax.nn import normalize\nfrom jax.nn import relu\nfrom jax.nn import sigmoid\nfrom jax.nn import soft_sign\nfrom jax.nn import softmax\nfrom jax.nn import softplus\nfrom jax.nn import swish\nfrom jax.nn import silu\nfrom jax.nn import selu\nfrom jax.nn import hard_tanh\nfrom jax.nn import relu6\nfrom jax.nn import hard_sigmoid\nfrom jax.nn import hard_swish\n\nfrom jax.numpy import tanh\n# pylint: enable=unused-import\n", "path": "flax/linen/activation.py"}]} | 1,798 | 467 |
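The golden diff adds a `PReLU` module whose `negative_slope` is a learnable scalar. A usage sketch following the standard `flax.linen` init/apply pattern, assuming a Flax build that includes the patched module:

```python
# Sketch only: assumes flax.linen.PReLU exists as added by the patch.
import jax
import jax.numpy as jnp
import flax.linen as nn

model = nn.PReLU(negative_slope_init=0.1)
x = jnp.array([-1.0, 0.0, 2.0])

params = model.init(jax.random.PRNGKey(0), x)  # negative_slope starts at 0.1
y = model.apply(params, x)
print(y)  # [-0.1  0.   2. ] : negative inputs scaled by the learned slope
```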
gh_patches_debug_12 | rasdani/github-patches | git_diff | OCHA-DAP__hdx-ckan-1737 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Shrink the map and related divs

</issue>
<code>
[start of ckanext-hdx_theme/ckanext/hdx_theme/version.py]
1 hdx_version = 'v0.4.8'
2
[end of ckanext-hdx_theme/ckanext/hdx_theme/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> |
diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py
--- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py
+++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py
@@ -1 +1 @@
-hdx_version = 'v0.4.8'
+hdx_version = 'v0.4.9'
| {"golden_diff": "diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n--- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n+++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n@@ -1 +1 @@\n-hdx_version = 'v0.4.8'\n+hdx_version = 'v0.4.9'\n", "issue": "Shrink the map and related divs\n\n\n", "before_files": [{"content": "hdx_version = 'v0.4.8'\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py"}]} | 640 | 107 |
gh_patches_debug_13118 | rasdani/github-patches | git_diff | netbox-community__netbox-3858 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Custom link in group does not render Jinja2 code
<!--
NOTE: This form is only for reproducible bugs. If you need assistance with
NetBox installation, or if you have a general question, DO NOT open an
issue. Instead, post to our mailing list:
https://groups.google.com/forum/#!forum/netbox-discuss
Please describe the environment in which you are running NetBox. Be sure
that you are running an unmodified instance of the latest stable release
before submitting a bug report.
-->
### Environment
* Python version: 3.6.8
* NetBox version: 2.6.11
<!--
Describe in detail the exact steps that someone else can take to reproduce
this bug using the current stable release of NetBox (or the current beta
release where applicable). Begin with the creation of any necessary
database objects and call out every operation being performed explicitly.
If reporting a bug in the REST API, be sure to reconstruct the raw HTTP
request(s) being made: Don't rely on a wrapper like pynetbox.
-->
### Steps to Reproduce
1. Create a custom link and add it to a group. Make the name use Jinja style code - example {% if True %} Custom Link 1{% endif %}
2. Open a device and look at the custom link
<!-- What did you expect to happen? -->
### Expected Behavior
Expected the link to use the Jinja2 code and show "Custom Link 1" as name.
<!-- What happened instead? -->
### Observed Behavior
The Jinja code is shown as the name. So the name is "{% if True %} Custom Link 1{% endif %}" instead of "Custom Link 1". The link itself is also not rendering Jinja code.
This seems to come from #3461 and due to the `text_rendered` and `link_rendered` not being used in the format statement on https://github.com/netbox-community/netbox/blob/b5455ed882c47273064933f0120b0e368af93a1f/netbox/extras/templatetags/custom_links.py#L72
I did a quick check and changed
```try:
text_rendered = render_jinja2(cl.text, context)
if text_rendered:
link_target = ' target="_blank"' if cl.new_window else ''
links_rendered.append(
GROUP_LINK.format(cl.url, link_target, cl.text)
)
```
to
```try:
text_rendered = render_jinja2(cl.text, context)
if text_rendered:
link_target = ' target="_blank"' if cl.new_window else ''
link_rendered = render_jinja2(cl.url, context)
links_rendered.append(
GROUP_LINK.format(link_rendered, link_target, text_rendered)
)
```
This made it work as expected again.
</issue>
<code>
[start of netbox/extras/templatetags/custom_links.py]
1 from collections import OrderedDict
2
3 from django import template
4 from django.contrib.contenttypes.models import ContentType
5 from django.utils.safestring import mark_safe
6
7 from extras.models import CustomLink
8 from utilities.utils import render_jinja2
9
10
11 register = template.Library()
12
13 LINK_BUTTON = '<a href="{}"{} class="btn btn-sm btn-{}">{}</a>\n'
14 GROUP_BUTTON = '<div class="btn-group">\n' \
15 '<button type="button" class="btn btn-sm btn-{} dropdown-toggle" data-toggle="dropdown">\n' \
16 '{} <span class="caret"></span>\n' \
17 '</button>\n' \
18 '<ul class="dropdown-menu pull-right">\n' \
19 '{}</ul></div>'
20 GROUP_LINK = '<li><a href="{}"{}>{}</a></li>\n'
21
22
23 @register.simple_tag()
24 def custom_links(obj):
25 """
26 Render all applicable links for the given object.
27 """
28 content_type = ContentType.objects.get_for_model(obj)
29 custom_links = CustomLink.objects.filter(content_type=content_type)
30 if not custom_links:
31 return ''
32
33 context = {
34 'obj': obj,
35 }
36 template_code = ''
37 group_names = OrderedDict()
38
39 for cl in custom_links:
40
41 # Organize custom links by group
42 if cl.group_name and cl.group_name in group_names:
43 group_names[cl.group_name].append(cl)
44 elif cl.group_name:
45 group_names[cl.group_name] = [cl]
46
47 # Add non-grouped links
48 else:
49 try:
50 text_rendered = render_jinja2(cl.text, context)
51 if text_rendered:
52 link_rendered = render_jinja2(cl.url, context)
53 link_target = ' target="_blank"' if cl.new_window else ''
54 template_code += LINK_BUTTON.format(
55 link_rendered, link_target, cl.button_class, text_rendered
56 )
57 except Exception as e:
58 template_code += '<a class="btn btn-sm btn-default" disabled="disabled" title="{}">' \
59 '<i class="fa fa-warning"></i> {}</a>\n'.format(e, cl.name)
60
61 # Add grouped links to template
62 for group, links in group_names.items():
63
64 links_rendered = []
65
66 for cl in links:
67 try:
68 text_rendered = render_jinja2(cl.text, context)
69 if text_rendered:
70 link_target = ' target="_blank"' if cl.new_window else ''
71 links_rendered.append(
72 GROUP_LINK.format(cl.url, link_target, cl.text)
73 )
74 except Exception as e:
75 links_rendered.append(
76 '<li><a disabled="disabled" title="{}"><span class="text-muted">'
77 '<i class="fa fa-warning"></i> {}</span></a></li>'.format(e, cl.name)
78 )
79
80 if links_rendered:
81 template_code += GROUP_BUTTON.format(
82 links[0].button_class, group, ''.join(links_rendered)
83 )
84
85 return mark_safe(template_code)
86
[end of netbox/extras/templatetags/custom_links.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> |
diff --git a/netbox/extras/templatetags/custom_links.py b/netbox/extras/templatetags/custom_links.py
--- a/netbox/extras/templatetags/custom_links.py
+++ b/netbox/extras/templatetags/custom_links.py
@@ -68,8 +68,9 @@
text_rendered = render_jinja2(cl.text, context)
if text_rendered:
link_target = ' target="_blank"' if cl.new_window else ''
+ link_rendered = render_jinja2(cl.url, context)
links_rendered.append(
- GROUP_LINK.format(cl.url, link_target, cl.text)
+ GROUP_LINK.format(link_rendered, link_target, text_rendered)
)
except Exception as e:
links_rendered.append(
| {"golden_diff": "diff --git a/netbox/extras/templatetags/custom_links.py b/netbox/extras/templatetags/custom_links.py\n--- a/netbox/extras/templatetags/custom_links.py\n+++ b/netbox/extras/templatetags/custom_links.py\n@@ -68,8 +68,9 @@\n text_rendered = render_jinja2(cl.text, context)\n if text_rendered:\n link_target = ' target=\"_blank\"' if cl.new_window else ''\n+ link_rendered = render_jinja2(cl.url, context)\n links_rendered.append(\n- GROUP_LINK.format(cl.url, link_target, cl.text)\n+ GROUP_LINK.format(link_rendered, link_target, text_rendered)\n )\n except Exception as e:\n links_rendered.append(\n", "issue": "Custom link in group does not render Jinja2 code\n<!--\r\n NOTE: This form is only for reproducible bugs. If you need assistance with\r\n NetBox installation, or if you have a general question, DO NOT open an\r\n issue. Instead, post to our mailing list:\r\n\r\n https://groups.google.com/forum/#!forum/netbox-discuss\r\n\r\n Please describe the environment in which you are running NetBox. Be sure\r\n that you are running an unmodified instance of the latest stable release\r\n before submitting a bug report.\r\n-->\r\n### Environment\r\n* Python version: 3.6.8\r\n* NetBox version: 2.6.11\r\n\r\n<!--\r\n Describe in detail the exact steps that someone else can take to reproduce\r\n this bug using the current stable release of NetBox (or the current beta\r\n release where applicable). Begin with the creation of any necessary\r\n database objects and call out every operation being performed explicitly.\r\n If reporting a bug in the REST API, be sure to reconstruct the raw HTTP\r\n request(s) being made: Don't rely on a wrapper like pynetbox.\r\n-->\r\n### Steps to Reproduce\r\n1. Create a custom link and add it to a group. Make the name use Jinja style code - example {% if True %} Custom Link 1{% endif %}\r\n2. Open a device and look at the custom link\r\n\r\n<!-- What did you expect to happen? -->\r\n### Expected Behavior\r\nExpected the link to use the Jinja2 code and show \"Custom Link 1\" as name.\r\n\r\n<!-- What happened instead? -->\r\n### Observed Behavior\r\nThe Jinja code is shown as the name. So the name is \"{% if True %} Custom Link 1{% endif %}\" instead of \"Custom Link 1\". 
The link itself is also not rendering Jinja code.\r\n\r\nThis seems to come from #3461 and due to the `text_rendered` and `link_rendered` not being used in the format statement on https://github.com/netbox-community/netbox/blob/b5455ed882c47273064933f0120b0e368af93a1f/netbox/extras/templatetags/custom_links.py#L72\r\n\r\nI did a quick check and changed \r\n```try:\r\n text_rendered = render_jinja2(cl.text, context)\r\n if text_rendered:\r\n link_target = ' target=\"_blank\"' if cl.new_window else ''\r\n links_rendered.append(\r\n GROUP_LINK.format(cl.url, link_target, cl.text)\r\n )\r\n```\r\nto\r\n\r\n```try:\r\n text_rendered = render_jinja2(cl.text, context)\r\n if text_rendered:\r\n link_target = ' target=\"_blank\"' if cl.new_window else ''\r\n link_rendered = render_jinja2(cl.url, context)\r\n links_rendered.append(\r\n GROUP_LINK.format(link_rendered, link_target, text_rendered)\r\n )\r\n```\r\n\r\nThis made it work as expected again.\n", "before_files": [{"content": "from collections import OrderedDict\n\nfrom django import template\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils.safestring import mark_safe\n\nfrom extras.models import CustomLink\nfrom utilities.utils import render_jinja2\n\n\nregister = template.Library()\n\nLINK_BUTTON = '<a href=\"{}\"{} class=\"btn btn-sm btn-{}\">{}</a>\\n'\nGROUP_BUTTON = '<div class=\"btn-group\">\\n' \\\n '<button type=\"button\" class=\"btn btn-sm btn-{} dropdown-toggle\" data-toggle=\"dropdown\">\\n' \\\n '{} <span class=\"caret\"></span>\\n' \\\n '</button>\\n' \\\n '<ul class=\"dropdown-menu pull-right\">\\n' \\\n '{}</ul></div>'\nGROUP_LINK = '<li><a href=\"{}\"{}>{}</a></li>\\n'\n\n\[email protected]_tag()\ndef custom_links(obj):\n \"\"\"\n Render all applicable links for the given object.\n \"\"\"\n content_type = ContentType.objects.get_for_model(obj)\n custom_links = CustomLink.objects.filter(content_type=content_type)\n if not custom_links:\n return ''\n\n context = {\n 'obj': obj,\n }\n template_code = ''\n group_names = OrderedDict()\n\n for cl in custom_links:\n\n # Organize custom links by group\n if cl.group_name and cl.group_name in group_names:\n group_names[cl.group_name].append(cl)\n elif cl.group_name:\n group_names[cl.group_name] = [cl]\n\n # Add non-grouped links\n else:\n try:\n text_rendered = render_jinja2(cl.text, context)\n if text_rendered:\n link_rendered = render_jinja2(cl.url, context)\n link_target = ' target=\"_blank\"' if cl.new_window else ''\n template_code += LINK_BUTTON.format(\n link_rendered, link_target, cl.button_class, text_rendered\n )\n except Exception as e:\n template_code += '<a class=\"btn btn-sm btn-default\" disabled=\"disabled\" title=\"{}\">' \\\n '<i class=\"fa fa-warning\"></i> {}</a>\\n'.format(e, cl.name)\n\n # Add grouped links to template\n for group, links in group_names.items():\n\n links_rendered = []\n\n for cl in links:\n try:\n text_rendered = render_jinja2(cl.text, context)\n if text_rendered:\n link_target = ' target=\"_blank\"' if cl.new_window else ''\n links_rendered.append(\n GROUP_LINK.format(cl.url, link_target, cl.text)\n )\n except Exception as e:\n links_rendered.append(\n '<li><a disabled=\"disabled\" title=\"{}\"><span class=\"text-muted\">'\n '<i class=\"fa fa-warning\"></i> {}</span></a></li>'.format(e, cl.name)\n )\n\n if links_rendered:\n template_code += GROUP_BUTTON.format(\n links[0].button_class, group, ''.join(links_rendered)\n )\n\n return mark_safe(template_code)\n", "path": "netbox/extras/templatetags/custom_links.py"}]} 
| 2,008 | 173 |
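An aside on the record above: a standalone sketch of the corrected grouped-link loop from that diff. `render_jinja2` is taken as a parameter purely to keep the snippet import-free; in NetBox itself it is imported from `utilities.utils`.

```python
GROUP_LINK = '<li><a href="{}"{}>{}</a></li>\n'

def render_group_links(links, context, render_jinja2):
    """Render grouped custom links, templating both the text and the URL."""
    links_rendered = []
    for cl in links:
        try:
            text_rendered = render_jinja2(cl.text, context)
            if text_rendered:
                link_target = ' target="_blank"' if cl.new_window else ''
                # The fix: render the URL through Jinja2 as well, and format
                # with the *rendered* values rather than the raw templates.
                link_rendered = render_jinja2(cl.url, context)
                links_rendered.append(
                    GROUP_LINK.format(link_rendered, link_target, text_rendered)
                )
        except Exception as e:
            links_rendered.append(
                '<li><a disabled="disabled" title="{}">'
                '<i class="fa fa-warning"></i> {}</a></li>'.format(e, cl.name)
            )
    return links_rendered
```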
gh_patches_debug_2899 | rasdani/github-patches | git_diff | hydroshare__hydroshare-4798 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Change Mezzanine Form to Disallow Username Changes
**Describe the feature you'd like and what it will do**
The internal Mezzanine form for the admin account should be altered to no longer allow username changes if possible, since changing a username breaks the user's resources.
**Why is this feature important?**
Going forward, the policy will be that usernames cannot be changed.
I will work with Scott to see if this change is feasible.
</issue>
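A minimal sketch of the least invasive route in Django's admin, marking the field read-only on `UserAdmin`. This is exactly what the diff at the end of this record does:

```python
from django.contrib.auth.admin import UserAdmin

# Render the username as non-editable text in the admin change form,
# using Django's standard ModelAdmin.readonly_fields mechanism.
UserAdmin.readonly_fields = ('username',)
```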
<code>
[start of hs_core/admin.py]
1 from django import forms
2 from django.contrib.auth.admin import UserAdmin
3 from django.contrib.auth.forms import UserCreationForm
4 from django.contrib.gis import admin
5 from django.contrib.contenttypes.admin import GenericTabularInline
6 from django.utils.translation import ugettext_lazy as _
7
8 from .models import *
9
10
11 class UserCreationFormExtended(UserCreationForm):
12 def __init__(self, *args, **kwargs):
13 super(UserCreationFormExtended, self).__init__(*args, **kwargs)
14 self.fields['email'] = forms.EmailField(label=_("E-mail"), max_length=75)
15
16 UserAdmin.add_form = UserCreationFormExtended
17 UserAdmin.add_fieldsets = (
18 (None, {
19 'classes': ('wide',),
20 'fields': ('email', 'username', 'password1', 'password2',)
21 }),
22 )
23 UserAdmin.list_display = [
24 'username', 'email', 'first_name', 'last_name', 'is_staff',
25 'is_active', 'date_joined', 'last_login'
26 ]
27
28 class InlineResourceFiles(GenericTabularInline):
29 model = ResourceFile
30
31 admin.site.unregister(User)
32 admin.site.register(User, UserAdmin)
33 admin.site.unregister(GenericResource)
34
[end of hs_core/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hs_core/admin.py b/hs_core/admin.py
--- a/hs_core/admin.py
+++ b/hs_core/admin.py
@@ -14,6 +14,7 @@
self.fields['email'] = forms.EmailField(label=_("E-mail"), max_length=75)
UserAdmin.add_form = UserCreationFormExtended
+UserAdmin.readonly_fields = ('username',)
UserAdmin.add_fieldsets = (
(None, {
'classes': ('wide',),
| {"golden_diff": "diff --git a/hs_core/admin.py b/hs_core/admin.py\n--- a/hs_core/admin.py\n+++ b/hs_core/admin.py\n@@ -14,6 +14,7 @@\n self.fields['email'] = forms.EmailField(label=_(\"E-mail\"), max_length=75)\n \n UserAdmin.add_form = UserCreationFormExtended\n+UserAdmin.readonly_fields = ('username',)\n UserAdmin.add_fieldsets = (\n (None, {\n 'classes': ('wide',),\n", "issue": "Change Mezzanine Form to Disallow Username Changes\n**Describe the feature you'd like and what it will do**\r\nThe internal mezzanine form for the admin account should be altered to no longer allow username changes if possible, since doing so breaks the resource. \r\n\r\n\r\n**Why is this feature important?**\r\nThis will be policy moving forward, that usernames cannot be changed. \r\nI will work with Scott to see if this change is feasible. \r\n\n", "before_files": [{"content": "from django import forms\nfrom django.contrib.auth.admin import UserAdmin\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.gis import admin\nfrom django.contrib.contenttypes.admin import GenericTabularInline\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom .models import *\n\n\nclass UserCreationFormExtended(UserCreationForm):\n def __init__(self, *args, **kwargs):\n super(UserCreationFormExtended, self).__init__(*args, **kwargs)\n self.fields['email'] = forms.EmailField(label=_(\"E-mail\"), max_length=75)\n\nUserAdmin.add_form = UserCreationFormExtended\nUserAdmin.add_fieldsets = (\n (None, {\n 'classes': ('wide',),\n 'fields': ('email', 'username', 'password1', 'password2',)\n }),\n)\nUserAdmin.list_display = [\n 'username', 'email', 'first_name', 'last_name', 'is_staff',\n 'is_active', 'date_joined', 'last_login'\n]\n\nclass InlineResourceFiles(GenericTabularInline):\n model = ResourceFile\n\nadmin.site.unregister(User)\nadmin.site.register(User, UserAdmin)\nadmin.site.unregister(GenericResource)\n", "path": "hs_core/admin.py"}]} | 937 | 107 |
gh_patches_debug_37672 | rasdani/github-patches | git_diff | open-mmlab__mmpose-1139 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
'ConcatDataset' is not available in MMPOSE
My config is something like the following. I want to use 'ConcatDataset' just as it is used in MMDETECTION and MMSEGMENTATION. However, 'ConcatDataset' is not available in MMPOSE
```
# myconfig.py
data = dict(
train=dict(
type = 'ConcatDataset',
datasets = [dict(
type='BottomUpCocoDataset',
ann_file='data/keypoints/2021-11-02-bwrat_side6-kp_val.json',
img_prefix='data/keypoints/2021-11-02-bwrat_side6-kp/',
data_cfg=data_cfg,
pipeline=test_pipeline,
dataset_info=dataset_info),
dict(
type='BottomUpCocoDataset',
ann_file='data/keypoints/2021-11-24-wrat_side6-kp_val.json',
img_prefix='data/keypoints/2021-11-24-wrat_side6-kp/',
data_cfg=data_cfg,
pipeline=val_pipeline,
dataset_info=dataset_info)
])
)
```
</issue>
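Before the code, a hedged sketch of the dispatch the request implies: recognize `type='ConcatDataset'` and wrap the individually built child datasets in torch's `ConcatDataset`. The explicit `registry` parameter is an assumption made only to keep the snippet self-contained; the real builder below uses its module-level `DATASETS` registry.

```python
from mmcv.utils import build_from_cfg
from torch.utils.data.dataset import ConcatDataset

def build_dataset(cfg, registry, default_args=None):
    """Build a dataset, additionally accepting a 'ConcatDataset' wrapper cfg."""
    if cfg['type'] == 'ConcatDataset':
        # Build each child config recursively, then concatenate them.
        return ConcatDataset(
            [build_dataset(c, registry, default_args) for c in cfg['datasets']])
    return build_from_cfg(cfg, registry, default_args)
```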
<code>
[start of mmpose/datasets/builder.py]
1 # Copyright (c) OpenMMLab. All rights reserved.
2 import platform
3 import random
4 from functools import partial
5
6 import numpy as np
7 from mmcv.parallel import collate
8 from mmcv.runner import get_dist_info
9 from mmcv.utils import Registry, build_from_cfg
10 from mmcv.utils.parrots_wrapper import _get_dataloader
11
12 from .samplers import DistributedSampler
13
14 if platform.system() != 'Windows':
15 # https://github.com/pytorch/pytorch/issues/973
16 import resource
17 rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
18 base_soft_limit = rlimit[0]
19 hard_limit = rlimit[1]
20 soft_limit = min(max(4096, base_soft_limit), hard_limit)
21 resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
22
23 DATASETS = Registry('dataset')
24 PIPELINES = Registry('pipeline')
25
26
27 def build_dataset(cfg, default_args=None):
28 """Build a dataset from config dict.
29
30 Args:
31 cfg (dict): Config dict. It should at least contain the key "type".
32 default_args (dict, optional): Default initialization arguments.
33 Default: None.
34
35 Returns:
36 Dataset: The constructed dataset.
37 """
38 from .dataset_wrappers import RepeatDataset
39
40 if cfg['type'] == 'RepeatDataset':
41 dataset = RepeatDataset(
42 build_dataset(cfg['dataset'], default_args), cfg['times'])
43 else:
44 dataset = build_from_cfg(cfg, DATASETS, default_args)
45 return dataset
46
47
48 def build_dataloader(dataset,
49 samples_per_gpu,
50 workers_per_gpu,
51 num_gpus=1,
52 dist=True,
53 shuffle=True,
54 seed=None,
55 drop_last=True,
56 pin_memory=True,
57 **kwargs):
58 """Build PyTorch DataLoader.
59
60 In distributed training, each GPU/process has a dataloader.
61 In non-distributed training, there is only one dataloader for all GPUs.
62
63 Args:
64 dataset (Dataset): A PyTorch dataset.
65 samples_per_gpu (int): Number of training samples on each GPU, i.e.,
66 batch size of each GPU.
67 workers_per_gpu (int): How many subprocesses to use for data loading
68 for each GPU.
69 num_gpus (int): Number of GPUs. Only used in non-distributed training.
70 dist (bool): Distributed training/test or not. Default: True.
71 shuffle (bool): Whether to shuffle the data at every epoch.
72 Default: True.
73 drop_last (bool): Whether to drop the last incomplete batch in epoch.
74 Default: True
75 pin_memory (bool): Whether to use pin_memory in DataLoader.
76 Default: True
77 kwargs: any keyword argument to be used to initialize DataLoader
78
79 Returns:
80 DataLoader: A PyTorch dataloader.
81 """
82 rank, world_size = get_dist_info()
83 if dist:
84 sampler = DistributedSampler(
85 dataset, world_size, rank, shuffle=shuffle, seed=seed)
86 shuffle = False
87 batch_size = samples_per_gpu
88 num_workers = workers_per_gpu
89 else:
90 sampler = None
91 batch_size = num_gpus * samples_per_gpu
92 num_workers = num_gpus * workers_per_gpu
93
94 init_fn = partial(
95 worker_init_fn, num_workers=num_workers, rank=rank,
96 seed=seed) if seed is not None else None
97
98 _, DataLoader = _get_dataloader()
99 data_loader = DataLoader(
100 dataset,
101 batch_size=batch_size,
102 sampler=sampler,
103 num_workers=num_workers,
104 collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
105 pin_memory=pin_memory,
106 shuffle=shuffle,
107 worker_init_fn=init_fn,
108 drop_last=drop_last,
109 **kwargs)
110
111 return data_loader
112
113
114 def worker_init_fn(worker_id, num_workers, rank, seed):
115 """Init the random seed for various workers."""
116 # The seed of each worker equals to
117 # num_worker * rank + worker_id + user_seed
118 worker_seed = num_workers * rank + worker_id + seed
119 np.random.seed(worker_seed)
120 random.seed(worker_seed)
121
[end of mmpose/datasets/builder.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mmpose/datasets/builder.py b/mmpose/datasets/builder.py
--- a/mmpose/datasets/builder.py
+++ b/mmpose/datasets/builder.py
@@ -1,4 +1,5 @@
# Copyright (c) OpenMMLab. All rights reserved.
+import copy
import platform
import random
from functools import partial
@@ -6,8 +7,9 @@
import numpy as np
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
-from mmcv.utils import Registry, build_from_cfg
+from mmcv.utils import Registry, build_from_cfg, is_seq_of
from mmcv.utils.parrots_wrapper import _get_dataloader
+from torch.utils.data.dataset import ConcatDataset
from .samplers import DistributedSampler
@@ -24,6 +26,39 @@
PIPELINES = Registry('pipeline')
+def _concat_dataset(cfg, default_args=None):
+ types = cfg['type']
+ ann_files = cfg['ann_file']
+ img_prefixes = cfg.get('img_prefix', None)
+ dataset_infos = cfg.get('dataset_info', None)
+
+ num_joints = cfg['data_cfg'].get('num_joints', None)
+ dataset_channel = cfg['data_cfg'].get('dataset_channel', None)
+
+ datasets = []
+ num_dset = len(ann_files)
+ for i in range(num_dset):
+ cfg_copy = copy.deepcopy(cfg)
+ cfg_copy['ann_file'] = ann_files[i]
+
+ if isinstance(types, (list, tuple)):
+ cfg_copy['type'] = types[i]
+ if isinstance(img_prefixes, (list, tuple)):
+ cfg_copy['img_prefix'] = img_prefixes[i]
+ if isinstance(dataset_infos, (list, tuple)):
+ cfg_copy['dataset_info'] = dataset_infos[i]
+
+ if isinstance(num_joints, (list, tuple)):
+ cfg_copy['data_cfg']['num_joints'] = num_joints[i]
+
+ if is_seq_of(dataset_channel, list):
+ cfg_copy['data_cfg']['dataset_channel'] = dataset_channel[i]
+
+ datasets.append(build_dataset(cfg_copy, default_args))
+
+ return ConcatDataset(datasets)
+
+
def build_dataset(cfg, default_args=None):
"""Build a dataset from config dict.
@@ -37,9 +72,16 @@
"""
from .dataset_wrappers import RepeatDataset
- if cfg['type'] == 'RepeatDataset':
+ if isinstance(cfg, (list, tuple)):
+ dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
+ elif cfg['type'] == 'ConcatDataset':
+ dataset = ConcatDataset(
+ [build_dataset(c, default_args) for c in cfg['datasets']])
+ elif cfg['type'] == 'RepeatDataset':
dataset = RepeatDataset(
build_dataset(cfg['dataset'], default_args), cfg['times'])
+ elif isinstance(cfg.get('ann_file'), (list, tuple)):
+ dataset = _concat_dataset(cfg, default_args)
else:
dataset = build_from_cfg(cfg, DATASETS, default_args)
return dataset
| {"golden_diff": "diff --git a/mmpose/datasets/builder.py b/mmpose/datasets/builder.py\n--- a/mmpose/datasets/builder.py\n+++ b/mmpose/datasets/builder.py\n@@ -1,4 +1,5 @@\n # Copyright (c) OpenMMLab. All rights reserved.\n+import copy\n import platform\n import random\n from functools import partial\n@@ -6,8 +7,9 @@\n import numpy as np\n from mmcv.parallel import collate\n from mmcv.runner import get_dist_info\n-from mmcv.utils import Registry, build_from_cfg\n+from mmcv.utils import Registry, build_from_cfg, is_seq_of\n from mmcv.utils.parrots_wrapper import _get_dataloader\n+from torch.utils.data.dataset import ConcatDataset\n \n from .samplers import DistributedSampler\n \n@@ -24,6 +26,39 @@\n PIPELINES = Registry('pipeline')\n \n \n+def _concat_dataset(cfg, default_args=None):\n+ types = cfg['type']\n+ ann_files = cfg['ann_file']\n+ img_prefixes = cfg.get('img_prefix', None)\n+ dataset_infos = cfg.get('dataset_info', None)\n+\n+ num_joints = cfg['data_cfg'].get('num_joints', None)\n+ dataset_channel = cfg['data_cfg'].get('dataset_channel', None)\n+\n+ datasets = []\n+ num_dset = len(ann_files)\n+ for i in range(num_dset):\n+ cfg_copy = copy.deepcopy(cfg)\n+ cfg_copy['ann_file'] = ann_files[i]\n+\n+ if isinstance(types, (list, tuple)):\n+ cfg_copy['type'] = types[i]\n+ if isinstance(img_prefixes, (list, tuple)):\n+ cfg_copy['img_prefix'] = img_prefixes[i]\n+ if isinstance(dataset_infos, (list, tuple)):\n+ cfg_copy['dataset_info'] = dataset_infos[i]\n+\n+ if isinstance(num_joints, (list, tuple)):\n+ cfg_copy['data_cfg']['num_joints'] = num_joints[i]\n+\n+ if is_seq_of(dataset_channel, list):\n+ cfg_copy['data_cfg']['dataset_channel'] = dataset_channel[i]\n+\n+ datasets.append(build_dataset(cfg_copy, default_args))\n+\n+ return ConcatDataset(datasets)\n+\n+\n def build_dataset(cfg, default_args=None):\n \"\"\"Build a dataset from config dict.\n \n@@ -37,9 +72,16 @@\n \"\"\"\n from .dataset_wrappers import RepeatDataset\n \n- if cfg['type'] == 'RepeatDataset':\n+ if isinstance(cfg, (list, tuple)):\n+ dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])\n+ elif cfg['type'] == 'ConcatDataset':\n+ dataset = ConcatDataset(\n+ [build_dataset(c, default_args) for c in cfg['datasets']])\n+ elif cfg['type'] == 'RepeatDataset':\n dataset = RepeatDataset(\n build_dataset(cfg['dataset'], default_args), cfg['times'])\n+ elif isinstance(cfg.get('ann_file'), (list, tuple)):\n+ dataset = _concat_dataset(cfg, default_args)\n else:\n dataset = build_from_cfg(cfg, DATASETS, default_args)\n return dataset\n", "issue": "'ConcatDataset' is not available in MMPOSE\nMy config is something like this. I want to use the 'ConcatDataset' just like it in the MMDETECTION and MMSEGMENT. However, the 'ConcatDataset' is not available in MMPOSE\r\n```\r\n# myconfig.py\r\ndata = dict(\r\n train=dict(\r\n type = 'ConcatDataset',\r\n datasets = [dict(\r\n type='BottomUpCocoDataset',\r\n ann_file='data/keypoints/2021-11-02-bwrat_side6-kp_val.json',\r\n img_prefix='data/keypoints/2021-11-02-bwrat_side6-kp/',\r\n data_cfg=data_cfg,\r\n pipeline=test_pipeline,\r\n dataset_info=dataset_info),\r\n dict(\r\n type='BottomUpCocoDataset',\r\n ann_file='data/keypoints/2021-11-24-wrat_side6-kp_val.json',\r\n img_prefix='data/keypoints/2021-11-24-wrat_side6-kp/',\r\n data_cfg=data_cfg,\r\n pipeline=val_pipeline,\r\n dataset_info=dataset_info)\r\n ])\r\n)\r\n```\n", "before_files": [{"content": "# Copyright (c) OpenMMLab. 
All rights reserved.\nimport platform\nimport random\nfrom functools import partial\n\nimport numpy as np\nfrom mmcv.parallel import collate\nfrom mmcv.runner import get_dist_info\nfrom mmcv.utils import Registry, build_from_cfg\nfrom mmcv.utils.parrots_wrapper import _get_dataloader\n\nfrom .samplers import DistributedSampler\n\nif platform.system() != 'Windows':\n # https://github.com/pytorch/pytorch/issues/973\n import resource\n rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)\n base_soft_limit = rlimit[0]\n hard_limit = rlimit[1]\n soft_limit = min(max(4096, base_soft_limit), hard_limit)\n resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))\n\nDATASETS = Registry('dataset')\nPIPELINES = Registry('pipeline')\n\n\ndef build_dataset(cfg, default_args=None):\n \"\"\"Build a dataset from config dict.\n\n Args:\n cfg (dict): Config dict. It should at least contain the key \"type\".\n default_args (dict, optional): Default initialization arguments.\n Default: None.\n\n Returns:\n Dataset: The constructed dataset.\n \"\"\"\n from .dataset_wrappers import RepeatDataset\n\n if cfg['type'] == 'RepeatDataset':\n dataset = RepeatDataset(\n build_dataset(cfg['dataset'], default_args), cfg['times'])\n else:\n dataset = build_from_cfg(cfg, DATASETS, default_args)\n return dataset\n\n\ndef build_dataloader(dataset,\n samples_per_gpu,\n workers_per_gpu,\n num_gpus=1,\n dist=True,\n shuffle=True,\n seed=None,\n drop_last=True,\n pin_memory=True,\n **kwargs):\n \"\"\"Build PyTorch DataLoader.\n\n In distributed training, each GPU/process has a dataloader.\n In non-distributed training, there is only one dataloader for all GPUs.\n\n Args:\n dataset (Dataset): A PyTorch dataset.\n samples_per_gpu (int): Number of training samples on each GPU, i.e.,\n batch size of each GPU.\n workers_per_gpu (int): How many subprocesses to use for data loading\n for each GPU.\n num_gpus (int): Number of GPUs. Only used in non-distributed training.\n dist (bool): Distributed training/test or not. 
Default: True.\n shuffle (bool): Whether to shuffle the data at every epoch.\n Default: True.\n drop_last (bool): Whether to drop the last incomplete batch in epoch.\n Default: True\n pin_memory (bool): Whether to use pin_memory in DataLoader.\n Default: True\n kwargs: any keyword argument to be used to initialize DataLoader\n\n Returns:\n DataLoader: A PyTorch dataloader.\n \"\"\"\n rank, world_size = get_dist_info()\n if dist:\n sampler = DistributedSampler(\n dataset, world_size, rank, shuffle=shuffle, seed=seed)\n shuffle = False\n batch_size = samples_per_gpu\n num_workers = workers_per_gpu\n else:\n sampler = None\n batch_size = num_gpus * samples_per_gpu\n num_workers = num_gpus * workers_per_gpu\n\n init_fn = partial(\n worker_init_fn, num_workers=num_workers, rank=rank,\n seed=seed) if seed is not None else None\n\n _, DataLoader = _get_dataloader()\n data_loader = DataLoader(\n dataset,\n batch_size=batch_size,\n sampler=sampler,\n num_workers=num_workers,\n collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),\n pin_memory=pin_memory,\n shuffle=shuffle,\n worker_init_fn=init_fn,\n drop_last=drop_last,\n **kwargs)\n\n return data_loader\n\n\ndef worker_init_fn(worker_id, num_workers, rank, seed):\n \"\"\"Init the random seed for various workers.\"\"\"\n # The seed of each worker equals to\n # num_worker * rank + worker_id + user_seed\n worker_seed = num_workers * rank + worker_id + seed\n np.random.seed(worker_seed)\n random.seed(worker_seed)\n", "path": "mmpose/datasets/builder.py"}]} | 1,949 | 700 |
gh_patches_debug_37878 | rasdani/github-patches | git_diff | opsdroid__opsdroid-34 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Check for base classes instead of name when importing connectors and databases
Currently you must give your connector or database class a specific name for it to be imported.
Now that these classes are expected to inherit from the base classes, we can test with `isinstance(obj, BaseClass)` instead.
</issue>
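A hedged sketch of one way to detect candidates by base class with `inspect`. The golden diff at the end of this record takes a different route, instantiating each candidate and checking `isinstance(cls({}), Connector)`, but both replace the fragile name test:

```python
import inspect

def find_subclasses(module, base_cls):
    """Yield classes defined in *module* that derive from *base_cls*."""
    for _, cls in inspect.getmembers(module, inspect.isclass):
        if issubclass(cls, base_cls) and cls is not base_cls:
            yield cls
```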
<code>
[start of opsdroid/core.py]
1 """Core components of OpsDroid."""
2
3 import logging
4 import sys
5 import weakref
6 from multiprocessing import Process
7 from opsdroid.helper import match
8 from opsdroid.memory import Memory
9
10
11 class OpsDroid():
12 """Root object for opsdroid."""
13
14 instances = []
15
16 def __init__(self):
17 """Start opsdroid."""
18 self.bot_name = 'opsdroid'
19 self.sys_status = 0
20 self.connectors = []
21 self.connector_jobs = []
22 self.skills = []
23 self.memory = Memory()
24 logging.info("Created main opsdroid object")
25
26 def __enter__(self):
27 """Add self to existing instances."""
28 if len(self.__class__.instances) == 0:
29 self.__class__.instances.append(weakref.proxy(self))
30 else:
31 self.critical("opsdroid has already been started", 1)
32 return self
33
34 def __exit__(self, exc_type, exc_value, traceback):
35 """Remove self from existing instances."""
36 self.__class__.instances = []
37
38 def exit(self):
39 """Exit application."""
40 logging.info("Exiting application with return code " +
41 str(self.sys_status))
42 sys.exit(self.sys_status)
43
44 def critical(self, error, code):
45 """Exit due to unrecoverable error."""
46 self.sys_status = code
47 logging.critical(error)
48 print("Error: " + error)
49 self.exit()
50
51 def start_connectors(self, connectors):
52 """Start the connectors."""
53 if len(connectors) == 0:
54 self.critical("All connectors failed to load", 1)
55 elif len(connectors) == 1:
56 for name, cls in connectors[0]["module"].__dict__.items():
57 if isinstance(cls, type) and "Connector" in name:
58 connectors[0]["config"]["bot-name"] = self.bot_name
59 connector = cls(connectors[0]["config"])
60 self.connectors.append(connector)
61 connector.connect(self)
62 else:
63 for connector_module in connectors:
64 for name, cls in connector_module["module"].__dict__.items():
65 if isinstance(cls, type) and "Connector" in name:
66 connector_module["config"]["bot-name"] = self.bot_name
67 connector = cls(connector_module["config"])
68 self.connectors.append(connector)
69 job = Process(target=connector.connect, args=(self,))
70 job.start()
71 self.connector_jobs.append(job)
72 for job in self.connector_jobs:
73 job.join()
74
75 def start_databases(self, databases):
76 """Start the databases."""
77 if len(databases) == 0:
78 logging.warning("All databases failed to load")
79 for database_module in databases:
80 for name, cls in database_module["module"].__dict__.items():
81 if isinstance(cls, type) and "Database" in name:
82 logging.debug("Adding database: " + name)
83 database = cls(database_module["config"])
84 self.memory.databases.append(database)
85 database.connect()
86
87 def load_regex_skill(self, regex, skill):
88 """Load skills."""
89 self.skills.append({"regex": regex, "skill": skill})
90
91 def parse(self, message):
92 """Parse a string against all skills."""
93 if message.text.strip() != "":
94 logging.debug("Parsing input: " + message.text)
95 for skill in self.skills:
96 if "regex" in skill:
97 regex = match(skill["regex"], message.text)
98 if regex:
99 message.regex = regex
100 skill["skill"](self, message)
101
[end of opsdroid/core.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opsdroid/core.py b/opsdroid/core.py
--- a/opsdroid/core.py
+++ b/opsdroid/core.py
@@ -6,6 +6,8 @@
from multiprocessing import Process
from opsdroid.helper import match
from opsdroid.memory import Memory
+from opsdroid.connector import Connector
+from opsdroid.database import Database
class OpsDroid():
@@ -54,7 +56,9 @@
self.critical("All connectors failed to load", 1)
elif len(connectors) == 1:
for name, cls in connectors[0]["module"].__dict__.items():
- if isinstance(cls, type) and "Connector" in name:
+ if isinstance(cls, type) and \
+ isinstance(cls({}), Connector):
+ logging.debug("Adding connector: " + name)
connectors[0]["config"]["bot-name"] = self.bot_name
connector = cls(connectors[0]["config"])
self.connectors.append(connector)
@@ -62,7 +66,8 @@
else:
for connector_module in connectors:
for name, cls in connector_module["module"].__dict__.items():
- if isinstance(cls, type) and "Connector" in name:
+ if isinstance(cls, type) and \
+ isinstance(cls({}), Connector):
connector_module["config"]["bot-name"] = self.bot_name
connector = cls(connector_module["config"])
self.connectors.append(connector)
@@ -78,11 +83,12 @@
logging.warning("All databases failed to load")
for database_module in databases:
for name, cls in database_module["module"].__dict__.items():
- if isinstance(cls, type) and "Database" in name:
+ if isinstance(cls, type) and \
+ isinstance(cls({}), Database):
logging.debug("Adding database: " + name)
database = cls(database_module["config"])
self.memory.databases.append(database)
- database.connect()
+ database.connect(self)
def load_regex_skill(self, regex, skill):
"""Load skills."""
| {"golden_diff": "diff --git a/opsdroid/core.py b/opsdroid/core.py\n--- a/opsdroid/core.py\n+++ b/opsdroid/core.py\n@@ -6,6 +6,8 @@\n from multiprocessing import Process\n from opsdroid.helper import match\n from opsdroid.memory import Memory\n+from opsdroid.connector import Connector\n+from opsdroid.database import Database\n \n \n class OpsDroid():\n@@ -54,7 +56,9 @@\n self.critical(\"All connectors failed to load\", 1)\n elif len(connectors) == 1:\n for name, cls in connectors[0][\"module\"].__dict__.items():\n- if isinstance(cls, type) and \"Connector\" in name:\n+ if isinstance(cls, type) and \\\n+ isinstance(cls({}), Connector):\n+ logging.debug(\"Adding connector: \" + name)\n connectors[0][\"config\"][\"bot-name\"] = self.bot_name\n connector = cls(connectors[0][\"config\"])\n self.connectors.append(connector)\n@@ -62,7 +66,8 @@\n else:\n for connector_module in connectors:\n for name, cls in connector_module[\"module\"].__dict__.items():\n- if isinstance(cls, type) and \"Connector\" in name:\n+ if isinstance(cls, type) and \\\n+ isinstance(cls({}), Connector):\n connector_module[\"config\"][\"bot-name\"] = self.bot_name\n connector = cls(connector_module[\"config\"])\n self.connectors.append(connector)\n@@ -78,11 +83,12 @@\n logging.warning(\"All databases failed to load\")\n for database_module in databases:\n for name, cls in database_module[\"module\"].__dict__.items():\n- if isinstance(cls, type) and \"Database\" in name:\n+ if isinstance(cls, type) and \\\n+ isinstance(cls({}), Database):\n logging.debug(\"Adding database: \" + name)\n database = cls(database_module[\"config\"])\n self.memory.databases.append(database)\n- database.connect()\n+ database.connect(self)\n \n def load_regex_skill(self, regex, skill):\n \"\"\"Load skills.\"\"\"\n", "issue": "Check for base classes instead of name when importing connectors and databases\nCurrently you must give your connector or database class a specific name for them to be imported.\n\nNow that these classes should inherit from the base classes we can test for `isinstance(baseclass)` instead.\n\n", "before_files": [{"content": "\"\"\"Core components of OpsDroid.\"\"\"\n\nimport logging\nimport sys\nimport weakref\nfrom multiprocessing import Process\nfrom opsdroid.helper import match\nfrom opsdroid.memory import Memory\n\n\nclass OpsDroid():\n \"\"\"Root object for opsdroid.\"\"\"\n\n instances = []\n\n def __init__(self):\n \"\"\"Start opsdroid.\"\"\"\n self.bot_name = 'opsdroid'\n self.sys_status = 0\n self.connectors = []\n self.connector_jobs = []\n self.skills = []\n self.memory = Memory()\n logging.info(\"Created main opsdroid object\")\n\n def __enter__(self):\n \"\"\"Add self to existing instances.\"\"\"\n if len(self.__class__.instances) == 0:\n self.__class__.instances.append(weakref.proxy(self))\n else:\n self.critical(\"opsdroid has already been started\", 1)\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n \"\"\"Remove self from existing instances.\"\"\"\n self.__class__.instances = []\n\n def exit(self):\n \"\"\"Exit application.\"\"\"\n logging.info(\"Exiting application with return code \" +\n str(self.sys_status))\n sys.exit(self.sys_status)\n\n def critical(self, error, code):\n \"\"\"Exit due to unrecoverable error.\"\"\"\n self.sys_status = code\n logging.critical(error)\n print(\"Error: \" + error)\n self.exit()\n\n def start_connectors(self, connectors):\n \"\"\"Start the connectors.\"\"\"\n if len(connectors) == 0:\n self.critical(\"All connectors failed to load\", 1)\n elif 
len(connectors) == 1:\n for name, cls in connectors[0][\"module\"].__dict__.items():\n if isinstance(cls, type) and \"Connector\" in name:\n connectors[0][\"config\"][\"bot-name\"] = self.bot_name\n connector = cls(connectors[0][\"config\"])\n self.connectors.append(connector)\n connector.connect(self)\n else:\n for connector_module in connectors:\n for name, cls in connector_module[\"module\"].__dict__.items():\n if isinstance(cls, type) and \"Connector\" in name:\n connector_module[\"config\"][\"bot-name\"] = self.bot_name\n connector = cls(connector_module[\"config\"])\n self.connectors.append(connector)\n job = Process(target=connector.connect, args=(self,))\n job.start()\n self.connector_jobs.append(job)\n for job in self.connector_jobs:\n job.join()\n\n def start_databases(self, databases):\n \"\"\"Start the databases.\"\"\"\n if len(databases) == 0:\n logging.warning(\"All databases failed to load\")\n for database_module in databases:\n for name, cls in database_module[\"module\"].__dict__.items():\n if isinstance(cls, type) and \"Database\" in name:\n logging.debug(\"Adding database: \" + name)\n database = cls(database_module[\"config\"])\n self.memory.databases.append(database)\n database.connect()\n\n def load_regex_skill(self, regex, skill):\n \"\"\"Load skills.\"\"\"\n self.skills.append({\"regex\": regex, \"skill\": skill})\n\n def parse(self, message):\n \"\"\"Parse a string against all skills.\"\"\"\n if message.text.strip() != \"\":\n logging.debug(\"Parsing input: \" + message.text)\n for skill in self.skills:\n if \"regex\" in skill:\n regex = match(skill[\"regex\"], message.text)\n if regex:\n message.regex = regex\n skill[\"skill\"](self, message)\n", "path": "opsdroid/core.py"}]} | 1,536 | 463 |
gh_patches_debug_2287 | rasdani/github-patches | git_diff | googleapis__google-api-python-client-1205 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Google Auth test_with_scopes_scoped test failing with google-auth==1.25.0 or greater
The GoogleAuth unittest [test_with_scopes_scoped](https://github.com/googleapis/google-api-python-client/blob/master/tests/test__auth.py#L79) is failing at [this line](https://github.com/googleapis/google-api-python-client/blob/master/tests/test__auth.py#L92).
With `google-auth==1.25.0`, the test fails.
```
tests/test__auth.py::TestAuthWithGoogleAuth::test_with_scopes_scoped FAILED
```
With `google-auth==1.24.0` , the same test passes.
```
tests/test__auth.py::TestAuthWithGoogleAuth::test_with_scopes_scoped PASSED
```
</issue>
<code>
[start of setup.py]
1 # Copyright 2014 Google Inc. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Setup script for Google API Python client.
16
17 Also installs included versions of third party libraries, if those libraries
18 are not already installed.
19 """
20 from __future__ import print_function
21
22 import sys
23
24 if sys.version_info < (2, 7):
25 print("google-api-python-client requires python version >= 2.7.", file=sys.stderr)
26 sys.exit(1)
27 if (3, 1) <= sys.version_info < (3, 4):
28 print("google-api-python-client requires python3 version >= 3.4.", file=sys.stderr)
29 sys.exit(1)
30
31 import io
32 import os
33 from setuptools import setup
34
35 packages = ["apiclient", "googleapiclient", "googleapiclient/discovery_cache"]
36
37 install_requires = [
38 "httplib2>=0.15.0,<1dev",
39 "google-auth>=1.16.0",
40 "google-auth-httplib2>=0.0.3",
41 "google-api-core>=1.21.0,<2dev",
42 "six>=1.13.0,<2dev",
43 "uritemplate>=3.0.0,<4dev",
44 ]
45
46 package_root = os.path.abspath(os.path.dirname(__file__))
47
48 readme_filename = os.path.join(package_root, "README.md")
49 with io.open(readme_filename, encoding="utf-8") as readme_file:
50 readme = readme_file.read()
51
52 version = "1.12.8"
53
54 setup(
55 name="google-api-python-client",
56 version=version,
57 description="Google API Client Library for Python",
58 long_description=readme,
59 long_description_content_type='text/markdown',
60 author="Google LLC",
61 author_email="[email protected]",
62 url="https://github.com/googleapis/google-api-python-client/",
63 install_requires=install_requires,
64 python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
65 packages=packages,
66 package_data={},
67 license="Apache 2.0",
68 keywords="google api client",
69 classifiers=[
70 "Programming Language :: Python :: 2",
71 "Programming Language :: Python :: 2.7",
72 "Programming Language :: Python :: 3",
73 "Programming Language :: Python :: 3.5",
74 "Programming Language :: Python :: 3.6",
75 "Programming Language :: Python :: 3.7",
76 "Development Status :: 5 - Production/Stable",
77 "Intended Audience :: Developers",
78 "License :: OSI Approved :: Apache Software License",
79 "Operating System :: OS Independent",
80 "Topic :: Internet :: WWW/HTTP",
81 ],
82 )
83
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -39,6 +39,8 @@
"google-auth>=1.16.0",
"google-auth-httplib2>=0.0.3",
"google-api-core>=1.21.0,<2dev",
+ # rsa version 4.5 is the last version that is compatible with Python 2.7
+ "rsa==4.5;python_version<'3'",
"six>=1.13.0,<2dev",
"uritemplate>=3.0.0,<4dev",
]
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -39,6 +39,8 @@\n \"google-auth>=1.16.0\",\n \"google-auth-httplib2>=0.0.3\",\n \"google-api-core>=1.21.0,<2dev\",\n+ # rsa version 4.5 is the last version that is compatible with Python 2.7\n+ \"rsa==4.5;python_version<'3'\",\n \"six>=1.13.0,<2dev\",\n \"uritemplate>=3.0.0,<4dev\",\n ]\n", "issue": "Google Auth test_with_scopes_scoped test failing with google-auth==1.25.0 or greater\nThe GoogleAuth unittest [test_with_scopes_scoped](https://github.com/googleapis/google-api-python-client/blob/master/tests/test__auth.py#L79) is failing at [this line](https://github.com/googleapis/google-api-python-client/blob/master/tests/test__auth.py#L92). \r\n\r\nWith `google-auth==1.25.0`, the test fails.\r\n```\r\ntests/test__auth.py::TestAuthWithGoogleAuth::test_with_scopes_scoped FAILED \r\n```\r\n\r\nWith `google-auth==1.24.0` , the same test passes.\r\n```\r\ntests/test__auth.py::TestAuthWithGoogleAuth::test_with_scopes_scoped PASSED \r\n```\r\n\n", "before_files": [{"content": "# Copyright 2014 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Setup script for Google API Python client.\n\nAlso installs included versions of third party libraries, if those libraries\nare not already installed.\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\n\nif sys.version_info < (2, 7):\n print(\"google-api-python-client requires python version >= 2.7.\", file=sys.stderr)\n sys.exit(1)\nif (3, 1) <= sys.version_info < (3, 4):\n print(\"google-api-python-client requires python3 version >= 3.4.\", file=sys.stderr)\n sys.exit(1)\n\nimport io\nimport os\nfrom setuptools import setup\n\npackages = [\"apiclient\", \"googleapiclient\", \"googleapiclient/discovery_cache\"]\n\ninstall_requires = [\n \"httplib2>=0.15.0,<1dev\",\n \"google-auth>=1.16.0\",\n \"google-auth-httplib2>=0.0.3\",\n \"google-api-core>=1.21.0,<2dev\",\n \"six>=1.13.0,<2dev\",\n \"uritemplate>=3.0.0,<4dev\",\n]\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.md\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = \"1.12.8\"\n\nsetup(\n name=\"google-api-python-client\",\n version=version,\n description=\"Google API Client Library for Python\",\n long_description=readme,\n long_description_content_type='text/markdown',\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n url=\"https://github.com/googleapis/google-api-python-client/\",\n install_requires=install_requires,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n packages=packages,\n package_data={},\n license=\"Apache 2.0\",\n keywords=\"google api client\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n 
\"Programming Language :: Python :: 3.7\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "setup.py"}]} | 1,567 | 143 |
gh_patches_debug_1003 | rasdani/github-patches | git_diff | ipython__ipython-3556 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
_margv for macros seems to be missing
At one point in time, arguments to a macro could be obtained from `_margv`, but this seems to be missing now ( https://github.com/ipython/ipython/wiki/Cookbook:-Macro-arguments ).
I searched the entire ipython folder and only found `_margv` mentioned in the docstring in the macro.py file.
Just wondering if this is still supported.
</issue>
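For context before the source below: `Macro` only stores a string of code to replay and has no argument plumbing, so there is nowhere for `_margv` to come from. A minimal illustration, assuming `IPython.core.macro` is importable:

```python
from IPython.core.macro import Macro

m = Macro("print('hello')")
print(repr(m))  # IPython.macro.Macro(...), wrapping the stored source string
print(str(m))   # the stored source, ready to be re-executed verbatim
```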
<code>
[start of IPython/core/macro.py]
1 """Support for interactive macros in IPython"""
2
3 #*****************************************************************************
4 # Copyright (C) 2001-2005 Fernando Perez <[email protected]>
5 #
6 # Distributed under the terms of the BSD License. The full license is in
7 # the file COPYING, distributed as part of this software.
8 #*****************************************************************************
9
10 import re
11
12 from IPython.utils import py3compat
13 from IPython.utils.encoding import DEFAULT_ENCODING
14
15 coding_declaration = re.compile(r"#\s*coding[:=]\s*([-\w.]+)")
16
17 class Macro(object):
18 """Simple class to store the value of macros as strings.
19
20 Macro is just a callable that executes a string of IPython
21 input when called.
22
23 Args to macro are available in _margv list if you need them.
24 """
25
26 def __init__(self,code):
27 """store the macro value, as a single string which can be executed"""
28 lines = []
29 enc = None
30 for line in code.splitlines():
31 coding_match = coding_declaration.match(line)
32 if coding_match:
33 enc = coding_match.group(1)
34 else:
35 lines.append(line)
36 code = "\n".join(lines)
37 if isinstance(code, bytes):
38 code = code.decode(enc or DEFAULT_ENCODING)
39 self.value = code + '\n'
40
41 def __str__(self):
42 return py3compat.unicode_to_str(self.value)
43
44 def __unicode__(self):
45 return self.value
46
47 def __repr__(self):
48 return 'IPython.macro.Macro(%s)' % repr(self.value)
49
50 def __getstate__(self):
51 """ needed for safe pickling via %store """
52 return {'value': self.value}
53
54 def __add__(self, other):
55 if isinstance(other, Macro):
56 return Macro(self.value + other.value)
57 elif isinstance(other, basestring):
58 return Macro(self.value + other)
59 raise TypeError
60
[end of IPython/core/macro.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/IPython/core/macro.py b/IPython/core/macro.py
--- a/IPython/core/macro.py
+++ b/IPython/core/macro.py
@@ -19,8 +19,6 @@
Macro is just a callable that executes a string of IPython
input when called.
-
- Args to macro are available in _margv list if you need them.
"""
def __init__(self,code):
| {"golden_diff": "diff --git a/IPython/core/macro.py b/IPython/core/macro.py\n--- a/IPython/core/macro.py\n+++ b/IPython/core/macro.py\n@@ -19,8 +19,6 @@\n \n Macro is just a callable that executes a string of IPython\n input when called.\n- \n- Args to macro are available in _margv list if you need them.\n \"\"\"\n \n def __init__(self,code):\n", "issue": " _margv for macros seems to be missing\nAt one point in time, arguments to macro's could be obtained from _margv , but this seems to be missing now ( https://github.com/ipython/ipython/wiki/Cookbook:-Macro-arguments ). \n\nI searched the entire ipython folder and only found _margv in the documentation in the macro.py file. \n\nJust wondering if this is still supported. \n\n", "before_files": [{"content": "\"\"\"Support for interactive macros in IPython\"\"\"\n\n#*****************************************************************************\n# Copyright (C) 2001-2005 Fernando Perez <[email protected]>\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file COPYING, distributed as part of this software.\n#*****************************************************************************\n\nimport re\n\nfrom IPython.utils import py3compat\nfrom IPython.utils.encoding import DEFAULT_ENCODING\n\ncoding_declaration = re.compile(r\"#\\s*coding[:=]\\s*([-\\w.]+)\")\n\nclass Macro(object):\n \"\"\"Simple class to store the value of macros as strings.\n\n Macro is just a callable that executes a string of IPython\n input when called.\n \n Args to macro are available in _margv list if you need them.\n \"\"\"\n\n def __init__(self,code):\n \"\"\"store the macro value, as a single string which can be executed\"\"\"\n lines = []\n enc = None\n for line in code.splitlines():\n coding_match = coding_declaration.match(line)\n if coding_match:\n enc = coding_match.group(1)\n else:\n lines.append(line)\n code = \"\\n\".join(lines)\n if isinstance(code, bytes):\n code = code.decode(enc or DEFAULT_ENCODING)\n self.value = code + '\\n'\n \n def __str__(self):\n return py3compat.unicode_to_str(self.value)\n \n def __unicode__(self):\n return self.value\n\n def __repr__(self):\n return 'IPython.macro.Macro(%s)' % repr(self.value)\n \n def __getstate__(self):\n \"\"\" needed for safe pickling via %store \"\"\"\n return {'value': self.value}\n \n def __add__(self, other):\n if isinstance(other, Macro):\n return Macro(self.value + other.value)\n elif isinstance(other, basestring):\n return Macro(self.value + other)\n raise TypeError\n", "path": "IPython/core/macro.py"}]} | 1,156 | 100 |
gh_patches_debug_2751 | rasdani/github-patches | git_diff | abey79__vpype-607 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Default to QT_QPA_PLATFORM=xcb on Linux/Wayland
If we detect a Linux box running on Wayland, we should force Qt to use the xcb platform, as the Wayland backend doesn't work properly with moderngl.
This may be a good way to detect Wayland:
```
XDG_SESSION_TYPE=wayland
```
Relevant discussions:
- https://github.com/abey79/vsketch/issues/353
- https://discord.com/channels/550302843777712148/696045774970028062/1072436292798926868
</issue>
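A minimal sketch of the detection-and-override described above; the shipped fix (diff at the end of this record) wraps the same logic in a `_check_wayland()` helper. The only added assumption is that an explicit user-set `QT_QPA_PLATFORM` should be respected:

```python
import os
import sys

# On Linux under a Wayland session, fall back to the xcb Qt platform
# unless the user has already chosen a platform themselves.
if (sys.platform.startswith("linux")
        and os.environ.get("XDG_SESSION_TYPE") == "wayland"
        and "QT_QPA_PLATFORM" not in os.environ):
    os.environ["QT_QPA_PLATFORM"] = "xcb"
```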
<code>
[start of vpype_viewer/qtviewer/__init__.py]
1 from .viewer import *
2
[end of vpype_viewer/qtviewer/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/vpype_viewer/qtviewer/__init__.py b/vpype_viewer/qtviewer/__init__.py
--- a/vpype_viewer/qtviewer/__init__.py
+++ b/vpype_viewer/qtviewer/__init__.py
@@ -1 +1,18 @@
+def _check_wayland():
+ """Fix QT env variable on Wayland-based systems.
+
+ See https://github.com/abey79/vpype/issues/596
+ """
+ import os
+ import sys
+
+ if sys.platform.startswith("linux"):
+ if os.environ.get("XDG_SESSION_TYPE", "") == "wayland":
+ if "QT_QPA_PLATFORM" not in os.environ:
+ os.environ["QT_QPA_PLATFORM"] = "xcb"
+
+
+_check_wayland()
+
+
from .viewer import *
| {"golden_diff": "diff --git a/vpype_viewer/qtviewer/__init__.py b/vpype_viewer/qtviewer/__init__.py\n--- a/vpype_viewer/qtviewer/__init__.py\n+++ b/vpype_viewer/qtviewer/__init__.py\n@@ -1 +1,18 @@\n+def _check_wayland():\n+ \"\"\"Fix QT env variable on Wayland-based systems.\n+\n+ See https://github.com/abey79/vpype/issues/596\n+ \"\"\"\n+ import os\n+ import sys\n+\n+ if sys.platform.startswith(\"linux\"):\n+ if os.environ.get(\"XDG_SESSION_TYPE\", \"\") == \"wayland\":\n+ if \"QT_QPA_PLATFORM\" not in os.environ:\n+ os.environ[\"QT_QPA_PLATFORM\"] = \"xcb\"\n+\n+\n+_check_wayland()\n+\n+\n from .viewer import *\n", "issue": "Default to QT_QPA_PLATFORM=xcb on Linux/Wayland\nIf we detect a linux box running on wayland, we should force Qt to use the xcb platform as the wayland backend doesn't work properly with moderngl.\r\n\r\nThis maybe a good way to detect wayland:\r\n```\r\nXDG_SESSION_TYPE=wayland\r\n```\r\n\r\nRelevant discussions:\r\n- https://github.com/abey79/vsketch/issues/353\r\n- https://discord.com/channels/550302843777712148/696045774970028062/1072436292798926868\n", "before_files": [{"content": "from .viewer import *\n", "path": "vpype_viewer/qtviewer/__init__.py"}]} | 709 | 187 |
gh_patches_debug_16213 | rasdani/github-patches | git_diff | conan-io__conan-center-index-925 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[package] asio/1.13.0: broken on Android
Commit: https://github.com/conan-io/conan-center-index/commit/804be2ad15b2139960fe10efcd6667d1f2dd2e98 breaks Android because there is no `-lpthread` library there
</issue>
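A hedged sketch of the corrected check, mirroring the diff at the end of this record: dispatch on the declared target OS via `self.settings.os` rather than `tools.os_info`, which reflects the machine running Conan and so misfires when cross-compiling.

```python
def package_info(self):
    self.cpp_info.defines.append("ASIO_STANDALONE")
    # settings.os is the *target* platform; tools.os_info is the build host.
    if str(self.settings.os) in ["Linux", "Android"]:
        self.cpp_info.libs.append("pthread")
```

Note that this also requires declaring `settings = "os"` on the recipe, which the diff adds as well.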
<code>
[start of recipes/asio/all/conanfile.py]
1 import os
2 from conans import ConanFile, tools
3
4
5 class Asio(ConanFile):
6 name = "asio"
7 url = "https://github.com/conan-io/conan-center-index"
8 homepage = "http://think-async.com/Asio"
9 description = "Asio is a cross-platform C++ library for network and low-level I/O"
10 topics = ("conan", "asio", "network", "io", "low-level")
11 license = "BSL-1.0"
12
13 no_copy_source = True
14 _source_subfolder = "source_subfolder"
15
16 def source(self):
17 tools.get(**self.conan_data["sources"][self.version])
18 archive_name = "asio-" + self.version.replace(".", "-")
19 extracted_name = "asio-" + archive_name
20 os.rename(extracted_name, self._source_subfolder)
21
22 def package(self):
23 root_dir = os.path.join(self._source_subfolder, self.name)
24 include_dir = os.path.join(root_dir, "include")
25 self.copy(pattern="LICENSE_1_0.txt", dst="licenses", src=root_dir)
26 self.copy(pattern="*.hpp", dst="include", src=include_dir)
27 self.copy(pattern="*.ipp", dst="include", src=include_dir)
28
29 def package_info(self):
30 self.cpp_info.defines.append('ASIO_STANDALONE')
31 if tools.os_info.is_linux:
32 self.cpp_info.libs.append('pthread')
33
34 def package_id(self):
35 self.info.header_only()
36
[end of recipes/asio/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/asio/all/conanfile.py b/recipes/asio/all/conanfile.py
--- a/recipes/asio/all/conanfile.py
+++ b/recipes/asio/all/conanfile.py
@@ -8,6 +8,7 @@
homepage = "http://think-async.com/Asio"
description = "Asio is a cross-platform C++ library for network and low-level I/O"
topics = ("conan", "asio", "network", "io", "low-level")
+ settings = "os"
license = "BSL-1.0"
no_copy_source = True
@@ -28,7 +29,7 @@
def package_info(self):
self.cpp_info.defines.append('ASIO_STANDALONE')
- if tools.os_info.is_linux:
+ if str(self.settings.os) in ["Linux", "Android"]:
self.cpp_info.libs.append('pthread')
def package_id(self):
| {"golden_diff": "diff --git a/recipes/asio/all/conanfile.py b/recipes/asio/all/conanfile.py\n--- a/recipes/asio/all/conanfile.py\n+++ b/recipes/asio/all/conanfile.py\n@@ -8,6 +8,7 @@\n homepage = \"http://think-async.com/Asio\"\n description = \"Asio is a cross-platform C++ library for network and low-level I/O\"\n topics = (\"conan\", \"asio\", \"network\", \"io\", \"low-level\")\n+ settings = \"os\"\n license = \"BSL-1.0\"\n \n no_copy_source = True\n@@ -28,7 +29,7 @@\n \n def package_info(self):\n self.cpp_info.defines.append('ASIO_STANDALONE')\n- if tools.os_info.is_linux:\n+ if str(self.settings.os) in [\"Linux\", \"Android\"]:\n self.cpp_info.libs.append('pthread')\n \n def package_id(self):\n", "issue": "[package] asio/1.13.0: broken on Andriod\nCommit: https://github.com/conan-io/conan-center-index/commit/804be2ad15b2139960fe10efcd6667d1f2dd2e98 breaks Android because there is no `-lpthread` \n", "before_files": [{"content": "import os\nfrom conans import ConanFile, tools\n\n\nclass Asio(ConanFile):\n name = \"asio\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"http://think-async.com/Asio\"\n description = \"Asio is a cross-platform C++ library for network and low-level I/O\"\n topics = (\"conan\", \"asio\", \"network\", \"io\", \"low-level\")\n license = \"BSL-1.0\"\n\n no_copy_source = True\n _source_subfolder = \"source_subfolder\"\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n archive_name = \"asio-\" + self.version.replace(\".\", \"-\")\n extracted_name = \"asio-\" + archive_name\n os.rename(extracted_name, self._source_subfolder)\n\n def package(self):\n root_dir = os.path.join(self._source_subfolder, self.name)\n include_dir = os.path.join(root_dir, \"include\")\n self.copy(pattern=\"LICENSE_1_0.txt\", dst=\"licenses\", src=root_dir)\n self.copy(pattern=\"*.hpp\", dst=\"include\", src=include_dir)\n self.copy(pattern=\"*.ipp\", dst=\"include\", src=include_dir)\n\n def package_info(self):\n self.cpp_info.defines.append('ASIO_STANDALONE')\n if tools.os_info.is_linux:\n self.cpp_info.libs.append('pthread')\n\n def package_id(self):\n self.info.header_only()\n", "path": "recipes/asio/all/conanfile.py"}]} | 1,013 | 214 |
gh_patches_debug_18946 | rasdani/github-patches | git_diff | aws__aws-cli-6730 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[v2] `aws sso login` should not require a fully-configured profile
Currently, `aws sso login` operates on a particular profile, even [requiring that `sso_account_id` and `sso_role_name` be present in the profile](https://github.com/aws/aws-cli/blob/f2788558422dac42a5ebe37c7e5a3d24b19dee9f/awscli/customizations/sso/login.py#L32) even though it does not use them, [only fetching the token](https://github.com/aws/aws-cli/blob/f2788558422dac42a5ebe37c7e5a3d24b19dee9f/awscli/customizations/sso/utils.py#L45) (as it should, because AWS SSO-capable SDKs can use the token to get credentials for the appropriate account and role).
At the very least, `sso_account_id` and `sso_role_name` should be removed from the list of required config variables, which would allow a profile like:
```ini
[profile login]
sso_start_url = https://d-2e69cb2b10.awsapps.com/start
sso_region = us-east-2
```
and then `aws sso login --profile login` would just work without requiring a specific account and role that won't be used anyway.
This matters because not all users in an organization have the same permissions, so there's not a good way to provide them all with a single working config file to start from.
A better alternative would be to have AWS SSO configuration be explicit in the config file, perhaps with a new section type:
```ini
[sso default]
sso_start_url = https://d-2e69cb2b10.awsapps.com/start
sso_region = us-east-2
```
Or, `aws sso login` should check the configured profiles and if there's only one AWS SSO configuration (i.e., they all use the same start URL and region), it should just use that.
I've implemented the latter in [`aws-sso-util login`](https://github.com/benkehoe/aws-sso-util#logging-in-and-out).
</issue>
<code>
[start of awscli/customizations/sso/login.py]
1 # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 from awscli.customizations.commands import BasicCommand
14 from awscli.customizations.sso.utils import do_sso_login
15 from awscli.customizations.utils import uni_print
16 from awscli.customizations.exceptions import ConfigurationError
17
18
19 class InvalidSSOConfigError(ConfigurationError):
20 pass
21
22
23 class LoginCommand(BasicCommand):
24 NAME = 'login'
25 DESCRIPTION = (
26 'Retrieves and caches an AWS SSO access token to exchange for AWS '
27 'credentials. To login, the requested profile must have first been '
28 'setup using ``aws configure sso``. Each time the ``login`` command '
29 'is called, a new SSO access token will be retrieved.'
30 )
31 ARG_TABLE = []
32 _REQUIRED_SSO_CONFIG_VARS = [
33 'sso_start_url',
34 'sso_region',
35 'sso_role_name',
36 'sso_account_id',
37 ]
38
39 def _run_main(self, parsed_args, parsed_globals):
40 sso_config = self._get_sso_config()
41 do_sso_login(
42 session=self._session,
43 sso_region=sso_config['sso_region'],
44 start_url=sso_config['sso_start_url'],
45 force_refresh=True
46 )
47 success_msg = 'Successully logged into Start URL: %s\n'
48 uni_print(success_msg % sso_config['sso_start_url'])
49 return 0
50
51 def _get_sso_config(self):
52 scoped_config = self._session.get_scoped_config()
53 sso_config = {}
54 missing_vars = []
55 for config_var in self._REQUIRED_SSO_CONFIG_VARS:
56 if config_var not in scoped_config:
57 missing_vars.append(config_var)
58 else:
59 sso_config[config_var] = scoped_config[config_var]
60 if missing_vars:
61 raise InvalidSSOConfigError(
62 'Missing the following required SSO configuration values: %s. '
63 'To make sure this profile is properly configured to use SSO, '
64 'please run: aws configure sso' % ', '.join(missing_vars)
65 )
66 return sso_config
67
[end of awscli/customizations/sso/login.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/awscli/customizations/sso/login.py b/awscli/customizations/sso/login.py
--- a/awscli/customizations/sso/login.py
+++ b/awscli/customizations/sso/login.py
@@ -26,14 +26,15 @@
'Retrieves and caches an AWS SSO access token to exchange for AWS '
'credentials. To login, the requested profile must have first been '
'setup using ``aws configure sso``. Each time the ``login`` command '
- 'is called, a new SSO access token will be retrieved.'
+ 'is called, a new SSO access token will be retrieved. Please note '
+ 'that only one login session can be active for a given SSO Start URL '
+ 'and creating multiple profiles does not allow for multiple users to '
+ 'be authenticated against the same SSO Start URL.'
)
ARG_TABLE = []
_REQUIRED_SSO_CONFIG_VARS = [
'sso_start_url',
'sso_region',
- 'sso_role_name',
- 'sso_account_id',
]
def _run_main(self, parsed_args, parsed_globals):
| {"golden_diff": "diff --git a/awscli/customizations/sso/login.py b/awscli/customizations/sso/login.py\n--- a/awscli/customizations/sso/login.py\n+++ b/awscli/customizations/sso/login.py\n@@ -26,14 +26,15 @@\n 'Retrieves and caches an AWS SSO access token to exchange for AWS '\n 'credentials. To login, the requested profile must have first been '\n 'setup using ``aws configure sso``. Each time the ``login`` command '\n- 'is called, a new SSO access token will be retrieved.'\n+ 'is called, a new SSO access token will be retrieved. Please note '\n+ 'that only one login session can be active for a given SSO Start URL '\n+ 'and creating multiple profiles does not allow for multiple users to '\n+ 'be authenticated against the same SSO Start URL.'\n )\n ARG_TABLE = []\n _REQUIRED_SSO_CONFIG_VARS = [\n 'sso_start_url',\n 'sso_region',\n- 'sso_role_name',\n- 'sso_account_id',\n ]\n \n def _run_main(self, parsed_args, parsed_globals):\n", "issue": "[v2] `aws sso login` should not require a fully-configured profile\nCurrently, `aws sso login` operates on a particular profile, even [requiring that `sso_account_id` and `sso_role_name` be present in the profile](https://github.com/aws/aws-cli/blob/f2788558422dac42a5ebe37c7e5a3d24b19dee9f/awscli/customizations/sso/login.py#L32) even though it does not use them, [only fetching the token](https://github.com/aws/aws-cli/blob/f2788558422dac42a5ebe37c7e5a3d24b19dee9f/awscli/customizations/sso/utils.py#L45) (as it should, because AWS SSO-capable SDKs can use the token to get credentials for the appropriate account and role).\r\n\r\nAt the very least, `sso_account_id` and `sso_role_name` should be removed from the list of required config variables, which would allow a profile like:\r\n```ini\r\n[profile login]\r\nsso_start_url = https://d-2e69cb2b10.awsapps.com/start\r\nsso_region = us-east-2\r\n```\r\nand then `aws sso login --profile login` would just work without requiring a specific account and role that won't be used anyway.\r\n\r\nThis matters because not all users in an organization have the same permissions, so there's not a good way to provide them all with a single working config file to start from.\r\n\r\nA better alternative would be to have AWS SSO configuration be explicit in the config file, perhaps with a new section type:\r\n```ini\r\n[sso default]\r\nsso_start_url = https://d-2e69cb2b10.awsapps.com/start\r\nsso_region = us-east-2\r\n```\r\n\r\nOr, `aws sso login` should check the configured profiles and if there's only one AWS SSO configuration (i.e., they all use the same start URL and region), it should just use that.\r\n\r\nI've implemented the latter in [`aws-sso-util login`](https://github.com/benkehoe/aws-sso-util#logging-in-and-out).\n", "before_files": [{"content": "# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\nfrom awscli.customizations.commands import BasicCommand\nfrom awscli.customizations.sso.utils import do_sso_login\nfrom awscli.customizations.utils import uni_print\nfrom awscli.customizations.exceptions import ConfigurationError\n\n\nclass InvalidSSOConfigError(ConfigurationError):\n pass\n\n\nclass LoginCommand(BasicCommand):\n NAME = 'login'\n DESCRIPTION = (\n 'Retrieves and caches an AWS SSO access token to exchange for AWS '\n 'credentials. To login, the requested profile must have first been '\n 'setup using ``aws configure sso``. Each time the ``login`` command '\n 'is called, a new SSO access token will be retrieved.'\n )\n ARG_TABLE = []\n _REQUIRED_SSO_CONFIG_VARS = [\n 'sso_start_url',\n 'sso_region',\n 'sso_role_name',\n 'sso_account_id',\n ]\n\n def _run_main(self, parsed_args, parsed_globals):\n sso_config = self._get_sso_config()\n do_sso_login(\n session=self._session,\n sso_region=sso_config['sso_region'],\n start_url=sso_config['sso_start_url'],\n force_refresh=True\n )\n success_msg = 'Successully logged into Start URL: %s\\n'\n uni_print(success_msg % sso_config['sso_start_url'])\n return 0\n\n def _get_sso_config(self):\n scoped_config = self._session.get_scoped_config()\n sso_config = {}\n missing_vars = []\n for config_var in self._REQUIRED_SSO_CONFIG_VARS:\n if config_var not in scoped_config:\n missing_vars.append(config_var)\n else:\n sso_config[config_var] = scoped_config[config_var]\n if missing_vars:\n raise InvalidSSOConfigError(\n 'Missing the following required SSO configuration values: %s. '\n 'To make sure this profile is properly configured to use SSO, '\n 'please run: aws configure sso' % ', '.join(missing_vars)\n )\n return sso_config\n", "path": "awscli/customizations/sso/login.py"}]} | 1,736 | 254 |
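The `_get_sso_config()` loop in the row above is a reusable validate-then-collect pattern; a stand-alone rendition follows (the exception name and sample profile are illustrative, not aws-cli API):

```python
class MissingConfigError(Exception):
    pass


def pick_required(config: dict, required: list) -> dict:
    """Collect required keys from a profile-style mapping, or fail loudly.

    Same shape as _get_sso_config(): gather what is present, accumulate
    what is missing, and report every missing key at once rather than
    stopping at the first one.
    """
    found, missing = {}, []
    for key in required:
        if key in config:
            found[key] = config[key]
        else:
            missing.append(key)
    if missing:
        raise MissingConfigError(
            "Missing required SSO configuration values: %s" % ", ".join(missing)
        )
    return found


profile = {"sso_start_url": "https://example.awsapps.com/start",
           "sso_region": "us-east-2"}
print(pick_required(profile, ["sso_start_url", "sso_region"]))
```

After the golden diff trims the required list to the two keys above, a start-URL-only profile like the one in the issue passes this check.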
gh_patches_debug_19864 | rasdani/github-patches | git_diff | watchdogpolska__feder-433 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
How attachments sent to us are displayed
I don't know why it is that when I open the correspondence with a given municipality in a given monitoring, I can see attachments next to the emails:

But when I open a specific message, they are gone:

Can this be changed so that the attachments are also visible when opening a specific message?
</issue>
<code>
[start of feder/letters/factories.py]
1 from email.mime.text import MIMEText
2
3 import factory
4 import factory.fuzzy
5 from django.core.mail import EmailMessage
6 from factory.django import FileField
7
8 from feder.cases.factories import CaseFactory
9 from feder.institutions.factories import InstitutionFactory
10 from feder.records.factories import RecordFactory
11 from feder.users.factories import UserFactory
12 from .models import Letter
13
14
15 class MailField(FileField):
16 DEFAULT_FILENAME = 'data.eml'
17
18 def _make_data(self, params):
19 msg = MIMEText("Lorem ipsum")
20 msg['Subject'] = "Example message"
21 msg['From'] = "[email protected]"
22 msg['To'] = "[email protected]"
23
24 return params.get('data', msg.as_string().encode('utf-8'))
25
26
27 class LetterFactory(factory.django.DjangoModelFactory):
28 record = factory.SubFactory(RecordFactory)
29 title = factory.Sequence('title-letter-{0}'.format)
30 body = factory.Sequence('body-{0}'.format)
31 quote = factory.Sequence('quote-{0}'.format)
32
33 class Meta:
34 model = Letter
35
36
37 class IncomingLetterFactory(LetterFactory):
38 author_institution = factory.SubFactory(InstitutionFactory)
39 email = factory.Sequence('xxx-{0}@example.com'.format)
40 note = factory.fuzzy.FuzzyText()
41 eml = MailField()
42
43
44 class OutgoingLetterFactory(LetterFactory):
45 author_user = factory.SubFactory(UserFactory)
46 is_draft = False
47 eml = MailField()
48
49
50 class DraftLetterFactory(OutgoingLetterFactory):
51 is_draft = True
52
53
54 class SendOutgoingLetterFactory(LetterFactory):
55 author_user = factory.SubFactory(UserFactory)
56
57 is_send_yes = factory.PostGenerationMethodCall('send')
58
[end of feder/letters/factories.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/feder/letters/factories.py b/feder/letters/factories.py
--- a/feder/letters/factories.py
+++ b/feder/letters/factories.py
@@ -1,15 +1,12 @@
from email.mime.text import MIMEText
-import factory
import factory.fuzzy
-from django.core.mail import EmailMessage
from factory.django import FileField
-from feder.cases.factories import CaseFactory
from feder.institutions.factories import InstitutionFactory
from feder.records.factories import RecordFactory
from feder.users.factories import UserFactory
-from .models import Letter
+from .models import Letter, Attachment
class MailField(FileField):
@@ -55,3 +52,11 @@
author_user = factory.SubFactory(UserFactory)
is_send_yes = factory.PostGenerationMethodCall('send')
+
+
+class AttachmentFactory(factory.django.DjangoModelFactory):
+ letter = factory.SubFactory(InstitutionFactory)
+ attachment = factory.django.FileField()
+
+ class Meta:
+ model = Attachment
| {"golden_diff": "diff --git a/feder/letters/factories.py b/feder/letters/factories.py\n--- a/feder/letters/factories.py\n+++ b/feder/letters/factories.py\n@@ -1,15 +1,12 @@\n from email.mime.text import MIMEText\n \n-import factory\n import factory.fuzzy\n-from django.core.mail import EmailMessage\n from factory.django import FileField\n \n-from feder.cases.factories import CaseFactory\n from feder.institutions.factories import InstitutionFactory\n from feder.records.factories import RecordFactory\n from feder.users.factories import UserFactory\n-from .models import Letter\n+from .models import Letter, Attachment\n \n \n class MailField(FileField):\n@@ -55,3 +52,11 @@\n author_user = factory.SubFactory(UserFactory)\n \n is_send_yes = factory.PostGenerationMethodCall('send')\n+\n+\n+class AttachmentFactory(factory.django.DjangoModelFactory):\n+ letter = factory.SubFactory(InstitutionFactory)\n+ attachment = factory.django.FileField()\n+\n+ class Meta:\n+ model = Attachment\n", "issue": "spos\u00f3b widzenia za\u0142\u0105cznik\u00f3w do nas wys\u0142anych\nNie wiem czemu jest tak, \u017ce ja wchodz\u0119 w korespondencj\u0119 z dan\u0105 gmin\u0105 w danym monitoringu, to przy mailach widz\u0119 za\u0142\u0105czniki:\r\n\r\n\r\n\r\nA jak ju\u017c wejd\u0119 z konkretn\u0105 wiadomo\u015b\u0107, to ich nie ma:\r\n\r\n\r\n\r\nCzy to si\u0119 da zmieni\u0107, \u017ceby po wej\u015bciu z konkretn\u0105 wiadomo\u015b\u0107 te\u017c by\u0142o wida\u0107 te za\u0142\u0105czniki?\n", "before_files": [{"content": "from email.mime.text import MIMEText\n\nimport factory\nimport factory.fuzzy\nfrom django.core.mail import EmailMessage\nfrom factory.django import FileField\n\nfrom feder.cases.factories import CaseFactory\nfrom feder.institutions.factories import InstitutionFactory\nfrom feder.records.factories import RecordFactory\nfrom feder.users.factories import UserFactory\nfrom .models import Letter\n\n\nclass MailField(FileField):\n DEFAULT_FILENAME = 'data.eml'\n\n def _make_data(self, params):\n msg = MIMEText(\"Lorem ipsum\")\n msg['Subject'] = \"Example message\"\n msg['From'] = \"[email protected]\"\n msg['To'] = \"[email protected]\"\n\n return params.get('data', msg.as_string().encode('utf-8'))\n\n\nclass LetterFactory(factory.django.DjangoModelFactory):\n record = factory.SubFactory(RecordFactory)\n title = factory.Sequence('title-letter-{0}'.format)\n body = factory.Sequence('body-{0}'.format)\n quote = factory.Sequence('quote-{0}'.format)\n\n class Meta:\n model = Letter\n\n\nclass IncomingLetterFactory(LetterFactory):\n author_institution = factory.SubFactory(InstitutionFactory)\n email = factory.Sequence('xxx-{0}@example.com'.format)\n note = factory.fuzzy.FuzzyText()\n eml = MailField()\n\n\nclass OutgoingLetterFactory(LetterFactory):\n author_user = factory.SubFactory(UserFactory)\n is_draft = False\n eml = MailField()\n\n\nclass DraftLetterFactory(OutgoingLetterFactory):\n is_draft = True\n\n\nclass SendOutgoingLetterFactory(LetterFactory):\n author_user = factory.SubFactory(UserFactory)\n\n is_send_yes = factory.PostGenerationMethodCall('send')\n", "path": "feder/letters/factories.py"}]} | 1,256 | 229 |
gh_patches_debug_33644 | rasdani/github-patches | git_diff | cisagov__manage.get.gov-1275 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Implement basic reporting functionality for MVP (besides Domain Growth report)
### Story
As an Admin, I want to quickly export domain data reports directly from the domains page (/admin/registrar/domain/) so that I can easily access and analyze the domain data.
### Acceptance Criteria
- [ ] Three reports are available to download on the domains page:
- [ ] [Domains by type](https://docs.google.com/spreadsheets/d/1_nMU2obW22U6NlOSC2ARxf3PpsJnSe2wMo5AyLSzXzk/edit?usp=sharing) (sorted by domain name)
- [ ] [current-full.csv](https://github.com/cisagov/dotgov-data/blob/main/current-full.csv) (sorted by domain name, then agency, then domain type)
- [ ] [current-federal.csv](https://github.com/cisagov/dotgov-data/blob/main/current-federal.csv) (sorted by domain name, then agency, then domain type)
- [ ] Each CSV report should contain accurate and up-to-date domain data from the database, sorted in the ways they are in the examples above.
- [ ] Single dropdown with the three report options which the user can select
- [ ] Clicking on each report triggers an immediate download of the relevant CSV report
- [ ] The UI components should be consistent with the existing design language of the admin portal.
### Additional Context
- This feature is a stop-gap measure, meant to provide immediate access to crucial reports while the ideal report interface is being developed. Future work is at #997.
- Security email may be pulled from .gov database rather than thru EPP call to registry.
### Issue Links
🔄 Relates to: #938 #143 #1075
</issue>
<code>
[start of src/registrar/utility/csv_export.py]
1 import csv
2 from registrar.models.domain import Domain
3 from registrar.models.domain_information import DomainInformation
4 from registrar.models.public_contact import PublicContact
5
6
7 def export_domains_to_writer(writer, columns, sort_fields, filter_condition):
8 # write columns headers to writer
9 writer.writerow(columns)
10
11 domainInfos = DomainInformation.objects.filter(**filter_condition).order_by(
12 *sort_fields
13 )
14 for domainInfo in domainInfos:
15 security_contacts = domainInfo.domain.contacts.filter(
16 contact_type=PublicContact.ContactTypeChoices.SECURITY
17 )
18
19 # create a dictionary of fields which can be included in output
20 FIELDS = {
21 "Domain name": domainInfo.domain.name,
22 "Domain type": domainInfo.get_organization_type_display()
23 + " - "
24 + domainInfo.get_federal_type_display()
25 if domainInfo.federal_type
26 else domainInfo.get_organization_type_display(),
27 "Agency": domainInfo.federal_agency,
28 "Organization name": domainInfo.organization_name,
29 "City": domainInfo.city,
30 "State": domainInfo.state_territory,
31 "AO": domainInfo.authorizing_official.first_name
32 + " "
33 + domainInfo.authorizing_official.last_name
34 if domainInfo.authorizing_official
35 else " ",
36 "AO email": domainInfo.authorizing_official.email
37 if domainInfo.authorizing_official
38 else " ",
39 "Security Contact Email": security_contacts[0].email
40 if security_contacts
41 else " ",
42 "Status": domainInfo.domain.state,
43 "Expiration Date": domainInfo.domain.expiration_date,
44 }
45 writer.writerow([FIELDS.get(column, "") for column in columns])
46
47
48 def export_data_type_to_csv(csv_file):
49 writer = csv.writer(csv_file)
50 # define columns to include in export
51 columns = [
52 "Domain name",
53 "Domain type",
54 "Agency",
55 "Organization name",
56 "City",
57 "State",
58 "AO",
59 "AO email",
60 "Security Contact Email",
61 "Status",
62 "Expiration Date",
63 ]
64 sort_fields = ["domain__name"]
65 filter_condition = {
66 "domain__state__in": [
67 Domain.State.READY,
68 Domain.State.DNS_NEEDED,
69 Domain.State.ON_HOLD,
70 ],
71 }
72 export_domains_to_writer(writer, columns, sort_fields, filter_condition)
73
74
75 def export_data_full_to_csv(csv_file):
76 writer = csv.writer(csv_file)
77 # define columns to include in export
78 columns = [
79 "Domain name",
80 "Domain type",
81 "Agency",
82 "Organization name",
83 "City",
84 "State",
85 "Security Contact Email",
86 ]
87 sort_fields = ["domain__name", "federal_agency", "organization_type"]
88 filter_condition = {
89 "domain__state__in": [
90 Domain.State.READY,
91 Domain.State.DNS_NEEDED,
92 Domain.State.ON_HOLD,
93 ],
94 }
95 export_domains_to_writer(writer, columns, sort_fields, filter_condition)
96
97
98 def export_data_federal_to_csv(csv_file):
99 writer = csv.writer(csv_file)
100 # define columns to include in export
101 columns = [
102 "Domain name",
103 "Domain type",
104 "Agency",
105 "Organization name",
106 "City",
107 "State",
108 "Security Contact Email",
109 ]
110 sort_fields = ["domain__name", "federal_agency", "organization_type"]
111 filter_condition = {
112 "organization_type__icontains": "federal",
113 "domain__state__in": [
114 Domain.State.READY,
115 Domain.State.DNS_NEEDED,
116 Domain.State.ON_HOLD,
117 ],
118 }
119 export_domains_to_writer(writer, columns, sort_fields, filter_condition)
120
[end of src/registrar/utility/csv_export.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/registrar/utility/csv_export.py b/src/registrar/utility/csv_export.py
--- a/src/registrar/utility/csv_export.py
+++ b/src/registrar/utility/csv_export.py
@@ -2,6 +2,8 @@
from registrar.models.domain import Domain
from registrar.models.domain_information import DomainInformation
from registrar.models.public_contact import PublicContact
+from django.db.models import Value
+from django.db.models.functions import Coalesce
def export_domains_to_writer(writer, columns, sort_fields, filter_condition):
@@ -61,7 +63,13 @@
"Status",
"Expiration Date",
]
- sort_fields = ["domain__name"]
+ # Coalesce is used to replace federal_type of None with ZZZZZ
+ sort_fields = [
+ "organization_type",
+ Coalesce("federal_type", Value("ZZZZZ")),
+ "federal_agency",
+ "domain__name",
+ ]
filter_condition = {
"domain__state__in": [
Domain.State.READY,
@@ -84,7 +92,13 @@
"State",
"Security Contact Email",
]
- sort_fields = ["domain__name", "federal_agency", "organization_type"]
+ # Coalesce is used to replace federal_type of None with ZZZZZ
+ sort_fields = [
+ "organization_type",
+ Coalesce("federal_type", Value("ZZZZZ")),
+ "federal_agency",
+ "domain__name",
+ ]
filter_condition = {
"domain__state__in": [
Domain.State.READY,
@@ -107,7 +121,13 @@
"State",
"Security Contact Email",
]
- sort_fields = ["domain__name", "federal_agency", "organization_type"]
+ # Coalesce is used to replace federal_type of None with ZZZZZ
+ sort_fields = [
+ "organization_type",
+ Coalesce("federal_type", Value("ZZZZZ")),
+ "federal_agency",
+ "domain__name",
+ ]
filter_condition = {
"organization_type__icontains": "federal",
"domain__state__in": [
| {"golden_diff": "diff --git a/src/registrar/utility/csv_export.py b/src/registrar/utility/csv_export.py\n--- a/src/registrar/utility/csv_export.py\n+++ b/src/registrar/utility/csv_export.py\n@@ -2,6 +2,8 @@\n from registrar.models.domain import Domain\n from registrar.models.domain_information import DomainInformation\n from registrar.models.public_contact import PublicContact\n+from django.db.models import Value\n+from django.db.models.functions import Coalesce\n \n \n def export_domains_to_writer(writer, columns, sort_fields, filter_condition):\n@@ -61,7 +63,13 @@\n \"Status\",\n \"Expiration Date\",\n ]\n- sort_fields = [\"domain__name\"]\n+ # Coalesce is used to replace federal_type of None with ZZZZZ\n+ sort_fields = [\n+ \"organization_type\",\n+ Coalesce(\"federal_type\", Value(\"ZZZZZ\")),\n+ \"federal_agency\",\n+ \"domain__name\",\n+ ]\n filter_condition = {\n \"domain__state__in\": [\n Domain.State.READY,\n@@ -84,7 +92,13 @@\n \"State\",\n \"Security Contact Email\",\n ]\n- sort_fields = [\"domain__name\", \"federal_agency\", \"organization_type\"]\n+ # Coalesce is used to replace federal_type of None with ZZZZZ\n+ sort_fields = [\n+ \"organization_type\",\n+ Coalesce(\"federal_type\", Value(\"ZZZZZ\")),\n+ \"federal_agency\",\n+ \"domain__name\",\n+ ]\n filter_condition = {\n \"domain__state__in\": [\n Domain.State.READY,\n@@ -107,7 +121,13 @@\n \"State\",\n \"Security Contact Email\",\n ]\n- sort_fields = [\"domain__name\", \"federal_agency\", \"organization_type\"]\n+ # Coalesce is used to replace federal_type of None with ZZZZZ\n+ sort_fields = [\n+ \"organization_type\",\n+ Coalesce(\"federal_type\", Value(\"ZZZZZ\")),\n+ \"federal_agency\",\n+ \"domain__name\",\n+ ]\n filter_condition = {\n \"organization_type__icontains\": \"federal\",\n \"domain__state__in\": [\n", "issue": "Implement basic reporting functionality for MVP (besides Domain Growth report)\n### Story\r\n\r\nAs an Admin, I want to quickly export domain data reports directly from the domains page (/admin/registrar/domain/) so that I can easily access and analyze the domain data.\r\n\r\n### Acceptance Criteria\r\n\r\n- [ ] Three reports are available to download on the domains page:\r\n - [ ] [Domains by type](https://docs.google.com/spreadsheets/d/1_nMU2obW22U6NlOSC2ARxf3PpsJnSe2wMo5AyLSzXzk/edit?usp=sharing) (sorted by domain name)\r\n - [ ] [current-full.csv](https://github.com/cisagov/dotgov-data/blob/main/current-full.csv) (sorted by domain name, then agency, then domain type)\r\n - [ ] [current-federal.csv](https://github.com/cisagov/dotgov-data/blob/main/current-federal.csv) (sorted by domain name, then agency, then domain type)\r\n- [ ] Each CSV report should contain accurate and up-to-date domain data from the database, sorted in the ways they are in the examples above.\r\n- [ ] Single dropdown with the three report options which the user can select\r\n- [ ] Clicking on each report triggers an immediate download of the relevant CSV report\r\n- [ ] The UI components should be consistent with the existing design language of the admin portal.\r\n\r\n### Additional Context\r\n\r\n- This feature is a stop-gap measure, meant to provide immediate access to crucial reports while the ideal report interface is being developed. 
Future work is at #997.\r\n\r\n- Security email may be pulled from .gov database rather than thru EPP call to registry.\r\n\r\n### Issue Links\r\n\r\n\ud83d\udd04 Relates to: #938 #143 #1075 \n", "before_files": [{"content": "import csv\nfrom registrar.models.domain import Domain\nfrom registrar.models.domain_information import DomainInformation\nfrom registrar.models.public_contact import PublicContact\n\n\ndef export_domains_to_writer(writer, columns, sort_fields, filter_condition):\n # write columns headers to writer\n writer.writerow(columns)\n\n domainInfos = DomainInformation.objects.filter(**filter_condition).order_by(\n *sort_fields\n )\n for domainInfo in domainInfos:\n security_contacts = domainInfo.domain.contacts.filter(\n contact_type=PublicContact.ContactTypeChoices.SECURITY\n )\n\n # create a dictionary of fields which can be included in output\n FIELDS = {\n \"Domain name\": domainInfo.domain.name,\n \"Domain type\": domainInfo.get_organization_type_display()\n + \" - \"\n + domainInfo.get_federal_type_display()\n if domainInfo.federal_type\n else domainInfo.get_organization_type_display(),\n \"Agency\": domainInfo.federal_agency,\n \"Organization name\": domainInfo.organization_name,\n \"City\": domainInfo.city,\n \"State\": domainInfo.state_territory,\n \"AO\": domainInfo.authorizing_official.first_name\n + \" \"\n + domainInfo.authorizing_official.last_name\n if domainInfo.authorizing_official\n else \" \",\n \"AO email\": domainInfo.authorizing_official.email\n if domainInfo.authorizing_official\n else \" \",\n \"Security Contact Email\": security_contacts[0].email\n if security_contacts\n else \" \",\n \"Status\": domainInfo.domain.state,\n \"Expiration Date\": domainInfo.domain.expiration_date,\n }\n writer.writerow([FIELDS.get(column, \"\") for column in columns])\n\n\ndef export_data_type_to_csv(csv_file):\n writer = csv.writer(csv_file)\n # define columns to include in export\n columns = [\n \"Domain name\",\n \"Domain type\",\n \"Agency\",\n \"Organization name\",\n \"City\",\n \"State\",\n \"AO\",\n \"AO email\",\n \"Security Contact Email\",\n \"Status\",\n \"Expiration Date\",\n ]\n sort_fields = [\"domain__name\"]\n filter_condition = {\n \"domain__state__in\": [\n Domain.State.READY,\n Domain.State.DNS_NEEDED,\n Domain.State.ON_HOLD,\n ],\n }\n export_domains_to_writer(writer, columns, sort_fields, filter_condition)\n\n\ndef export_data_full_to_csv(csv_file):\n writer = csv.writer(csv_file)\n # define columns to include in export\n columns = [\n \"Domain name\",\n \"Domain type\",\n \"Agency\",\n \"Organization name\",\n \"City\",\n \"State\",\n \"Security Contact Email\",\n ]\n sort_fields = [\"domain__name\", \"federal_agency\", \"organization_type\"]\n filter_condition = {\n \"domain__state__in\": [\n Domain.State.READY,\n Domain.State.DNS_NEEDED,\n Domain.State.ON_HOLD,\n ],\n }\n export_domains_to_writer(writer, columns, sort_fields, filter_condition)\n\n\ndef export_data_federal_to_csv(csv_file):\n writer = csv.writer(csv_file)\n # define columns to include in export\n columns = [\n \"Domain name\",\n \"Domain type\",\n \"Agency\",\n \"Organization name\",\n \"City\",\n \"State\",\n \"Security Contact Email\",\n ]\n sort_fields = [\"domain__name\", \"federal_agency\", \"organization_type\"]\n filter_condition = {\n \"organization_type__icontains\": \"federal\",\n \"domain__state__in\": [\n Domain.State.READY,\n Domain.State.DNS_NEEDED,\n Domain.State.ON_HOLD,\n ],\n }\n export_domains_to_writer(writer, columns, sort_fields, filter_condition)\n", "path": 
"src/registrar/utility/csv_export.py"}]} | 1,961 | 490 |
gh_patches_debug_5265 | rasdani/github-patches | git_diff | obspy__obspy-2562 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix simple typo: whith -> with
There is a small typo in obspy/io/gcf/core.py.
Should read `with` rather than `whith`.
</issue>
<code>
[start of obspy/io/gcf/core.py]
1 # -*- coding: utf-8 -*-
2 """
3 GCF bindings to ObsPy core module.
4 """
5 from __future__ import (absolute_import, division, print_function,
6 unicode_literals)
7 from future.builtins import * # NOQA
8
9 from obspy import Stream, Trace, UTCDateTime
10
11 from . import libgcf
12
13
14 def merge_gcf_stream(st):
15 """
16 Merges GCF stream (replacing Stream.merge(-1) for headonly=True)
17
18 :type st: :class:`~obspy.core.stream.Stream`
19 :param st: GCF Stream object whith no data
20 :rtype: :class:`~obspy.core.stream.Stream`
21 :returns: Stream object containing header and data.
22 """
23 traces = []
24 for tr in st:
25 delta = tr.stats.delta
26 starttime = tr.stats.starttime
27 endtime = tr.stats.endtime
28 for trace in traces:
29 if tr.id == trace.id and delta == trace.stats.delta \
30 and not starttime == trace.stats.starttime:
31 if 0 < starttime - trace.stats.endtime <= delta:
32 trace.stats.npts += tr.stats.npts
33 break
34 elif 0 < trace.stats.starttime - endtime <= delta:
35 trace.stats.starttime = UTCDateTime(starttime)
36 trace.stats.npts += tr.stats.npts
37 break
38 else:
39 traces.append(tr)
40 return Stream(traces=traces)
41
42
43 def _is_gcf(filename):
44 """
45 Checks whether a file is GCF or not.
46
47 :type filename: str
48 :param filename: GCF file to be checked.
49 :rtype: bool
50 :return: ``True`` if a GCF file.
51 """
52 try:
53 with open(filename, 'rb') as f:
54 libgcf.is_gcf(f)
55 except Exception:
56 return False
57 return True
58
59
60 def _read_gcf(filename, headonly=False, **kwargs): # @UnusedVariable
61 """
62 Reads a GCF file and returns a Stream object.
63
64 only GCF files containing data records are supported.
65
66 .. warning::
67 This function should NOT be called directly, it registers via the
68 ObsPy :func:`~obspy.core.stream.read` function, call this instead.
69
70 :type filename: str
71 :param filename: GCF file to be read.
72 :type headonly: bool, optional
73 :param headonly: If True read only head of GCF file.
74 :type channel_prefix: str, optional
75 :param channel_prefix: Channel band and instrument codes.
76 Defaults to ``HH``.
77 :rtype: :class:`~obspy.core.stream.Stream`
78 :returns: Stream object containing header and data.
79
80 .. rubric:: Example
81 >>> from obspy import read
82 >>> st = read("/path/to/20160603_1955n.gcf", format="GCF")
83 """
84 traces = []
85 with open(filename, 'rb') as f:
86 while True:
87 try:
88 if headonly:
89 header = libgcf.read_header(f, **kwargs)
90 if header:
91 traces.append(Trace(header=header))
92 else:
93 hd = libgcf.read(f, **kwargs)
94 if hd:
95 traces.append(Trace(header=hd[0], data=hd[1]))
96 except EOFError:
97 break
98 st = Stream(traces=traces)
99 if headonly:
100 st = merge_gcf_stream(st)
101 else:
102 st.merge(-1)
103 return st
104
[end of obspy/io/gcf/core.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/obspy/io/gcf/core.py b/obspy/io/gcf/core.py
--- a/obspy/io/gcf/core.py
+++ b/obspy/io/gcf/core.py
@@ -16,7 +16,7 @@
Merges GCF stream (replacing Stream.merge(-1) for headonly=True)
:type st: :class:`~obspy.core.stream.Stream`
- :param st: GCF Stream object whith no data
+ :param st: GCF Stream object with no data
:rtype: :class:`~obspy.core.stream.Stream`
:returns: Stream object containing header and data.
"""
| {"golden_diff": "diff --git a/obspy/io/gcf/core.py b/obspy/io/gcf/core.py\n--- a/obspy/io/gcf/core.py\n+++ b/obspy/io/gcf/core.py\n@@ -16,7 +16,7 @@\n Merges GCF stream (replacing Stream.merge(-1) for headonly=True)\n \n :type st: :class:`~obspy.core.stream.Stream`\n- :param st: GCF Stream object whith no data\n+ :param st: GCF Stream object with no data\n :rtype: :class:`~obspy.core.stream.Stream`\n :returns: Stream object containing header and data.\n \"\"\"\n", "issue": "Fix simple typo: whith -> with\nThere is a small typo in obspy/io/gcf/core.py.\nShould read `with` rather than `whith`.\n\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nGCF bindings to ObsPy core module.\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nfrom future.builtins import * # NOQA\n\nfrom obspy import Stream, Trace, UTCDateTime\n\nfrom . import libgcf\n\n\ndef merge_gcf_stream(st):\n \"\"\"\n Merges GCF stream (replacing Stream.merge(-1) for headonly=True)\n\n :type st: :class:`~obspy.core.stream.Stream`\n :param st: GCF Stream object whith no data\n :rtype: :class:`~obspy.core.stream.Stream`\n :returns: Stream object containing header and data.\n \"\"\"\n traces = []\n for tr in st:\n delta = tr.stats.delta\n starttime = tr.stats.starttime\n endtime = tr.stats.endtime\n for trace in traces:\n if tr.id == trace.id and delta == trace.stats.delta \\\n and not starttime == trace.stats.starttime:\n if 0 < starttime - trace.stats.endtime <= delta:\n trace.stats.npts += tr.stats.npts\n break\n elif 0 < trace.stats.starttime - endtime <= delta:\n trace.stats.starttime = UTCDateTime(starttime)\n trace.stats.npts += tr.stats.npts\n break\n else:\n traces.append(tr)\n return Stream(traces=traces)\n\n\ndef _is_gcf(filename):\n \"\"\"\n Checks whether a file is GCF or not.\n\n :type filename: str\n :param filename: GCF file to be checked.\n :rtype: bool\n :return: ``True`` if a GCF file.\n \"\"\"\n try:\n with open(filename, 'rb') as f:\n libgcf.is_gcf(f)\n except Exception:\n return False\n return True\n\n\ndef _read_gcf(filename, headonly=False, **kwargs): # @UnusedVariable\n \"\"\"\n Reads a GCF file and returns a Stream object.\n\n only GCF files containing data records are supported.\n\n .. warning::\n This function should NOT be called directly, it registers via the\n ObsPy :func:`~obspy.core.stream.read` function, call this instead.\n\n :type filename: str\n :param filename: GCF file to be read.\n :type headonly: bool, optional\n :param headonly: If True read only head of GCF file.\n :type channel_prefix: str, optional\n :param channel_prefix: Channel band and instrument codes.\n Defaults to ``HH``.\n :rtype: :class:`~obspy.core.stream.Stream`\n :returns: Stream object containing header and data.\n\n .. rubric:: Example\n >>> from obspy import read\n >>> st = read(\"/path/to/20160603_1955n.gcf\", format=\"GCF\")\n \"\"\"\n traces = []\n with open(filename, 'rb') as f:\n while True:\n try:\n if headonly:\n header = libgcf.read_header(f, **kwargs)\n if header:\n traces.append(Trace(header=header))\n else:\n hd = libgcf.read(f, **kwargs)\n if hd:\n traces.append(Trace(header=hd[0], data=hd[1]))\n except EOFError:\n break\n st = Stream(traces=traces)\n if headonly:\n st = merge_gcf_stream(st)\n else:\n st.merge(-1)\n return st\n", "path": "obspy/io/gcf/core.py"}]} | 1,548 | 147 |
gh_patches_debug_14426 | rasdani/github-patches | git_diff | boto__boto-3045 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
boto.vpc.routetable.Route does not contain route origin
RouteSet responses will contain an origin for each route, but boto.vpc.routetable.Route is not populated with the route origin:
Example response before:
```
In [8]: vars(all_route_tables[1].routes[1])
Out[8]:
{'destination_cidr_block': u'0.0.0.0/0',
'gateway_id': None,
'instance_id': u'i-123',
'interface_id': u'eni-123',
'state': u'active',
'vpc_peering_connection_id': None}
```
After:
```
In [25]: vars(all_route_tables[1].routes[1])
Out[25]:
{'destination_cidr_block': u'0.0.0.0/0',
'gateway_id': None,
'instance_id': u'i-123',
'interface_id': u'eni-123',
'origin': u'CreateRoute',
'state': u'active',
'vpc_peering_connection_id': None}
```
</issue>
<code>
[start of boto/vpc/routetable.py]
1 # Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a
4 # copy of this software and associated documentation files (the
5 # "Software"), to deal in the Software without restriction, including
6 # without limitation the rights to use, copy, modify, merge, publish, dis-
7 # tribute, sublicense, and/or sell copies of the Software, and to permit
8 # persons to whom the Software is furnished to do so, subject to the fol-
9 # lowing conditions:
10 #
11 # The above copyright notice and this permission notice shall be included
12 # in all copies or substantial portions of the Software.
13 #
14 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
16 # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
17 # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
18 # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
20 # IN THE SOFTWARE.
21
22 """
23 Represents a Route Table
24 """
25
26 from boto.ec2.ec2object import TaggedEC2Object
27 from boto.resultset import ResultSet
28
29 class RouteTable(TaggedEC2Object):
30
31 def __init__(self, connection=None):
32 super(RouteTable, self).__init__(connection)
33 self.id = None
34 self.vpc_id = None
35 self.routes = []
36 self.associations = []
37
38 def __repr__(self):
39 return 'RouteTable:%s' % self.id
40
41 def startElement(self, name, attrs, connection):
42 result = super(RouteTable, self).startElement(name, attrs, connection)
43
44 if result is not None:
45 # Parent found an interested element, just return it
46 return result
47
48 if name == 'routeSet':
49 self.routes = ResultSet([('item', Route)])
50 return self.routes
51 elif name == 'associationSet':
52 self.associations = ResultSet([('item', RouteAssociation)])
53 return self.associations
54 else:
55 return None
56
57 def endElement(self, name, value, connection):
58 if name == 'routeTableId':
59 self.id = value
60 elif name == 'vpcId':
61 self.vpc_id = value
62 else:
63 setattr(self, name, value)
64
65 class Route(object):
66 def __init__(self, connection=None):
67 self.destination_cidr_block = None
68 self.gateway_id = None
69 self.instance_id = None
70 self.interface_id = None
71 self.vpc_peering_connection_id = None
72 self.state = None
73
74 def __repr__(self):
75 return 'Route:%s' % self.destination_cidr_block
76
77 def startElement(self, name, attrs, connection):
78 return None
79
80 def endElement(self, name, value, connection):
81 if name == 'destinationCidrBlock':
82 self.destination_cidr_block = value
83 elif name == 'gatewayId':
84 self.gateway_id = value
85 elif name == 'instanceId':
86 self.instance_id = value
87 elif name == 'networkInterfaceId':
88 self.interface_id = value
89 elif name == 'vpcPeeringConnectionId':
90 self.vpc_peering_connection_id = value
91 elif name == 'state':
92 self.state = value
93
94 class RouteAssociation(object):
95 def __init__(self, connection=None):
96 self.id = None
97 self.route_table_id = None
98 self.subnet_id = None
99 self.main = False
100
101 def __repr__(self):
102 return 'RouteAssociation:%s' % self.id
103
104 def startElement(self, name, attrs, connection):
105 return None
106
107 def endElement(self, name, value, connection):
108 if name == 'routeTableAssociationId':
109 self.id = value
110 elif name == 'routeTableId':
111 self.route_table_id = value
112 elif name == 'subnetId':
113 self.subnet_id = value
114 elif name == 'main':
115 self.main = value == 'true'
116
[end of boto/vpc/routetable.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/boto/vpc/routetable.py b/boto/vpc/routetable.py
--- a/boto/vpc/routetable.py
+++ b/boto/vpc/routetable.py
@@ -70,6 +70,7 @@
self.interface_id = None
self.vpc_peering_connection_id = None
self.state = None
+ self.origin = None
def __repr__(self):
return 'Route:%s' % self.destination_cidr_block
@@ -90,6 +91,8 @@
self.vpc_peering_connection_id = value
elif name == 'state':
self.state = value
+ elif name == 'origin':
+ self.origin = value
class RouteAssociation(object):
def __init__(self, connection=None):
| {"golden_diff": "diff --git a/boto/vpc/routetable.py b/boto/vpc/routetable.py\n--- a/boto/vpc/routetable.py\n+++ b/boto/vpc/routetable.py\n@@ -70,6 +70,7 @@\n self.interface_id = None\n self.vpc_peering_connection_id = None\n self.state = None\n+ self.origin = None\n \n def __repr__(self):\n return 'Route:%s' % self.destination_cidr_block\n@@ -90,6 +91,8 @@\n self.vpc_peering_connection_id = value\n elif name == 'state':\n self.state = value\n+ elif name == 'origin':\n+ self.origin = value\n \n class RouteAssociation(object):\n def __init__(self, connection=None):\n", "issue": "boto.vpc.routetable.Route does not contain route origin\nRouteSet responses will contain an origin for each route, but boto.vpc.routetable.Route is not populated with the route origin:\n\nExample response before:\n\n```\nIn [8]: vars(all_route_tables[1].routes[1])\nOut[8]:\n{'destination_cidr_block': u'0.0.0.0/0',\n 'gateway_id': None,\n 'instance_id': u'i-123',\n 'interface_id': u'eni-123',\n 'state': u'active',\n 'vpc_peering_connection_id': None}\n```\n\nAfter:\n\n```\nIn [25]: vars(all_route_tables[1].routes[1])\nOut[25]:\n{'destination_cidr_block': u'0.0.0.0/0',\n 'gateway_id': None,\n 'instance_id': u'i-123',\n 'interface_id': u'eni-123',\n 'origin': u'CreateRoute',\n 'state': u'active',\n 'vpc_peering_connection_id': None}\n```\n\n", "before_files": [{"content": "# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish, dis-\n# tribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the fol-\n# lowing conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-\n# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT\n# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n\n\"\"\"\nRepresents a Route Table\n\"\"\"\n\nfrom boto.ec2.ec2object import TaggedEC2Object\nfrom boto.resultset import ResultSet\n\nclass RouteTable(TaggedEC2Object):\n\n def __init__(self, connection=None):\n super(RouteTable, self).__init__(connection)\n self.id = None\n self.vpc_id = None\n self.routes = []\n self.associations = []\n\n def __repr__(self):\n return 'RouteTable:%s' % self.id\n\n def startElement(self, name, attrs, connection):\n result = super(RouteTable, self).startElement(name, attrs, connection)\n\n if result is not None:\n # Parent found an interested element, just return it\n return result\n\n if name == 'routeSet':\n self.routes = ResultSet([('item', Route)])\n return self.routes\n elif name == 'associationSet':\n self.associations = ResultSet([('item', RouteAssociation)])\n return self.associations\n else:\n return None\n\n def endElement(self, name, value, connection):\n if name == 'routeTableId':\n self.id = value\n elif name == 'vpcId':\n self.vpc_id = value\n else:\n setattr(self, name, value)\n\nclass Route(object):\n def __init__(self, connection=None):\n self.destination_cidr_block = None\n self.gateway_id = None\n self.instance_id = None\n self.interface_id = None\n self.vpc_peering_connection_id = None\n self.state = None\n\n def __repr__(self):\n return 'Route:%s' % self.destination_cidr_block\n\n def startElement(self, name, attrs, connection):\n return None\n\n def endElement(self, name, value, connection):\n if name == 'destinationCidrBlock':\n self.destination_cidr_block = value\n elif name == 'gatewayId':\n self.gateway_id = value\n elif name == 'instanceId':\n self.instance_id = value\n elif name == 'networkInterfaceId':\n self.interface_id = value\n elif name == 'vpcPeeringConnectionId':\n self.vpc_peering_connection_id = value\n elif name == 'state':\n self.state = value\n\nclass RouteAssociation(object):\n def __init__(self, connection=None):\n self.id = None\n self.route_table_id = None\n self.subnet_id = None\n self.main = False\n\n def __repr__(self):\n return 'RouteAssociation:%s' % self.id\n\n def startElement(self, name, attrs, connection):\n return None\n\n def endElement(self, name, value, connection):\n if name == 'routeTableAssociationId':\n self.id = value\n elif name == 'routeTableId':\n self.route_table_id = value\n elif name == 'subnetId':\n self.subnet_id = value\n elif name == 'main':\n self.main = value == 'true'\n", "path": "boto/vpc/routetable.py"}]} | 1,915 | 175 |
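The boto fix above is one more `elif` in a SAX-style `endElement()` dispatcher; below is a pared-down, runnable rendition of that pattern with `origin` included (the table-driven dispatch is a restructuring choice here — the real class uses an if/elif chain, and only a few of its fields are kept):

```python
class Route:
    """Reduced boto.vpc.routetable.Route: attribute mapping only,
    SAX plumbing and the remaining fields omitted."""

    _FIELDS = {
        "destinationCidrBlock": "destination_cidr_block",
        "gatewayId": "gateway_id",
        "state": "state",
        "origin": "origin",  # the field the golden diff adds
    }

    def __init__(self):
        for attr in self._FIELDS.values():
            setattr(self, attr, None)

    def endElement(self, name, value, connection=None):
        attr = self._FIELDS.get(name)
        if attr is not None:
            setattr(self, attr, value)


route = Route()
route.endElement("destinationCidrBlock", "0.0.0.0/0")
route.endElement("origin", "CreateRoute")
print(vars(route))  # now includes 'origin': 'CreateRoute', as the issue asks
```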
gh_patches_debug_24565 | rasdani/github-patches | git_diff | borgbackup__borg-4393 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
update bundled zstd code
we have 1.3.4 bundled; the current releases can be seen there: https://github.com/facebook/zstd/releases
</issue>
<code>
[start of setup_zstd.py]
1 # Support code for building a C extension with zstd files
2 #
3 # Copyright (c) 2016-present, Gregory Szorc
4 # 2017-present, Thomas Waldmann (mods to make it more generic)
5 # All rights reserved.
6 #
7 # This software may be modified and distributed under the terms
8 # of the BSD license. See the LICENSE file for details.
9
10 import os
11
12 # zstd files, structure as seen in zstd project repository:
13
14 zstd_sources = [
15 'lib/common/entropy_common.c',
16 'lib/common/error_private.c',
17 'lib/common/fse_decompress.c',
18 'lib/common/pool.c',
19 'lib/common/threading.c',
20 'lib/common/xxhash.c',
21 'lib/common/zstd_common.c',
22 'lib/compress/fse_compress.c',
23 'lib/compress/huf_compress.c',
24 'lib/compress/zstd_compress.c',
25 'lib/compress/zstd_double_fast.c',
26 'lib/compress/zstd_fast.c',
27 'lib/compress/zstd_lazy.c',
28 'lib/compress/zstd_ldm.c',
29 'lib/compress/zstd_opt.c',
30 'lib/compress/zstdmt_compress.c',
31 'lib/decompress/huf_decompress.c',
32 'lib/decompress/zstd_decompress.c',
33 'lib/dictBuilder/cover.c',
34 'lib/dictBuilder/divsufsort.c',
35 'lib/dictBuilder/zdict.c',
36 ]
37
38 zstd_sources_legacy = [
39 'lib/deprecated/zbuff_common.c',
40 'lib/deprecated/zbuff_compress.c',
41 'lib/deprecated/zbuff_decompress.c',
42 'lib/legacy/zstd_v01.c',
43 'lib/legacy/zstd_v02.c',
44 'lib/legacy/zstd_v03.c',
45 'lib/legacy/zstd_v04.c',
46 'lib/legacy/zstd_v05.c',
47 'lib/legacy/zstd_v06.c',
48 'lib/legacy/zstd_v07.c',
49 ]
50
51 zstd_includes = [
52 'lib',
53 'lib/common',
54 'lib/compress',
55 'lib/decompress',
56 'lib/dictBuilder',
57 ]
58
59 zstd_includes_legacy = [
60 'lib/deprecated',
61 'lib/legacy',
62 ]
63
64
65 def zstd_system_prefix(prefixes):
66 for prefix in prefixes:
67 filename = os.path.join(prefix, 'include', 'zstd.h')
68 if os.path.exists(filename):
69 with open(filename, 'rb') as fd:
70 if b'ZSTD_getFrameContentSize' in fd.read(): # checks for zstd >= 1.3.0
71 return prefix
72
73
74 def zstd_ext_kwargs(bundled_path, system_prefix=None, system=False, multithreaded=False, legacy=False, **kwargs):
75 """amend kwargs with zstd suff for a distutils.extension.Extension initialization.
76
77 bundled_path: relative (to this file) path to the bundled library source code files
78 system_prefix: where the system-installed library can be found
79 system: True: use the system-installed shared library, False: use the bundled library code
80 multithreaded: True: define ZSTD_MULTITHREAD
81 legacy: include legacy API support
82 kwargs: distutils.extension.Extension kwargs that should be amended
83 returns: amended kwargs
84 """
85 def multi_join(paths, *path_segments):
86 """apply os.path.join on a list of paths"""
87 return [os.path.join(*(path_segments + (path, ))) for path in paths]
88
89 use_system = system and system_prefix is not None
90
91 sources = kwargs.get('sources', [])
92 if not use_system:
93 sources += multi_join(zstd_sources, bundled_path)
94 if legacy:
95 sources += multi_join(zstd_sources_legacy, bundled_path)
96
97 include_dirs = kwargs.get('include_dirs', [])
98 if use_system:
99 include_dirs += multi_join(['include'], system_prefix)
100 else:
101 include_dirs += multi_join(zstd_includes, bundled_path)
102 if legacy:
103 include_dirs += multi_join(zstd_includes_legacy, bundled_path)
104
105 library_dirs = kwargs.get('library_dirs', [])
106 if use_system:
107 library_dirs += multi_join(['lib'], system_prefix)
108
109 libraries = kwargs.get('libraries', [])
110 if use_system:
111 libraries += ['zstd', ]
112
113 extra_compile_args = kwargs.get('extra_compile_args', [])
114 if multithreaded:
115 extra_compile_args += ['-DZSTD_MULTITHREAD', ]
116 if not use_system:
117 extra_compile_args += ['-DZSTDLIB_VISIBILITY=', '-DZDICTLIB_VISIBILITY=', '-DZSTDERRORLIB_VISIBILITY=', ]
118 # '-fvisibility=hidden' does not work, doesn't find PyInit_compress then
119 if legacy:
120 extra_compile_args += ['-DZSTD_LEGACY_SUPPORT=1', ]
121
122 ret = dict(**kwargs)
123 ret.update(dict(sources=sources, extra_compile_args=extra_compile_args,
124 include_dirs=include_dirs, library_dirs=library_dirs, libraries=libraries))
125 return ret
126
[end of setup_zstd.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup_zstd.py b/setup_zstd.py
--- a/setup_zstd.py
+++ b/setup_zstd.py
@@ -12,6 +12,7 @@
# zstd files, structure as seen in zstd project repository:
zstd_sources = [
+ 'lib/common/debug.c',
'lib/common/entropy_common.c',
'lib/common/error_private.c',
'lib/common/fse_decompress.c',
@@ -20,6 +21,7 @@
'lib/common/xxhash.c',
'lib/common/zstd_common.c',
'lib/compress/fse_compress.c',
+ 'lib/compress/hist.c',
'lib/compress/huf_compress.c',
'lib/compress/zstd_compress.c',
'lib/compress/zstd_double_fast.c',
@@ -29,9 +31,12 @@
'lib/compress/zstd_opt.c',
'lib/compress/zstdmt_compress.c',
'lib/decompress/huf_decompress.c',
+ 'lib/decompress/zstd_ddict.c',
'lib/decompress/zstd_decompress.c',
+ 'lib/decompress/zstd_decompress_block.c',
'lib/dictBuilder/cover.c',
'lib/dictBuilder/divsufsort.c',
+ 'lib/dictBuilder/fastcover.c',
'lib/dictBuilder/zdict.c',
]
| {"golden_diff": "diff --git a/setup_zstd.py b/setup_zstd.py\n--- a/setup_zstd.py\n+++ b/setup_zstd.py\n@@ -12,6 +12,7 @@\n # zstd files, structure as seen in zstd project repository:\n \n zstd_sources = [\n+ 'lib/common/debug.c',\n 'lib/common/entropy_common.c',\n 'lib/common/error_private.c',\n 'lib/common/fse_decompress.c',\n@@ -20,6 +21,7 @@\n 'lib/common/xxhash.c',\n 'lib/common/zstd_common.c',\n 'lib/compress/fse_compress.c',\n+ 'lib/compress/hist.c',\n 'lib/compress/huf_compress.c',\n 'lib/compress/zstd_compress.c',\n 'lib/compress/zstd_double_fast.c',\n@@ -29,9 +31,12 @@\n 'lib/compress/zstd_opt.c',\n 'lib/compress/zstdmt_compress.c',\n 'lib/decompress/huf_decompress.c',\n+ 'lib/decompress/zstd_ddict.c',\n 'lib/decompress/zstd_decompress.c',\n+ 'lib/decompress/zstd_decompress_block.c',\n 'lib/dictBuilder/cover.c',\n 'lib/dictBuilder/divsufsort.c',\n+ 'lib/dictBuilder/fastcover.c',\n 'lib/dictBuilder/zdict.c',\n ]\n", "issue": "update bundled zstd code\nwe have 1.3.4 bundled, current see there: https://github.com/facebook/zstd/releases\n", "before_files": [{"content": "# Support code for building a C extension with zstd files\n#\n# Copyright (c) 2016-present, Gregory Szorc\n# 2017-present, Thomas Waldmann (mods to make it more generic)\n# All rights reserved.\n#\n# This software may be modified and distributed under the terms\n# of the BSD license. See the LICENSE file for details.\n\nimport os\n\n# zstd files, structure as seen in zstd project repository:\n\nzstd_sources = [\n 'lib/common/entropy_common.c',\n 'lib/common/error_private.c',\n 'lib/common/fse_decompress.c',\n 'lib/common/pool.c',\n 'lib/common/threading.c',\n 'lib/common/xxhash.c',\n 'lib/common/zstd_common.c',\n 'lib/compress/fse_compress.c',\n 'lib/compress/huf_compress.c',\n 'lib/compress/zstd_compress.c',\n 'lib/compress/zstd_double_fast.c',\n 'lib/compress/zstd_fast.c',\n 'lib/compress/zstd_lazy.c',\n 'lib/compress/zstd_ldm.c',\n 'lib/compress/zstd_opt.c',\n 'lib/compress/zstdmt_compress.c',\n 'lib/decompress/huf_decompress.c',\n 'lib/decompress/zstd_decompress.c',\n 'lib/dictBuilder/cover.c',\n 'lib/dictBuilder/divsufsort.c',\n 'lib/dictBuilder/zdict.c',\n]\n\nzstd_sources_legacy = [\n 'lib/deprecated/zbuff_common.c',\n 'lib/deprecated/zbuff_compress.c',\n 'lib/deprecated/zbuff_decompress.c',\n 'lib/legacy/zstd_v01.c',\n 'lib/legacy/zstd_v02.c',\n 'lib/legacy/zstd_v03.c',\n 'lib/legacy/zstd_v04.c',\n 'lib/legacy/zstd_v05.c',\n 'lib/legacy/zstd_v06.c',\n 'lib/legacy/zstd_v07.c',\n]\n\nzstd_includes = [\n 'lib',\n 'lib/common',\n 'lib/compress',\n 'lib/decompress',\n 'lib/dictBuilder',\n]\n\nzstd_includes_legacy = [\n 'lib/deprecated',\n 'lib/legacy',\n]\n\n\ndef zstd_system_prefix(prefixes):\n for prefix in prefixes:\n filename = os.path.join(prefix, 'include', 'zstd.h')\n if os.path.exists(filename):\n with open(filename, 'rb') as fd:\n if b'ZSTD_getFrameContentSize' in fd.read(): # checks for zstd >= 1.3.0\n return prefix\n\n\ndef zstd_ext_kwargs(bundled_path, system_prefix=None, system=False, multithreaded=False, legacy=False, **kwargs):\n \"\"\"amend kwargs with zstd suff for a distutils.extension.Extension initialization.\n\n bundled_path: relative (to this file) path to the bundled library source code files\n system_prefix: where the system-installed library can be found\n system: True: use the system-installed shared library, False: use the bundled library code\n multithreaded: True: define ZSTD_MULTITHREAD\n legacy: include legacy API support\n kwargs: distutils.extension.Extension kwargs that should be 
amended\n returns: amended kwargs\n \"\"\"\n def multi_join(paths, *path_segments):\n \"\"\"apply os.path.join on a list of paths\"\"\"\n return [os.path.join(*(path_segments + (path, ))) for path in paths]\n\n use_system = system and system_prefix is not None\n\n sources = kwargs.get('sources', [])\n if not use_system:\n sources += multi_join(zstd_sources, bundled_path)\n if legacy:\n sources += multi_join(zstd_sources_legacy, bundled_path)\n\n include_dirs = kwargs.get('include_dirs', [])\n if use_system:\n include_dirs += multi_join(['include'], system_prefix)\n else:\n include_dirs += multi_join(zstd_includes, bundled_path)\n if legacy:\n include_dirs += multi_join(zstd_includes_legacy, bundled_path)\n\n library_dirs = kwargs.get('library_dirs', [])\n if use_system:\n library_dirs += multi_join(['lib'], system_prefix)\n\n libraries = kwargs.get('libraries', [])\n if use_system:\n libraries += ['zstd', ]\n\n extra_compile_args = kwargs.get('extra_compile_args', [])\n if multithreaded:\n extra_compile_args += ['-DZSTD_MULTITHREAD', ]\n if not use_system:\n extra_compile_args += ['-DZSTDLIB_VISIBILITY=', '-DZDICTLIB_VISIBILITY=', '-DZSTDERRORLIB_VISIBILITY=', ]\n # '-fvisibility=hidden' does not work, doesn't find PyInit_compress then\n if legacy:\n extra_compile_args += ['-DZSTD_LEGACY_SUPPORT=1', ]\n\n ret = dict(**kwargs)\n ret.update(dict(sources=sources, extra_compile_args=extra_compile_args,\n include_dirs=include_dirs, library_dirs=library_dirs, libraries=libraries))\n return ret\n", "path": "setup_zstd.py"}]} | 1,936 | 302 |
gh_patches_debug_2260 | rasdani/github-patches | git_diff | googleapis__python-bigquery-859 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Increase default timeout of retry objects to 10 minutes
Per internal issue 195337762, the general timeout for jobs.insert API is 4 minutes. We should increase our default deadline to 10 minutes to allow for at least 1 retry if the first request fails.
</issue>
<code>
[start of google/cloud/bigquery/retry.py]
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from google.api_core import exceptions
16 from google.api_core import retry
17 from google.auth import exceptions as auth_exceptions
18 import requests.exceptions
19
20
21 _RETRYABLE_REASONS = frozenset(
22 ["rateLimitExceeded", "backendError", "internalError", "badGateway"]
23 )
24
25 _UNSTRUCTURED_RETRYABLE_TYPES = (
26 ConnectionError,
27 exceptions.TooManyRequests,
28 exceptions.InternalServerError,
29 exceptions.BadGateway,
30 requests.exceptions.ChunkedEncodingError,
31 requests.exceptions.ConnectionError,
32 auth_exceptions.TransportError,
33 )
34
35
36 def _should_retry(exc):
37 """Predicate for determining when to retry.
38
39 We retry if and only if the 'reason' is 'backendError'
40 or 'rateLimitExceeded'.
41 """
42 if not hasattr(exc, "errors") or len(exc.errors) == 0:
43 # Check for unstructured error returns, e.g. from GFE
44 return isinstance(exc, _UNSTRUCTURED_RETRYABLE_TYPES)
45
46 reason = exc.errors[0]["reason"]
47 return reason in _RETRYABLE_REASONS
48
49
50 DEFAULT_RETRY = retry.Retry(predicate=_should_retry)
51 """The default retry object.
52
53 Any method with a ``retry`` parameter will be retried automatically,
54 with reasonable defaults. To disable retry, pass ``retry=None``.
55 To modify the default retry behavior, call a ``with_XXX`` method
56 on ``DEFAULT_RETRY``. For example, to change the deadline to 30 seconds,
57 pass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``.
58 """
59
[end of google/cloud/bigquery/retry.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/google/cloud/bigquery/retry.py b/google/cloud/bigquery/retry.py
--- a/google/cloud/bigquery/retry.py
+++ b/google/cloud/bigquery/retry.py
@@ -47,7 +47,7 @@
return reason in _RETRYABLE_REASONS
-DEFAULT_RETRY = retry.Retry(predicate=_should_retry)
+DEFAULT_RETRY = retry.Retry(predicate=_should_retry, deadline=600.0)
"""The default retry object.
Any method with a ``retry`` parameter will be retried automatically,
| {"golden_diff": "diff --git a/google/cloud/bigquery/retry.py b/google/cloud/bigquery/retry.py\n--- a/google/cloud/bigquery/retry.py\n+++ b/google/cloud/bigquery/retry.py\n@@ -47,7 +47,7 @@\n return reason in _RETRYABLE_REASONS\n \n \n-DEFAULT_RETRY = retry.Retry(predicate=_should_retry)\n+DEFAULT_RETRY = retry.Retry(predicate=_should_retry, deadline=600.0)\n \"\"\"The default retry object.\n \n Any method with a ``retry`` parameter will be retried automatically,\n", "issue": "Increase default timeout of retry objects to 10 minutes\nPer internal issue 195337762, the general timeout for jobs.insert API is 4 minutes. We should increase our default deadline to 10 minutes to allow for at least 1 retry if the first request fails.\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom google.api_core import exceptions\nfrom google.api_core import retry\nfrom google.auth import exceptions as auth_exceptions\nimport requests.exceptions\n\n\n_RETRYABLE_REASONS = frozenset(\n [\"rateLimitExceeded\", \"backendError\", \"internalError\", \"badGateway\"]\n)\n\n_UNSTRUCTURED_RETRYABLE_TYPES = (\n ConnectionError,\n exceptions.TooManyRequests,\n exceptions.InternalServerError,\n exceptions.BadGateway,\n requests.exceptions.ChunkedEncodingError,\n requests.exceptions.ConnectionError,\n auth_exceptions.TransportError,\n)\n\n\ndef _should_retry(exc):\n \"\"\"Predicate for determining when to retry.\n\n We retry if and only if the 'reason' is 'backendError'\n or 'rateLimitExceeded'.\n \"\"\"\n if not hasattr(exc, \"errors\") or len(exc.errors) == 0:\n # Check for unstructured error returns, e.g. from GFE\n return isinstance(exc, _UNSTRUCTURED_RETRYABLE_TYPES)\n\n reason = exc.errors[0][\"reason\"]\n return reason in _RETRYABLE_REASONS\n\n\nDEFAULT_RETRY = retry.Retry(predicate=_should_retry)\n\"\"\"The default retry object.\n\nAny method with a ``retry`` parameter will be retried automatically,\nwith reasonable defaults. To disable retry, pass ``retry=None``.\nTo modify the default retry behavior, call a ``with_XXX`` method\non ``DEFAULT_RETRY``. For example, to change the deadline to 30 seconds,\npass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``.\n\"\"\"\n", "path": "google/cloud/bigquery/retry.py"}]} | 1,172 | 119 |
gh_patches_debug_24323 | rasdani/github-patches | git_diff | cltk__cltk-1116 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Lexicon process for Latin fails on regex special characters
LatinLexiconProcess fails when regex special characters, e.g. single open parenthesis (i.e. ```(```) are included in tokenized input. Occurred while running MacOS 11.4; Python 3.9.5; CLTK 1.0.15; regex 2021.4.4 (but should fail in any case when this input is passed to the regex module). The solution is to escape the input ```lemma``` before running ```regex.match``` at https://github.com/cltk/cltk/blob/5dbfcf6fccade146d322cae036b35533aec59286/src/cltk/lexicon/lat.py#L70
I have written the patch and will make a PR soon.
Example and traceback:
```
from cltk import NLP
text = "Omnes igitur partes mundi (tangam autem maximas) calore fultae sustinentur." # Cic. Nat. D. 2.25
cltk_nlp = NLP(language="lat")
cltk_doc = cltk_nlp.analyze(text=test)
```
```
Traceback (most recent call last):
File "test.py", line 4, in <module>
cltk_doc = cltk_nlp.analyze(text=text)
File "[PATH]/lib/python3.9/site-packages/cltk/nlp.py", line 142, in analyze
doc = a_process.run(doc)
File "[PATH]/lib/python3.9/site-packages/cltk/lexicon/processes.py", line 45, in run
word.definition = lookup_algo.lookup(word.lemma)
File "[PATH]/lib/python3.9/site-packages/cltk/lexicon/lat.py", line 70, in lookup
matches = [key for key in keys if regex.match(rf"^{lemma}[0-9]?$", key)]
File "[PATH]/lib/python3.9/site-packages/cltk/lexicon/lat.py", line 70, in <listcomp>
matches = [key for key in keys if regex.match(rf"^{lemma}[0-9]?$", key)]
File "[PATH]/lib/python3.9/site-packages/regex/regex.py", line 253, in match
pat = _compile(pattern, flags, ignore_unused, kwargs, True)
File "[PATH]/lib/python3.9/site-packages/regex/regex.py", line 532, in _compile
raise error(caught_exception.msg, caught_exception.pattern,
regex._regex_core.error: missing ) at position 9
```
</issue>
<code>
[start of src/cltk/lexicon/lat.py]
1 """Code for querying Latin language dictionaries/lexicons."""
2
3 import regex
4 import yaml
5
6 from cltk.core.exceptions import CLTKException
7 from cltk.data.fetch import FetchCorpus
8 from cltk.utils.file_operations import make_cltk_path
9 from cltk.utils.utils import query_yes_no
10
11 __author__ = ["Clément Besnier <[email protected]>"]
12
13
14 class LatinLewisLexicon:
15 """Access a digital form of Charlton T. Lewis's *An Elementary Latin Dictionary* (1890)."""
16
17 def __init__(self, interactive: bool = True):
18 self.interactive = interactive
19 self.lewis_yaml_fp = make_cltk_path(
20 "lat", "lexicon", "cltk_lat_lewis_elementary_lexicon", "lewis.yaml"
21 )
22 try:
23 self.entries = self._load_entries()
24 except FileNotFoundError:
25 if self.interactive:
26 dl_msg = f"This part of the CLTK depends upon Lewis's *An Elementary Latin Dictionary* (1890)."
27 print(dl_msg)
28 dl_question = "Do you want to download this?"
29 do_download = query_yes_no(question=dl_question)
30 else:
31 do_download = True
32 if do_download:
33 fetch_corpus = FetchCorpus(language="lat")
34 fetch_corpus.import_corpus(
35 corpus_name="cltk_lat_lewis_elementary_lexicon"
36 )
37 else:
38 raise CLTKException(
39 f"File '{self.lewis_yaml_fp}' is not found. It is required for this class."
40 )
41 self.entries = self._load_entries()
42
43 def lookup(self, lemma: str) -> str:
44 """Perform match of a lemma against headwords. If more than one match,
45 then return the concatenated entries. For example:
46
47 >>> lll = LatinLewisLexicon()
48 >>> lll.lookup("clemens")[:50]
49 'clēmēns entis (abl. -tī; rarely -te, L.), adj. wit'
50 >>> lll.lookup("omnia")
51 ''
52 >>> lll.lookup(".")
53 ''
54 >>> lll.lookup("123")
55 ''
56 >>> lll.lookup("175.")
57 ''
58 """
59 if not self.entries:
60 raise CLTKException(
61 "No lexicon entries found in the .yaml file. This should never happen."
62 )
63
64 if regex.match(r"^[0-9\.\?,\:;\!\<\>\-]*$", lemma) is not None:
65 return ""
66
67 lemma = lemma.lower()
68
69 keys = self.entries.keys()
70 matches = [key for key in keys if regex.match(rf"^{lemma}[0-9]?$", key)]
71 n_matches = len(matches)
72 if n_matches > 1:
73 return "\n".join([self.entries[key] for key in matches])
74 elif n_matches == 1:
75 return self.entries[matches[0]]
76 else:
77 return ""
78
79 def _load_entries(self):
80 """Read the yaml file of the lexion."""
81 with open(self.lewis_yaml_fp) as file_open:
82 entries = yaml.load(file_open, Loader=yaml.Loader)
83 return entries
84
[end of src/cltk/lexicon/lat.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cltk/lexicon/lat.py b/src/cltk/lexicon/lat.py
--- a/src/cltk/lexicon/lat.py
+++ b/src/cltk/lexicon/lat.py
@@ -47,6 +47,8 @@
>>> lll = LatinLewisLexicon()
>>> lll.lookup("clemens")[:50]
'clēmēns entis (abl. -tī; rarely -te, L.), adj. wit'
+ >>> all(word in lll.lookup("levis") for word in ["levis","lēvis"]) # Test for concatenated entries
+ True
>>> lll.lookup("omnia")
''
>>> lll.lookup(".")
@@ -55,6 +57,8 @@
''
>>> lll.lookup("175.")
''
+ >>> lll.lookup("(") # Test for regex special character
+ ''
"""
if not self.entries:
raise CLTKException(
@@ -64,7 +68,7 @@
if regex.match(r"^[0-9\.\?,\:;\!\<\>\-]*$", lemma) is not None:
return ""
- lemma = lemma.lower()
+ lemma = regex.escape(lemma.lower())
keys = self.entries.keys()
matches = [key for key in keys if regex.match(rf"^{lemma}[0-9]?$", key)]
| {"golden_diff": "diff --git a/src/cltk/lexicon/lat.py b/src/cltk/lexicon/lat.py\n--- a/src/cltk/lexicon/lat.py\n+++ b/src/cltk/lexicon/lat.py\n@@ -47,6 +47,8 @@\n >>> lll = LatinLewisLexicon()\n >>> lll.lookup(\"clemens\")[:50]\n 'cl\u0113m\u0113ns entis (abl. -t\u012b; rarely -te, L.), adj. wit'\n+ >>> all(word in lll.lookup(\"levis\") for word in [\"levis\",\"l\u0113vis\"]) # Test for concatenated entries\n+ True\n >>> lll.lookup(\"omnia\")\n ''\n >>> lll.lookup(\".\")\n@@ -55,6 +57,8 @@\n ''\n >>> lll.lookup(\"175.\")\n ''\n+ >>> lll.lookup(\"(\") # Test for regex special character\n+ ''\n \"\"\"\n if not self.entries:\n raise CLTKException(\n@@ -64,7 +68,7 @@\n if regex.match(r\"^[0-9\\.\\?,\\:;\\!\\<\\>\\-]*$\", lemma) is not None:\n return \"\"\n \n- lemma = lemma.lower()\n+ lemma = regex.escape(lemma.lower())\n \n keys = self.entries.keys()\n matches = [key for key in keys if regex.match(rf\"^{lemma}[0-9]?$\", key)]\n", "issue": "Lexicon process for Latin fails on regex special characters\nLatinLexiconProcess fails when regex special characters, e.g. single open parenthesis (i.e. ```(```) are included in tokenized input. Occurred while running MacOS 11.4; Python 3.9.5; CLTK 1.0.15; regex 2021.4.4 (but should fail in any case when this input is passed to the regex module). The solution is to escape the input ```lemma``` before running ```regex.match``` at https://github.com/cltk/cltk/blob/5dbfcf6fccade146d322cae036b35533aec59286/src/cltk/lexicon/lat.py#L70\r\n\r\nI have written the patch and will make a PR soon.\r\n\r\nExample and traceback:\r\n\r\n```\r\nfrom cltk import NLP\r\ntext = \"Omnes igitur partes mundi (tangam autem maximas) calore fultae sustinentur.\" # Cic. Nat. D. 2.25\r\ncltk_nlp = NLP(language=\"lat\")\r\ncltk_doc = cltk_nlp.analyze(text=test)\r\n```\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"test.py\", line 4, in <module>\r\n cltk_doc = cltk_nlp.analyze(text=text)\r\n File \"[PATH]/lib/python3.9/site-packages/cltk/nlp.py\", line 142, in analyze\r\n doc = a_process.run(doc)\r\n File \"[PATH]/lib/python3.9/site-packages/cltk/lexicon/processes.py\", line 45, in run\r\n word.definition = lookup_algo.lookup(word.lemma)\r\n File \"[PATH]/lib/python3.9/site-packages/cltk/lexicon/lat.py\", line 70, in lookup\r\n matches = [key for key in keys if regex.match(rf\"^{lemma}[0-9]?$\", key)]\r\n File \"[PATH]/lib/python3.9/site-packages/cltk/lexicon/lat.py\", line 70, in <listcomp>\r\n matches = [key for key in keys if regex.match(rf\"^{lemma}[0-9]?$\", key)]\r\n File \"[PATH]/lib/python3.9/site-packages/regex/regex.py\", line 253, in match\r\n pat = _compile(pattern, flags, ignore_unused, kwargs, True)\r\n File \"[PATH]/lib/python3.9/site-packages/regex/regex.py\", line 532, in _compile\r\n raise error(caught_exception.msg, caught_exception.pattern,\r\nregex._regex_core.error: missing ) at position 9\r\n```\n", "before_files": [{"content": "\"\"\"Code for querying Latin language dictionaries/lexicons.\"\"\"\n\nimport regex\nimport yaml\n\nfrom cltk.core.exceptions import CLTKException\nfrom cltk.data.fetch import FetchCorpus\nfrom cltk.utils.file_operations import make_cltk_path\nfrom cltk.utils.utils import query_yes_no\n\n__author__ = [\"Cl\u00e9ment Besnier <[email protected]>\"]\n\n\nclass LatinLewisLexicon:\n \"\"\"Access a digital form of Charlton T. 
Lewis's *An Elementary Latin Dictionary* (1890).\"\"\"\n\n def __init__(self, interactive: bool = True):\n self.interactive = interactive\n self.lewis_yaml_fp = make_cltk_path(\n \"lat\", \"lexicon\", \"cltk_lat_lewis_elementary_lexicon\", \"lewis.yaml\"\n )\n try:\n self.entries = self._load_entries()\n except FileNotFoundError:\n if self.interactive:\n dl_msg = f\"This part of the CLTK depends upon Lewis's *An Elementary Latin Dictionary* (1890).\"\n print(dl_msg)\n dl_question = \"Do you want to download this?\"\n do_download = query_yes_no(question=dl_question)\n else:\n do_download = True\n if do_download:\n fetch_corpus = FetchCorpus(language=\"lat\")\n fetch_corpus.import_corpus(\n corpus_name=\"cltk_lat_lewis_elementary_lexicon\"\n )\n else:\n raise CLTKException(\n f\"File '{self.lewis_yaml_fp}' is not found. It is required for this class.\"\n )\n self.entries = self._load_entries()\n\n def lookup(self, lemma: str) -> str:\n \"\"\"Perform match of a lemma against headwords. If more than one match,\n then return the concatenated entries. For example:\n\n >>> lll = LatinLewisLexicon()\n >>> lll.lookup(\"clemens\")[:50]\n 'cl\u0113m\u0113ns entis (abl. -t\u012b; rarely -te, L.), adj. wit'\n >>> lll.lookup(\"omnia\")\n ''\n >>> lll.lookup(\".\")\n ''\n >>> lll.lookup(\"123\")\n ''\n >>> lll.lookup(\"175.\")\n ''\n \"\"\"\n if not self.entries:\n raise CLTKException(\n \"No lexicon entries found in the .yaml file. This should never happen.\"\n )\n\n if regex.match(r\"^[0-9\\.\\?,\\:;\\!\\<\\>\\-]*$\", lemma) is not None:\n return \"\"\n\n lemma = lemma.lower()\n\n keys = self.entries.keys()\n matches = [key for key in keys if regex.match(rf\"^{lemma}[0-9]?$\", key)]\n n_matches = len(matches)\n if n_matches > 1:\n return \"\\n\".join([self.entries[key] for key in matches])\n elif n_matches == 1:\n return self.entries[matches[0]]\n else:\n return \"\"\n\n def _load_entries(self):\n \"\"\"Read the yaml file of the lexion.\"\"\"\n with open(self.lewis_yaml_fp) as file_open:\n entries = yaml.load(file_open, Loader=yaml.Loader)\n return entries\n", "path": "src/cltk/lexicon/lat.py"}]} | 1,974 | 315 |
gh_patches_debug_1348 | rasdani/github-patches | git_diff | translate__pootle-5024 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Exception in terminology management view
When visiting https://mozilla.locamotion.org/eu/firefox/terminology/ the following exception is thrown:
`'SortedRelatedManager' object does not support indexing`
</issue>
<code>
[start of pootle/apps/pootle_terminology/views.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 from django.core.urlresolvers import reverse
10 from django.shortcuts import render
11
12 from pootle.core.decorators import get_path_obj, permission_required
13 from pootle_app.views.admin import util
14 from pootle_store.models import Store, Unit
15
16 from .forms import term_unit_form_factory
17
18
19 def get_terminology_filename(translation_project):
20 try:
21 # See if a terminology store already exists
22 return translation_project.stores.live().filter(
23 name__startswith='pootle-terminology.',
24 ).values_list('name', flat=True)[0]
25 except IndexError:
26 pass
27
28 return (
29 'pootle-terminology.%s'
30 % translation_project.project.filetypes[0].extension)
31
32
33 def manage_store(request, ctx, language, term_store):
34 TermUnitForm = term_unit_form_factory(term_store)
35 template_name = 'translation_projects/terminology/manage.html'
36 return util.edit(request, template_name, Unit, ctx,
37 None, None, queryset=term_store.units, can_delete=True,
38 form=TermUnitForm)
39
40
41 @get_path_obj
42 @permission_required('administrate')
43 def manage(request, translation_project):
44 ctx = {
45 'page': 'admin-terminology',
46
47 'browse_url': reverse('pootle-tp-browse', kwargs={
48 'language_code': translation_project.language.code,
49 'project_code': translation_project.project.code,
50 }),
51 'translate_url': reverse('pootle-tp-translate', kwargs={
52 'language_code': translation_project.language.code,
53 'project_code': translation_project.project.code,
54 }),
55
56 'translation_project': translation_project,
57 'language': translation_project.language,
58 'project': translation_project.project,
59 'source_language': translation_project.project.source_language,
60 'directory': translation_project.directory,
61 }
62
63 if translation_project.project.is_terminology:
64 # Which file should we edit?
65 stores = list(Store.objects.live().filter(
66 translation_project=translation_project,
67 ))
68 if len(stores) == 1:
69 # There is only one, and we're not going to offer file-level
70 # activities, so let's just edit the one that is there.
71 return manage_store(request, ctx, ctx['language'], stores[0])
72 elif len(stores) > 1:
73 for store in stores:
74 path_length = len(translation_project.pootle_path)
75 store.nice_name = store.pootle_path[path_length:]
76
77 ctx['stores'] = stores
78 return render(request,
79 "translation_projects/terminology/stores.html", ctx)
80
81 try:
82 terminology_filename = get_terminology_filename(translation_project)
83 term_store = Store.objects.get(
84 pootle_path=translation_project.pootle_path + terminology_filename,
85 )
86 return manage_store(request, ctx, ctx['language'], term_store)
87 except Store.DoesNotExist:
88 return render(request, "translation_projects/terminology/manage.html",
89 ctx)
90
[end of pootle/apps/pootle_terminology/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/apps/pootle_terminology/views.py b/pootle/apps/pootle_terminology/views.py
--- a/pootle/apps/pootle_terminology/views.py
+++ b/pootle/apps/pootle_terminology/views.py
@@ -27,7 +27,7 @@
return (
'pootle-terminology.%s'
- % translation_project.project.filetypes[0].extension)
+ % translation_project.project.filetypes.first().extension)
def manage_store(request, ctx, language, term_store):
| {"golden_diff": "diff --git a/pootle/apps/pootle_terminology/views.py b/pootle/apps/pootle_terminology/views.py\n--- a/pootle/apps/pootle_terminology/views.py\n+++ b/pootle/apps/pootle_terminology/views.py\n@@ -27,7 +27,7 @@\n \n return (\n 'pootle-terminology.%s'\n- % translation_project.project.filetypes[0].extension)\n+ % translation_project.project.filetypes.first().extension)\n \n \n def manage_store(request, ctx, language, term_store):\n", "issue": "Exception in terminology management view\nWhen visiting https://mozilla.locamotion.org/eu/firefox/terminology/ the following exception is thrown:\n\n`'SortedRelatedManager' object does not support indexing`\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import render\n\nfrom pootle.core.decorators import get_path_obj, permission_required\nfrom pootle_app.views.admin import util\nfrom pootle_store.models import Store, Unit\n\nfrom .forms import term_unit_form_factory\n\n\ndef get_terminology_filename(translation_project):\n try:\n # See if a terminology store already exists\n return translation_project.stores.live().filter(\n name__startswith='pootle-terminology.',\n ).values_list('name', flat=True)[0]\n except IndexError:\n pass\n\n return (\n 'pootle-terminology.%s'\n % translation_project.project.filetypes[0].extension)\n\n\ndef manage_store(request, ctx, language, term_store):\n TermUnitForm = term_unit_form_factory(term_store)\n template_name = 'translation_projects/terminology/manage.html'\n return util.edit(request, template_name, Unit, ctx,\n None, None, queryset=term_store.units, can_delete=True,\n form=TermUnitForm)\n\n\n@get_path_obj\n@permission_required('administrate')\ndef manage(request, translation_project):\n ctx = {\n 'page': 'admin-terminology',\n\n 'browse_url': reverse('pootle-tp-browse', kwargs={\n 'language_code': translation_project.language.code,\n 'project_code': translation_project.project.code,\n }),\n 'translate_url': reverse('pootle-tp-translate', kwargs={\n 'language_code': translation_project.language.code,\n 'project_code': translation_project.project.code,\n }),\n\n 'translation_project': translation_project,\n 'language': translation_project.language,\n 'project': translation_project.project,\n 'source_language': translation_project.project.source_language,\n 'directory': translation_project.directory,\n }\n\n if translation_project.project.is_terminology:\n # Which file should we edit?\n stores = list(Store.objects.live().filter(\n translation_project=translation_project,\n ))\n if len(stores) == 1:\n # There is only one, and we're not going to offer file-level\n # activities, so let's just edit the one that is there.\n return manage_store(request, ctx, ctx['language'], stores[0])\n elif len(stores) > 1:\n for store in stores:\n path_length = len(translation_project.pootle_path)\n store.nice_name = store.pootle_path[path_length:]\n\n ctx['stores'] = stores\n return render(request,\n \"translation_projects/terminology/stores.html\", ctx)\n\n try:\n terminology_filename = get_terminology_filename(translation_project)\n term_store = Store.objects.get(\n pootle_path=translation_project.pootle_path + terminology_filename,\n )\n return manage_store(request, ctx, 
ctx['language'], term_store)\n except Store.DoesNotExist:\n return render(request, \"translation_projects/terminology/manage.html\",\n ctx)\n", "path": "pootle/apps/pootle_terminology/views.py"}]} | 1,467 | 126 |
gh_patches_debug_23775 | rasdani/github-patches | git_diff | bridgecrewio__checkov-1102 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error while checking Dockerfile USER set with env variable
**Describe the bug**
The **checkov** CLI return an error when analyzing a Dockerfile with USER set according to an ENV variable.
**To Reproduce**
Steps to reproduce the behavior:
1. Get this snippet :
```Dockerfile
FROM python:alpine
WORKDIR /app
COPY requirements.txt .
RUN pip install -r requirements.txt
ENV USERNAME=app
RUN addgroup -S ${USERNAME} && adduser -s /sbin/nologin -S ${USERNAME} -G ${USERNAME} && chown -R ${USERNAME} /app
USER ${USERNAME}
COPY --chown=${USERNAME} script.py .
CMD python3 script.py
```
2. Run cli command 'checkov -f Dockerfile'
3. See error
**Expected behavior**
No error.
**Screenshots**
<img width="750" alt="" src="https://user-images.githubusercontent.com/44492274/115271564-c380b080-a13d-11eb-9c4d-cb086e3bd9fd.png">
**Desktop (please complete the following information):**
- OS: macOS Big Sur 11.2.3
- Checkov Version 2.0.55
</issue>
<code>
[start of checkov/dockerfile/checks/RootUser.py]
1 from checkov.common.models.enums import CheckCategories, CheckResult
2 from checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck
3
4
5 class RootUser(BaseDockerfileCheck):
6 def __init__(self):
7 name = "Ensure the last USER is not root"
8 id = "CKV_DOCKER_8"
9 supported_instructions = ["USER"]
10 categories = [CheckCategories.APPLICATION_SECURITY]
11 super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)
12
13 def scan_entity_conf(self, conf):
14 contents = conf.get("USER")
15
16 if contents:
17 last_user = contents[-1]
18 if last_user["value"] == "root":
19 return CheckResult.FAILED, last_user
20
21 return CheckResult.PASSED, last_user
22
23 return CheckResult.UNKNOWN, None
24
25
26 check = RootUser()
27
[end of checkov/dockerfile/checks/RootUser.py]
[start of checkov/dockerfile/checks/MaintainerExists.py]
1 from checkov.common.models.enums import CheckCategories, CheckResult
2 from checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck
3
4
5 class MaintainerExists(BaseDockerfileCheck):
6 def __init__(self):
7 name = "Ensure that LABEL maintainer is used instead of MAINTAINER (deprecated)"
8 id = "CKV_DOCKER_6"
9 supported_instructions = ["MAINTAINER"]
10 categories = [CheckCategories.CONVENTION]
11 super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)
12
13 def scan_entity_conf(self, conf):
14 for instruction, content in conf.items():
15 if instruction == "MAINTAINER":
16 return CheckResult.FAILED, content[0]
17 return CheckResult.PASSED, None
18
19
20 check = MaintainerExists()
21
[end of checkov/dockerfile/checks/MaintainerExists.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/dockerfile/checks/MaintainerExists.py b/checkov/dockerfile/checks/MaintainerExists.py
--- a/checkov/dockerfile/checks/MaintainerExists.py
+++ b/checkov/dockerfile/checks/MaintainerExists.py
@@ -11,10 +11,7 @@
super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)
def scan_entity_conf(self, conf):
- for instruction, content in conf.items():
- if instruction == "MAINTAINER":
- return CheckResult.FAILED, content[0]
- return CheckResult.PASSED, None
+ return CheckResult.FAILED, conf[0]
check = MaintainerExists()
diff --git a/checkov/dockerfile/checks/RootUser.py b/checkov/dockerfile/checks/RootUser.py
--- a/checkov/dockerfile/checks/RootUser.py
+++ b/checkov/dockerfile/checks/RootUser.py
@@ -11,16 +11,11 @@
super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)
def scan_entity_conf(self, conf):
- contents = conf.get("USER")
+ last_user = conf[-1]
+ if last_user["value"] == "root":
+ return CheckResult.FAILED, last_user
- if contents:
- last_user = contents[-1]
- if last_user["value"] == "root":
- return CheckResult.FAILED, last_user
-
- return CheckResult.PASSED, last_user
-
- return CheckResult.UNKNOWN, None
+ return CheckResult.PASSED, last_user
check = RootUser()
| {"golden_diff": "diff --git a/checkov/dockerfile/checks/MaintainerExists.py b/checkov/dockerfile/checks/MaintainerExists.py\n--- a/checkov/dockerfile/checks/MaintainerExists.py\n+++ b/checkov/dockerfile/checks/MaintainerExists.py\n@@ -11,10 +11,7 @@\n super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)\n \n def scan_entity_conf(self, conf):\n- for instruction, content in conf.items():\n- if instruction == \"MAINTAINER\":\n- return CheckResult.FAILED, content[0]\n- return CheckResult.PASSED, None\n+ return CheckResult.FAILED, conf[0]\n \n \n check = MaintainerExists()\ndiff --git a/checkov/dockerfile/checks/RootUser.py b/checkov/dockerfile/checks/RootUser.py\n--- a/checkov/dockerfile/checks/RootUser.py\n+++ b/checkov/dockerfile/checks/RootUser.py\n@@ -11,16 +11,11 @@\n super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)\n \n def scan_entity_conf(self, conf):\n- contents = conf.get(\"USER\")\n+ last_user = conf[-1]\n+ if last_user[\"value\"] == \"root\":\n+ return CheckResult.FAILED, last_user\n \n- if contents:\n- last_user = contents[-1]\n- if last_user[\"value\"] == \"root\":\n- return CheckResult.FAILED, last_user\n-\n- return CheckResult.PASSED, last_user\n-\n- return CheckResult.UNKNOWN, None\n+ return CheckResult.PASSED, last_user\n \n \n check = RootUser()\n", "issue": "Error while checking Dockerfile USER set with env variable\n**Describe the bug**\r\nThe **checkov** CLI return an error when analyzing a Dockerfile with USER set according to an ENV variable. \r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Get this snippet :\r\n```Dockerfile\r\nFROM python:alpine\r\n\r\nWORKDIR /app\r\nCOPY requirements.txt .\r\nRUN pip install -r requirements.txt\r\n\r\nENV USERNAME=app\r\nRUN addgroup -S ${USERNAME} && adduser -s /sbin/nologin -S ${USERNAME} -G ${USERNAME} && chown -R ${USERNAME} /app\r\nUSER ${USERNAME}\r\n\r\nCOPY --chown=${USERNAME} script.py .\r\n\r\nCMD python3 script.py\r\n```\r\n2. Run cli command 'checkov -f Dockerfile'\r\n3. 
See error\r\n\r\n**Expected behavior**\r\nNo error.\r\n\r\n**Screenshots**\r\n<img width=\"750\" alt=\"\" src=\"https://user-images.githubusercontent.com/44492274/115271564-c380b080-a13d-11eb-9c4d-cb086e3bd9fd.png\">\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: macOS Big Sur 11.2.3\r\n - Checkov Version 2.0.55\n", "before_files": [{"content": "from checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck\n\n\nclass RootUser(BaseDockerfileCheck):\n def __init__(self):\n name = \"Ensure the last USER is not root\"\n id = \"CKV_DOCKER_8\"\n supported_instructions = [\"USER\"]\n categories = [CheckCategories.APPLICATION_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)\n\n def scan_entity_conf(self, conf):\n contents = conf.get(\"USER\")\n\n if contents:\n last_user = contents[-1]\n if last_user[\"value\"] == \"root\":\n return CheckResult.FAILED, last_user\n\n return CheckResult.PASSED, last_user\n\n return CheckResult.UNKNOWN, None\n\n\ncheck = RootUser()\n", "path": "checkov/dockerfile/checks/RootUser.py"}, {"content": "from checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck\n\n\nclass MaintainerExists(BaseDockerfileCheck):\n def __init__(self):\n name = \"Ensure that LABEL maintainer is used instead of MAINTAINER (deprecated)\"\n id = \"CKV_DOCKER_6\"\n supported_instructions = [\"MAINTAINER\"]\n categories = [CheckCategories.CONVENTION]\n super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)\n\n def scan_entity_conf(self, conf):\n for instruction, content in conf.items():\n if instruction == \"MAINTAINER\":\n return CheckResult.FAILED, content[0]\n return CheckResult.PASSED, None\n\n\ncheck = MaintainerExists()\n", "path": "checkov/dockerfile/checks/MaintainerExists.py"}]} | 1,308 | 383 |
gh_patches_debug_6411 | rasdani/github-patches | git_diff | SeldonIO__MLServer-625 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
starting mlserver using `mlserver start .` is not consistent with `mlserver start $PWD`
When I started mlserver using `mlserver start .` in directory tree
```
└── iris1
└── 1
├── model.joblib
└── model-settings.json
```
and settings `{"name":"iris1","implementation":"mlserver_sklearn.SKLearnModel","parameters":{"version":"1"}}`
results in an error:
```
mlserver.errors.InvalidModelURI: Invalid URI specified for model iris1 (iris1/1/iris1/1)
```
However using
`mlserver start $PWD` is successful.
</issue>
<code>
[start of mlserver/repository.py]
1 import os
2 import glob
3
4 from typing import List
5
6 from .settings import ModelParameters, ModelSettings
7 from .errors import ModelNotFound
8 from .logging import logger
9
10 DEFAULT_MODEL_SETTINGS_FILENAME = "model-settings.json"
11
12
13 class ModelRepository:
14 """
15 Model repository, responsible of the discovery of models which can be
16 loaded onto the model registry.
17 """
18
19 def __init__(self, root: str = None):
20 self._root = root
21
22 async def list(self) -> List[ModelSettings]:
23 all_model_settings = []
24
25 # TODO: Use an async alternative for filesys ops
26 if self._root:
27 pattern = os.path.join(self._root, "**", DEFAULT_MODEL_SETTINGS_FILENAME)
28 matches = glob.glob(pattern, recursive=True)
29
30 for model_settings_path in matches:
31 model_settings = self._load_model_settings(model_settings_path)
32 all_model_settings.append(model_settings)
33
34 # If there were no matches, try to load model from environment
35 if not all_model_settings:
36 # return default
37 model_settings = ModelSettings()
38 model_settings.parameters = ModelParameters()
39 all_model_settings.append(model_settings)
40
41 return all_model_settings
42
43 def _load_model_settings(self, model_settings_path: str) -> ModelSettings:
44 model_settings = ModelSettings.parse_file(model_settings_path)
45 model_settings._source = model_settings_path
46
47 # If name not present, default to folder name
48 model_settings_folder = os.path.dirname(model_settings_path)
49 folder_name = os.path.basename(model_settings_folder)
50 if model_settings.name:
51 if not self._folder_matches(folder_name, model_settings):
52 # Raise warning if name is different than folder's name
53 logger.warning(
54 f"Model name '{model_settings.name}' is different than "
55 f"model's folder name '{folder_name}'."
56 )
57 else:
58 model_settings.name = folder_name
59
60 if not model_settings.parameters:
61 model_settings.parameters = ModelParameters()
62
63 if not model_settings.parameters.uri:
64 # If not specified, default to its own folder
65 default_model_uri = os.path.dirname(model_settings_path)
66 model_settings.parameters.uri = default_model_uri
67
68 return model_settings
69
70 def _folder_matches(self, folder_name: str, model_settings: ModelSettings) -> bool:
71 if model_settings.name == folder_name:
72 return True
73
74 # To be compatible with Triton, check whether the folder name matches
75 # with the model's version
76 if model_settings.parameters and model_settings.parameters.version:
77 model_version = model_settings.parameters.version
78 if model_version == folder_name:
79 return True
80
81 return False
82
83 async def find(self, name: str) -> List[ModelSettings]:
84 all_settings = await self.list()
85 selected = []
86 for model_settings in all_settings:
87 # TODO: Implement other version policies (e.g. "Last N")
88 if model_settings.name == name:
89 selected.append(model_settings)
90
91 if len(selected) == 0:
92 raise ModelNotFound(name)
93
94 return selected
95
[end of mlserver/repository.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mlserver/repository.py b/mlserver/repository.py
--- a/mlserver/repository.py
+++ b/mlserver/repository.py
@@ -24,7 +24,8 @@
# TODO: Use an async alternative for filesys ops
if self._root:
- pattern = os.path.join(self._root, "**", DEFAULT_MODEL_SETTINGS_FILENAME)
+ abs_root = os.path.abspath(self._root)
+ pattern = os.path.join(abs_root, "**", DEFAULT_MODEL_SETTINGS_FILENAME)
matches = glob.glob(pattern, recursive=True)
for model_settings_path in matches:
| {"golden_diff": "diff --git a/mlserver/repository.py b/mlserver/repository.py\n--- a/mlserver/repository.py\n+++ b/mlserver/repository.py\n@@ -24,7 +24,8 @@\n \n # TODO: Use an async alternative for filesys ops\n if self._root:\n- pattern = os.path.join(self._root, \"**\", DEFAULT_MODEL_SETTINGS_FILENAME)\n+ abs_root = os.path.abspath(self._root)\n+ pattern = os.path.join(abs_root, \"**\", DEFAULT_MODEL_SETTINGS_FILENAME)\n matches = glob.glob(pattern, recursive=True)\n \n for model_settings_path in matches:\n", "issue": "starting mlserver using `mlserver start .` is not consistent with `mlserver start $PWD`\nWhen I started mlserver using `mlserver start .` in directory tree \r\n```\r\n\u2514\u2500\u2500 iris1\r\n \u2514\u2500\u2500 1\r\n \u251c\u2500\u2500 model.joblib\r\n \u2514\u2500\u2500 model-settings.json\r\n```\r\nand settings `{\"name\":\"iris1\",\"implementation\":\"mlserver_sklearn.SKLearnModel\",\"parameters\":{\"version\":\"1\"}}`\r\n\r\nresults in an error:\r\n```\r\nmlserver.errors.InvalidModelURI: Invalid URI specified for model iris1 (iris1/1/iris1/1)\r\n```\r\n\r\nHowever using\r\n`mlserver start $PWD` is successful.\n", "before_files": [{"content": "import os\nimport glob\n\nfrom typing import List\n\nfrom .settings import ModelParameters, ModelSettings\nfrom .errors import ModelNotFound\nfrom .logging import logger\n\nDEFAULT_MODEL_SETTINGS_FILENAME = \"model-settings.json\"\n\n\nclass ModelRepository:\n \"\"\"\n Model repository, responsible of the discovery of models which can be\n loaded onto the model registry.\n \"\"\"\n\n def __init__(self, root: str = None):\n self._root = root\n\n async def list(self) -> List[ModelSettings]:\n all_model_settings = []\n\n # TODO: Use an async alternative for filesys ops\n if self._root:\n pattern = os.path.join(self._root, \"**\", DEFAULT_MODEL_SETTINGS_FILENAME)\n matches = glob.glob(pattern, recursive=True)\n\n for model_settings_path in matches:\n model_settings = self._load_model_settings(model_settings_path)\n all_model_settings.append(model_settings)\n\n # If there were no matches, try to load model from environment\n if not all_model_settings:\n # return default\n model_settings = ModelSettings()\n model_settings.parameters = ModelParameters()\n all_model_settings.append(model_settings)\n\n return all_model_settings\n\n def _load_model_settings(self, model_settings_path: str) -> ModelSettings:\n model_settings = ModelSettings.parse_file(model_settings_path)\n model_settings._source = model_settings_path\n\n # If name not present, default to folder name\n model_settings_folder = os.path.dirname(model_settings_path)\n folder_name = os.path.basename(model_settings_folder)\n if model_settings.name:\n if not self._folder_matches(folder_name, model_settings):\n # Raise warning if name is different than folder's name\n logger.warning(\n f\"Model name '{model_settings.name}' is different than \"\n f\"model's folder name '{folder_name}'.\"\n )\n else:\n model_settings.name = folder_name\n\n if not model_settings.parameters:\n model_settings.parameters = ModelParameters()\n\n if not model_settings.parameters.uri:\n # If not specified, default to its own folder\n default_model_uri = os.path.dirname(model_settings_path)\n model_settings.parameters.uri = default_model_uri\n\n return model_settings\n\n def _folder_matches(self, folder_name: str, model_settings: ModelSettings) -> bool:\n if model_settings.name == folder_name:\n return True\n\n # To be compatible with Triton, check whether the folder name matches\n # with the model's 
version\n if model_settings.parameters and model_settings.parameters.version:\n model_version = model_settings.parameters.version\n if model_version == folder_name:\n return True\n\n return False\n\n async def find(self, name: str) -> List[ModelSettings]:\n all_settings = await self.list()\n selected = []\n for model_settings in all_settings:\n # TODO: Implement other version policies (e.g. \"Last N\")\n if model_settings.name == name:\n selected.append(model_settings)\n\n if len(selected) == 0:\n raise ModelNotFound(name)\n\n return selected\n", "path": "mlserver/repository.py"}]} | 1,512 | 127 |
gh_patches_debug_2870 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-753 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug for replay feature from pwd
Running the following command inside of a template repo:
`$ cookiecutter -o tmp .`
This will cause `replay.dump` to write files like this:
`~/.cookiecutter_replay/..json`
Identified by @eliasdorneles
</issue>
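An aside on the mechanics: with the trailing `.` as the template argument, `os.path.basename` keeps the literal dot, so the replay filename becomes `"." + ".json"`. Resolving the path first recovers the directory name — exactly what the accepted fix below does. A minimal sketch, with a hypothetical working directory:

```python
import os

template = "."  # what `cookiecutter -o tmp .` passes as the template argument

print(os.path.basename(template))
# -> "."   (so the replay file ends up named "." + ".json" == "..json")

print(os.path.basename(os.path.abspath(template)))
# -> "mytemplate"   (assuming the command was run in /home/user/mytemplate)
```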
<code>
[start of cookiecutter/main.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """
5 cookiecutter.main
6 -----------------
7
8 Main entry point for the `cookiecutter` command.
9
10 The code in this module is also a good example of how to use Cookiecutter as a
11 library rather than a script.
12 """
13
14 from __future__ import unicode_literals
15 import logging
16 import os
17 import re
18
19 from .config import get_user_config, USER_CONFIG_PATH
20 from .exceptions import InvalidModeException, RepositoryNotFound
21 from .prompt import prompt_for_config
22 from .generate import generate_context, generate_files
23 from .vcs import clone
24 from .replay import dump, load
25
26 logger = logging.getLogger(__name__)
27
28 builtin_abbreviations = {
29 'gh': 'https://github.com/{0}.git',
30 'bb': 'https://bitbucket.org/{0}',
31 }
32
33 REPO_REGEX = re.compile(r"""
34 (?x)
35 ((((git|hg)\+)?(git|ssh|https?):(//)?) # something like git:// ssh:// etc.
36 | # or
37 (\w+@[\w\.]+) # something like user@...
38 )
39 """)
40
41
42 def is_repo_url(value):
43 """Return True if value is a repository URL."""
44 return bool(REPO_REGEX.match(value))
45
46
47 def expand_abbreviations(template, config_dict):
48 """
49 Expand abbreviations in a template name.
50
51 :param template: The project template name.
52 :param config_dict: The user config, which will contain abbreviation
53 definitions.
54 """
55
56 abbreviations = builtin_abbreviations.copy()
57 abbreviations.update(config_dict.get('abbreviations', {}))
58
59 if template in abbreviations:
60 return abbreviations[template]
61
62 # Split on colon. If there is no colon, rest will be empty
63 # and prefix will be the whole template
64 prefix, sep, rest = template.partition(':')
65 if prefix in abbreviations:
66 return abbreviations[prefix].format(rest)
67
68 return template
69
70
71 def cookiecutter(
72 template, checkout=None, no_input=False, extra_context=None,
73 replay=False, overwrite_if_exists=False, output_dir='.',
74 config_file=USER_CONFIG_PATH):
75 """
76 API equivalent to using Cookiecutter at the command line.
77
78 :param template: A directory containing a project template directory,
79 or a URL to a git repository.
80 :param checkout: The branch, tag or commit ID to checkout after clone.
81 :param no_input: Prompt the user at command line for manual configuration?
82 :param extra_context: A dictionary of context that overrides default
83 and user configuration.
84 :param: overwrite_if_exists: Overwrite the contents of output directory
85 if it exists
86 :param output_dir: Where to output the generated project dir into.
87 :param config_file: User configuration file path.
88 """
89 if replay and ((no_input is not False) or (extra_context is not None)):
90 err_msg = (
91 "You can not use both replay and no_input or extra_context "
92 "at the same time."
93 )
94 raise InvalidModeException(err_msg)
95
96 # Get user config from ~/.cookiecutterrc or equivalent
97 # If no config file, sensible defaults from config.DEFAULT_CONFIG are used
98 config_dict = get_user_config(config_file=config_file)
99
100 template = expand_abbreviations(template, config_dict)
101
102 if is_repo_url(template):
103 repo_dir = clone(
104 repo_url=template,
105 checkout=checkout,
106 clone_to_dir=config_dict['cookiecutters_dir'],
107 no_input=no_input
108 )
109 else:
110 # If it's a local repo, no need to clone or copy to your
111 # cookiecutters_dir
112 repo_dir = template
113
114 if not os.path.isdir(repo_dir):
115 raise RepositoryNotFound(
116 'The repository {0} could not be located.'.format(template)
117 )
118
119 template_name = os.path.basename(template)
120
121 if replay:
122 context = load(config_dict['replay_dir'], template_name)
123 else:
124 context_file = os.path.join(repo_dir, 'cookiecutter.json')
125 logging.debug('context_file is {0}'.format(context_file))
126
127 context = generate_context(
128 context_file=context_file,
129 default_context=config_dict['default_context'],
130 extra_context=extra_context,
131 )
132
133 # prompt the user to manually configure at the command line.
134 # except when 'no-input' flag is set
135 context['cookiecutter'] = prompt_for_config(context, no_input)
136
137 dump(config_dict['replay_dir'], template_name, context)
138
139 # Create project from local context and project template.
140 return generate_files(
141 repo_dir=repo_dir,
142 context=context,
143 overwrite_if_exists=overwrite_if_exists,
144 output_dir=output_dir
145 )
146
[end of cookiecutter/main.py]
</code>
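An illustrative aside on the listing above: `expand_abbreviations` turns prefixed template names into repository URLs via the builtin abbreviation table. A minimal sketch of its behavior, with a hypothetical template name:

```python
# uses expand_abbreviations as defined in cookiecutter/main.py above
config_dict = {}  # no user-defined abbreviations

# the "gh" prefix matches a builtin abbreviation; the rest fills the template
expand_abbreviations('gh:audreyr/cookiecutter-pypackage', config_dict)
# -> 'https://github.com/audreyr/cookiecutter-pypackage.git'

# names without a known prefix pass through unchanged
expand_abbreviations('./my-local-template', config_dict)
# -> './my-local-template'
```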
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cookiecutter/main.py b/cookiecutter/main.py
--- a/cookiecutter/main.py
+++ b/cookiecutter/main.py
@@ -116,7 +116,7 @@
'The repository {0} could not be located.'.format(template)
)
- template_name = os.path.basename(template)
+ template_name = os.path.basename(os.path.abspath(template))
if replay:
context = load(config_dict['replay_dir'], template_name)
| {"golden_diff": "diff --git a/cookiecutter/main.py b/cookiecutter/main.py\n--- a/cookiecutter/main.py\n+++ b/cookiecutter/main.py\n@@ -116,7 +116,7 @@\n 'The repository {0} could not be located.'.format(template)\n )\n \n- template_name = os.path.basename(template)\n+ template_name = os.path.basename(os.path.abspath(template))\n \n if replay:\n context = load(config_dict['replay_dir'], template_name)\n", "issue": "Bug for replay feature from pwd\nRunning the following command inside of a template repo:\n\n`$ cookiecutter -o tmp .`\n\nWill cause `replay.dump` to files like this:\n\n`~/.cookiecutter_replay/..json`\n\nIdentified by @eliasdorneles \n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.main\n-----------------\n\nMain entry point for the `cookiecutter` command.\n\nThe code in this module is also a good example of how to use Cookiecutter as a\nlibrary rather than a script.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport logging\nimport os\nimport re\n\nfrom .config import get_user_config, USER_CONFIG_PATH\nfrom .exceptions import InvalidModeException, RepositoryNotFound\nfrom .prompt import prompt_for_config\nfrom .generate import generate_context, generate_files\nfrom .vcs import clone\nfrom .replay import dump, load\n\nlogger = logging.getLogger(__name__)\n\nbuiltin_abbreviations = {\n 'gh': 'https://github.com/{0}.git',\n 'bb': 'https://bitbucket.org/{0}',\n}\n\nREPO_REGEX = re.compile(r\"\"\"\n(?x)\n((((git|hg)\\+)?(git|ssh|https?):(//)?) # something like git:// ssh:// etc.\n | # or\n (\\w+@[\\w\\.]+) # something like user@...\n)\n\"\"\")\n\n\ndef is_repo_url(value):\n \"\"\"Return True if value is a repository URL.\"\"\"\n return bool(REPO_REGEX.match(value))\n\n\ndef expand_abbreviations(template, config_dict):\n \"\"\"\n Expand abbreviations in a template name.\n\n :param template: The project template name.\n :param config_dict: The user config, which will contain abbreviation\n definitions.\n \"\"\"\n\n abbreviations = builtin_abbreviations.copy()\n abbreviations.update(config_dict.get('abbreviations', {}))\n\n if template in abbreviations:\n return abbreviations[template]\n\n # Split on colon. 
If there is no colon, rest will be empty\n # and prefix will be the whole template\n prefix, sep, rest = template.partition(':')\n if prefix in abbreviations:\n return abbreviations[prefix].format(rest)\n\n return template\n\n\ndef cookiecutter(\n template, checkout=None, no_input=False, extra_context=None,\n replay=False, overwrite_if_exists=False, output_dir='.',\n config_file=USER_CONFIG_PATH):\n \"\"\"\n API equivalent to using Cookiecutter at the command line.\n\n :param template: A directory containing a project template directory,\n or a URL to a git repository.\n :param checkout: The branch, tag or commit ID to checkout after clone.\n :param no_input: Prompt the user at command line for manual configuration?\n :param extra_context: A dictionary of context that overrides default\n and user configuration.\n :param: overwrite_if_exists: Overwrite the contents of output directory\n if it exists\n :param output_dir: Where to output the generated project dir into.\n :param config_file: User configuration file path.\n \"\"\"\n if replay and ((no_input is not False) or (extra_context is not None)):\n err_msg = (\n \"You can not use both replay and no_input or extra_context \"\n \"at the same time.\"\n )\n raise InvalidModeException(err_msg)\n\n # Get user config from ~/.cookiecutterrc or equivalent\n # If no config file, sensible defaults from config.DEFAULT_CONFIG are used\n config_dict = get_user_config(config_file=config_file)\n\n template = expand_abbreviations(template, config_dict)\n\n if is_repo_url(template):\n repo_dir = clone(\n repo_url=template,\n checkout=checkout,\n clone_to_dir=config_dict['cookiecutters_dir'],\n no_input=no_input\n )\n else:\n # If it's a local repo, no need to clone or copy to your\n # cookiecutters_dir\n repo_dir = template\n\n if not os.path.isdir(repo_dir):\n raise RepositoryNotFound(\n 'The repository {0} could not be located.'.format(template)\n )\n\n template_name = os.path.basename(template)\n\n if replay:\n context = load(config_dict['replay_dir'], template_name)\n else:\n context_file = os.path.join(repo_dir, 'cookiecutter.json')\n logging.debug('context_file is {0}'.format(context_file))\n\n context = generate_context(\n context_file=context_file,\n default_context=config_dict['default_context'],\n extra_context=extra_context,\n )\n\n # prompt the user to manually configure at the command line.\n # except when 'no-input' flag is set\n context['cookiecutter'] = prompt_for_config(context, no_input)\n\n dump(config_dict['replay_dir'], template_name, context)\n\n # Create project from local context and project template.\n return generate_files(\n repo_dir=repo_dir,\n context=context,\n overwrite_if_exists=overwrite_if_exists,\n output_dir=output_dir\n )\n", "path": "cookiecutter/main.py"}]} | 1,958 | 111 |
gh_patches_debug_59100 | rasdani/github-patches | git_diff | kserve__kserve-2343 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
KServe 0.9 release tracking
/kind feature
**Describe the solution you'd like**
KServe 0.9 release tracking:
RC release Date: 6/13/2022
Release Date: 6/27/2022
## KServe Model Serving:
- [X] Storage spec for unifying single model serving and model mesh
- https://github.com/kserve/kserve/pull/1899 @Tomcli
- [x] Transformer ModelMesh support
- https://github.com/kserve/kserve/pull/2136 @chinhuang007
- [x] Model Status API for unifying single model serving and model mesh
- https://github.com/kserve/kserve/pull/2084 @pvaneck
- https://github.com/kserve/kserve/pull/2088 @Suresh-Nakkeran
- [x] Inference Graph v1alpha1 API and implementation
- https://github.com/kserve/kserve/pull/1910 @yuzisun @Iamlovingit
- [X] KServe control plane HA
- https://github.com/kserve/kserve/pull/2160 @Suresh-Nakkeran
- [X] Enable inference protocol version auto selection for servingruntime
- https://github.com/kserve/kserve/pull/2118 @Suresh-Nakkeran
- [x] Webhdfs storage uri support
- https://github.com/kserve/kserve/pull/2077 @markwinter
- [x] Azure file share support for storage initializer
- https://github.com/kserve/kserve/pull/1985 @laozc
- [x] KServe Autoscaling spec API
- https://github.com/kserve/kserve/pull/2082 @andyi2it
- [X] KServe ingress class and domain template support for raw deployment mode
- https://github.com/kserve/kserve/pull/2054 @pradithya
- https://github.com/kserve/kserve/pull/2049 @pradithya
## ModelMesh:
- [X] OpenVINO model server support
- https://github.com/kserve/modelmesh-runtime-adapter/pull/18 @tjohnson31415
- [x] Import ServingRuntime and InferenceService types from KServe
- https://github.com/kserve/modelmesh-serving/pull/146 @tjohnson31415
- https://github.com/kserve/modelmesh-serving/pull/140 @pvaneck
- [x] Azure storage support for ModelMesh
- https://github.com/kserve/modelmesh-runtime-adapter/pull/23 @pvaneck
## Models UI:
- [x] Models Web App KServe 0.8 release support
- https://github.com/kserve/models-web-app/pull/35 @DavidSpek
## Website:
- [x] Website doc update
**Anything else you would like to add:**
[Miscellaneous information that will assist in solving the issue.]
</issue>
<code>
[start of python/kserve/setup.py]
1 # Copyright 2021 The KServe Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import setuptools
16
17 TESTS_REQUIRES = [
18 'pytest',
19 'pytest-xdist',
20 'pytest-cov',
21 'pytest-asyncio',
22 'pytest-tornasync',
23 'mypy'
24 ]
25
26 with open('requirements.txt') as f:
27 REQUIRES = f.readlines()
28
29 setuptools.setup(
30 name='kserve',
31 version='0.9.0rc0',
32 author="The KServe Authors",
33 author_email='[email protected], [email protected], [email protected]',
34 license="Apache License Version 2.0",
35 url="https://github.com/kserve/kserve/tree/master/python/kserve",
36 description="KServe Python SDK",
37 long_description="Python SDK for KServe Server and Client.",
38 python_requires='>=3.7',
39 packages=[
40 'kserve',
41 'kserve.api',
42 'kserve.constants',
43 'kserve.models',
44 'kserve.handlers',
45 'kserve.utils',
46 ],
47 package_data={'': ['requirements.txt']},
48 include_package_data=True,
49 zip_safe=False,
50 classifiers=[
51 'Intended Audience :: Developers',
52 'Intended Audience :: Education',
53 'Intended Audience :: Science/Research',
54 'Programming Language :: Python :: 3',
55 'Programming Language :: Python :: 3.7',
56 'Programming Language :: Python :: 3.8',
57 'Programming Language :: Python :: 3.9',
58 "License :: OSI Approved :: Apache Software License",
59 "Operating System :: OS Independent",
60 'Topic :: Scientific/Engineering',
61 'Topic :: Scientific/Engineering :: Artificial Intelligence',
62 'Topic :: Software Development',
63 'Topic :: Software Development :: Libraries',
64 'Topic :: Software Development :: Libraries :: Python Modules',
65 ],
66 install_requires=REQUIRES,
67 tests_require=TESTS_REQUIRES,
68 extras_require={'test': TESTS_REQUIRES}
69 )
70
[end of python/kserve/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/kserve/setup.py b/python/kserve/setup.py
--- a/python/kserve/setup.py
+++ b/python/kserve/setup.py
@@ -28,7 +28,7 @@
setuptools.setup(
name='kserve',
- version='0.9.0rc0',
+ version='0.9.0',
author="The KServe Authors",
author_email='[email protected], [email protected], [email protected]',
license="Apache License Version 2.0",
| {"golden_diff": "diff --git a/python/kserve/setup.py b/python/kserve/setup.py\n--- a/python/kserve/setup.py\n+++ b/python/kserve/setup.py\n@@ -28,7 +28,7 @@\n \n setuptools.setup(\n name='kserve',\n- version='0.9.0rc0',\n+ version='0.9.0',\n author=\"The KServe Authors\",\n author_email='[email protected], [email protected], [email protected]',\n license=\"Apache License Version 2.0\",\n", "issue": "KServe 0.9 release tracking\n/kind feature\r\n\r\n**Describe the solution you'd like**\r\nKServe 0.9 release tracking:\r\nRC release Date: 6/13/2022\r\nRelease Date: 6/27/2022\r\n\r\n## KServe Model Serving:\r\n- [X] Storage spec for unifying single model serving and model mesh\r\n - https://github.com/kserve/kserve/pull/1899 @Tomcli \r\n- [x] Transformer ModelMesh support\r\n - https://github.com/kserve/kserve/pull/2136 @chinhuang007 \r\n- [x] Model Status API for unifying single model serving and model mesh\r\n - https://github.com/kserve/kserve/pull/2084 @pvaneck \r\n - https://github.com/kserve/kserve/pull/2088 @Suresh-Nakkeran \r\n- [x] Inferece Graph v1alpha1 API and impmentation\r\n - https://github.com/kserve/kserve/pull/1910 @yuzisun @Iamlovingit \r\n- [X] KServe control plane HA\r\n - https://github.com/kserve/kserve/pull/2160 @Suresh-Nakkeran \r\n- [X] Enable inference protocol version auto selection for servingruntime \r\n - https://github.com/kserve/kserve/pull/2118 @Suresh-Nakkeran \r\n- [x] Webhdfs storage uri support\r\n - https://github.com/kserve/kserve/pull/2077 @markwinter \r\n- [x] Azure file share support for storage initializer \r\n - https://github.com/kserve/kserve/pull/1985 @laozc \r\n- [x] KServe Autoscaling spec API\r\n - https://github.com/kserve/kserve/pull/2082 @andyi2it \r\n- [X] KServe ingress class and domain template support for raw deployment mode\r\n - https://github.com/kserve/kserve/pull/2054 @pradithya \r\n - https://github.com/kserve/kserve/pull/2049 @pradithya \r\n\r\n## ModelMesh:\r\n- [X] OpenVINO model server support\r\n - https://github.com/kserve/modelmesh-runtime-adapter/pull/18 @tjohnson31415\r\n- [x] Import ServingRuntime and InferenceService types from KServe \r\n - https://github.com/kserve/modelmesh-serving/pull/146 @tjohnson31415 \r\n - https://github.com/kserve/modelmesh-serving/pull/140 @pvaneck \r\n- [x] Azure storage support for ModelMesh\r\n - https://github.com/kserve/modelmesh-runtime-adapter/pull/23 @pvaneck \r\n\r\n## Models UI:\r\n- [x] Models Web App KServe 0.8 release support \r\n - https://github.com/kserve/models-web-app/pull/35 @DavidSpek \r\n\r\n \r\n## Website: \r\n- [x] Website doc update\r\n\r\n\r\n**Anything else you would like to add:**\r\n[Miscellaneous information that will assist in solving the issue.]\r\n\n", "before_files": [{"content": "# Copyright 2021 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport setuptools\n\nTESTS_REQUIRES = [\n 'pytest',\n 'pytest-xdist',\n 'pytest-cov',\n 'pytest-asyncio',\n 'pytest-tornasync',\n 'mypy'\n]\n\nwith open('requirements.txt') as f:\n REQUIRES = 
f.readlines()\n\nsetuptools.setup(\n name='kserve',\n version='0.9.0rc0',\n author=\"The KServe Authors\",\n author_email='[email protected], [email protected], [email protected]',\n license=\"Apache License Version 2.0\",\n url=\"https://github.com/kserve/kserve/tree/master/python/kserve\",\n description=\"KServe Python SDK\",\n long_description=\"Python SDK for KServe Server and Client.\",\n python_requires='>=3.7',\n packages=[\n 'kserve',\n 'kserve.api',\n 'kserve.constants',\n 'kserve.models',\n 'kserve.handlers',\n 'kserve.utils',\n ],\n package_data={'': ['requirements.txt']},\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n install_requires=REQUIRES,\n tests_require=TESTS_REQUIRES,\n extras_require={'test': TESTS_REQUIRES}\n)\n", "path": "python/kserve/setup.py"}]} | 1,916 | 125 |
gh_patches_debug_28851 | rasdani/github-patches | git_diff | webkom__lego-2560 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
RestrictedMail notification
> Restricted mail is used when sending mails to multiple users at once by selecting users/events/meetings, and then send the email to <[email protected]> together with the token.
The `restricted mail sent` should be sent to the proper email, not the `user.email` field. The address `user.email_address` should be used instead.
If the `from_address` is not the same as the `user.email_address`, both should receive the mail.
https://github.com/webkom/lego/blob/ccab14fbee223f16842ace6ca2ba0c2f3ac3ac86/lego/apps/restricted/notifications.py#L9
</issue>
<code>
[start of lego/apps/restricted/serializers.py]
1 from lego.apps.events.fields import PublicEventListField
2 from lego.apps.meetings.fields import MeetingListField
3 from lego.apps.restricted.models import RestrictedMail
4 from lego.apps.users.fields import AbakusGroupListField, PublicUserListField
5 from lego.utils.serializers import BasisModelSerializer
6
7
8 class RestrictedMailListSerializer(BasisModelSerializer):
9 class Meta:
10 model = RestrictedMail
11 fields = ("id", "from_address", "hide_sender", "used", "created_at", "weekly")
12 read_only_fields = ("used", "created_at")
13
14 def save(self, **kwargs):
15 kwargs["token"] = RestrictedMail.create_token()
16 return super().save(**kwargs)
17
18
19 class RestrictedMailSerializer(RestrictedMailListSerializer):
20 class Meta(RestrictedMailListSerializer.Meta):
21 fields = RestrictedMailListSerializer.Meta.fields + (
22 "users",
23 "groups",
24 "events",
25 "meetings",
26 "raw_addresses",
27 "weekly",
28 "hide_sender",
29 )
30
31
32 class RestrictedMailDetailSerializer(RestrictedMailSerializer):
33 users = PublicUserListField({"read_only": True})
34 groups = AbakusGroupListField({"read_only": True})
35 events = PublicEventListField({"read_only": True})
36 meetings = MeetingListField({"read_only": True})
37
38 class Meta(RestrictedMailSerializer.Meta):
39 fields = RestrictedMailSerializer.Meta.fields + ("token_query_param",)
40
[end of lego/apps/restricted/serializers.py]
[start of lego/apps/restricted/notifications.py]
1 from lego.apps.notifications.constants import RESTRICTED_MAIL_SENT
2 from lego.apps.notifications.notification import Notification
3
4
5 class RestrictedMailSentNotification(Notification):
6
7 name = RESTRICTED_MAIL_SENT
8
9 def generate_mail(self):
10 return self._delay_mail(
11 to_email=self.user.email,
12 context={"first_name": self.user.first_name},
13 subject="Begrenset epost sendt ut",
14 plain_template="restricted/email/process_success.txt",
15 html_template="restricted/email/process_success.html",
16 )
17
[end of lego/apps/restricted/notifications.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lego/apps/restricted/notifications.py b/lego/apps/restricted/notifications.py
--- a/lego/apps/restricted/notifications.py
+++ b/lego/apps/restricted/notifications.py
@@ -8,7 +8,7 @@
def generate_mail(self):
return self._delay_mail(
- to_email=self.user.email,
+ to_email=self.user.email.address,
context={"first_name": self.user.first_name},
subject="Begrenset epost sendt ut",
plain_template="restricted/email/process_success.txt",
diff --git a/lego/apps/restricted/serializers.py b/lego/apps/restricted/serializers.py
--- a/lego/apps/restricted/serializers.py
+++ b/lego/apps/restricted/serializers.py
@@ -1,3 +1,7 @@
+from functools import reduce
+
+from rest_framework import exceptions
+
from lego.apps.events.fields import PublicEventListField
from lego.apps.meetings.fields import MeetingListField
from lego.apps.restricted.models import RestrictedMail
@@ -28,6 +32,18 @@
"hide_sender",
)
+ def create(self, validated_data):
+ groups = validated_data["groups"]
+ events = validated_data["events"]
+ MaxPermittedAmout = 500
+ num = reduce((lambda a, b: a + b.number_of_users), groups, 0)
+ num += reduce((lambda a, b: a + b.registration_count), events, 0)
+ if num > MaxPermittedAmout:
+ raise exceptions.ValidationError(
+ f"The number of students in selected groups/events exceed the permitted amount which is {MaxPermittedAmout}"
+ )
+ return super().create(validated_data)
+
class RestrictedMailDetailSerializer(RestrictedMailSerializer):
users = PublicUserListField({"read_only": True})
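
The validation added above folds member counts together with `functools.reduce`; a standalone sketch of that pattern, with stand-in objects rather than the real Django models:

```python
from functools import reduce

class Group:
    def __init__(self, number_of_users):
        self.number_of_users = number_of_users

groups = [Group(120), Group(300), Group(45)]

# accumulate the per-group counts into one total, starting from 0
total = reduce(lambda acc, group: acc + group.number_of_users, groups, 0)
print(total)  # -> 465
```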
| {"golden_diff": "diff --git a/lego/apps/restricted/notifications.py b/lego/apps/restricted/notifications.py\n--- a/lego/apps/restricted/notifications.py\n+++ b/lego/apps/restricted/notifications.py\n@@ -8,7 +8,7 @@\n \n def generate_mail(self):\n return self._delay_mail(\n- to_email=self.user.email,\n+ to_email=self.user.email.address,\n context={\"first_name\": self.user.first_name},\n subject=\"Begrenset epost sendt ut\",\n plain_template=\"restricted/email/process_success.txt\",\ndiff --git a/lego/apps/restricted/serializers.py b/lego/apps/restricted/serializers.py\n--- a/lego/apps/restricted/serializers.py\n+++ b/lego/apps/restricted/serializers.py\n@@ -1,3 +1,7 @@\n+from functools import reduce\n+\n+from rest_framework import exceptions\n+\n from lego.apps.events.fields import PublicEventListField\n from lego.apps.meetings.fields import MeetingListField\n from lego.apps.restricted.models import RestrictedMail\n@@ -28,6 +32,18 @@\n \"hide_sender\",\n )\n \n+ def create(self, validated_data):\n+ groups = validated_data[\"groups\"]\n+ events = validated_data[\"events\"]\n+ MaxPermittedAmout = 500\n+ num = reduce((lambda a, b: a + b.number_of_users), groups, 0)\n+ num += reduce((lambda a, b: a + b.registration_count), events, 0)\n+ if num > MaxPermittedAmout:\n+ raise exceptions.ValidationError(\n+ f\"The number of students in selected groups/events exceed the permitted amount which is {MaxPermittedAmout}\"\n+ )\n+ return super().create(validated_data)\n+\n \n class RestrictedMailDetailSerializer(RestrictedMailSerializer):\n users = PublicUserListField({\"read_only\": True})\n", "issue": "RestrictedMail notification\n> Restricted mail is used when sending mails to multiple users at once by selecting users/events/meetings, and then send the email to <[email protected]> together with the token.\r\n\r\nThe `restricted mail sent` should be sent to the proper email, not the `user.email` field. 
The address `user.email_address` should be used instead.\r\n\r\nIf the `from_address` is not the same as the `user.email_address`, both should receive the mail.\r\n\r\nhttps://github.com/webkom/lego/blob/ccab14fbee223f16842ace6ca2ba0c2f3ac3ac86/lego/apps/restricted/notifications.py#L9\n", "before_files": [{"content": "from lego.apps.events.fields import PublicEventListField\nfrom lego.apps.meetings.fields import MeetingListField\nfrom lego.apps.restricted.models import RestrictedMail\nfrom lego.apps.users.fields import AbakusGroupListField, PublicUserListField\nfrom lego.utils.serializers import BasisModelSerializer\n\n\nclass RestrictedMailListSerializer(BasisModelSerializer):\n class Meta:\n model = RestrictedMail\n fields = (\"id\", \"from_address\", \"hide_sender\", \"used\", \"created_at\", \"weekly\")\n read_only_fields = (\"used\", \"created_at\")\n\n def save(self, **kwargs):\n kwargs[\"token\"] = RestrictedMail.create_token()\n return super().save(**kwargs)\n\n\nclass RestrictedMailSerializer(RestrictedMailListSerializer):\n class Meta(RestrictedMailListSerializer.Meta):\n fields = RestrictedMailListSerializer.Meta.fields + (\n \"users\",\n \"groups\",\n \"events\",\n \"meetings\",\n \"raw_addresses\",\n \"weekly\",\n \"hide_sender\",\n )\n\n\nclass RestrictedMailDetailSerializer(RestrictedMailSerializer):\n users = PublicUserListField({\"read_only\": True})\n groups = AbakusGroupListField({\"read_only\": True})\n events = PublicEventListField({\"read_only\": True})\n meetings = MeetingListField({\"read_only\": True})\n\n class Meta(RestrictedMailSerializer.Meta):\n fields = RestrictedMailSerializer.Meta.fields + (\"token_query_param\",)\n", "path": "lego/apps/restricted/serializers.py"}, {"content": "from lego.apps.notifications.constants import RESTRICTED_MAIL_SENT\nfrom lego.apps.notifications.notification import Notification\n\n\nclass RestrictedMailSentNotification(Notification):\n\n name = RESTRICTED_MAIL_SENT\n\n def generate_mail(self):\n return self._delay_mail(\n to_email=self.user.email,\n context={\"first_name\": self.user.first_name},\n subject=\"Begrenset epost sendt ut\",\n plain_template=\"restricted/email/process_success.txt\",\n html_template=\"restricted/email/process_success.html\",\n )\n", "path": "lego/apps/restricted/notifications.py"}]} | 1,237 | 423 |
gh_patches_debug_3626 | rasdani/github-patches | git_diff | ivy-llc__ivy-25492 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
multinomial
</issue>
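For context: the one-word request refers to `paddle.multinomial`, which draws sample indices from a categorical distribution defined by a weight vector. A rough NumPy emulation — the normalization step and the defaults are assumptions, not Paddle's exact contract:

```python
import numpy as np

def multinomial_sketch(weights, num_samples=1, replacement=False):
    probs = np.asarray(weights, dtype=float)
    probs = probs / probs.sum()  # turn raw weights into probabilities
    return np.random.choice(len(probs), size=num_samples,
                            replace=replacement, p=probs)

print(multinomial_sketch([0.2, 0.3, 0.5], num_samples=2))  # e.g. [2 1]
```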
<code>
[start of ivy/functional/frontends/paddle/random.py]
1 # global
2 import ivy
3 from ivy.func_wrapper import with_supported_dtypes
4 from ivy.func_wrapper import with_supported_device_and_dtypes, with_unsupported_dtypes
5 from ivy.functional.frontends.paddle.func_wrapper import (
6 to_ivy_arrays_and_back,
7 )
8
9
10 @with_supported_dtypes(
11 {"2.5.1 and below": ("float32", "float64")},
12 "paddle",
13 )
14 @to_ivy_arrays_and_back
15 def normal(mean=0.0, std=1.0, shape=None, name=None):
16 return ivy.random_normal(mean=mean, std=std, shape=shape)
17
18
19 @with_supported_dtypes(
20 {"2.5.1 and below": ("float32", "float64")},
21 "paddle",
22 )
23 @to_ivy_arrays_and_back
24 def poisson(x, name=None):
25 return ivy.poisson(x, shape=None, device=None, dtype=None, seed=None, out=None)
26
27
28 @with_supported_device_and_dtypes(
29 {
30 "2.5.1 and above": {
31 "cpu": (
32 "bfloat16",
33 "float32",
34 "float64",
35 ),
36 "gpu": (
37 "bfloat16",
38 "float16",
39 "float32",
40 "float64",
41 ),
42 },
43 "2.4.2 and below": {
44 "cpu": (
45 "float32",
46 "float64",
47 ),
48 "gpu": (
49 "float16",
50 "float32",
51 "float64",
52 ),
53 },
54 },
55 "paddle",
56 )
57 @to_ivy_arrays_and_back
58 def rand(shape, dtype=None, name=None):
59 return ivy.random_uniform(low=0.0, high=1.0, shape=shape, dtype=dtype, seed=None)
60
61
62 @to_ivy_arrays_and_back
63 def randint(low=0, high=None, shape=[1], dtype=None, name=None):
64 return ivy.randint(low, high, shape=shape, dtype=dtype)
65
66
67 @with_unsupported_dtypes(
68 {"2.5.1 and below": ("int16", "float16", "bfloat16", "uint8")},
69 "paddle",
70 )
71 @to_ivy_arrays_and_back
72 def randint_like(x, low=0, high=None, dtype=None, name=None):
73 if high is None:
74 high = low
75 low = 0
76 if high <= 0:
77 raise ivy.exceptions.IvyError(
78 "If high is None, low must be greater than 0, but received low = 0."
79 )
80 return ivy.randint(low, high, shape=x.shape, dtype=dtype, seed=None)
81
82
83 def randn(shape, dtype=None, name=None):
84 if dtype not in ["float32", "float64"]:
85 raise ivy.exceptions.IvyError(
86 "Unsupported dtype for randn, only float32 and float64 are supported, "
87 )
88 return ivy.random_normal(shape=shape, dtype=dtype, seed=None)
89
90
91 @with_supported_dtypes(
92 {"2.5.1 and below": ("float32", "float64")},
93 "paddle",
94 )
95 @to_ivy_arrays_and_back
96 def standard_normal(shape, dtype=None, name=None):
97 return ivy.random_normal(mean=0, std=1, shape=shape, dtype=dtype)
98
99
100 @with_supported_dtypes(
101 {"2.5.1 and below": ("float32", "float64")},
102 "paddle",
103 )
104 @to_ivy_arrays_and_back
105 def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
106 return ivy.random_uniform(low=min, high=max, shape=shape, dtype=dtype, seed=seed)
107
[end of ivy/functional/frontends/paddle/random.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ivy/functional/frontends/paddle/random.py b/ivy/functional/frontends/paddle/random.py
--- a/ivy/functional/frontends/paddle/random.py
+++ b/ivy/functional/frontends/paddle/random.py
@@ -7,6 +7,16 @@
)
+@with_supported_dtypes(
+ {"2.5.1 and below": ("float32", "float64")},
+ "paddle",
+)
+@to_ivy_arrays_and_back
+def multinomial(x, num_samples=1, replacement=False, name=None):
+ n = num_samples + 1
+ return ivy.multinomial(n, num_samples, probs=x, replace=replacement)
+
+
@with_supported_dtypes(
{"2.5.1 and below": ("float32", "float64")},
"paddle",
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/random.py b/ivy/functional/frontends/paddle/random.py\n--- a/ivy/functional/frontends/paddle/random.py\n+++ b/ivy/functional/frontends/paddle/random.py\n@@ -7,6 +7,16 @@\n )\n \n \n+@with_supported_dtypes(\n+ {\"2.5.1 and below\": (\"float32\", \"float64\")},\n+ \"paddle\",\n+)\n+@to_ivy_arrays_and_back\n+def multinomial(x, num_samples=1, replacement=False, name=None):\n+ n = num_samples + 1\n+ return ivy.multinomial(n, num_samples, probs=x, replace=replacement)\n+\n+\n @with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\")},\n \"paddle\",\n", "issue": "multinomial\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.func_wrapper import with_supported_device_and_dtypes, with_unsupported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef normal(mean=0.0, std=1.0, shape=None, name=None):\n return ivy.random_normal(mean=mean, std=std, shape=shape)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef poisson(x, name=None):\n return ivy.poisson(x, shape=None, device=None, dtype=None, seed=None, out=None)\n\n\n@with_supported_device_and_dtypes(\n {\n \"2.5.1 and above\": {\n \"cpu\": (\n \"bfloat16\",\n \"float32\",\n \"float64\",\n ),\n \"gpu\": (\n \"bfloat16\",\n \"float16\",\n \"float32\",\n \"float64\",\n ),\n },\n \"2.4.2 and below\": {\n \"cpu\": (\n \"float32\",\n \"float64\",\n ),\n \"gpu\": (\n \"float16\",\n \"float32\",\n \"float64\",\n ),\n },\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef rand(shape, dtype=None, name=None):\n return ivy.random_uniform(low=0.0, high=1.0, shape=shape, dtype=dtype, seed=None)\n\n\n@to_ivy_arrays_and_back\ndef randint(low=0, high=None, shape=[1], dtype=None, name=None):\n return ivy.randint(low, high, shape=shape, dtype=dtype)\n\n\n@with_unsupported_dtypes(\n {\"2.5.1 and below\": (\"int16\", \"float16\", \"bfloat16\", \"uint8\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef randint_like(x, low=0, high=None, dtype=None, name=None):\n if high is None:\n high = low\n low = 0\n if high <= 0:\n raise ivy.exceptions.IvyError(\n \"If high is None, low must be greater than 0, but received low = 0.\"\n )\n return ivy.randint(low, high, shape=x.shape, dtype=dtype, seed=None)\n\n\ndef randn(shape, dtype=None, name=None):\n if dtype not in [\"float32\", \"float64\"]:\n raise ivy.exceptions.IvyError(\n \"Unsupported dtype for randn, only float32 and float64 are supported, \"\n )\n return ivy.random_normal(shape=shape, dtype=dtype, seed=None)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef standard_normal(shape, dtype=None, name=None):\n return ivy.random_normal(mean=0, std=1, shape=shape, dtype=dtype)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):\n return ivy.random_uniform(low=min, high=max, shape=shape, dtype=dtype, seed=seed)\n", "path": "ivy/functional/frontends/paddle/random.py"}]} | 1,607 | 191 |
gh_patches_debug_25453 | rasdani/github-patches | git_diff | wemake-services__wemake-python-styleguide-113 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`python3.7` raises `flake8` warning
It is a bug in `pycodestyle`, the library `flake8` builds on:
- https://github.com/PyCQA/pycodestyle/issues/728
We currently allow the `python3.7` build to fail.
```
=============================== warnings summary ===============================
tests/test_visitors/test_wrong_class/test_base_class.py::FLAKE8
/home/travis/virtualenv/python3.7.0/lib/python3.7/site-packages/pycodestyle.py:113: FutureWarning: Possible nested set at position 1
EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[[({] | []}),;:]')
-- Docs: https://docs.pytest.org/en/latest/warnings.html
=================== 1514 passed, 1 warnings in 27.96 seconds ===================
```
</issue>
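The warning itself is easy to reproduce without flake8: starting with Python 3.7, `re` flags `[[` inside a character class as a possible future nested-set operator. A minimal demonstration:

```python
import re
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    re.compile(r'[[({] | []}),;:]')  # pycodestyle's original pattern
    for w in caught:
        print(w.category.__name__, w.message)
        # -> FutureWarning Possible nested set at position 1

# escaping the inner brackets expresses the same classes warning-free
re.compile(r'[\[({] | [\]}),;:]')
```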
<code>
[start of wemake_python_styleguide/compat.py]
1 # -*- coding: utf-8 -*-
2
3 """
4 This module contains ugly hacks and fixes for version compat issues.
5
6 Do not be over-exited to add anything here.
7 """
8
9 import ast
10
11
12 def maybe_set_parent(tree: ast.AST) -> ast.AST:
13 """Sets parents for all nodes that do not have this prop."""
14 for statement in ast.walk(tree):
15 for child in ast.iter_child_nodes(statement):
16 if not hasattr(child, 'parent'): # noqa: Z113
17 setattr(child, 'parent', statement)
18
19 return tree
20
[end of wemake_python_styleguide/compat.py]
[start of wemake_python_styleguide/checker.py]
1 # -*- coding: utf-8 -*-
2
3 from ast import Module
4 from typing import Generator
5
6 from flake8.options.manager import OptionManager
7
8 from wemake_python_styleguide import constants
9 from wemake_python_styleguide.compat import maybe_set_parent
10 from wemake_python_styleguide.options.config import Configuration
11 from wemake_python_styleguide.types import (
12 CheckerSequence,
13 CheckResult,
14 ConfigurationOptions,
15 )
16 from wemake_python_styleguide.version import version
17 from wemake_python_styleguide.visitors.ast.complexity.counts import (
18 MethodMembersVisitor,
19 ModuleMembersVisitor,
20 )
21 from wemake_python_styleguide.visitors.ast.complexity.function import (
22 FunctionComplexityVisitor,
23 )
24 from wemake_python_styleguide.visitors.ast.complexity.nested import (
25 NestedComplexityVisitor,
26 )
27 from wemake_python_styleguide.visitors.ast.complexity.offset import (
28 OffsetVisitor,
29 )
30 from wemake_python_styleguide.visitors.ast.wrong_class import WrongClassVisitor
31 from wemake_python_styleguide.visitors.ast.wrong_contents import (
32 WrongContentsVisitor,
33 )
34 from wemake_python_styleguide.visitors.ast.wrong_function_call import (
35 WrongFunctionCallVisitor,
36 )
37 from wemake_python_styleguide.visitors.ast.wrong_import import (
38 WrongImportVisitor,
39 )
40 from wemake_python_styleguide.visitors.ast.wrong_keyword import (
41 WrongKeywordVisitor,
42 WrongRaiseVisitor,
43 )
44 from wemake_python_styleguide.visitors.ast.wrong_name import (
45 WrongModuleMetadataVisitor,
46 WrongNameVisitor,
47 )
48 from wemake_python_styleguide.visitors.ast.wrong_string import (
49 WrongStringVisitor,
50 )
51 from wemake_python_styleguide.visitors.filenames.wrong_module_name import (
52 WrongModuleNameVisitor,
53 )
54
55 #: Visitors that should be working by default:
56 ENABLED_VISITORS: CheckerSequence = [
57 # Styling and correctness:
58 WrongRaiseVisitor,
59 WrongFunctionCallVisitor,
60 WrongImportVisitor,
61 WrongKeywordVisitor,
62 WrongNameVisitor,
63 WrongModuleMetadataVisitor,
64 WrongClassVisitor,
65 WrongStringVisitor,
66 WrongContentsVisitor,
67
68 # Complexity:
69 FunctionComplexityVisitor,
70 NestedComplexityVisitor,
71 OffsetVisitor,
72 ModuleMembersVisitor,
73 MethodMembersVisitor,
74
75 # Modules:
76 WrongModuleNameVisitor,
77 ]
78
79
80 class Checker(object):
81 """
82 Main checker class.
83
84 Runs all checks that are bundled with this package.
85 If you want to add new checks they should be added to ``ENABLED_VISITORS``.
86 """
87
88 name = 'wemake-python-styleguide'
89 version = version
90
91 config = Configuration()
92 options: ConfigurationOptions
93
94 def __init__(self, tree: Module, filename: str = constants.STDIN) -> None:
95 """Creates new checker instance."""
96 self.tree = maybe_set_parent(tree)
97 self.filename = filename
98
99 @classmethod
100 def add_options(cls, parser: OptionManager) -> None:
101 """Calls Configuration instance method for registering options."""
102 cls.config.register_options(parser)
103
104 @classmethod
105 def parse_options(cls, options: ConfigurationOptions) -> None:
106 """Parses registered options for providing to the visitor."""
107 cls.options = options
108
109 def run(self) -> Generator[CheckResult, None, None]:
110 """
111 Runs the checker.
112
113 This method is used by `flake8` API.
114 After all configuration is parsed and passed.
115 """
116 for visitor_class in ENABLED_VISITORS:
117 visitor = visitor_class(
118 self.options,
119 tree=self.tree,
120 filename=self.filename,
121 )
122 visitor.run()
123
124 for error in visitor.errors:
125 yield (*error.node_items(), type(self))
126
[end of wemake_python_styleguide/checker.py]
</code>
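For orientation: in flake8's classic plugin API, each item a checker's `run()` yields is consumed as a 4-tuple, which is what `(*error.node_items(), type(self))` above assembles. A schematic sketch with a stand-in class:

```python
class Checker:
    """Stand-in for the plugin class in the listing above."""

# (line_number, column_offset, "Zxxx message text", plugin_class)
result = (14, 0, "Z110 Found wrong name", Checker)
line, col, message, plugin = result
```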
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wemake_python_styleguide/checker.py b/wemake_python_styleguide/checker.py
--- a/wemake_python_styleguide/checker.py
+++ b/wemake_python_styleguide/checker.py
@@ -6,7 +6,6 @@
from flake8.options.manager import OptionManager
from wemake_python_styleguide import constants
-from wemake_python_styleguide.compat import maybe_set_parent
from wemake_python_styleguide.options.config import Configuration
from wemake_python_styleguide.types import (
CheckerSequence,
@@ -93,7 +92,7 @@
def __init__(self, tree: Module, filename: str = constants.STDIN) -> None:
"""Creates new checker instance."""
- self.tree = maybe_set_parent(tree)
+ self.tree = tree
self.filename = filename
@classmethod
diff --git a/wemake_python_styleguide/compat.py b/wemake_python_styleguide/compat.py
deleted file mode 100644
--- a/wemake_python_styleguide/compat.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# -*- coding: utf-8 -*-
-
-"""
-This module contains ugly hacks and fixes for version compat issues.
-
-Do not be over-exited to add anything here.
-"""
-
-import ast
-
-
-def maybe_set_parent(tree: ast.AST) -> ast.AST:
- """Sets parents for all nodes that do not have this prop."""
- for statement in ast.walk(tree):
- for child in ast.iter_child_nodes(statement):
- if not hasattr(child, 'parent'): # noqa: Z113
- setattr(child, 'parent', statement)
-
- return tree
| {"golden_diff": "diff --git a/wemake_python_styleguide/checker.py b/wemake_python_styleguide/checker.py\n--- a/wemake_python_styleguide/checker.py\n+++ b/wemake_python_styleguide/checker.py\n@@ -6,7 +6,6 @@\n from flake8.options.manager import OptionManager\n \n from wemake_python_styleguide import constants\n-from wemake_python_styleguide.compat import maybe_set_parent\n from wemake_python_styleguide.options.config import Configuration\n from wemake_python_styleguide.types import (\n CheckerSequence,\n@@ -93,7 +92,7 @@\n \n def __init__(self, tree: Module, filename: str = constants.STDIN) -> None:\n \"\"\"Creates new checker instance.\"\"\"\n- self.tree = maybe_set_parent(tree)\n+ self.tree = tree\n self.filename = filename\n \n @classmethod\ndiff --git a/wemake_python_styleguide/compat.py b/wemake_python_styleguide/compat.py\ndeleted file mode 100644\n--- a/wemake_python_styleguide/compat.py\n+++ /dev/null\n@@ -1,19 +0,0 @@\n-# -*- coding: utf-8 -*-\n-\n-\"\"\"\n-This module contains ugly hacks and fixes for version compat issues.\n-\n-Do not be over-exited to add anything here.\n-\"\"\"\n-\n-import ast\n-\n-\n-def maybe_set_parent(tree: ast.AST) -> ast.AST:\n- \"\"\"Sets parents for all nodes that do not have this prop.\"\"\"\n- for statement in ast.walk(tree):\n- for child in ast.iter_child_nodes(statement):\n- if not hasattr(child, 'parent'): # noqa: Z113\n- setattr(child, 'parent', statement)\n-\n- return tree\n", "issue": "`python3.7` raises `flake8` warning\nIt is a bug in `flake8`:\r\n- https://github.com/PyCQA/pycodestyle/issues/728\r\n\r\nWe currently allow `python3.7` build to fail.\r\n\r\n```\r\n=============================== warnings summary ===============================\r\ntests/test_visitors/test_wrong_class/test_base_class.py::FLAKE8\r\n /home/travis/virtualenv/python3.7.0/lib/python3.7/site-packages/pycodestyle.py:113: FutureWarning: Possible nested set at position 1\r\n EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[[({] | []}),;:]')\r\n-- Docs: https://docs.pytest.org/en/latest/warnings.html\r\n=================== 1514 passed, 1 warnings in 27.96 seconds ===================\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nThis module contains ugly hacks and fixes for version compat issues.\n\nDo not be over-exited to add anything here.\n\"\"\"\n\nimport ast\n\n\ndef maybe_set_parent(tree: ast.AST) -> ast.AST:\n \"\"\"Sets parents for all nodes that do not have this prop.\"\"\"\n for statement in ast.walk(tree):\n for child in ast.iter_child_nodes(statement):\n if not hasattr(child, 'parent'): # noqa: Z113\n setattr(child, 'parent', statement)\n\n return tree\n", "path": "wemake_python_styleguide/compat.py"}, {"content": "# -*- coding: utf-8 -*-\n\nfrom ast import Module\nfrom typing import Generator\n\nfrom flake8.options.manager import OptionManager\n\nfrom wemake_python_styleguide import constants\nfrom wemake_python_styleguide.compat import maybe_set_parent\nfrom wemake_python_styleguide.options.config import Configuration\nfrom wemake_python_styleguide.types import (\n CheckerSequence,\n CheckResult,\n ConfigurationOptions,\n)\nfrom wemake_python_styleguide.version import version\nfrom wemake_python_styleguide.visitors.ast.complexity.counts import (\n MethodMembersVisitor,\n ModuleMembersVisitor,\n)\nfrom wemake_python_styleguide.visitors.ast.complexity.function import (\n FunctionComplexityVisitor,\n)\nfrom wemake_python_styleguide.visitors.ast.complexity.nested import (\n NestedComplexityVisitor,\n)\nfrom 
wemake_python_styleguide.visitors.ast.complexity.offset import (\n OffsetVisitor,\n)\nfrom wemake_python_styleguide.visitors.ast.wrong_class import WrongClassVisitor\nfrom wemake_python_styleguide.visitors.ast.wrong_contents import (\n WrongContentsVisitor,\n)\nfrom wemake_python_styleguide.visitors.ast.wrong_function_call import (\n WrongFunctionCallVisitor,\n)\nfrom wemake_python_styleguide.visitors.ast.wrong_import import (\n WrongImportVisitor,\n)\nfrom wemake_python_styleguide.visitors.ast.wrong_keyword import (\n WrongKeywordVisitor,\n WrongRaiseVisitor,\n)\nfrom wemake_python_styleguide.visitors.ast.wrong_name import (\n WrongModuleMetadataVisitor,\n WrongNameVisitor,\n)\nfrom wemake_python_styleguide.visitors.ast.wrong_string import (\n WrongStringVisitor,\n)\nfrom wemake_python_styleguide.visitors.filenames.wrong_module_name import (\n WrongModuleNameVisitor,\n)\n\n#: Visitors that should be working by default:\nENABLED_VISITORS: CheckerSequence = [\n # Styling and correctness:\n WrongRaiseVisitor,\n WrongFunctionCallVisitor,\n WrongImportVisitor,\n WrongKeywordVisitor,\n WrongNameVisitor,\n WrongModuleMetadataVisitor,\n WrongClassVisitor,\n WrongStringVisitor,\n WrongContentsVisitor,\n\n # Complexity:\n FunctionComplexityVisitor,\n NestedComplexityVisitor,\n OffsetVisitor,\n ModuleMembersVisitor,\n MethodMembersVisitor,\n\n # Modules:\n WrongModuleNameVisitor,\n]\n\n\nclass Checker(object):\n \"\"\"\n Main checker class.\n\n Runs all checks that are bundled with this package.\n If you want to add new checks they should be added to ``ENABLED_VISITORS``.\n \"\"\"\n\n name = 'wemake-python-styleguide'\n version = version\n\n config = Configuration()\n options: ConfigurationOptions\n\n def __init__(self, tree: Module, filename: str = constants.STDIN) -> None:\n \"\"\"Creates new checker instance.\"\"\"\n self.tree = maybe_set_parent(tree)\n self.filename = filename\n\n @classmethod\n def add_options(cls, parser: OptionManager) -> None:\n \"\"\"Calls Configuration instance method for registering options.\"\"\"\n cls.config.register_options(parser)\n\n @classmethod\n def parse_options(cls, options: ConfigurationOptions) -> None:\n \"\"\"Parses registered options for providing to the visitor.\"\"\"\n cls.options = options\n\n def run(self) -> Generator[CheckResult, None, None]:\n \"\"\"\n Runs the checker.\n\n This method is used by `flake8` API.\n After all configuration is parsed and passed.\n \"\"\"\n for visitor_class in ENABLED_VISITORS:\n visitor = visitor_class(\n self.options,\n tree=self.tree,\n filename=self.filename,\n )\n visitor.run()\n\n for error in visitor.errors:\n yield (*error.node_items(), type(self))\n", "path": "wemake_python_styleguide/checker.py"}]} | 1,964 | 382 |
gh_patches_debug_28846 | rasdani/github-patches | git_diff | mozilla__pontoon-2416 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove aurora redirects
I just looked at our root urls.py, and saw a bunch of aurora-related redirects.
It's been ... a decade or so; let's get rid of them.
CC @flodolo
</issue>
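Context for the cleanup: each legacy entry pairs an old `*-aurora` URL with a `RedirectView` issuing a permanent HTTP 301 redirect, so deleting one simply lets the old path return 404. The minimal shape of such an entry, taken from the listing below:

```python
from django.urls import path
from django.views.generic import RedirectView

urlpatterns = [
    # permanent=True makes Django answer with 301 Moved Permanently,
    # which browsers and crawlers may cache indefinitely
    path(
        "projects/firefox-aurora/<path:url>",
        RedirectView.as_view(url="/projects/firefox/%(url)s", permanent=True),
    ),
]
```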
<code>
[start of pontoon/urls.py]
1 from django.urls import include, path, register_converter
2 from django.urls.converters import StringConverter
3 from django.contrib import admin
4 from django.contrib.auth import logout
5 from django.views.generic import RedirectView, TemplateView
6
7 from pontoon.teams.views import team
8
9
10 class LocaleConverter(StringConverter):
11 regex = r"[A-Za-z0-9\-\@\.]+"
12
13
14 register_converter(LocaleConverter, "locale")
15
16 pontoon_js_view = TemplateView.as_view(
17 template_name="js/pontoon.js", content_type="text/javascript"
18 )
19
20 permission_denied_view = TemplateView.as_view(template_name="403.html")
21 page_not_found_view = TemplateView.as_view(template_name="404.html")
22 server_error_view = TemplateView.as_view(template_name="500.html")
23
24 urlpatterns = [
25 # Redirect legacy Aurora projects
26 path(
27 "projects/firefox-aurora/<path:url>",
28 RedirectView.as_view(url="/projects/firefox/%(url)s", permanent=True),
29 ),
30 path(
31 "projects/firefox-for-android-aurora/<path:url>",
32 RedirectView.as_view(
33 url="/projects/firefox-for-android/%(url)s", permanent=True
34 ),
35 ),
36 path(
37 "projects/thunderbird-aurora/<path:url>",
38 RedirectView.as_view(url="/projects/thunderbird/%(url)s", permanent=True),
39 ),
40 path(
41 "projects/lightning-aurora/<path:url>",
42 RedirectView.as_view(url="/projects/lightning/%(url)s", permanent=True),
43 ),
44 path(
45 "projects/seamonkey-aurora/<path:url>",
46 RedirectView.as_view(url="/projects/seamonkey/%(url)s", permanent=True),
47 ),
48 path(
49 "<locale:locale>/firefox-aurora/<path:url>",
50 RedirectView.as_view(url="/%(locale)s/firefox/%(url)s", permanent=True),
51 ),
52 path(
53 "<locale:locale>/firefox-for-android-aurora/<path:url>",
54 RedirectView.as_view(
55 url="/%(locale)s/firefox-for-android/%(url)s", permanent=True
56 ),
57 ),
58 path(
59 "<locale:locale>/thunderbird-aurora/<path:url>",
60 RedirectView.as_view(url="/%(locale)s/thunderbird/%(url)s", permanent=True),
61 ),
62 path(
63 "<locale:locale>/lightning-aurora/<path:url>",
64 RedirectView.as_view(url="/%(locale)s/lightning/%(url)s", permanent=True),
65 ),
66 path(
67 "<locale:locale>/seamonkey-aurora/<path:url>",
68 RedirectView.as_view(url="/%(locale)s/seamonkey/%(url)s", permanent=True),
69 ),
70 # Accounts
71 path("accounts/", include("pontoon.allauth_urls")),
72 # Admin
73 path("admin/", include("pontoon.administration.urls")),
74 # Django admin: Disable the login form
75 path("a/login/", permission_denied_view),
76 # Django admin
77 path("a/", admin.site.urls),
78 # Logout
79 path("signout/", logout, {"next_page": "/"}, name="signout"),
80 # Error pages
81 path("403/", permission_denied_view),
82 path("404/", page_not_found_view),
83 path("500/", server_error_view),
84 # Robots.txt
85 path(
86 "robots.txt",
87 TemplateView.as_view(template_name="robots.txt", content_type="text/plain"),
88 ),
89 # contribute.json
90 path(
91 "contribute.json",
92 TemplateView.as_view(
93 template_name="contribute.json", content_type="text/plain"
94 ),
95 ),
96 # Favicon
97 path(
98 "favicon.ico",
99 RedirectView.as_view(url="/static/img/favicon.ico", permanent=True),
100 ),
101 # Include script
102 path("pontoon.js", pontoon_js_view),
103 path("static/js/pontoon.js", pontoon_js_view),
104 # Include URL configurations from installed apps
105 path("terminology/", include("pontoon.terminology.urls")),
106 path("translations/", include("pontoon.translations.urls")),
107 path("", include("pontoon.teams.urls")),
108 path("", include("pontoon.tour.urls")),
109 path("", include("pontoon.tags.urls")),
110 path("", include("pontoon.sync.urls")),
111 path("", include("pontoon.projects.urls")),
112 path("", include("pontoon.machinery.urls")),
113 path("", include("pontoon.contributors.urls")),
114 path("", include("pontoon.localizations.urls")),
115 path("", include("pontoon.base.urls")),
116 path("", include("pontoon.translate.urls")),
117 path("", include("pontoon.batch.urls")),
118 path("", include("pontoon.api.urls")),
119 path("", include("pontoon.homepage.urls")),
120 path("", include("pontoon.in_context.urls")),
121 path("", include("pontoon.uxactionlog.urls")),
122 # Team page: Must be at the end
123 path("<locale:locale>/", team, name="pontoon.teams.team"),
124 ]
125
[end of pontoon/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pontoon/urls.py b/pontoon/urls.py
--- a/pontoon/urls.py
+++ b/pontoon/urls.py
@@ -22,51 +22,6 @@
server_error_view = TemplateView.as_view(template_name="500.html")
urlpatterns = [
- # Redirect legacy Aurora projects
- path(
- "projects/firefox-aurora/<path:url>",
- RedirectView.as_view(url="/projects/firefox/%(url)s", permanent=True),
- ),
- path(
- "projects/firefox-for-android-aurora/<path:url>",
- RedirectView.as_view(
- url="/projects/firefox-for-android/%(url)s", permanent=True
- ),
- ),
- path(
- "projects/thunderbird-aurora/<path:url>",
- RedirectView.as_view(url="/projects/thunderbird/%(url)s", permanent=True),
- ),
- path(
- "projects/lightning-aurora/<path:url>",
- RedirectView.as_view(url="/projects/lightning/%(url)s", permanent=True),
- ),
- path(
- "projects/seamonkey-aurora/<path:url>",
- RedirectView.as_view(url="/projects/seamonkey/%(url)s", permanent=True),
- ),
- path(
- "<locale:locale>/firefox-aurora/<path:url>",
- RedirectView.as_view(url="/%(locale)s/firefox/%(url)s", permanent=True),
- ),
- path(
- "<locale:locale>/firefox-for-android-aurora/<path:url>",
- RedirectView.as_view(
- url="/%(locale)s/firefox-for-android/%(url)s", permanent=True
- ),
- ),
- path(
- "<locale:locale>/thunderbird-aurora/<path:url>",
- RedirectView.as_view(url="/%(locale)s/thunderbird/%(url)s", permanent=True),
- ),
- path(
- "<locale:locale>/lightning-aurora/<path:url>",
- RedirectView.as_view(url="/%(locale)s/lightning/%(url)s", permanent=True),
- ),
- path(
- "<locale:locale>/seamonkey-aurora/<path:url>",
- RedirectView.as_view(url="/%(locale)s/seamonkey/%(url)s", permanent=True),
- ),
# Accounts
path("accounts/", include("pontoon.allauth_urls")),
# Admin
| {"golden_diff": "diff --git a/pontoon/urls.py b/pontoon/urls.py\n--- a/pontoon/urls.py\n+++ b/pontoon/urls.py\n@@ -22,51 +22,6 @@\n server_error_view = TemplateView.as_view(template_name=\"500.html\")\n \n urlpatterns = [\n- # Redirect legacy Aurora projects\n- path(\n- \"projects/firefox-aurora/<path:url>\",\n- RedirectView.as_view(url=\"/projects/firefox/%(url)s\", permanent=True),\n- ),\n- path(\n- \"projects/firefox-for-android-aurora/<path:url>\",\n- RedirectView.as_view(\n- url=\"/projects/firefox-for-android/%(url)s\", permanent=True\n- ),\n- ),\n- path(\n- \"projects/thunderbird-aurora/<path:url>\",\n- RedirectView.as_view(url=\"/projects/thunderbird/%(url)s\", permanent=True),\n- ),\n- path(\n- \"projects/lightning-aurora/<path:url>\",\n- RedirectView.as_view(url=\"/projects/lightning/%(url)s\", permanent=True),\n- ),\n- path(\n- \"projects/seamonkey-aurora/<path:url>\",\n- RedirectView.as_view(url=\"/projects/seamonkey/%(url)s\", permanent=True),\n- ),\n- path(\n- \"<locale:locale>/firefox-aurora/<path:url>\",\n- RedirectView.as_view(url=\"/%(locale)s/firefox/%(url)s\", permanent=True),\n- ),\n- path(\n- \"<locale:locale>/firefox-for-android-aurora/<path:url>\",\n- RedirectView.as_view(\n- url=\"/%(locale)s/firefox-for-android/%(url)s\", permanent=True\n- ),\n- ),\n- path(\n- \"<locale:locale>/thunderbird-aurora/<path:url>\",\n- RedirectView.as_view(url=\"/%(locale)s/thunderbird/%(url)s\", permanent=True),\n- ),\n- path(\n- \"<locale:locale>/lightning-aurora/<path:url>\",\n- RedirectView.as_view(url=\"/%(locale)s/lightning/%(url)s\", permanent=True),\n- ),\n- path(\n- \"<locale:locale>/seamonkey-aurora/<path:url>\",\n- RedirectView.as_view(url=\"/%(locale)s/seamonkey/%(url)s\", permanent=True),\n- ),\n # Accounts\n path(\"accounts/\", include(\"pontoon.allauth_urls\")),\n # Admin\n", "issue": "Remove aurora redirects\nI just looked at our root urls.py, and saw a bunch of aurora-related redirects.\r\n\r\nIt's been ... 
a decade or so, let's get rid of them.\r\n\r\nCC @flodolo \n", "before_files": [{"content": "from django.urls import include, path, register_converter\nfrom django.urls.converters import StringConverter\nfrom django.contrib import admin\nfrom django.contrib.auth import logout\nfrom django.views.generic import RedirectView, TemplateView\n\nfrom pontoon.teams.views import team\n\n\nclass LocaleConverter(StringConverter):\n regex = r\"[A-Za-z0-9\\-\\@\\.]+\"\n\n\nregister_converter(LocaleConverter, \"locale\")\n\npontoon_js_view = TemplateView.as_view(\n template_name=\"js/pontoon.js\", content_type=\"text/javascript\"\n)\n\npermission_denied_view = TemplateView.as_view(template_name=\"403.html\")\npage_not_found_view = TemplateView.as_view(template_name=\"404.html\")\nserver_error_view = TemplateView.as_view(template_name=\"500.html\")\n\nurlpatterns = [\n # Redirect legacy Aurora projects\n path(\n \"projects/firefox-aurora/<path:url>\",\n RedirectView.as_view(url=\"/projects/firefox/%(url)s\", permanent=True),\n ),\n path(\n \"projects/firefox-for-android-aurora/<path:url>\",\n RedirectView.as_view(\n url=\"/projects/firefox-for-android/%(url)s\", permanent=True\n ),\n ),\n path(\n \"projects/thunderbird-aurora/<path:url>\",\n RedirectView.as_view(url=\"/projects/thunderbird/%(url)s\", permanent=True),\n ),\n path(\n \"projects/lightning-aurora/<path:url>\",\n RedirectView.as_view(url=\"/projects/lightning/%(url)s\", permanent=True),\n ),\n path(\n \"projects/seamonkey-aurora/<path:url>\",\n RedirectView.as_view(url=\"/projects/seamonkey/%(url)s\", permanent=True),\n ),\n path(\n \"<locale:locale>/firefox-aurora/<path:url>\",\n RedirectView.as_view(url=\"/%(locale)s/firefox/%(url)s\", permanent=True),\n ),\n path(\n \"<locale:locale>/firefox-for-android-aurora/<path:url>\",\n RedirectView.as_view(\n url=\"/%(locale)s/firefox-for-android/%(url)s\", permanent=True\n ),\n ),\n path(\n \"<locale:locale>/thunderbird-aurora/<path:url>\",\n RedirectView.as_view(url=\"/%(locale)s/thunderbird/%(url)s\", permanent=True),\n ),\n path(\n \"<locale:locale>/lightning-aurora/<path:url>\",\n RedirectView.as_view(url=\"/%(locale)s/lightning/%(url)s\", permanent=True),\n ),\n path(\n \"<locale:locale>/seamonkey-aurora/<path:url>\",\n RedirectView.as_view(url=\"/%(locale)s/seamonkey/%(url)s\", permanent=True),\n ),\n # Accounts\n path(\"accounts/\", include(\"pontoon.allauth_urls\")),\n # Admin\n path(\"admin/\", include(\"pontoon.administration.urls\")),\n # Django admin: Disable the login form\n path(\"a/login/\", permission_denied_view),\n # Django admin\n path(\"a/\", admin.site.urls),\n # Logout\n path(\"signout/\", logout, {\"next_page\": \"/\"}, name=\"signout\"),\n # Error pages\n path(\"403/\", permission_denied_view),\n path(\"404/\", page_not_found_view),\n path(\"500/\", server_error_view),\n # Robots.txt\n path(\n \"robots.txt\",\n TemplateView.as_view(template_name=\"robots.txt\", content_type=\"text/plain\"),\n ),\n # contribute.json\n path(\n \"contribute.json\",\n TemplateView.as_view(\n template_name=\"contribute.json\", content_type=\"text/plain\"\n ),\n ),\n # Favicon\n path(\n \"favicon.ico\",\n RedirectView.as_view(url=\"/static/img/favicon.ico\", permanent=True),\n ),\n # Include script\n path(\"pontoon.js\", pontoon_js_view),\n path(\"static/js/pontoon.js\", pontoon_js_view),\n # Include URL configurations from installed apps\n path(\"terminology/\", include(\"pontoon.terminology.urls\")),\n path(\"translations/\", include(\"pontoon.translations.urls\")),\n path(\"\", 
include(\"pontoon.teams.urls\")),\n path(\"\", include(\"pontoon.tour.urls\")),\n path(\"\", include(\"pontoon.tags.urls\")),\n path(\"\", include(\"pontoon.sync.urls\")),\n path(\"\", include(\"pontoon.projects.urls\")),\n path(\"\", include(\"pontoon.machinery.urls\")),\n path(\"\", include(\"pontoon.contributors.urls\")),\n path(\"\", include(\"pontoon.localizations.urls\")),\n path(\"\", include(\"pontoon.base.urls\")),\n path(\"\", include(\"pontoon.translate.urls\")),\n path(\"\", include(\"pontoon.batch.urls\")),\n path(\"\", include(\"pontoon.api.urls\")),\n path(\"\", include(\"pontoon.homepage.urls\")),\n path(\"\", include(\"pontoon.in_context.urls\")),\n path(\"\", include(\"pontoon.uxactionlog.urls\")),\n # Team page: Must be at the end\n path(\"<locale:locale>/\", team, name=\"pontoon.teams.team\"),\n]\n", "path": "pontoon/urls.py"}]} | 1,937 | 541 |
gh_patches_debug_1707 | rasdani/github-patches | git_diff | bridgecrewio__checkov-5247 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Missing AWS RDS CA in CKV_AWS_211
**Describe the issue**
In check CKV_AWS_211, checkov currently only checks for one possible CA on AWS RDS instances, namely `rds-ca-2019` (see [associated code](https://github.com/bridgecrewio/checkov/blob/master/checkov/terraform/checks/resource/aws/RDSCACertIsRecent.py#L24)) whereas RDS supports several (see [AWS docs](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL.html#UsingWithRDS.SSL.RegionCertificateAuthorities)). The check should accept those CAs: `rds-ca-rsa2048-g1`, `rds-ca-rsa4096-g1` and `rds-ca-ecc384-g1`.
**Examples**
Terraform code on which the check should pass:
```terraform
resource "aws_db_instance" "pass3" {
allocated_storage = 20
storage_type = "gp2"
engine = "mysql"
engine_version = "5.7"
instance_class = "db.t2.micro"
db_name = "mydb"
username = "foo"
password = "foobarbaz"
iam_database_authentication_enabled = true
storage_encrypted = true
ca_cert_identifier = "rds-ca-rsa2048-g1"
}
```
When I run checkov on this Terraform example, the check fails, whereas it should pass:
```
Check: CKV_AWS_211: "Ensure RDS uses a modern CaCert"
FAILED for resource: aws_db_instance.pass3
File: /main.tf:43-55
Guide: https://docs.paloaltonetworks.com/content/techdocs/en_US/prisma/prisma-cloud/prisma-cloud-code-security-policy-reference/aws-policies/aws-general-policies/ensure-aws-rds-uses-a-modern-cacert.html
43 | resource "aws_db_instance" "pass3" {
44 | allocated_storage = 20
45 | storage_type = "gp2"
46 | engine = "mysql"
47 | engine_version = "5.7"
48 | instance_class = "db.t2.micro"
49 | db_name = "mydb"
50 | username = "foo"
51 | password = "foobarbaz"
52 | iam_database_authentication_enabled = true
53 | storage_encrypted = true
54 | ca_cert_identifier = "rds-ca-rsa2048-g1"
55 | }
```
**Version (please complete the following information):**
- Checkov Version 2.0.930
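
For reference, a sketch of what the corrected expected-values list might look like (whether the legacy `rds-ca-2019` should stay accepted for older instances is an open question; this snippet keeps only the new CAs):

```python
def get_expected_values(self) -> List[Any]:
    # current-generation CAs per the AWS RDS documentation
    return ["rds-ca-rsa2048-g1", "rds-ca-rsa4096-g1", "rds-ca-ecc384-g1"]
```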
</issue>
<code>
[start of checkov/terraform/checks/resource/aws/RDSCACertIsRecent.py]
1 from checkov.common.models.enums import CheckCategories, CheckResult
2 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
3 from typing import Any, List
4
5
6 class RDSCACertIsRecent(BaseResourceValueCheck):
7 def __init__(self):
8 name = "Ensure RDS uses a modern CaCert"
9 id = "CKV_AWS_211"
10 supported_resources = ["aws_db_instance"]
11 categories = [CheckCategories.GENERAL_SECURITY]
12 super().__init__(
13 name=name,
14 id=id,
15 categories=categories,
16 supported_resources=supported_resources,
17 missing_block_result=CheckResult.PASSED
18 )
19
20 def get_inspected_key(self) -> str:
21 return "ca_cert_identifier"
22
23 def get_expected_values(self) -> List[Any]:
24 return ["rds-ca-2019"]
25
26
27 check = RDSCACertIsRecent()
28
[end of checkov/terraform/checks/resource/aws/RDSCACertIsRecent.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/terraform/checks/resource/aws/RDSCACertIsRecent.py b/checkov/terraform/checks/resource/aws/RDSCACertIsRecent.py
--- a/checkov/terraform/checks/resource/aws/RDSCACertIsRecent.py
+++ b/checkov/terraform/checks/resource/aws/RDSCACertIsRecent.py
@@ -21,7 +21,7 @@
return "ca_cert_identifier"
def get_expected_values(self) -> List[Any]:
- return ["rds-ca-2019"]
+ return ["rds-ca-rsa2048-g1", "rds-ca-rsa4096-g1", "rds-ca-ecc384-g1"]
check = RDSCACertIsRecent()
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/RDSCACertIsRecent.py b/checkov/terraform/checks/resource/aws/RDSCACertIsRecent.py\n--- a/checkov/terraform/checks/resource/aws/RDSCACertIsRecent.py\n+++ b/checkov/terraform/checks/resource/aws/RDSCACertIsRecent.py\n@@ -21,7 +21,7 @@\n return \"ca_cert_identifier\"\n \n def get_expected_values(self) -> List[Any]:\n- return [\"rds-ca-2019\"]\n+ return [\"rds-ca-rsa2048-g1\", \"rds-ca-rsa4096-g1\", \"rds-ca-ecc384-g1\"]\n \n \n check = RDSCACertIsRecent()\n", "issue": "Missing AWS RDS CA in CKV_AWS_211\n**Describe the issue**\r\nIn check CKV_AWS_211, checkov currently only checks for one possible CA on AWS RDS instances, namely `rds-ca-2019` (see [associated code](https://github.com/bridgecrewio/checkov/blob/master/checkov/terraform/checks/resource/aws/RDSCACertIsRecent.py#L24)) whereas RDS supports several (see [AWS docs](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL.html#UsingWithRDS.SSL.RegionCertificateAuthorities)). The check should accept those CAs: `rds-ca-rsa2048-g1`, `rds-ca-rsa4096-g1` and `rds-ca-ecc384-g1`.\r\n\r\n**Examples**\r\nTerraform code on which the check should pass:\r\n```terraform\r\nresource \"aws_db_instance\" \"pass3\" {\r\n allocated_storage = 20\r\n storage_type = \"gp2\"\r\n engine = \"mysql\"\r\n engine_version = \"5.7\"\r\n instance_class = \"db.t2.micro\"\r\n db_name = \"mydb\"\r\n username = \"foo\"\r\n password = \"foobarbaz\"\r\n iam_database_authentication_enabled = true\r\n storage_encrypted = true\r\n ca_cert_identifier = \"rds-ca-rsa2048-g1\"\r\n}\r\n```\r\nWhen I run checkov on this Terraform example, I get an error whereas the test should pass:\r\n\r\n```\r\nCheck: CKV_AWS_211: \"Ensure RDS uses a modern CaCert\"\r\n\tFAILED for resource: aws_db_instance.pass3\r\n\tFile: /main.tf:43-55\r\n\tGuide: https://docs.paloaltonetworks.com/content/techdocs/en_US/prisma/prisma-cloud/prisma-cloud-code-security-policy-reference/aws-policies/aws-general-policies/ensure-aws-rds-uses-a-modern-cacert.html\r\n\r\n\t\t43 | resource \"aws_db_instance\" \"pass3\" {\r\n\t\t44 | allocated_storage = 20\r\n\t\t45 | storage_type = \"gp2\"\r\n\t\t46 | engine = \"mysql\"\r\n\t\t47 | engine_version = \"5.7\"\r\n\t\t48 | instance_class = \"db.t2.micro\"\r\n\t\t49 | db_name = \"mydb\"\r\n\t\t50 | username = \"foo\"\r\n\t\t51 | password = \"foobarbaz\"\r\n\t\t52 | iam_database_authentication_enabled = true\r\n\t\t53 | storage_encrypted = true\r\n\t\t54 | ca_cert_identifier = \"rds-ca-rsa2048-g1\"\r\n\t\t55 | }\r\n```\r\n\r\n**Version (please complete the following information):**\r\n - Checkov Version 2.0.930\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\nfrom typing import Any, List\n\n\nclass RDSCACertIsRecent(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure RDS uses a modern CaCert\"\n id = \"CKV_AWS_211\"\n supported_resources = [\"aws_db_instance\"]\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(\n name=name,\n id=id,\n categories=categories,\n supported_resources=supported_resources,\n missing_block_result=CheckResult.PASSED\n )\n\n def get_inspected_key(self) -> str:\n return \"ca_cert_identifier\"\n\n def get_expected_values(self) -> List[Any]:\n return [\"rds-ca-2019\"]\n\n\ncheck = RDSCACertIsRecent()\n", "path": "checkov/terraform/checks/resource/aws/RDSCACertIsRecent.py"}]} | 1,454 | 
172 |
gh_patches_debug_42294 | rasdani/github-patches | git_diff | lightly-ai__lightly-491 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Lightly-Crop: memory leak
When using lightly-crop some users experience a memory leak.
- [ ] Try to reproduce it.
- [ ] Fix it
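
A plausible culprit (an assumption to verify, not a confirmed diagnosis): `crop_dataset_by_bounding_boxes_and_save` materializes every crop of an image as a `PIL.Image` in a list before any of them is saved, so all decoded pixel buffers stay alive at once. A minimal sketch of the leaner per-bbox pattern:

```python
image = Image.open(filepath_image)
for index, bbox in enumerate(bounding_boxes):
    w, h = image.size
    crop_box = tuple(int(v) for v in (w * bbox.x0, h * bbox.y0, w * bbox.x1, h * bbox.y1))
    # out_path() is a hypothetical helper; save immediately, keep no list of crops
    image.crop(crop_box).save(out_path(index))
```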
</issue>
<code>
[start of lightly/utils/cropping/crop_image_by_bounding_boxes.py]
1 import os.path
2 import warnings
3 from pathlib import Path
4 from typing import List
5
6 from PIL import Image
7 from tqdm import tqdm
8
9 from lightly.active_learning.utils import BoundingBox
10 from lightly.data import LightlyDataset
11
12
13 def crop_image_by_bounding_boxes(image_filepath: str, bounding_boxes: List[BoundingBox]) -> List[Image.Image]:
14 image = Image.open(image_filepath)
15 cropped_images = []
16 for bbox in bounding_boxes:
17 w, h = image.size
18 crop_box = (w * bbox.x0, h * bbox.y0, w * bbox.x1, h * bbox.y1)
19 crop_box = tuple(int(i) for i in crop_box)
20 cropped_image = image.crop(crop_box)
21 cropped_images.append(cropped_image)
22 return cropped_images
23
24
25 def crop_dataset_by_bounding_boxes_and_save(dataset: LightlyDataset,
26 output_dir: str,
27 bounding_boxes_list_list: List[List[BoundingBox]],
28 class_indices_list_list: List[List[int]],
29 class_names: List[str] = None
30 ) -> List[List[str]]:
31 """Crops all images in a dataset by the bounding boxes and saves them in the output dir
32
33 Args:
34 dataset:
35 The dataset with the images to be cropped. Must contain M images.
36 output_dir:
37 The output directory to saved the cropped images to.
38 bounding_boxes_list_list:
39 The bounding boxes of the detections for each image. Must have M sublists, one for each image.
40 Each sublist contains the bounding boxes for each detection, thus N_m elements.
41 class_indices_list_list:
42 The object class ids of the detections for each image. Must have M sublists, one for each image.
43 Each sublist contains the bounding boxes for each detection, thus N_m elements.
44 class_names:
45 The names of the classes, used to map the class id to the class name.
46
47
48 Returns:
49 The filepaths to all saved cropped images. Has M sublists, one for each image.
50 Each sublist contains the filepath of the crop each detection, thus N_m elements.
51
52 """
53 filenames_images = dataset.get_filenames()
54 if len(filenames_images) != len(bounding_boxes_list_list) or len(filenames_images) != len(class_indices_list_list):
55 raise ValueError("There must be one bounding box and class index list for each image in the datasets,"
56 "but the lengths dont align.")
57
58 cropped_image_filepath_list_list: List[List[Image]] = []
59
60
61 print(f"Cropping objects out of {len(filenames_images)} images...")
62 for filename_image, class_indices, bounding_boxes in \
63 tqdm(zip(filenames_images, class_indices_list_list, bounding_boxes_list_list)):
64
65 if not len(class_indices) == len(bounding_boxes):
66 warnings.warn(UserWarning(f"Length of class indices ({len(class_indices)} does not equal length of bounding boxes"
67 f"({len(bounding_boxes)}. This is an error in the input arguments. "
68 f"Skipping this image {filename_image}."))
69 continue
70
71 filepath_image = dataset.get_filepath_from_filename(filename_image)
72 filepath_image_base, image_extension = os.path.splitext(filepath_image)
73
74 filepath_out_dir = os.path.join(output_dir, filename_image).replace(image_extension, '')
75 Path(filepath_out_dir).mkdir(parents=True, exist_ok=True)
76
77 cropped_images = crop_image_by_bounding_boxes(filepath_image, bounding_boxes)
78 cropped_images_filepaths = []
79 for index, (class_index, cropped_image) in enumerate((zip(class_indices, cropped_images))):
80 if class_names:
81 class_name = class_names[class_index]
82 else:
83 class_name = f"class{class_index}"
84 cropped_image_last_filename = f'{index}_{class_name}{image_extension}'
85 cropped_image_filepath = os.path.join(filepath_out_dir, cropped_image_last_filename)
86 cropped_image.save(cropped_image_filepath)
87
88 cropped_image_filename = os.path.join(filename_image.replace(image_extension, ''), cropped_image_last_filename)
89 cropped_images_filepaths.append(cropped_image_filename)
90
91 cropped_image_filepath_list_list.append(cropped_images_filepaths)
92
93 return cropped_image_filepath_list_list
94
[end of lightly/utils/cropping/crop_image_by_bounding_boxes.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lightly/utils/cropping/crop_image_by_bounding_boxes.py b/lightly/utils/cropping/crop_image_by_bounding_boxes.py
--- a/lightly/utils/cropping/crop_image_by_bounding_boxes.py
+++ b/lightly/utils/cropping/crop_image_by_bounding_boxes.py
@@ -10,18 +10,6 @@
from lightly.data import LightlyDataset
-def crop_image_by_bounding_boxes(image_filepath: str, bounding_boxes: List[BoundingBox]) -> List[Image.Image]:
- image = Image.open(image_filepath)
- cropped_images = []
- for bbox in bounding_boxes:
- w, h = image.size
- crop_box = (w * bbox.x0, h * bbox.y0, w * bbox.x1, h * bbox.y1)
- crop_box = tuple(int(i) for i in crop_box)
- cropped_image = image.crop(crop_box)
- cropped_images.append(cropped_image)
- return cropped_images
-
-
def crop_dataset_by_bounding_boxes_and_save(dataset: LightlyDataset,
output_dir: str,
bounding_boxes_list_list: List[List[BoundingBox]],
@@ -55,7 +43,7 @@
raise ValueError("There must be one bounding box and class index list for each image in the datasets,"
"but the lengths dont align.")
- cropped_image_filepath_list_list: List[List[Image]] = []
+ cropped_image_filepath_list_list: List[List[str]] = []
print(f"Cropping objects out of {len(filenames_images)} images...")
@@ -71,21 +59,38 @@
filepath_image = dataset.get_filepath_from_filename(filename_image)
filepath_image_base, image_extension = os.path.splitext(filepath_image)
- filepath_out_dir = os.path.join(output_dir, filename_image).replace(image_extension, '')
+ filepath_out_dir = os.path.join(output_dir, filename_image)\
+ .replace(image_extension, '')
Path(filepath_out_dir).mkdir(parents=True, exist_ok=True)
- cropped_images = crop_image_by_bounding_boxes(filepath_image, bounding_boxes)
+ image = Image.open(filepath_image)
+
cropped_images_filepaths = []
- for index, (class_index, cropped_image) in enumerate((zip(class_indices, cropped_images))):
+ # For every image, crop out multiple cropped images, one for each
+ # bounding box
+ for index, (class_index, bbox) in \
+ enumerate((zip(class_indices, bounding_boxes))):
+
+ # determine the filename and filepath of the cropped image
if class_names:
class_name = class_names[class_index]
else:
class_name = f"class{class_index}"
cropped_image_last_filename = f'{index}_{class_name}{image_extension}'
cropped_image_filepath = os.path.join(filepath_out_dir, cropped_image_last_filename)
+
+ # crop out the image and save it
+ w, h = image.size
+ crop_box = (w * bbox.x0, h * bbox.y0, w * bbox.x1, h * bbox.y1)
+ crop_box = tuple(int(i) for i in crop_box)
+ cropped_image = image.crop(crop_box)
cropped_image.save(cropped_image_filepath)
- cropped_image_filename = os.path.join(filename_image.replace(image_extension, ''), cropped_image_last_filename)
+ # add the filename of the cropped image to the corresponding list
+ cropped_image_filename: str = os.path.join(
+ filename_image.replace(image_extension, ''),
+ cropped_image_last_filename
+ )
cropped_images_filepaths.append(cropped_image_filename)
cropped_image_filepath_list_list.append(cropped_images_filepaths)
| {"golden_diff": "diff --git a/lightly/utils/cropping/crop_image_by_bounding_boxes.py b/lightly/utils/cropping/crop_image_by_bounding_boxes.py\n--- a/lightly/utils/cropping/crop_image_by_bounding_boxes.py\n+++ b/lightly/utils/cropping/crop_image_by_bounding_boxes.py\n@@ -10,18 +10,6 @@\n from lightly.data import LightlyDataset\n \n \n-def crop_image_by_bounding_boxes(image_filepath: str, bounding_boxes: List[BoundingBox]) -> List[Image.Image]:\n- image = Image.open(image_filepath)\n- cropped_images = []\n- for bbox in bounding_boxes:\n- w, h = image.size\n- crop_box = (w * bbox.x0, h * bbox.y0, w * bbox.x1, h * bbox.y1)\n- crop_box = tuple(int(i) for i in crop_box)\n- cropped_image = image.crop(crop_box)\n- cropped_images.append(cropped_image)\n- return cropped_images\n-\n-\n def crop_dataset_by_bounding_boxes_and_save(dataset: LightlyDataset,\n output_dir: str,\n bounding_boxes_list_list: List[List[BoundingBox]],\n@@ -55,7 +43,7 @@\n raise ValueError(\"There must be one bounding box and class index list for each image in the datasets,\"\n \"but the lengths dont align.\")\n \n- cropped_image_filepath_list_list: List[List[Image]] = []\n+ cropped_image_filepath_list_list: List[List[str]] = []\n \n \n print(f\"Cropping objects out of {len(filenames_images)} images...\")\n@@ -71,21 +59,38 @@\n filepath_image = dataset.get_filepath_from_filename(filename_image)\n filepath_image_base, image_extension = os.path.splitext(filepath_image)\n \n- filepath_out_dir = os.path.join(output_dir, filename_image).replace(image_extension, '')\n+ filepath_out_dir = os.path.join(output_dir, filename_image)\\\n+ .replace(image_extension, '')\n Path(filepath_out_dir).mkdir(parents=True, exist_ok=True)\n \n- cropped_images = crop_image_by_bounding_boxes(filepath_image, bounding_boxes)\n+ image = Image.open(filepath_image)\n+ \n cropped_images_filepaths = []\n- for index, (class_index, cropped_image) in enumerate((zip(class_indices, cropped_images))):\n+ # For every image, crop out multiple cropped images, one for each\n+ # bounding box\n+ for index, (class_index, bbox) in \\\n+ enumerate((zip(class_indices, bounding_boxes))):\n+\n+ # determine the filename and filepath of the cropped image\n if class_names:\n class_name = class_names[class_index]\n else:\n class_name = f\"class{class_index}\"\n cropped_image_last_filename = f'{index}_{class_name}{image_extension}'\n cropped_image_filepath = os.path.join(filepath_out_dir, cropped_image_last_filename)\n+\n+ # crop out the image and save it\n+ w, h = image.size\n+ crop_box = (w * bbox.x0, h * bbox.y0, w * bbox.x1, h * bbox.y1)\n+ crop_box = tuple(int(i) for i in crop_box)\n+ cropped_image = image.crop(crop_box)\n cropped_image.save(cropped_image_filepath)\n \n- cropped_image_filename = os.path.join(filename_image.replace(image_extension, ''), cropped_image_last_filename)\n+ # add the filename of the cropped image to the corresponding list\n+ cropped_image_filename: str = os.path.join(\n+ filename_image.replace(image_extension, ''),\n+ cropped_image_last_filename\n+ )\n cropped_images_filepaths.append(cropped_image_filename)\n \n cropped_image_filepath_list_list.append(cropped_images_filepaths)\n", "issue": "Lightly-Crop: memory leak\nWhen using lightly-crop some users experience a memory leak.\r\n\r\n- [ ] Try to reproduce it.\r\n- [ ] Fix it\n", "before_files": [{"content": "import os.path\nimport warnings\nfrom pathlib import Path\nfrom typing import List\n\nfrom PIL import Image\nfrom tqdm import tqdm\n\nfrom lightly.active_learning.utils import BoundingBox\nfrom 
lightly.data import LightlyDataset\n\n\ndef crop_image_by_bounding_boxes(image_filepath: str, bounding_boxes: List[BoundingBox]) -> List[Image.Image]:\n image = Image.open(image_filepath)\n cropped_images = []\n for bbox in bounding_boxes:\n w, h = image.size\n crop_box = (w * bbox.x0, h * bbox.y0, w * bbox.x1, h * bbox.y1)\n crop_box = tuple(int(i) for i in crop_box)\n cropped_image = image.crop(crop_box)\n cropped_images.append(cropped_image)\n return cropped_images\n\n\ndef crop_dataset_by_bounding_boxes_and_save(dataset: LightlyDataset,\n output_dir: str,\n bounding_boxes_list_list: List[List[BoundingBox]],\n class_indices_list_list: List[List[int]],\n class_names: List[str] = None\n ) -> List[List[str]]:\n \"\"\"Crops all images in a dataset by the bounding boxes and saves them in the output dir\n\n Args:\n dataset:\n The dataset with the images to be cropped. Must contain M images.\n output_dir:\n The output directory to saved the cropped images to.\n bounding_boxes_list_list:\n The bounding boxes of the detections for each image. Must have M sublists, one for each image.\n Each sublist contains the bounding boxes for each detection, thus N_m elements.\n class_indices_list_list:\n The object class ids of the detections for each image. Must have M sublists, one for each image.\n Each sublist contains the bounding boxes for each detection, thus N_m elements.\n class_names:\n The names of the classes, used to map the class id to the class name.\n\n\n Returns:\n The filepaths to all saved cropped images. Has M sublists, one for each image.\n Each sublist contains the filepath of the crop each detection, thus N_m elements.\n\n \"\"\"\n filenames_images = dataset.get_filenames()\n if len(filenames_images) != len(bounding_boxes_list_list) or len(filenames_images) != len(class_indices_list_list):\n raise ValueError(\"There must be one bounding box and class index list for each image in the datasets,\"\n \"but the lengths dont align.\")\n\n cropped_image_filepath_list_list: List[List[Image]] = []\n\n\n print(f\"Cropping objects out of {len(filenames_images)} images...\")\n for filename_image, class_indices, bounding_boxes in \\\n tqdm(zip(filenames_images, class_indices_list_list, bounding_boxes_list_list)):\n\n if not len(class_indices) == len(bounding_boxes):\n warnings.warn(UserWarning(f\"Length of class indices ({len(class_indices)} does not equal length of bounding boxes\"\n f\"({len(bounding_boxes)}. This is an error in the input arguments. 
\"\n f\"Skipping this image {filename_image}.\"))\n continue\n\n filepath_image = dataset.get_filepath_from_filename(filename_image)\n filepath_image_base, image_extension = os.path.splitext(filepath_image)\n\n filepath_out_dir = os.path.join(output_dir, filename_image).replace(image_extension, '')\n Path(filepath_out_dir).mkdir(parents=True, exist_ok=True)\n\n cropped_images = crop_image_by_bounding_boxes(filepath_image, bounding_boxes)\n cropped_images_filepaths = []\n for index, (class_index, cropped_image) in enumerate((zip(class_indices, cropped_images))):\n if class_names:\n class_name = class_names[class_index]\n else:\n class_name = f\"class{class_index}\"\n cropped_image_last_filename = f'{index}_{class_name}{image_extension}'\n cropped_image_filepath = os.path.join(filepath_out_dir, cropped_image_last_filename)\n cropped_image.save(cropped_image_filepath)\n\n cropped_image_filename = os.path.join(filename_image.replace(image_extension, ''), cropped_image_last_filename)\n cropped_images_filepaths.append(cropped_image_filename)\n\n cropped_image_filepath_list_list.append(cropped_images_filepaths)\n\n return cropped_image_filepath_list_list\n", "path": "lightly/utils/cropping/crop_image_by_bounding_boxes.py"}]} | 1,634 | 794 |
gh_patches_debug_3391 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-2636 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
mitmdump does not apply filter to saved data
##### Steps to reproduce the problem:
1. I captured some traffic, and ran the following to filter it:
```
$ mitmdump -r traffic.mitm -w out.mitm '~u main.css'
Proxy server listening at http://[::]:8080
172.16.122.1:51049: GET https://www.sjoerdlangkemper.nl/css/main.css
<< 304 Not Modified 0b
$
```
It displays only the matched URL, but it saves all traffic. When done, out.mitm contains the same requests and responses as traffic.mitm; that is, `mitmproxy -r out.mitm` shows a lot of requests, where I would expect only the request for main.css.
##### Any other comments? What have you tried so far?
I tried this with release 2.0.2, and there it worked as expected. This issue seems to be similar to #1089.
##### System information
```
$ mitmdump --version
Mitmproxy version: 3.0.0 (2.0.0dev0965-0x168c72a)
Python version: 3.5.2
Platform: Linux-4.4.0-98-generic-x86_64-with-Ubuntu-16.04-xenial
SSL version: OpenSSL 1.1.0f 25 May 2017
Linux distro: Ubuntu 16.04 xenial
```
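
A quick way to confirm the symptom from Python (this assumes `mitmproxy.io.FlowReader` from the 3.x API):

```python
from mitmproxy import io

with open("out.mitm", "rb") as f:
    flows = list(io.FlowReader(f).stream())

print(len(flows))  # expected: 1 (just main.css); observed: every captured flow
```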
</issue>
<code>
[start of mitmproxy/addons/save.py]
1 import os.path
2 import typing
3
4 from mitmproxy import exceptions
5 from mitmproxy import flowfilter
6 from mitmproxy import io
7 from mitmproxy import ctx
8 from mitmproxy import flow
9
10
11 class Save:
12 def __init__(self):
13 self.stream = None
14 self.filt = None
15 self.active_flows = set() # type: Set[flow.Flow]
16
17 def open_file(self, path):
18 if path.startswith("+"):
19 path = path[1:]
20 mode = "ab"
21 else:
22 mode = "wb"
23 path = os.path.expanduser(path)
24 return open(path, mode)
25
26 def start_stream_to_path(self, path, flt):
27 try:
28 f = self.open_file(path)
29 except IOError as v:
30 raise exceptions.OptionsError(str(v))
31 self.stream = io.FilteredFlowWriter(f, flt)
32 self.active_flows = set()
33
34 def configure(self, updated):
35 # We're already streaming - stop the previous stream and restart
36 if "save_stream_filter" in updated:
37 if ctx.options.save_stream_filter:
38 self.filt = flowfilter.parse(ctx.options.save_stream_filter)
39 if not self.filt:
40 raise exceptions.OptionsError(
41 "Invalid filter specification: %s" % ctx.options.save_stream_filter
42 )
43 else:
44 self.filt = None
45 if "save_stream_file" in updated:
46 if self.stream:
47 self.done()
48 if ctx.options.save_stream_file:
49 self.start_stream_to_path(ctx.options.save_stream_file, self.filt)
50
51 def save(self, flows: typing.Sequence[flow.Flow], path: str) -> None:
52 """
53 Save flows to a file. If the path starts with a +, flows are
54 appended to the file, otherwise it is over-written.
55 """
56 try:
57 f = self.open_file(path)
58 except IOError as v:
59 raise exceptions.CommandError(v) from v
60 stream = io.FlowWriter(f)
61 for i in flows:
62 stream.add(i)
63 f.close()
64 ctx.log.alert("Saved %s flows." % len(flows))
65
66 def load(self, l):
67 l.add_command("save.file", self.save)
68
69 def tcp_start(self, flow):
70 if self.stream:
71 self.active_flows.add(flow)
72
73 def tcp_end(self, flow):
74 if self.stream:
75 self.stream.add(flow)
76 self.active_flows.discard(flow)
77
78 def response(self, flow):
79 if self.stream:
80 self.stream.add(flow)
81 self.active_flows.discard(flow)
82
83 def request(self, flow):
84 if self.stream:
85 self.active_flows.add(flow)
86
87 def done(self):
88 if self.stream:
89 for f in self.active_flows:
90 self.stream.add(f)
91 self.active_flows = set([])
92 self.stream.fo.close()
93 self.stream = None
94
[end of mitmproxy/addons/save.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mitmproxy/addons/save.py b/mitmproxy/addons/save.py
--- a/mitmproxy/addons/save.py
+++ b/mitmproxy/addons/save.py
@@ -42,7 +42,7 @@
)
else:
self.filt = None
- if "save_stream_file" in updated:
+ if "save_stream_file" in updated or "save_stream_filter" in updated:
if self.stream:
self.done()
if ctx.options.save_stream_file:
| {"golden_diff": "diff --git a/mitmproxy/addons/save.py b/mitmproxy/addons/save.py\n--- a/mitmproxy/addons/save.py\n+++ b/mitmproxy/addons/save.py\n@@ -42,7 +42,7 @@\n )\n else:\n self.filt = None\n- if \"save_stream_file\" in updated:\n+ if \"save_stream_file\" in updated or \"save_stream_filter\" in updated:\n if self.stream:\n self.done()\n if ctx.options.save_stream_file:\n", "issue": "mitmdump does not apply filter to saved data\n##### Steps to reproduce the problem:\r\n\r\n1. I captured some traffic, and ran the following to filter it:\r\n\r\n```\r\n$ mitmdump -r traffic.mitm -w out.mitm '~u main.css'\r\nProxy server listening at http://[::]:8080\r\n172.16.122.1:51049: GET https://www.sjoerdlangkemper.nl/css/main.css\r\n << 304 Not Modified 0b\r\n$\r\n```\r\n\r\nIt displays only the matched URL, but it saves all traffic. When done, out.mitm contains the same requests and responses as traffic.mitm. I.e. `mitmproxy -r out.mitm` shows a lot of requests, where I would expect only the request for main.css.\r\n\r\n\r\n##### Any other comments? What have you tried so far?\r\n\r\nI tried this with release 2.0.2, and there it worked as expected. This issue seems to be similar to #1089.\r\n\r\n##### System information\r\n\r\n```\r\n$ mitmdump --version\r\nMitmproxy version: 3.0.0 (2.0.0dev0965-0x168c72a) \r\nPython version: 3.5.2\r\nPlatform: Linux-4.4.0-98-generic-x86_64-with-Ubuntu-16.04-xenial\r\nSSL version: OpenSSL 1.1.0f 25 May 2017\r\nLinux distro: Ubuntu 16.04 xenial\r\n```\r\n\n", "before_files": [{"content": "import os.path\nimport typing\n\nfrom mitmproxy import exceptions\nfrom mitmproxy import flowfilter\nfrom mitmproxy import io\nfrom mitmproxy import ctx\nfrom mitmproxy import flow\n\n\nclass Save:\n def __init__(self):\n self.stream = None\n self.filt = None\n self.active_flows = set() # type: Set[flow.Flow]\n\n def open_file(self, path):\n if path.startswith(\"+\"):\n path = path[1:]\n mode = \"ab\"\n else:\n mode = \"wb\"\n path = os.path.expanduser(path)\n return open(path, mode)\n\n def start_stream_to_path(self, path, flt):\n try:\n f = self.open_file(path)\n except IOError as v:\n raise exceptions.OptionsError(str(v))\n self.stream = io.FilteredFlowWriter(f, flt)\n self.active_flows = set()\n\n def configure(self, updated):\n # We're already streaming - stop the previous stream and restart\n if \"save_stream_filter\" in updated:\n if ctx.options.save_stream_filter:\n self.filt = flowfilter.parse(ctx.options.save_stream_filter)\n if not self.filt:\n raise exceptions.OptionsError(\n \"Invalid filter specification: %s\" % ctx.options.save_stream_filter\n )\n else:\n self.filt = None\n if \"save_stream_file\" in updated:\n if self.stream:\n self.done()\n if ctx.options.save_stream_file:\n self.start_stream_to_path(ctx.options.save_stream_file, self.filt)\n\n def save(self, flows: typing.Sequence[flow.Flow], path: str) -> None:\n \"\"\"\n Save flows to a file. 
If the path starts with a +, flows are\n appended to the file, otherwise it is over-written.\n \"\"\"\n try:\n f = self.open_file(path)\n except IOError as v:\n raise exceptions.CommandError(v) from v\n stream = io.FlowWriter(f)\n for i in flows:\n stream.add(i)\n f.close()\n ctx.log.alert(\"Saved %s flows.\" % len(flows))\n\n def load(self, l):\n l.add_command(\"save.file\", self.save)\n\n def tcp_start(self, flow):\n if self.stream:\n self.active_flows.add(flow)\n\n def tcp_end(self, flow):\n if self.stream:\n self.stream.add(flow)\n self.active_flows.discard(flow)\n\n def response(self, flow):\n if self.stream:\n self.stream.add(flow)\n self.active_flows.discard(flow)\n\n def request(self, flow):\n if self.stream:\n self.active_flows.add(flow)\n\n def done(self):\n if self.stream:\n for f in self.active_flows:\n self.stream.add(f)\n self.active_flows = set([])\n self.stream.fo.close()\n self.stream = None\n", "path": "mitmproxy/addons/save.py"}]} | 1,696 | 112 |
gh_patches_debug_29098 | rasdani/github-patches | git_diff | mesonbuild__meson-2815 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
windows.compile_resources() can't be used with custom targets
```meson
rc_target = custom_target('Windows resource file',
command : [preprocess_command, rcdefs, '@INPUT@', '@OUTPUT@'],
build_always : true,
input : 'taisei.rc.in',
output : 'taisei.rc',
)
version_deps += winmod.compile_resources(rc_target)
```
```
Meson encountered an error in file src/meson.build, line 59, column 4:
Windows resource arguments must be strings or files not <CustomTargetHolder Windows resource file@cus: ['/data/git/taisei/scripts/configure-file.py', '--rootdir', '/data/git/taisei', '--fallback-version', 'v1.1.0-9999', '-DMESON_BUILD_TYPE=release', '-DICONS_DIR=/data/git/taisei/misc/icons', '-DBUILDTYPE_DEFINE=#define RELEASE_BUILD', '@INPUT@', '@OUTPUT@']>
```
This bug makes it impossible to reliably regenerate the `.rc` source on every rebuild.
Add something like depend_files to windows.compile_resources()
A resource script can include various other files (bitmap, cursor, font, HTML, icon, message table, binary data, manifest), so it would be nice if it were possible to declare that the resource script depends on these.
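
A sketch of how the requested API might look from the meson DSL side (`depend_files` here is the proposed, not-yet-existing keyword):

```meson
icon_deps = files('taisei.ico', 'taisei.manifest')

version_deps += winmod.compile_resources(
  rc_target,                  # custom_target output, per the first issue above
  depend_files : icon_deps,   # rebuild when referenced resources change
)
```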
</issue>
<code>
[start of mesonbuild/modules/windows.py]
1 # Copyright 2015 The Meson development team
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16
17 from .. import mlog
18 from .. import mesonlib, dependencies, build
19 from ..mesonlib import MesonException, extract_as_list
20 from . import get_include_args
21 from . import ModuleReturnValue
22 from . import ExtensionModule
23 from ..interpreterbase import permittedKwargs
24
25 class WindowsModule(ExtensionModule):
26
27 def detect_compiler(self, compilers):
28 for l in ('c', 'cpp'):
29 if l in compilers:
30 return compilers[l]
31 raise MesonException('Resource compilation requires a C or C++ compiler.')
32
33 @permittedKwargs({'args', 'include_directories'})
34 def compile_resources(self, state, args, kwargs):
35 comp = self.detect_compiler(state.compilers)
36
37 extra_args = mesonlib.stringlistify(kwargs.get('args', []))
38 inc_dirs = extract_as_list(kwargs, 'include_directories', pop = True)
39 for incd in inc_dirs:
40 if not isinstance(incd.held_object, (str, build.IncludeDirs)):
41 raise MesonException('Resource include dirs should be include_directories().')
42 extra_args += get_include_args(inc_dirs)
43
44 if comp.id == 'msvc':
45 rescomp = dependencies.ExternalProgram('rc', silent=True)
46 res_args = extra_args + ['/nologo', '/fo@OUTPUT@', '@INPUT@']
47 suffix = 'res'
48 else:
49 m = 'Argument {!r} has a space which may not work with windres due to ' \
50 'a MinGW bug: https://sourceware.org/bugzilla/show_bug.cgi?id=4933'
51 for arg in extra_args:
52 if ' ' in arg:
53 mlog.warning(m.format(arg))
54 rescomp_name = None
55 # FIXME: Does not handle `native: true` executables, see
56 # https://github.com/mesonbuild/meson/issues/1531
57 if state.environment.is_cross_build():
58 # If cross compiling see if windres has been specified in the
59 # cross file before trying to find it another way.
60 rescomp_name = state.environment.cross_info.config['binaries'].get('windres')
61 if rescomp_name is None:
62 # Pick-up env var WINDRES if set. This is often used for
63 # specifying an arch-specific windres.
64 rescomp_name = os.environ.get('WINDRES', 'windres')
65 rescomp = dependencies.ExternalProgram(rescomp_name, silent=True)
66 res_args = extra_args + ['@INPUT@', '@OUTPUT@']
67 suffix = 'o'
68 if not rescomp.found():
69 raise MesonException('Could not find Windows resource compiler %s.' % ' '.join(rescomp.get_command()))
70 res_kwargs = {'output': '@BASENAME@.' + suffix,
71 'arguments': res_args}
72 res_gen = build.Generator([rescomp], res_kwargs)
73 res_output = res_gen.process_files('Windows resource', args, state)
74 return ModuleReturnValue(res_output, [res_output])
75
76 def initialize():
77 return WindowsModule()
78
[end of mesonbuild/modules/windows.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mesonbuild/modules/windows.py b/mesonbuild/modules/windows.py
--- a/mesonbuild/modules/windows.py
+++ b/mesonbuild/modules/windows.py
@@ -67,11 +67,42 @@
suffix = 'o'
if not rescomp.found():
raise MesonException('Could not find Windows resource compiler %s.' % ' '.join(rescomp.get_command()))
- res_kwargs = {'output': '@BASENAME@.' + suffix,
- 'arguments': res_args}
- res_gen = build.Generator([rescomp], res_kwargs)
- res_output = res_gen.process_files('Windows resource', args, state)
- return ModuleReturnValue(res_output, [res_output])
+
+ res_targets = []
+
+ def add_target(src):
+ if isinstance(src, list):
+ for subsrc in src:
+ add_target(subsrc)
+ return
+
+ if hasattr(src, 'held_object'):
+ src = src.held_object
+
+ res_kwargs = {
+ 'output': '@BASENAME@.' + suffix,
+ 'input': [src],
+ 'command': [rescomp] + res_args,
+ }
+
+ if isinstance(src, (str, mesonlib.File)):
+ name = 'file {!r}'.format(str(src))
+ elif isinstance(src, build.CustomTarget):
+ if len(src.get_outputs()) > 1:
+ raise MesonException('windows.compile_resources does not accept custom targets with more than 1 output.')
+
+ name = 'target {!r}'.format(src.get_id())
+ else:
+ raise MesonException('Unexpected source type {!r}. windows.compile_resources accepts only strings, files, custom targets, and lists thereof.'.format(src))
+
+ # Path separators are not allowed in target names
+ name = name.replace('/', '_').replace('\\', '_')
+
+ res_targets.append(build.CustomTarget('Windows resource for ' + name, state.subdir, state.subproject, res_kwargs))
+
+ add_target(args)
+
+ return ModuleReturnValue(res_targets, [res_targets])
def initialize():
return WindowsModule()
| {"golden_diff": "diff --git a/mesonbuild/modules/windows.py b/mesonbuild/modules/windows.py\n--- a/mesonbuild/modules/windows.py\n+++ b/mesonbuild/modules/windows.py\n@@ -67,11 +67,42 @@\n suffix = 'o'\n if not rescomp.found():\n raise MesonException('Could not find Windows resource compiler %s.' % ' '.join(rescomp.get_command()))\n- res_kwargs = {'output': '@BASENAME@.' + suffix,\n- 'arguments': res_args}\n- res_gen = build.Generator([rescomp], res_kwargs)\n- res_output = res_gen.process_files('Windows resource', args, state)\n- return ModuleReturnValue(res_output, [res_output])\n+\n+ res_targets = []\n+\n+ def add_target(src):\n+ if isinstance(src, list):\n+ for subsrc in src:\n+ add_target(subsrc)\n+ return\n+\n+ if hasattr(src, 'held_object'):\n+ src = src.held_object\n+\n+ res_kwargs = {\n+ 'output': '@BASENAME@.' + suffix,\n+ 'input': [src],\n+ 'command': [rescomp] + res_args,\n+ }\n+\n+ if isinstance(src, (str, mesonlib.File)):\n+ name = 'file {!r}'.format(str(src))\n+ elif isinstance(src, build.CustomTarget):\n+ if len(src.get_outputs()) > 1:\n+ raise MesonException('windows.compile_resources does not accept custom targets with more than 1 output.')\n+\n+ name = 'target {!r}'.format(src.get_id())\n+ else:\n+ raise MesonException('Unexpected source type {!r}. windows.compile_resources accepts only strings, files, custom targets, and lists thereof.'.format(src))\n+\n+ # Path separators are not allowed in target names\n+ name = name.replace('/', '_').replace('\\\\', '_')\n+\n+ res_targets.append(build.CustomTarget('Windows resource for ' + name, state.subdir, state.subproject, res_kwargs))\n+\n+ add_target(args)\n+\n+ return ModuleReturnValue(res_targets, [res_targets])\n \n def initialize():\n return WindowsModule()\n", "issue": "windows.compile_resources() can't be used with custom targets\n```meson\r\n rc_target = custom_target('Windows resource file',\r\n command : [preprocess_command, rcdefs, '@INPUT@', '@OUTPUT@'],\r\n build_always : true,\r\n input : 'taisei.rc.in',\r\n output : 'taisei.rc',\r\n )\r\n\r\n version_deps += winmod.compile_resources(rc_target)\r\n```\r\n\r\n```\r\nMeson encountered an error in file src/meson.build, line 59, column 4:\r\nWindows resource arguments must be strings or files not <CustomTargetHolder Windows resource file@cus: ['/data/git/taisei/scripts/configure-file.py', '--rootdir', '/data/git/taisei', '--fallback-version', 'v1.1.0-9999', '-DMESON_BUILD_TYPE=release', '-DICONS_DIR=/data/git/taisei/misc/icons', '-DBUILDTYPE_DEFINE=#define RELEASE_BUILD', '@INPUT@', '@OUTPUT@']>\r\n```\r\n\r\nThis bug makes it impossible to reliably regenerate the `.rc` source on every rebuild.\nAdd something like depend_files to windows.compile_resources()\nResource script can include various other files (bitmap, cursor, font, html, icon, message table, binary data, manifest), it would be nice if it were possible to declare the resource script depends on these.\n", "before_files": [{"content": "# Copyright 2015 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations 
under the License.\n\nimport os\n\nfrom .. import mlog\nfrom .. import mesonlib, dependencies, build\nfrom ..mesonlib import MesonException, extract_as_list\nfrom . import get_include_args\nfrom . import ModuleReturnValue\nfrom . import ExtensionModule\nfrom ..interpreterbase import permittedKwargs\n\nclass WindowsModule(ExtensionModule):\n\n def detect_compiler(self, compilers):\n for l in ('c', 'cpp'):\n if l in compilers:\n return compilers[l]\n raise MesonException('Resource compilation requires a C or C++ compiler.')\n\n @permittedKwargs({'args', 'include_directories'})\n def compile_resources(self, state, args, kwargs):\n comp = self.detect_compiler(state.compilers)\n\n extra_args = mesonlib.stringlistify(kwargs.get('args', []))\n inc_dirs = extract_as_list(kwargs, 'include_directories', pop = True)\n for incd in inc_dirs:\n if not isinstance(incd.held_object, (str, build.IncludeDirs)):\n raise MesonException('Resource include dirs should be include_directories().')\n extra_args += get_include_args(inc_dirs)\n\n if comp.id == 'msvc':\n rescomp = dependencies.ExternalProgram('rc', silent=True)\n res_args = extra_args + ['/nologo', '/fo@OUTPUT@', '@INPUT@']\n suffix = 'res'\n else:\n m = 'Argument {!r} has a space which may not work with windres due to ' \\\n 'a MinGW bug: https://sourceware.org/bugzilla/show_bug.cgi?id=4933'\n for arg in extra_args:\n if ' ' in arg:\n mlog.warning(m.format(arg))\n rescomp_name = None\n # FIXME: Does not handle `native: true` executables, see\n # https://github.com/mesonbuild/meson/issues/1531\n if state.environment.is_cross_build():\n # If cross compiling see if windres has been specified in the\n # cross file before trying to find it another way.\n rescomp_name = state.environment.cross_info.config['binaries'].get('windres')\n if rescomp_name is None:\n # Pick-up env var WINDRES if set. This is often used for\n # specifying an arch-specific windres.\n rescomp_name = os.environ.get('WINDRES', 'windres')\n rescomp = dependencies.ExternalProgram(rescomp_name, silent=True)\n res_args = extra_args + ['@INPUT@', '@OUTPUT@']\n suffix = 'o'\n if not rescomp.found():\n raise MesonException('Could not find Windows resource compiler %s.' % ' '.join(rescomp.get_command()))\n res_kwargs = {'output': '@BASENAME@.' + suffix,\n 'arguments': res_args}\n res_gen = build.Generator([rescomp], res_kwargs)\n res_output = res_gen.process_files('Windows resource', args, state)\n return ModuleReturnValue(res_output, [res_output])\n\ndef initialize():\n return WindowsModule()\n", "path": "mesonbuild/modules/windows.py"}]} | 1,760 | 470 |
gh_patches_debug_57622 | rasdani/github-patches | git_diff | AnalogJ__lexicon-164 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Namecheap support not optional
Unlike route53 or softlayer, and contrary to what setup.py suggests, the namecheap provider is not optional in 2.1.17.
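
A minimal sketch of the deferred-import pattern the optional providers use (the names below are illustrative, not the project's exact code):

```python
# Sketch: tolerate a missing namecheap package at import time and fail
# only when the provider is actually used.
try:
    import namecheap  # optional dependency
except ImportError:
    namecheap = None  # Provider.__init__ will raise if it is actually needed
```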
</issue>
<code>
[start of lexicon/providers/namecheap.py]
1 from __future__ import absolute_import
2 from __future__ import print_function
3
4 import logging
5
6 import namecheap
7
8 from .base import Provider as BaseProvider
9
10 logger = logging.getLogger(__name__)
11
12
13 def ProviderParser(subparser):
14 subparser.add_argument(
15 '--auth-token',
16 help='specify api token used to authenticate'
17 )
18 subparser.add_argument(
19 '--auth-username',
20 help='specify email address used to authenticate'
21 )
22 # FIXME What is the client IP used for?
23 subparser.add_argument(
24 '--auth-client-ip',
25 help='Client IP address to send to Namecheap API calls',
26 default='127.0.0.1'
27 )
28 subparser.add_argument(
29 '--auth-sandbox',
30 help='Whether to use the sandbox server',
31 action='store_true'
32 )
33
34 class Provider(BaseProvider):
35
36 def __init__(self, options, engine_overrides=None):
37 super(Provider, self).__init__(options, engine_overrides)
38 self.options = options
39 self.client = namecheap.Api(
40 ApiUser=options.get('auth_username',''),
41 ApiKey=options.get('auth_token',''),
42 UserName=options.get('auth_username',''),
43 ClientIP=options.get('auth_client_ip',''),
44 sandbox=options.get('auth_sandbox', False),
45 debug=False
46 )
47 self.domain = self.options['domain']
48 self.domain_id = None
49
50 def authenticate(self):
51 try:
52 domain_names = [x['Name'] for x in self.client.domains_getList()]
53 except namecheap.ApiError:
54 raise Exception('Authentication failed')
55 if self.domain not in domain_names:
56 raise Exception('The domain {} is not controlled by this Namecheap '
57 'account'.format(self.domain))
58 # FIXME What is this for?
59 self.domain_id = self.domain
60
61 # Create record. If record already exists with the same content, do nothing
62 def create_record(self, type, name, content):
63 record = {
64 # required
65 'Type': type,
66 'Name': self._relative_name(name),
67 'Address': content
68 }
69 # logger.debug('create_record: %s', 'id' in payload)
70 # return 'id' in payload
71 self.client.domains_dns_addHost(self.domain, record)
72 return True
73
74 # List all records. Return an empty list if no records found.
75 # type, name and content are used to filter records.
76 # If possible filter during the query, otherwise filter after response is
77 # received.
78 def list_records(self, type=None, name=None, content=None, id=None):
79
80
81 records = []
82 raw_records = self.client.domains_dns_getHosts(self.domain)
83 for record in raw_records:
84 records.append(self._convert_to_lexicon(record))
85
86 if id:
87 records = [record for record in records if record['id'] == id]
88 if type:
89 records = [record for record in records if record['type'] == type]
90 if name:
91 if name.endswith('.'):
92 name = name[:-1]
93 records = [record for record in records if name in record['name'] ]
94 if content:
95 records = [record for record in records if record['content'].lower() == content.lower()]
96
97 logger.debug('list_records: %s', records)
98 return records
99
100 # Create or update a record.
101 def update_record(self, identifier, type=None, name=None, content=None):
102 # Delete record if it exists
103 self.delete_record(identifier, type, name, content)
104 return self.create_record(type, name, content)
105
106 # Delete an existing record.
107 # If record does not exist, do nothing.
108 def delete_record(self, identifier=None, type=None, name=None, content=None):
109
110 record = self.list_records(type=type, name=name, content=content, id=identifier)
111 if record:
112 self.client.domains_dns_delHost(self.domain, self._convert_to_namecheap(record[0]))
113 return True
114 else:
115 return False
116
117 def _convert_to_namecheap(self, record):
118 """ converts from lexicon format record to namecheap format record,
119 suitable to sending through the api to namecheap"""
120
121 name = record['name']
122 if name.endswith('.'):
123 name = name[:-1]
124
125 short_name = name[:name.find(self.domain)-1]
126 processed_record = {
127 'Type': record['type'],
128 'Name': short_name,
129 'TTL': record['ttl'],
130 'Address': record['content'],
131 'HostId': record['id']
132 }
133
134 return processed_record
135
136 def _convert_to_lexicon(self, record):
137 """ converts from namecheap raw record format to lexicon format record
138 """
139
140 name = record['Name']
141 if self.domain not in name:
142 name = "{}.{}".format(name,self.domain)
143
144 processed_record = {
145 'type': record['Type'],
146 'name': '{0}.{1}'.format(record['Name'], self.domain),
147 'ttl': record['TTL'],
148 'content': record['Address'],
149 'id': record['HostId']
150 }
151
152 return processed_record
153
[end of lexicon/providers/namecheap.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lexicon/providers/namecheap.py b/lexicon/providers/namecheap.py
--- a/lexicon/providers/namecheap.py
+++ b/lexicon/providers/namecheap.py
@@ -3,10 +3,14 @@
import logging
-import namecheap
from .base import Provider as BaseProvider
+try:
+ import namecheap #optional dep
+except ImportError:
+ pass
+
logger = logging.getLogger(__name__)
| {"golden_diff": "diff --git a/lexicon/providers/namecheap.py b/lexicon/providers/namecheap.py\n--- a/lexicon/providers/namecheap.py\n+++ b/lexicon/providers/namecheap.py\n@@ -3,10 +3,14 @@\n \n import logging\n \n-import namecheap\n \n from .base import Provider as BaseProvider\n \n+try:\n+ import namecheap #optional dep\n+except ImportError:\n+ pass\n+\n logger = logging.getLogger(__name__)\n", "issue": "Namecheap support not optional\nUnlike route53 or softlayer and unlike what setup.py suggests, the namecheap provider is not optional in 2.1.17.\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import print_function\n\nimport logging\n\nimport namecheap\n\nfrom .base import Provider as BaseProvider\n\nlogger = logging.getLogger(__name__)\n\n\ndef ProviderParser(subparser):\n subparser.add_argument(\n '--auth-token',\n help='specify api token used to authenticate'\n )\n subparser.add_argument(\n '--auth-username',\n help='specify email address used to authenticate'\n )\n # FIXME What is the client IP used for?\n subparser.add_argument(\n '--auth-client-ip',\n help='Client IP address to send to Namecheap API calls',\n default='127.0.0.1'\n )\n subparser.add_argument(\n '--auth-sandbox',\n help='Whether to use the sandbox server',\n action='store_true'\n )\n\nclass Provider(BaseProvider):\n\n def __init__(self, options, engine_overrides=None):\n super(Provider, self).__init__(options, engine_overrides)\n self.options = options\n self.client = namecheap.Api(\n ApiUser=options.get('auth_username',''),\n ApiKey=options.get('auth_token',''),\n UserName=options.get('auth_username',''),\n ClientIP=options.get('auth_client_ip',''),\n sandbox=options.get('auth_sandbox', False),\n debug=False\n )\n self.domain = self.options['domain']\n self.domain_id = None\n\n def authenticate(self):\n try:\n domain_names = [x['Name'] for x in self.client.domains_getList()]\n except namecheap.ApiError:\n raise Exception('Authentication failed')\n if self.domain not in domain_names:\n raise Exception('The domain {} is not controlled by this Namecheap '\n 'account'.format(self.domain))\n # FIXME What is this for?\n self.domain_id = self.domain\n\n # Create record. If record already exists with the same content, do nothing\n def create_record(self, type, name, content):\n record = {\n # required\n 'Type': type,\n 'Name': self._relative_name(name),\n 'Address': content\n }\n # logger.debug('create_record: %s', 'id' in payload)\n # return 'id' in payload\n self.client.domains_dns_addHost(self.domain, record)\n return True\n\n # List all records. 
Return an empty list if no records found.\n # type, name and content are used to filter records.\n # If possible filter during the query, otherwise filter after response is\n # received.\n def list_records(self, type=None, name=None, content=None, id=None):\n\n\n records = []\n raw_records = self.client.domains_dns_getHosts(self.domain)\n for record in raw_records:\n records.append(self._convert_to_lexicon(record))\n\n if id:\n records = [record for record in records if record['id'] == id]\n if type:\n records = [record for record in records if record['type'] == type]\n if name:\n if name.endswith('.'):\n name = name[:-1]\n records = [record for record in records if name in record['name'] ]\n if content:\n records = [record for record in records if record['content'].lower() == content.lower()]\n\n logger.debug('list_records: %s', records)\n return records\n\n # Create or update a record.\n def update_record(self, identifier, type=None, name=None, content=None):\n # Delete record if it exists\n self.delete_record(identifier, type, name, content)\n return self.create_record(type, name, content)\n\n # Delete an existing record.\n # If record does not exist, do nothing.\n def delete_record(self, identifier=None, type=None, name=None, content=None):\n\n record = self.list_records(type=type, name=name, content=content, id=identifier)\n if record:\n self.client.domains_dns_delHost(self.domain, self._convert_to_namecheap(record[0]))\n return True\n else:\n return False\n\n def _convert_to_namecheap(self, record):\n \"\"\" converts from lexicon format record to namecheap format record,\n suitable to sending through the api to namecheap\"\"\"\n\n name = record['name']\n if name.endswith('.'):\n name = name[:-1]\n\n short_name = name[:name.find(self.domain)-1]\n processed_record = {\n 'Type': record['type'],\n 'Name': short_name,\n 'TTL': record['ttl'],\n 'Address': record['content'],\n 'HostId': record['id']\n }\n\n return processed_record\n\n def _convert_to_lexicon(self, record):\n \"\"\" converts from namecheap raw record format to lexicon format record\n \"\"\"\n\n name = record['Name']\n if self.domain not in name:\n name = \"{}.{}\".format(name,self.domain)\n\n processed_record = {\n 'type': record['Type'],\n 'name': '{0}.{1}'.format(record['Name'], self.domain),\n 'ttl': record['TTL'],\n 'content': record['Address'],\n 'id': record['HostId']\n }\n\n return processed_record\n", "path": "lexicon/providers/namecheap.py"}]} | 2,047 | 98 |
gh_patches_debug_60787 | rasdani/github-patches | git_diff | liqd__a4-product-1090 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
get_newsletters during normal register is broken
If the newsletter checkbox is ticked during normal registration, the user is still created with get_newsletters = False. Changing the flag later in the account settings works as expected.
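
A rough sketch of the kind of fix this implies, assuming allauth invokes the signup form's `signup()` hook during registration: copy the cleaned checkbox value onto the user before it is saved.

```python
# Sketch only: propagate the opt-in inside TermsSignupForm.signup().
def signup(self, request, user):
    user.get_newsletters = self.cleaned_data["get_newsletters"]
    user.signup(
        self.cleaned_data["username"],
        self.cleaned_data["email"],
    )
```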
</issue>
<code>
[start of apps/users/forms.py]
1 from allauth.socialaccount.adapter import get_adapter
2 from allauth.utils import email_address_exists
3 from django import forms
4 from django.contrib.auth import forms as auth_forms
5 from django.contrib.auth import get_user_model
6 from django.utils.translation import ugettext_lazy as _
7
8 User = get_user_model()
9
10
11 class TermsSignupForm(auth_forms.UserCreationForm):
12 terms_of_use = forms.BooleanField(label=_('Terms of use'), error_messages={
13 'required': _('Please accept the terms of use.')
14 })
15
16 def signup(self, request, user):
17 user.signup(
18 self.cleaned_data['username'],
19 self.cleaned_data['email'],
20 )
21
22 class Meta:
23 model = User
24 fields = ('email', 'username', 'password1', 'password2',
25 'terms_of_use', 'get_newsletters')
26
27 # Tried to add form as described in allauth documentation:
28 # https://django-allauth.readthedocs.io/en/latest/forms.html#socialaccount-forms
29 # ran into the following error:
30 # https://stackoverflow.com/questions/57254251/custom-form-with-socialaccount-in-django-allauth
31 # added this solution, maybe not the best
32
33
34 class SignupForm(forms.Form):
35 terms_of_use = forms.BooleanField(label=_('Terms of use'), error_messages={
36 'required': _('Please accept the terms of use.')
37 })
38 get_newsletters = forms.BooleanField(
39 label=_('Send me newsletters'), required=False)
40 email = forms.EmailField(widget=forms.HiddenInput())
41 username = forms.CharField(widget=forms.HiddenInput())
42
43 def __init__(self, *args, **kwargs):
44 self.sociallogin = kwargs.pop('sociallogin')
45 initial = get_adapter().get_signup_form_initial_data(
46 self.sociallogin)
47 kwargs.update({
48 'initial': initial})
49 super().__init__(*args, **kwargs)
50
51 def save(self, request):
52 adapter = get_adapter(request)
53 user = adapter.save_user(request, self.sociallogin, form=self)
54 user.get_newsletters = self.cleaned_data['get_newsletters']
55 user.save()
56 user.signup(
57 user.username,
58 user.email
59 )
60 return user
61
62 def clean(self):
63 email = self.cleaned_data['email']
64 if email_address_exists(email):
65 raise forms.ValidationError(
66 get_adapter().error_messages['email_taken']
67 % self.sociallogin.account.get_provider().name)
68 return super().clean()
69
[end of apps/users/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/users/forms.py b/apps/users/forms.py
--- a/apps/users/forms.py
+++ b/apps/users/forms.py
@@ -14,6 +14,7 @@
})
def signup(self, request, user):
+ user.get_newsletters = self.cleaned_data["get_newsletters"]
user.signup(
self.cleaned_data['username'],
self.cleaned_data['email'],
| {"golden_diff": "diff --git a/apps/users/forms.py b/apps/users/forms.py\n--- a/apps/users/forms.py\n+++ b/apps/users/forms.py\n@@ -14,6 +14,7 @@\n })\n \n def signup(self, request, user):\n+ user.get_newsletters = self.cleaned_data[\"get_newsletters\"]\n user.signup(\n self.cleaned_data['username'],\n self.cleaned_data['email'],\n", "issue": "get_newsletters during normal register is broken\nIf checked, the user still has get_newsletters = False. But when changed in the account settings, it's changed.\n", "before_files": [{"content": "from allauth.socialaccount.adapter import get_adapter\nfrom allauth.utils import email_address_exists\nfrom django import forms\nfrom django.contrib.auth import forms as auth_forms\nfrom django.contrib.auth import get_user_model\nfrom django.utils.translation import ugettext_lazy as _\n\nUser = get_user_model()\n\n\nclass TermsSignupForm(auth_forms.UserCreationForm):\n terms_of_use = forms.BooleanField(label=_('Terms of use'), error_messages={\n 'required': _('Please accept the terms of use.')\n })\n\n def signup(self, request, user):\n user.signup(\n self.cleaned_data['username'],\n self.cleaned_data['email'],\n )\n\n class Meta:\n model = User\n fields = ('email', 'username', 'password1', 'password2',\n 'terms_of_use', 'get_newsletters')\n\n# Tried to add form as described in allauth documentation:\n# https://django-allauth.readthedocs.io/en/latest/forms.html#socialaccount-forms\n# ran into the following error:\n# https://stackoverflow.com/questions/57254251/custom-form-with-socialaccount-in-django-allauth\n# added this solution, maybe not the best\n\n\nclass SignupForm(forms.Form):\n terms_of_use = forms.BooleanField(label=_('Terms of use'), error_messages={\n 'required': _('Please accept the terms of use.')\n })\n get_newsletters = forms.BooleanField(\n label=_('Send me newsletters'), required=False)\n email = forms.EmailField(widget=forms.HiddenInput())\n username = forms.CharField(widget=forms.HiddenInput())\n\n def __init__(self, *args, **kwargs):\n self.sociallogin = kwargs.pop('sociallogin')\n initial = get_adapter().get_signup_form_initial_data(\n self.sociallogin)\n kwargs.update({\n 'initial': initial})\n super().__init__(*args, **kwargs)\n\n def save(self, request):\n adapter = get_adapter(request)\n user = adapter.save_user(request, self.sociallogin, form=self)\n user.get_newsletters = self.cleaned_data['get_newsletters']\n user.save()\n user.signup(\n user.username,\n user.email\n )\n return user\n\n def clean(self):\n email = self.cleaned_data['email']\n if email_address_exists(email):\n raise forms.ValidationError(\n get_adapter().error_messages['email_taken']\n % self.sociallogin.account.get_provider().name)\n return super().clean()\n", "path": "apps/users/forms.py"}]} | 1,202 | 86 |
gh_patches_debug_6976 | rasdani/github-patches | git_diff | svthalia__concrexit-1369 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Searching in photos api is broken
### Describe the bug
Searching in photos api is broken
### How to reproduce
Steps to reproduce the behaviour:
1. Go to https://thalia.nu/api/v1/photos/albums/?search=Test
### Expected behaviour
This should not crash.
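
For context, DRF's `SearchFilter` turns every entry in `search_fields` into an ORM lookup, so a field name that no longer exists on the model raises a `FieldError` at query time. A hedged sketch of the implied change (assuming the Dutch title column was removed from `Album`):

```python
# Sketch: keep only fields that still exist on the Album model.
search_fields = ("title_en", "date", "slug")
```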
</issue>
<code>
[start of website/photos/api/viewsets.py]
1 from rest_framework import permissions, filters
2 from rest_framework.exceptions import PermissionDenied
3 from rest_framework.mixins import CreateModelMixin, UpdateModelMixin
4 from rest_framework.viewsets import ModelViewSet, GenericViewSet
5
6 from photos import services
7 from photos.api import serializers
8 from photos.models import Album, Photo
9
10
11 class AlbumsViewSet(ModelViewSet):
12 """ViewSet class for an Album object."""
13
14 permission_classes = (permissions.IsAuthenticated,)
15 queryset = Album.objects.all()
16 filter_backends = (filters.SearchFilter,)
17 search_fields = ("title_en", "title_nl", "date", "slug")
18
19 def get_queryset(self):
20 """Return albums that are annotated to be accessible by the request user."""
21 return services.get_annotated_accessible_albums(
22 self.request, Album.objects.all()
23 )
24
25 def create(self, request, *args, **kwargs):
26 """Create album if the request user is allowed to."""
27 if self.request.user.has_perm("photos.create_album"):
28 return super().create(request, *args, **kwargs)
29 raise PermissionDenied
30
31 def update(self, request, *args, **kwargs):
32 """Create album if the request user is allowed to."""
33 if self.request.user.has_perm("photos.change_album"):
34 return super().update(request, *args, **kwargs)
35 raise PermissionDenied
36
37 def get_serializer_class(self):
38 """Return AlbumListSerializer if the current action is list else return AlbumSerializer."""
39 if self.action == "list":
40 return serializers.AlbumListSerializer
41 return serializers.AlbumSerializer
42
43
44 class PhotosViewSet(GenericViewSet, CreateModelMixin, UpdateModelMixin):
45 """ViewSet class for a Photo object."""
46
47 queryset = Photo.objects.all()
48 permission_classes = (permissions.IsAuthenticated,)
49 serializer_class = serializers.PhotoCreateSerializer
50
51 def create(self, request, *args, **kwargs):
52 """Create photo if the request user is allowed to."""
53 if self.request.user.has_perm("photos.create_photo"):
54 return super().create(request, *args, **kwargs)
55 raise PermissionDenied
56
57 def update(self, request, *args, **kwargs):
58 """Update photo if the request user is allowed to."""
59 if self.request.user.has_perm("photos.change_photo"):
60 return super().update(request, *args, **kwargs)
61 raise PermissionDenied
62
[end of website/photos/api/viewsets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/photos/api/viewsets.py b/website/photos/api/viewsets.py
--- a/website/photos/api/viewsets.py
+++ b/website/photos/api/viewsets.py
@@ -14,7 +14,7 @@
permission_classes = (permissions.IsAuthenticated,)
queryset = Album.objects.all()
filter_backends = (filters.SearchFilter,)
- search_fields = ("title_en", "title_nl", "date", "slug")
+ search_fields = ("title_en", "date", "slug")
def get_queryset(self):
"""Return albums that are annotated to be accessible by the request user."""
| {"golden_diff": "diff --git a/website/photos/api/viewsets.py b/website/photos/api/viewsets.py\n--- a/website/photos/api/viewsets.py\n+++ b/website/photos/api/viewsets.py\n@@ -14,7 +14,7 @@\n permission_classes = (permissions.IsAuthenticated,)\n queryset = Album.objects.all()\n filter_backends = (filters.SearchFilter,)\n- search_fields = (\"title_en\", \"title_nl\", \"date\", \"slug\")\n+ search_fields = (\"title_en\", \"date\", \"slug\")\n \n def get_queryset(self):\n \"\"\"Return albums that are annotated to be accessible by the request user.\"\"\"\n", "issue": "Searching in photos api is broken\n### Describe the bug\r\nSearching in photos api is broken\r\n\r\n### How to reproduce\r\nSteps to reproduce the behaviour:\r\n1. Go to https://thalia.nu/api/v1/photos/albums/?search=Test\r\n\r\n### Expected behaviour\r\nThis should not crash.\r\n\r\n\n", "before_files": [{"content": "from rest_framework import permissions, filters\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.mixins import CreateModelMixin, UpdateModelMixin\nfrom rest_framework.viewsets import ModelViewSet, GenericViewSet\n\nfrom photos import services\nfrom photos.api import serializers\nfrom photos.models import Album, Photo\n\n\nclass AlbumsViewSet(ModelViewSet):\n \"\"\"ViewSet class for an Album object.\"\"\"\n\n permission_classes = (permissions.IsAuthenticated,)\n queryset = Album.objects.all()\n filter_backends = (filters.SearchFilter,)\n search_fields = (\"title_en\", \"title_nl\", \"date\", \"slug\")\n\n def get_queryset(self):\n \"\"\"Return albums that are annotated to be accessible by the request user.\"\"\"\n return services.get_annotated_accessible_albums(\n self.request, Album.objects.all()\n )\n\n def create(self, request, *args, **kwargs):\n \"\"\"Create album if the request user is allowed to.\"\"\"\n if self.request.user.has_perm(\"photos.create_album\"):\n return super().create(request, *args, **kwargs)\n raise PermissionDenied\n\n def update(self, request, *args, **kwargs):\n \"\"\"Create album if the request user is allowed to.\"\"\"\n if self.request.user.has_perm(\"photos.change_album\"):\n return super().update(request, *args, **kwargs)\n raise PermissionDenied\n\n def get_serializer_class(self):\n \"\"\"Return AlbumListSerializer if the current action is list else return AlbumSerializer.\"\"\"\n if self.action == \"list\":\n return serializers.AlbumListSerializer\n return serializers.AlbumSerializer\n\n\nclass PhotosViewSet(GenericViewSet, CreateModelMixin, UpdateModelMixin):\n \"\"\"ViewSet class for a Photo object.\"\"\"\n\n queryset = Photo.objects.all()\n permission_classes = (permissions.IsAuthenticated,)\n serializer_class = serializers.PhotoCreateSerializer\n\n def create(self, request, *args, **kwargs):\n \"\"\"Create photo if the request user is allowed to.\"\"\"\n if self.request.user.has_perm(\"photos.create_photo\"):\n return super().create(request, *args, **kwargs)\n raise PermissionDenied\n\n def update(self, request, *args, **kwargs):\n \"\"\"Update photo if the request user is allowed to.\"\"\"\n if self.request.user.has_perm(\"photos.change_photo\"):\n return super().update(request, *args, **kwargs)\n raise PermissionDenied\n", "path": "website/photos/api/viewsets.py"}]} | 1,206 | 135 |
gh_patches_debug_64869 | rasdani/github-patches | git_diff | kedro-org__kedro-2345 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release Kedro `0.18.5`
### Description
Release Kedro `0.18.5`, which will contain lots of new features for configuration. The release depends on the following tickets being finished:
- [x] BLOCKER: https://github.com/kedro-org/kedro/issues/2255
- [x] #1909 (Docs)
- [x] #2148
- [x] #2170
- [x] #2225
Initially we wanted to include the below issues as well, but the implementation turned out to be trickier than expected, so we'll take more time to investigate a solution and won't let it block the release.
- [x] #2146
- [x] #2212
</issue>
<code>
[start of kedro/__init__.py]
1 """Kedro is a framework that makes it easy to build robust and scalable
2 data pipelines by providing uniform project templates, data abstraction,
3 configuration and pipeline assembly.
4 """
5
6 __version__ = "0.18.4"
7
8
9 import logging
10
11 logging.getLogger(__name__).addHandler(logging.NullHandler())
12
[end of kedro/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kedro/__init__.py b/kedro/__init__.py
--- a/kedro/__init__.py
+++ b/kedro/__init__.py
@@ -3,7 +3,7 @@
configuration and pipeline assembly.
"""
-__version__ = "0.18.4"
+__version__ = "0.18.5"
import logging
| {"golden_diff": "diff --git a/kedro/__init__.py b/kedro/__init__.py\n--- a/kedro/__init__.py\n+++ b/kedro/__init__.py\n@@ -3,7 +3,7 @@\n configuration and pipeline assembly.\n \"\"\"\n \n-__version__ = \"0.18.4\"\n+__version__ = \"0.18.5\"\n \n \n import logging\n", "issue": "Release Kedro `0.18.5`\n### Description\r\n\r\nRelease Kedro `0.18.5` which will contain lots of new features for configuration. The release depends on the following tickets to be finished:\r\n\r\n- [x] BLOCKER: https://github.com/kedro-org/kedro/issues/2255\r\n- [x] #1909 (Docs)\r\n- [x] #2148 \r\n- [x] #2170\r\n- [x] #2225 \r\n\r\nInitially we wanted to include the below issues as well, but the implementation turned out to be trickier than expected, so we'll take more time to investigate a solution and won't let it block the release.\r\n- [x] #2146 \r\n- [x] #2212 \r\n\n", "before_files": [{"content": "\"\"\"Kedro is a framework that makes it easy to build robust and scalable\ndata pipelines by providing uniform project templates, data abstraction,\nconfiguration and pipeline assembly.\n\"\"\"\n\n__version__ = \"0.18.4\"\n\n\nimport logging\n\nlogging.getLogger(__name__).addHandler(logging.NullHandler())\n", "path": "kedro/__init__.py"}]} | 796 | 88 |
gh_patches_debug_24683 | rasdani/github-patches | git_diff | ietf-tools__datatracker-5620 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Case mismatch for fragment identifiers between menus and page heading anchor
### Describe the issue
The menu item "Groups / Ops and Management" sends you off to https://datatracker.ietf.org/wg/#ops but "#ops" is not recognised on the page because the heading anchor is "#OPS" and so that menu item takes you to the top of the page not the Ops heading.
### Code of Conduct
- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)
</issue>
<code>
[start of ietf/doc/templatetags/wg_menu.py]
1 # Copyright The IETF Trust 2009-2022, All Rights Reserved
2
3 # Copyright (C) 2009-2010 Nokia Corporation and/or its subsidiary(-ies).
4 # All rights reserved. Contact: Pasi Eronen <[email protected]>
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions
8 # are met:
9 #
10 # * Redistributions of source code must retain the above copyright
11 # notice, this list of conditions and the following disclaimer.
12 #
13 # * Redistributions in binary form must reproduce the above
14 # copyright notice, this list of conditions and the following
15 # disclaimer in the documentation and/or other materials provided
16 # with the distribution.
17 #
18 # * Neither the name of the Nokia Corporation and/or its
19 # subsidiary(-ies) nor the names of its contributors may be used
20 # to endorse or promote products derived from this software
21 # without specific prior written permission.
22 #
23 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26 # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
28 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
29 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
30 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
31 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
32 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
33 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34
35 from django import template
36 from django.template.loader import render_to_string
37 from django.db import models
38
39 from ietf.group.models import Group
40
41 register = template.Library()
42
43 parent_short_names = {
44 "ops": "Ops & Management",
45 "rai": "RAI",
46 "iab": "IAB",
47 "art": "Apps & Realtime",
48 "ietfadminllc": "IETF LLC",
49 }
50
51 parents = Group.objects.filter(
52 models.Q(type="area")
53 | models.Q(type="irtf", acronym="irtf")
54 | models.Q(acronym="iab")
55 | models.Q(acronym="ietfadminllc")
56 | models.Q(acronym="rfceditor"),
57 state="active",
58 ).order_by("type__order", "type_id", "acronym")
59
60
61 @register.simple_tag
62 def wg_menu(flavor):
63 global parents
64
65 for p in parents:
66 p.short_name = parent_short_names.get(p.acronym) or p.name
67 if p.short_name.endswith(" Area"):
68 p.short_name = p.short_name[: -len(" Area")]
69
70 if p.type_id == "area":
71 p.menu_url = "/wg/#" + p.acronym
72 elif p.acronym == "irtf":
73 p.menu_url = "/rg/"
74 elif p.acronym == "iab":
75 p.menu_url = "/program/"
76 elif p.acronym == "ietfadminllc":
77 p.menu_url = "/adm/"
78 elif p.acronym == "rfceditor":
79 p.menu_url = "/rfcedtyp/"
80
81 return render_to_string(
82 "base/menu_wg.html", {"parents": parents, "flavor": flavor}
83 )
84
[end of ietf/doc/templatetags/wg_menu.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ietf/doc/templatetags/wg_menu.py b/ietf/doc/templatetags/wg_menu.py
--- a/ietf/doc/templatetags/wg_menu.py
+++ b/ietf/doc/templatetags/wg_menu.py
@@ -1,4 +1,4 @@
-# Copyright The IETF Trust 2009-2022, All Rights Reserved
+# Copyright The IETF Trust 2009-2023, All Rights Reserved
# Copyright (C) 2009-2010 Nokia Corporation and/or its subsidiary(-ies).
# All rights reserved. Contact: Pasi Eronen <[email protected]>
@@ -32,6 +32,8 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import debug # pyflakes: ignore
+
from django import template
from django.template.loader import render_to_string
from django.db import models
@@ -68,7 +70,7 @@
p.short_name = p.short_name[: -len(" Area")]
if p.type_id == "area":
- p.menu_url = "/wg/#" + p.acronym
+ p.menu_url = "/wg/#" + p.acronym.upper()
elif p.acronym == "irtf":
p.menu_url = "/rg/"
elif p.acronym == "iab":
| {"golden_diff": "diff --git a/ietf/doc/templatetags/wg_menu.py b/ietf/doc/templatetags/wg_menu.py\n--- a/ietf/doc/templatetags/wg_menu.py\n+++ b/ietf/doc/templatetags/wg_menu.py\n@@ -1,4 +1,4 @@\n-# Copyright The IETF Trust 2009-2022, All Rights Reserved\n+# Copyright The IETF Trust 2009-2023, All Rights Reserved\n \n # Copyright (C) 2009-2010 Nokia Corporation and/or its subsidiary(-ies).\n # All rights reserved. Contact: Pasi Eronen <[email protected]>\n@@ -32,6 +32,8 @@\n # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n \n+import debug # pyflakes: ignore\n+\n from django import template\n from django.template.loader import render_to_string\n from django.db import models\n@@ -68,7 +70,7 @@\n p.short_name = p.short_name[: -len(\" Area\")]\n \n if p.type_id == \"area\":\n- p.menu_url = \"/wg/#\" + p.acronym\n+ p.menu_url = \"/wg/#\" + p.acronym.upper()\n elif p.acronym == \"irtf\":\n p.menu_url = \"/rg/\"\n elif p.acronym == \"iab\":\n", "issue": "Case mismatch for fragment identifiers between menus and page heading anchor\n### Describe the issue\n\nThe menu item \"Groups / Ops and Management\" sends you off to https://datatracker.ietf.org/wg/#ops but \"#ops\" is not recognised on the page because the heading anchor is \"#OPS\" and so that menu item takes you to the top of the page not the Ops heading.\n\n### Code of Conduct\n\n- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)\n", "before_files": [{"content": "# Copyright The IETF Trust 2009-2022, All Rights Reserved\n\n# Copyright (C) 2009-2010 Nokia Corporation and/or its subsidiary(-ies).\n# All rights reserved. Contact: Pasi Eronen <[email protected]>\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n#\n# * Neither the name of the Nokia Corporation and/or its\n# subsidiary(-ies) nor the names of its contributors may be used\n# to endorse or promote products derived from this software\n# without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom django import template\nfrom django.template.loader import render_to_string\nfrom django.db import models\n\nfrom ietf.group.models import Group\n\nregister = template.Library()\n\nparent_short_names = {\n \"ops\": \"Ops & Management\",\n \"rai\": \"RAI\",\n \"iab\": \"IAB\",\n \"art\": \"Apps & Realtime\",\n \"ietfadminllc\": \"IETF LLC\",\n}\n\nparents = Group.objects.filter(\n models.Q(type=\"area\")\n | models.Q(type=\"irtf\", acronym=\"irtf\")\n | models.Q(acronym=\"iab\")\n | models.Q(acronym=\"ietfadminllc\")\n | models.Q(acronym=\"rfceditor\"),\n state=\"active\",\n).order_by(\"type__order\", \"type_id\", \"acronym\")\n\n\[email protected]_tag\ndef wg_menu(flavor):\n global parents\n\n for p in parents:\n p.short_name = parent_short_names.get(p.acronym) or p.name\n if p.short_name.endswith(\" Area\"):\n p.short_name = p.short_name[: -len(\" Area\")]\n\n if p.type_id == \"area\":\n p.menu_url = \"/wg/#\" + p.acronym\n elif p.acronym == \"irtf\":\n p.menu_url = \"/rg/\"\n elif p.acronym == \"iab\":\n p.menu_url = \"/program/\"\n elif p.acronym == \"ietfadminllc\":\n p.menu_url = \"/adm/\"\n elif p.acronym == \"rfceditor\":\n p.menu_url = \"/rfcedtyp/\"\n\n return render_to_string(\n \"base/menu_wg.html\", {\"parents\": parents, \"flavor\": flavor}\n )\n", "path": "ietf/doc/templatetags/wg_menu.py"}]} | 1,580 | 332 |
gh_patches_debug_33213 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-738 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Consolidate serialization helpers to be static and shared
In the teams_helper there are two serialization helper methods. Currently each of them rebuilds, on every call, a big dict of all the Model classes that exist in the Teams and Bot Framework (BF) schemas. We should optimize by building the big dict once and updating the two helpers to use the shared dict.
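
A minimal sketch of the module-level cache this describes (the constant names are illustrative): build the name-to-class mapping once at import time and let both helpers reuse it.

```python
# Sketch: compute the schema dependency map once, at module import.
DEPENDENCIES = [
    cls
    for _, cls in getmembers(schema)
    if isinstance(cls, type) and issubclass(cls, (Model, Enum))
] + [
    cls
    for _, cls in getmembers(teams_schema)
    if isinstance(cls, type) and issubclass(cls, (Model, Enum))
]
DEPENDENCIES_DICT = {cls.__name__: cls for cls in DEPENDENCIES}

def deserializer_helper(msrest_cls, dict_to_deserialize):
    # Both helpers now share the cached dict instead of rebuilding it.
    return Deserializer(DEPENDENCIES_DICT)(msrest_cls.__name__, dict_to_deserialize)
```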
</issue>
<code>
[start of libraries/botbuilder-core/botbuilder/core/teams/teams_helper.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 from inspect import getmembers
5 from typing import Type
6 from enum import Enum
7
8 from msrest.serialization import Model, Deserializer, Serializer
9
10 import botbuilder.schema as schema
11 import botbuilder.schema.teams as teams_schema
12
13 # Optimization: The dependencies dictionary could be cached here,
14 # and shared between the two methods.
15
16
17 def deserializer_helper(msrest_cls: Type[Model], dict_to_deserialize: dict) -> Model:
18 dependencies = [
19 schema_cls
20 for key, schema_cls in getmembers(schema)
21 if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))
22 ]
23 dependencies += [
24 schema_cls
25 for key, schema_cls in getmembers(teams_schema)
26 if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))
27 ]
28 dependencies_dict = {dependency.__name__: dependency for dependency in dependencies}
29 deserializer = Deserializer(dependencies_dict)
30 return deserializer(msrest_cls.__name__, dict_to_deserialize)
31
32
33 def serializer_helper(object_to_serialize: Model) -> dict:
34 if object_to_serialize is None:
35 return None
36
37 dependencies = [
38 schema_cls
39 for key, schema_cls in getmembers(schema)
40 if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))
41 ]
42 dependencies += [
43 schema_cls
44 for key, schema_cls in getmembers(teams_schema)
45 if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))
46 ]
47 dependencies_dict = {dependency.__name__: dependency for dependency in dependencies}
48 serializer = Serializer(dependencies_dict)
49 # pylint: disable=protected-access
50 return serializer._serialize(object_to_serialize)
51
[end of libraries/botbuilder-core/botbuilder/core/teams/teams_helper.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libraries/botbuilder-core/botbuilder/core/teams/teams_helper.py b/libraries/botbuilder-core/botbuilder/core/teams/teams_helper.py
--- a/libraries/botbuilder-core/botbuilder/core/teams/teams_helper.py
+++ b/libraries/botbuilder-core/botbuilder/core/teams/teams_helper.py
@@ -10,23 +10,21 @@
import botbuilder.schema as schema
import botbuilder.schema.teams as teams_schema
-# Optimization: The dependencies dictionary could be cached here,
-# and shared between the two methods.
+DEPENDICIES = [
+ schema_cls
+ for key, schema_cls in getmembers(schema)
+ if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))
+]
+DEPENDICIES += [
+ schema_cls
+ for key, schema_cls in getmembers(teams_schema)
+ if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))
+]
+DEPENDICIES_DICT = {dependency.__name__: dependency for dependency in DEPENDICIES}
def deserializer_helper(msrest_cls: Type[Model], dict_to_deserialize: dict) -> Model:
- dependencies = [
- schema_cls
- for key, schema_cls in getmembers(schema)
- if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))
- ]
- dependencies += [
- schema_cls
- for key, schema_cls in getmembers(teams_schema)
- if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))
- ]
- dependencies_dict = {dependency.__name__: dependency for dependency in dependencies}
- deserializer = Deserializer(dependencies_dict)
+ deserializer = Deserializer(DEPENDICIES_DICT)
return deserializer(msrest_cls.__name__, dict_to_deserialize)
@@ -34,17 +32,6 @@
if object_to_serialize is None:
return None
- dependencies = [
- schema_cls
- for key, schema_cls in getmembers(schema)
- if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))
- ]
- dependencies += [
- schema_cls
- for key, schema_cls in getmembers(teams_schema)
- if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))
- ]
- dependencies_dict = {dependency.__name__: dependency for dependency in dependencies}
- serializer = Serializer(dependencies_dict)
+ serializer = Serializer(DEPENDICIES_DICT)
# pylint: disable=protected-access
return serializer._serialize(object_to_serialize)
| {"golden_diff": "diff --git a/libraries/botbuilder-core/botbuilder/core/teams/teams_helper.py b/libraries/botbuilder-core/botbuilder/core/teams/teams_helper.py\n--- a/libraries/botbuilder-core/botbuilder/core/teams/teams_helper.py\n+++ b/libraries/botbuilder-core/botbuilder/core/teams/teams_helper.py\n@@ -10,23 +10,21 @@\n import botbuilder.schema as schema\n import botbuilder.schema.teams as teams_schema\n \n-# Optimization: The dependencies dictionary could be cached here,\n-# and shared between the two methods.\n+DEPENDICIES = [\n+ schema_cls\n+ for key, schema_cls in getmembers(schema)\n+ if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))\n+]\n+DEPENDICIES += [\n+ schema_cls\n+ for key, schema_cls in getmembers(teams_schema)\n+ if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))\n+]\n+DEPENDICIES_DICT = {dependency.__name__: dependency for dependency in DEPENDICIES}\n \n \n def deserializer_helper(msrest_cls: Type[Model], dict_to_deserialize: dict) -> Model:\n- dependencies = [\n- schema_cls\n- for key, schema_cls in getmembers(schema)\n- if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))\n- ]\n- dependencies += [\n- schema_cls\n- for key, schema_cls in getmembers(teams_schema)\n- if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))\n- ]\n- dependencies_dict = {dependency.__name__: dependency for dependency in dependencies}\n- deserializer = Deserializer(dependencies_dict)\n+ deserializer = Deserializer(DEPENDICIES_DICT)\n return deserializer(msrest_cls.__name__, dict_to_deserialize)\n \n \n@@ -34,17 +32,6 @@\n if object_to_serialize is None:\n return None\n \n- dependencies = [\n- schema_cls\n- for key, schema_cls in getmembers(schema)\n- if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))\n- ]\n- dependencies += [\n- schema_cls\n- for key, schema_cls in getmembers(teams_schema)\n- if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))\n- ]\n- dependencies_dict = {dependency.__name__: dependency for dependency in dependencies}\n- serializer = Serializer(dependencies_dict)\n+ serializer = Serializer(DEPENDICIES_DICT)\n # pylint: disable=protected-access\n return serializer._serialize(object_to_serialize)\n", "issue": "Consolidate serialization helpers to be static and shared\nIn the teams_helper there are 2 serialization helper methods. Currently they both create a big dict of all the Model objects that exist in Teams and BF. We should make the optimization to make the big dict once, and update the 2 helpers to use the new dict.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nfrom inspect import getmembers\nfrom typing import Type\nfrom enum import Enum\n\nfrom msrest.serialization import Model, Deserializer, Serializer\n\nimport botbuilder.schema as schema\nimport botbuilder.schema.teams as teams_schema\n\n# Optimization: The dependencies dictionary could be cached here,\n# and shared between the two methods.\n\n\ndef deserializer_helper(msrest_cls: Type[Model], dict_to_deserialize: dict) -> Model:\n dependencies = [\n schema_cls\n for key, schema_cls in getmembers(schema)\n if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))\n ]\n dependencies += [\n schema_cls\n for key, schema_cls in getmembers(teams_schema)\n if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))\n ]\n dependencies_dict = {dependency.__name__: dependency for dependency in dependencies}\n deserializer = Deserializer(dependencies_dict)\n return deserializer(msrest_cls.__name__, dict_to_deserialize)\n\n\ndef serializer_helper(object_to_serialize: Model) -> dict:\n if object_to_serialize is None:\n return None\n\n dependencies = [\n schema_cls\n for key, schema_cls in getmembers(schema)\n if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))\n ]\n dependencies += [\n schema_cls\n for key, schema_cls in getmembers(teams_schema)\n if isinstance(schema_cls, type) and issubclass(schema_cls, (Model, Enum))\n ]\n dependencies_dict = {dependency.__name__: dependency for dependency in dependencies}\n serializer = Serializer(dependencies_dict)\n # pylint: disable=protected-access\n return serializer._serialize(object_to_serialize)\n", "path": "libraries/botbuilder-core/botbuilder/core/teams/teams_helper.py"}]} | 1,098 | 583 |
gh_patches_debug_5273 | rasdani/github-patches | git_diff | crytic__slither-1339 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
README is not correctly rendered on PyPi
### Describe the desired feature
The description on https://pypi.org/project/slither-analyzer/ is not being rendered as Markdown. Add the line `long_description_content_type="text/markdown",` to `setup.py` so it renders correctly in future releases.
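
The fix uses a documented setuptools argument; the sketch below shows where it slots into the existing `setup()` call:

```python
# Sketch: tell PyPI the long_description is Markdown rather than reST.
setup(
    # ... existing arguments ...
    long_description=long_description,
    long_description_content_type="text/markdown",
)
```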
</issue>
<code>
[start of setup.py]
1 from setuptools import setup, find_packages
2
3 with open("README.md", "r", encoding="utf-8") as f:
4 long_description = f.read()
5
6 setup(
7 name="slither-analyzer",
8 description="Slither is a Solidity static analysis framework written in Python 3.",
9 url="https://github.com/crytic/slither",
10 author="Trail of Bits",
11 version="0.8.3",
12 packages=find_packages(),
13 python_requires=">=3.8",
14 install_requires=[
15 "prettytable>=0.7.2",
16 "pysha3>=1.0.2",
17 # "crytic-compile>=0.2.3",
18 "crytic-compile",
19 ],
20 extras_require={
21 "dev": [
22 "black==22.3.0",
23 "pylint==2.13.4",
24 "pytest",
25 "pytest-cov",
26 "deepdiff",
27 "numpy",
28 "solc-select>=v1.0.0b1",
29 ]
30 },
31 dependency_links=["git+https://github.com/crytic/crytic-compile.git@master#egg=crytic-compile"],
32 license="AGPL-3.0",
33 long_description=long_description,
34 entry_points={
35 "console_scripts": [
36 "slither = slither.__main__:main",
37 "slither-check-upgradeability = slither.tools.upgradeability.__main__:main",
38 "slither-find-paths = slither.tools.possible_paths.__main__:main",
39 "slither-simil = slither.tools.similarity.__main__:main",
40 "slither-flat = slither.tools.flattening.__main__:main",
41 "slither-format = slither.tools.slither_format.__main__:main",
42 "slither-check-erc = slither.tools.erc_conformance.__main__:main",
43 "slither-check-kspec = slither.tools.kspec_coverage.__main__:main",
44 "slither-prop = slither.tools.properties.__main__:main",
45 "slither-mutate = slither.tools.mutator.__main__:main",
46 "slither-read-storage = slither.tools.read_storage.__main__:main",
47 ]
48 },
49 )
50
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -31,6 +31,7 @@
dependency_links=["git+https://github.com/crytic/crytic-compile.git@master#egg=crytic-compile"],
license="AGPL-3.0",
long_description=long_description,
+ long_description_content_type="text/markdown",
entry_points={
"console_scripts": [
"slither = slither.__main__:main",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -31,6 +31,7 @@\n dependency_links=[\"git+https://github.com/crytic/crytic-compile.git@master#egg=crytic-compile\"],\n license=\"AGPL-3.0\",\n long_description=long_description,\n+ long_description_content_type=\"text/markdown\",\n entry_points={\n \"console_scripts\": [\n \"slither = slither.__main__:main\",\n", "issue": "README is not correctly rendered on PyPi\n### Describe the desired feature\n\nThe description on https://pypi.org/project/slither-analyzer/ is not being rendered as markdown. Add the line `long_description_content_type=\"text/markdown\",` to the `setup.py` for it to render correctly in future releases.\n", "before_files": [{"content": "from setuptools import setup, find_packages\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=\"slither-analyzer\",\n description=\"Slither is a Solidity static analysis framework written in Python 3.\",\n url=\"https://github.com/crytic/slither\",\n author=\"Trail of Bits\",\n version=\"0.8.3\",\n packages=find_packages(),\n python_requires=\">=3.8\",\n install_requires=[\n \"prettytable>=0.7.2\",\n \"pysha3>=1.0.2\",\n # \"crytic-compile>=0.2.3\",\n \"crytic-compile\",\n ],\n extras_require={\n \"dev\": [\n \"black==22.3.0\",\n \"pylint==2.13.4\",\n \"pytest\",\n \"pytest-cov\",\n \"deepdiff\",\n \"numpy\",\n \"solc-select>=v1.0.0b1\",\n ]\n },\n dependency_links=[\"git+https://github.com/crytic/crytic-compile.git@master#egg=crytic-compile\"],\n license=\"AGPL-3.0\",\n long_description=long_description,\n entry_points={\n \"console_scripts\": [\n \"slither = slither.__main__:main\",\n \"slither-check-upgradeability = slither.tools.upgradeability.__main__:main\",\n \"slither-find-paths = slither.tools.possible_paths.__main__:main\",\n \"slither-simil = slither.tools.similarity.__main__:main\",\n \"slither-flat = slither.tools.flattening.__main__:main\",\n \"slither-format = slither.tools.slither_format.__main__:main\",\n \"slither-check-erc = slither.tools.erc_conformance.__main__:main\",\n \"slither-check-kspec = slither.tools.kspec_coverage.__main__:main\",\n \"slither-prop = slither.tools.properties.__main__:main\",\n \"slither-mutate = slither.tools.mutator.__main__:main\",\n \"slither-read-storage = slither.tools.read_storage.__main__:main\",\n ]\n },\n)\n", "path": "setup.py"}]} | 1,168 | 112 |
gh_patches_debug_5135 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-4845 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
WebSocket view jumps to top on new message
The WebSocket view keeps jumping to the top every time a new message arrives. This makes it basically impossible to work with while the connection is open. I hold down the arrow key -> it scrolls a bit -> a message arrives -> I'm back at the top.
_Originally posted by @Prinzhorn in https://github.com/mitmproxy/mitmproxy/issues/4486#issuecomment-796578909_
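A minimal sketch of one plausible remedy (hypothetical, assuming urwid's `ListBox`/`SimpleFocusListWalker` API): start the view focused on the last item so appended messages don't yank the view back to the top.

```python
import urwid

# Hypothetical sketch: anchor focus to the newest item when building the view.
contents = [urwid.Text("message %d" % i) for i in range(100)]
walker = urwid.SimpleFocusListWalker(contents)
listbox = urwid.ListBox(walker)
listbox.set_focus(len(walker) - 1)  # start at the bottom instead of the top
```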
</issue>
<code>
[start of mitmproxy/tools/console/searchable.py]
1 import urwid
2
3 from mitmproxy.tools.console import signals
4
5
6 class Highlight(urwid.AttrMap):
7
8 def __init__(self, t):
9 urwid.AttrMap.__init__(
10 self,
11 urwid.Text(t.text),
12 "focusfield",
13 )
14 self.backup = t
15
16
17 class Searchable(urwid.ListBox):
18
19 def __init__(self, contents):
20 self.walker = urwid.SimpleFocusListWalker(contents)
21 urwid.ListBox.__init__(self, self.walker)
22 self.search_offset = 0
23 self.current_highlight = None
24 self.search_term = None
25 self.last_search = None
26
27 def keypress(self, size, key):
28 if key == "/":
29 signals.status_prompt.send(
30 prompt = "Search for",
31 text = "",
32 callback = self.set_search
33 )
34 elif key == "n":
35 self.find_next(False)
36 elif key == "N":
37 self.find_next(True)
38 elif key == "m_start":
39 self.set_focus(0)
40 self.walker._modified()
41 elif key == "m_end":
42 self.set_focus(len(self.walker) - 1)
43 self.walker._modified()
44 else:
45 return super().keypress(size, key)
46
47 def set_search(self, text):
48 self.last_search = text
49 self.search_term = text or None
50 self.find_next(False)
51
52 def set_highlight(self, offset):
53 if self.current_highlight is not None:
54 old = self.body[self.current_highlight]
55 self.body[self.current_highlight] = old.backup
56 if offset is None:
57 self.current_highlight = None
58 else:
59 self.body[offset] = Highlight(self.body[offset])
60 self.current_highlight = offset
61
62 def get_text(self, w):
63 if isinstance(w, urwid.Text):
64 return w.text
65 elif isinstance(w, Highlight):
66 return w.backup.text
67 else:
68 return None
69
70 def find_next(self, backwards):
71 if not self.search_term:
72 if self.last_search:
73 self.search_term = self.last_search
74 else:
75 self.set_highlight(None)
76 return
77 # Start search at focus + 1
78 if backwards:
79 rng = range(len(self.body) - 1, -1, -1)
80 else:
81 rng = range(1, len(self.body) + 1)
82 for i in rng:
83 off = (self.focus_position + i) % len(self.body)
84 w = self.body[off]
85 txt = self.get_text(w)
86 if txt and self.search_term in txt:
87 self.set_highlight(off)
88 self.set_focus(off, coming_from="above")
89 self.body._modified()
90 return
91 else:
92 self.set_highlight(None)
93 signals.status_message.send(message="Search not found.", expire=1)
94
[end of mitmproxy/tools/console/searchable.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mitmproxy/tools/console/searchable.py b/mitmproxy/tools/console/searchable.py
--- a/mitmproxy/tools/console/searchable.py
+++ b/mitmproxy/tools/console/searchable.py
@@ -19,6 +19,7 @@
def __init__(self, contents):
self.walker = urwid.SimpleFocusListWalker(contents)
urwid.ListBox.__init__(self, self.walker)
+ self.set_focus(len(self.walker) - 1)
self.search_offset = 0
self.current_highlight = None
self.search_term = None
| {"golden_diff": "diff --git a/mitmproxy/tools/console/searchable.py b/mitmproxy/tools/console/searchable.py\n--- a/mitmproxy/tools/console/searchable.py\n+++ b/mitmproxy/tools/console/searchable.py\n@@ -19,6 +19,7 @@\n def __init__(self, contents):\n self.walker = urwid.SimpleFocusListWalker(contents)\n urwid.ListBox.__init__(self, self.walker)\n+ self.set_focus(len(self.walker) - 1)\n self.search_offset = 0\n self.current_highlight = None\n self.search_term = None\n", "issue": "WebSocket view jumps to top on new message\nThe WebSocket view keeps jumping to the top every time a new message arrives. This makes it basically impossible to work with while the connection is open. I hold down arrow -> it scrolls a bit -> message arrives -> I'm back at the top\r\n\r\n_Originally posted by @Prinzhorn in https://github.com/mitmproxy/mitmproxy/issues/4486#issuecomment-796578909_\n", "before_files": [{"content": "import urwid\n\nfrom mitmproxy.tools.console import signals\n\n\nclass Highlight(urwid.AttrMap):\n\n def __init__(self, t):\n urwid.AttrMap.__init__(\n self,\n urwid.Text(t.text),\n \"focusfield\",\n )\n self.backup = t\n\n\nclass Searchable(urwid.ListBox):\n\n def __init__(self, contents):\n self.walker = urwid.SimpleFocusListWalker(contents)\n urwid.ListBox.__init__(self, self.walker)\n self.search_offset = 0\n self.current_highlight = None\n self.search_term = None\n self.last_search = None\n\n def keypress(self, size, key):\n if key == \"/\":\n signals.status_prompt.send(\n prompt = \"Search for\",\n text = \"\",\n callback = self.set_search\n )\n elif key == \"n\":\n self.find_next(False)\n elif key == \"N\":\n self.find_next(True)\n elif key == \"m_start\":\n self.set_focus(0)\n self.walker._modified()\n elif key == \"m_end\":\n self.set_focus(len(self.walker) - 1)\n self.walker._modified()\n else:\n return super().keypress(size, key)\n\n def set_search(self, text):\n self.last_search = text\n self.search_term = text or None\n self.find_next(False)\n\n def set_highlight(self, offset):\n if self.current_highlight is not None:\n old = self.body[self.current_highlight]\n self.body[self.current_highlight] = old.backup\n if offset is None:\n self.current_highlight = None\n else:\n self.body[offset] = Highlight(self.body[offset])\n self.current_highlight = offset\n\n def get_text(self, w):\n if isinstance(w, urwid.Text):\n return w.text\n elif isinstance(w, Highlight):\n return w.backup.text\n else:\n return None\n\n def find_next(self, backwards):\n if not self.search_term:\n if self.last_search:\n self.search_term = self.last_search\n else:\n self.set_highlight(None)\n return\n # Start search at focus + 1\n if backwards:\n rng = range(len(self.body) - 1, -1, -1)\n else:\n rng = range(1, len(self.body) + 1)\n for i in rng:\n off = (self.focus_position + i) % len(self.body)\n w = self.body[off]\n txt = self.get_text(w)\n if txt and self.search_term in txt:\n self.set_highlight(off)\n self.set_focus(off, coming_from=\"above\")\n self.body._modified()\n return\n else:\n self.set_highlight(None)\n signals.status_message.send(message=\"Search not found.\", expire=1)\n", "path": "mitmproxy/tools/console/searchable.py"}]} | 1,426 | 128 |
gh_patches_debug_58219 | rasdani/github-patches | git_diff | opsdroid__opsdroid-169 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
arrow dep missing
Fresh install of Ubuntu 16.04
```
$ sudo apt update && sudo apt install python3-pip
...
$ pip3 install opsdroid
...
$ opsdroid
Traceback (most recent call last):
File "/home/ubuntu/.local/bin/opsdroid", line 7, in <module>
from opsdroid.__main__ import main
File "/home/ubuntu/.local/lib/python3.5/site-packages/opsdroid/__main__.py", line 8, in <module>
from opsdroid.core import OpsDroid
File "/home/ubuntu/.local/lib/python3.5/site-packages/opsdroid/core.py", line 15, in <module>
from opsdroid.parsers.crontab import parse_crontab
File "/home/ubuntu/.local/lib/python3.5/site-packages/opsdroid/parsers/crontab.py", line 6, in <module>
import arrow
ImportError: No module named 'arrow'
```
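Presumably `arrow` (imported by `opsdroid/parsers/crontab.py`, per the traceback) is simply not declared as an install requirement. A sketch of the likely fix in `setup.py` (the unpinned entry below is an assumption; a real fix would pin a known-good version):

```python
REQUIRES = [
    'pyyaml>=3.11,<4',
    'aiohttp>=1.2.0,<2',
    'pycron>=0.40',
    'arrow',  # missing dependency used by the crontab parser
]
```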
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python3
2 import os
3 from setuptools import setup, find_packages
4 from opsdroid.const import __version__
5
6 PACKAGE_NAME = 'opsdroid'
7 HERE = os.path.abspath(os.path.dirname(__file__))
8
9 PACKAGES = find_packages(exclude=['tests', 'tests.*', 'modules',
10 'modules.*', 'docs', 'docs.*'])
11
12 REQUIRES = [
13 'pyyaml>=3.11,<4',
14 'aiohttp>=1.2.0,<2',
15 'pycron>=0.40',
16 ]
17
18 setup(
19 name=PACKAGE_NAME,
20 version=__version__,
21 license='GNU GENERAL PUBLIC LICENSE V3',
22 url='',
23 download_url='',
24 author='Jacob Tomlinson',
25 author_email='[email protected]',
26 description='An open source chat-ops bot.',
27 packages=PACKAGES,
28 include_package_data=True,
29 zip_safe=False,
30 platforms='any',
31 install_requires=REQUIRES,
32 test_suite='tests',
33 keywords=['bot', 'chatops'],
34 entry_points={
35 'console_scripts': [
36 'opsdroid = opsdroid.__main__:main'
37 ]
38 },
39 )
40
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -10,9 +10,10 @@
'modules.*', 'docs', 'docs.*'])
REQUIRES = [
- 'pyyaml>=3.11,<4',
- 'aiohttp>=1.2.0,<2',
- 'pycron>=0.40',
+ 'arrow==0.10.0',
+ 'aiohttp==2.1.0',
+ 'pycron==0.40',
+ 'pyyaml==3.12'
]
setup(
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -10,9 +10,10 @@\n 'modules.*', 'docs', 'docs.*'])\n \n REQUIRES = [\n- 'pyyaml>=3.11,<4',\n- 'aiohttp>=1.2.0,<2',\n- 'pycron>=0.40',\n+ 'arrow==0.10.0',\n+ 'aiohttp==2.1.0',\n+ 'pycron==0.40',\n+ 'pyyaml==3.12'\n ]\n \n setup(\n", "issue": "arrow dep missing\nFresh install of ubuntu 16.04\r\n\r\n```\r\n$ sudo apt update && sudo apt install python3-pip\r\n...\r\n$ pip3 install opsdroid\r\n...\r\n$ opsdroid\r\nTraceback (most recent call last):\r\n File \"/home/ubuntu/.local/bin/opsdroid\", line 7, in <module>\r\n from opsdroid.__main__ import main\r\n File \"/home/ubuntu/.local/lib/python3.5/site-packages/opsdroid/__main__.py\", line 8, in <module>\r\n from opsdroid.core import OpsDroid\r\n File \"/home/ubuntu/.local/lib/python3.5/site-packages/opsdroid/core.py\", line 15, in <module>\r\n from opsdroid.parsers.crontab import parse_crontab\r\n File \"/home/ubuntu/.local/lib/python3.5/site-packages/opsdroid/parsers/crontab.py\", line 6, in <module>\r\n import arrow\r\nImportError: No module named 'arrow'\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python3\nimport os\nfrom setuptools import setup, find_packages\nfrom opsdroid.const import __version__\n\nPACKAGE_NAME = 'opsdroid'\nHERE = os.path.abspath(os.path.dirname(__file__))\n\nPACKAGES = find_packages(exclude=['tests', 'tests.*', 'modules',\n 'modules.*', 'docs', 'docs.*'])\n\nREQUIRES = [\n 'pyyaml>=3.11,<4',\n 'aiohttp>=1.2.0,<2',\n 'pycron>=0.40',\n]\n\nsetup(\n name=PACKAGE_NAME,\n version=__version__,\n license='GNU GENERAL PUBLIC LICENSE V3',\n url='',\n download_url='',\n author='Jacob Tomlinson',\n author_email='[email protected]',\n description='An open source chat-ops bot.',\n packages=PACKAGES,\n include_package_data=True,\n zip_safe=False,\n platforms='any',\n install_requires=REQUIRES,\n test_suite='tests',\n keywords=['bot', 'chatops'],\n entry_points={\n 'console_scripts': [\n 'opsdroid = opsdroid.__main__:main'\n ]\n },\n)\n", "path": "setup.py"}]} | 1,076 | 141 |
gh_patches_debug_31455 | rasdani/github-patches | git_diff | pypa__pipenv-3186 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Quote command if parentheses exist
Thank you for contributing to Pipenv!
### The issue
Fixes #3168
### The fix
Quote the command if it contains `()`.
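A minimal sketch of the quoting guard (illustrative only; the helper name is a placeholder):

```python
import re

def _quote_if_contains(value, pattern):
    # Wrap `value` in double quotes when it matches `pattern`
    # (e.g. whitespace or parentheses), escaping embedded double quotes.
    if next(re.finditer(pattern, value), None):
        return '"{0}"'.format(re.sub(r'(\\*)"', r'\1\1\\"', value))
    return value

print(_quote_if_contains('foo (bar)', r'[\s()]'))  # -> "foo (bar)"
```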
### The checklist
* [x] Associated issue
* [x] A news fragment in the `news/` directory to describe this fix with the extension `.bugfix`, `.feature`, `.behavior`, `.doc`. `.vendor`. or `.trivial` (this will appear in the release changelog). Use semantic line breaks and name the file after the issue number or the PR #.
<!--
### If this is a patch to the `vendor` directory…
Please try to refrain from submitting patches directly to `vendor` or `patched`, but raise your issue to the upstream project instead, and inform Pipenv to upgrade when the upstream project accepts the fix.
A pull request to upgrade vendor packages is strongly discouraged, unless there is a very good reason (e.g. you need to test Pipenv’s integration with a new vendor feature). Pipenv audits and performs vendor upgrades regularly, generally before a new release is about to drop.
If your patch is not or cannot be accepted by upstream, but is essential to Pipenv (make sure to discuss this with maintainers!), please remember to attach a patch file in `tasks/vendoring/patched`, so this divergence from upstream can be recorded and replayed afterwards.
-->
</issue>
<code>
[start of pipenv/cmdparse.py]
1 import re
2 import shlex
3
4 import six
5
6
7 class ScriptEmptyError(ValueError):
8 pass
9
10
11 class Script(object):
12 """Parse a script line (in Pipfile's [scripts] section).
13
14 This always works in POSIX mode, even on Windows.
15 """
16
17 def __init__(self, command, args=None):
18 self._parts = [command]
19 if args:
20 self._parts.extend(args)
21
22 @classmethod
23 def parse(cls, value):
24 if isinstance(value, six.string_types):
25 value = shlex.split(value)
26 if not value:
27 raise ScriptEmptyError(value)
28 return cls(value[0], value[1:])
29
30 def __repr__(self):
31 return "Script({0!r})".format(self._parts)
32
33 @property
34 def command(self):
35 return self._parts[0]
36
37 @property
38 def args(self):
39 return self._parts[1:]
40
41 def extend(self, extra_args):
42 self._parts.extend(extra_args)
43
44 def cmdify(self):
45 """Encode into a cmd-executable string.
46
47 This re-implements CreateProcess's quoting logic to turn a list of
48 arguments into one single string for the shell to interpret.
49
50 * All double quotes are escaped with a backslash.
51 * Existing backslashes before a quote are doubled, so they are all
52 escaped properly.
53 * Backslashes elsewhere are left as-is; cmd will interpret them
54 literally.
55
56 The result is then quoted into a pair of double quotes to be grouped.
57
58 An argument is intentionally not quoted if it does not contain
59 whitespaces. This is done to be compatible with Windows built-in
60 commands that don't work well with quotes, e.g. everything with `echo`,
61 and DOS-style (forward slash) switches.
62
63 The intended use of this function is to pre-process an argument list
64 before passing it into ``subprocess.Popen(..., shell=True)``.
65
66 See also: https://docs.python.org/3/library/subprocess.html#converting-argument-sequence
67 """
68 return " ".join(
69 arg if not next(re.finditer(r'\s', arg), None)
70 else '"{0}"'.format(re.sub(r'(\\*)"', r'\1\1\\"', arg))
71 for arg in self._parts
72 )
73
[end of pipenv/cmdparse.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pipenv/cmdparse.py b/pipenv/cmdparse.py
--- a/pipenv/cmdparse.py
+++ b/pipenv/cmdparse.py
@@ -1,3 +1,4 @@
+import itertools
import re
import shlex
@@ -8,6 +9,12 @@
pass
+def _quote_if_contains(value, pattern):
+ if next(re.finditer(pattern, value), None):
+ return '"{0}"'.format(re.sub(r'(\\*)"', r'\1\1\\"', value))
+ return value
+
+
class Script(object):
"""Parse a script line (in Pipfile's [scripts] section).
@@ -56,17 +63,21 @@
The result is then quoted into a pair of double quotes to be grouped.
An argument is intentionally not quoted if it does not contain
- whitespaces. This is done to be compatible with Windows built-in
+ foul characters. This is done to be compatible with Windows built-in
commands that don't work well with quotes, e.g. everything with `echo`,
and DOS-style (forward slash) switches.
+ Foul characters include:
+
+ * Whitespaces.
+ * Parentheses in the command. (pypa/pipenv#3168)
+
The intended use of this function is to pre-process an argument list
before passing it into ``subprocess.Popen(..., shell=True)``.
See also: https://docs.python.org/3/library/subprocess.html#converting-argument-sequence
"""
- return " ".join(
- arg if not next(re.finditer(r'\s', arg), None)
- else '"{0}"'.format(re.sub(r'(\\*)"', r'\1\1\\"', arg))
- for arg in self._parts
- )
+ return " ".join(itertools.chain(
+ [_quote_if_contains(self.command, r'[\s()]')],
+ (_quote_if_contains(arg, r'\s') for arg in self.args),
+ ))
| {"golden_diff": "diff --git a/pipenv/cmdparse.py b/pipenv/cmdparse.py\n--- a/pipenv/cmdparse.py\n+++ b/pipenv/cmdparse.py\n@@ -1,3 +1,4 @@\n+import itertools\n import re\n import shlex\n \n@@ -8,6 +9,12 @@\n pass\n \n \n+def _quote_if_contains(value, pattern):\n+ if next(re.finditer(pattern, value), None):\n+ return '\"{0}\"'.format(re.sub(r'(\\\\*)\"', r'\\1\\1\\\\\"', value))\n+ return value\n+\n+\n class Script(object):\n \"\"\"Parse a script line (in Pipfile's [scripts] section).\n \n@@ -56,17 +63,21 @@\n The result is then quoted into a pair of double quotes to be grouped.\n \n An argument is intentionally not quoted if it does not contain\n- whitespaces. This is done to be compatible with Windows built-in\n+ foul characters. This is done to be compatible with Windows built-in\n commands that don't work well with quotes, e.g. everything with `echo`,\n and DOS-style (forward slash) switches.\n \n+ Foul characters include:\n+\n+ * Whitespaces.\n+ * Parentheses in the command. (pypa/pipenv#3168)\n+\n The intended use of this function is to pre-process an argument list\n before passing it into ``subprocess.Popen(..., shell=True)``.\n \n See also: https://docs.python.org/3/library/subprocess.html#converting-argument-sequence\n \"\"\"\n- return \" \".join(\n- arg if not next(re.finditer(r'\\s', arg), None)\n- else '\"{0}\"'.format(re.sub(r'(\\\\*)\"', r'\\1\\1\\\\\"', arg))\n- for arg in self._parts\n- )\n+ return \" \".join(itertools.chain(\n+ [_quote_if_contains(self.command, r'[\\s()]')],\n+ (_quote_if_contains(arg, r'\\s') for arg in self.args),\n+ ))\n", "issue": "Quote command if parentheses exist\nThank you for contributing to Pipenv!\r\n\r\n\r\n### The issue\r\n\r\nFixes #3168 \r\n\r\n### The fix\r\n\r\nQuote the command if it contains `()`.\r\n\r\n### The checklist\r\n\r\n* [x] Associated issue\r\n* [x] A news fragment in the `news/` directory to describe this fix with the extension `.bugfix`, `.feature`, `.behavior`, `.doc`. `.vendor`. or `.trivial` (this will appear in the release changelog). Use semantic line breaks and name the file after the issue number or the PR #.\r\n\r\n<!--\r\n### If this is a patch to the `vendor` directory\u2026\r\n\r\nPlease try to refrain from submitting patches directly to `vendor` or `patched`, but raise your issue to the upstream project instead, and inform Pipenv to upgrade when the upstream project accepts the fix.\r\n\r\nA pull request to upgrade vendor packages is strongly discouraged, unless there is a very good reason (e.g. you need to test Pipenv\u2019s integration to a new vendor feature). 
Pipenv audits and performs vendor upgrades regularly, generally before a new release is about to drop.\r\n\r\nIf your patch is not or cannot be accepted by upstream, but is essential to Pipenv (make sure to discuss this with maintainers!), please remember to attach a patch file in `tasks/vendoring/patched`, so this divergence from upstream can be recorded and replayed afterwards.\r\n-->\r\n\n", "before_files": [{"content": "import re\nimport shlex\n\nimport six\n\n\nclass ScriptEmptyError(ValueError):\n pass\n\n\nclass Script(object):\n \"\"\"Parse a script line (in Pipfile's [scripts] section).\n\n This always works in POSIX mode, even on Windows.\n \"\"\"\n\n def __init__(self, command, args=None):\n self._parts = [command]\n if args:\n self._parts.extend(args)\n\n @classmethod\n def parse(cls, value):\n if isinstance(value, six.string_types):\n value = shlex.split(value)\n if not value:\n raise ScriptEmptyError(value)\n return cls(value[0], value[1:])\n\n def __repr__(self):\n return \"Script({0!r})\".format(self._parts)\n\n @property\n def command(self):\n return self._parts[0]\n\n @property\n def args(self):\n return self._parts[1:]\n\n def extend(self, extra_args):\n self._parts.extend(extra_args)\n\n def cmdify(self):\n \"\"\"Encode into a cmd-executable string.\n\n This re-implements CreateProcess's quoting logic to turn a list of\n arguments into one single string for the shell to interpret.\n\n * All double quotes are escaped with a backslash.\n * Existing backslashes before a quote are doubled, so they are all\n escaped properly.\n * Backslashes elsewhere are left as-is; cmd will interpret them\n literally.\n\n The result is then quoted into a pair of double quotes to be grouped.\n\n An argument is intentionally not quoted if it does not contain\n whitespaces. This is done to be compatible with Windows built-in\n commands that don't work well with quotes, e.g. everything with `echo`,\n and DOS-style (forward slash) switches.\n\n The intended use of this function is to pre-process an argument list\n before passing it into ``subprocess.Popen(..., shell=True)``.\n\n See also: https://docs.python.org/3/library/subprocess.html#converting-argument-sequence\n \"\"\"\n return \" \".join(\n arg if not next(re.finditer(r'\\s', arg), None)\n else '\"{0}\"'.format(re.sub(r'(\\\\*)\"', r'\\1\\1\\\\\"', arg))\n for arg in self._parts\n )\n", "path": "pipenv/cmdparse.py"}]} | 1,470 | 455 |
gh_patches_debug_35752 | rasdani/github-patches | git_diff | feast-dev__feast-2845 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Incorrect projects-list.json generated by feast ui when using Postgres as a data source.
## Expected Behavior
Correct generation of the projects-list.json when running feast ui.
## Current Behavior
The generated projects-list.json does not contain a name in the dataSources field, causing the parser to fail.
## Steps to reproduce
Set up feast with PostgreSQL as a data source.
### Specifications
- Version:
- Platform:
- Subsystem:
## Possible Solution
Add `name=self.name` to `to_proto()` in postgres_source.py and, more generally, make postgres_source.py more similar to e.g. file_source.py.
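A sketch of what `to_proto()` could look like with the name passed through (based on the file below; illustrative, not a final patch):

```python
def to_proto(self) -> DataSourceProto:
    data_source_proto = DataSourceProto(
        name=self.name,  # previously omitted, leaving dataSources entries unnamed
        type=DataSourceProto.CUSTOM_SOURCE,
        data_source_class_type=(
            "feast.infra.offline_stores.contrib.postgres_offline_store"
            ".postgres_source.PostgreSQLSource"
        ),
        field_mapping=self.field_mapping,
        custom_options=self._postgres_options.to_proto(),
    )
    data_source_proto.timestamp_field = self.timestamp_field
    data_source_proto.created_timestamp_column = self.created_timestamp_column
    data_source_proto.date_partition_column = self.date_partition_column
    return data_source_proto
```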
</issue>
<code>
[start of sdk/python/feast/infra/offline_stores/contrib/postgres_offline_store/postgres_source.py]
1 import json
2 from typing import Callable, Dict, Iterable, Optional, Tuple
3
4 from feast.data_source import DataSource
5 from feast.infra.utils.postgres.connection_utils import _get_conn
6 from feast.protos.feast.core.DataSource_pb2 import DataSource as DataSourceProto
7 from feast.repo_config import RepoConfig
8 from feast.type_map import pg_type_code_to_pg_type, pg_type_to_feast_value_type
9 from feast.value_type import ValueType
10
11
12 class PostgreSQLSource(DataSource):
13 def __init__(
14 self,
15 name: str,
16 query: str,
17 timestamp_field: Optional[str] = "",
18 created_timestamp_column: Optional[str] = "",
19 field_mapping: Optional[Dict[str, str]] = None,
20 date_partition_column: Optional[str] = "",
21 ):
22 self._postgres_options = PostgreSQLOptions(name=name, query=query)
23
24 super().__init__(
25 name=name,
26 timestamp_field=timestamp_field,
27 created_timestamp_column=created_timestamp_column,
28 field_mapping=field_mapping,
29 date_partition_column=date_partition_column,
30 )
31
32 def __hash__(self):
33 return super().__hash__()
34
35 def __eq__(self, other):
36 if not isinstance(other, PostgreSQLSource):
37 raise TypeError(
38 "Comparisons should only involve PostgreSQLSource class objects."
39 )
40
41 return (
42 self._postgres_options._query == other._postgres_options._query
43 and self.timestamp_field == other.timestamp_field
44 and self.created_timestamp_column == other.created_timestamp_column
45 and self.field_mapping == other.field_mapping
46 )
47
48 @staticmethod
49 def from_proto(data_source: DataSourceProto):
50 assert data_source.HasField("custom_options")
51
52 postgres_options = json.loads(data_source.custom_options.configuration)
53 return PostgreSQLSource(
54 name=postgres_options["name"],
55 query=postgres_options["query"],
56 field_mapping=dict(data_source.field_mapping),
57 timestamp_field=data_source.timestamp_field,
58 created_timestamp_column=data_source.created_timestamp_column,
59 date_partition_column=data_source.date_partition_column,
60 )
61
62 def to_proto(self) -> DataSourceProto:
63 data_source_proto = DataSourceProto(
64 type=DataSourceProto.CUSTOM_SOURCE,
65 data_source_class_type="feast.infra.offline_stores.contrib.postgres_offline_store.postgres_source.PostgreSQLSource",
66 field_mapping=self.field_mapping,
67 custom_options=self._postgres_options.to_proto(),
68 )
69
70 data_source_proto.timestamp_field = self.timestamp_field
71 data_source_proto.created_timestamp_column = self.created_timestamp_column
72 data_source_proto.date_partition_column = self.date_partition_column
73
74 return data_source_proto
75
76 def validate(self, config: RepoConfig):
77 pass
78
79 @staticmethod
80 def source_datatype_to_feast_value_type() -> Callable[[str], ValueType]:
81 return pg_type_to_feast_value_type
82
83 def get_table_column_names_and_types(
84 self, config: RepoConfig
85 ) -> Iterable[Tuple[str, str]]:
86 with _get_conn(config.offline_store) as conn, conn.cursor() as cur:
87 cur.execute(
88 f"SELECT * FROM ({self.get_table_query_string()}) AS sub LIMIT 0"
89 )
90 return (
91 (c.name, pg_type_code_to_pg_type(c.type_code)) for c in cur.description
92 )
93
94 def get_table_query_string(self) -> str:
95 return f"({self._postgres_options._query})"
96
97
98 class PostgreSQLOptions:
99 def __init__(self, name: str, query: Optional[str]):
100 self._name = name
101 self._query = query
102
103 @classmethod
104 def from_proto(cls, postgres_options_proto: DataSourceProto.CustomSourceOptions):
105 config = json.loads(postgres_options_proto.configuration.decode("utf8"))
106 postgres_options = cls(name=config["name"], query=config["query"])
107
108 return postgres_options
109
110 def to_proto(self) -> DataSourceProto.CustomSourceOptions:
111 postgres_options_proto = DataSourceProto.CustomSourceOptions(
112 configuration=json.dumps(
113 {"name": self._name, "query": self._query}
114 ).encode()
115 )
116
117 return postgres_options_proto
118
[end of sdk/python/feast/infra/offline_stores/contrib/postgres_offline_store/postgres_source.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sdk/python/feast/infra/offline_stores/contrib/postgres_offline_store/postgres_source.py b/sdk/python/feast/infra/offline_stores/contrib/postgres_offline_store/postgres_source.py
--- a/sdk/python/feast/infra/offline_stores/contrib/postgres_offline_store/postgres_source.py
+++ b/sdk/python/feast/infra/offline_stores/contrib/postgres_offline_store/postgres_source.py
@@ -18,6 +18,9 @@
created_timestamp_column: Optional[str] = "",
field_mapping: Optional[Dict[str, str]] = None,
date_partition_column: Optional[str] = "",
+ description: Optional[str] = "",
+ tags: Optional[Dict[str, str]] = None,
+ owner: Optional[str] = "",
):
self._postgres_options = PostgreSQLOptions(name=name, query=query)
@@ -27,6 +30,9 @@
created_timestamp_column=created_timestamp_column,
field_mapping=field_mapping,
date_partition_column=date_partition_column,
+ description=description,
+ tags=tags,
+ owner=owner,
)
def __hash__(self):
@@ -57,14 +63,21 @@
timestamp_field=data_source.timestamp_field,
created_timestamp_column=data_source.created_timestamp_column,
date_partition_column=data_source.date_partition_column,
+ description=data_source.description,
+ tags=dict(data_source.tags),
+ owner=data_source.owner,
)
def to_proto(self) -> DataSourceProto:
data_source_proto = DataSourceProto(
+ name=self.name,
type=DataSourceProto.CUSTOM_SOURCE,
data_source_class_type="feast.infra.offline_stores.contrib.postgres_offline_store.postgres_source.PostgreSQLSource",
field_mapping=self.field_mapping,
custom_options=self._postgres_options.to_proto(),
+ description=self.description,
+ tags=self.tags,
+ owner=self.owner,
)
data_source_proto.timestamp_field = self.timestamp_field
| {"golden_diff": "diff --git a/sdk/python/feast/infra/offline_stores/contrib/postgres_offline_store/postgres_source.py b/sdk/python/feast/infra/offline_stores/contrib/postgres_offline_store/postgres_source.py\n--- a/sdk/python/feast/infra/offline_stores/contrib/postgres_offline_store/postgres_source.py\n+++ b/sdk/python/feast/infra/offline_stores/contrib/postgres_offline_store/postgres_source.py\n@@ -18,6 +18,9 @@\n created_timestamp_column: Optional[str] = \"\",\n field_mapping: Optional[Dict[str, str]] = None,\n date_partition_column: Optional[str] = \"\",\n+ description: Optional[str] = \"\",\n+ tags: Optional[Dict[str, str]] = None,\n+ owner: Optional[str] = \"\",\n ):\n self._postgres_options = PostgreSQLOptions(name=name, query=query)\n \n@@ -27,6 +30,9 @@\n created_timestamp_column=created_timestamp_column,\n field_mapping=field_mapping,\n date_partition_column=date_partition_column,\n+ description=description,\n+ tags=tags,\n+ owner=owner,\n )\n \n def __hash__(self):\n@@ -57,14 +63,21 @@\n timestamp_field=data_source.timestamp_field,\n created_timestamp_column=data_source.created_timestamp_column,\n date_partition_column=data_source.date_partition_column,\n+ description=data_source.description,\n+ tags=dict(data_source.tags),\n+ owner=data_source.owner,\n )\n \n def to_proto(self) -> DataSourceProto:\n data_source_proto = DataSourceProto(\n+ name=self.name,\n type=DataSourceProto.CUSTOM_SOURCE,\n data_source_class_type=\"feast.infra.offline_stores.contrib.postgres_offline_store.postgres_source.PostgreSQLSource\",\n field_mapping=self.field_mapping,\n custom_options=self._postgres_options.to_proto(),\n+ description=self.description,\n+ tags=self.tags,\n+ owner=self.owner,\n )\n \n data_source_proto.timestamp_field = self.timestamp_field\n", "issue": "Incorrect projects-list.json generated by feast ui when using Postgres as a data source.\n## Expected Behavior \r\nCorrect generation of the projects-list.json when running feast ui. \r\n## Current Behavior\r\nThe generated projects-list.json does not contain a name in the dataSources field, causing the parser to fail.\r\n## Steps to reproduce\r\nSetup feast with PostgreSQL as a data source.\r\n### Specifications\r\n\r\n- Version:\r\n- Platform:\r\n- Subsystem:\r\n\r\n## Possible Solution\r\nAdding name=self.name to to_proto() in postgres_source.py. 
And in general making the postgres_source.py file more similar to e.g., file_source.py.\n", "before_files": [{"content": "import json\nfrom typing import Callable, Dict, Iterable, Optional, Tuple\n\nfrom feast.data_source import DataSource\nfrom feast.infra.utils.postgres.connection_utils import _get_conn\nfrom feast.protos.feast.core.DataSource_pb2 import DataSource as DataSourceProto\nfrom feast.repo_config import RepoConfig\nfrom feast.type_map import pg_type_code_to_pg_type, pg_type_to_feast_value_type\nfrom feast.value_type import ValueType\n\n\nclass PostgreSQLSource(DataSource):\n def __init__(\n self,\n name: str,\n query: str,\n timestamp_field: Optional[str] = \"\",\n created_timestamp_column: Optional[str] = \"\",\n field_mapping: Optional[Dict[str, str]] = None,\n date_partition_column: Optional[str] = \"\",\n ):\n self._postgres_options = PostgreSQLOptions(name=name, query=query)\n\n super().__init__(\n name=name,\n timestamp_field=timestamp_field,\n created_timestamp_column=created_timestamp_column,\n field_mapping=field_mapping,\n date_partition_column=date_partition_column,\n )\n\n def __hash__(self):\n return super().__hash__()\n\n def __eq__(self, other):\n if not isinstance(other, PostgreSQLSource):\n raise TypeError(\n \"Comparisons should only involve PostgreSQLSource class objects.\"\n )\n\n return (\n self._postgres_options._query == other._postgres_options._query\n and self.timestamp_field == other.timestamp_field\n and self.created_timestamp_column == other.created_timestamp_column\n and self.field_mapping == other.field_mapping\n )\n\n @staticmethod\n def from_proto(data_source: DataSourceProto):\n assert data_source.HasField(\"custom_options\")\n\n postgres_options = json.loads(data_source.custom_options.configuration)\n return PostgreSQLSource(\n name=postgres_options[\"name\"],\n query=postgres_options[\"query\"],\n field_mapping=dict(data_source.field_mapping),\n timestamp_field=data_source.timestamp_field,\n created_timestamp_column=data_source.created_timestamp_column,\n date_partition_column=data_source.date_partition_column,\n )\n\n def to_proto(self) -> DataSourceProto:\n data_source_proto = DataSourceProto(\n type=DataSourceProto.CUSTOM_SOURCE,\n data_source_class_type=\"feast.infra.offline_stores.contrib.postgres_offline_store.postgres_source.PostgreSQLSource\",\n field_mapping=self.field_mapping,\n custom_options=self._postgres_options.to_proto(),\n )\n\n data_source_proto.timestamp_field = self.timestamp_field\n data_source_proto.created_timestamp_column = self.created_timestamp_column\n data_source_proto.date_partition_column = self.date_partition_column\n\n return data_source_proto\n\n def validate(self, config: RepoConfig):\n pass\n\n @staticmethod\n def source_datatype_to_feast_value_type() -> Callable[[str], ValueType]:\n return pg_type_to_feast_value_type\n\n def get_table_column_names_and_types(\n self, config: RepoConfig\n ) -> Iterable[Tuple[str, str]]:\n with _get_conn(config.offline_store) as conn, conn.cursor() as cur:\n cur.execute(\n f\"SELECT * FROM ({self.get_table_query_string()}) AS sub LIMIT 0\"\n )\n return (\n (c.name, pg_type_code_to_pg_type(c.type_code)) for c in cur.description\n )\n\n def get_table_query_string(self) -> str:\n return f\"({self._postgres_options._query})\"\n\n\nclass PostgreSQLOptions:\n def __init__(self, name: str, query: Optional[str]):\n self._name = name\n self._query = query\n\n @classmethod\n def from_proto(cls, postgres_options_proto: DataSourceProto.CustomSourceOptions):\n config = 
json.loads(postgres_options_proto.configuration.decode(\"utf8\"))\n postgres_options = cls(name=config[\"name\"], query=config[\"query\"])\n\n return postgres_options\n\n def to_proto(self) -> DataSourceProto.CustomSourceOptions:\n postgres_options_proto = DataSourceProto.CustomSourceOptions(\n configuration=json.dumps(\n {\"name\": self._name, \"query\": self._query}\n ).encode()\n )\n\n return postgres_options_proto\n", "path": "sdk/python/feast/infra/offline_stores/contrib/postgres_offline_store/postgres_source.py"}]} | 1,804 | 439 |
gh_patches_debug_1634 | rasdani/github-patches | git_diff | coala__coala-4980 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Compatibility.py: Add comment explaining JSONDecodeError is missing in Python 3.4
difficulty/newcomer
Opened via [gitter](https://gitter.im/coala/coala/?at=59098da68fcce56b205cd7e0) by @jayvdb
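For context, a sketch of the commented version (`json.decoder.JSONDecodeError` was only added in Python 3.5, and it subclasses `ValueError`, which is why the fallback works):

```python
import json
try:
    # json.decoder.JSONDecodeError only exists on Python 3.5+;
    # on Python 3.4 this attribute lookup raises AttributeError.
    JSONDecodeError = json.decoder.JSONDecodeError
except AttributeError:  # pragma Python 3.5,3.6: no cover
    JSONDecodeError = ValueError
```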
</issue>
<code>
[start of coalib/misc/Compatibility.py]
1 import json
2 try:
3 JSONDecodeError = json.decoder.JSONDecodeError
4 except AttributeError: # pragma Python 3.5,3.6: no cover
5 JSONDecodeError = ValueError
6
[end of coalib/misc/Compatibility.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/coalib/misc/Compatibility.py b/coalib/misc/Compatibility.py
--- a/coalib/misc/Compatibility.py
+++ b/coalib/misc/Compatibility.py
@@ -1,5 +1,6 @@
import json
try:
+ # JSONDecodeError class is available since Python 3.5.x.
JSONDecodeError = json.decoder.JSONDecodeError
except AttributeError: # pragma Python 3.5,3.6: no cover
JSONDecodeError = ValueError
| {"golden_diff": "diff --git a/coalib/misc/Compatibility.py b/coalib/misc/Compatibility.py\n--- a/coalib/misc/Compatibility.py\n+++ b/coalib/misc/Compatibility.py\n@@ -1,5 +1,6 @@\n import json\n try:\n+ # JSONDecodeError class is available since Python 3.5.x.\n JSONDecodeError = json.decoder.JSONDecodeError\n except AttributeError: # pragma Python 3.5,3.6: no cover\n JSONDecodeError = ValueError\n", "issue": "Compatibility.py: Add comment explaining JSONDecodeError is missing in Python 3.4\ndifficulty/newcomer\n\nOpened via [gitter](https://gitter.im/coala/coala/?at=59098da68fcce56b205cd7e0) by @jayvdb\n", "before_files": [{"content": "import json\ntry:\n JSONDecodeError = json.decoder.JSONDecodeError\nexcept AttributeError: # pragma Python 3.5,3.6: no cover\n JSONDecodeError = ValueError\n", "path": "coalib/misc/Compatibility.py"}]} | 654 | 110 |
gh_patches_debug_727 | rasdani/github-patches | git_diff | comic__grand-challenge.org-2027 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Integrate Forums into challenges
Navigating to the forum of a challenge currently takes the participant outside of the challenge environment. Navigating back to the challenge is not possible through the breadcrumbs on the forum page and instead requires going via the Challenge tab and searching for the respective Challenge again. It would be nicer if the forums were visually integrated into the challenge page layout and if the breadcrumbs reflected their nesting in the challenge rather than their nesting under all forums on GC.
See here: https://github.com/DIAGNijmegen/rse-roadmap/issues/83#issuecomment-919250835
</issue>
<code>
[start of app/grandchallenge/forum_conversation/templatetags/forum_extras.py]
1 from actstream.models import Follow
2 from django import template
3 from django.contrib.contenttypes.models import ContentType
4
5 from grandchallenge.notifications.forms import FollowForm
6
7 register = template.Library()
8
9
10 @register.simple_tag
11 def get_follow_object_pk(user, follow_object):
12 object_follows_for_user = Follow.objects.filter(
13 user=user,
14 content_type=ContentType.objects.get(
15 app_label=follow_object._meta.app_label,
16 model=follow_object._meta.model_name,
17 ),
18 ).all()
19
20 if not object_follows_for_user:
21 current_follow_object = []
22 else:
23 current_follow_object = []
24 for obj in object_follows_for_user:
25 if not obj.follow_object:
26 continue
27 elif obj.follow_object.id == follow_object.id:
28 current_follow_object = obj.pk
29 return current_follow_object
30
31
32 @register.simple_tag
33 def follow_form(*, user, object_id, content_type):
34 return FollowForm(
35 user=user,
36 initial={
37 "object_id": object_id,
38 "content_type": content_type,
39 "actor_only": False,
40 },
41 )
42
43
44 @register.simple_tag()
45 def get_content_type(follow_object):
46 try:
47 ct = ContentType.objects.get(
48 app_label=follow_object._meta.app_label,
49 model=follow_object._meta.model_name,
50 )
51 except AttributeError:
52 ct = None
53 return ct
54
[end of app/grandchallenge/forum_conversation/templatetags/forum_extras.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/grandchallenge/forum_conversation/templatetags/forum_extras.py b/app/grandchallenge/forum_conversation/templatetags/forum_extras.py
--- a/app/grandchallenge/forum_conversation/templatetags/forum_extras.py
+++ b/app/grandchallenge/forum_conversation/templatetags/forum_extras.py
@@ -51,3 +51,9 @@
except AttributeError:
ct = None
return ct
+
+
[email protected]_tag()
+def is_participant(user, challenge):
+ if challenge.is_participant(user):
+ return True
| {"golden_diff": "diff --git a/app/grandchallenge/forum_conversation/templatetags/forum_extras.py b/app/grandchallenge/forum_conversation/templatetags/forum_extras.py\n--- a/app/grandchallenge/forum_conversation/templatetags/forum_extras.py\n+++ b/app/grandchallenge/forum_conversation/templatetags/forum_extras.py\n@@ -51,3 +51,9 @@\n except AttributeError:\r\n ct = None\r\n return ct\r\n+\r\n+\r\[email protected]_tag()\r\n+def is_participant(user, challenge):\r\n+ if challenge.is_participant(user):\r\n+ return True\n", "issue": "Integrate Forums into challenges \nNavigating to the forum of a challenge currently takes the participant outside of the challenge environment. Navigating back to the challenge is not possible through the breadcrumbs on the forum page and instead requires going via the Challenge tab and searching for the respective Challenge again. It would be nicer if the forums were visually integrated into the challenge page layout and if the breadcrumbs reflected their nesting in the challenge rather than their nesting under all forum on GC. \r\n\r\nSee here: https://github.com/DIAGNijmegen/rse-roadmap/issues/83#issuecomment-919250835\r\n\n", "before_files": [{"content": "from actstream.models import Follow\r\nfrom django import template\r\nfrom django.contrib.contenttypes.models import ContentType\r\n\r\nfrom grandchallenge.notifications.forms import FollowForm\r\n\r\nregister = template.Library()\r\n\r\n\r\[email protected]_tag\r\ndef get_follow_object_pk(user, follow_object):\r\n object_follows_for_user = Follow.objects.filter(\r\n user=user,\r\n content_type=ContentType.objects.get(\r\n app_label=follow_object._meta.app_label,\r\n model=follow_object._meta.model_name,\r\n ),\r\n ).all()\r\n\r\n if not object_follows_for_user:\r\n current_follow_object = []\r\n else:\r\n current_follow_object = []\r\n for obj in object_follows_for_user:\r\n if not obj.follow_object:\r\n continue\r\n elif obj.follow_object.id == follow_object.id:\r\n current_follow_object = obj.pk\r\n return current_follow_object\r\n\r\n\r\[email protected]_tag\r\ndef follow_form(*, user, object_id, content_type):\r\n return FollowForm(\r\n user=user,\r\n initial={\r\n \"object_id\": object_id,\r\n \"content_type\": content_type,\r\n \"actor_only\": False,\r\n },\r\n )\r\n\r\n\r\[email protected]_tag()\r\ndef get_content_type(follow_object):\r\n try:\r\n ct = ContentType.objects.get(\r\n app_label=follow_object._meta.app_label,\r\n model=follow_object._meta.model_name,\r\n )\r\n except AttributeError:\r\n ct = None\r\n return ct\r\n", "path": "app/grandchallenge/forum_conversation/templatetags/forum_extras.py"}]} | 1,082 | 135 |
gh_patches_debug_10509 | rasdani/github-patches | git_diff | openfun__richie-2035 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cookiecutter bootstrap failure
## Bug Report
**Problematic Behavior**
The `nightly round` job warns us that there is a problem with the cookiecutter template.
**Additional context/Screenshots**
[Failing `nightly round` CircleCI job #183180](https://app.circleci.com/pipelines/github/openfun/richie/6840/workflows/7b6bd5f9-e2d4-4ef1-8e54-4562a521d50d/jobs/183180)
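One plausible culprit, given the template's imports below, is a stale import of `redirects_urlpatterns` (assuming it no longer exists upstream in `richie.apps.courses.urls`); the repaired import would shrink to:

```python
from richie.apps.courses.urls import urlpatterns as courses_urlpatterns
```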
</issue>
<code>
[start of cookiecutter/{{cookiecutter.organization}}-richie-site-factory/template/{{cookiecutter.site}}/src/backend/{{cookiecutter.site}}/urls.py]
1 """
2 {{cookiecutter.site}} urls
3 """
4 from django.conf import settings
5 from django.conf.urls.i18n import i18n_patterns
6 from django.contrib import admin
7 from django.contrib.sitemaps.views import sitemap
8 from django.contrib.staticfiles.urls import staticfiles_urlpatterns
9 from django.urls import include, path, re_path
10 from django.views.generic import TemplateView
11 from django.views.static import serve
12
13 from cms.sitemaps import CMSSitemap
14 from richie.apps.courses.urls import (
15 redirects_urlpatterns as courses_redirects_urlpatterns,
16 urlpatterns as courses_urlpatterns,
17 )
18 from richie.apps.search.urls import urlpatterns as search_urlpatterns
19 from richie.plugins.urls import urlpatterns as plugins_urlpatterns
20
21 # For now, we use URLPathVersioning to be consistent with fonzie. Fonzie uses it
22 # because DRF OpenAPI only supports URLPathVersioning for now. See fonzie
23 # API_PREFIX config for more information.
24 API_PREFIX = r"v(?P<version>[0-9]+\.[0-9]+)"
25
26 admin.autodiscover()
27 admin.site.enable_nav_sidebar = False
28
29 urlpatterns = [
30 path(r"sitemap.xml", sitemap, {"sitemaps": {"cmspages": CMSSitemap}}),
31 re_path(
32 rf"api/{API_PREFIX:s}/",
33 include([*courses_urlpatterns, *search_urlpatterns, *plugins_urlpatterns]),
34 ),
35 path(r"", include("filer.server.urls")),
36 path(r"django-check-seo/", include("django_check_seo.urls")),
37 ]
38
39 urlpatterns += i18n_patterns(
40 path(r"admin/", admin.site.urls),
41 path(r"accounts/", include("django.contrib.auth.urls")),
42 path(r"", include("cms.urls")), # NOQA
43 )
44
45 # This is only needed when using runserver.
46 if settings.DEBUG:
47 urlpatterns = (
48 [
49 path(
50 r"styleguide/",
51 TemplateView.as_view(
52 template_name="richie/styleguide/index.html",
53 extra_context={"STYLEGUIDE": settings.STYLEGUIDE},
54 ),
55 name="styleguide",
56 ),
57 path(
58 r"media/<path:path>",
59 serve,
60 {"document_root": settings.MEDIA_ROOT, "show_indexes": True},
61 ),
62 ]
63 + staticfiles_urlpatterns()
64 + urlpatterns
65 )
66
67 handler400 = "richie.apps.core.views.error.error_400_view_handler"
68 handler403 = "richie.apps.core.views.error.error_403_view_handler"
69 handler404 = "richie.apps.core.views.error.error_404_view_handler"
70 handler500 = "richie.apps.core.views.error.error_500_view_handler"
71
[end of cookiecutter/{{cookiecutter.organization}}-richie-site-factory/template/{{cookiecutter.site}}/src/backend/{{cookiecutter.site}}/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cookiecutter/{{cookiecutter.organization}}-richie-site-factory/template/{{cookiecutter.site}}/src/backend/{{cookiecutter.site}}/urls.py b/cookiecutter/{{cookiecutter.organization}}-richie-site-factory/template/{{cookiecutter.site}}/src/backend/{{cookiecutter.site}}/urls.py
--- a/cookiecutter/{{cookiecutter.organization}}-richie-site-factory/template/{{cookiecutter.site}}/src/backend/{{cookiecutter.site}}/urls.py
+++ b/cookiecutter/{{cookiecutter.organization}}-richie-site-factory/template/{{cookiecutter.site}}/src/backend/{{cookiecutter.site}}/urls.py
@@ -11,10 +11,7 @@
from django.views.static import serve
from cms.sitemaps import CMSSitemap
-from richie.apps.courses.urls import (
- redirects_urlpatterns as courses_redirects_urlpatterns,
- urlpatterns as courses_urlpatterns,
-)
+from richie.apps.courses.urls import urlpatterns as courses_urlpatterns
from richie.apps.search.urls import urlpatterns as search_urlpatterns
from richie.plugins.urls import urlpatterns as plugins_urlpatterns
| {"golden_diff": "diff --git a/cookiecutter/{{cookiecutter.organization}}-richie-site-factory/template/{{cookiecutter.site}}/src/backend/{{cookiecutter.site}}/urls.py b/cookiecutter/{{cookiecutter.organization}}-richie-site-factory/template/{{cookiecutter.site}}/src/backend/{{cookiecutter.site}}/urls.py\n--- a/cookiecutter/{{cookiecutter.organization}}-richie-site-factory/template/{{cookiecutter.site}}/src/backend/{{cookiecutter.site}}/urls.py\n+++ b/cookiecutter/{{cookiecutter.organization}}-richie-site-factory/template/{{cookiecutter.site}}/src/backend/{{cookiecutter.site}}/urls.py\n@@ -11,10 +11,7 @@\n from django.views.static import serve\n \n from cms.sitemaps import CMSSitemap\n-from richie.apps.courses.urls import (\n- redirects_urlpatterns as courses_redirects_urlpatterns,\n- urlpatterns as courses_urlpatterns,\n-)\n+from richie.apps.courses.urls import urlpatterns as courses_urlpatterns\n from richie.apps.search.urls import urlpatterns as search_urlpatterns\n from richie.plugins.urls import urlpatterns as plugins_urlpatterns\n", "issue": "Cookiecutter bootstrap failure\n## Bug Report\r\n\r\n**Problematic Behavior**\r\nThe `nightly round` job warns us that there is a problem with cookiecutter template.\r\n\r\n**Additional context/Screenshots**\r\n[Add any other context about the problem here. If applicable, add screenshots to help explain.](https://app.circleci.com/pipelines/github/openfun/richie/6840/workflows/7b6bd5f9-e2d4-4ef1-8e54-4562a521d50d/jobs/183180)\r\n\n", "before_files": [{"content": "\"\"\"\n{{cookiecutter.site}} urls\n\"\"\"\nfrom django.conf import settings\nfrom django.conf.urls.i18n import i18n_patterns\nfrom django.contrib import admin\nfrom django.contrib.sitemaps.views import sitemap\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom django.urls import include, path, re_path\nfrom django.views.generic import TemplateView\nfrom django.views.static import serve\n\nfrom cms.sitemaps import CMSSitemap\nfrom richie.apps.courses.urls import (\n redirects_urlpatterns as courses_redirects_urlpatterns,\n urlpatterns as courses_urlpatterns,\n)\nfrom richie.apps.search.urls import urlpatterns as search_urlpatterns\nfrom richie.plugins.urls import urlpatterns as plugins_urlpatterns\n\n# For now, we use URLPathVersioning to be consistent with fonzie. Fonzie uses it\n# because DRF OpenAPI only supports URLPathVersioning for now. 
See fonzie\n# API_PREFIX config for more information.\nAPI_PREFIX = r\"v(?P<version>[0-9]+\\.[0-9]+)\"\n\nadmin.autodiscover()\nadmin.site.enable_nav_sidebar = False\n\nurlpatterns = [\n path(r\"sitemap.xml\", sitemap, {\"sitemaps\": {\"cmspages\": CMSSitemap}}),\n re_path(\n rf\"api/{API_PREFIX:s}/\",\n include([*courses_urlpatterns, *search_urlpatterns, *plugins_urlpatterns]),\n ),\n path(r\"\", include(\"filer.server.urls\")),\n path(r\"django-check-seo/\", include(\"django_check_seo.urls\")),\n]\n\nurlpatterns += i18n_patterns(\n path(r\"admin/\", admin.site.urls),\n path(r\"accounts/\", include(\"django.contrib.auth.urls\")),\n path(r\"\", include(\"cms.urls\")), # NOQA\n)\n\n# This is only needed when using runserver.\nif settings.DEBUG:\n urlpatterns = (\n [\n path(\n r\"styleguide/\",\n TemplateView.as_view(\n template_name=\"richie/styleguide/index.html\",\n extra_context={\"STYLEGUIDE\": settings.STYLEGUIDE},\n ),\n name=\"styleguide\",\n ),\n path(\n r\"media/<path:path>\",\n serve,\n {\"document_root\": settings.MEDIA_ROOT, \"show_indexes\": True},\n ),\n ]\n + staticfiles_urlpatterns()\n + urlpatterns\n )\n\nhandler400 = \"richie.apps.core.views.error.error_400_view_handler\"\nhandler403 = \"richie.apps.core.views.error.error_403_view_handler\"\nhandler404 = \"richie.apps.core.views.error.error_404_view_handler\"\nhandler500 = \"richie.apps.core.views.error.error_500_view_handler\"\n", "path": "cookiecutter/{{cookiecutter.organization}}-richie-site-factory/template/{{cookiecutter.site}}/src/backend/{{cookiecutter.site}}/urls.py"}]} | 1,427 | 254 |
gh_patches_debug_37418 | rasdani/github-patches | git_diff | python-telegram-bot__python-telegram-bot-2202 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Passing non-bytes file input leads to error
https://t.me/pythontelegrambotgroup/396541
TL;DR:
`send_document(open('text_file', 'rb'))` works, but `send_document(open('text_file', 'r'))` raises an error.
This is because we try to guess whether the file is an image using `imghdr.what(None, stream)` in `InputFile.is_image`, which only works if `stream` is a bytes stream.
If I comment out the `is_image` call, the file is sent without issue, so I guess we should just check that the input is bytes before calling `is_image`.
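A minimal sketch of the failure mechanism (throwaway file name, purely illustrative): `imghdr.what` returns `None` for non-image *bytes*, but raises `TypeError` on *str* input, and `InputFile.__init__` only catches `TelegramError`:

```python
import imghdr

with open('demo.txt', 'w') as f:        # throwaway text file for the demo
    f.write('not an image')

with open('demo.txt', 'rb') as f:       # binary mode -> bytes
    print(imghdr.what(None, f.read()))  # prints None, no exception raised

with open('demo.txt', 'r') as f:        # text mode -> str
    imghdr.what(None, f.read())         # raises TypeError, which the
                                        # `except TelegramError` in __init__
                                        # never catches
```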
</issue>
<code>
[start of telegram/files/inputfile.py]
1 #!/usr/bin/env python
2 # pylint: disable=W0622,E0611
3 #
4 # A library that provides a Python interface to the Telegram Bot API
5 # Copyright (C) 2015-2020
6 # Leandro Toledo de Souza <[email protected]>
7 #
8 # This program is free software: you can redistribute it and/or modify
9 # it under the terms of the GNU Lesser Public License as published by
10 # the Free Software Foundation, either version 3 of the License, or
11 # (at your option) any later version.
12 #
13 # This program is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU Lesser Public License for more details.
17 #
18 # You should have received a copy of the GNU Lesser Public License
19 # along with this program. If not, see [http://www.gnu.org/licenses/].
20 """This module contains an object that represents a Telegram InputFile."""
21
22 import imghdr
23 import mimetypes
24 import os
25 from typing import IO, Optional, Tuple
26 from uuid import uuid4
27
28 from telegram import TelegramError
29
30 DEFAULT_MIME_TYPE = 'application/octet-stream'
31
32
33 class InputFile:
34 """This object represents a Telegram InputFile.
35
36 Attributes:
37 input_file_content (:obj:`bytes`): The binary content of the file to send.
38 filename (:obj:`str`): Optional. Filename for the file to be sent.
39 attach (:obj:`str`): Optional. Attach id for sending multiple files.
40
41 Args:
42 obj (:obj:`File handler`): An open file descriptor.
43 filename (:obj:`str`, optional): Filename for this InputFile.
44 attach (:obj:`bool`, optional): Whether this should be send as one file or is part of a
45 collection of files.
46
47 Raises:
48 TelegramError
49
50 """
51
52 def __init__(self, obj: IO, filename: str = None, attach: bool = None):
53 self.filename = None
54 self.input_file_content = obj.read()
55 self.attach = 'attached' + uuid4().hex if attach else None
56
57 if filename:
58 self.filename = filename
59 elif hasattr(obj, 'name') and not isinstance(obj.name, int):
60 self.filename = os.path.basename(obj.name)
61
62 try:
63 self.mimetype = self.is_image(self.input_file_content)
64 except TelegramError:
65 if self.filename:
66 self.mimetype = mimetypes.guess_type(self.filename)[0] or DEFAULT_MIME_TYPE
67 else:
68 self.mimetype = DEFAULT_MIME_TYPE
69 if not self.filename:
70 self.filename = self.mimetype.replace('/', '.')
71
72 @property
73 def field_tuple(self) -> Tuple[str, bytes, str]:
74 return self.filename, self.input_file_content, self.mimetype
75
76 @staticmethod
77 def is_image(stream: bytes) -> str:
78 """Check if the content file is an image by analyzing its headers.
79
80 Args:
81 stream (:obj:`bytes`): A byte stream representing the content of a file.
82
83 Returns:
84 :obj:`str`: The str mime-type of an image.
85
86 """
87 image = imghdr.what(None, stream)
88 if image:
89 return 'image/%s' % image
90
91 raise TelegramError('Could not parse file content')
92
93 @staticmethod
94 def is_file(obj: object) -> bool:
95 return hasattr(obj, 'read')
96
97 def to_dict(self) -> Optional[str]:
98 if self.attach:
99 return 'attach://' + self.attach
100 return None
101
[end of telegram/files/inputfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/telegram/files/inputfile.py b/telegram/files/inputfile.py
--- a/telegram/files/inputfile.py
+++ b/telegram/files/inputfile.py
@@ -20,14 +20,14 @@
"""This module contains an object that represents a Telegram InputFile."""
import imghdr
+import logging
import mimetypes
import os
from typing import IO, Optional, Tuple
from uuid import uuid4
-from telegram import TelegramError
-
DEFAULT_MIME_TYPE = 'application/octet-stream'
+logger = logging.getLogger(__name__)
class InputFile:
@@ -59,13 +59,14 @@
elif hasattr(obj, 'name') and not isinstance(obj.name, int):
self.filename = os.path.basename(obj.name)
- try:
- self.mimetype = self.is_image(self.input_file_content)
- except TelegramError:
- if self.filename:
- self.mimetype = mimetypes.guess_type(self.filename)[0] or DEFAULT_MIME_TYPE
- else:
- self.mimetype = DEFAULT_MIME_TYPE
+ image_mime_type = self.is_image(self.input_file_content)
+ if image_mime_type:
+ self.mimetype = image_mime_type
+ elif self.filename:
+ self.mimetype = mimetypes.guess_type(self.filename)[0] or DEFAULT_MIME_TYPE
+ else:
+ self.mimetype = DEFAULT_MIME_TYPE
+
if not self.filename:
self.filename = self.mimetype.replace('/', '.')
@@ -74,21 +75,27 @@
return self.filename, self.input_file_content, self.mimetype
@staticmethod
- def is_image(stream: bytes) -> str:
+ def is_image(stream: bytes) -> Optional[str]:
"""Check if the content file is an image by analyzing its headers.
Args:
stream (:obj:`bytes`): A byte stream representing the content of a file.
Returns:
- :obj:`str`: The str mime-type of an image.
+ :obj:`str` | :obj:`None`: The mime-type of an image, if the input is an image, or
+ :obj:`None` else.
"""
- image = imghdr.what(None, stream)
- if image:
- return 'image/%s' % image
-
- raise TelegramError('Could not parse file content')
+ try:
+ image = imghdr.what(None, stream)
+ if image:
+ return f'image/{image}'
+ return None
+ except Exception:
+ logger.debug(
+ "Could not parse file content. Assuming that file is not an image.", exc_info=True
+ )
+ return None
@staticmethod
def is_file(obj: object) -> bool:
| {"golden_diff": "diff --git a/telegram/files/inputfile.py b/telegram/files/inputfile.py\n--- a/telegram/files/inputfile.py\n+++ b/telegram/files/inputfile.py\n@@ -20,14 +20,14 @@\n \"\"\"This module contains an object that represents a Telegram InputFile.\"\"\"\n \n import imghdr\n+import logging\n import mimetypes\n import os\n from typing import IO, Optional, Tuple\n from uuid import uuid4\n \n-from telegram import TelegramError\n-\n DEFAULT_MIME_TYPE = 'application/octet-stream'\n+logger = logging.getLogger(__name__)\n \n \n class InputFile:\n@@ -59,13 +59,14 @@\n elif hasattr(obj, 'name') and not isinstance(obj.name, int):\n self.filename = os.path.basename(obj.name)\n \n- try:\n- self.mimetype = self.is_image(self.input_file_content)\n- except TelegramError:\n- if self.filename:\n- self.mimetype = mimetypes.guess_type(self.filename)[0] or DEFAULT_MIME_TYPE\n- else:\n- self.mimetype = DEFAULT_MIME_TYPE\n+ image_mime_type = self.is_image(self.input_file_content)\n+ if image_mime_type:\n+ self.mimetype = image_mime_type\n+ elif self.filename:\n+ self.mimetype = mimetypes.guess_type(self.filename)[0] or DEFAULT_MIME_TYPE\n+ else:\n+ self.mimetype = DEFAULT_MIME_TYPE\n+\n if not self.filename:\n self.filename = self.mimetype.replace('/', '.')\n \n@@ -74,21 +75,27 @@\n return self.filename, self.input_file_content, self.mimetype\n \n @staticmethod\n- def is_image(stream: bytes) -> str:\n+ def is_image(stream: bytes) -> Optional[str]:\n \"\"\"Check if the content file is an image by analyzing its headers.\n \n Args:\n stream (:obj:`bytes`): A byte stream representing the content of a file.\n \n Returns:\n- :obj:`str`: The str mime-type of an image.\n+ :obj:`str` | :obj:`None`: The mime-type of an image, if the input is an image, or\n+ :obj:`None` else.\n \n \"\"\"\n- image = imghdr.what(None, stream)\n- if image:\n- return 'image/%s' % image\n-\n- raise TelegramError('Could not parse file content')\n+ try:\n+ image = imghdr.what(None, stream)\n+ if image:\n+ return f'image/{image}'\n+ return None\n+ except Exception:\n+ logger.debug(\n+ \"Could not parse file content. Assuming that file is not an image.\", exc_info=True\n+ )\n+ return None\n \n @staticmethod\n def is_file(obj: object) -> bool:\n", "issue": "[BUG] Passing non-bytes file input leads to error\nhttps://t.me/pythontelegrambotgroup/396541\r\n\r\nTL;DR:\r\n\r\n`send_document(open('text_file', 'rb'))` works but `send_document(open('text_file', 'r'))` raises is error.\r\nThis is, because we try to guess if the file is an image using `imghdr.what(None, stream)` in `InputFile.is_image`, which only works if `stream` is a bytes stream.\r\nIf I comment the `is_image` out, the file is sent without issue, so I guess we should just check if the input is bytes before calling `is_image`\n", "before_files": [{"content": "#!/usr/bin/env python\n# pylint: disable=W0622,E0611\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2020\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains an object that represents a Telegram InputFile.\"\"\"\n\nimport imghdr\nimport mimetypes\nimport os\nfrom typing import IO, Optional, Tuple\nfrom uuid import uuid4\n\nfrom telegram import TelegramError\n\nDEFAULT_MIME_TYPE = 'application/octet-stream'\n\n\nclass InputFile:\n \"\"\"This object represents a Telegram InputFile.\n\n Attributes:\n input_file_content (:obj:`bytes`): The binary content of the file to send.\n filename (:obj:`str`): Optional. Filename for the file to be sent.\n attach (:obj:`str`): Optional. Attach id for sending multiple files.\n\n Args:\n obj (:obj:`File handler`): An open file descriptor.\n filename (:obj:`str`, optional): Filename for this InputFile.\n attach (:obj:`bool`, optional): Whether this should be send as one file or is part of a\n collection of files.\n\n Raises:\n TelegramError\n\n \"\"\"\n\n def __init__(self, obj: IO, filename: str = None, attach: bool = None):\n self.filename = None\n self.input_file_content = obj.read()\n self.attach = 'attached' + uuid4().hex if attach else None\n\n if filename:\n self.filename = filename\n elif hasattr(obj, 'name') and not isinstance(obj.name, int):\n self.filename = os.path.basename(obj.name)\n\n try:\n self.mimetype = self.is_image(self.input_file_content)\n except TelegramError:\n if self.filename:\n self.mimetype = mimetypes.guess_type(self.filename)[0] or DEFAULT_MIME_TYPE\n else:\n self.mimetype = DEFAULT_MIME_TYPE\n if not self.filename:\n self.filename = self.mimetype.replace('/', '.')\n\n @property\n def field_tuple(self) -> Tuple[str, bytes, str]:\n return self.filename, self.input_file_content, self.mimetype\n\n @staticmethod\n def is_image(stream: bytes) -> str:\n \"\"\"Check if the content file is an image by analyzing its headers.\n\n Args:\n stream (:obj:`bytes`): A byte stream representing the content of a file.\n\n Returns:\n :obj:`str`: The str mime-type of an image.\n\n \"\"\"\n image = imghdr.what(None, stream)\n if image:\n return 'image/%s' % image\n\n raise TelegramError('Could not parse file content')\n\n @staticmethod\n def is_file(obj: object) -> bool:\n return hasattr(obj, 'read')\n\n def to_dict(self) -> Optional[str]:\n if self.attach:\n return 'attach://' + self.attach\n return None\n", "path": "telegram/files/inputfile.py"}]} | 1,660 | 610 |
gh_patches_debug_218 | rasdani/github-patches | git_diff | TheAlgorithms__Python-7054 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add typing to maths/segmented_sieve.py
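Illustratively, the requested change amounts to annotating the function signature with type hints, e.g. (body elided):

```python
def sieve(n: int) -> list[int]:
    """Segmented Sieve."""
    primes: list[int] = []
    # ... sieve logic unchanged ...
    return primes
```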
### Describe your change:
* [ ] Add an algorithm?
* [ ] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [ ] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [ ] This pull request is all my own work -- I have not plagiarized.
* [ ] I know that pull requests will not be merged if they fail the automated tests.
* [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [ ] All new Python files are placed inside an existing directory.
* [ ] All filenames are in all lowercase characters with no spaces or dashes.
* [ ] All functions and variable names follow Python naming conventions.
* [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
</issue>
<code>
[start of maths/segmented_sieve.py]
1 """Segmented Sieve."""
2
3 import math
4
5
6 def sieve(n):
7 """Segmented Sieve."""
8 in_prime = []
9 start = 2
10 end = int(math.sqrt(n)) # Size of every segment
11 temp = [True] * (end + 1)
12 prime = []
13
14 while start <= end:
15 if temp[start] is True:
16 in_prime.append(start)
17 for i in range(start * start, end + 1, start):
18 temp[i] = False
19 start += 1
20 prime += in_prime
21
22 low = end + 1
23 high = min(2 * end, n)
24
25 while low <= n:
26 temp = [True] * (high - low + 1)
27 for each in in_prime:
28
29 t = math.floor(low / each) * each
30 if t < low:
31 t += each
32
33 for j in range(t, high + 1, each):
34 temp[j - low] = False
35
36 for j in range(len(temp)):
37 if temp[j] is True:
38 prime.append(j + low)
39
40 low = high + 1
41 high = min(high + end, n)
42
43 return prime
44
45
46 print(sieve(10**6))
47
[end of maths/segmented_sieve.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/maths/segmented_sieve.py b/maths/segmented_sieve.py
--- a/maths/segmented_sieve.py
+++ b/maths/segmented_sieve.py
@@ -3,7 +3,7 @@
import math
-def sieve(n):
+def sieve(n: int) -> list[int]:
"""Segmented Sieve."""
in_prime = []
start = 2
| {"golden_diff": "diff --git a/maths/segmented_sieve.py b/maths/segmented_sieve.py\n--- a/maths/segmented_sieve.py\n+++ b/maths/segmented_sieve.py\n@@ -3,7 +3,7 @@\n import math\r\n \r\n \r\n-def sieve(n):\r\n+def sieve(n: int) -> list[int]:\r\n \"\"\"Segmented Sieve.\"\"\"\r\n in_prime = []\r\n start = 2\n", "issue": "Add typing to maths/segmented_sieve.py\n### Describe your change:\r\n\r\n\r\n\r\n* [ ] Add an algorithm?\r\n* [ ] Fix a bug or typo in an existing algorithm?\r\n* [ ] Documentation change?\r\n\r\n### Checklist:\r\n* [ ] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).\r\n* [ ] This pull request is all my own work -- I have not plagiarized.\r\n* [ ] I know that pull requests will not be merged if they fail the automated tests.\r\n* [ ] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.\r\n* [ ] All new Python files are placed inside an existing directory.\r\n* [ ] All filenames are in all lowercase characters with no spaces or dashes.\r\n* [ ] All functions and variable names follow Python naming conventions.\r\n* [ ] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).\r\n* [ ] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.\r\n* [ ] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.\r\n* [ ] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.\r\n\n", "before_files": [{"content": "\"\"\"Segmented Sieve.\"\"\"\r\n\r\nimport math\r\n\r\n\r\ndef sieve(n):\r\n \"\"\"Segmented Sieve.\"\"\"\r\n in_prime = []\r\n start = 2\r\n end = int(math.sqrt(n)) # Size of every segment\r\n temp = [True] * (end + 1)\r\n prime = []\r\n\r\n while start <= end:\r\n if temp[start] is True:\r\n in_prime.append(start)\r\n for i in range(start * start, end + 1, start):\r\n temp[i] = False\r\n start += 1\r\n prime += in_prime\r\n\r\n low = end + 1\r\n high = min(2 * end, n)\r\n\r\n while low <= n:\r\n temp = [True] * (high - low + 1)\r\n for each in in_prime:\r\n\r\n t = math.floor(low / each) * each\r\n if t < low:\r\n t += each\r\n\r\n for j in range(t, high + 1, each):\r\n temp[j - low] = False\r\n\r\n for j in range(len(temp)):\r\n if temp[j] is True:\r\n prime.append(j + low)\r\n\r\n low = high + 1\r\n high = min(high + end, n)\r\n\r\n return prime\r\n\r\n\r\nprint(sieve(10**6))\r\n", "path": "maths/segmented_sieve.py"}]} | 1,198 | 93 |
gh_patches_debug_16406 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-1583 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
sitecustomize is being run multiple times
When `opentelemetry-instrument` is run, the path of `opentelemetry-python`'s `sitecustomize.py` file is added to `PYTHONPATH`. This works fine unless the command executed by `opentelemetry-instrument` is also calling the `python` executable, which would make this `sitecustomize` be executed more than once. This is bad because this means multiple instrumentations may happen.
I'll be modifying `sitecustomize` to remove its path from `PYTHONPATH` after it has been executed.
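A sketch of that idea (assuming `PYTHONPATH` is set, as it is when launched via `opentelemetry-instrument`): once initialization is done, strip this module's directory from `PYTHONPATH` so nested `python` processes do not import this `sitecustomize` again.

```python
from os import environ
from os.path import abspath, dirname, pathsep
from re import sub

# Remove this file's directory (plus an optional trailing separator)
# from PYTHONPATH once instrumentation has run.
environ["PYTHONPATH"] = sub(
    r"{}{}?".format(dirname(abspath(__file__)), pathsep),
    "",
    environ["PYTHONPATH"],
)
```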
</issue>
<code>
[start of opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import sys
16 from logging import getLogger
17 from os import environ, path
18
19 from pkg_resources import iter_entry_points
20
21 from opentelemetry.environment_variables import (
22 OTEL_PYTHON_DISABLED_INSTRUMENTATIONS,
23 )
24
25 logger = getLogger(__file__)
26
27
28 def _load_distros():
29 for entry_point in iter_entry_points("opentelemetry_distro"):
30 try:
31 entry_point.load()().configure() # type: ignore
32 logger.debug("Distribution %s configured", entry_point.name)
33 except Exception as exc: # pylint: disable=broad-except
34 logger.exception(
35 "Distribution %s configuration failed", entry_point.name
36 )
37 raise exc
38
39
40 def _load_instrumentors():
41 package_to_exclude = environ.get(OTEL_PYTHON_DISABLED_INSTRUMENTATIONS, [])
42 if isinstance(package_to_exclude, str):
43 package_to_exclude = package_to_exclude.split(",")
44 # to handle users entering "requests , flask" or "requests, flask" with spaces
45 package_to_exclude = [x.strip() for x in package_to_exclude]
46
47 for entry_point in iter_entry_points("opentelemetry_instrumentor"):
48 try:
49 if entry_point.name in package_to_exclude:
50 logger.debug(
51 "Instrumentation skipped for library %s", entry_point.name
52 )
53 continue
54 entry_point.load()().instrument() # type: ignore
55 logger.debug("Instrumented %s", entry_point.name)
56 except Exception as exc: # pylint: disable=broad-except
57 logger.exception("Instrumenting of %s failed", entry_point.name)
58 raise exc
59
60
61 def _load_configurators():
62 configured = None
63 for entry_point in iter_entry_points("opentelemetry_configurator"):
64 if configured is not None:
65 logger.warning(
66 "Configuration of %s not loaded, %s already loaded",
67 entry_point.name,
68 configured,
69 )
70 continue
71 try:
72 entry_point.load()().configure() # type: ignore
73 configured = entry_point.name
74 except Exception as exc: # pylint: disable=broad-except
75 logger.exception("Configuration of %s failed", entry_point.name)
76 raise exc
77
78
79 def initialize():
80 try:
81 _load_distros()
82 _load_configurators()
83 _load_instrumentors()
84 except Exception: # pylint: disable=broad-except
85 logger.exception("Failed to auto initialize opentelemetry")
86
87
88 if (
89 hasattr(sys, "argv")
90 and sys.argv[0].split(path.sep)[-1] == "celery"
91 and "worker" in sys.argv[1:]
92 ):
93 from celery.signals import worker_process_init # pylint:disable=E0401
94
95 @worker_process_init.connect(weak=False)
96 def init_celery(*args, **kwargs):
97 initialize()
98
99
100 else:
101 initialize()
102
[end of opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py
--- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py
+++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py
@@ -15,6 +15,8 @@
import sys
from logging import getLogger
from os import environ, path
+from os.path import abspath, dirname, pathsep
+from re import sub
from pkg_resources import iter_entry_points
@@ -83,6 +85,12 @@
_load_instrumentors()
except Exception: # pylint: disable=broad-except
logger.exception("Failed to auto initialize opentelemetry")
+ finally:
+ environ["PYTHONPATH"] = sub(
+ r"{}{}?".format(dirname(abspath(__file__)), pathsep),
+ "",
+ environ["PYTHONPATH"],
+ )
if (
| {"golden_diff": "diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\n--- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\n+++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\n@@ -15,6 +15,8 @@\n import sys\n from logging import getLogger\n from os import environ, path\n+from os.path import abspath, dirname, pathsep\n+from re import sub\n \n from pkg_resources import iter_entry_points\n \n@@ -83,6 +85,12 @@\n _load_instrumentors()\n except Exception: # pylint: disable=broad-except\n logger.exception(\"Failed to auto initialize opentelemetry\")\n+ finally:\n+ environ[\"PYTHONPATH\"] = sub(\n+ r\"{}{}?\".format(dirname(abspath(__file__)), pathsep),\n+ \"\",\n+ environ[\"PYTHONPATH\"],\n+ )\n \n \n if (\n", "issue": "sitecustomize is being run multiple times\nWhen `opentelemetry-instrument` is run, the path of `opentelemetry-python`'s `sitecustomize.py` file is added to `PYTHONPATH`. This works fine unless the command executed by `opentelemetry-instrument` is also calling the `python` executable, which would make this `sitecustomize` be executed more than once. This is bad because this means multiple instrumentations may happen.\r\n\r\nI'll be modifying `sitecustomize` to remove its path from `PYTHONPATH` after it has been executed.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nfrom logging import getLogger\nfrom os import environ, path\n\nfrom pkg_resources import iter_entry_points\n\nfrom opentelemetry.environment_variables import (\n OTEL_PYTHON_DISABLED_INSTRUMENTATIONS,\n)\n\nlogger = getLogger(__file__)\n\n\ndef _load_distros():\n for entry_point in iter_entry_points(\"opentelemetry_distro\"):\n try:\n entry_point.load()().configure() # type: ignore\n logger.debug(\"Distribution %s configured\", entry_point.name)\n except Exception as exc: # pylint: disable=broad-except\n logger.exception(\n \"Distribution %s configuration failed\", entry_point.name\n )\n raise exc\n\n\ndef _load_instrumentors():\n package_to_exclude = environ.get(OTEL_PYTHON_DISABLED_INSTRUMENTATIONS, [])\n if isinstance(package_to_exclude, str):\n package_to_exclude = package_to_exclude.split(\",\")\n # to handle users entering \"requests , flask\" or \"requests, flask\" with spaces\n package_to_exclude = [x.strip() for x in package_to_exclude]\n\n for entry_point in iter_entry_points(\"opentelemetry_instrumentor\"):\n try:\n if entry_point.name in package_to_exclude:\n logger.debug(\n \"Instrumentation skipped for library %s\", entry_point.name\n )\n continue\n entry_point.load()().instrument() # type: ignore\n logger.debug(\"Instrumented %s\", entry_point.name)\n except Exception as exc: # pylint: disable=broad-except\n logger.exception(\"Instrumenting 
of %s failed\", entry_point.name)\n raise exc\n\n\ndef _load_configurators():\n configured = None\n for entry_point in iter_entry_points(\"opentelemetry_configurator\"):\n if configured is not None:\n logger.warning(\n \"Configuration of %s not loaded, %s already loaded\",\n entry_point.name,\n configured,\n )\n continue\n try:\n entry_point.load()().configure() # type: ignore\n configured = entry_point.name\n except Exception as exc: # pylint: disable=broad-except\n logger.exception(\"Configuration of %s failed\", entry_point.name)\n raise exc\n\n\ndef initialize():\n try:\n _load_distros()\n _load_configurators()\n _load_instrumentors()\n except Exception: # pylint: disable=broad-except\n logger.exception(\"Failed to auto initialize opentelemetry\")\n\n\nif (\n hasattr(sys, \"argv\")\n and sys.argv[0].split(path.sep)[-1] == \"celery\"\n and \"worker\" in sys.argv[1:]\n):\n from celery.signals import worker_process_init # pylint:disable=E0401\n\n @worker_process_init.connect(weak=False)\n def init_celery(*args, **kwargs):\n initialize()\n\n\nelse:\n initialize()\n", "path": "opentelemetry-instrumentation/src/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py"}]} | 1,621 | 242 |
gh_patches_debug_10092 | rasdani/github-patches | git_diff | beeware__toga-645 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ImageView example not working on Linux
## Expected Behavior
<!--- If you're describing a bug, tell us what you expect to happen. -->
ImageView demos display both a local image file and one from a web url
<!--- If you're requesting a new feature, tell us why you'd like this feature. -->
## Current Behavior
<!--- If you're describing a bug, what currently happens? -->
Displays the wrong path variable when the image is not found.
Concatenates the local application path and the URL when attempting to display a web URL.
Problem in the `rehint()` function: missing attribute `get_height`.
I tried to address the first two issues with #532, still need to work on the 3rd.
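For the first two problems, one way to separate the cases (helper name is hypothetical, not Toga API) is to join the application directory only onto inputs that are not already URLs:

```python
import os
from urllib.parse import urlparse

def resolve_image_source(source, app_dir):
    """Return remote URLs untouched; anchor bare paths at the app directory."""
    if urlparse(source).scheme in ("http", "https"):
        return source                     # web URL: use as-is
    return os.path.join(app_dir, source)  # local file: make it absolute

print(resolve_image_source("resources/brutus.png", "/opt/app"))
print(resolve_image_source("https://pybee.org/toga.png", "/opt/app"))
```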
## Steps to reproduce
<!--- Provide a set of steps describing how to reproduce this bug. If you have a live example, provide the link below -->
1. run the application in examples/imageview
2.
3.
## Your Environment
<!--- Provide details on your current environment you found the bug in -->
* Python Version (list the specific version number)
* Operating System and Version (select from the following and list the specific version number; if your OS is not listed, list that as well)
- [ ] macOS - version:
- [x] Linux - distro: - version: Ubuntu 18.04
- [ ] Windows - version:
- [ ] Other - name: - version:
* Toga Target (the type of app you are trying to generate)
- [ ] android
- [ ] cocoa
- [ ] django
- [x] gtk
- [ ] iOS
- [ ] tvOS
- [ ] watchOS
- [ ] winforms
- [ ] win32
- [ ] Other (please specify)
</issue>
<code>
[start of examples/imageview/imageview/app.py]
1 import os
2 import toga
3 from toga.style.pack import *
4
5 class ImageViewApp(toga.App):
6 def startup(self):
7 self.main_window = toga.MainWindow(title=self.name)
8
9 box = toga.Box()
10 box.style.padding = 40
11 box.style.update(alignment=CENTER)
12 box.style.update(direction=COLUMN)
13
14 # image from local path
15 # load brutus.png from the package
16 # We set the style width/height parameters for this one
17 image_from_path = toga.Image('resources/brutus.png')
18 imageview_from_path = toga.ImageView(image_from_path)
19 imageview_from_path.style.update(height=72)
20 imageview_from_path.style.update(width=72)
21 box.add(imageview_from_path)
22
23 # image from remote URL
24 # no style parameters - we let Pack determine how to allocate
25 # the space
26 image_from_url = toga.Image('https://pybee.org/project/projects/libraries/toga/toga.png')
27 imageview_from_url = toga.ImageView(image_from_url)
28 box.add(imageview_from_url)
29
30 self.main_window.content = box
31 self.main_window.show()
32
33 def main():
34 return ImageViewApp('ImageView', 'org.pybee.widgets.imageview')
35
36
37 if __name__ == '__main__':
38 app = main()
39 app.main_loop()
40
[end of examples/imageview/imageview/app.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/imageview/imageview/app.py b/examples/imageview/imageview/app.py
--- a/examples/imageview/imageview/app.py
+++ b/examples/imageview/imageview/app.py
@@ -14,7 +14,7 @@
# image from local path
# load brutus.png from the package
# We set the style width/height parameters for this one
- image_from_path = toga.Image('resources/brutus.png')
+ image_from_path = toga.Image('../resources/brutus.png')
imageview_from_path = toga.ImageView(image_from_path)
imageview_from_path.style.update(height=72)
imageview_from_path.style.update(width=72)
| {"golden_diff": "diff --git a/examples/imageview/imageview/app.py b/examples/imageview/imageview/app.py\n--- a/examples/imageview/imageview/app.py\n+++ b/examples/imageview/imageview/app.py\n@@ -14,7 +14,7 @@\n # image from local path\n # load brutus.png from the package\n # We set the style width/height parameters for this one\n- image_from_path = toga.Image('resources/brutus.png')\n+ image_from_path = toga.Image('../resources/brutus.png')\n imageview_from_path = toga.ImageView(image_from_path)\n imageview_from_path.style.update(height=72)\n imageview_from_path.style.update(width=72)\n", "issue": "ImageView example not working on Linux\n## Expected Behavior\r\n<!--- If you're describing a bug, tell us what you expect to happen. -->\r\nImageView demos display both a local image file and one from a web url\r\n<!--- If you're requesting a new feature, tell us why you'd like this feature. -->\r\n\r\n\r\n## Current Behavior\r\n<!--- If you're describing a bug, what currently happens? -->\r\nDisplays the wrong path variable when image not found\r\nConcatenates local application path and url when attempting to display web url\r\nproblem in rehint() function, missing attribute get_height\r\n\r\nI tried to address the first two issues with #532, still need to work on the 3rd.\r\n\r\n## Steps to reproduce\r\n<!--- Provide a set of steps describing how to reproduce this bug. If you have a live example, provide the link below -->\r\n1. run the application in examples/imageview\r\n\r\n2.\r\n\r\n3.\r\n\r\n\r\n## Your Environment\r\n<!--- Provide details on your current environment you found the bug in -->\r\n\r\n* Python Version (list the specific version number)\r\n\r\n* Operating System and Version (select from the following and list the specific version number; if your OS is not listed, list that as well)\r\n\r\n - [ ] macOS - version: \r\n - [ x] Linux - distro: - version: Ubuntu 18.04\r\n - [ ] Windows - version:\r\n - [ ] Other - name: - version:\r\n\r\n* Toga Target (the type of app you are trying to generate)\r\n \r\n - [ ] android\r\n - [ ] cocoa\r\n - [ ] django \r\n - [x ] gtk\r\n - [ ] iOS\r\n - [ ] tvOS\r\n - [ ] watchOS\r\n - [ ] winforms \r\n - [ ] win32\r\n - [ ] Other (please specify)\r\n\n", "before_files": [{"content": "import os\nimport toga\nfrom toga.style.pack import *\n\nclass ImageViewApp(toga.App):\n def startup(self):\n self.main_window = toga.MainWindow(title=self.name)\n \n box = toga.Box()\n box.style.padding = 40\n box.style.update(alignment=CENTER)\n box.style.update(direction=COLUMN)\n \n # image from local path\n # load brutus.png from the package\n # We set the style width/height parameters for this one\n image_from_path = toga.Image('resources/brutus.png')\n imageview_from_path = toga.ImageView(image_from_path)\n imageview_from_path.style.update(height=72)\n imageview_from_path.style.update(width=72)\n box.add(imageview_from_path)\n\n # image from remote URL\n # no style parameters - we let Pack determine how to allocate\n # the space\n image_from_url = toga.Image('https://pybee.org/project/projects/libraries/toga/toga.png')\n imageview_from_url = toga.ImageView(image_from_url)\n box.add(imageview_from_url)\n \n self.main_window.content = box\n self.main_window.show()\n\ndef main():\n return ImageViewApp('ImageView', 'org.pybee.widgets.imageview')\n\n\nif __name__ == '__main__':\n app = main()\n app.main_loop()\n", "path": "examples/imageview/imageview/app.py"}]} | 1,290 | 155 |
gh_patches_debug_39176 | rasdani/github-patches | git_diff | pulp__pulpcore-2768 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
As a plugin writer, I want to have a function for touching content units
Author: @lubosmj (lmjachky)
Redmine Issue: 9419, https://pulp.plan.io/issues/9419
---
In the PR https://github.com/pulp/pulpcore/pull/1624, we introduced a method that uses `bulk_touch` for updating timestamps of content units. We should expose this method to all plugin writers (e.g., pulp_container currently implements the same method - this creates unnecessary duplicates).
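A sketch of what the shared helper could look like (function name and placement are hypothetical; `.touch()` is the queryset call the `modify` view below already uses):

```python
from pulpcore.plugin.models import Content

def touch_content_units(content_pks):
    """Bulk-update the timestamps of the given content units."""
    Content.objects.filter(pk__in=content_pks).touch()
```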
</issue>
<code>
[start of pulpcore/plugin/actions.py]
1 from gettext import gettext as _
2 from drf_spectacular.utils import extend_schema
3 from rest_framework.decorators import action
4 from rest_framework.serializers import ValidationError
5
6 from pulpcore.app import tasks
7 from pulpcore.app.models import Content, RepositoryVersion
8 from pulpcore.app.response import OperationPostponedResponse
9 from pulpcore.app.serializers import (
10 AsyncOperationResponseSerializer,
11 RepositoryAddRemoveContentSerializer,
12 )
13 from pulpcore.app.viewsets import NamedModelViewSet
14 from pulpcore.tasking.tasks import dispatch
15
16
17 __all__ = ["ModifyRepositoryActionMixin"]
18
19
20 class ModifyRepositoryActionMixin:
21 @extend_schema(
22 description="Trigger an asynchronous task to create a new repository version.",
23 summary="Modify Repository Content",
24 responses={202: AsyncOperationResponseSerializer},
25 )
26 @action(detail=True, methods=["post"], serializer_class=RepositoryAddRemoveContentSerializer)
27 def modify(self, request, pk):
28 """
29 Queues a task that creates a new RepositoryVersion by adding and removing content units
30 """
31 add_content_units = {}
32 remove_content_units = {}
33
34 repository = self.get_object()
35 serializer = self.get_serializer(data=request.data)
36 serializer.is_valid(raise_exception=True)
37
38 if "base_version" in request.data:
39 base_version_pk = self.get_resource(request.data["base_version"], RepositoryVersion).pk
40 else:
41 base_version_pk = None
42
43 if "add_content_units" in request.data:
44 for url in request.data["add_content_units"]:
45 add_content_units[NamedModelViewSet.extract_pk(url)] = url
46
47 content_units_pks = set(add_content_units.keys())
48 existing_content_units = Content.objects.filter(pk__in=content_units_pks)
49 existing_content_units.touch()
50
51 self.verify_content_units(existing_content_units, add_content_units)
52
53 add_content_units = list(add_content_units.keys())
54
55 if "remove_content_units" in request.data:
56 if "*" in request.data["remove_content_units"]:
57 remove_content_units = ["*"]
58 else:
59 for url in request.data["remove_content_units"]:
60 remove_content_units[NamedModelViewSet.extract_pk(url)] = url
61 content_units_pks = set(remove_content_units.keys())
62 existing_content_units = Content.objects.filter(pk__in=content_units_pks)
63 self.verify_content_units(existing_content_units, remove_content_units)
64 remove_content_units = list(remove_content_units.keys())
65
66 task = dispatch(
67 tasks.repository.add_and_remove,
68 exclusive_resources=[repository],
69 kwargs={
70 "repository_pk": pk,
71 "base_version_pk": base_version_pk,
72 "add_content_units": add_content_units,
73 "remove_content_units": remove_content_units,
74 },
75 )
76 return OperationPostponedResponse(task, request)
77
78 def verify_content_units(self, content_units, all_content_units):
79 """Verify referenced content units."""
80 existing_content_units_pks = content_units.values_list("pk", flat=True)
81 existing_content_units_pks = {str(pk) for pk in existing_content_units_pks}
82
83 missing_pks = set(all_content_units.keys()) - existing_content_units_pks
84 if missing_pks:
85 missing_hrefs = [all_content_units[pk] for pk in missing_pks]
86 raise ValidationError(
87 _("Could not find the following content units: {}").format(missing_hrefs)
88 )
89
[end of pulpcore/plugin/actions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pulpcore/plugin/actions.py b/pulpcore/plugin/actions.py
--- a/pulpcore/plugin/actions.py
+++ b/pulpcore/plugin/actions.py
@@ -1,4 +1,5 @@
from gettext import gettext as _
+
from drf_spectacular.utils import extend_schema
from rest_framework.decorators import action
from rest_framework.serializers import ValidationError
@@ -48,7 +49,7 @@
existing_content_units = Content.objects.filter(pk__in=content_units_pks)
existing_content_units.touch()
- self.verify_content_units(existing_content_units, add_content_units)
+ raise_for_unknown_content_units(existing_content_units, add_content_units)
add_content_units = list(add_content_units.keys())
@@ -60,7 +61,7 @@
remove_content_units[NamedModelViewSet.extract_pk(url)] = url
content_units_pks = set(remove_content_units.keys())
existing_content_units = Content.objects.filter(pk__in=content_units_pks)
- self.verify_content_units(existing_content_units, remove_content_units)
+ raise_for_unknown_content_units(existing_content_units, remove_content_units)
remove_content_units = list(remove_content_units.keys())
task = dispatch(
@@ -75,14 +76,24 @@
)
return OperationPostponedResponse(task, request)
- def verify_content_units(self, content_units, all_content_units):
- """Verify referenced content units."""
- existing_content_units_pks = content_units.values_list("pk", flat=True)
- existing_content_units_pks = {str(pk) for pk in existing_content_units_pks}
-
- missing_pks = set(all_content_units.keys()) - existing_content_units_pks
- if missing_pks:
- missing_hrefs = [all_content_units[pk] for pk in missing_pks]
- raise ValidationError(
- _("Could not find the following content units: {}").format(missing_hrefs)
- )
+
+def raise_for_unknown_content_units(existing_content_units, content_units_pks_hrefs):
+ """Verify if all the specified content units were found in the database.
+
+ Args:
+ existing_content_units (pulpcore.plugin.models.Content): Content filtered by
+ specified_content_units.
+ content_units_pks_hrefs (dict): An original dictionary of pk-href pairs that
+ are used for the verification.
+ Raises:
+ ValidationError: If some of the referenced content units are not present in the database
+ """
+ existing_content_units_pks = existing_content_units.values_list("pk", flat=True)
+ existing_content_units_pks = set(map(str, existing_content_units_pks))
+
+ missing_pks = set(content_units_pks_hrefs.keys()) - existing_content_units_pks
+ if missing_pks:
+ missing_hrefs = [content_units_pks_hrefs[pk] for pk in missing_pks]
+ raise ValidationError(
+ _("Could not find the following content units: {}").format(missing_hrefs)
+ )
| {"golden_diff": "diff --git a/pulpcore/plugin/actions.py b/pulpcore/plugin/actions.py\n--- a/pulpcore/plugin/actions.py\n+++ b/pulpcore/plugin/actions.py\n@@ -1,4 +1,5 @@\n from gettext import gettext as _\n+\n from drf_spectacular.utils import extend_schema\n from rest_framework.decorators import action\n from rest_framework.serializers import ValidationError\n@@ -48,7 +49,7 @@\n existing_content_units = Content.objects.filter(pk__in=content_units_pks)\n existing_content_units.touch()\n \n- self.verify_content_units(existing_content_units, add_content_units)\n+ raise_for_unknown_content_units(existing_content_units, add_content_units)\n \n add_content_units = list(add_content_units.keys())\n \n@@ -60,7 +61,7 @@\n remove_content_units[NamedModelViewSet.extract_pk(url)] = url\n content_units_pks = set(remove_content_units.keys())\n existing_content_units = Content.objects.filter(pk__in=content_units_pks)\n- self.verify_content_units(existing_content_units, remove_content_units)\n+ raise_for_unknown_content_units(existing_content_units, remove_content_units)\n remove_content_units = list(remove_content_units.keys())\n \n task = dispatch(\n@@ -75,14 +76,24 @@\n )\n return OperationPostponedResponse(task, request)\n \n- def verify_content_units(self, content_units, all_content_units):\n- \"\"\"Verify referenced content units.\"\"\"\n- existing_content_units_pks = content_units.values_list(\"pk\", flat=True)\n- existing_content_units_pks = {str(pk) for pk in existing_content_units_pks}\n-\n- missing_pks = set(all_content_units.keys()) - existing_content_units_pks\n- if missing_pks:\n- missing_hrefs = [all_content_units[pk] for pk in missing_pks]\n- raise ValidationError(\n- _(\"Could not find the following content units: {}\").format(missing_hrefs)\n- )\n+\n+def raise_for_unknown_content_units(existing_content_units, content_units_pks_hrefs):\n+ \"\"\"Verify if all the specified content units were found in the database.\n+\n+ Args:\n+ existing_content_units (pulpcore.plugin.models.Content): Content filtered by\n+ specified_content_units.\n+ content_units_pks_hrefs (dict): An original dictionary of pk-href pairs that\n+ are used for the verification.\n+ Raises:\n+ ValidationError: If some of the referenced content units are not present in the database\n+ \"\"\"\n+ existing_content_units_pks = existing_content_units.values_list(\"pk\", flat=True)\n+ existing_content_units_pks = set(map(str, existing_content_units_pks))\n+\n+ missing_pks = set(content_units_pks_hrefs.keys()) - existing_content_units_pks\n+ if missing_pks:\n+ missing_hrefs = [content_units_pks_hrefs[pk] for pk in missing_pks]\n+ raise ValidationError(\n+ _(\"Could not find the following content units: {}\").format(missing_hrefs)\n+ )\n", "issue": "As a plugin writer, I want to have a function for touching content units\nAuthor: @lubosmj (lmjachky)\n\n\nRedmine Issue: 9419, https://pulp.plan.io/issues/9419\n\n---\n\nIn the PR https://github.com/pulp/pulpcore/pull/1624, we introduced a method that uses `bulk_touch` for updating timestamps of content units. 
We should expose this method to all plugin writers (e.g., pulp_container currently implements the same method - this creates unnecessary duplicates).\n\n\n\n", "before_files": [{"content": "from gettext import gettext as _\nfrom drf_spectacular.utils import extend_schema\nfrom rest_framework.decorators import action\nfrom rest_framework.serializers import ValidationError\n\nfrom pulpcore.app import tasks\nfrom pulpcore.app.models import Content, RepositoryVersion\nfrom pulpcore.app.response import OperationPostponedResponse\nfrom pulpcore.app.serializers import (\n AsyncOperationResponseSerializer,\n RepositoryAddRemoveContentSerializer,\n)\nfrom pulpcore.app.viewsets import NamedModelViewSet\nfrom pulpcore.tasking.tasks import dispatch\n\n\n__all__ = [\"ModifyRepositoryActionMixin\"]\n\n\nclass ModifyRepositoryActionMixin:\n @extend_schema(\n description=\"Trigger an asynchronous task to create a new repository version.\",\n summary=\"Modify Repository Content\",\n responses={202: AsyncOperationResponseSerializer},\n )\n @action(detail=True, methods=[\"post\"], serializer_class=RepositoryAddRemoveContentSerializer)\n def modify(self, request, pk):\n \"\"\"\n Queues a task that creates a new RepositoryVersion by adding and removing content units\n \"\"\"\n add_content_units = {}\n remove_content_units = {}\n\n repository = self.get_object()\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n if \"base_version\" in request.data:\n base_version_pk = self.get_resource(request.data[\"base_version\"], RepositoryVersion).pk\n else:\n base_version_pk = None\n\n if \"add_content_units\" in request.data:\n for url in request.data[\"add_content_units\"]:\n add_content_units[NamedModelViewSet.extract_pk(url)] = url\n\n content_units_pks = set(add_content_units.keys())\n existing_content_units = Content.objects.filter(pk__in=content_units_pks)\n existing_content_units.touch()\n\n self.verify_content_units(existing_content_units, add_content_units)\n\n add_content_units = list(add_content_units.keys())\n\n if \"remove_content_units\" in request.data:\n if \"*\" in request.data[\"remove_content_units\"]:\n remove_content_units = [\"*\"]\n else:\n for url in request.data[\"remove_content_units\"]:\n remove_content_units[NamedModelViewSet.extract_pk(url)] = url\n content_units_pks = set(remove_content_units.keys())\n existing_content_units = Content.objects.filter(pk__in=content_units_pks)\n self.verify_content_units(existing_content_units, remove_content_units)\n remove_content_units = list(remove_content_units.keys())\n\n task = dispatch(\n tasks.repository.add_and_remove,\n exclusive_resources=[repository],\n kwargs={\n \"repository_pk\": pk,\n \"base_version_pk\": base_version_pk,\n \"add_content_units\": add_content_units,\n \"remove_content_units\": remove_content_units,\n },\n )\n return OperationPostponedResponse(task, request)\n\n def verify_content_units(self, content_units, all_content_units):\n \"\"\"Verify referenced content units.\"\"\"\n existing_content_units_pks = content_units.values_list(\"pk\", flat=True)\n existing_content_units_pks = {str(pk) for pk in existing_content_units_pks}\n\n missing_pks = set(all_content_units.keys()) - existing_content_units_pks\n if missing_pks:\n missing_hrefs = [all_content_units[pk] for pk in missing_pks]\n raise ValidationError(\n _(\"Could not find the following content units: {}\").format(missing_hrefs)\n )\n", "path": "pulpcore/plugin/actions.py"}]} | 1,530 | 658 |
gh_patches_debug_20885 | rasdani/github-patches | git_diff | nilearn__nilearn-394 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Ugly side plots in doc
They mask part of the code they are supposed to illustrate. See an example [here](http://nilearn.github.io/building_blocks/data_preparation.html#computing-the-mask).

</issue>
<code>
[start of examples/connectivity/plot_adhd_covariance.py]
1 """
2 Computation of covariance matrix between brain regions
3 ======================================================
4
5 This example shows how to extract signals from regions defined by an atlas,
6 and to estimate a covariance matrix based on these signals.
7 """
8
9 plotted_subject = 0 # subject to plot
10
11
12 import matplotlib.pyplot as plt
13 import matplotlib
14 # Copied from matplotlib 1.2.0 for matplotlib 0.99 compatibility.
15 _bwr_data = ((0.0, 0.0, 1.0), (1.0, 1.0, 1.0), (1.0, 0.0, 0.0))
16 plt.cm.register_cmap(cmap=matplotlib.colors.LinearSegmentedColormap.from_list(
17 "bwr", _bwr_data))
18
19
20 def plot_matrices(cov, prec, title):
21 """Plot covariance and precision matrices, for a given processing. """
22
23 prec = prec.copy() # avoid side effects
24
25 # Display sparsity pattern
26 sparsity = prec == 0
27 plt.figure()
28 plt.imshow(sparsity, interpolation="nearest")
29 plt.title("%s / sparsity" % title)
30
31 # Put zeros on the diagonal, for graph clarity.
32 size = prec.shape[0]
33 prec[range(size), range(size)] = 0
34 span = max(abs(prec.min()), abs(prec.max()))
35
36 # Display covariance matrix
37 plt.figure()
38 plt.imshow(cov, interpolation="nearest",
39 vmin=-1, vmax=1, cmap=plt.cm.get_cmap("bwr"))
40 plt.colorbar()
41 plt.title("%s / covariance" % title)
42
43 # Display precision matrix
44 plt.figure()
45 plt.imshow(prec, interpolation="nearest",
46 vmin=-span, vmax=span,
47 cmap=plt.cm.get_cmap("bwr"))
48 plt.colorbar()
49 plt.title("%s / precision" % title)
50
51
52 # Fetching datasets ###########################################################
53 print("-- Fetching datasets ...")
54 from nilearn import datasets
55 msdl_atlas_dataset = datasets.fetch_msdl_atlas()
56 adhd_dataset = datasets.fetch_adhd()
57
58 # Extracting region signals ###################################################
59 import nilearn.image
60 import nilearn.input_data
61
62 from sklearn.externals.joblib import Memory
63 mem = Memory(".")
64
65 # Number of subjects to consider for group-sparse covariance
66 n_subjects = 10
67 subjects = []
68
69 func_filenames = adhd_dataset.func
70 confound_filenames = adhd_dataset.confounds
71 for func_filename, confound_filename in zip(func_filenames,
72 confound_filenames):
73 print("Processing file %s" % func_filename)
74
75 print("-- Computing confounds ...")
76 hv_confounds = mem.cache(nilearn.image.high_variance_confounds)(
77 func_filename)
78
79 print("-- Computing region signals ...")
80 masker = nilearn.input_data.NiftiMapsMasker(
81 msdl_atlas_dataset.maps, resampling_target="maps", detrend=True,
82 low_pass=None, high_pass=0.01, t_r=2.5, standardize=True,
83 memory=mem, memory_level=1, verbose=1)
84 region_ts = masker.fit_transform(func_filename,
85 confounds=[hv_confounds,
86 confound_filename])
87 subjects.append(region_ts)
88
89 # Computing group-sparse precision matrices ###################################
90 print("-- Computing group-sparse precision matrices ...")
91 from nilearn.group_sparse_covariance import GroupSparseCovarianceCV
92 gsc = GroupSparseCovarianceCV(verbose=2, n_jobs=3)
93 gsc.fit(subjects)
94
95 print("-- Computing graph-lasso precision matrices ...")
96 from sklearn import covariance
97 gl = covariance.GraphLassoCV(n_jobs=3)
98 gl.fit(subjects[plotted_subject])
99
100 # Displaying results ##########################################################
101 print("-- Displaying results")
102 title = "{0:d} GroupSparseCovariance $\\alpha={1:.2e}$".format(plotted_subject,
103 gsc.alpha_)
104 plot_matrices(gsc.covariances_[..., plotted_subject],
105 gsc.precisions_[..., plotted_subject], title)
106
107 title = "{0:d} GraphLasso $\\alpha={1:.2e}$".format(plotted_subject,
108 gl.alpha_)
109 plot_matrices(gl.covariance_, gl.precision_, title)
110
111 plt.show()
112
[end of examples/connectivity/plot_adhd_covariance.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/connectivity/plot_adhd_covariance.py b/examples/connectivity/plot_adhd_covariance.py
--- a/examples/connectivity/plot_adhd_covariance.py
+++ b/examples/connectivity/plot_adhd_covariance.py
@@ -20,13 +20,11 @@
def plot_matrices(cov, prec, title):
"""Plot covariance and precision matrices, for a given processing. """
+ # Compute sparsity pattern
+ sparsity = (prec == 0)
+
prec = prec.copy() # avoid side effects
- # Display sparsity pattern
- sparsity = prec == 0
- plt.figure()
- plt.imshow(sparsity, interpolation="nearest")
- plt.title("%s / sparsity" % title)
# Put zeros on the diagonal, for graph clarity.
size = prec.shape[0]
@@ -39,6 +37,11 @@
vmin=-1, vmax=1, cmap=plt.cm.get_cmap("bwr"))
plt.colorbar()
plt.title("%s / covariance" % title)
+
+ # Display sparsity pattern
+ plt.figure()
+ plt.imshow(sparsity, interpolation="nearest")
+ plt.title("%s / sparsity" % title)
# Display precision matrix
plt.figure()
| {"golden_diff": "diff --git a/examples/connectivity/plot_adhd_covariance.py b/examples/connectivity/plot_adhd_covariance.py\n--- a/examples/connectivity/plot_adhd_covariance.py\n+++ b/examples/connectivity/plot_adhd_covariance.py\n@@ -20,13 +20,11 @@\n def plot_matrices(cov, prec, title):\n \"\"\"Plot covariance and precision matrices, for a given processing. \"\"\"\n \n+ # Compute sparsity pattern\n+ sparsity = (prec == 0)\n+ \n prec = prec.copy() # avoid side effects\n \n- # Display sparsity pattern\n- sparsity = prec == 0\n- plt.figure()\n- plt.imshow(sparsity, interpolation=\"nearest\")\n- plt.title(\"%s / sparsity\" % title)\n \n # Put zeros on the diagonal, for graph clarity.\n size = prec.shape[0]\n@@ -39,6 +37,11 @@\n vmin=-1, vmax=1, cmap=plt.cm.get_cmap(\"bwr\"))\n plt.colorbar()\n plt.title(\"%s / covariance\" % title)\n+ \n+ # Display sparsity pattern\n+ plt.figure()\n+ plt.imshow(sparsity, interpolation=\"nearest\")\n+ plt.title(\"%s / sparsity\" % title)\n \n # Display precision matrix\n plt.figure()\n", "issue": "Ugly side plots in doc\nThey mask part of the code they are supposed to illustrate. See an example [here](http://nilearn.github.io/building_blocks/data_preparation.html#computing-the-mask).\n\n\n\n", "before_files": [{"content": "\"\"\"\nComputation of covariance matrix between brain regions\n======================================================\n\nThis example shows how to extract signals from regions defined by an atlas,\nand to estimate a covariance matrix based on these signals.\n\"\"\"\n \nplotted_subject = 0 # subject to plot\n\n\nimport matplotlib.pyplot as plt\nimport matplotlib\n# Copied from matplotlib 1.2.0 for matplotlib 0.99 compatibility.\n_bwr_data = ((0.0, 0.0, 1.0), (1.0, 1.0, 1.0), (1.0, 0.0, 0.0))\nplt.cm.register_cmap(cmap=matplotlib.colors.LinearSegmentedColormap.from_list(\n \"bwr\", _bwr_data))\n\n\ndef plot_matrices(cov, prec, title):\n \"\"\"Plot covariance and precision matrices, for a given processing. 
\"\"\"\n\n prec = prec.copy() # avoid side effects\n\n # Display sparsity pattern\n sparsity = prec == 0\n plt.figure()\n plt.imshow(sparsity, interpolation=\"nearest\")\n plt.title(\"%s / sparsity\" % title)\n\n # Put zeros on the diagonal, for graph clarity.\n size = prec.shape[0]\n prec[range(size), range(size)] = 0\n span = max(abs(prec.min()), abs(prec.max()))\n\n # Display covariance matrix\n plt.figure()\n plt.imshow(cov, interpolation=\"nearest\",\n vmin=-1, vmax=1, cmap=plt.cm.get_cmap(\"bwr\"))\n plt.colorbar()\n plt.title(\"%s / covariance\" % title)\n\n # Display precision matrix\n plt.figure()\n plt.imshow(prec, interpolation=\"nearest\",\n vmin=-span, vmax=span,\n cmap=plt.cm.get_cmap(\"bwr\"))\n plt.colorbar()\n plt.title(\"%s / precision\" % title)\n\n\n# Fetching datasets ###########################################################\nprint(\"-- Fetching datasets ...\")\nfrom nilearn import datasets\nmsdl_atlas_dataset = datasets.fetch_msdl_atlas()\nadhd_dataset = datasets.fetch_adhd()\n\n# Extracting region signals ###################################################\nimport nilearn.image\nimport nilearn.input_data\n\nfrom sklearn.externals.joblib import Memory\nmem = Memory(\".\")\n\n# Number of subjects to consider for group-sparse covariance\nn_subjects = 10\nsubjects = []\n\nfunc_filenames = adhd_dataset.func\nconfound_filenames = adhd_dataset.confounds\nfor func_filename, confound_filename in zip(func_filenames,\n confound_filenames):\n print(\"Processing file %s\" % func_filename)\n\n print(\"-- Computing confounds ...\")\n hv_confounds = mem.cache(nilearn.image.high_variance_confounds)(\n func_filename)\n\n print(\"-- Computing region signals ...\")\n masker = nilearn.input_data.NiftiMapsMasker(\n msdl_atlas_dataset.maps, resampling_target=\"maps\", detrend=True,\n low_pass=None, high_pass=0.01, t_r=2.5, standardize=True,\n memory=mem, memory_level=1, verbose=1)\n region_ts = masker.fit_transform(func_filename,\n confounds=[hv_confounds,\n confound_filename])\n subjects.append(region_ts)\n\n# Computing group-sparse precision matrices ###################################\nprint(\"-- Computing group-sparse precision matrices ...\")\nfrom nilearn.group_sparse_covariance import GroupSparseCovarianceCV\ngsc = GroupSparseCovarianceCV(verbose=2, n_jobs=3)\ngsc.fit(subjects)\n\nprint(\"-- Computing graph-lasso precision matrices ...\")\nfrom sklearn import covariance\ngl = covariance.GraphLassoCV(n_jobs=3)\ngl.fit(subjects[plotted_subject])\n\n# Displaying results ##########################################################\nprint(\"-- Displaying results\")\ntitle = \"{0:d} GroupSparseCovariance $\\\\alpha={1:.2e}$\".format(plotted_subject,\n gsc.alpha_)\nplot_matrices(gsc.covariances_[..., plotted_subject],\n gsc.precisions_[..., plotted_subject], title)\n\ntitle = \"{0:d} GraphLasso $\\\\alpha={1:.2e}$\".format(plotted_subject,\n gl.alpha_)\nplot_matrices(gl.covariance_, gl.precision_, title)\n\nplt.show()\n", "path": "examples/connectivity/plot_adhd_covariance.py"}]} | 1,788 | 290 |
gh_patches_debug_33939 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-1978 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
E2504 incorrectly rejects "Iops" property for io2/gp3 volumes
*cfn-lint version: (`cfn-lint --version`)*
cfn-lint 0.44.6
*Description of issue.*
cfn-lint produces an error "E2504: Iops shouldn't be defined for type io2 for Resource ... LaunchConfiguration/Properties/BlockDeviceMappings/0/Ebs/Iops" when setting Iops on an io2 EBS volume.
The Iops property is required for io2 and optional for gp3. [1]
Cfn-lint treats the Iops property as required for io1 and forbidden for all other volume types, which is very much not correct.
[1] https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-blockdev-template.html#cfn-ec2-blockdev-template-iops
</issue>
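
A minimal sketch of the validation behavior the rule should implement, based on the AWS documentation linked above. The helper name and test values are illustrative; `gp3` supports Iops but does not require it, so it belongs to neither set:

```python
IOPS_REQUIRED = {"io1", "io2"}
IOPS_FORBIDDEN = {"gp2", "st1", "sc1", "standard"}


def check_iops(volume_type, iops):
    """Return an error message for an invalid combination, else None."""
    if volume_type in IOPS_REQUIRED and iops is None:
        return f"VolumeType {volume_type} requires Iops to be specified"
    if volume_type in IOPS_FORBIDDEN and iops is not None:
        return f"Iops shouldn't be defined for type {volume_type}"
    return None


assert check_iops("io2", 3000) is None     # required and present
assert check_iops("gp3", None) is None     # optional, absent is fine
assert check_iops("gp2", 100) is not None  # forbidden but present
```
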
<code>
[start of src/cfnlint/rules/resources/ectwo/Ebs.py]
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 import re
6 import six
7 from cfnlint.rules import CloudFormationLintRule
8 from cfnlint.rules import RuleMatch
9
10
11 class Ebs(CloudFormationLintRule):
12 """Check if Ec2 Ebs Resource Properties"""
13 id = 'E2504'
14 shortdesc = 'Check Ec2 Ebs Properties'
15 description = 'See if Ec2 Eb2 Properties are valid'
16 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-blockdev-template.html'
17 tags = ['properties', 'ec2', 'ebs']
18
19 def _checkEbs(self, cfn, ebs, path):
20 matches = []
21
22 if isinstance(ebs, dict):
23 volume_types_obj = cfn.get_values(ebs, 'VolumeType')
24 iops_obj = cfn.get_values(ebs, 'Iops')
25 if volume_types_obj is not None:
26 for volume_type_obj in volume_types_obj:
27 volume_type = volume_type_obj.get('Value')
28 if isinstance(volume_type, six.string_types):
29 if volume_type == 'io1':
30 if iops_obj is None:
31 pathmessage = path[:] + ['VolumeType']
32 message = 'VolumeType io1 requires Iops to be specified for {0}'
33 matches.append(
34 RuleMatch(pathmessage, message.format('/'.join(map(str, pathmessage)))))
35 elif volume_type:
36 if iops_obj is not None:
37 pathmessage = path[:] + ['Iops']
38 message = 'Iops shouldn\'t be defined for type {0} for {1}'
39 matches.append(
40 RuleMatch(
41 pathmessage,
42 message.format(volume_type, '/'.join(map(str, pathmessage)))))
43
44 return matches
45
46 def match(self, cfn):
47 """Check Ec2 Ebs Resource Parameters"""
48
49 matches = []
50
51 results = cfn.get_resource_properties(['AWS::EC2::Instance', 'BlockDeviceMappings'])
52 results.extend(cfn.get_resource_properties(
53 ['AWS::AutoScaling::LaunchConfiguration', 'BlockDeviceMappings']))
54 for result in results:
55 path = result['Path']
56 if isinstance(result['Value'], list):
57 for index, properties in enumerate(result['Value']):
58 virtual_name = properties.get('VirtualName')
59 ebs = properties.get('Ebs')
60 if virtual_name:
61 # switch to regex
62 if not re.match(r'^ephemeral([0-9]|[1][0-9]|[2][0-3])$', virtual_name):
63 pathmessage = path[:] + [index, 'VirtualName']
64 message = 'Property VirtualName should be of type ephemeral(n) for {0}'
65 matches.append(
66 RuleMatch(pathmessage, message.format('/'.join(map(str, pathmessage)))))
67 elif ebs:
68 matches.extend(self._checkEbs(cfn, ebs, path[:] + [index, 'Ebs']))
69 return matches
70
[end of src/cfnlint/rules/resources/ectwo/Ebs.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cfnlint/rules/resources/ectwo/Ebs.py b/src/cfnlint/rules/resources/ectwo/Ebs.py
--- a/src/cfnlint/rules/resources/ectwo/Ebs.py
+++ b/src/cfnlint/rules/resources/ectwo/Ebs.py
@@ -9,10 +9,10 @@
class Ebs(CloudFormationLintRule):
- """Check if Ec2 Ebs Resource Properties"""
+ """Check Ec2 Ebs Resource Properties"""
id = 'E2504'
shortdesc = 'Check Ec2 Ebs Properties'
- description = 'See if Ec2 Eb2 Properties are valid'
+ description = 'See if Ec2 Ebs Properties are valid'
source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-blockdev-template.html'
tags = ['properties', 'ec2', 'ebs']
@@ -26,13 +26,15 @@
for volume_type_obj in volume_types_obj:
volume_type = volume_type_obj.get('Value')
if isinstance(volume_type, six.string_types):
- if volume_type == 'io1':
+ if volume_type in ('io1', 'io2'):
if iops_obj is None:
pathmessage = path[:] + ['VolumeType']
- message = 'VolumeType io1 requires Iops to be specified for {0}'
+ message = 'VolumeType {0} requires Iops to be specified for {1}'
matches.append(
- RuleMatch(pathmessage, message.format('/'.join(map(str, pathmessage)))))
- elif volume_type:
+ RuleMatch(
+ pathmessage,
+ message.format(volume_type, '/'.join(map(str, pathmessage)))))
+ elif volume_type in ('gp2', 'st1', 'sc1', 'standard'):
if iops_obj is not None:
pathmessage = path[:] + ['Iops']
message = 'Iops shouldn\'t be defined for type {0} for {1}'
| {"golden_diff": "diff --git a/src/cfnlint/rules/resources/ectwo/Ebs.py b/src/cfnlint/rules/resources/ectwo/Ebs.py\n--- a/src/cfnlint/rules/resources/ectwo/Ebs.py\n+++ b/src/cfnlint/rules/resources/ectwo/Ebs.py\n@@ -9,10 +9,10 @@\n \n \n class Ebs(CloudFormationLintRule):\n- \"\"\"Check if Ec2 Ebs Resource Properties\"\"\"\n+ \"\"\"Check Ec2 Ebs Resource Properties\"\"\"\n id = 'E2504'\n shortdesc = 'Check Ec2 Ebs Properties'\n- description = 'See if Ec2 Eb2 Properties are valid'\n+ description = 'See if Ec2 Ebs Properties are valid'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-blockdev-template.html'\n tags = ['properties', 'ec2', 'ebs']\n \n@@ -26,13 +26,15 @@\n for volume_type_obj in volume_types_obj:\n volume_type = volume_type_obj.get('Value')\n if isinstance(volume_type, six.string_types):\n- if volume_type == 'io1':\n+ if volume_type in ('io1', 'io2'):\n if iops_obj is None:\n pathmessage = path[:] + ['VolumeType']\n- message = 'VolumeType io1 requires Iops to be specified for {0}'\n+ message = 'VolumeType {0} requires Iops to be specified for {1}'\n matches.append(\n- RuleMatch(pathmessage, message.format('/'.join(map(str, pathmessage)))))\n- elif volume_type:\n+ RuleMatch(\n+ pathmessage,\n+ message.format(volume_type, '/'.join(map(str, pathmessage)))))\n+ elif volume_type in ('gp2', 'st1', 'sc1', 'standard'):\n if iops_obj is not None:\n pathmessage = path[:] + ['Iops']\n message = 'Iops shouldn\\'t be defined for type {0} for {1}'\n", "issue": "E2504 incorrectly rejects \"Iops\" property for io2/gp3 volumes\n*cfn-lint version: (`cfn-lint --version`)*\r\n\r\ncfn-lint 0.44.6\r\n\r\n*Description of issue.*\r\n\r\ncfn-lint produces an error \"E2504: Iops shouldn't be defined for type io2 for Resource ... LaunchConfiguration/Properties/BlockDeviceMappings/0/Ebs/Iops\" when setting Iops on a io2 EBS volume.\r\n\r\nThe Iops property is required for io2 and optional for gp3. [1]\r\n\r\nCfn-lint treats the Iops property as required for io1 and forbidden for all other volume types, which is very much not correct \r\n\r\n[1] https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-blockdev-template.html#cfn-ec2-blockdev-template-iops\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport re\nimport six\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass Ebs(CloudFormationLintRule):\n \"\"\"Check if Ec2 Ebs Resource Properties\"\"\"\n id = 'E2504'\n shortdesc = 'Check Ec2 Ebs Properties'\n description = 'See if Ec2 Eb2 Properties are valid'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-blockdev-template.html'\n tags = ['properties', 'ec2', 'ebs']\n\n def _checkEbs(self, cfn, ebs, path):\n matches = []\n\n if isinstance(ebs, dict):\n volume_types_obj = cfn.get_values(ebs, 'VolumeType')\n iops_obj = cfn.get_values(ebs, 'Iops')\n if volume_types_obj is not None:\n for volume_type_obj in volume_types_obj:\n volume_type = volume_type_obj.get('Value')\n if isinstance(volume_type, six.string_types):\n if volume_type == 'io1':\n if iops_obj is None:\n pathmessage = path[:] + ['VolumeType']\n message = 'VolumeType io1 requires Iops to be specified for {0}'\n matches.append(\n RuleMatch(pathmessage, message.format('/'.join(map(str, pathmessage)))))\n elif volume_type:\n if iops_obj is not None:\n pathmessage = path[:] + ['Iops']\n message = 'Iops shouldn\\'t be defined for type {0} for {1}'\n matches.append(\n RuleMatch(\n pathmessage,\n message.format(volume_type, '/'.join(map(str, pathmessage)))))\n\n return matches\n\n def match(self, cfn):\n \"\"\"Check Ec2 Ebs Resource Parameters\"\"\"\n\n matches = []\n\n results = cfn.get_resource_properties(['AWS::EC2::Instance', 'BlockDeviceMappings'])\n results.extend(cfn.get_resource_properties(\n ['AWS::AutoScaling::LaunchConfiguration', 'BlockDeviceMappings']))\n for result in results:\n path = result['Path']\n if isinstance(result['Value'], list):\n for index, properties in enumerate(result['Value']):\n virtual_name = properties.get('VirtualName')\n ebs = properties.get('Ebs')\n if virtual_name:\n # switch to regex\n if not re.match(r'^ephemeral([0-9]|[1][0-9]|[2][0-3])$', virtual_name):\n pathmessage = path[:] + [index, 'VirtualName']\n message = 'Property VirtualName should be of type ephemeral(n) for {0}'\n matches.append(\n RuleMatch(pathmessage, message.format('/'.join(map(str, pathmessage)))))\n elif ebs:\n matches.extend(self._checkEbs(cfn, ebs, path[:] + [index, 'Ebs']))\n return matches\n", "path": "src/cfnlint/rules/resources/ectwo/Ebs.py"}]} | 1,522 | 443 |
gh_patches_debug_11278 | rasdani/github-patches | git_diff | mindsdb__mindsdb-1893 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Tag Docker versions
Currently, the GitHub Action deploys each new version to Docker Hub only as `latest`. We also need to push an image tagged with the MindsDB version, e.g. 2.62.5 => mindsdb/mindsdb:2.62.5
</issue>
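
A minimal sketch of the desired tagging scheme, assuming the image name and version are already known (in practice the version comes from the installer API used by `docker/build.py` below, and `--all-tags` needs a reasonably recent Docker CLI):

```python
import subprocess

container_name = "mindsdb"
installer_version = "2.62.5"  # illustrative; fetched from the API in practice

# Build once with both tags so latest and the versioned tag share layers
subprocess.run(
    f"docker build -t mindsdb/{container_name}:latest "
    f"-t mindsdb/{container_name}:{installer_version} .",
    shell=True, check=True)

# Push every local tag for this repository in one command
subprocess.run(
    f"docker push mindsdb/{container_name} --all-tags",
    shell=True, check=True)
```
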
<code>
[start of docker/build.py]
1 import os
2 import sys
3 import requests
4 import subprocess
5
6 installer_version_url = 'https://public.api.mindsdb.com/installer/@@beta_or_release/docker___success___None'
7
8 api_response = requests.get(
9 installer_version_url.replace('@@beta_or_release', sys.argv[1]))
10
11 if api_response.status_code != 200:
12 exit(1)
13
14 installer_version = api_response.text
15
16 os.system('mkdir -p dist')
17
18 if sys.argv[1] == 'release':
19 container_name = 'mindsdb'
20 dockerfile_template = 'dockerfile_release.template'
21
22 elif sys.argv[1] == 'beta':
23 container_name = 'mindsdb_beta'
24 dockerfile_template = 'dockerfile_beta.template'
25
26 with open(dockerfile_template, 'r') as fp:
27 content = fp.read()
28 content = content.replace('@@beta_or_release', sys.argv[1])
29 content = content.replace('@@installer_version', installer_version)
30
31 with open('dist/Dockerfile', 'w') as fp:
32 fp.write(content)
33
34 command = (f"""
35 cd dist &&
36 docker build -t {container_name} . &&
37 docker tag {container_name} mindsdb/{container_name}:latest &&
38 docker tag {container_name} mindsdb/{container_name}:{installer_version} &&
39 docker push mindsdb/{container_name};
40 cd ..
41 """)
42
43 subprocess.run(command, shell=True, check=True)
[end of docker/build.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docker/build.py b/docker/build.py
--- a/docker/build.py
+++ b/docker/build.py
@@ -31,13 +31,11 @@
with open('dist/Dockerfile', 'w') as fp:
fp.write(content)
+print(installer_version)
command = (f"""
cd dist &&
- docker build -t {container_name} . &&
- docker tag {container_name} mindsdb/{container_name}:latest &&
- docker tag {container_name} mindsdb/{container_name}:{installer_version} &&
- docker push mindsdb/{container_name};
- cd ..
+ docker build -t mindsdb/{container_name}:latest -t mindsdb/{container_name}:{installer_version} . &&
+ docker push mindsdb/{container_name} --all-tags
""")
subprocess.run(command, shell=True, check=True)
\ No newline at end of file
| {"golden_diff": "diff --git a/docker/build.py b/docker/build.py\n--- a/docker/build.py\n+++ b/docker/build.py\n@@ -31,13 +31,11 @@\n with open('dist/Dockerfile', 'w') as fp:\n fp.write(content)\n \n+print(installer_version)\n command = (f\"\"\"\n cd dist &&\n- docker build -t {container_name} . &&\n- docker tag {container_name} mindsdb/{container_name}:latest &&\n- docker tag {container_name} mindsdb/{container_name}:{installer_version} &&\n- docker push mindsdb/{container_name};\n- cd ..\n+ docker build -t mindsdb/{container_name}:latest -t mindsdb/{container_name}:{installer_version} . &&\n+ docker push mindsdb/{container_name} --all-tags\n \"\"\")\n \n subprocess.run(command, shell=True, check=True)\n\\ No newline at end of file\n", "issue": "Tag Docker versions\nCurrently, GitHub Action will deploy a new version to dockerhub as a `latest`. We need to push the tagged version per MindsDB version e.g 2.62.5 => mindsdb/mindsdb:2.62.5\n", "before_files": [{"content": "import os\nimport sys\nimport requests\nimport subprocess\n\ninstaller_version_url = 'https://public.api.mindsdb.com/installer/@@beta_or_release/docker___success___None'\n\napi_response = requests.get(\n installer_version_url.replace('@@beta_or_release', sys.argv[1]))\n\nif api_response.status_code != 200:\n exit(1)\n\ninstaller_version = api_response.text\n\nos.system('mkdir -p dist')\n\nif sys.argv[1] == 'release':\n container_name = 'mindsdb'\n dockerfile_template = 'dockerfile_release.template'\n\nelif sys.argv[1] == 'beta':\n container_name = 'mindsdb_beta'\n dockerfile_template = 'dockerfile_beta.template'\n\nwith open(dockerfile_template, 'r') as fp:\n content = fp.read()\n content = content.replace('@@beta_or_release', sys.argv[1])\n content = content.replace('@@installer_version', installer_version)\n\nwith open('dist/Dockerfile', 'w') as fp:\n fp.write(content)\n\ncommand = (f\"\"\"\n cd dist &&\n docker build -t {container_name} . &&\n docker tag {container_name} mindsdb/{container_name}:latest &&\n docker tag {container_name} mindsdb/{container_name}:{installer_version} &&\n docker push mindsdb/{container_name};\n cd ..\n \"\"\")\n\nsubprocess.run(command, shell=True, check=True)", "path": "docker/build.py"}]} | 970 | 196 |
gh_patches_debug_23379 | rasdani/github-patches | git_diff | pre-commit__pre-commit-81 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Occasional flakiness of staged file stasher
It appears `git diff-files` is returning incorrectly in some cases that I haven't been able to pinpoint.
It results in something like this (you can see however that all the files are staged):
```
$ pre-commit
[WARNING] Unstaged files detected.
Stashing unstaged files to /home/anthony/workspace/pre-commit/.pre-commit-files/patch1397370090.
Trim Trailing Whitespace............................................Passed
Fix End of Files....................................................Passed
Check Yaml..........................................................Passed
Debug Statements (Python)...........................................Passed
Tests should end in _test.py........................................Passed
Pyflakes............................................................Passed
Validate Pre-Commit Config..........................................Passed
Validate Pre-Commit Manifest........................................Passed
[WARNING] Stashed changes conflicted with hook auto-fixes... Rolling back fixes...
Traceback (most recent call last):
File "/home/anthony/workspace/pre-commit/venv-pre_commit/bin/pre-commit", line 9, in <module>
load_entry_point('pre-commit==0.0.0', 'console_scripts', 'pre-commit')()
File "/home/anthony/workspace/pre-commit/pre_commit/util.py", line 52, in wrapper
return func(argv)
File "/home/anthony/workspace/pre-commit/pre_commit/run.py", line 143, in run
return _run(runner, args)
File "/home/anthony/workspace/pre-commit/pre_commit/run.py", line 95, in _run
return run_hooks(runner, args)
File "/usr/lib/python2.7/contextlib.py", line 24, in __exit__
self.gen.next()
File "/home/anthony/workspace/pre-commit/pre_commit/staged_files_only.py", line 51, in staged_files_only
cmd_runner.run(['git', 'apply', patch_filename])
File "/home/anthony/workspace/pre-commit/pre_commit/prefixed_command_runner.py", line 67, in run
returncode, replaced_cmd, retcode, output=(stdout, stderr),
pre_commit.prefixed_command_runner.CalledProcessError: Command: ['git', 'apply', '/home/anthony/workspace/pre-commit/.pre-commit-files/patch1397370090']
Return code: 128
Expected return code: 0
Output: ('', 'fatal: unrecognized input\n')
$ git status
# On branch rebuild_venv
# Changes to be committed:
# (use "git reset HEAD <file>..." to unstage)
#
# modified: .gitignore
# modified: Makefile
#
```
The "stashed diff" is an empty file. I think the "fix" is to check if the diff contains anything before printing the warning message and entering the branch that isn't a noop context manager.
</issue>
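
A minimal sketch of the proposed guard: only treat the working tree as dirty when the diff output is actually non-empty. Here `cmd_runner` stands in for pre-commit's `PrefixedCommandRunner`:

```python
def get_unstaged_diff(cmd_runner):
    """Return the diff text if there are real unstaged changes, else None."""
    retcode, diff_stdout, _ = cmd_runner.run(
        ['git', 'diff', '--ignore-submodules', '--binary', '--exit-code'],
        retcode=None,
    )
    if retcode and diff_stdout.strip():
        return diff_stdout  # non-empty patch worth stashing and restoring
    return None  # nothing to stash, so skip the warning and the patch file
```
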
<code>
[start of pre_commit/staged_files_only.py]
1 import contextlib
2 import logging
3 import time
4
5 from pre_commit.prefixed_command_runner import CalledProcessError
6
7
8 logger = logging.getLogger('pre_commit')
9
10
11 @contextlib.contextmanager
12 def staged_files_only(cmd_runner):
13 """Clear any unstaged changes from the git working directory inside this
14 context.
15
16 Args:
17 cmd_runner - PrefixedCommandRunner
18 """
19 # Determine if there are unstaged files
20 retcode, _, _ = cmd_runner.run(
21 ['git', 'diff-files', '--quiet'],
22 retcode=None,
23 )
24 if retcode:
25 patch_filename = cmd_runner.path('patch{0}'.format(int(time.time())))
26 logger.warning('Unstaged files detected.')
27 logger.info(
28 'Stashing unstaged files to {0}.'.format(patch_filename),
29 )
30 # Save the current unstaged changes as a patch
31 with open(patch_filename, 'w') as patch_file:
32 cmd_runner.run(['git', 'diff', '--binary'], stdout=patch_file)
33
34 # Clear the working directory of unstaged changes
35 cmd_runner.run(['git', 'checkout', '--', '.'])
36 try:
37 yield
38 finally:
39 # Try to apply the patch we saved
40 try:
41 cmd_runner.run(['git', 'apply', patch_filename])
42 except CalledProcessError:
43 logger.warning(
44 'Stashed changes conflicted with hook auto-fixes... '
45 'Rolling back fixes...'
46 )
47 # We failed to apply the patch, presumably due to fixes made
48 # by hooks.
49 # Roll back the changes made by hooks.
50 cmd_runner.run(['git', 'checkout', '--', '.'])
51 cmd_runner.run(['git', 'apply', patch_filename])
52 logger.info('Restored changes from {0}.'.format(patch_filename))
53 else:
54 # There weren't any staged files so we don't need to do anything
55 # special
56 yield
57
[end of pre_commit/staged_files_only.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/staged_files_only.py b/pre_commit/staged_files_only.py
--- a/pre_commit/staged_files_only.py
+++ b/pre_commit/staged_files_only.py
@@ -17,11 +17,11 @@
cmd_runner - PrefixedCommandRunner
"""
# Determine if there are unstaged files
- retcode, _, _ = cmd_runner.run(
- ['git', 'diff-files', '--quiet'],
+ retcode, diff_stdout, _ = cmd_runner.run(
+ ['git', 'diff', '--ignore-submodules', '--binary', '--exit-code'],
retcode=None,
)
- if retcode:
+ if retcode and diff_stdout.strip():
patch_filename = cmd_runner.path('patch{0}'.format(int(time.time())))
logger.warning('Unstaged files detected.')
logger.info(
@@ -29,7 +29,7 @@
)
# Save the current unstaged changes as a patch
with open(patch_filename, 'w') as patch_file:
- cmd_runner.run(['git', 'diff', '--binary'], stdout=patch_file)
+ patch_file.write(diff_stdout)
# Clear the working directory of unstaged changes
cmd_runner.run(['git', 'checkout', '--', '.'])
| {"golden_diff": "diff --git a/pre_commit/staged_files_only.py b/pre_commit/staged_files_only.py\n--- a/pre_commit/staged_files_only.py\n+++ b/pre_commit/staged_files_only.py\n@@ -17,11 +17,11 @@\n cmd_runner - PrefixedCommandRunner\n \"\"\"\n # Determine if there are unstaged files\n- retcode, _, _ = cmd_runner.run(\n- ['git', 'diff-files', '--quiet'],\n+ retcode, diff_stdout, _ = cmd_runner.run(\n+ ['git', 'diff', '--ignore-submodules', '--binary', '--exit-code'],\n retcode=None,\n )\n- if retcode:\n+ if retcode and diff_stdout.strip():\n patch_filename = cmd_runner.path('patch{0}'.format(int(time.time())))\n logger.warning('Unstaged files detected.')\n logger.info(\n@@ -29,7 +29,7 @@\n )\n # Save the current unstaged changes as a patch\n with open(patch_filename, 'w') as patch_file:\n- cmd_runner.run(['git', 'diff', '--binary'], stdout=patch_file)\n+ patch_file.write(diff_stdout)\n \n # Clear the working directory of unstaged changes\n cmd_runner.run(['git', 'checkout', '--', '.'])\n", "issue": "Occasional flakiness of staged file stasher\nIt appears `git diff-files` is returning incorrectly in some case that I haven't been able to pinpoint.\n\nIt results in something like this (you can see however that all the files are staged):\n\n```\n$ pre-commit \n[WARNING] Unstaged files detected.\nStashing unstaged files to /home/anthony/workspace/pre-commit/.pre-commit-files/patch1397370090.\nTrim Trailing Whitespace............................................Passed\nFix End of Files....................................................Passed\nCheck Yaml..........................................................Passed\nDebug Statements (Python)...........................................Passed\nTests should end in _test.py........................................Passed\nPyflakes............................................................Passed\nValidate Pre-Commit Config..........................................Passed\nValidate Pre-Commit Manifest........................................Passed\n[WARNING] Stashed changes conflicted with hook auto-fixes... Rolling back fixes...\nTraceback (most recent call last):\n File \"/home/anthony/workspace/pre-commit/venv-pre_commit/bin/pre-commit\", line 9, in <module>\n load_entry_point('pre-commit==0.0.0', 'console_scripts', 'pre-commit')()\n File \"/home/anthony/workspace/pre-commit/pre_commit/util.py\", line 52, in wrapper\n return func(argv)\n File \"/home/anthony/workspace/pre-commit/pre_commit/run.py\", line 143, in run\n return _run(runner, args)\n File \"/home/anthony/workspace/pre-commit/pre_commit/run.py\", line 95, in _run\n return run_hooks(runner, args)\n File \"/usr/lib/python2.7/contextlib.py\", line 24, in __exit__\n self.gen.next()\n File \"/home/anthony/workspace/pre-commit/pre_commit/staged_files_only.py\", line 51, in staged_files_only\n cmd_runner.run(['git', 'apply', patch_filename])\n File \"/home/anthony/workspace/pre-commit/pre_commit/prefixed_command_runner.py\", line 67, in run\n returncode, replaced_cmd, retcode, output=(stdout, stderr),\npre_commit.prefixed_command_runner.CalledProcessError: Command: ['git', 'apply', '/home/anthony/workspace/pre-commit/.pre-commit-files/patch1397370090']\nReturn code: 128\nExpected return code: 0\nOutput: ('', 'fatal: unrecognized input\\n')\n$ git status\n# On branch rebuild_venv\n# Changes to be committed:\n# (use \"git reset HEAD <file>...\" to unstage)\n#\n# modified: .gitignore\n# modified: Makefile\n#\n```\n\nThe \"stashed diff\" is an empty file. 
I think the \"fix\" is to check if the diff contains anything before printing the warning message and entering the branch that isn't a noop context manager.\n\n", "before_files": [{"content": "import contextlib\nimport logging\nimport time\n\nfrom pre_commit.prefixed_command_runner import CalledProcessError\n\n\nlogger = logging.getLogger('pre_commit')\n\n\[email protected]\ndef staged_files_only(cmd_runner):\n \"\"\"Clear any unstaged changes from the git working directory inside this\n context.\n\n Args:\n cmd_runner - PrefixedCommandRunner\n \"\"\"\n # Determine if there are unstaged files\n retcode, _, _ = cmd_runner.run(\n ['git', 'diff-files', '--quiet'],\n retcode=None,\n )\n if retcode:\n patch_filename = cmd_runner.path('patch{0}'.format(int(time.time())))\n logger.warning('Unstaged files detected.')\n logger.info(\n 'Stashing unstaged files to {0}.'.format(patch_filename),\n )\n # Save the current unstaged changes as a patch\n with open(patch_filename, 'w') as patch_file:\n cmd_runner.run(['git', 'diff', '--binary'], stdout=patch_file)\n\n # Clear the working directory of unstaged changes\n cmd_runner.run(['git', 'checkout', '--', '.'])\n try:\n yield\n finally:\n # Try to apply the patch we saved\n try:\n cmd_runner.run(['git', 'apply', patch_filename])\n except CalledProcessError:\n logger.warning(\n 'Stashed changes conflicted with hook auto-fixes... '\n 'Rolling back fixes...'\n )\n # We failed to apply the patch, presumably due to fixes made\n # by hooks.\n # Roll back the changes made by hooks.\n cmd_runner.run(['git', 'checkout', '--', '.'])\n cmd_runner.run(['git', 'apply', patch_filename])\n logger.info('Restored changes from {0}.'.format(patch_filename))\n else:\n # There weren't any staged files so we don't need to do anything\n # special\n yield\n", "path": "pre_commit/staged_files_only.py"}]} | 1,677 | 280 |
gh_patches_debug_17535 | rasdani/github-patches | git_diff | doccano__doccano-607 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Enhancement request] Avoid duplicate key value error on launching
Enhancement description
---------
I have these errors in the log on each start:
```
postgres_1 | 2020-03-04 05:34:30.467 UTC [27] ERROR: duplicate key value violates unique constraint "api_role_name_key"
postgres_1 | 2020-03-04 05:34:30.467 UTC [27] DETAIL: Key (name)=(project_admin) already exists.
postgres_1 | 2020-03-04 05:34:30.467 UTC [27] STATEMENT: INSERT INTO "api_role" ("name", "description", "created_at", "updated_at") VALUES ('project_admin', '', '2020-03-04T05:34:30.460290+00:00'::timestamptz, '2020-03-04T05:34:30.460312+00:00'::timestamptz) RETURNING "api_role"."id"
backend_1 | Datbase Error: "duplicate key value violates unique constraint "api_role_name_key"
backend_1 | DETAIL: Key (name)=(project_admin) already exists.
backend_1 | "
postgres_1 | 2020-03-04 05:34:30.468 UTC [27] ERROR: duplicate key value violates unique constraint "api_role_name_key"
postgres_1 | 2020-03-04 05:34:30.468 UTC [27] DETAIL: Key (name)=(annotator) already exists.
postgres_1 | 2020-03-04 05:34:30.468 UTC [27] STATEMENT: INSERT INTO "api_role" ("name", "description", "created_at", "updated_at") VALUES ('annotator', '', '2020-03-04T05:34:30.467909+00:00'::timestamptz, '2020-03-04T05:34:30.467926+00:00'::timestamptz) RETURNING "api_role"."id"
backend_1 | Datbase Error: "duplicate key value violates unique constraint "api_role_name_key"
backend_1 | DETAIL: Key (name)=(annotator) already exists.
backend_1 | "
postgres_1 | 2020-03-04 05:34:30.468 UTC [27] ERROR: duplicate key value violates unique constraint "api_role_name_key"
postgres_1 | 2020-03-04 05:34:30.468 UTC [27] DETAIL: Key (name)=(annotation_approver) already exists.
postgres_1 | 2020-03-04 05:34:30.468 UTC [27] STATEMENT: INSERT INTO "api_role" ("name", "description", "created_at", "updated_at") VALUES ('annotation_approver', '', '2020-03-04T05:34:30.468689+00:00'::timestamptz, '2020-03-04T05:34:30.468706+00:00'::timestamptz) RETURNING "api_role"."id"
backend_1 | Datbase Error: "duplicate key value violates unique constraint "api_role_name_key"
backend_1 | DETAIL: Key (name)=(annotation_approver) already exists.
backend_1 | "
postgres_1 | 2020-03-04 05:34:32.026 UTC [28] ERROR: duplicate key value violates unique constraint "auth_user_username_key"
postgres_1 | 2020-03-04 05:34:32.026 UTC [28] DETAIL: Key (username)=(admin) already exists.
postgres_1 | 2020-03-04 05:34:32.026 UTC [28] STATEMENT: INSERT INTO "auth_user" ("password", "last_login", "is_superuser", "username", "first_name", "last_name", "email", "is_staff", "is_active", "date_joined") VALUES ('<...>', NULL, true, 'admin', '', '', '[email protected]', true, true, '2020-03-04T05:34:32.023520+00:00'::timestamptz) RETURNING "auth_user"."id"
backend_1 | User admin already exists.
backend_1 | CommandError: Error: That username is already taken.
```
I propose checking whether these rows already exist before creating them, to avoid these errors.
</issue>
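
A minimal sketch of an idempotent variant of the role-creation loop; Django's `get_or_create` is one way to express the existence check (the patch below keeps an explicit `filter(...).exists()` check instead):

```python
from api.models import Role


def create_default_roles(role_names):
    for role_name in role_names:
        # Only inserts a row when no Role with this name exists, so
        # repeated launches no longer hit the unique constraint.
        Role.objects.get_or_create(name=role_name)
```
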
<code>
[start of app/server/management/commands/create_roles.py]
1 from api.models import Role
2 from django.core.management.base import BaseCommand
3 from django.db import DatabaseError
4 from django.conf import settings
5
6
7 class Command(BaseCommand):
8 help = 'Non-interactively create default roles'
9
10 def handle(self, *args, **options):
11 try:
12 role_names = [settings.ROLE_PROJECT_ADMIN, settings.ROLE_ANNOTATOR, settings.ROLE_ANNOTATION_APPROVER]
13 except KeyError as key_error:
14 self.stderr.write(self.style.ERROR(f'Missing Key: "{key_error}"'))
15 for role_name in role_names:
16 role = Role()
17 role.name = role_name
18 try:
19 role.save()
20 except DatabaseError as db_error:
21 self.stderr.write(self.style.ERROR(f'Datbase Error: "{db_error}"'))
22 else:
23 self.stdout.write(self.style.SUCCESS(f'Role created successfully "{role_name}"'))
24
[end of app/server/management/commands/create_roles.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/server/management/commands/create_roles.py b/app/server/management/commands/create_roles.py
--- a/app/server/management/commands/create_roles.py
+++ b/app/server/management/commands/create_roles.py
@@ -13,11 +13,13 @@
except KeyError as key_error:
self.stderr.write(self.style.ERROR(f'Missing Key: "{key_error}"'))
for role_name in role_names:
+ if Role.objects.filter(name=role_name).exists():
+ continue
role = Role()
role.name = role_name
try:
role.save()
except DatabaseError as db_error:
- self.stderr.write(self.style.ERROR(f'Datbase Error: "{db_error}"'))
+ self.stderr.write(self.style.ERROR(f'Database Error: "{db_error}"'))
else:
self.stdout.write(self.style.SUCCESS(f'Role created successfully "{role_name}"'))
| {"golden_diff": "diff --git a/app/server/management/commands/create_roles.py b/app/server/management/commands/create_roles.py\n--- a/app/server/management/commands/create_roles.py\n+++ b/app/server/management/commands/create_roles.py\n@@ -13,11 +13,13 @@\n except KeyError as key_error:\n self.stderr.write(self.style.ERROR(f'Missing Key: \"{key_error}\"'))\n for role_name in role_names:\n+ if Role.objects.filter(name=role_name).exists():\n+ continue\n role = Role()\n role.name = role_name\n try:\n role.save()\n except DatabaseError as db_error:\n- self.stderr.write(self.style.ERROR(f'Datbase Error: \"{db_error}\"'))\n+ self.stderr.write(self.style.ERROR(f'Database Error: \"{db_error}\"'))\n else:\n self.stdout.write(self.style.SUCCESS(f'Role created successfully \"{role_name}\"'))\n", "issue": "[Enhancement request] Avoid duplicate key value error on launching\nEnhancement description\r\n---------\r\nI have these errors in log on each start:\r\n```\r\npostgres_1 | 2020-03-04 05:34:30.467 UTC [27] ERROR: duplicate key value violates unique constraint \"api_role_name_key\"\r\npostgres_1 | 2020-03-04 05:34:30.467 UTC [27] DETAIL: Key (name)=(project_admin) already exists.\r\npostgres_1 | 2020-03-04 05:34:30.467 UTC [27] STATEMENT: INSERT INTO \"api_role\" (\"name\", \"description\", \"created_at\", \"updated_at\") VALUES ('project_admin', '', '2020-03-04T05:34:30.460290+00:00'::timestamptz, '2020-03-04T05:34:30.460312+00:00'::timestamptz) RETURNING \"api_role\".\"id\"\r\nbackend_1 | Datbase Error: \"duplicate key value violates unique constraint \"api_role_name_key\"\r\nbackend_1 | DETAIL: Key (name)=(project_admin) already exists.\r\nbackend_1 | \"\r\npostgres_1 | 2020-03-04 05:34:30.468 UTC [27] ERROR: duplicate key value violates unique constraint \"api_role_name_key\"\r\npostgres_1 | 2020-03-04 05:34:30.468 UTC [27] DETAIL: Key (name)=(annotator) already exists.\r\npostgres_1 | 2020-03-04 05:34:30.468 UTC [27] STATEMENT: INSERT INTO \"api_role\" (\"name\", \"description\", \"created_at\", \"updated_at\") VALUES ('annotator', '', '2020-03-04T05:34:30.467909+00:00'::timestamptz, '2020-03-04T05:34:30.467926+00:00'::timestamptz) RETURNING \"api_role\".\"id\"\r\nbackend_1 | Datbase Error: \"duplicate key value violates unique constraint \"api_role_name_key\"\r\nbackend_1 | DETAIL: Key (name)=(annotator) already exists.\r\nbackend_1 | \"\r\npostgres_1 | 2020-03-04 05:34:30.468 UTC [27] ERROR: duplicate key value violates unique constraint \"api_role_name_key\"\r\npostgres_1 | 2020-03-04 05:34:30.468 UTC [27] DETAIL: Key (name)=(annotation_approver) already exists.\r\npostgres_1 | 2020-03-04 05:34:30.468 UTC [27] STATEMENT: INSERT INTO \"api_role\" (\"name\", \"description\", \"created_at\", \"updated_at\") VALUES ('annotation_approver', '', '2020-03-04T05:34:30.468689+00:00'::timestamptz, '2020-03-04T05:34:30.468706+00:00'::timestamptz) RETURNING \"api_role\".\"id\"\r\nbackend_1 | Datbase Error: \"duplicate key value violates unique constraint \"api_role_name_key\"\r\nbackend_1 | DETAIL: Key (name)=(annotation_approver) already exists.\r\nbackend_1 | \"\r\npostgres_1 | 2020-03-04 05:34:32.026 UTC [28] ERROR: duplicate key value violates unique constraint \"auth_user_username_key\"\r\npostgres_1 | 2020-03-04 05:34:32.026 UTC [28] DETAIL: Key (username)=(admin) already exists.\r\npostgres_1 | 2020-03-04 05:34:32.026 UTC [28] STATEMENT: INSERT INTO \"auth_user\" (\"password\", \"last_login\", \"is_superuser\", \"username\", \"first_name\", \"last_name\", \"email\", \"is_staff\", \"is_active\", 
\"date_joined\") VALUES ('<...>', NULL, true, 'admin', '', '', '[email protected]', true, true, '2020-03-04T05:34:32.023520+00:00'::timestamptz) RETURNING \"auth_user\".\"id\"\r\nbackend_1 | User admin already exists.\r\nbackend_1 | CommandError: Error: That username is already taken.\r\n```\r\n\r\nPropose to check existence of specified table's rows before creation to avoid these errors.\n", "before_files": [{"content": "from api.models import Role\nfrom django.core.management.base import BaseCommand\nfrom django.db import DatabaseError\nfrom django.conf import settings\n\n\nclass Command(BaseCommand):\n help = 'Non-interactively create default roles'\n\n def handle(self, *args, **options):\n try:\n role_names = [settings.ROLE_PROJECT_ADMIN, settings.ROLE_ANNOTATOR, settings.ROLE_ANNOTATION_APPROVER]\n except KeyError as key_error:\n self.stderr.write(self.style.ERROR(f'Missing Key: \"{key_error}\"'))\n for role_name in role_names:\n role = Role()\n role.name = role_name\n try:\n role.save()\n except DatabaseError as db_error:\n self.stderr.write(self.style.ERROR(f'Datbase Error: \"{db_error}\"'))\n else:\n self.stdout.write(self.style.SUCCESS(f'Role created successfully \"{role_name}\"'))\n", "path": "app/server/management/commands/create_roles.py"}]} | 1,991 | 196 |
gh_patches_debug_17960 | rasdani/github-patches | git_diff | PaddlePaddle__PaddleSeg-2277 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug] Dice loss bug
Environment: aistudio, Tesla V100
paddlepaddle=2.3.0
paddleseg=2.5.0

Bug: in the dice loss forward pass, ignore_index is not handled before the labels are converted to one-hot encoding. When the ignore_index value is greater than num_classes (for example, ignore_index is 255 and there are 19 classes), a CUDA error 719 is raised. The root cause is an invalid one-hot conversion.
Code link: https://github.com/PaddlePaddle/PaddleSeg/blob/35a4c4d229df2d4a5ca724ad442bf5e0f75b4823/paddleseg/models/losses/dice_loss.py#L46

The mask computation can be moved before the one-hot conversion, and ignore_index can then be remapped to a value smaller than num_classes:
```python
def forward(self, logits, labels):
num_class = logits.shape[1]
if self.weight is not None:
assert num_class == len(self.weight), \
"The lenght of weight should be euqal to the num class"
if logits.shape != labels.shape:
labels = labels.unsqueeze(axis=1)
labels = F.interpolate(labels, size=logits.shape[2:], mode='nearest')
labels = labels.squeeze(axis=1)
logits = F.softmax(logits, axis=1)
mask = labels != self.ignore_index
mask = paddle.cast(paddle.unsqueeze(mask, 1), 'float32')
labels[labels == self.ignore_index] = 0
labels_one_hot = F.one_hot(labels, num_class)
labels_one_hot = paddle.transpose(labels_one_hot, [0, 3, 1, 2])
dice_loss = 0.0
for i in range(num_class):
dice_loss_i = dice_loss_helper(logits[:, i], labels_one_hot[:, i],
mask, self.smooth, self.eps)
if self.weight is not None:
dice_loss_i *= self.weight[i]
dice_loss += dice_loss_i
dice_loss = dice_loss / num_class
return dice_loss
```
</issue>
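
A minimal sketch reproducing the root cause: `F.one_hot` expects every label in `[0, num_classes)`, so an unmasked ignore label of 255 is out of range. The exact error surfaced may differ between CPU and GPU:

```python
import paddle
import paddle.nn.functional as F

num_classes = 19
labels = paddle.to_tensor([0, 5, 255])  # 255 is the ignore label

# F.one_hot(labels, num_classes) would fail here: 255 >= num_classes.
# Build the mask first, then remap ignored positions to a valid class id.
mask = labels != 255
labels = paddle.where(mask, labels, paddle.zeros_like(labels))
one_hot = F.one_hot(labels, num_classes)  # safe; ignored pixels are masked in the loss
```
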
<code>
[start of paddleseg/models/losses/dice_loss.py]
1 # you may not use this file except in compliance with the License.
2 # You may obtain a copy of the License at
3 #
4 # http://www.apache.org/licenses/LICENSE-2.0
5 #
6 # Unless required by applicable law or agreed to in writing, software
7 # distributed under the License is distributed on an "AS IS" BASIS,
8 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 # See the License for the specific language governing permissions and
10 # limitations under the License.
11
12 import paddle
13 from paddle import nn
14 import paddle.nn.functional as F
15
16 from paddleseg.cvlibs import manager
17
18
19 @manager.LOSSES.add_component
20 class DiceLoss(nn.Layer):
21 """
22 The implements of the dice loss.
23
24 Args:
25 weight (list[float], optional): The weight for each class. Default: None.
26 ignore_index (int64): ignore_index (int64, optional): Specifies a target value that
27 is ignored and does not contribute to the input gradient. Default ``255``.
28 smooth (float32): Laplace smoothing to smooth dice loss and accelerate convergence.
29 Default: 1.0
30 """
31
32 def __init__(self, weight=None, ignore_index=255, smooth=1.0):
33 super().__init__()
34 self.weight = weight
35 self.ignore_index = ignore_index
36 self.smooth = smooth
37 self.eps = 1e-8
38
39 def forward(self, logits, labels):
40 num_class = logits.shape[1]
41 if self.weight is not None:
42 assert num_class == len(self.weight), \
43 "The lenght of weight should be euqal to the num class"
44
45 logits = F.softmax(logits, axis=1)
46 labels_one_hot = F.one_hot(labels, num_class)
47 labels_one_hot = paddle.transpose(labels_one_hot, [0, 3, 1, 2])
48
49 mask = labels != self.ignore_index
50 mask = paddle.cast(paddle.unsqueeze(mask, 1), 'float32')
51
52 dice_loss = 0.0
53 for i in range(num_class):
54 dice_loss_i = dice_loss_helper(logits[:, i], labels_one_hot[:, i],
55 mask, self.smooth, self.eps)
56 if self.weight is not None:
57 dice_loss_i *= self.weight[i]
58 dice_loss += dice_loss_i
59 dice_loss = dice_loss / num_class
60
61 return dice_loss
62
63
64 def dice_loss_helper(logit, label, mask, smooth, eps):
65 assert logit.shape == label.shape, \
66 "The shape of logit and label should be the same"
67 logit = paddle.reshape(logit, [0, -1])
68 label = paddle.reshape(label, [0, -1])
69 mask = paddle.reshape(mask, [0, -1])
70 logit *= mask
71 label *= mask
72 intersection = paddle.sum(logit * label, axis=1)
73 cardinality = paddle.sum(logit + label, axis=1)
74 dice_loss = 1 - (2 * intersection + smooth) / (cardinality + smooth + eps)
75 dice_loss = dice_loss.mean()
76 return dice_loss
77
[end of paddleseg/models/losses/dice_loss.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/paddleseg/models/losses/dice_loss.py b/paddleseg/models/losses/dice_loss.py
--- a/paddleseg/models/losses/dice_loss.py
+++ b/paddleseg/models/losses/dice_loss.py
@@ -42,13 +42,14 @@
assert num_class == len(self.weight), \
"The lenght of weight should be euqal to the num class"
- logits = F.softmax(logits, axis=1)
- labels_one_hot = F.one_hot(labels, num_class)
- labels_one_hot = paddle.transpose(labels_one_hot, [0, 3, 1, 2])
-
mask = labels != self.ignore_index
mask = paddle.cast(paddle.unsqueeze(mask, 1), 'float32')
+ labels[labels == self.ignore_index] = 0
+ labels_one_hot = F.one_hot(labels, num_class)
+ labels_one_hot = paddle.transpose(labels_one_hot, [0, 3, 1, 2])
+ logits = F.softmax(logits, axis=1)
+
dice_loss = 0.0
for i in range(num_class):
dice_loss_i = dice_loss_helper(logits[:, i], labels_one_hot[:, i],
| {"golden_diff": "diff --git a/paddleseg/models/losses/dice_loss.py b/paddleseg/models/losses/dice_loss.py\n--- a/paddleseg/models/losses/dice_loss.py\n+++ b/paddleseg/models/losses/dice_loss.py\n@@ -42,13 +42,14 @@\n assert num_class == len(self.weight), \\\n \"The lenght of weight should be euqal to the num class\"\n \n- logits = F.softmax(logits, axis=1)\n- labels_one_hot = F.one_hot(labels, num_class)\n- labels_one_hot = paddle.transpose(labels_one_hot, [0, 3, 1, 2])\n-\n mask = labels != self.ignore_index\n mask = paddle.cast(paddle.unsqueeze(mask, 1), 'float32')\n \n+ labels[labels == self.ignore_index] = 0\n+ labels_one_hot = F.one_hot(labels, num_class)\n+ labels_one_hot = paddle.transpose(labels_one_hot, [0, 3, 1, 2])\n+ logits = F.softmax(logits, axis=1)\n+\n dice_loss = 0.0\n for i in range(num_class):\n dice_loss_i = dice_loss_helper(logits[:, i], labels_one_hot[:, i],\n", "issue": "[Bug] Dice loss bug\n\u73af\u5883\uff1aaistudio tesla v100\r\npaddlepaddle=2.3.0\r\npaddleseg=2.5.0\r\n\r\nbug\uff1adice loss \u524d\u5411\u4f20\u64ad\u65f6\uff0clabel\u8f6c\u6362\u4e3aone-hot\u7f16\u7801\u524d\uff0c\u5e76\u672a\u5bf9ignore_index\u8fdb\u884c\u5904\u7406\uff0c\u5f53ignore_index\u503c\u5927\u4e8enum_classes\uff08\u6bd4\u5982ignore index\u4e3a255\uff0c\u7c7b\u522b\u4e3a19\uff09\u65f6\uff0c\u62a5\u9519cuda 719\u3002\u539f\u56e0\u4e3aone-hot\u8f6c\u6362\u9519\u8bef\u3002 \r\n\u4ee3\u7801\u94fe\u63a5\uff1ahttps://github.com/PaddlePaddle/PaddleSeg/blob/35a4c4d229df2d4a5ca724ad442bf5e0f75b4823/paddleseg/models/losses/dice_loss.py#L46\r\n\r\n\u53ef\u5c06mask\u90e8\u5206\u653e\u5230one-hot\u4e4b\u524d\uff0c\u7136\u540e\u5c06ignore_index\u8d4b\u503c\u4e00\u4e2a\u5c0f\u4e8enum_classes\u7684\u503c\uff1a\r\n```python\r\n def forward(self, logits, labels):\r\n num_class = logits.shape[1]\r\n if self.weight is not None:\r\n assert num_class == len(self.weight), \\\r\n \"The lenght of weight should be euqal to the num class\"\r\n if logits.shape != labels.shape:\r\n labels = labels.unsqueeze(axis=1)\r\n labels = F.interpolate(labels, size=logits.shape[2:], mode='nearest')\r\n labels = labels.squeeze(axis=1)\r\n logits = F.softmax(logits, axis=1)\r\n mask = labels != self.ignore_index\r\n mask = paddle.cast(paddle.unsqueeze(mask, 1), 'float32')\r\n labels[labels == self.ignore_index] = 0\r\n labels_one_hot = F.one_hot(labels, num_class)\r\n labels_one_hot = paddle.transpose(labels_one_hot, [0, 3, 1, 2])\r\n \r\n dice_loss = 0.0\r\n for i in range(num_class):\r\n dice_loss_i = dice_loss_helper(logits[:, i], labels_one_hot[:, i],\r\n mask, self.smooth, self.eps)\r\n if self.weight is not None:\r\n dice_loss_i *= self.weight[i]\r\n dice_loss += dice_loss_i\r\n dice_loss = dice_loss / num_class\r\n\r\n return dice_loss\r\n```\r\n\n", "before_files": [{"content": "# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport paddle\nfrom paddle import nn\nimport paddle.nn.functional as F\n\nfrom paddleseg.cvlibs import manager\n\n\[email protected]_component\nclass DiceLoss(nn.Layer):\n \"\"\"\n The implements of the dice loss.\n\n Args:\n weight (list[float], optional): The 
weight for each class. Default: None.\n ignore_index (int64): ignore_index (int64, optional): Specifies a target value that\n is ignored and does not contribute to the input gradient. Default ``255``.\n smooth (float32): Laplace smoothing to smooth dice loss and accelerate convergence.\n Default: 1.0\n \"\"\"\n\n def __init__(self, weight=None, ignore_index=255, smooth=1.0):\n super().__init__()\n self.weight = weight\n self.ignore_index = ignore_index\n self.smooth = smooth\n self.eps = 1e-8\n\n def forward(self, logits, labels):\n num_class = logits.shape[1]\n if self.weight is not None:\n assert num_class == len(self.weight), \\\n \"The lenght of weight should be euqal to the num class\"\n\n logits = F.softmax(logits, axis=1)\n labels_one_hot = F.one_hot(labels, num_class)\n labels_one_hot = paddle.transpose(labels_one_hot, [0, 3, 1, 2])\n\n mask = labels != self.ignore_index\n mask = paddle.cast(paddle.unsqueeze(mask, 1), 'float32')\n\n dice_loss = 0.0\n for i in range(num_class):\n dice_loss_i = dice_loss_helper(logits[:, i], labels_one_hot[:, i],\n mask, self.smooth, self.eps)\n if self.weight is not None:\n dice_loss_i *= self.weight[i]\n dice_loss += dice_loss_i\n dice_loss = dice_loss / num_class\n\n return dice_loss\n\n\ndef dice_loss_helper(logit, label, mask, smooth, eps):\n assert logit.shape == label.shape, \\\n \"The shape of logit and label should be the same\"\n logit = paddle.reshape(logit, [0, -1])\n label = paddle.reshape(label, [0, -1])\n mask = paddle.reshape(mask, [0, -1])\n logit *= mask\n label *= mask\n intersection = paddle.sum(logit * label, axis=1)\n cardinality = paddle.sum(logit + label, axis=1)\n dice_loss = 1 - (2 * intersection + smooth) / (cardinality + smooth + eps)\n dice_loss = dice_loss.mean()\n return dice_loss\n", "path": "paddleseg/models/losses/dice_loss.py"}]} | 1,840 | 277 |
gh_patches_debug_531 | rasdani/github-patches | git_diff | joke2k__faker-1569 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
too long iban generated for pl-PL locale
* Faker version: 9.8.2
* OS: MacOs 12.0.1
IBANs generated for pl_PL locales are 30 characters long. This is too many. Valid PL IBAN should have 28 characters (including country code).
### Steps to reproduce
Generate a Polish IBAN with:
```
from faker import Faker
fake=Faker('pl-PL')
print(fake.iban())
```
Copy paste generated string into IBAN Validator at https://www.ibancalculator.com/
### Expected behavior
IBAN should have the correct length and checksum
### Actual behavior
There is an error message that IBAN have too many characters:
"This IBAN cannot be correct because of its length. A Polish IBAN always contains exactly 28 digits and letters ("PL", a 2-digit checksum, and the 24-digit national account number, whose first 8 digits determine the bank and branch). The IBAN you entered is 30 characters long."
</issue>
<code>
[start of faker/providers/bank/pl_PL/__init__.py]
1 from .. import Provider as BankProvider
2
3
4 class Provider(BankProvider):
5 """Implement bank provider for ``pl_PL`` locale."""
6
7 bban_format = "#" * 26
8 country_code = "PL"
9
[end of faker/providers/bank/pl_PL/__init__.py]
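
For reference, Faker composes an IBAN as the country code, two check digits, and then the BBAN, so a 24-character `bban_format` is what yields the required 28-character Polish IBAN. A quick sanity check of the fix — `fake.iban()` is real Faker API; the length assertion only holds once `bban_format` is reduced to 24:

```python
from faker import Faker

fake = Faker("pl_PL")
iban = fake.iban()

# "PL" (2 chars) + check digits (2) + 24-digit BBAN = 28 characters total.
assert iban.startswith("PL")
assert len(iban) == 28, f"expected 28 chars, got {len(iban)}: {iban}"
```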
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/faker/providers/bank/pl_PL/__init__.py b/faker/providers/bank/pl_PL/__init__.py
--- a/faker/providers/bank/pl_PL/__init__.py
+++ b/faker/providers/bank/pl_PL/__init__.py
@@ -4,5 +4,5 @@
class Provider(BankProvider):
"""Implement bank provider for ``pl_PL`` locale."""
- bban_format = "#" * 26
+ bban_format = "#" * 24
country_code = "PL"
| {"golden_diff": "diff --git a/faker/providers/bank/pl_PL/__init__.py b/faker/providers/bank/pl_PL/__init__.py\n--- a/faker/providers/bank/pl_PL/__init__.py\n+++ b/faker/providers/bank/pl_PL/__init__.py\n@@ -4,5 +4,5 @@\n class Provider(BankProvider):\n \"\"\"Implement bank provider for ``pl_PL`` locale.\"\"\"\n \n- bban_format = \"#\" * 26\n+ bban_format = \"#\" * 24\n country_code = \"PL\"\n", "issue": "too long iban generated for pl-PL locale\n* Faker version: 9.8.2\r\n* OS: MacOs 12.0.1\r\n\r\nIBANs generated for pl_PL locales are 30 characters long. This is too many. Valid PL IBAN should have 28 characters (including country code).\r\n\r\n### Steps to reproduce\r\nGenerate a Polish IBAN with:\r\n```\r\nfrom faker import Faker\r\n fake=Faker('pl-PL')\r\n print(fake.iban())\r\n```\r\nCopy paste generated string into IBAN Validator at https://www.ibancalculator.com/\r\n### Expected behavior\r\n\r\nIBAN should have the correct length and checksum\r\n\r\n### Actual behavior\r\n\r\nThere is an error message that IBAN have too many characters:\r\n\"This IBAN cannot be correct because of its length. A Polish IBAN always contains exactly 28 digits and letters (\"PL\", a 2-digit checksum, and the 24-digit national account number, whose first 8 digits determine the bank and branch). The IBAN you entered is 30 characters long.\"\r\n\ntoo long iban generated for pl-PL locale\n* Faker version: 9.8.2\r\n* OS: MacOs 12.0.1\r\n\r\nIBANs generated for pl_PL locales are 30 characters long. This is too many. Valid PL IBAN should have 28 characters (including country code).\r\n\r\n### Steps to reproduce\r\nGenerate a Polish IBAN with:\r\n```\r\nfrom faker import Faker\r\n fake=Faker('pl-PL')\r\n print(fake.iban())\r\n```\r\nCopy paste generated string into IBAN Validator at https://www.ibancalculator.com/\r\n### Expected behavior\r\n\r\nIBAN should have the correct length and checksum\r\n\r\n### Actual behavior\r\n\r\nThere is an error message that IBAN have too many characters:\r\n\"This IBAN cannot be correct because of its length. A Polish IBAN always contains exactly 28 digits and letters (\"PL\", a 2-digit checksum, and the 24-digit national account number, whose first 8 digits determine the bank and branch). The IBAN you entered is 30 characters long.\"\r\n\n", "before_files": [{"content": "from .. import Provider as BankProvider\n\n\nclass Provider(BankProvider):\n \"\"\"Implement bank provider for ``pl_PL`` locale.\"\"\"\n\n bban_format = \"#\" * 26\n country_code = \"PL\"\n", "path": "faker/providers/bank/pl_PL/__init__.py"}]} | 1,049 | 117 |
gh_patches_debug_20741 | rasdani/github-patches | git_diff | astronomer__astro-sdk-165 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add SQLite example
* Create an example in `example_dags` illustrating the usage of SQLite
* This example could use one of our checks
* Update `tests/test_example_dags.py` to run it
</issue>
<code>
[start of noxfile.py]
1 """Nox automation definitions."""
2
3 import pathlib
4
5 import nox
6
7 nox.options.sessions = ["dev"]
8
9
10 @nox.session(python="3.9")
11 def dev(session: nox.Session) -> None:
12 """Create a dev environment with everything installed.
13
14 This is useful for setting up IDE for autocompletion etc. Point the
15 development environment to ``.nox/dev``.
16 """
17 session.install("nox")
18 session.install("-e", ".[all]")
19 session.install("-e", ".[tests]")
20
21
22 @nox.session(python=["3.7", "3.8", "3.9"])
23 def test(session: nox.Session) -> None:
24 """Run unit tests."""
25 session.install("-e", ".[all]")
26 session.install("-e", ".[tests]")
27 session.run("airflow", "db", "init")
28 session.run("pytest", *session.posargs)
29
30
31 @nox.session()
32 @nox.parametrize(
33 "extras",
34 [
35 ("postgres-only", {"include": ["postgres"], "exclude": ["amazon"]}),
36 ("postgres-amazon", {"include": ["postgres", "amazon"]}),
37 ("snowflake-amazon", {"include": ["snowflake", "amazon"]})
38 # ("sqlite", {"include": ["sqlite"]}),
39 ],
40 )
41 def test_examples_by_dependency(session: nox.Session, extras):
42 _, extras = extras
43 pypi_deps = ",".join(extras["include"])
44 pytest_options = " and ".join(extras["include"])
45 pytest_options = " and not ".join([pytest_options, *extras.get("exclude", [])])
46 pytest_args = ["-k", pytest_options]
47
48 session.install("-e", f".[{pypi_deps}]")
49 session.install("-e", f".[tests]")
50 session.run("airflow", "db", "init")
51
52 session.run("pytest", "tests/test_example_dags.py", *pytest_args, *session.posargs)
53
54
55 @nox.session()
56 def lint(session: nox.Session) -> None:
57 """Run linters."""
58 session.install("pre-commit")
59 if session.posargs:
60 args = [*session.posargs, "--all-files"]
61 else:
62 args = ["--all-files", "--show-diff-on-failure"]
63 session.run("pre-commit", "run", *args)
64
65
66 @nox.session()
67 def build(session: nox.Session) -> None:
68 """Build release artifacts."""
69 session.install("build")
70
71 # TODO: Automate version bumping, Git tagging, and more?
72
73 dist = pathlib.Path("dist")
74 if dist.exists() and next(dist.iterdir(), None) is not None:
75 session.error(
76 "There are files in dist/. Remove them and try again. "
77 "You can use `git clean -fxdi -- dist` command to do this."
78 )
79 dist.mkdir(exist_ok=True)
80
81 session.run("python", "-m", "build", *session.posargs)
82
83
84 @nox.session()
85 def release(session: nox.Session) -> None:
86 """Publish a release."""
87 session.install("twine")
88 # TODO: Better artifact checking.
89 session.run("twine", "check", *session.posargs)
90 session.run("twine", "upload", *session.posargs)
91
[end of noxfile.py]
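
To see how the parametrized extras turn into a pytest invocation, here is a small standalone sketch of the option-building logic from `test_examples_by_dependency` above (the logic is copied; the extras value is the entry the issue asks to enable):

```python
# Mirrors the string handling in test_examples_by_dependency.
extras = {"include": ["sqlite"]}  # entry currently commented out in noxfile.py

pypi_deps = ",".join(extras["include"])           # -> "sqlite"
pytest_options = " and ".join(extras["include"])  # -> "sqlite"
pytest_options = " and not ".join([pytest_options, *extras.get("exclude", [])])
# With no excludes the second join is a no-op, so pytest runs with -k "sqlite".
print(pypi_deps, pytest_options)
```

For comparison, the `postgres-only` entry produces `-k "postgres and not amazon"`, which is how unrelated example DAGs get filtered out per dependency set.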
[start of example_dags/example_sqlite_load_transform.py]
1 from datetime import datetime
2
3 from airflow import DAG
4
5 from astro import sql as aql
6 from astro.sql.table import Table
7
8 START_DATE = datetime(2000, 1, 1)
9
10
11 @aql.transform()
12 def top_five_animations(input_table: Table):
13 return """
14 SELECT Title, Rating
15 FROM {{input_table}}
16 WHERE Genre1=='Animation'
17 ORDER BY Rating desc
18 LIMIT 5;
19 """
20
21
22 with DAG(
23 "example_sqlite_load_transform",
24 schedule_interval=None,
25 start_date=START_DATE,
26 catchup=False,
27 ) as dag:
28
29 imdb_movies = aql.load_file(
30 path="https://raw.githubusercontent.com/astro-projects/astro/readme/tests/data/imdb.csv",
31 task_id="load_csv",
32 output_table=Table(
33 table_name="imdb_movies", database="sqlite", conn_id="sqlite_default"
34 ),
35 )
36
37 top_five_animations(
38 input_table=imdb_movies,
39 output_table=Table(
40 table_name="top_animation", database="sqlite", conn_id="sqlite_default"
41 ),
42 )
43
[end of example_dags/example_sqlite_load_transform.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/example_dags/example_sqlite_load_transform.py b/example_dags/example_sqlite_load_transform.py
--- a/example_dags/example_sqlite_load_transform.py
+++ b/example_dags/example_sqlite_load_transform.py
@@ -27,7 +27,7 @@
) as dag:
imdb_movies = aql.load_file(
- path="https://raw.githubusercontent.com/astro-projects/astro/readme/tests/data/imdb.csv",
+ path="https://raw.githubusercontent.com/astro-projects/astro/main/tests/data/imdb.csv",
task_id="load_csv",
output_table=Table(
table_name="imdb_movies", database="sqlite", conn_id="sqlite_default"
diff --git a/noxfile.py b/noxfile.py
--- a/noxfile.py
+++ b/noxfile.py
@@ -34,8 +34,8 @@
[
("postgres-only", {"include": ["postgres"], "exclude": ["amazon"]}),
("postgres-amazon", {"include": ["postgres", "amazon"]}),
- ("snowflake-amazon", {"include": ["snowflake", "amazon"]})
- # ("sqlite", {"include": ["sqlite"]}),
+ ("snowflake-amazon", {"include": ["snowflake", "amazon"]}),
+ ("sqlite", {"include": ["sqlite"]}),
],
)
def test_examples_by_dependency(session: nox.Session, extras):
| {"golden_diff": "diff --git a/example_dags/example_sqlite_load_transform.py b/example_dags/example_sqlite_load_transform.py\n--- a/example_dags/example_sqlite_load_transform.py\n+++ b/example_dags/example_sqlite_load_transform.py\n@@ -27,7 +27,7 @@\n ) as dag:\n \n imdb_movies = aql.load_file(\n- path=\"https://raw.githubusercontent.com/astro-projects/astro/readme/tests/data/imdb.csv\",\n+ path=\"https://raw.githubusercontent.com/astro-projects/astro/main/tests/data/imdb.csv\",\n task_id=\"load_csv\",\n output_table=Table(\n table_name=\"imdb_movies\", database=\"sqlite\", conn_id=\"sqlite_default\"\ndiff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -34,8 +34,8 @@\n [\n (\"postgres-only\", {\"include\": [\"postgres\"], \"exclude\": [\"amazon\"]}),\n (\"postgres-amazon\", {\"include\": [\"postgres\", \"amazon\"]}),\n- (\"snowflake-amazon\", {\"include\": [\"snowflake\", \"amazon\"]})\n- # (\"sqlite\", {\"include\": [\"sqlite\"]}),\n+ (\"snowflake-amazon\", {\"include\": [\"snowflake\", \"amazon\"]}),\n+ (\"sqlite\", {\"include\": [\"sqlite\"]}),\n ],\n )\n def test_examples_by_dependency(session: nox.Session, extras):\n", "issue": "Add SQLite example\n* Create an example in `example_dags` illustrating the usage of SQLite\r\n* This example could use one of our checks\r\n* Update `tests/test_example_dags.py` to run it\n", "before_files": [{"content": "\"\"\"Nox automation definitions.\"\"\"\n\nimport pathlib\n\nimport nox\n\nnox.options.sessions = [\"dev\"]\n\n\[email protected](python=\"3.9\")\ndef dev(session: nox.Session) -> None:\n \"\"\"Create a dev environment with everything installed.\n\n This is useful for setting up IDE for autocompletion etc. Point the\n development environment to ``.nox/dev``.\n \"\"\"\n session.install(\"nox\")\n session.install(\"-e\", \".[all]\")\n session.install(\"-e\", \".[tests]\")\n\n\[email protected](python=[\"3.7\", \"3.8\", \"3.9\"])\ndef test(session: nox.Session) -> None:\n \"\"\"Run unit tests.\"\"\"\n session.install(\"-e\", \".[all]\")\n session.install(\"-e\", \".[tests]\")\n session.run(\"airflow\", \"db\", \"init\")\n session.run(\"pytest\", *session.posargs)\n\n\[email protected]()\[email protected](\n \"extras\",\n [\n (\"postgres-only\", {\"include\": [\"postgres\"], \"exclude\": [\"amazon\"]}),\n (\"postgres-amazon\", {\"include\": [\"postgres\", \"amazon\"]}),\n (\"snowflake-amazon\", {\"include\": [\"snowflake\", \"amazon\"]})\n # (\"sqlite\", {\"include\": [\"sqlite\"]}),\n ],\n)\ndef test_examples_by_dependency(session: nox.Session, extras):\n _, extras = extras\n pypi_deps = \",\".join(extras[\"include\"])\n pytest_options = \" and \".join(extras[\"include\"])\n pytest_options = \" and not \".join([pytest_options, *extras.get(\"exclude\", [])])\n pytest_args = [\"-k\", pytest_options]\n\n session.install(\"-e\", f\".[{pypi_deps}]\")\n session.install(\"-e\", f\".[tests]\")\n session.run(\"airflow\", \"db\", \"init\")\n\n session.run(\"pytest\", \"tests/test_example_dags.py\", *pytest_args, *session.posargs)\n\n\[email protected]()\ndef lint(session: nox.Session) -> None:\n \"\"\"Run linters.\"\"\"\n session.install(\"pre-commit\")\n if session.posargs:\n args = [*session.posargs, \"--all-files\"]\n else:\n args = [\"--all-files\", \"--show-diff-on-failure\"]\n session.run(\"pre-commit\", \"run\", *args)\n\n\[email protected]()\ndef build(session: nox.Session) -> None:\n \"\"\"Build release artifacts.\"\"\"\n session.install(\"build\")\n\n # TODO: Automate version bumping, Git tagging, and more?\n\n dist = 
pathlib.Path(\"dist\")\n if dist.exists() and next(dist.iterdir(), None) is not None:\n session.error(\n \"There are files in dist/. Remove them and try again. \"\n \"You can use `git clean -fxdi -- dist` command to do this.\"\n )\n dist.mkdir(exist_ok=True)\n\n session.run(\"python\", \"-m\", \"build\", *session.posargs)\n\n\[email protected]()\ndef release(session: nox.Session) -> None:\n \"\"\"Publish a release.\"\"\"\n session.install(\"twine\")\n # TODO: Better artifact checking.\n session.run(\"twine\", \"check\", *session.posargs)\n session.run(\"twine\", \"upload\", *session.posargs)\n", "path": "noxfile.py"}, {"content": "from datetime import datetime\n\nfrom airflow import DAG\n\nfrom astro import sql as aql\nfrom astro.sql.table import Table\n\nSTART_DATE = datetime(2000, 1, 1)\n\n\[email protected]()\ndef top_five_animations(input_table: Table):\n return \"\"\"\n SELECT Title, Rating\n FROM {{input_table}}\n WHERE Genre1=='Animation'\n ORDER BY Rating desc\n LIMIT 5;\n \"\"\"\n\n\nwith DAG(\n \"example_sqlite_load_transform\",\n schedule_interval=None,\n start_date=START_DATE,\n catchup=False,\n) as dag:\n\n imdb_movies = aql.load_file(\n path=\"https://raw.githubusercontent.com/astro-projects/astro/readme/tests/data/imdb.csv\",\n task_id=\"load_csv\",\n output_table=Table(\n table_name=\"imdb_movies\", database=\"sqlite\", conn_id=\"sqlite_default\"\n ),\n )\n\n top_five_animations(\n input_table=imdb_movies,\n output_table=Table(\n table_name=\"top_animation\", database=\"sqlite\", conn_id=\"sqlite_default\"\n ),\n )\n", "path": "example_dags/example_sqlite_load_transform.py"}]} | 1,797 | 306 |
gh_patches_debug_23891 | rasdani/github-patches | git_diff | DDMAL__CantusDB-945 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Users should only be shown the View-Edit toggle if they have edit access for the source/chant in question
@annamorphism made a comment on #441 that really deserves its own issue
> also is there a way to not have the "Edit" tab show up for unauthorized people? it's annoying to try to edit something and then be sent to a 403.
Currently, the view-edit toggle is being displayed whenever the user is logged in. Instead, we need to properly check that the user is actually allowed to edit the chant before displaying the Edit link.
</issue>
<code>
[start of django/cantusdb_project/main_app/views/sequence.py]
1 from django.views.generic import DetailView, ListView, UpdateView
2 from main_app.models import Sequence
3 from django.db.models import Q
4 from main_app.forms import SequenceEditForm
5 from django.contrib.auth.mixins import LoginRequiredMixin
6 from django.contrib import messages
7 from django.contrib.auth.mixins import UserPassesTestMixin
8 from django.core.exceptions import PermissionDenied
9
10
11 class SequenceDetailView(DetailView):
12 """
13 Displays a single Sequence object. Accessed with ``sequences/<int:pk>``
14 """
15
16 model = Sequence
17 context_object_name = "sequence"
18 template_name = "sequence_detail.html"
19
20 def get_context_data(self, **kwargs):
21 sequence = self.get_object()
22 source = sequence.source
23 # if the sequence's source isn't published,
24 # only logged-in users should be able to view the sequence's detail page
25 if (
26 (source is not None)
27 and (source.published is False)
28 and (not self.request.user.is_authenticated)
29 ):
30 raise PermissionDenied()
31
32 context = super().get_context_data(**kwargs)
33 context["concordances"] = (
34 Sequence.objects.filter(cantus_id=sequence.cantus_id)
35 .select_related("source")
36 .order_by("siglum")
37 )
38 return context
39
40
41 class SequenceListView(ListView):
42 """
43 Displays a list of Sequence objects. Accessed with ``sequences/``
44 """
45
46 paginate_by = 100
47 context_object_name = "sequences"
48 template_name = "sequence_list.html"
49
50 def get_queryset(self):
51 queryset = Sequence.objects.select_related("source")
52 display_unpublished = self.request.user.is_authenticated
53 if display_unpublished:
54 q_obj_filter = Q()
55 else:
56 q_obj_filter = Q(source__published=True)
57
58 if self.request.GET.get("incipit"):
59 incipit = self.request.GET.get("incipit")
60 q_obj_filter &= Q(incipit__icontains=incipit)
61 if self.request.GET.get("siglum"):
62 siglum = self.request.GET.get("siglum")
63 q_obj_filter &= Q(siglum__icontains=siglum)
64 if self.request.GET.get("cantus_id"):
65 cantus_id = self.request.GET.get("cantus_id")
66 q_obj_filter &= Q(cantus_id__icontains=cantus_id)
67
68 return queryset.filter(q_obj_filter).order_by("siglum", "s_sequence")
69
70
71 class SequenceEditView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
72 template_name = "sequence_edit.html"
73 model = Sequence
74 form_class = SequenceEditForm
75 pk_url_kwarg = "sequence_id"
76
77 def form_valid(self, form):
78 form.instance.last_updated_by = self.request.user
79 messages.success(
80 self.request,
81 "Sequence updated successfully!",
82 )
83 return super().form_valid(form)
84
85 def test_func(self):
86 user = self.request.user
87 # checks if the user is a project manager (they should have the privilege to edit any sequence)
88 is_project_manager = user.groups.filter(name="project manager").exists()
89
90 if is_project_manager:
91 return True
92 else:
93 return False
94
[end of django/cantusdb_project/main_app/views/sequence.py]
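
One way to gate the toggle is a reusable permission helper that the view can call when building its context, leaving the template to render the Edit link only when the flag is set. A rough sketch of what such a helper might check — the real one lives in the chant views; the `current_editors` relation name is an assumption about the `Source` model:

```python
def user_can_edit_chants_in_source(user, source):
    # Illustrative only -- the project's actual helper is in main_app.views.chant.
    if not user.is_authenticated or source is None:
        return False
    # Project managers may edit anything.
    if user.groups.filter(name="project manager").exists():
        return True
    # Otherwise, only editors assigned to this source may edit its chants.
    return source.current_editors.filter(id=user.id).exists()  # assumed relation
```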
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/django/cantusdb_project/main_app/views/sequence.py b/django/cantusdb_project/main_app/views/sequence.py
--- a/django/cantusdb_project/main_app/views/sequence.py
+++ b/django/cantusdb_project/main_app/views/sequence.py
@@ -6,6 +6,7 @@
from django.contrib import messages
from django.contrib.auth.mixins import UserPassesTestMixin
from django.core.exceptions import PermissionDenied
+from main_app.views.chant import user_can_edit_chants_in_source
class SequenceDetailView(DetailView):
@@ -20,6 +21,8 @@
def get_context_data(self, **kwargs):
sequence = self.get_object()
source = sequence.source
+ user = self.request.user
+
# if the sequence's source isn't published,
# only logged-in users should be able to view the sequence's detail page
if (
@@ -35,6 +38,7 @@
.select_related("source")
.order_by("siglum")
)
+ context["user_can_edit_sequence"] = user_can_edit_chants_in_source(user, source)
return context
| {"golden_diff": "diff --git a/django/cantusdb_project/main_app/views/sequence.py b/django/cantusdb_project/main_app/views/sequence.py\n--- a/django/cantusdb_project/main_app/views/sequence.py\n+++ b/django/cantusdb_project/main_app/views/sequence.py\n@@ -6,6 +6,7 @@\n from django.contrib import messages\n from django.contrib.auth.mixins import UserPassesTestMixin\n from django.core.exceptions import PermissionDenied\n+from main_app.views.chant import user_can_edit_chants_in_source\n \n \n class SequenceDetailView(DetailView):\n@@ -20,6 +21,8 @@\n def get_context_data(self, **kwargs):\n sequence = self.get_object()\n source = sequence.source\n+ user = self.request.user\n+\n # if the sequence's source isn't published,\n # only logged-in users should be able to view the sequence's detail page\n if (\n@@ -35,6 +38,7 @@\n .select_related(\"source\")\n .order_by(\"siglum\")\n )\n+ context[\"user_can_edit_sequence\"] = user_can_edit_chants_in_source(user, source)\n return context\n", "issue": "Users should only be shown the View-Edit toggle if they have edit access for the source/chant in question\n@annamorphism made a comment on #441 that really deserves its own issue\r\n\r\n> also is there a way to not have the \"Edit\" tab show up for unauthorized people? it's annoying to try to edit something and then be sent to a 403.\r\n\r\nCurrently, the view-edit toggle is being displayed whenever the user is logged in. Instead, we need to properly check that the user is actually allowed to edit the chant before displaying the Edit link.\n", "before_files": [{"content": "from django.views.generic import DetailView, ListView, UpdateView\nfrom main_app.models import Sequence\nfrom django.db.models import Q\nfrom main_app.forms import SequenceEditForm\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import UserPassesTestMixin\nfrom django.core.exceptions import PermissionDenied\n\n\nclass SequenceDetailView(DetailView):\n \"\"\"\n Displays a single Sequence object. Accessed with ``sequences/<int:pk>``\n \"\"\"\n\n model = Sequence\n context_object_name = \"sequence\"\n template_name = \"sequence_detail.html\"\n\n def get_context_data(self, **kwargs):\n sequence = self.get_object()\n source = sequence.source\n # if the sequence's source isn't published,\n # only logged-in users should be able to view the sequence's detail page\n if (\n (source is not None)\n and (source.published is False)\n and (not self.request.user.is_authenticated)\n ):\n raise PermissionDenied()\n\n context = super().get_context_data(**kwargs)\n context[\"concordances\"] = (\n Sequence.objects.filter(cantus_id=sequence.cantus_id)\n .select_related(\"source\")\n .order_by(\"siglum\")\n )\n return context\n\n\nclass SequenceListView(ListView):\n \"\"\"\n Displays a list of Sequence objects. 
Accessed with ``sequences/``\n \"\"\"\n\n paginate_by = 100\n context_object_name = \"sequences\"\n template_name = \"sequence_list.html\"\n\n def get_queryset(self):\n queryset = Sequence.objects.select_related(\"source\")\n display_unpublished = self.request.user.is_authenticated\n if display_unpublished:\n q_obj_filter = Q()\n else:\n q_obj_filter = Q(source__published=True)\n\n if self.request.GET.get(\"incipit\"):\n incipit = self.request.GET.get(\"incipit\")\n q_obj_filter &= Q(incipit__icontains=incipit)\n if self.request.GET.get(\"siglum\"):\n siglum = self.request.GET.get(\"siglum\")\n q_obj_filter &= Q(siglum__icontains=siglum)\n if self.request.GET.get(\"cantus_id\"):\n cantus_id = self.request.GET.get(\"cantus_id\")\n q_obj_filter &= Q(cantus_id__icontains=cantus_id)\n\n return queryset.filter(q_obj_filter).order_by(\"siglum\", \"s_sequence\")\n\n\nclass SequenceEditView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):\n template_name = \"sequence_edit.html\"\n model = Sequence\n form_class = SequenceEditForm\n pk_url_kwarg = \"sequence_id\"\n\n def form_valid(self, form):\n form.instance.last_updated_by = self.request.user\n messages.success(\n self.request,\n \"Sequence updated successfully!\",\n )\n return super().form_valid(form)\n\n def test_func(self):\n user = self.request.user\n # checks if the user is a project manager (they should have the privilege to edit any sequence)\n is_project_manager = user.groups.filter(name=\"project manager\").exists()\n\n if is_project_manager:\n return True\n else:\n return False\n", "path": "django/cantusdb_project/main_app/views/sequence.py"}]} | 1,531 | 256 |
gh_patches_debug_630 | rasdani/github-patches | git_diff | pex-tool__pex-2240 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.146
On the docket:
+ [x] Fix non executable venv sys path bug #2236
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.145"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.145"
+__version__ = "2.1.146"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.145\"\n+__version__ = \"2.1.146\"\n", "issue": "Release 2.1.146\nOn the docket:\r\n+ [x] Fix non executable venv sys path bug #2236\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.145\"\n", "path": "pex/version.py"}]} | 617 | 99 |
gh_patches_debug_17884 | rasdani/github-patches | git_diff | deis__deis-1517 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`deis run` generates 500 error
[Integration tests](http://ci.deis.io/view/example-apps/job/test-integration-clojure-ring/47/console) against master found an error in `deis run`:
```
=== appssample Domains
No domains
ok
/home/jenkins/workspace/test-integration-clojure-ring/src/github.com/deis/deis/tests/example-clojure-ring
apps:run echo hello
500 INTERNAL SERVER ERROR
<h1>Server Error (500)</h1>
error at command wait
--- FAIL: TestApps (76.15 seconds)
itutils.go:199: Failed:
exit status 1
FAIL
exit status 1
```
</issue>
<code>
[start of controller/api/tasks.py]
1 """
2 Long-running tasks for the Deis Controller API
3
4 This module orchestrates the real "heavy lifting" of Deis, and as such these
5 functions are decorated to run as asynchronous celery tasks.
6 """
7
8 from __future__ import unicode_literals
9
10 import requests
11 import threading
12
13 from celery import task
14 from django.conf import settings
15
16
17 @task
18 def create_cluster(cluster):
19 cluster._scheduler.setUp()
20
21
22 @task
23 def destroy_cluster(cluster):
24 for app in cluster.app_set.all():
25 app.destroy()
26 cluster._scheduler.tearDown()
27
28
29 @task
30 def deploy_release(app, release):
31 containers = app.container_set.all()
32 threads = []
33 for c in containers:
34 threads.append(threading.Thread(target=c.deploy, args=(release,)))
35 [t.start() for t in threads]
36 [t.join() for t in threads]
37
38
39 @task
40 def import_repository(source, target_repository):
41 """Imports an image from a remote registry into our own private registry"""
42 data = {
43 'src': source,
44 }
45 requests.post(
46 '{}/v1/repositories/{}/tags'.format(settings.REGISTRY_URL,
47 target_repository),
48 data=data,
49 )
50
51
52 @task
53 def start_containers(containers):
54 create_threads = []
55 start_threads = []
56 for c in containers:
57 create_threads.append(threading.Thread(target=c.create))
58 start_threads.append(threading.Thread(target=c.start))
59 [t.start() for t in create_threads]
60 [t.join() for t in create_threads]
61 [t.start() for t in start_threads]
62 [t.join() for t in start_threads]
63
64
65 @task
66 def stop_containers(containers):
67 destroy_threads = []
68 delete_threads = []
69 for c in containers:
70 destroy_threads.append(threading.Thread(target=c.destroy))
71 delete_threads.append(threading.Thread(target=c.delete))
72 [t.start() for t in destroy_threads]
73 [t.join() for t in destroy_threads]
74 [t.start() for t in delete_threads]
75 [t.join() for t in delete_threads]
76
77
78 @task
79 def run_command(c, command):
80 release = c.release
81 version = release.version
82 image = release.image
83 try:
84 # pull the image first
85 rc, pull_output = c.run("docker pull {image}".format(**locals()))
86 if rc != 0:
87 raise EnvironmentError('Could not pull image: {pull_image}'.format(**locals()))
88 # run the command
89 docker_args = ' '.join(['--entrypoint=/bin/sh',
90 '-a', 'stdout', '-a', 'stderr', '--rm', image])
91 escaped_command = command.replace("'", "'\\''")
92 command = r"docker run {docker_args} -c \'{escaped_command}\'".format(**locals())
93 return c.run(command)
94 finally:
95 c.delete()
96
[end of controller/api/tasks.py]
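
The 500 stems from pulling a bare `release.image` reference that the Docker daemon on the node cannot resolve; prefixing the private registry host and port makes the reference concrete. A hedged sketch of the string composition (the `REGISTRY_HOST`/`REGISTRY_PORT` setting names follow the pattern of the `REGISTRY_URL` setting used above, but treat them as assumptions):

```python
# Illustrative composition of a registry-qualified image reference.
registry_host = "registry.example.com"  # stand-in for settings.REGISTRY_HOST
registry_port = 5000                    # stand-in for settings.REGISTRY_PORT
release_image = "appssample:v23"        # stand-in for release.image

image = "{}:{}/{}".format(registry_host, registry_port, release_image)
print(image)  # registry.example.com:5000/appssample:v23
```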
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/controller/api/tasks.py b/controller/api/tasks.py
--- a/controller/api/tasks.py
+++ b/controller/api/tasks.py
@@ -79,12 +79,14 @@
def run_command(c, command):
release = c.release
version = release.version
- image = release.image
+ image = '{}:{}/{}'.format(settings.REGISTRY_HOST,
+ settings.REGISTRY_PORT,
+ release.image)
try:
# pull the image first
rc, pull_output = c.run("docker pull {image}".format(**locals()))
if rc != 0:
- raise EnvironmentError('Could not pull image: {pull_image}'.format(**locals()))
+ raise EnvironmentError('Could not pull image: {image}'.format(**locals()))
# run the command
docker_args = ' '.join(['--entrypoint=/bin/sh',
'-a', 'stdout', '-a', 'stderr', '--rm', image])
| {"golden_diff": "diff --git a/controller/api/tasks.py b/controller/api/tasks.py\n--- a/controller/api/tasks.py\n+++ b/controller/api/tasks.py\n@@ -79,12 +79,14 @@\n def run_command(c, command):\n release = c.release\n version = release.version\n- image = release.image\n+ image = '{}:{}/{}'.format(settings.REGISTRY_HOST,\n+ settings.REGISTRY_PORT,\n+ release.image)\n try:\n # pull the image first\n rc, pull_output = c.run(\"docker pull {image}\".format(**locals()))\n if rc != 0:\n- raise EnvironmentError('Could not pull image: {pull_image}'.format(**locals()))\n+ raise EnvironmentError('Could not pull image: {image}'.format(**locals()))\n # run the command\n docker_args = ' '.join(['--entrypoint=/bin/sh',\n '-a', 'stdout', '-a', 'stderr', '--rm', image])\n", "issue": "`deis run` generates 500 error\n[Integration tests](http://ci.deis.io/view/example-apps/job/test-integration-clojure-ring/47/console) against master found an error in `deis run`:\n\n```\n=== appssample Domains\nNo domains\n\n\nok\n/home/jenkins/workspace/test-integration-clojure-ring/src/github.com/deis/deis/tests/example-clojure-ring\napps:run echo hello\n\n500 INTERNAL SERVER ERROR\n<h1>Server Error (500)</h1>\n\nerror at command wait\n--- FAIL: TestApps (76.15 seconds)\n itutils.go:199: Failed:\n exit status 1\nFAIL\nexit status 1\n```\n\n", "before_files": [{"content": "\"\"\"\nLong-running tasks for the Deis Controller API\n\nThis module orchestrates the real \"heavy lifting\" of Deis, and as such these\nfunctions are decorated to run as asynchronous celery tasks.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport requests\nimport threading\n\nfrom celery import task\nfrom django.conf import settings\n\n\n@task\ndef create_cluster(cluster):\n cluster._scheduler.setUp()\n\n\n@task\ndef destroy_cluster(cluster):\n for app in cluster.app_set.all():\n app.destroy()\n cluster._scheduler.tearDown()\n\n\n@task\ndef deploy_release(app, release):\n containers = app.container_set.all()\n threads = []\n for c in containers:\n threads.append(threading.Thread(target=c.deploy, args=(release,)))\n [t.start() for t in threads]\n [t.join() for t in threads]\n\n\n@task\ndef import_repository(source, target_repository):\n \"\"\"Imports an image from a remote registry into our own private registry\"\"\"\n data = {\n 'src': source,\n }\n requests.post(\n '{}/v1/repositories/{}/tags'.format(settings.REGISTRY_URL,\n target_repository),\n data=data,\n )\n\n\n@task\ndef start_containers(containers):\n create_threads = []\n start_threads = []\n for c in containers:\n create_threads.append(threading.Thread(target=c.create))\n start_threads.append(threading.Thread(target=c.start))\n [t.start() for t in create_threads]\n [t.join() for t in create_threads]\n [t.start() for t in start_threads]\n [t.join() for t in start_threads]\n\n\n@task\ndef stop_containers(containers):\n destroy_threads = []\n delete_threads = []\n for c in containers:\n destroy_threads.append(threading.Thread(target=c.destroy))\n delete_threads.append(threading.Thread(target=c.delete))\n [t.start() for t in destroy_threads]\n [t.join() for t in destroy_threads]\n [t.start() for t in delete_threads]\n [t.join() for t in delete_threads]\n\n\n@task\ndef run_command(c, command):\n release = c.release\n version = release.version\n image = release.image\n try:\n # pull the image first\n rc, pull_output = c.run(\"docker pull {image}\".format(**locals()))\n if rc != 0:\n raise EnvironmentError('Could not pull image: {pull_image}'.format(**locals()))\n # run the command\n 
docker_args = ' '.join(['--entrypoint=/bin/sh',\n '-a', 'stdout', '-a', 'stderr', '--rm', image])\n escaped_command = command.replace(\"'\", \"'\\\\''\")\n command = r\"docker run {docker_args} -c \\'{escaped_command}\\'\".format(**locals())\n return c.run(command)\n finally:\n c.delete()\n", "path": "controller/api/tasks.py"}]} | 1,486 | 206 |
gh_patches_debug_38869 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-3133 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Implement 'Shares'
## Issues
- [x] https://github.com/centerofci/mathesar/issues/3033
- [x] https://github.com/centerofci/mathesar/issues/3034
- [x] https://github.com/centerofci/mathesar/issues/3035
- [x] https://github.com/centerofci/mathesar/issues/3036
## Tasks:
- [ ] Add regenerate slug endpoints
### https://github.com/centerofci/mathesar/pull/3093#pullrequestreview-1546069582
- [ ] Address the following in shared table consumer page
  - [ ] Disable re-ordering of columns
- [ ] Don't show the icon hyperlink to the record page within the PK cell
- [ ] Remove the following entries in the cell context menu:
- "Set to NULL"
- "Go to Record Page"
- "Go to Linked Record" (shown only for FK columns)
- [ ] Remove the "Go to Record Page" entry from the row header context menu
- [ ] Disable record selector in filtering for FK columns
- [ ] Come up with a better term for 'ShareConsumer'. Some suggestions:
- ShareAccessInfo
- SharedLink
- ConsumableShare
## Related:
* [Product spec](https://wiki.mathesar.org/en/product/specs/publicly-shareable-links)
</issue>
<code>
[start of mathesar/api/ui/viewsets/shares.py]
1 from rest_framework import viewsets
2 from rest_access_policy import AccessViewSetMixin
3
4 from mathesar.api.pagination import DefaultLimitOffsetPagination
5 from mathesar.api.ui.serializers.shares import SharedTableSerializer, SharedQuerySerializer
6 from mathesar.api.ui.permissions.shares import SharedTableAccessPolicy, SharedQueryAccessPolicy
7 from mathesar.models.shares import SharedTable, SharedQuery
8
9
10 class SharedTableViewSet(AccessViewSetMixin, viewsets.ModelViewSet):
11 pagination_class = DefaultLimitOffsetPagination
12 serializer_class = SharedTableSerializer
13 access_policy = SharedTableAccessPolicy
14
15 def get_queryset(self):
16 return SharedTable.objects.filter(table_id=self.kwargs['table_pk']).order_by('-created_at')
17
18 def perform_create(self, serializer):
19 serializer.save(table_id=self.kwargs['table_pk'])
20
21
22 class SharedQueryViewSet(AccessViewSetMixin, viewsets.ModelViewSet):
23 pagination_class = DefaultLimitOffsetPagination
24 serializer_class = SharedQuerySerializer
25 access_policy = SharedQueryAccessPolicy
26
27 def get_queryset(self):
28 return SharedQuery.objects.filter(query_id=self.kwargs['query_pk']).order_by('-created_at')
29
30 def perform_create(self, serializer):
31 serializer.save(query_id=self.kwargs['query_pk'])
32
[end of mathesar/api/ui/viewsets/shares.py]
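
For the outstanding "add regenerate slug endpoints" task, a DRF detail action named `regenerate` would expose a `POST .../shares/<pk>/regenerate/` route on each viewset. A hedged sketch of what calling it might look like from a client — the host, URL prefix, and ids here are hypothetical:

```python
import requests

# Hypothetical nested route and ids -- adjust to the deployment. DRF's
# @action(detail=True) named "regenerate" yields ".../shares/<pk>/regenerate/".
url = "http://localhost:8000/api/ui/v0/tables/12/shares/3/regenerate/"
resp = requests.post(url, cookies={"sessionid": "<session cookie>"})
resp.raise_for_status()
print(resp.json()["slug"])  # a freshly generated UUID, invalidating the old link
```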
[start of mathesar/api/ui/permissions/shares.py]
1 from rest_access_policy import AccessPolicy
2
3 from mathesar.api.utils import get_query_or_404
4 from mathesar.api.permission_utils import QueryAccessInspector
5
6
7 class SharedTableAccessPolicy(AccessPolicy):
8 statements = [
9 {
10 'action': ['list', 'retrieve'],
11 'principal': 'authenticated',
12 'effect': 'allow',
13 'condition_expression': 'is_atleast_viewer_nested_table_resource'
14 },
15 {
16 'action': ['create', 'destroy', 'update', 'partial_update'],
17 'principal': 'authenticated',
18 'effect': 'allow',
19 'condition_expression': 'is_atleast_editor_nested_table_resource'
20 },
21 ]
22
23
24 class SharedQueryAccessPolicy(AccessPolicy):
25 statements = [
26 {
27 'action': ['list', 'retrieve'],
28 'principal': 'authenticated',
29 'effect': 'allow',
30 'condition_expression': 'is_atleast_query_viewer'
31 },
32 {
33 'action': ['create', 'destroy', 'update', 'partial_update'],
34 'principal': 'authenticated',
35 'effect': 'allow',
36 'condition_expression': 'is_atleast_query_editor'
37 },
38 ]
39
40 def is_atleast_query_viewer(self, request, view, action):
41 query = get_query_or_404(view.kwargs['query_pk'])
42 return QueryAccessInspector(request.user, query).is_atleast_viewer()
43
44 def is_atleast_query_editor(self, request, view, action):
45 query = get_query_or_404(view.kwargs['query_pk'])
46 return QueryAccessInspector(request.user, query).is_atleast_editor()
47
[end of mathesar/api/ui/permissions/shares.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mathesar/api/ui/permissions/shares.py b/mathesar/api/ui/permissions/shares.py
--- a/mathesar/api/ui/permissions/shares.py
+++ b/mathesar/api/ui/permissions/shares.py
@@ -13,7 +13,7 @@
'condition_expression': 'is_atleast_viewer_nested_table_resource'
},
{
- 'action': ['create', 'destroy', 'update', 'partial_update'],
+ 'action': ['create', 'destroy', 'update', 'partial_update', 'regenerate'],
'principal': 'authenticated',
'effect': 'allow',
'condition_expression': 'is_atleast_editor_nested_table_resource'
@@ -30,7 +30,7 @@
'condition_expression': 'is_atleast_query_viewer'
},
{
- 'action': ['create', 'destroy', 'update', 'partial_update'],
+ 'action': ['create', 'destroy', 'update', 'partial_update', 'regenerate'],
'principal': 'authenticated',
'effect': 'allow',
'condition_expression': 'is_atleast_query_editor'
diff --git a/mathesar/api/ui/viewsets/shares.py b/mathesar/api/ui/viewsets/shares.py
--- a/mathesar/api/ui/viewsets/shares.py
+++ b/mathesar/api/ui/viewsets/shares.py
@@ -1,5 +1,8 @@
+import uuid
from rest_framework import viewsets
from rest_access_policy import AccessViewSetMixin
+from rest_framework.decorators import action
+from rest_framework.response import Response
from mathesar.api.pagination import DefaultLimitOffsetPagination
from mathesar.api.ui.serializers.shares import SharedTableSerializer, SharedQuerySerializer
@@ -7,7 +10,17 @@
from mathesar.models.shares import SharedTable, SharedQuery
-class SharedTableViewSet(AccessViewSetMixin, viewsets.ModelViewSet):
+class RegenerateSlugMixin(viewsets.GenericViewSet):
+ @action(methods=['post'], detail=True)
+ def regenerate(self, *args, **kwargs):
+ share = self.get_object()
+ share.slug = uuid.uuid4()
+ share.save()
+ serializer = self.get_serializer(share)
+ return Response(serializer.data)
+
+
+class SharedTableViewSet(AccessViewSetMixin, viewsets.ModelViewSet, RegenerateSlugMixin):
pagination_class = DefaultLimitOffsetPagination
serializer_class = SharedTableSerializer
access_policy = SharedTableAccessPolicy
@@ -19,7 +32,7 @@
serializer.save(table_id=self.kwargs['table_pk'])
-class SharedQueryViewSet(AccessViewSetMixin, viewsets.ModelViewSet):
+class SharedQueryViewSet(AccessViewSetMixin, viewsets.ModelViewSet, RegenerateSlugMixin):
pagination_class = DefaultLimitOffsetPagination
serializer_class = SharedQuerySerializer
access_policy = SharedQueryAccessPolicy
| {"golden_diff": "diff --git a/mathesar/api/ui/permissions/shares.py b/mathesar/api/ui/permissions/shares.py\n--- a/mathesar/api/ui/permissions/shares.py\n+++ b/mathesar/api/ui/permissions/shares.py\n@@ -13,7 +13,7 @@\n 'condition_expression': 'is_atleast_viewer_nested_table_resource'\n },\n {\n- 'action': ['create', 'destroy', 'update', 'partial_update'],\n+ 'action': ['create', 'destroy', 'update', 'partial_update', 'regenerate'],\n 'principal': 'authenticated',\n 'effect': 'allow',\n 'condition_expression': 'is_atleast_editor_nested_table_resource'\n@@ -30,7 +30,7 @@\n 'condition_expression': 'is_atleast_query_viewer'\n },\n {\n- 'action': ['create', 'destroy', 'update', 'partial_update'],\n+ 'action': ['create', 'destroy', 'update', 'partial_update', 'regenerate'],\n 'principal': 'authenticated',\n 'effect': 'allow',\n 'condition_expression': 'is_atleast_query_editor'\ndiff --git a/mathesar/api/ui/viewsets/shares.py b/mathesar/api/ui/viewsets/shares.py\n--- a/mathesar/api/ui/viewsets/shares.py\n+++ b/mathesar/api/ui/viewsets/shares.py\n@@ -1,5 +1,8 @@\n+import uuid\n from rest_framework import viewsets\n from rest_access_policy import AccessViewSetMixin\n+from rest_framework.decorators import action\n+from rest_framework.response import Response\n \n from mathesar.api.pagination import DefaultLimitOffsetPagination\n from mathesar.api.ui.serializers.shares import SharedTableSerializer, SharedQuerySerializer\n@@ -7,7 +10,17 @@\n from mathesar.models.shares import SharedTable, SharedQuery\n \n \n-class SharedTableViewSet(AccessViewSetMixin, viewsets.ModelViewSet):\n+class RegenerateSlugMixin(viewsets.GenericViewSet):\n+ @action(methods=['post'], detail=True)\n+ def regenerate(self, *args, **kwargs):\n+ share = self.get_object()\n+ share.slug = uuid.uuid4()\n+ share.save()\n+ serializer = self.get_serializer(share)\n+ return Response(serializer.data)\n+\n+\n+class SharedTableViewSet(AccessViewSetMixin, viewsets.ModelViewSet, RegenerateSlugMixin):\n pagination_class = DefaultLimitOffsetPagination\n serializer_class = SharedTableSerializer\n access_policy = SharedTableAccessPolicy\n@@ -19,7 +32,7 @@\n serializer.save(table_id=self.kwargs['table_pk'])\n \n \n-class SharedQueryViewSet(AccessViewSetMixin, viewsets.ModelViewSet):\n+class SharedQueryViewSet(AccessViewSetMixin, viewsets.ModelViewSet, RegenerateSlugMixin):\n pagination_class = DefaultLimitOffsetPagination\n serializer_class = SharedQuerySerializer\n access_policy = SharedQueryAccessPolicy\n", "issue": "Implement 'Shares'\n## Issues\r\n- [x] https://github.com/centerofci/mathesar/issues/3033\r\n- [x] https://github.com/centerofci/mathesar/issues/3034\r\n- [x] https://github.com/centerofci/mathesar/issues/3035\r\n- [x] https://github.com/centerofci/mathesar/issues/3036\r\n\r\n## Tasks:\r\n- [ ] Add regenerate slug endpoints\r\n\r\n### https://github.com/centerofci/mathesar/pull/3093#pullrequestreview-1546069582\r\n- [ ] Address the following in shared table consumer page\r\n - [ ] Disable re-reordering of columns\r\n - [ ] Don't show the icon hyperlink to the record page within the PK cell\r\n - [ ] Remove the following entries in the cell context menu:\r\n - \"Set to NULL\"\r\n - \"Go to Record Page\"\r\n - \"Go to Linked Record\" (shown only for FK columns)\r\n - [ ] Remove the \"Go to Record Page\" entry from the row header context menu\r\n - [ ] Disable record selector in filtering for FK columns\r\n- [ ] Come up with a better term for 'ShareConsumer'. 
Some suggestions:\r\n - ShareAccessInfo\r\n - SharedLink\r\n - ConsumableShare\r\n\r\n## Related:\r\n* [Product spec](https://wiki.mathesar.org/en/product/specs/publicly-shareable-links)\n", "before_files": [{"content": "from rest_framework import viewsets\nfrom rest_access_policy import AccessViewSetMixin\n\nfrom mathesar.api.pagination import DefaultLimitOffsetPagination\nfrom mathesar.api.ui.serializers.shares import SharedTableSerializer, SharedQuerySerializer\nfrom mathesar.api.ui.permissions.shares import SharedTableAccessPolicy, SharedQueryAccessPolicy\nfrom mathesar.models.shares import SharedTable, SharedQuery\n\n\nclass SharedTableViewSet(AccessViewSetMixin, viewsets.ModelViewSet):\n pagination_class = DefaultLimitOffsetPagination\n serializer_class = SharedTableSerializer\n access_policy = SharedTableAccessPolicy\n\n def get_queryset(self):\n return SharedTable.objects.filter(table_id=self.kwargs['table_pk']).order_by('-created_at')\n\n def perform_create(self, serializer):\n serializer.save(table_id=self.kwargs['table_pk'])\n\n\nclass SharedQueryViewSet(AccessViewSetMixin, viewsets.ModelViewSet):\n pagination_class = DefaultLimitOffsetPagination\n serializer_class = SharedQuerySerializer\n access_policy = SharedQueryAccessPolicy\n\n def get_queryset(self):\n return SharedQuery.objects.filter(query_id=self.kwargs['query_pk']).order_by('-created_at')\n\n def perform_create(self, serializer):\n serializer.save(query_id=self.kwargs['query_pk'])\n", "path": "mathesar/api/ui/viewsets/shares.py"}, {"content": "from rest_access_policy import AccessPolicy\n\nfrom mathesar.api.utils import get_query_or_404\nfrom mathesar.api.permission_utils import QueryAccessInspector\n\n\nclass SharedTableAccessPolicy(AccessPolicy):\n statements = [\n {\n 'action': ['list', 'retrieve'],\n 'principal': 'authenticated',\n 'effect': 'allow',\n 'condition_expression': 'is_atleast_viewer_nested_table_resource'\n },\n {\n 'action': ['create', 'destroy', 'update', 'partial_update'],\n 'principal': 'authenticated',\n 'effect': 'allow',\n 'condition_expression': 'is_atleast_editor_nested_table_resource'\n },\n ]\n\n\nclass SharedQueryAccessPolicy(AccessPolicy):\n statements = [\n {\n 'action': ['list', 'retrieve'],\n 'principal': 'authenticated',\n 'effect': 'allow',\n 'condition_expression': 'is_atleast_query_viewer'\n },\n {\n 'action': ['create', 'destroy', 'update', 'partial_update'],\n 'principal': 'authenticated',\n 'effect': 'allow',\n 'condition_expression': 'is_atleast_query_editor'\n },\n ]\n\n def is_atleast_query_viewer(self, request, view, action):\n query = get_query_or_404(view.kwargs['query_pk'])\n return QueryAccessInspector(request.user, query).is_atleast_viewer()\n\n def is_atleast_query_editor(self, request, view, action):\n query = get_query_or_404(view.kwargs['query_pk'])\n return QueryAccessInspector(request.user, query).is_atleast_editor()\n", "path": "mathesar/api/ui/permissions/shares.py"}]} | 1,618 | 610 |
gh_patches_debug_10772 | rasdani/github-patches | git_diff | freedomofpress__securedrop-3082 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Delete setup.cfg
`setup.cfg` was used to set configuration options for `pytest` in the past, but since `pytest.ini` is now providing that configuration, it seems like `setup.cfg` should be deleted.
</issue>
<code>
[start of testinfra/conftest.py]
1 """
2 Configuration for TestInfra test suite for SecureDrop.
3 Handles importing host-specific test vars, so test functions
4 can be reused across multiple hosts, with varied targets.
5
6 Vars should be placed in `testinfra/vars/<hostname>.yml`.
7 """
8
9 import os
10 import yaml
11
12
13 target_host = os.environ['SECUREDROP_TESTINFRA_TARGET_HOST']
14 assert target_host != ""
15
16
17 def securedrop_import_testinfra_vars(hostname, with_header=False):
18 """
19 Import vars from a YAML file to populate tests with host-specific
20 values used in checks. For instance, the SecureDrop docroot will
21 be under /vagrant in development, but /var/www/securedrop in staging.
22
23 Vars must be stored in `testinfra/vars/<hostname>.yml`.
24 """
25 filepath = os.path.join(os.path.dirname(__file__), "vars", hostname+".yml")
26 with open(filepath, 'r') as f:
27 hostvars = yaml.safe_load(f)
28 # The directory Travis runs builds in varies by PR, so we cannot hardcode
29 # it in the YAML testvars. Read it from env var and concatenate.
30 if hostname.lower() == 'travis':
31 build_env = os.environ["TRAVIS_BUILD_DIR"]
32 hostvars['securedrop_code'] = build_env+"/securedrop"
33
34 if with_header:
35 hostvars = dict(securedrop_test_vars=hostvars)
36 return hostvars
37
38
39 def pytest_namespace():
40 return securedrop_import_testinfra_vars(target_host, with_header=True)
41
[end of testinfra/conftest.py]
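
For context on how these host vars reach the tests: the `pytest_namespace` hook above merges the YAML vars into the pytest namespace under the `securedrop_test_vars` key, so test modules can read them without per-test plumbing. A rough sketch of a consuming test — `host` is testinfra's standard fixture, and note `pytest_namespace` only works on the older pytest versions this suite was written for:

```python
import pytest


def test_securedrop_code_dir(host):
    # Populated by pytest_namespace() in conftest.py from vars/<hostname>.yml.
    testvars = pytest.securedrop_test_vars
    assert host.file(testvars["securedrop_code"]).is_directory
```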
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/testinfra/conftest.py b/testinfra/conftest.py
--- a/testinfra/conftest.py
+++ b/testinfra/conftest.py
@@ -25,11 +25,6 @@
filepath = os.path.join(os.path.dirname(__file__), "vars", hostname+".yml")
with open(filepath, 'r') as f:
hostvars = yaml.safe_load(f)
- # The directory Travis runs builds in varies by PR, so we cannot hardcode
- # it in the YAML testvars. Read it from env var and concatenate.
- if hostname.lower() == 'travis':
- build_env = os.environ["TRAVIS_BUILD_DIR"]
- hostvars['securedrop_code'] = build_env+"/securedrop"
if with_header:
hostvars = dict(securedrop_test_vars=hostvars)
| {"golden_diff": "diff --git a/testinfra/conftest.py b/testinfra/conftest.py\n--- a/testinfra/conftest.py\n+++ b/testinfra/conftest.py\n@@ -25,11 +25,6 @@\n filepath = os.path.join(os.path.dirname(__file__), \"vars\", hostname+\".yml\")\n with open(filepath, 'r') as f:\n hostvars = yaml.safe_load(f)\n- # The directory Travis runs builds in varies by PR, so we cannot hardcode\n- # it in the YAML testvars. Read it from env var and concatenate.\n- if hostname.lower() == 'travis':\n- build_env = os.environ[\"TRAVIS_BUILD_DIR\"]\n- hostvars['securedrop_code'] = build_env+\"/securedrop\"\n \n if with_header:\n hostvars = dict(securedrop_test_vars=hostvars)\n", "issue": "Delete setup.cfg\n`setup.cfg` was used to set configuration options for `pytest` in the past, but since `pytest.ini` is now providing that configuration, it seems like `setup.cfg` should be deleted.\n", "before_files": [{"content": "\"\"\"\nConfiguration for TestInfra test suite for SecureDrop.\nHandles importing host-specific test vars, so test functions\ncan be reused across multiple hosts, with varied targets.\n\nVars should be placed in `testinfra/vars/<hostname>.yml`.\n\"\"\"\n\nimport os\nimport yaml\n\n\ntarget_host = os.environ['SECUREDROP_TESTINFRA_TARGET_HOST']\nassert target_host != \"\"\n\n\ndef securedrop_import_testinfra_vars(hostname, with_header=False):\n \"\"\"\n Import vars from a YAML file to populate tests with host-specific\n values used in checks. For instance, the SecureDrop docroot will\n be under /vagrant in development, but /var/www/securedrop in staging.\n\n Vars must be stored in `testinfra/vars/<hostname>.yml`.\n \"\"\"\n filepath = os.path.join(os.path.dirname(__file__), \"vars\", hostname+\".yml\")\n with open(filepath, 'r') as f:\n hostvars = yaml.safe_load(f)\n # The directory Travis runs builds in varies by PR, so we cannot hardcode\n # it in the YAML testvars. Read it from env var and concatenate.\n if hostname.lower() == 'travis':\n build_env = os.environ[\"TRAVIS_BUILD_DIR\"]\n hostvars['securedrop_code'] = build_env+\"/securedrop\"\n\n if with_header:\n hostvars = dict(securedrop_test_vars=hostvars)\n return hostvars\n\n\ndef pytest_namespace():\n return securedrop_import_testinfra_vars(target_host, with_header=True)\n", "path": "testinfra/conftest.py"}]} | 983 | 187 |
gh_patches_debug_3124 | rasdani/github-patches | git_diff | spack__spack-36099 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
LLNL Cardioid homepage no longer exists
https://github.com/spack/spack/blob/fe5865da0d4ee4480a85ee1257cb310c036b0c88/var/spack/repos/builtin/packages/cardioid/package.py#L12
@rblake-llnl it looks like Cardioid's [home page](https://baasic.llnl.gov/comp-bio/cardioid-code.php) redirects to a 404 these days. You're listed as the maintainer. Has the home of cardioid moved?
</issue>
<code>
[start of var/spack/repos/builtin/packages/cardioid/package.py]
1 # Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
2 # Spack Project Developers. See the top-level COPYRIGHT file for details.
3 #
4 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
5
6 from spack.package import *
7
8
9 class Cardioid(CMakePackage):
10 """Cardiac simulation suite."""
11
12 homepage = "https://baasic.llnl.gov/comp-bio/cardioid-code.php"
13 git = "https://github.com/LLNL/cardioid.git"
14 maintainers("rblake-llnl")
15
16 version("develop", branch="master")
17 version("elecfem", branch="elec-fem")
18
19 variant("cuda", default=False, description="Build with cuda support")
20 variant("mfem", default=False, description="Build with mfem support")
21
22 depends_on("blas")
23 depends_on("lapack")
24 depends_on("mpi")
25 depends_on("cuda", when="+cuda")
26 depends_on("mfem+mpi+superlu-dist+lapack", when="+mfem")
27 depends_on("hypre+cuda", when="+mfem+cuda")
28 depends_on("[email protected]:", type="build")
29 depends_on("perl", type="build")
30
31 def cmake_args(self):
32 spec = self.spec
33 args = [
34 "-DLAPACK_LIB:PATH=" + ";".join(spec["lapack"].libs.libraries),
35 "-DBLAS_LIB:PATH=" + ";".join(spec["blas"].libs.libraries),
36 "-DENABLE_OPENMP:BOOL=ON",
37 "-DENABLE_MPI:BOOL=ON",
38 "-DENABLE_FIND_MPI:BOOL=OFF",
39 "-DMPI_C_COMPILER:STRING=" + spec["mpi"].mpicc,
40 "-DMPI_CXX_COMPILER:STRING=" + spec["mpi"].mpicxx,
41 "-DCMAKE_C_COMPILER:STRING=" + spec["mpi"].mpicc,
42 "-DCMAKE_CXX_COMPILER:STRING=" + spec["mpi"].mpicxx,
43 ]
44
45 if "+cuda" in self.spec:
46 args.append("-DENABLE_CUDA:BOOL=ON")
47 args.append("-DCUDA_TOOLKIT_ROOT:PATH=" + spec["cuda"].prefix)
48 else:
49 args.append("-DENABLE_CUDA:BOOL=OFF")
50
51 if "+mfem" in self.spec:
52 args.append("-DMFEM_DIR:PATH=" + spec["mfem"].prefix)
53 return args
54
[end of var/spack/repos/builtin/packages/cardioid/package.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/var/spack/repos/builtin/packages/cardioid/package.py b/var/spack/repos/builtin/packages/cardioid/package.py
--- a/var/spack/repos/builtin/packages/cardioid/package.py
+++ b/var/spack/repos/builtin/packages/cardioid/package.py
@@ -9,7 +9,7 @@
class Cardioid(CMakePackage):
"""Cardiac simulation suite."""
- homepage = "https://baasic.llnl.gov/comp-bio/cardioid-code.php"
+ homepage = "https://baasic.llnl.gov/comp-bio/cardioid-code"
git = "https://github.com/LLNL/cardioid.git"
maintainers("rblake-llnl")
| {"golden_diff": "diff --git a/var/spack/repos/builtin/packages/cardioid/package.py b/var/spack/repos/builtin/packages/cardioid/package.py\n--- a/var/spack/repos/builtin/packages/cardioid/package.py\n+++ b/var/spack/repos/builtin/packages/cardioid/package.py\n@@ -9,7 +9,7 @@\n class Cardioid(CMakePackage):\n \"\"\"Cardiac simulation suite.\"\"\"\n \n- homepage = \"https://baasic.llnl.gov/comp-bio/cardioid-code.php\"\n+ homepage = \"https://baasic.llnl.gov/comp-bio/cardioid-code\"\n git = \"https://github.com/LLNL/cardioid.git\"\n maintainers(\"rblake-llnl\")\n", "issue": "LLNL Cardioid homepage no longer exists\nhttps://github.com/spack/spack/blob/fe5865da0d4ee4480a85ee1257cb310c036b0c88/var/spack/repos/builtin/packages/cardioid/package.py#L12\r\n\r\n@rblake-llnl it looks like Cardioid's [home page](https://baasic.llnl.gov/comp-bio/cardioid-code.php) redirects to a 404 these days. You're listed as the maintainer. Has the home of cardioid moved?\n", "before_files": [{"content": "# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack.package import *\n\n\nclass Cardioid(CMakePackage):\n \"\"\"Cardiac simulation suite.\"\"\"\n\n homepage = \"https://baasic.llnl.gov/comp-bio/cardioid-code.php\"\n git = \"https://github.com/LLNL/cardioid.git\"\n maintainers(\"rblake-llnl\")\n\n version(\"develop\", branch=\"master\")\n version(\"elecfem\", branch=\"elec-fem\")\n\n variant(\"cuda\", default=False, description=\"Build with cuda support\")\n variant(\"mfem\", default=False, description=\"Build with mfem support\")\n\n depends_on(\"blas\")\n depends_on(\"lapack\")\n depends_on(\"mpi\")\n depends_on(\"cuda\", when=\"+cuda\")\n depends_on(\"mfem+mpi+superlu-dist+lapack\", when=\"+mfem\")\n depends_on(\"hypre+cuda\", when=\"+mfem+cuda\")\n depends_on(\"[email protected]:\", type=\"build\")\n depends_on(\"perl\", type=\"build\")\n\n def cmake_args(self):\n spec = self.spec\n args = [\n \"-DLAPACK_LIB:PATH=\" + \";\".join(spec[\"lapack\"].libs.libraries),\n \"-DBLAS_LIB:PATH=\" + \";\".join(spec[\"blas\"].libs.libraries),\n \"-DENABLE_OPENMP:BOOL=ON\",\n \"-DENABLE_MPI:BOOL=ON\",\n \"-DENABLE_FIND_MPI:BOOL=OFF\",\n \"-DMPI_C_COMPILER:STRING=\" + spec[\"mpi\"].mpicc,\n \"-DMPI_CXX_COMPILER:STRING=\" + spec[\"mpi\"].mpicxx,\n \"-DCMAKE_C_COMPILER:STRING=\" + spec[\"mpi\"].mpicc,\n \"-DCMAKE_CXX_COMPILER:STRING=\" + spec[\"mpi\"].mpicxx,\n ]\n\n if \"+cuda\" in self.spec:\n args.append(\"-DENABLE_CUDA:BOOL=ON\")\n args.append(\"-DCUDA_TOOLKIT_ROOT:PATH=\" + spec[\"cuda\"].prefix)\n else:\n args.append(\"-DENABLE_CUDA:BOOL=OFF\")\n\n if \"+mfem\" in self.spec:\n args.append(\"-DMFEM_DIR:PATH=\" + spec[\"mfem\"].prefix)\n return args\n", "path": "var/spack/repos/builtin/packages/cardioid/package.py"}]} | 1,311 | 157 |
gh_patches_debug_37491 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-3404 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
query string arrays are not fully displayed
##### Steps to reproduce the problem:
1. visit through mitmproxy/mitmdump e.g. http://example.com/?first=value&arr[]=foo+bar&arr[]=baz
2. Check the query parameters in the request
3. Notice that they contain more data than mitmproxy/mitmdump shows
##### Any other comments? What have you tried so far?
The following script shows all the data:
```
#!/usr/bin/env python3
from urllib.parse import urlparse, parse_qs
url = "http://example.com/?first=value&arr[]=foo+bar&arr[]=baz"
parts = urlparse(url)
print(parse_qs(parts.query))
```
Output:
`{'first': ['value'], 'arr[]': ['foo bar', 'baz']}`
But mitmproxy/mitmdump only shows:
```
first: value
arr[]: foo bar
```
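
The root of the discrepancy is dict-style versus pair-style decoding. A quick standard-library illustration — `parse_qsl` keeps one `(key, value)` tuple per occurrence, so repeated keys survive:

```python
from urllib.parse import parse_qsl

query = "first=value&arr[]=foo+bar&arr[]=baz"
# Unlike collapsing the pairs into a plain dict, parse_qsl preserves duplicates:
print(parse_qsl(query))
# [('first', 'value'), ('arr[]', 'foo bar'), ('arr[]', 'baz')]
```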
##### System information
Mitmproxy: 3.0.4
Python: 3.5.2
OpenSSL: OpenSSL 1.0.2g 1 Mar 2016
Platform: Linux-4.10.0-42-generic-x86_64-with-Ubuntu-16.04-xenial
</issue>
<code>
[start of mitmproxy/contentviews/base.py]
1 # Default view cutoff *in lines*
2 import typing
3
4 KEY_MAX = 30
5
6 TTextType = typing.Union[str, bytes] # FIXME: This should be either bytes or str ultimately.
7 TViewLine = typing.List[typing.Tuple[str, TTextType]]
8 TViewResult = typing.Tuple[str, typing.Iterator[TViewLine]]
9
10
11 class View:
12 name: str = None
13 content_types: typing.List[str] = []
14
15 def __call__(self, data: bytes, **metadata) -> TViewResult:
16 """
17 Transform raw data into human-readable output.
18
19 Args:
20 data: the data to decode/format.
21 metadata: optional keyword-only arguments for metadata. Implementations must not
22 rely on a given argument being present.
23
24 Returns:
25 A (description, content generator) tuple.
26
27 The content generator yields lists of (style, text) tuples, where each list represents
28 a single line. ``text`` is a unfiltered byte string which may need to be escaped,
29 depending on the used output.
30
31 Caveats:
32 The content generator must not yield tuples of tuples,
33 because urwid cannot process that. You have to yield a *list* of tuples per line.
34 """
35 raise NotImplementedError() # pragma: no cover
36
37
38 def format_dict(
39 d: typing.Mapping[TTextType, TTextType]
40 ) -> typing.Iterator[TViewLine]:
41 """
42 Helper function that transforms the given dictionary into a list of
43 [
44 ("key", key )
45 ("value", value)
46 ]
47 entries, where key is padded to a uniform width.
48 """
49
50 max_key_len = max((len(k) for k in d.keys()), default=0)
51 max_key_len = min((max_key_len, KEY_MAX), default=0)
52 for key, value in d.items():
53 if isinstance(key, bytes):
54 key += b":"
55 else:
56 key += ":"
57 key = key.ljust(max_key_len + 2)
58 yield [
59 ("header", key),
60 ("text", value)
61 ]
62
63
64 def format_text(text: TTextType) -> typing.Iterator[TViewLine]:
65 """
66 Helper function that transforms bytes into the view output format.
67 """
68 for line in text.splitlines():
69 yield [("text", line)]
70
[end of mitmproxy/contentviews/base.py]
[start of mitmproxy/contentviews/urlencoded.py]
1 from mitmproxy.net.http import url
2 from mitmproxy.coretypes import multidict
3 from . import base
4
5
6 class ViewURLEncoded(base.View):
7 name = "URL-encoded"
8 content_types = ["application/x-www-form-urlencoded"]
9
10 def __call__(self, data, **metadata):
11 try:
12 data = data.decode("ascii", "strict")
13 except ValueError:
14 return None
15 d = url.decode(data)
16 return "URLEncoded form", base.format_dict(multidict.MultiDict(d))
17
[end of mitmproxy/contentviews/urlencoded.py]
[start of mitmproxy/contentviews/query.py]
1 from typing import List # noqa
2
3 from . import base
4
5
6 class ViewQuery(base.View):
7 name = "Query"
8
9 def __call__(self, data, **metadata):
10 query = metadata.get("query")
11 if query:
12 return "Query", base.format_dict(query)
13 else:
14 return "Query", base.format_text("")
15
[end of mitmproxy/contentviews/query.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mitmproxy/contentviews/base.py b/mitmproxy/contentviews/base.py
--- a/mitmproxy/contentviews/base.py
+++ b/mitmproxy/contentviews/base.py
@@ -35,32 +35,52 @@
raise NotImplementedError() # pragma: no cover
-def format_dict(
- d: typing.Mapping[TTextType, TTextType]
-) -> typing.Iterator[TViewLine]:
+def format_pairs(
+ items: typing.Iterable[typing.Tuple[TTextType, TTextType]]
+)-> typing.Iterator[TViewLine]:
+
"""
- Helper function that transforms the given dictionary into a list of
+ Helper function that accepts a list of (k,v) pairs into a list of
[
- ("key", key )
+ ("key", key )
("value", value)
]
- entries, where key is padded to a uniform width.
+ where key is padded to a uniform width
"""
- max_key_len = max((len(k) for k in d.keys()), default=0)
+ max_key_len = max((len(k[0]) for k in items), default=0)
max_key_len = min((max_key_len, KEY_MAX), default=0)
- for key, value in d.items():
+
+ for key, value in items:
if isinstance(key, bytes):
+
key += b":"
else:
key += ":"
+
key = key.ljust(max_key_len + 2)
+
yield [
("header", key),
("text", value)
]
+def format_dict(
+ d: typing.Mapping[TTextType, TTextType]
+) -> typing.Iterator[TViewLine]:
+ """
+ Helper function that transforms the given dictionary into a list of
+ [
+ ("key", key )
+ ("value", value)
+ ]
+ entries, where key is padded to a uniform width.
+ """
+
+ return format_pairs(d.items())
+
+
def format_text(text: TTextType) -> typing.Iterator[TViewLine]:
"""
Helper function that transforms bytes into the view output format.
diff --git a/mitmproxy/contentviews/query.py b/mitmproxy/contentviews/query.py
--- a/mitmproxy/contentviews/query.py
+++ b/mitmproxy/contentviews/query.py
@@ -9,6 +9,6 @@
def __call__(self, data, **metadata):
query = metadata.get("query")
if query:
- return "Query", base.format_dict(query)
+ return "Query", base.format_pairs(query.items(multi=True))
else:
return "Query", base.format_text("")
diff --git a/mitmproxy/contentviews/urlencoded.py b/mitmproxy/contentviews/urlencoded.py
--- a/mitmproxy/contentviews/urlencoded.py
+++ b/mitmproxy/contentviews/urlencoded.py
@@ -1,5 +1,4 @@
from mitmproxy.net.http import url
-from mitmproxy.coretypes import multidict
from . import base
@@ -13,4 +12,4 @@
except ValueError:
return None
d = url.decode(data)
- return "URLEncoded form", base.format_dict(multidict.MultiDict(d))
+ return "URLEncoded form", base.format_pairs(d)
| {"golden_diff": "diff --git a/mitmproxy/contentviews/base.py b/mitmproxy/contentviews/base.py\n--- a/mitmproxy/contentviews/base.py\n+++ b/mitmproxy/contentviews/base.py\n@@ -35,32 +35,52 @@\n raise NotImplementedError() # pragma: no cover\n \n \n-def format_dict(\n- d: typing.Mapping[TTextType, TTextType]\n-) -> typing.Iterator[TViewLine]:\n+def format_pairs(\n+ items: typing.Iterable[typing.Tuple[TTextType, TTextType]]\n+)-> typing.Iterator[TViewLine]:\n+\n \"\"\"\n- Helper function that transforms the given dictionary into a list of\n+ Helper function that accepts a list of (k,v) pairs into a list of\n [\n- (\"key\", key )\n+ (\"key\", key )\n (\"value\", value)\n ]\n- entries, where key is padded to a uniform width.\n+ where key is padded to a uniform width\n \"\"\"\n \n- max_key_len = max((len(k) for k in d.keys()), default=0)\n+ max_key_len = max((len(k[0]) for k in items), default=0)\n max_key_len = min((max_key_len, KEY_MAX), default=0)\n- for key, value in d.items():\n+\n+ for key, value in items:\n if isinstance(key, bytes):\n+\n key += b\":\"\n else:\n key += \":\"\n+\n key = key.ljust(max_key_len + 2)\n+\n yield [\n (\"header\", key),\n (\"text\", value)\n ]\n \n \n+def format_dict(\n+ d: typing.Mapping[TTextType, TTextType]\n+) -> typing.Iterator[TViewLine]:\n+ \"\"\"\n+ Helper function that transforms the given dictionary into a list of\n+ [\n+ (\"key\", key )\n+ (\"value\", value)\n+ ]\n+ entries, where key is padded to a uniform width.\n+ \"\"\"\n+\n+ return format_pairs(d.items())\n+\n+\n def format_text(text: TTextType) -> typing.Iterator[TViewLine]:\n \"\"\"\n Helper function that transforms bytes into the view output format.\ndiff --git a/mitmproxy/contentviews/query.py b/mitmproxy/contentviews/query.py\n--- a/mitmproxy/contentviews/query.py\n+++ b/mitmproxy/contentviews/query.py\n@@ -9,6 +9,6 @@\n def __call__(self, data, **metadata):\n query = metadata.get(\"query\")\n if query:\n- return \"Query\", base.format_dict(query)\n+ return \"Query\", base.format_pairs(query.items(multi=True))\n else:\n return \"Query\", base.format_text(\"\")\ndiff --git a/mitmproxy/contentviews/urlencoded.py b/mitmproxy/contentviews/urlencoded.py\n--- a/mitmproxy/contentviews/urlencoded.py\n+++ b/mitmproxy/contentviews/urlencoded.py\n@@ -1,5 +1,4 @@\n from mitmproxy.net.http import url\n-from mitmproxy.coretypes import multidict\n from . import base\n \n \n@@ -13,4 +12,4 @@\n except ValueError:\n return None\n d = url.decode(data)\n- return \"URLEncoded form\", base.format_dict(multidict.MultiDict(d))\n+ return \"URLEncoded form\", base.format_pairs(d)\n", "issue": "query string arrays are not fully displayed\n##### Steps to reproduce the problem:\r\n\r\n1. visit through mitmproxy/mitmdump e.g. http://example.com/?first=value&arr[]=foo+bar&arr[]=baz\r\n2. Check the query parameters in the request\r\n3. Notice that they contain more data than mitmproxy/mitmdump shows\r\n\r\n##### Any other comments? What have you tried so far?\r\n\r\nThe following script shows all the data:\r\n\r\n```\r\n#!/usr/bin/env python3\r\n\r\nfrom urllib.parse import urlparse, parse_qs\r\n\r\nurl = \"http://example.com/?first=value&arr[]=foo+bar&arr[]=baz\"\r\nparts = urlparse(url)\r\nprint(parse_qs(parts.query))\r\n```\r\n\r\nOutput:\r\n`{'first': ['value'], 'arr[]': ['foo bar', 'baz']}`\r\n\r\nBut mitmproxy/mitmdump only shows:\r\n```\r\n first: value\r\n arr[]: foo bar\r\n```\r\n\r\n##### System information\r\n\r\n<!-- Paste the output of \"mitmproxy --version\" here. 
-->\r\n\r\nMitmproxy: 3.0.4\r\nPython: 3.5.2\r\nOpenSSL: OpenSSL 1.0.2g 1 Mar 2016\r\nPlatform: Linux-4.10.0-42-generic-x86_64-with-Ubuntu-16.04-xenial\r\n\r\n<!-- Please use the mitmproxy forums (https://discourse.mitmproxy.org/) for support/how-to questions. Thanks! :) -->\r\n\n", "before_files": [{"content": "# Default view cutoff *in lines*\nimport typing\n\nKEY_MAX = 30\n\nTTextType = typing.Union[str, bytes] # FIXME: This should be either bytes or str ultimately.\nTViewLine = typing.List[typing.Tuple[str, TTextType]]\nTViewResult = typing.Tuple[str, typing.Iterator[TViewLine]]\n\n\nclass View:\n name: str = None\n content_types: typing.List[str] = []\n\n def __call__(self, data: bytes, **metadata) -> TViewResult:\n \"\"\"\n Transform raw data into human-readable output.\n\n Args:\n data: the data to decode/format.\n metadata: optional keyword-only arguments for metadata. Implementations must not\n rely on a given argument being present.\n\n Returns:\n A (description, content generator) tuple.\n\n The content generator yields lists of (style, text) tuples, where each list represents\n a single line. ``text`` is a unfiltered byte string which may need to be escaped,\n depending on the used output.\n\n Caveats:\n The content generator must not yield tuples of tuples,\n because urwid cannot process that. You have to yield a *list* of tuples per line.\n \"\"\"\n raise NotImplementedError() # pragma: no cover\n\n\ndef format_dict(\n d: typing.Mapping[TTextType, TTextType]\n) -> typing.Iterator[TViewLine]:\n \"\"\"\n Helper function that transforms the given dictionary into a list of\n [\n (\"key\", key )\n (\"value\", value)\n ]\n entries, where key is padded to a uniform width.\n \"\"\"\n\n max_key_len = max((len(k) for k in d.keys()), default=0)\n max_key_len = min((max_key_len, KEY_MAX), default=0)\n for key, value in d.items():\n if isinstance(key, bytes):\n key += b\":\"\n else:\n key += \":\"\n key = key.ljust(max_key_len + 2)\n yield [\n (\"header\", key),\n (\"text\", value)\n ]\n\n\ndef format_text(text: TTextType) -> typing.Iterator[TViewLine]:\n \"\"\"\n Helper function that transforms bytes into the view output format.\n \"\"\"\n for line in text.splitlines():\n yield [(\"text\", line)]\n", "path": "mitmproxy/contentviews/base.py"}, {"content": "from mitmproxy.net.http import url\nfrom mitmproxy.coretypes import multidict\nfrom . import base\n\n\nclass ViewURLEncoded(base.View):\n name = \"URL-encoded\"\n content_types = [\"application/x-www-form-urlencoded\"]\n\n def __call__(self, data, **metadata):\n try:\n data = data.decode(\"ascii\", \"strict\")\n except ValueError:\n return None\n d = url.decode(data)\n return \"URLEncoded form\", base.format_dict(multidict.MultiDict(d))\n", "path": "mitmproxy/contentviews/urlencoded.py"}, {"content": "from typing import List # noqa\n\nfrom . import base\n\n\nclass ViewQuery(base.View):\n name = \"Query\"\n\n def __call__(self, data, **metadata):\n query = metadata.get(\"query\")\n if query:\n return \"Query\", base.format_dict(query)\n else:\n return \"Query\", base.format_text(\"\")\n", "path": "mitmproxy/contentviews/query.py"}]} | 1,771 | 728 |
gh_patches_debug_427 | rasdani/github-patches | git_diff | python__python-docs-es-1787 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Translate 'using/unix.po'
This file needs to be 100% translated.
The rendered version of this file will be available at https://docs.python.org/es/3.10/using/unix.html once translated.
Meanwhile, the English version is shown.
Current stats for `using/unix.po`:
* Fuzzy: 1
* Percent translated: 88.9%
* Entries: 40 / 45
* Untranslated: 5
Please comment here if you want this file to be assigned to you, and a member will assign it to you as soon as possible so you can start working on it.
Remember to follow the steps in our [Contributing Guide](https://python-docs-es.readthedocs.io/page/CONTRIBUTING.html).
</issue>
<code>
[start of scripts/translate.py]
1 import os
2 import re
3 import sys
4 from typing import Dict, Tuple
5
6 import polib
7
8 VERBOSE = False
9 DEBUG = False
10 SKIP_TRANSLATED_ENTRIES = True
11
12 try:
13 from deep_translator import GoogleTranslator
14 except ImportError:
15 print("Error: This util script needs `deep_translator` to be installed")
16 sys.exit(1)
17
18 _patterns = [
19 ":c:func:`[^`]+`",
20 ":c:type:`[^`]+`",
21 ":c:macro:`[^`]+`",
22 ":c:member:`[^`]+`",
23 ":c:data:`[^`]+`",
24 ":py:data:`[^`]+`",
25 ":py:mod:`[^`]+`",
26 ":func:`[^`]+`",
27 ":mod:`[^`]+`",
28 ":ref:`[^`]+`",
29 ":class:`[^`]+`",
30 ":pep:`[^`]+`",
31 ":data:`[^`]+`",
32 ":exc:`[^`]+`",
33 ":term:`[^`]+`",
34 ":meth:`[^`]+`",
35 ":envvar:`[^`]+`",
36 ":file:`[^`]+`",
37 ":attr:`[^`]+`",
38 ":const:`[^`]+`",
39 ":issue:`[^`]+`",
40 ":opcode:`[^`]+`",
41 ":option:`[^`]+`",
42 ":program:`[^`]+`",
43 ":keyword:`[^`]+`",
44 ":RFC:`[^`]+`",
45 ":rfc:`[^`]+`",
46 ":doc:`[^`]+`",
47 ":manpage:`[^`]+`",
48 ":sup:`[^`]+`",
49 "``[^`]+``",
50 "`[^`]+`__",
51 "`[^`]+`_",
52 "\*\*[^\*]+\*\*", # bold text between **
53 "\*[^\*]+\*", # italic text between *
54 ]
55
56 _exps = [re.compile(e) for e in _patterns]
57
58 def protect_sphinx_directives(s: str) -> Tuple[dict, str]:
59 """
60 Parameters:
61 string containing the text to translate
62
63 Returns:
64 dictionary containing all the placeholder text as keys
65 and the correct value.
66 """
67
68 i = 0
69 d: Dict[str, str] = {}
70 for exp in _exps:
71 matches = exp.findall(s)
72 if DEBUG:
73 print(exp, matches)
74 for match in matches:
75 ph = f"XASDF{str(i).zfill(2)}"
76 s = s.replace(match, ph)
77 if ph in d and VERBOSE:
78 print(f"Error: {ph} is already in the dictionary")
79 print("new", match)
80 print("old", d[ph])
81 d[ph] = match
82 i += 1
83 return d, s
84
85
86 def undo_sphinx_directives_protection(placeholders: dict, translated_text: str) -> str:
87 for ph, value in placeholders.items():
88 translated_text = translated_text.replace(ph, value)
89 if DEBUG:
90 print(ph, value)
91 print(translated_text)
92 return translated_text
93
94
95 if __name__ == "__main__":
96 filename = sys.argv[1]
97 if not os.path.isfile(filename):
98 print(f"File not found: '{filename}'")
99 sys.exit(-1)
100
101 po = polib.pofile(filename)
102 translator = GoogleTranslator(source="en", target="es")
103
104 for entry in po:
105 # If the entry has already a translation, skip.
106 if SKIP_TRANSLATED_ENTRIES and entry.msgstr:
107 continue
108
109 print("\nEN|", entry.msgid)
110 placeholders, temp_text = protect_sphinx_directives(entry.msgid)
111 if VERBOSE:
112 print(temp_text)
113 print(placeholders)
114
115 # Translate the temporary text without sphinx statements
116 translated_text = translator.translate(temp_text)
117
118 # Recover sphinx statements
119 real_text = undo_sphinx_directives_protection(placeholders, translated_text)
120 print("ES|", real_text)
121
122 # Replace the po file translated entry
123 entry.msgstr = real_text
124
125 # Save the file after all the entries are translated
126 po.save()
127
[end of scripts/translate.py]
</code>
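
To see the placeholder round-trip from the script above in isolation, a sketch (it assumes `deep_translator` is installed so the module imports cleanly, and the string substitution stands in for the real translation API call):

```python
from translate import protect_sphinx_directives, undo_sphinx_directives_protection

src = "Use :func:`len` on **any** sequence."
placeholders, masked = protect_sphinx_directives(src)
# masked == "Use XASDF00 on XASDF01 sequence." — now safe to machine-translate.
fake_translation = masked.replace("Use", "Usa").replace("sequence", "secuencia")
print(undo_sphinx_directives_protection(placeholders, fake_translation))
# Usa :func:`len` on **any** secuencia.
```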
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/translate.py b/scripts/translate.py
--- a/scripts/translate.py
+++ b/scripts/translate.py
@@ -44,6 +44,7 @@
":RFC:`[^`]+`",
":rfc:`[^`]+`",
":doc:`[^`]+`",
+ ":source:`[^`]+`",
":manpage:`[^`]+`",
":sup:`[^`]+`",
"``[^`]+``",
| {"golden_diff": "diff --git a/scripts/translate.py b/scripts/translate.py\n--- a/scripts/translate.py\n+++ b/scripts/translate.py\n@@ -44,6 +44,7 @@\n \":RFC:`[^`]+`\",\n \":rfc:`[^`]+`\",\n \":doc:`[^`]+`\",\n+ \":source:`[^`]+`\",\n \":manpage:`[^`]+`\",\n \":sup:`[^`]+`\",\n \"``[^`]+``\",\n", "issue": "Translate 'using/unix.po'\nThis needs to reach 100% translated.\n\nThe rendered version of this file will be available at https://docs.python.org/es/3.10/using/unix.html once translated.\nMeanwhile, the English version is shown.\n\nCurrent stats for `using/unix.po`:\n\n* Fuzzy: 1\n* Percent translated: 88.9%\n* Entries: 40 / 45\n* Untranslated: 5\n\nPlease, comment here if you want this file to be assigned to you and an member will assign it to you as soon as possible, so you can start working on it.\n\nRemember to follow the steps in our [Contributing Guide](https://python-docs-es.readthedocs.io/page/CONTRIBUTING.html).\n", "before_files": [{"content": "import os\nimport re\nimport sys\nfrom typing import Dict, Tuple\n\nimport polib\n\nVERBOSE = False\nDEBUG = False\nSKIP_TRANSLATED_ENTRIES = True\n\ntry:\n from deep_translator import GoogleTranslator\nexcept ImportError:\n print(\"Error: This util script needs `deep_translator` to be installed\")\n sys.exit(1)\n\n_patterns = [\n \":c:func:`[^`]+`\",\n \":c:type:`[^`]+`\",\n \":c:macro:`[^`]+`\",\n \":c:member:`[^`]+`\",\n \":c:data:`[^`]+`\",\n \":py:data:`[^`]+`\",\n \":py:mod:`[^`]+`\",\n \":func:`[^`]+`\",\n \":mod:`[^`]+`\",\n \":ref:`[^`]+`\",\n \":class:`[^`]+`\",\n \":pep:`[^`]+`\",\n \":data:`[^`]+`\",\n \":exc:`[^`]+`\",\n \":term:`[^`]+`\",\n \":meth:`[^`]+`\",\n \":envvar:`[^`]+`\",\n \":file:`[^`]+`\",\n \":attr:`[^`]+`\",\n \":const:`[^`]+`\",\n \":issue:`[^`]+`\",\n \":opcode:`[^`]+`\",\n \":option:`[^`]+`\",\n \":program:`[^`]+`\",\n \":keyword:`[^`]+`\",\n \":RFC:`[^`]+`\",\n \":rfc:`[^`]+`\",\n \":doc:`[^`]+`\",\n \":manpage:`[^`]+`\",\n \":sup:`[^`]+`\",\n \"``[^`]+``\",\n \"`[^`]+`__\",\n \"`[^`]+`_\",\n \"\\*\\*[^\\*]+\\*\\*\", # bold text between **\n \"\\*[^\\*]+\\*\", # italic text between *\n]\n\n_exps = [re.compile(e) for e in _patterns]\n\ndef protect_sphinx_directives(s: str) -> Tuple[dict, str]:\n \"\"\"\n Parameters:\n string containing the text to translate\n\n Returns:\n dictionary containing all the placeholder text as keys\n and the correct value.\n \"\"\"\n\n i = 0\n d: Dict[str, str] = {}\n for exp in _exps:\n matches = exp.findall(s)\n if DEBUG:\n print(exp, matches)\n for match in matches:\n ph = f\"XASDF{str(i).zfill(2)}\"\n s = s.replace(match, ph)\n if ph in d and VERBOSE:\n print(f\"Error: {ph} is already in the dictionary\")\n print(\"new\", match)\n print(\"old\", d[ph])\n d[ph] = match\n i += 1\n return d, s\n\n\ndef undo_sphinx_directives_protection(placeholders: dict, translated_text: str) -> str:\n for ph, value in placeholders.items():\n translated_text = translated_text.replace(ph, value)\n if DEBUG:\n print(ph, value)\n print(translated_text)\n return translated_text\n\n\nif __name__ == \"__main__\":\n filename = sys.argv[1]\n if not os.path.isfile(filename):\n print(f\"File not found: '{filename}'\")\n sys.exit(-1)\n\n po = polib.pofile(filename)\n translator = GoogleTranslator(source=\"en\", target=\"es\")\n\n for entry in po:\n # If the entry has already a translation, skip.\n if SKIP_TRANSLATED_ENTRIES and entry.msgstr:\n continue\n\n print(\"\\nEN|\", entry.msgid)\n placeholders, temp_text = protect_sphinx_directives(entry.msgid)\n if VERBOSE:\n print(temp_text)\n 
print(placeholders)\n\n # Translate the temporary text without sphinx statements\n translated_text = translator.translate(temp_text)\n\n # Recover sphinx statements\n real_text = undo_sphinx_directives_protection(placeholders, translated_text)\n print(\"ES|\", real_text)\n\n # Replace the po file translated entry\n entry.msgstr = real_text\n\n # Save the file after all the entries are translated\n po.save()\n", "path": "scripts/translate.py"}]} | 1,888 | 106 |
gh_patches_debug_56800 | rasdani/github-patches | git_diff | wright-group__WrightTools-522 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
hide fit functionality
</issue>
<code>
[start of WrightTools/__init__.py]
1 """WrightTools init."""
2 # flake8: noqa
3
4
5 # --- import --------------------------------------------------------------------------------------
6
7
8 import sys as _sys
9
10 from .__version__ import *
11 from . import artists
12 from . import collection
13 from . import data
14 from . import diagrams
15 from . import fit
16 from . import kit
17 from . import units
18 from . import exceptions
19
20 from ._open import *
21 from .collection._collection import *
22 from .data._data import *
23
24
25 # --- rcparams ------------------------------------------------------------------------------------
26
27
28 if int(_sys.version.split('.')[0]) > 2:
29 artists.apply_rcparams('fast')
30
[end of WrightTools/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/WrightTools/__init__.py b/WrightTools/__init__.py
--- a/WrightTools/__init__.py
+++ b/WrightTools/__init__.py
@@ -12,7 +12,6 @@
from . import collection
from . import data
from . import diagrams
-from . import fit
from . import kit
from . import units
from . import exceptions
| {"golden_diff": "diff --git a/WrightTools/__init__.py b/WrightTools/__init__.py\n--- a/WrightTools/__init__.py\n+++ b/WrightTools/__init__.py\n@@ -12,7 +12,6 @@\n from . import collection\n from . import data\n from . import diagrams\n-from . import fit\n from . import kit\n from . import units\n from . import exceptions\n", "issue": "hide fit functionality\n\n", "before_files": [{"content": "\"\"\"WrightTools init.\"\"\"\n# flake8: noqa\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport sys as _sys\n\nfrom .__version__ import *\nfrom . import artists\nfrom . import collection\nfrom . import data\nfrom . import diagrams\nfrom . import fit\nfrom . import kit\nfrom . import units\nfrom . import exceptions\n\nfrom ._open import *\nfrom .collection._collection import *\nfrom .data._data import *\n\n\n# --- rcparams ------------------------------------------------------------------------------------\n\n\nif int(_sys.version.split('.')[0]) > 2:\n artists.apply_rcparams('fast')\n", "path": "WrightTools/__init__.py"}]} | 720 | 88 |
gh_patches_debug_7839 | rasdani/github-patches | git_diff | getsentry__sentry-25 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Limitations on SENTRY_KEY not documented
I assumed that SENTRY_KEY was just any random string that should be unique to a deployment and kept secret (i.e., serving a similar purpose to django's SECRET_KEY), so I generated a random string to use there.
It turns out that it's used in a URL for the feeds:
```
url(r'^feeds/%s/messages.xml$' % KEY, MessageFeed(), name='sentry-feed-messages'),
```
(in sentry.urls)
Mine happened to have an '[' in it which caused a very confusing error about an unterminated regex.
It should be documented that SENTRY_KEY ends up in a URL pattern (and, further, that django will then try to parse it as a regexp when doing reverse lookups), and thus that it can't contain most punctuation.
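
A defensive way to interpolate arbitrary key material into a pattern — essentially what the fix later in this entry does — is `re.escape` (sketch; the `KEY` value here is invented):

```python
import re

KEY = "abc[123"  # contains a regex metacharacter, like the reporter's key
pattern = r'^feeds/%s/messages.xml$' % re.escape(KEY)
# re.escape neutralizes '[', so this compiles instead of raising
# an "unterminated character set" error:
re.compile(pattern)
```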
</issue>
<code>
[start of sentry/urls.py]
1 import os
2
3 from django.conf.urls.defaults import *
4
5 from sentry.conf import KEY
6 from sentry import views
7 from sentry.feeds import MessageFeed, SummaryFeed
8
9 SENTRY_ROOT = os.path.dirname(__file__)
10
11 urlpatterns = patterns('',
12 url(r'^_media/(?P<path>.*)$', 'django.views.static.serve',
13 {'document_root': os.path.join(SENTRY_ROOT, 'media')}, name='sentry-media'),
14
15 # Feeds
16
17 url(r'^feeds/%s/messages.xml$' % KEY, MessageFeed(), name='sentry-feed-messages'),
18 url(r'^feeds/%s/summaries.xml$' % KEY, SummaryFeed(), name='sentry-feed-summaries'),
19
20 # JS and API
21
22 url(r'^jsapi/$', views.ajax_handler, name='sentry-ajax'),
23 url(r'^store/$', views.store, name='sentry-store'),
24
25 # Normal views
26
27 url(r'^login$', views.login, name='sentry-login'),
28 url(r'^logout$', views.logout, name='sentry-logout'),
29 url(r'^group/(\d+)$', views.group, name='sentry-group'),
30 url(r'^group/(\d+)/messages$', views.group_message_list, name='sentry-group-messages'),
31 url(r'^group/(\d+)/messages/(\d+)$', views.group_message_details, name='sentry-group-message'),
32 url(r'^group/(\d+)/actions/([\w_-]+)', views.group_plugin_action, name='sentry-group-plugin-action'),
33
34 url(r'^$', views.index, name='sentry'),
35 )
36
[end of sentry/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sentry/urls.py b/sentry/urls.py
--- a/sentry/urls.py
+++ b/sentry/urls.py
@@ -1,4 +1,5 @@
import os
+import re
from django.conf.urls.defaults import *
@@ -14,8 +15,8 @@
# Feeds
- url(r'^feeds/%s/messages.xml$' % KEY, MessageFeed(), name='sentry-feed-messages'),
- url(r'^feeds/%s/summaries.xml$' % KEY, SummaryFeed(), name='sentry-feed-summaries'),
+ url(r'^feeds/%s/messages.xml$' % re.escape(KEY), MessageFeed(), name='sentry-feed-messages'),
+ url(r'^feeds/%s/summaries.xml$' % re.escape(KEY), SummaryFeed(), name='sentry-feed-summaries'),
# JS and API
| {"golden_diff": "diff --git a/sentry/urls.py b/sentry/urls.py\n--- a/sentry/urls.py\n+++ b/sentry/urls.py\n@@ -1,4 +1,5 @@\n import os\n+import re\n \n from django.conf.urls.defaults import *\n \n@@ -14,8 +15,8 @@\n \n # Feeds\n \n- url(r'^feeds/%s/messages.xml$' % KEY, MessageFeed(), name='sentry-feed-messages'),\n- url(r'^feeds/%s/summaries.xml$' % KEY, SummaryFeed(), name='sentry-feed-summaries'),\n+ url(r'^feeds/%s/messages.xml$' % re.escape(KEY), MessageFeed(), name='sentry-feed-messages'),\n+ url(r'^feeds/%s/summaries.xml$' % re.escape(KEY), SummaryFeed(), name='sentry-feed-summaries'),\n \n # JS and API\n", "issue": "Limitations on SENTRY_KEY not documented\nI assumed that SENTRY_KEY was just any random string that should be unique to a deployment and kept secret (ie, serving a similar purpose to django's SECRET_KEY) so I generated a random string to use there. \n\nIt turns out that it's used in a URL for the feeds:\n\n```\nurl(r'^feeds/%s/messages.xml$' % KEY, MessageFeed(), name='sentry-feed-messages'),\n```\n\n(in sentry.urls)\n\nMine happened to have an '[' in it which caused a very confusing error about an unterminated regex. \n\nIt should be documented that SENTRY_KEY will make it into a URL (and further that django will try to then parse it as a regexp when trying to do reverse lookups) and thus can't have most punctuation. \n\n", "before_files": [{"content": "import os\n\nfrom django.conf.urls.defaults import *\n\nfrom sentry.conf import KEY\nfrom sentry import views\nfrom sentry.feeds import MessageFeed, SummaryFeed\n\nSENTRY_ROOT = os.path.dirname(__file__) \n\nurlpatterns = patterns('',\n url(r'^_media/(?P<path>.*)$', 'django.views.static.serve',\n {'document_root': os.path.join(SENTRY_ROOT, 'media')}, name='sentry-media'),\n\n # Feeds\n\n url(r'^feeds/%s/messages.xml$' % KEY, MessageFeed(), name='sentry-feed-messages'),\n url(r'^feeds/%s/summaries.xml$' % KEY, SummaryFeed(), name='sentry-feed-summaries'),\n\n # JS and API\n\n url(r'^jsapi/$', views.ajax_handler, name='sentry-ajax'),\n url(r'^store/$', views.store, name='sentry-store'),\n \n # Normal views\n\n url(r'^login$', views.login, name='sentry-login'),\n url(r'^logout$', views.logout, name='sentry-logout'),\n url(r'^group/(\\d+)$', views.group, name='sentry-group'),\n url(r'^group/(\\d+)/messages$', views.group_message_list, name='sentry-group-messages'),\n url(r'^group/(\\d+)/messages/(\\d+)$', views.group_message_details, name='sentry-group-message'),\n url(r'^group/(\\d+)/actions/([\\w_-]+)', views.group_plugin_action, name='sentry-group-plugin-action'),\n\n url(r'^$', views.index, name='sentry'),\n)\n", "path": "sentry/urls.py"}]} | 1,113 | 195 |
gh_patches_debug_14750 | rasdani/github-patches | git_diff | Qiskit__qiskit-4721 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
circuit -> schedule raises exception
### Information
- **Qiskit Terra version**: master
- **Python version**:
- **Operating system**:
### What is the current behavior?
```python
ghz = QuantumCircuit(5, 5)
ghz.h(0)
ghz.cx(range(4), range(1,5))
ghz.barrier()
ghz.measure(range(5), range(5))
sch = schedule(ghz, backend)
```
gives:
```
AttributeError: 'NoneType' object has no attribute 'instruction_schedule_map'
```
This works on older versions.
### Suggested solutions
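
One direction, sketched below (it matches the guard that the fix later in this entry adds; only the inline comment is mine): inside `schedule()`, check the pulse defaults before dereferencing them.

```python
defaults = backend.defaults()
if defaults is None:
    # Backends without pulse support (e.g. some simulators) return None here.
    raise QiskitError("The backend defaults are unavailable. The backend may "
                      "not support pulse.")
inst_map = defaults.instruction_schedule_map
```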
</issue>
<code>
[start of qiskit/compiler/schedule.py]
1 # -*- coding: utf-8 -*-
2
3 # This code is part of Qiskit.
4 #
5 # (C) Copyright IBM 2019.
6 #
7 # This code is licensed under the Apache License, Version 2.0. You may
8 # obtain a copy of this license in the LICENSE.txt file in the root directory
9 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
10 #
11 # Any modifications or derivative works of this code must retain this
12 # copyright notice, and modified files need to carry a notice indicating
13 # that they have been altered from the originals.
14
15 """
16 Convenience entry point into pulse scheduling, requiring only a circuit and a backend. For more
17 control over pulse scheduling, look at `qiskit.scheduler.schedule_circuit`.
18 """
19 import logging
20
21 from time import time
22 from typing import List, Optional, Union
23
24 from qiskit.circuit.quantumcircuit import QuantumCircuit
25 from qiskit.exceptions import QiskitError
26 from qiskit.pulse import InstructionScheduleMap, Schedule
27 from qiskit.providers import BaseBackend
28 from qiskit.scheduler import ScheduleConfig
29 from qiskit.scheduler.schedule_circuit import schedule_circuit
30
31 LOG = logging.getLogger(__name__)
32
33
34 def _log_schedule_time(start_time, end_time):
35 log_msg = "Total Scheduling Time - %.5f (ms)" % ((end_time - start_time) * 1000)
36 LOG.info(log_msg)
37
38
39 def schedule(circuits: Union[QuantumCircuit, List[QuantumCircuit]],
40 backend: Optional[BaseBackend] = None,
41 inst_map: Optional[InstructionScheduleMap] = None,
42 meas_map: Optional[List[List[int]]] = None,
43 method: Optional[Union[str, List[str]]] = None) -> Union[Schedule, List[Schedule]]:
44 """
45 Schedule a circuit to a pulse ``Schedule``, using the backend, according to any specified
46 methods. Supported methods are documented in :py:mod:`qiskit.scheduler.schedule_circuit`.
47
48 Args:
49 circuits: The quantum circuit or circuits to translate
50 backend: A backend instance, which contains hardware-specific data required for scheduling
51 inst_map: Mapping of circuit operations to pulse schedules. If ``None``, defaults to the
52 ``backend``\'s ``instruction_schedule_map``
53 meas_map: List of sets of qubits that must be measured together. If ``None``, defaults to
54 the ``backend``\'s ``meas_map``
55 method: Optionally specify a particular scheduling method
56
57 Returns:
58 A pulse ``Schedule`` that implements the input circuit
59
60 Raises:
61 QiskitError: If ``inst_map`` and ``meas_map`` are not passed and ``backend`` is not passed
62 """
63 start_time = time()
64 if inst_map is None:
65 if backend is None:
66 raise QiskitError("Must supply either a backend or InstructionScheduleMap for "
67 "scheduling passes.")
68 inst_map = backend.defaults().instruction_schedule_map
69 if meas_map is None:
70 if backend is None:
71 raise QiskitError("Must supply either a backend or a meas_map for scheduling passes.")
72 meas_map = backend.configuration().meas_map
73
74 schedule_config = ScheduleConfig(inst_map=inst_map, meas_map=meas_map)
75 circuits = circuits if isinstance(circuits, list) else [circuits]
76 schedules = [schedule_circuit(circuit, schedule_config, method) for circuit in circuits]
77 end_time = time()
78 _log_schedule_time(start_time, end_time)
79 return schedules[0] if len(schedules) == 1 else schedules
80
[end of qiskit/compiler/schedule.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/qiskit/compiler/schedule.py b/qiskit/compiler/schedule.py
--- a/qiskit/compiler/schedule.py
+++ b/qiskit/compiler/schedule.py
@@ -65,7 +65,11 @@
if backend is None:
raise QiskitError("Must supply either a backend or InstructionScheduleMap for "
"scheduling passes.")
- inst_map = backend.defaults().instruction_schedule_map
+ defaults = backend.defaults()
+ if defaults is None:
+ raise QiskitError("The backend defaults are unavailable. The backend may not "
+ "support pulse.")
+ inst_map = defaults.instruction_schedule_map
if meas_map is None:
if backend is None:
raise QiskitError("Must supply either a backend or a meas_map for scheduling passes.")
| {"golden_diff": "diff --git a/qiskit/compiler/schedule.py b/qiskit/compiler/schedule.py\n--- a/qiskit/compiler/schedule.py\n+++ b/qiskit/compiler/schedule.py\n@@ -65,7 +65,11 @@\n if backend is None:\n raise QiskitError(\"Must supply either a backend or InstructionScheduleMap for \"\n \"scheduling passes.\")\n- inst_map = backend.defaults().instruction_schedule_map\n+ defaults = backend.defaults()\n+ if defaults is None:\n+ raise QiskitError(\"The backend defaults are unavailable. The backend may not \"\n+ \"support pulse.\")\n+ inst_map = defaults.instruction_schedule_map\n if meas_map is None:\n if backend is None:\n raise QiskitError(\"Must supply either a backend or a meas_map for scheduling passes.\")\n", "issue": "circuit -> schedule raises exception\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues -->\r\n\r\n### Information\r\n\r\n- **Qiskit Terra version**: master\r\n- **Python version**:\r\n- **Operating system**:\r\n\r\n### What is the current behavior?\r\n```python\r\nghz = QuantumCircuit(5, 5)\r\nghz.h(0)\r\nghz.cx(range(4), range(1,5))\r\nghz.barrier()\r\nghz.measure(range(5), range(5))\r\n\r\nsch = schedule(ghz, backend)\r\n```\r\n\r\ngives:\r\n\r\nAttributeError: 'NoneType' object has no attribute 'instruction_schedule_map'\r\n\r\nThis works on older versions.\r\n\r\n\r\n### Steps to reproduce the problem\r\n\r\n\r\n\r\n### What is the expected behavior?\r\n\r\n\r\n\r\n### Suggested solutions\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\nConvenience entry point into pulse scheduling, requiring only a circuit and a backend. For more\ncontrol over pulse scheduling, look at `qiskit.scheduler.schedule_circuit`.\n\"\"\"\nimport logging\n\nfrom time import time\nfrom typing import List, Optional, Union\n\nfrom qiskit.circuit.quantumcircuit import QuantumCircuit\nfrom qiskit.exceptions import QiskitError\nfrom qiskit.pulse import InstructionScheduleMap, Schedule\nfrom qiskit.providers import BaseBackend\nfrom qiskit.scheduler import ScheduleConfig\nfrom qiskit.scheduler.schedule_circuit import schedule_circuit\n\nLOG = logging.getLogger(__name__)\n\n\ndef _log_schedule_time(start_time, end_time):\n log_msg = \"Total Scheduling Time - %.5f (ms)\" % ((end_time - start_time) * 1000)\n LOG.info(log_msg)\n\n\ndef schedule(circuits: Union[QuantumCircuit, List[QuantumCircuit]],\n backend: Optional[BaseBackend] = None,\n inst_map: Optional[InstructionScheduleMap] = None,\n meas_map: Optional[List[List[int]]] = None,\n method: Optional[Union[str, List[str]]] = None) -> Union[Schedule, List[Schedule]]:\n \"\"\"\n Schedule a circuit to a pulse ``Schedule``, using the backend, according to any specified\n methods. 
Supported methods are documented in :py:mod:`qiskit.scheduler.schedule_circuit`.\n\n Args:\n circuits: The quantum circuit or circuits to translate\n backend: A backend instance, which contains hardware-specific data required for scheduling\n inst_map: Mapping of circuit operations to pulse schedules. If ``None``, defaults to the\n ``backend``\\'s ``instruction_schedule_map``\n meas_map: List of sets of qubits that must be measured together. If ``None``, defaults to\n the ``backend``\\'s ``meas_map``\n method: Optionally specify a particular scheduling method\n\n Returns:\n A pulse ``Schedule`` that implements the input circuit\n\n Raises:\n QiskitError: If ``inst_map`` and ``meas_map`` are not passed and ``backend`` is not passed\n \"\"\"\n start_time = time()\n if inst_map is None:\n if backend is None:\n raise QiskitError(\"Must supply either a backend or InstructionScheduleMap for \"\n \"scheduling passes.\")\n inst_map = backend.defaults().instruction_schedule_map\n if meas_map is None:\n if backend is None:\n raise QiskitError(\"Must supply either a backend or a meas_map for scheduling passes.\")\n meas_map = backend.configuration().meas_map\n\n schedule_config = ScheduleConfig(inst_map=inst_map, meas_map=meas_map)\n circuits = circuits if isinstance(circuits, list) else [circuits]\n schedules = [schedule_circuit(circuit, schedule_config, method) for circuit in circuits]\n end_time = time()\n _log_schedule_time(start_time, end_time)\n return schedules[0] if len(schedules) == 1 else schedules\n", "path": "qiskit/compiler/schedule.py"}]} | 1,654 | 179 |
gh_patches_debug_6245 | rasdani/github-patches | git_diff | ansible__ansible-lint-303 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
False positive: ANSIBLE0014 does not allow command:args:stdin
# Issue Type
- Bug report
# Ansible and Ansible Lint details
```
ansible --version
ansible 2.4.0.0
config file = /etc/ansible/ansible.cfg
configured module search path = [u'$HOME/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python2.7/site-packages/ansible
executable location = /usr/bin/ansible
python version = 2.7.5 (default, Aug 2 2017, 11:05:32) [GCC 4.8.5 20150623 (Red Hat 4.8.5-16)]
ansible-lint --version
ansible-lint 3.4.17
```
- ansible installation method: OS package
- ansible-lint installation method: pip
# Desired Behaviour
The `stdin` argument to the `command` module should not trigger the "Environment variables don't work as part of command" error.
# Actual Behaviour (Bug report only)
The EnvVarsInCommandRule (ANSIBLE0014) linter rule rejects the following playbook:
```
- hosts: localhost
tasks:
- command: /bin/cat
args:
stdin: "Hello, world!"
```
due to the presence of the `stdin` attribute, which was added in Ansible 2.4. This appears to be because `stdin` is missing from `expected_args`.
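
A minimal sketch of the corresponding one-line fix — it matches the change applied later in this entry, adding `'stdin'` to the whitelist:

```python
# lib/ansiblelint/rules/EnvVarsInCommandRule.py (inside EnvVarsInCommandRule)
expected_args = ['chdir', 'creates', 'executable', 'removes', 'stdin', 'warn',
                 'cmd', '__ansible_module__', '__ansible_arguments__',
                 LINE_NUMBER_KEY, FILENAME_KEY]
```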
</issue>
<code>
[start of lib/ansiblelint/rules/EnvVarsInCommandRule.py]
1 # Copyright (c) 2016 Will Thames <[email protected]>
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a copy
4 # of this software and associated documentation files (the "Software"), to deal
5 # in the Software without restriction, including without limitation the rights
6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 # copies of the Software, and to permit persons to whom the Software is
8 # furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19 # THE SOFTWARE.
20
21 from ansiblelint import AnsibleLintRule
22 from ansiblelint.utils import LINE_NUMBER_KEY, FILENAME_KEY
23
24
25 class EnvVarsInCommandRule(AnsibleLintRule):
26 id = 'ANSIBLE0014'
27 shortdesc = "Environment variables don't work as part of command"
28 description = 'Environment variables should be passed to shell or ' \
29 'command through environment argument'
30 tags = ['bug']
31
32 expected_args = ['chdir', 'creates', 'executable', 'removes', 'warn',
33 'cmd', '__ansible_module__', '__ansible_arguments__',
34 LINE_NUMBER_KEY, FILENAME_KEY]
35
36 def matchtask(self, file, task):
37 if task["action"]["__ansible_module__"] in ['shell', 'command']:
38 if 'cmd' in task['action']:
39 first_cmd_arg = task['action']['cmd'].split()[0]
40 else:
41 first_cmd_arg = task['action']['__ansible_arguments__'][0]
42 return any([arg not in self.expected_args for arg in task['action']] +
43 ["=" in first_cmd_arg])
44
[end of lib/ansiblelint/rules/EnvVarsInCommandRule.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/ansiblelint/rules/EnvVarsInCommandRule.py b/lib/ansiblelint/rules/EnvVarsInCommandRule.py
--- a/lib/ansiblelint/rules/EnvVarsInCommandRule.py
+++ b/lib/ansiblelint/rules/EnvVarsInCommandRule.py
@@ -29,7 +29,7 @@
'command through environment argument'
tags = ['bug']
- expected_args = ['chdir', 'creates', 'executable', 'removes', 'warn',
+ expected_args = ['chdir', 'creates', 'executable', 'removes', 'stdin', 'warn',
'cmd', '__ansible_module__', '__ansible_arguments__',
LINE_NUMBER_KEY, FILENAME_KEY]
| {"golden_diff": "diff --git a/lib/ansiblelint/rules/EnvVarsInCommandRule.py b/lib/ansiblelint/rules/EnvVarsInCommandRule.py\n--- a/lib/ansiblelint/rules/EnvVarsInCommandRule.py\n+++ b/lib/ansiblelint/rules/EnvVarsInCommandRule.py\n@@ -29,7 +29,7 @@\n 'command through environment argument'\n tags = ['bug']\n \n- expected_args = ['chdir', 'creates', 'executable', 'removes', 'warn',\n+ expected_args = ['chdir', 'creates', 'executable', 'removes', 'stdin', 'warn',\n 'cmd', '__ansible_module__', '__ansible_arguments__',\n LINE_NUMBER_KEY, FILENAME_KEY]\n", "issue": "False Positive ANSIBLE0014 does not allow command:args:stdin\n# Issue Type\r\n- Bug report\r\n\r\n# Ansible and Ansible Lint details\r\n\r\n```\r\nansible --version\r\nansible 2.4.0.0\r\n config file = /etc/ansible/ansible.cfg\r\n configured module search path = [u'$HOME/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']\r\n ansible python module location = /usr/lib/python2.7/site-packages/ansible\r\n executable location = /usr/bin/ansible\r\n python version = 2.7.5 (default, Aug 2 2017, 11:05:32) [GCC 4.8.5 20150623 (Red Hat 4.8.5-16)]\r\n\r\nansible-lint --version\r\nansible-lint 3.4.17\r\n```\r\n\r\n- ansible installation method: OS package\r\n- ansible-lint installation method: pip\r\n\r\n# Desired Behaviour\r\n\r\nThe `stdin` argument to the `command` module should not trigger the \"Environment variables don't work as part of command\" error.\r\n\r\n# Actual Behaviour (Bug report only)\r\n\r\nThe EnvVarsInCommandRule (ANSIBLE0014) linter rule rejects the following playbook:\r\n\r\n```\r\n- hosts: localhost\r\n tasks:\r\n - command: /bin/cat\r\n args:\r\n stdin: \"Hello, world!\"\r\n```\r\n\r\ndue to the presence of the `stdin` attribute which was added in Ansible 2.4. This appears to be because `stdin` is missing from `expected_args`.\n", "before_files": [{"content": "# Copyright (c) 2016 Will Thames <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom ansiblelint import AnsibleLintRule\nfrom ansiblelint.utils import LINE_NUMBER_KEY, FILENAME_KEY\n\n\nclass EnvVarsInCommandRule(AnsibleLintRule):\n id = 'ANSIBLE0014'\n shortdesc = \"Environment variables don't work as part of command\"\n description = 'Environment variables should be passed to shell or ' \\\n 'command through environment argument'\n tags = ['bug']\n\n expected_args = ['chdir', 'creates', 'executable', 'removes', 'warn',\n 'cmd', '__ansible_module__', '__ansible_arguments__',\n LINE_NUMBER_KEY, FILENAME_KEY]\n\n def matchtask(self, file, task):\n if task[\"action\"][\"__ansible_module__\"] in ['shell', 'command']:\n if 'cmd' in task['action']:\n first_cmd_arg = task['action']['cmd'].split()[0]\n else:\n first_cmd_arg = task['action']['__ansible_arguments__'][0]\n return any([arg not in self.expected_args for arg in task['action']] +\n [\"=\" in first_cmd_arg])\n", "path": "lib/ansiblelint/rules/EnvVarsInCommandRule.py"}]} | 1,448 | 156 |
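To make the one-line fix in this record concrete, here is a hedged, standalone re-implementation of the rule's core check with `stdin` whitelisted. It is plain Python, independent of ansible-lint's plugin machinery; the shape of the `action` dict is an assumption based on the source above.

```python
# Sketch of the ANSIBLE0014 check after the fix: 'stdin' is an expected
# module argument, so it no longer trips the env-vars heuristic.
EXPECTED_ARGS = {'chdir', 'creates', 'executable', 'removes', 'stdin', 'warn',
                 'cmd', '__ansible_module__', '__ansible_arguments__'}

def matches(action):
    """Return True if a command/shell task looks like it abuses env vars."""
    if action['__ansible_module__'] not in ('shell', 'command'):
        return False
    if 'cmd' in action:
        first_cmd_arg = action['cmd'].split()[0]
    else:
        first_cmd_arg = action['__ansible_arguments__'][0]
    has_unexpected_arg = any(arg not in EXPECTED_ARGS for arg in action)
    return has_unexpected_arg or '=' in first_cmd_arg

# The playbook from the issue no longer produces a false positive:
task_action = {'__ansible_module__': 'command',
               '__ansible_arguments__': ['/bin/cat'],
               'stdin': 'Hello, world!'}
assert not matches(task_action)
print("stdin task passes the lint check")
```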
gh_patches_debug_7106 | rasdani/github-patches | git_diff | CTFd__CTFd-1485 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Theme settings
There needs to be some way to change settings in themes themselves. People complain about a lot of nonsensical things that they should fix in their own forks and that should not need to be a concern in master.
</issue>
<code>
[start of CTFd/constants/config.py]
1 from CTFd.utils import get_config
2 from CTFd.utils.helpers import markup
3
4
5 class _ConfigsWrapper:
6 def __getattr__(self, attr):
7 return get_config(attr)
8
9 @property
10 def ctf_name(self):
11 return get_config("theme_header", default="CTFd")
12
13 @property
14 def theme_header(self):
15 return markup(get_config("theme_header", default=""))
16
17 @property
18 def theme_footer(self):
19 return markup(get_config("theme_footer", default=""))
20
21
22 Configs = _ConfigsWrapper()
23
[end of CTFd/constants/config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/CTFd/constants/config.py b/CTFd/constants/config.py
--- a/CTFd/constants/config.py
+++ b/CTFd/constants/config.py
@@ -1,3 +1,5 @@
+import json
+
from CTFd.utils import get_config
from CTFd.utils.helpers import markup
@@ -18,5 +20,9 @@
def theme_footer(self):
return markup(get_config("theme_footer", default=""))
+ @property
+ def theme_settings(self):
+ return json.loads(get_config("theme_settings", default="null"))
+
Configs = _ConfigsWrapper()
| {"golden_diff": "diff --git a/CTFd/constants/config.py b/CTFd/constants/config.py\n--- a/CTFd/constants/config.py\n+++ b/CTFd/constants/config.py\n@@ -1,3 +1,5 @@\n+import json\n+\n from CTFd.utils import get_config\n from CTFd.utils.helpers import markup\n \n@@ -18,5 +20,9 @@\n def theme_footer(self):\n return markup(get_config(\"theme_footer\", default=\"\"))\n \n+ @property\n+ def theme_settings(self):\n+ return json.loads(get_config(\"theme_settings\", default=\"null\"))\n+\n \n Configs = _ConfigsWrapper()\n", "issue": "Theme settings\nThere needs to be some way to change settings in themes themselves. People complain about a lot of nonsensical things that they should fix in their forks and not need to be a concern in master. \n", "before_files": [{"content": "from CTFd.utils import get_config\nfrom CTFd.utils.helpers import markup\n\n\nclass _ConfigsWrapper:\n def __getattr__(self, attr):\n return get_config(attr)\n\n @property\n def ctf_name(self):\n return get_config(\"theme_header\", default=\"CTFd\")\n\n @property\n def theme_header(self):\n return markup(get_config(\"theme_header\", default=\"\"))\n\n @property\n def theme_footer(self):\n return markup(get_config(\"theme_footer\", default=\"\"))\n\n\nConfigs = _ConfigsWrapper()\n", "path": "CTFd/constants/config.py"}]} | 739 | 137 |
gh_patches_debug_3 | rasdani/github-patches | git_diff | plotly__dash-2553 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Flask 2.2.3 dependency has HIGH security vulnerability (fixed in 2.2.5)
Issue #2538 pinned the upper bound of the Flask dependency to 2.2.3. However, Flask 2.2.3 is affected by a HIGH security vulnerability that is fixed in Flask 2.2.5. See https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-30861
Debian 11, Python 3.11 (from the official Python 3.11 Docker image)
```
# pip install dash
Collecting dash
Downloading dash-2.10.1-py3-none-any.whl (10.3 MB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 10.3/10.3 MB 14.1 MB/s eta 0:00:00
Collecting Flask<=2.2.3,>=1.0.4 (from dash)
Downloading Flask-2.2.3-py3-none-any.whl (101 kB)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 101.8/101.8 kB 17.0 MB/s eta 0:00:00
```
```
dash 2.10.1
dash-core-components 2.0.0
dash-html-components 2.0.0
dash-table 5.0.0
```
**Describe the bug**
Dash installs a vulnerable version of Flask and dependency scans flag the vulnerability.
**Expected behavior**
No dependencies with known, already-fixed security vulnerabilities should be added. Perhaps pin to 2.2.* instead of the specific 2.2.3 version, since any exact pin will eventually be hit by newly discovered security issues.
</issue>
<code>
[start of dash/version.py]
1 __version__ = "2.10.1"
2
[end of dash/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dash/version.py b/dash/version.py
--- a/dash/version.py
+++ b/dash/version.py
@@ -1 +1 @@
-__version__ = "2.10.1"
+__version__ = "2.10.2"
| {"golden_diff": "diff --git a/dash/version.py b/dash/version.py\n--- a/dash/version.py\n+++ b/dash/version.py\n@@ -1 +1 @@\n-__version__ = \"2.10.1\"\n+__version__ = \"2.10.2\"\n", "issue": "[BUG] Flask 2.2.3 dependency has HIGH security vulnerability (fixed in 2.2.5)\nIssue #2538 pinned the upper bound of the Flask dependency to 2.2.3. However Flask 2.2.3 is affected by a HIGH security vulnerability that is fixed in Flask 2.2.5. See https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-30861\r\n\r\nDebian 11, Python 3.11 (from Python official 3.11 Docker image)\r\n```\r\n# pip install dash\r\nCollecting dash\r\n Downloading dash-2.10.1-py3-none-any.whl (10.3 MB)\r\n \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 10.3/10.3 MB 14.1 MB/s eta 0:00:00\r\nCollecting Flask<=2.2.3,>=1.0.4 (from dash)\r\n Downloading Flask-2.2.3-py3-none-any.whl (101 kB)\r\n \u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501 101.8/101.8 kB 17.0 MB/s eta 0:00:00\r\n```\r\n\r\n```\r\ndash 2.10.1\r\ndash-core-components 2.0.0\r\ndash-html-components 2.0.0\r\ndash-table 5.0.0\r\n```\r\n\r\n**Describe the bug**\r\n\r\nDash installs a vulnerable version of Flask and dependency scans flag the vulnerability.\r\n\r\n**Expected behavior**\r\n\r\nNo known and fixed security vulnerabilities added. Perhaps Pin to 2.2.* instead of specific 2.2.3 version where future pins will find new security issues.\r\n\r\n\n", "before_files": [{"content": "__version__ = \"2.10.1\"\n", "path": "dash/version.py"}]} | 967 | 61 |
gh_patches_debug_41606 | rasdani/github-patches | git_diff | canonical__snapcraft-4622 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support project hooks for core24 snaps
### What needs to get done
The `PackageService` for core24 snaps should support project hooks. The behavior should be the same as core22.
The failures can be found by running `spread google:ubuntu-24.04-64:tests/spread/general/hooks/`. See failing logs [here](https://paste.ubuntu.com/p/CjBwVKcwyR/).
### Why it needs to get done
To support building core24 snaps with craft-application
</issue>
<code>
[start of snapcraft/services/package.py]
1 # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
2 #
3 # Copyright 2023 Canonical Ltd.
4 #
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License version 3 as
7 # published by the Free Software Foundation.
8 #
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
13 #
14 # You should have received a copy of the GNU General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16
17 """Snapcraft Package service."""
18
19 from __future__ import annotations
20
21 import os
22 import pathlib
23 import shutil
24 from typing import TYPE_CHECKING, cast
25
26 from craft_application import AppMetadata, PackageService
27 from overrides import override
28
29 from snapcraft import errors, linters, models, pack, utils
30 from snapcraft.linters import LinterStatus
31 from snapcraft.meta import snap_yaml
32 from snapcraft.services import Lifecycle
33 from snapcraft.utils import process_version
34
35 if TYPE_CHECKING:
36 from snapcraft.services import SnapcraftServiceFactory
37
38
39 class Package(PackageService):
40 """Package service subclass for Snapcraft."""
41
42 _project: models.Project
43
44 def __init__( # noqa: PLR0913 (Too many arguments)
45 self,
46 app: AppMetadata,
47 services: SnapcraftServiceFactory,
48 *,
49 project: models.Project,
50 snapcraft_yaml_path: pathlib.Path,
51 platform: str | None,
52 build_for: str,
53 ) -> None:
54 super().__init__(app, services, project=project)
55 self._platform = platform
56 self._build_for = build_for
57 self._snapcraft_yaml_path = snapcraft_yaml_path
58
59 @override
60 def pack(self, prime_dir: pathlib.Path, dest: pathlib.Path) -> list[pathlib.Path]:
61 """Create one or more packages as appropriate.
62
63 :param prime_dir: Path to the directory to pack.
64 :param dest: Directory into which to write the package(s).
65 :returns: A list of paths to created packages.
66 """
67 issues = linters.run_linters(prime_dir, lint=self._project.lint)
68 status = linters.report(issues, intermediate=True)
69
70 # In case of linter errors, stop execution and return the error code.
71 if status in (LinterStatus.ERRORS, LinterStatus.FATAL):
72 raise errors.LinterError("Linter errors found", exit_code=status)
73
74 return [
75 pathlib.Path(
76 pack.pack_snap(
77 prime_dir,
78 output=str(dest),
79 compression=self._project.compression,
80 name=self._project.name,
81 version=process_version(self._project.version),
82 target_arch=self._build_for,
83 )
84 )
85 ]
86
87 @override
88 def write_metadata(self, path: pathlib.Path) -> None:
89 """Write the project metadata to metadata.yaml in the given directory.
90
91 :param path: The path to the prime directory.
92 """
93 meta_dir = path / "meta"
94 meta_dir.mkdir(parents=True, exist_ok=True)
95 self.metadata.to_yaml_file(meta_dir / "snap.yaml")
96
97 enable_manifest = utils.strtobool(os.getenv("SNAPCRAFT_BUILD_INFO", "n"))
98
99 if enable_manifest:
100 snap_dir = path / "snap"
101 snap_dir.mkdir(parents=True, exist_ok=True)
102 lifecycle = cast(Lifecycle, self._services.lifecycle)
103 manifest = lifecycle.generate_manifest()
104 manifest.to_yaml_file(snap_dir / "manifest.yaml")
105
106 shutil.copy(self._snapcraft_yaml_path, snap_dir)
107
108 @property
109 def metadata(self) -> snap_yaml.SnapMetadata:
110 """Get the metadata model for this project."""
111 return snap_yaml.get_metadata_from_project(
112 self._project, self._services.lifecycle.prime_dir, arch=self._build_for
113 )
114
[end of snapcraft/services/package.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/snapcraft/services/package.py b/snapcraft/services/package.py
--- a/snapcraft/services/package.py
+++ b/snapcraft/services/package.py
@@ -29,6 +29,7 @@
from snapcraft import errors, linters, models, pack, utils
from snapcraft.linters import LinterStatus
from snapcraft.meta import snap_yaml
+from snapcraft.parts.setup_assets import setup_assets
from snapcraft.services import Lifecycle
from snapcraft.utils import process_version
@@ -84,6 +85,23 @@
)
]
+ def _get_assets_dir(self) -> pathlib.Path:
+ """Return a snapcraft assets directory.
+
+ Asset directories can exist in:
+
+ - <PROJECT_ROOT>/snap
+ - <PROJECT_ROOT>/build-aux/snap
+ """
+ project_dir = self._services.lifecycle.project_info.project_dir
+ for asset_reldir in ("snap", "build-aux/snap"):
+ asset_dir = project_dir / asset_reldir
+ if asset_dir.exists():
+ return asset_dir
+
+ # This is for backwards compatibility with setup_assets(...)
+ return project_dir / "snap"
+
@override
def write_metadata(self, path: pathlib.Path) -> None:
"""Write the project metadata to metadata.yaml in the given directory.
@@ -105,9 +123,79 @@
shutil.copy(self._snapcraft_yaml_path, snap_dir)
+ assets_dir = self._get_assets_dir()
+ setup_assets(
+ self._project,
+ assets_dir=assets_dir,
+ project_dir=self._services.lifecycle.project_info.project_dir,
+ prime_dir=path,
+ meta_directory_handler=meta_directory_handler,
+ )
+
@property
def metadata(self) -> snap_yaml.SnapMetadata:
"""Get the metadata model for this project."""
return snap_yaml.get_metadata_from_project(
self._project, self._services.lifecycle.prime_dir, arch=self._build_for
)
+
+
+def _hardlink_or_copy(source: pathlib.Path, destination: pathlib.Path) -> bool:
+ """Try to hardlink and fallback to copy if it fails.
+
+ :param source: the source path.
+ :param destination: the destination path.
+ :returns: True if a hardlink was done or False for copy.
+ """
+ # Unlink the destination to avoid link failures
+ destination.unlink(missing_ok=True)
+
+ try:
+ destination.hardlink_to(source)
+ except OSError as os_error:
+ # Cross device link
+ if os_error.errno != 18:
+ raise
+ shutil.copy(source, destination)
+ return False
+
+ return True
+
+
+def meta_directory_handler(assets_dir: pathlib.Path, path: pathlib.Path):
+ """Handle hooks and gui assets from Snapcraft.
+
+ :param assets_dir: directory with project assets.
+ :param path: directory to write assets to.
+ """
+ meta_dir = path / "meta"
+ built_snap_hooks = path / "snap" / "hooks"
+ hooks_project_dir = assets_dir / "hooks"
+
+ hooks_meta_dir = meta_dir / "hooks"
+
+ if built_snap_hooks.is_dir():
+ hooks_meta_dir.mkdir(parents=True, exist_ok=True)
+ for hook in built_snap_hooks.iterdir():
+ meta_dir_hook = hooks_meta_dir / hook.name
+ # Remove to always refresh to the latest
+ meta_dir_hook.unlink(missing_ok=True)
+ meta_dir_hook.hardlink_to(hook)
+
+ # Overwrite any built hooks with project level ones
+ if hooks_project_dir.is_dir():
+ hooks_meta_dir.mkdir(parents=True, exist_ok=True)
+ for hook in hooks_project_dir.iterdir():
+ meta_dir_hook = hooks_meta_dir / hook.name
+
+ _hardlink_or_copy(hook, meta_dir_hook)
+
+ # Write any gui assets
+ gui_project_dir = assets_dir / "gui"
+ gui_meta_dir = meta_dir / "gui"
+ if gui_project_dir.is_dir():
+ gui_meta_dir.mkdir(parents=True, exist_ok=True)
+ for gui in gui_project_dir.iterdir():
+ meta_dir_gui = gui_meta_dir / gui.name
+
+ _hardlink_or_copy(gui, meta_dir_gui)
| {"golden_diff": "diff --git a/snapcraft/services/package.py b/snapcraft/services/package.py\n--- a/snapcraft/services/package.py\n+++ b/snapcraft/services/package.py\n@@ -29,6 +29,7 @@\n from snapcraft import errors, linters, models, pack, utils\n from snapcraft.linters import LinterStatus\n from snapcraft.meta import snap_yaml\n+from snapcraft.parts.setup_assets import setup_assets\n from snapcraft.services import Lifecycle\n from snapcraft.utils import process_version\n \n@@ -84,6 +85,23 @@\n )\n ]\n \n+ def _get_assets_dir(self) -> pathlib.Path:\n+ \"\"\"Return a snapcraft assets directory.\n+\n+ Asset directories can exist in:\n+\n+ - <PROJECT_ROOT>/snap\n+ - <PROJECT_ROOT>/build-aux/snap\n+ \"\"\"\n+ project_dir = self._services.lifecycle.project_info.project_dir\n+ for asset_reldir in (\"snap\", \"build-aux/snap\"):\n+ asset_dir = project_dir / asset_reldir\n+ if asset_dir.exists():\n+ return asset_dir\n+\n+ # This is for backwards compatibility with setup_assets(...)\n+ return project_dir / \"snap\"\n+\n @override\n def write_metadata(self, path: pathlib.Path) -> None:\n \"\"\"Write the project metadata to metadata.yaml in the given directory.\n@@ -105,9 +123,79 @@\n \n shutil.copy(self._snapcraft_yaml_path, snap_dir)\n \n+ assets_dir = self._get_assets_dir()\n+ setup_assets(\n+ self._project,\n+ assets_dir=assets_dir,\n+ project_dir=self._services.lifecycle.project_info.project_dir,\n+ prime_dir=path,\n+ meta_directory_handler=meta_directory_handler,\n+ )\n+\n @property\n def metadata(self) -> snap_yaml.SnapMetadata:\n \"\"\"Get the metadata model for this project.\"\"\"\n return snap_yaml.get_metadata_from_project(\n self._project, self._services.lifecycle.prime_dir, arch=self._build_for\n )\n+\n+\n+def _hardlink_or_copy(source: pathlib.Path, destination: pathlib.Path) -> bool:\n+ \"\"\"Try to hardlink and fallback to copy if it fails.\n+\n+ :param source: the source path.\n+ :param destination: the destination path.\n+ :returns: True if a hardlink was done or False for copy.\n+ \"\"\"\n+ # Unlink the destination to avoid link failures\n+ destination.unlink(missing_ok=True)\n+\n+ try:\n+ destination.hardlink_to(source)\n+ except OSError as os_error:\n+ # Cross device link\n+ if os_error.errno != 18:\n+ raise\n+ shutil.copy(source, destination)\n+ return False\n+\n+ return True\n+\n+\n+def meta_directory_handler(assets_dir: pathlib.Path, path: pathlib.Path):\n+ \"\"\"Handle hooks and gui assets from Snapcraft.\n+\n+ :param assets_dir: directory with project assets.\n+ :param path: directory to write assets to.\n+ \"\"\"\n+ meta_dir = path / \"meta\"\n+ built_snap_hooks = path / \"snap\" / \"hooks\"\n+ hooks_project_dir = assets_dir / \"hooks\"\n+\n+ hooks_meta_dir = meta_dir / \"hooks\"\n+\n+ if built_snap_hooks.is_dir():\n+ hooks_meta_dir.mkdir(parents=True, exist_ok=True)\n+ for hook in built_snap_hooks.iterdir():\n+ meta_dir_hook = hooks_meta_dir / hook.name\n+ # Remove to always refresh to the latest\n+ meta_dir_hook.unlink(missing_ok=True)\n+ meta_dir_hook.hardlink_to(hook)\n+\n+ # Overwrite any built hooks with project level ones\n+ if hooks_project_dir.is_dir():\n+ hooks_meta_dir.mkdir(parents=True, exist_ok=True)\n+ for hook in hooks_project_dir.iterdir():\n+ meta_dir_hook = hooks_meta_dir / hook.name\n+\n+ _hardlink_or_copy(hook, meta_dir_hook)\n+\n+ # Write any gui assets\n+ gui_project_dir = assets_dir / \"gui\"\n+ gui_meta_dir = meta_dir / \"gui\"\n+ if gui_project_dir.is_dir():\n+ gui_meta_dir.mkdir(parents=True, exist_ok=True)\n+ for gui in 
gui_project_dir.iterdir():\n+ meta_dir_gui = gui_meta_dir / gui.name\n+\n+ _hardlink_or_copy(gui, meta_dir_gui)\n", "issue": "Support project hooks for core24 snaps\n### What needs to get done\n\nThe `PackageService` for core24 snaps should support project hooks. The behavior should be the same as core22.\r\n\r\nThe failures can be found by running `spread google:ubuntu-24.04-64:tests/spread/general/hooks/`. See failing logs [here](https://paste.ubuntu.com/p/CjBwVKcwyR/).\n\n### Why it needs to get done\n\nTo support building core24 snaps with craft-application\n", "before_files": [{"content": "# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-\n#\n# Copyright 2023 Canonical Ltd.\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Snapcraft Package service.\"\"\"\n\nfrom __future__ import annotations\n\nimport os\nimport pathlib\nimport shutil\nfrom typing import TYPE_CHECKING, cast\n\nfrom craft_application import AppMetadata, PackageService\nfrom overrides import override\n\nfrom snapcraft import errors, linters, models, pack, utils\nfrom snapcraft.linters import LinterStatus\nfrom snapcraft.meta import snap_yaml\nfrom snapcraft.services import Lifecycle\nfrom snapcraft.utils import process_version\n\nif TYPE_CHECKING:\n from snapcraft.services import SnapcraftServiceFactory\n\n\nclass Package(PackageService):\n \"\"\"Package service subclass for Snapcraft.\"\"\"\n\n _project: models.Project\n\n def __init__( # noqa: PLR0913 (Too many arguments)\n self,\n app: AppMetadata,\n services: SnapcraftServiceFactory,\n *,\n project: models.Project,\n snapcraft_yaml_path: pathlib.Path,\n platform: str | None,\n build_for: str,\n ) -> None:\n super().__init__(app, services, project=project)\n self._platform = platform\n self._build_for = build_for\n self._snapcraft_yaml_path = snapcraft_yaml_path\n\n @override\n def pack(self, prime_dir: pathlib.Path, dest: pathlib.Path) -> list[pathlib.Path]:\n \"\"\"Create one or more packages as appropriate.\n\n :param prime_dir: Path to the directory to pack.\n :param dest: Directory into which to write the package(s).\n :returns: A list of paths to created packages.\n \"\"\"\n issues = linters.run_linters(prime_dir, lint=self._project.lint)\n status = linters.report(issues, intermediate=True)\n\n # In case of linter errors, stop execution and return the error code.\n if status in (LinterStatus.ERRORS, LinterStatus.FATAL):\n raise errors.LinterError(\"Linter errors found\", exit_code=status)\n\n return [\n pathlib.Path(\n pack.pack_snap(\n prime_dir,\n output=str(dest),\n compression=self._project.compression,\n name=self._project.name,\n version=process_version(self._project.version),\n target_arch=self._build_for,\n )\n )\n ]\n\n @override\n def write_metadata(self, path: pathlib.Path) -> None:\n \"\"\"Write the project metadata to metadata.yaml in the given directory.\n\n :param path: The path to the prime directory.\n \"\"\"\n meta_dir = path / \"meta\"\n meta_dir.mkdir(parents=True, exist_ok=True)\n 
self.metadata.to_yaml_file(meta_dir / \"snap.yaml\")\n\n enable_manifest = utils.strtobool(os.getenv(\"SNAPCRAFT_BUILD_INFO\", \"n\"))\n\n if enable_manifest:\n snap_dir = path / \"snap\"\n snap_dir.mkdir(parents=True, exist_ok=True)\n lifecycle = cast(Lifecycle, self._services.lifecycle)\n manifest = lifecycle.generate_manifest()\n manifest.to_yaml_file(snap_dir / \"manifest.yaml\")\n\n shutil.copy(self._snapcraft_yaml_path, snap_dir)\n\n @property\n def metadata(self) -> snap_yaml.SnapMetadata:\n \"\"\"Get the metadata model for this project.\"\"\"\n return snap_yaml.get_metadata_from_project(\n self._project, self._services.lifecycle.prime_dir, arch=self._build_for\n )\n", "path": "snapcraft/services/package.py"}]} | 1,749 | 970 |
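The bulk of this record's diff is a hardlink-with-copy-fallback helper for assets. A hedged, standalone version is sketched below, runnable outside snapcraft; errno 18 is `EXDEV`, the cross-device-link error the diff special-cases, and `Path.hardlink_to` requires Python 3.10+.

```python
import errno
import pathlib
import shutil
import tempfile

def hardlink_or_copy(source: pathlib.Path, destination: pathlib.Path) -> bool:
    """Hardlink source to destination, copying instead across filesystems.

    Returns True when a hardlink was created, False on the copy fallback.
    """
    destination.unlink(missing_ok=True)  # avoid failures when refreshing
    try:
        destination.hardlink_to(source)  # Python 3.10+
    except OSError as os_error:
        if os_error.errno != errno.EXDEV:  # 18: invalid cross-device link
            raise
        shutil.copy(source, destination)
        return False
    return True

# Smoke test in a temp dir (same filesystem, so a hardlink is expected).
with tempfile.TemporaryDirectory() as tmp:
    src = pathlib.Path(tmp, "hook")
    src.write_text("#!/bin/sh\necho hello\n")
    dst = pathlib.Path(tmp, "meta-hook")
    print("hardlinked:", hardlink_or_copy(src, dst))
```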
gh_patches_debug_9672 | rasdani/github-patches | git_diff | svthalia__concrexit-2712 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Paparazcie committee members cannot edit promo requests
Members of the paparazcie cannot edit the promo requests in the back-end. I can, so it might be an issue with permissions.
</issue>
<code>
[start of website/promotion/admin.py]
1 """Registers admin interfaces for the models defined in this module."""
2 from django.contrib import admin
3 from django.contrib.admin import ModelAdmin
4
5 from events.services import is_organiser
6 from promotion.forms import PromotionRequestForm
7
8 from .models import PromotionChannel, PromotionRequest
9
10
11 @admin.register(PromotionRequest)
12 class PromotionRequestAdmin(admin.ModelAdmin):
13 """This manages the admin interface for the model items."""
14
15 list_display = ("event", "publish_date", "channel", "assigned_to", "status")
16 list_filter = (
17 "publish_date",
18 "assigned_to",
19 "status",
20 )
21 date_hierarchy = "publish_date"
22 form = PromotionRequestForm
23 actions = ["mark_not_started", "mark_started", "mark_finished", "mark_published"]
24
25 def has_change_permission(self, request, obj=None):
26 if obj is not None and not is_organiser(request.member, obj.event):
27 return False
28 return super().has_change_permission(request, obj)
29
30 def mark_not_started(self, request, queryset):
31 """Change the status of the event to published."""
32 self._change_published(queryset, PromotionRequest.NOT_STARTED)
33
34 mark_not_started.short_description = "Mark requests as not started"
35
36 def mark_started(self, request, queryset):
37 """Change the status of the event to published."""
38 self._change_published(queryset, PromotionRequest.STARTED)
39
40 mark_started.short_description = "Mark requests as started"
41
42 def mark_finished(self, request, queryset):
43 """Change the status of the event to published."""
44 self._change_published(queryset, PromotionRequest.FINISHED)
45
46 mark_finished.short_description = "Mark requests as finished"
47
48 def mark_published(self, request, queryset):
49 """Change the status of the event to published."""
50 self._change_published(queryset, PromotionRequest.PUBLISHED)
51
52 mark_published.short_description = "Mark requests as published"
53
54 @staticmethod
55 def _change_published(queryset, status):
56 queryset.update(status=status)
57
58
59 @admin.register(PromotionChannel)
60 class PromotionChannelAdmin(ModelAdmin):
61 pass
62
[end of website/promotion/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/promotion/admin.py b/website/promotion/admin.py
--- a/website/promotion/admin.py
+++ b/website/promotion/admin.py
@@ -23,8 +23,8 @@
actions = ["mark_not_started", "mark_started", "mark_finished", "mark_published"]
def has_change_permission(self, request, obj=None):
- if obj is not None and not is_organiser(request.member, obj.event):
- return False
+ if obj is not None and obj.event and is_organiser(request.member, obj.event):
+ return True
return super().has_change_permission(request, obj)
def mark_not_started(self, request, queryset):
| {"golden_diff": "diff --git a/website/promotion/admin.py b/website/promotion/admin.py\n--- a/website/promotion/admin.py\n+++ b/website/promotion/admin.py\n@@ -23,8 +23,8 @@\n actions = [\"mark_not_started\", \"mark_started\", \"mark_finished\", \"mark_published\"]\n \n def has_change_permission(self, request, obj=None):\n- if obj is not None and not is_organiser(request.member, obj.event):\n- return False\n+ if obj is not None and obj.event and is_organiser(request.member, obj.event):\n+ return True\n return super().has_change_permission(request, obj)\n \n def mark_not_started(self, request, queryset):\n", "issue": "Paparazcie committee members cannot edit promo requests\nMembers of the paparazcie cannot edit the promo requests in the back-end. I can, so it might be an issue with permissions. \n", "before_files": [{"content": "\"\"\"Registers admin interfaces for the models defined in this module.\"\"\"\nfrom django.contrib import admin\nfrom django.contrib.admin import ModelAdmin\n\nfrom events.services import is_organiser\nfrom promotion.forms import PromotionRequestForm\n\nfrom .models import PromotionChannel, PromotionRequest\n\n\[email protected](PromotionRequest)\nclass PromotionRequestAdmin(admin.ModelAdmin):\n \"\"\"This manages the admin interface for the model items.\"\"\"\n\n list_display = (\"event\", \"publish_date\", \"channel\", \"assigned_to\", \"status\")\n list_filter = (\n \"publish_date\",\n \"assigned_to\",\n \"status\",\n )\n date_hierarchy = \"publish_date\"\n form = PromotionRequestForm\n actions = [\"mark_not_started\", \"mark_started\", \"mark_finished\", \"mark_published\"]\n\n def has_change_permission(self, request, obj=None):\n if obj is not None and not is_organiser(request.member, obj.event):\n return False\n return super().has_change_permission(request, obj)\n\n def mark_not_started(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.NOT_STARTED)\n\n mark_not_started.short_description = \"Mark requests as not started\"\n\n def mark_started(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.STARTED)\n\n mark_started.short_description = \"Mark requests as started\"\n\n def mark_finished(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.FINISHED)\n\n mark_finished.short_description = \"Mark requests as finished\"\n\n def mark_published(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(queryset, PromotionRequest.PUBLISHED)\n\n mark_published.short_description = \"Mark requests as published\"\n\n @staticmethod\n def _change_published(queryset, status):\n queryset.update(status=status)\n\n\[email protected](PromotionChannel)\nclass PromotionChannelAdmin(ModelAdmin):\n pass\n", "path": "website/promotion/admin.py"}]} | 1,139 | 155 |
gh_patches_debug_35024 | rasdani/github-patches | git_diff | internetarchive__openlibrary-4013 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Sentry should include deployment SHA
Sentry has options for specifying the SHA of the current code, so you can see when an error was introduced. We currently don't take advantage of this.
### Stakeholders
@cdrini
</issue>
<code>
[start of openlibrary/plugins/openlibrary/sentry.py]
1 import logging
2
3 import sentry_sdk
4
5 import infogami
6 from infogami.utils import delegate
7
8 logger = logging.getLogger("openlibrary.sentry")
9
10
11 def is_enabled():
12 return hasattr(infogami.config, 'sentry') and infogami.config.sentry.enabled
13
14
15 def setup():
16 logger.info("Setting up sentry (enabled={})".format(is_enabled()))
17
18 if not is_enabled():
19 return
20
21 sentry_sdk.init(dsn=infogami.config.sentry.dsn,
22 environment=infogami.config.sentry.environment)
23 delegate.add_exception_hook(lambda: sentry_sdk.capture_exception())
24
[end of openlibrary/plugins/openlibrary/sentry.py]
[start of openlibrary/plugins/openlibrary/status.py]
1 import web
2
3 import datetime
4 import socket
5 import subprocess
6 import sys
7
8 from infogami import config
9 from infogami.utils import delegate
10 from infogami.utils.view import render_template, public
11 from openlibrary.core import stats
12
13 status_info = {}
14 feature_flags = {}
15
16 class status(delegate.page):
17 def GET(self):
18 template = render_template("status", status_info, feature_flags)
19 template.v2 = True
20 return template
21
22 @public
23 def get_git_revision_short_hash():
24 return (status_info.get('Software version')
25 if status_info and isinstance(status_info, dict)
26 else None)
27
28 def get_software_version():
29 return subprocess.Popen("git rev-parse --short HEAD --".split(), stdout = subprocess.PIPE, stderr = subprocess.STDOUT).stdout.read().strip()
30
31 def get_features_enabled():
32 return config.features
33
34 def setup():
35 "Basic startup status for the server"
36 global status_info, feature_flags
37 version = get_software_version()
38 if bytes != str: # Python 3
39 version = version.decode("utf-8")
40 host = socket.gethostname()
41 status_info = {
42 "Software version": version,
43 "Python version": sys.version.split()[0],
44 "Host": host,
45 "Start time": datetime.datetime.utcnow(),
46 }
47 feature_flags = get_features_enabled()
48
49 # Host is e.g. ol-web4.blah.archive.org ; we just want the first subdomain
50 first_subdomain = host.split('.')[0] or 'unknown'
51 stats.increment('ol.servers.%s.started' % first_subdomain)
52
[end of openlibrary/plugins/openlibrary/status.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/openlibrary/plugins/openlibrary/sentry.py b/openlibrary/plugins/openlibrary/sentry.py
--- a/openlibrary/plugins/openlibrary/sentry.py
+++ b/openlibrary/plugins/openlibrary/sentry.py
@@ -5,6 +5,8 @@
import infogami
from infogami.utils import delegate
+from openlibrary.plugins.openlibrary.status import get_software_version
+
logger = logging.getLogger("openlibrary.sentry")
@@ -19,5 +21,6 @@
return
sentry_sdk.init(dsn=infogami.config.sentry.dsn,
- environment=infogami.config.sentry.environment)
+ environment=infogami.config.sentry.environment,
+ release=get_software_version())
delegate.add_exception_hook(lambda: sentry_sdk.capture_exception())
diff --git a/openlibrary/plugins/openlibrary/status.py b/openlibrary/plugins/openlibrary/status.py
--- a/openlibrary/plugins/openlibrary/status.py
+++ b/openlibrary/plugins/openlibrary/status.py
@@ -2,8 +2,8 @@
import datetime
import socket
-import subprocess
import sys
+from subprocess import PIPE, Popen, STDOUT
from infogami import config
from infogami.utils import delegate
@@ -25,8 +25,10 @@
if status_info and isinstance(status_info, dict)
else None)
-def get_software_version():
- return subprocess.Popen("git rev-parse --short HEAD --".split(), stdout = subprocess.PIPE, stderr = subprocess.STDOUT).stdout.read().strip()
+
+def get_software_version(): # -> str:
+ cmd = "git rev-parse --short HEAD --".split()
+ return str(Popen(cmd, stdout=PIPE, stderr=STDOUT).stdout.read().decode().strip())
def get_features_enabled():
return config.features
@@ -34,12 +36,9 @@
def setup():
"Basic startup status for the server"
global status_info, feature_flags
- version = get_software_version()
- if bytes != str: # Python 3
- version = version.decode("utf-8")
host = socket.gethostname()
status_info = {
- "Software version": version,
+ "Software version": get_software_version(),
"Python version": sys.version.split()[0],
"Host": host,
"Start time": datetime.datetime.utcnow(),
| {"golden_diff": "diff --git a/openlibrary/plugins/openlibrary/sentry.py b/openlibrary/plugins/openlibrary/sentry.py\n--- a/openlibrary/plugins/openlibrary/sentry.py\n+++ b/openlibrary/plugins/openlibrary/sentry.py\n@@ -5,6 +5,8 @@\n import infogami\n from infogami.utils import delegate\n \n+from openlibrary.plugins.openlibrary.status import get_software_version\n+\n logger = logging.getLogger(\"openlibrary.sentry\")\n \n \n@@ -19,5 +21,6 @@\n return\n \n sentry_sdk.init(dsn=infogami.config.sentry.dsn,\n- environment=infogami.config.sentry.environment)\n+ environment=infogami.config.sentry.environment,\n+ release=get_software_version())\n delegate.add_exception_hook(lambda: sentry_sdk.capture_exception())\ndiff --git a/openlibrary/plugins/openlibrary/status.py b/openlibrary/plugins/openlibrary/status.py\n--- a/openlibrary/plugins/openlibrary/status.py\n+++ b/openlibrary/plugins/openlibrary/status.py\n@@ -2,8 +2,8 @@\n \n import datetime\n import socket\n-import subprocess\n import sys\n+from subprocess import PIPE, Popen, STDOUT\n \n from infogami import config\n from infogami.utils import delegate\n@@ -25,8 +25,10 @@\n if status_info and isinstance(status_info, dict) \n else None)\n \n-def get_software_version():\n- return subprocess.Popen(\"git rev-parse --short HEAD --\".split(), stdout = subprocess.PIPE, stderr = subprocess.STDOUT).stdout.read().strip()\n+\n+def get_software_version(): # -> str:\n+ cmd = \"git rev-parse --short HEAD --\".split()\n+ return str(Popen(cmd, stdout=PIPE, stderr=STDOUT).stdout.read().decode().strip())\n \n def get_features_enabled():\n return config.features\n@@ -34,12 +36,9 @@\n def setup():\n \"Basic startup status for the server\"\n global status_info, feature_flags\n- version = get_software_version()\n- if bytes != str: # Python 3\n- version = version.decode(\"utf-8\")\n host = socket.gethostname()\n status_info = {\n- \"Software version\": version,\n+ \"Software version\": get_software_version(),\n \"Python version\": sys.version.split()[0],\n \"Host\": host,\n \"Start time\": datetime.datetime.utcnow(),\n", "issue": "Sentry should include deployment SHA\nSentry has options for specifying the SHA of the current code, so you can see when an error was introduced. We currently don't take advantage of this.\r\n\r\n### Describe the problem that you'd like solved\r\n<!-- A clear and concise description of what you want to happen. -->\r\n\r\n### Proposal & Constraints\r\n<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->\r\n\r\n<!-- Which suggestions or requirements should be considered for how feature needs to appear or be implemented? -->\r\n\r\n### Additional context\r\n<!-- Add any other context or screenshots about the feature request here. 
-->\r\n\r\n### Stakeholders\r\n@cdrini \r\n\r\n\r\n\n", "before_files": [{"content": "import logging\n\nimport sentry_sdk\n\nimport infogami\nfrom infogami.utils import delegate\n\nlogger = logging.getLogger(\"openlibrary.sentry\")\n\n\ndef is_enabled():\n return hasattr(infogami.config, 'sentry') and infogami.config.sentry.enabled\n\n\ndef setup():\n logger.info(\"Setting up sentry (enabled={})\".format(is_enabled()))\n\n if not is_enabled():\n return\n\n sentry_sdk.init(dsn=infogami.config.sentry.dsn,\n environment=infogami.config.sentry.environment)\n delegate.add_exception_hook(lambda: sentry_sdk.capture_exception())\n", "path": "openlibrary/plugins/openlibrary/sentry.py"}, {"content": "import web\n\nimport datetime\nimport socket\nimport subprocess\nimport sys\n\nfrom infogami import config\nfrom infogami.utils import delegate\nfrom infogami.utils.view import render_template, public\nfrom openlibrary.core import stats\n\nstatus_info = {}\nfeature_flags = {}\n\nclass status(delegate.page):\n def GET(self):\n template = render_template(\"status\", status_info, feature_flags)\n template.v2 = True\n return template\n\n@public\ndef get_git_revision_short_hash():\n return (status_info.get('Software version')\n if status_info and isinstance(status_info, dict) \n else None)\n\ndef get_software_version():\n return subprocess.Popen(\"git rev-parse --short HEAD --\".split(), stdout = subprocess.PIPE, stderr = subprocess.STDOUT).stdout.read().strip()\n\ndef get_features_enabled():\n return config.features\n\ndef setup():\n \"Basic startup status for the server\"\n global status_info, feature_flags\n version = get_software_version()\n if bytes != str: # Python 3\n version = version.decode(\"utf-8\")\n host = socket.gethostname()\n status_info = {\n \"Software version\": version,\n \"Python version\": sys.version.split()[0],\n \"Host\": host,\n \"Start time\": datetime.datetime.utcnow(),\n }\n feature_flags = get_features_enabled()\n\n # Host is e.g. ol-web4.blah.archive.org ; we just want the first subdomain\n first_subdomain = host.split('.')[0] or 'unknown'\n stats.increment('ol.servers.%s.started' % first_subdomain)\n", "path": "openlibrary/plugins/openlibrary/status.py"}]} | 1,310 | 514 |
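The diff threads the short git SHA into `sentry_sdk.init(release=...)` so Sentry can attribute errors to a deploy. A hedged, minimal reproduction of the version helper plus the init call follows; the DSN and environment values are placeholders, not Open Library's configuration.

```python
from subprocess import PIPE, STDOUT, Popen

import sentry_sdk

def get_software_version() -> str:
    """Short git SHA of the current checkout, as in the diff above."""
    cmd = "git rev-parse --short HEAD --".split()
    return Popen(cmd, stdout=PIPE, stderr=STDOUT).stdout.read().decode().strip()

# Placeholder DSN/environment; in Open Library these come from infogami config.
sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",
    environment="development",
    release=get_software_version(),  # ties each event to a deployment SHA
)
```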
gh_patches_debug_5772 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-471 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Link to profile breaks if there is a space in the username
</issue>
<code>
[start of apps/embed/middleware.py]
1 class AjaxPathMiddleware(object):
2 """Append request path as a header.
3
4 In an ajax request, redirects are handled implicitly, so it it not possible
5 to know the path of the page where you end up. This middleware adds that
6 information in a header.
7 """
8
9 def process_response(self, request, response):
10 response['x-ajax-path'] = request.path
11 return response
12
[end of apps/embed/middleware.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/embed/middleware.py b/apps/embed/middleware.py
--- a/apps/embed/middleware.py
+++ b/apps/embed/middleware.py
@@ -1,3 +1,6 @@
+from django.utils.http import urlquote
+
+
class AjaxPathMiddleware(object):
"""Append request path as a header.
@@ -7,5 +10,5 @@
"""
def process_response(self, request, response):
- response['x-ajax-path'] = request.path
+ response['x-ajax-path'] = urlquote(request.path)
return response
| {"golden_diff": "diff --git a/apps/embed/middleware.py b/apps/embed/middleware.py\n--- a/apps/embed/middleware.py\n+++ b/apps/embed/middleware.py\n@@ -1,3 +1,6 @@\n+from django.utils.http import urlquote\n+\n+\n class AjaxPathMiddleware(object):\n \"\"\"Append request path as a header.\n \n@@ -7,5 +10,5 @@\n \"\"\"\n \n def process_response(self, request, response):\n- response['x-ajax-path'] = request.path\n+ response['x-ajax-path'] = urlquote(request.path)\n return response\n", "issue": "link to profile breaks if space in username\n\n", "before_files": [{"content": "class AjaxPathMiddleware(object):\n \"\"\"Append request path as a header.\n\n In an ajax request, redirects are handled implicitly, so it it not possible\n to know the path of the page where you end up. This middleware adds that\n information in a header.\n \"\"\"\n\n def process_response(self, request, response):\n response['x-ajax-path'] = request.path\n return response\n", "path": "apps/embed/middleware.py"}]} | 645 | 124 |
gh_patches_debug_11574 | rasdani/github-patches | git_diff | sunpy__sunpy-5293 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Provide an example of splitting sections of an attr query out of the Fido.search method.
@Cadair had this snippet of code:
``` python
import datetime
from sunpy.net import vso
from sunpy.time import parse_time
# Start time and end time for the AIA search
start = parse_time('2014/07/17T10:01:30')
stop = start + datetime.timedelta(seconds=12)
stop_hmi = start + datetime.timedelta(seconds=30)
# Define two VSO Searches for the AIA data and the HMI data
search_aia = (vso.attrs.Time(start, stop), vso.attrs.Instrument('AIA'))
search_hmi = (vso.attrs.Time(start, stop_hmi), vso.attrs.Instrument('HMI'),
vso.attrs.Physobs('LOS_magnetic_field'))
# Create the VSO Client
vsoClient = vso.VSOClient()
# Query VSO for both searches using the or operator `|`
results = vsoClient.query(search_aia | search_hmi)
```
That used to work but now I get this error.
``` python
TypeError: unsupported operand type(s) for |: 'tuple' and 'tuple'
```
Should this operation be possible?
</issue>
<code>
[start of examples/acquiring_data/searching_vso.py]
1 """
2 ======================================
3 Searching and downloading from the VSO
4 ======================================
5
6 How to download data from the VSO with Fido.
7 """
8 import astropy.units as u
9
10 from sunpy.net import Fido
11 from sunpy.net import attrs as a
12
13 ###############################################################################
14 # `sunpy.net.Fido` is the primary interface to search for and download data and
15 # will search the VSO when appropriate. The following example searches for all
16 # SOHO/EIT images between the times defined below by defining a
17 # timerange (`~sunpy.net.attrs.Time`) and the instrument (`~sunpy.net.attrs.Instrument`).
18
19 attrs_time = a.Time('2005/01/01 00:10', '2005/01/01 00:15')
20 result = Fido.search(attrs_time, a.Instrument.eit)
21
22 ###############################################################################
23 # Let's inspect the results.
24
25 print(result)
26
27 ###############################################################################
28 # The following shows how to download the results. If we
29 # don't provide a path it will download the file into the sunpy data directory.
30 # The output provides the path of the downloaded files.
31
32 downloaded_files = Fido.fetch(result)
33 print(downloaded_files)
34
35 ###############################################################################
36 # More complicated queries can be constructed by using relational operators.
37 # For example, it is possible to query two wavelengths at the same time with
38 # the OR operator (|).
39
40 result = Fido.search(a.Time('2020/03/04 00:00', '2020/03/04 00:02'),
41 a.Instrument.aia,
42 a.Wavelength(171*u.angstrom) | a.Wavelength(94*u.angstrom))
43 print(result)
44
[end of examples/acquiring_data/searching_vso.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/acquiring_data/searching_vso.py b/examples/acquiring_data/searching_vso.py
--- a/examples/acquiring_data/searching_vso.py
+++ b/examples/acquiring_data/searching_vso.py
@@ -41,3 +41,15 @@
a.Instrument.aia,
a.Wavelength(171*u.angstrom) | a.Wavelength(94*u.angstrom))
print(result)
+
+###############################################################################
+# We can even combine entire queries in this manner.
+# Here we will define two searches for the AIA and HMI data.
+# But unlike other examples, we have to ``&`` the individual queries.
+
+search_aia = (a.Time('2020/03/04 00:00', '2020/03/04 00:01') & a.Instrument.aia)
+search_hmi = (a.Time('2020/03/04 00:00', '2020/03/04 00:01')
+ & a.Instrument.hmi & a.Physobs.los_magnetic_field)
+
+result = Fido.search(search_aia | search_hmi)
+print(result)
| {"golden_diff": "diff --git a/examples/acquiring_data/searching_vso.py b/examples/acquiring_data/searching_vso.py\n--- a/examples/acquiring_data/searching_vso.py\n+++ b/examples/acquiring_data/searching_vso.py\n@@ -41,3 +41,15 @@\n a.Instrument.aia,\n a.Wavelength(171*u.angstrom) | a.Wavelength(94*u.angstrom))\n print(result)\n+\n+###############################################################################\n+# We can even combine entire queries in this manner.\n+# Here we will define two searches for the AIA and HMI data.\n+# But unlike other examples, we have to ``&`` the individual queries.\n+\n+search_aia = (a.Time('2020/03/04 00:00', '2020/03/04 00:01') & a.Instrument.aia)\n+search_hmi = (a.Time('2020/03/04 00:00', '2020/03/04 00:01')\n+ & a.Instrument.hmi & a.Physobs.los_magnetic_field)\n+\n+result = Fido.search(search_aia | search_hmi)\n+print(result)\n", "issue": "Provide an example of splitting sections of an attr query out of the Fido.search method.\n@Cadair's had this snippet of code\r\n\r\n``` python\r\nimport datetime\r\nfrom sunpy.net import vso\r\nfrom sunpy.time import parse_time\r\n\r\n# Start time and end time for the AIA search\r\nstart = parse_time('2014/07/17T10:01:30')\r\nstop = start + datetime.timedelta(seconds=12)\r\nstop_hmi = start + datetime.timedelta(seconds=30)\r\n\r\n# Define two VSO Searches for the AIA data and the HMI data\r\nsearch_aia = (vso.attrs.Time(start, stop), vso.attrs.Instrument('AIA'))\r\nsearch_hmi = (vso.attrs.Time(start, stop_hmi), vso.attrs.Instrument('HMI'),\r\n vso.attrs.Physobs('LOS_magnetic_field'))\r\n\r\n# Create the VSO Client\r\nvsoClient = vso.VSOClient()\r\n\r\n# Query VSO for both searches using the or operator `|`\r\nresults = vsoClient.query(search_aia | search_hmi)\r\n```\r\n\r\nThat used to work but now I get this error. \r\n\r\n``` python\r\nTypeError: unsupported operand type(s) for |: 'tuple' and 'tuple'\r\n```\r\n\r\nShould this operation be possible? \r\n\n", "before_files": [{"content": "\"\"\"\n======================================\nSearching and downloading from the VSO\n======================================\n\nHow to download data from the VSO with Fido.\n\"\"\"\nimport astropy.units as u\n\nfrom sunpy.net import Fido\nfrom sunpy.net import attrs as a\n\n###############################################################################\n# `sunpy.net.Fido` is the primary interface to search for and download data and\n# will search the VSO when appropriate. The following example searches for all\n# SOHO/EIT images between the times defined below by defining a\n# timerange (`~sunpy.net.attrs.Time`) and the instrument (`~sunpy.net.attrs.Instrument`).\n\nattrs_time = a.Time('2005/01/01 00:10', '2005/01/01 00:15')\nresult = Fido.search(attrs_time, a.Instrument.eit)\n\n###############################################################################\n# Let's inspect the results.\n\nprint(result)\n\n###############################################################################\n# The following shows how to download the results. 
If we\n# don't provide a path it will download the file into the sunpy data directory.\n# The output provides the path of the downloaded files.\n\ndownloaded_files = Fido.fetch(result)\nprint(downloaded_files)\n\n###############################################################################\n# More complicated queries can be constructed by using relational operators.\n# For example, it is possible to query two wavelengths at the same time with\n# the OR operator (|).\n\nresult = Fido.search(a.Time('2020/03/04 00:00', '2020/03/04 00:02'),\n a.Instrument.aia,\n a.Wavelength(171*u.angstrom) | a.Wavelength(94*u.angstrom))\nprint(result)\n", "path": "examples/acquiring_data/searching_vso.py"}]} | 1,276 | 275 |
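
For reference, a minimal sketch of the pattern the golden diff adds: raw tuples cannot be OR-ed, but attr expressions combined with `&` can. The times and instruments below are illustrative.

```python
# Minimal sketch of the fixed query composition, assuming the modern
# sunpy attrs API shown in the golden diff.
from sunpy.net import Fido
from sunpy.net import attrs as a

# Individual attrs within one search are joined with `&` ...
search_aia = (a.Time('2020/03/04 00:00', '2020/03/04 00:01') & a.Instrument.aia)
search_hmi = (a.Time('2020/03/04 00:00', '2020/03/04 00:01')
              & a.Instrument.hmi & a.Physobs.los_magnetic_field)

# ... and whole searches are joined with `|`. Because these are attr
# expressions rather than tuples, the `|` operator is defined.
result = Fido.search(search_aia | search_hmi)
print(result)
```
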
gh_patches_debug_2247 | rasdani/github-patches | git_diff | PaddlePaddle__PaddleDetection-8421 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Training emits long warnings
### Issue confirmation (Search before asking)
- [X] I have searched the [issues](https://github.com/PaddlePaddle/PaddleDetection/issues) and found no similar bug report.
### Bug Component
_No response_
### Describe the Bug
Training emits long warnings:
```
I0706 13:09:13.075042 3772 eager_method.cc:140] Warning:: 0D Tensor cannot be used as 'Tensor.numpy()[0]' . In order to avoid this problem, 0D Tensor will be changed to 1D numpy currently, but it's not correct and will be removed in release 2.6. For Tensor contain only one element, Please modify 'Tensor.numpy()[0]' to 'float(Tensor)' as soon as possible, otherwise 'Tensor.numpy()[0]' will raise error in release 2.6.
I0706 13:09:13.382442 3772 eager_method.cc:140] Warning:: 0D Tensor cannot be used as 'Tensor.numpy()[0]' . In order to avoid this problem, 0D Tensor will be changed to 1D numpy currently, but it's not correct and will be removed in release 2.6. For Tensor contain only one element, Please modify 'Tensor.numpy()[0]' to 'float(Tensor)' as soon as possible, otherwise 'Tensor.numpy()[0]' will raise error in release 2.6.
```
### Environment
PaddleDetection 2.6
PaddlePaddle 2.5.0
After investigation, changing line 77 of `ppdet/utils/stats.py` as follows removes the warning:
`v.update(stats[k].numpy())` → `v.update(float(stats[k]))`
### Bug description confirmation
- [X] I confirm that the bug replication steps, code change instructions, and environment information have been provided, and the problem can be reproduced.
### Are you willing to submit a PR?
- [ ] I'd like to help by submitting a PR!
</issue>
<code>
[start of ppdet/utils/stats.py]
1 # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import collections
16 import numpy as np
17
18 __all__ = ['SmoothedValue', 'TrainingStats']
19
20
21 class SmoothedValue(object):
22 """Track a series of values and provide access to smoothed values over a
23 window or the global series average.
24 """
25
26 def __init__(self, window_size=20, fmt=None):
27 if fmt is None:
28 fmt = "{median:.4f} ({avg:.4f})"
29 self.deque = collections.deque(maxlen=window_size)
30 self.fmt = fmt
31 self.total = 0.
32 self.count = 0
33
34 def update(self, value, n=1):
35 self.deque.append(value)
36 self.count += n
37 self.total += value * n
38
39 @property
40 def median(self):
41 return np.median(self.deque)
42
43 @property
44 def avg(self):
45 return np.mean(self.deque)
46
47 @property
48 def max(self):
49 return np.max(self.deque)
50
51 @property
52 def value(self):
53 return self.deque[-1]
54
55 @property
56 def global_avg(self):
57 return self.total / self.count
58
59 def __str__(self):
60 return self.fmt.format(
61 median=self.median, avg=self.avg, max=self.max, value=self.value)
62
63
64 class TrainingStats(object):
65 def __init__(self, window_size, delimiter=' '):
66 self.meters = None
67 self.window_size = window_size
68 self.delimiter = delimiter
69
70 def update(self, stats):
71 if self.meters is None:
72 self.meters = {
73 k: SmoothedValue(self.window_size)
74 for k in stats.keys()
75 }
76 for k, v in self.meters.items():
77 v.update(stats[k].numpy())
78
79 def get(self, extras=None):
80 stats = collections.OrderedDict()
81 if extras:
82 for k, v in extras.items():
83 stats[k] = v
84 for k, v in self.meters.items():
85 stats[k] = format(v.median, '.6f')
86
87 return stats
88
89 def log(self, extras=None):
90 d = self.get(extras)
91 strs = []
92 for k, v in d.items():
93 strs.append("{}: {}".format(k, str(v)))
94 return self.delimiter.join(strs)
95
[end of ppdet/utils/stats.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ppdet/utils/stats.py b/ppdet/utils/stats.py
--- a/ppdet/utils/stats.py
+++ b/ppdet/utils/stats.py
@@ -74,7 +74,7 @@
for k in stats.keys()
}
for k, v in self.meters.items():
- v.update(stats[k].numpy())
+ v.update(float(stats[k]))
def get(self, extras=None):
stats = collections.OrderedDict()
| {"golden_diff": "diff --git a/ppdet/utils/stats.py b/ppdet/utils/stats.py\n--- a/ppdet/utils/stats.py\n+++ b/ppdet/utils/stats.py\n@@ -74,7 +74,7 @@\n for k in stats.keys()\n }\n for k, v in self.meters.items():\n- v.update(stats[k].numpy())\n+ v.update(float(stats[k]))\n \n def get(self, extras=None):\n stats = collections.OrderedDict()\n", "issue": "\u8bad\u7ec3\u51fa\u73b0\u957f\u8b66\u544a\n### \u95ee\u9898\u786e\u8ba4 Search before asking\n\n- [X] \u6211\u5df2\u7ecf\u67e5\u8be2[\u5386\u53f2issue](https://github.com/PaddlePaddle/PaddleDetection/issues)\uff0c\u6ca1\u6709\u53d1\u73b0\u76f8\u4f3c\u7684bug\u3002I have searched the [issues](https://github.com/PaddlePaddle/PaddleDetection/issues) and found no similar bug report.\n\n\n### Bug\u7ec4\u4ef6 Bug Component\n\n_No response_\n\n### Bug\u63cf\u8ff0 Describe the Bug\n\n\u8bad\u7ec3\u51fa\u73b0\u957f\u8b66\u544a\r\n```\r\nI0706 13:09:13.075042 3772 eager_method.cc:140] Warning:: 0D Tensor cannot be used as 'Tensor.numpy()[0]' . In order to avoid this problem, 0D Tensor will be changed to 1D numpy currently, but it's not correct and will be removed in release 2.6. For Tensor contain only one element, Please modify 'Tensor.numpy()[0]' to 'float(Tensor)' as soon as possible, otherwise 'Tensor.numpy()[0]' will raise error in release 2.6.\r\nI0706 13:09:13.382442 3772 eager_method.cc:140] Warning:: 0D Tensor cannot be used as 'Tensor.numpy()[0]' . In order to avoid this problem, 0D Tensor will be changed to 1D numpy currently, but it's not correct and will be removed in release 2.6. For Tensor contain only one element, Please modify 'Tensor.numpy()[0]' to 'float(Tensor)' as soon as possible, otherwise 'Tensor.numpy()[0]' will raise error in release 2.6.\r\n```\n\n### \u590d\u73b0\u73af\u5883 Environment\n\nPaddleDetection2.6\r\nPaddlePaddle2.5.0\r\n\r\n\u7ecf\u8fc7\u6392\u67e5\u5c06`ppdet/utils/stats.py`\u7b2c77\u884c\u8fdb\u884c\u5982\u4e0b\u4fee\u6539\r\n`v.update(stats[k].numpy())`\u2192`v.update(float(stats[k]))`\n\n### Bug\u63cf\u8ff0\u786e\u8ba4 Bug description confirmation\n\n- [X] \u6211\u786e\u8ba4\u5df2\u7ecf\u63d0\u4f9b\u4e86Bug\u590d\u73b0\u6b65\u9aa4\u3001\u4ee3\u7801\u6539\u52a8\u8bf4\u660e\u3001\u4ee5\u53ca\u73af\u5883\u4fe1\u606f\uff0c\u786e\u8ba4\u95ee\u9898\u662f\u53ef\u4ee5\u590d\u73b0\u7684\u3002I confirm that the bug replication steps, code change instructions, and environment information have been provided, and the problem can be reproduced.\n\n\n### \u662f\u5426\u613f\u610f\u63d0\u4ea4PR\uff1f Are you willing to submit a PR?\n\n- [ ] \u6211\u613f\u610f\u63d0\u4ea4PR\uff01I'd like to help by submitting a PR!\n", "before_files": [{"content": "# Copyright (c) 2019 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\nimport numpy as np\n\n__all__ = ['SmoothedValue', 'TrainingStats']\n\n\nclass SmoothedValue(object):\n \"\"\"Track a series of values and provide access to smoothed values over a\n window or the global series average.\n \"\"\"\n\n def __init__(self, window_size=20, fmt=None):\n if fmt is None:\n fmt = \"{median:.4f} ({avg:.4f})\"\n self.deque = collections.deque(maxlen=window_size)\n self.fmt = fmt\n self.total = 0.\n self.count = 0\n\n def update(self, value, n=1):\n self.deque.append(value)\n self.count += n\n self.total += value * n\n\n @property\n def median(self):\n return np.median(self.deque)\n\n @property\n def avg(self):\n return np.mean(self.deque)\n\n @property\n def max(self):\n return np.max(self.deque)\n\n @property\n def value(self):\n return self.deque[-1]\n\n @property\n def global_avg(self):\n return self.total / self.count\n\n def __str__(self):\n return self.fmt.format(\n median=self.median, avg=self.avg, max=self.max, value=self.value)\n\n\nclass TrainingStats(object):\n def __init__(self, window_size, delimiter=' '):\n self.meters = None\n self.window_size = window_size\n self.delimiter = delimiter\n\n def update(self, stats):\n if self.meters is None:\n self.meters = {\n k: SmoothedValue(self.window_size)\n for k in stats.keys()\n }\n for k, v in self.meters.items():\n v.update(stats[k].numpy())\n\n def get(self, extras=None):\n stats = collections.OrderedDict()\n if extras:\n for k, v in extras.items():\n stats[k] = v\n for k, v in self.meters.items():\n stats[k] = format(v.median, '.6f')\n\n return stats\n\n def log(self, extras=None):\n d = self.get(extras)\n strs = []\n for k, v in d.items():\n strs.append(\"{}: {}\".format(k, str(v)))\n return self.delimiter.join(strs)\n", "path": "ppdet/utils/stats.py"}]} | 1,877 | 99 |
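
A small self-contained sketch of why `float(stats[k])` is the right call: it works uniformly for plain numbers and for any 0-D tensor that implements `__float__`, avoiding the deprecated `Tensor.numpy()[0]` path. `FakeTensor` here is a stand-in, not the real `paddle.Tensor`.

```python
import collections

class FakeTensor:
    """Stand-in for a 0-D tensor; only __float__ matters for the fix."""
    def __init__(self, value):
        self._value = value
    def __float__(self):           # what float(stats[k]) relies on
        return float(self._value)

window = collections.deque(maxlen=20)
for loss in (FakeTensor(0.37), 0.41, FakeTensor(0.33)):
    window.append(float(loss))    # uniform for tensors and numbers alike
print(sum(window) / len(window))
```
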
gh_patches_debug_28974 | rasdani/github-patches | git_diff | prowler-cloud__prowler-2282 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug]: False positives on some checks?
### Steps to Reproduce
Hi,
it looks like some checks produce false positives (they are tagged as warning because I've allowlisted them):
```
Check ID: ec2_ebs_snapshots_encrypted - ec2 [medium]
WARNING eu-central-1: EBS Snapshot snap-112 is unencrypted.
WARNING eu-central-1: EBS Snapshot snap-113 is encrypted. <<<<
```
```
Check ID: iam_policy_allows_privilege_escalation - iam [high]
WARNING eu-central-1: Custom Policy arn:aws:iam::112:policy/aws_admin_access does not allow privilege escalation
```
Are you maybe simply overriding the status (even "PASS") with WARNING in case of an allowlist match?
Another type of issue but more like a question:
_sns_topics_not_publicly_accessible_ triggers with
` WARNING eu-central-1: SNS topic cloudwatch-pagerduty-alarms-ec2-state-changes policy with public access but has a Condition`
which is (from the user's perspective) a false positive as well, because we have a condition which prowler cannot evaluate?
### Expected behavior
none
### Actual Result with Screenshots or Logs
none
### How did you install Prowler?
Cloning the repository from github.com (git clone)
### Environment Resource
locally
### OS used
Linux
### Prowler version
3.4.1
### Pip version
none
### Context
_No response_
</issue>
<code>
[start of prowler/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible.py]
1 from prowler.lib.check.models import Check, Check_Report_AWS
2 from prowler.providers.aws.services.sns.sns_client import sns_client
3
4
5 class sns_topics_not_publicly_accessible(Check):
6 def execute(self):
7 findings = []
8 for topic in sns_client.topics:
9 report = Check_Report_AWS(self.metadata())
10 report.region = topic.region
11 report.resource_id = topic.name
12 report.resource_arn = topic.arn
13 report.resource_tags = topic.tags
14 report.status = "PASS"
15 report.status_extended = f"SNS topic {topic.name} without public access"
16 if topic.policy:
17 for statement in topic.policy["Statement"]:
18 # Only check allow statements
19 if statement["Effect"] == "Allow":
20 if (
21 "*" in statement["Principal"]
22 or (
23 "AWS" in statement["Principal"]
24 and "*" in statement["Principal"]["AWS"]
25 )
26 or (
27 "CanonicalUser" in statement["Principal"]
28 and "*" in statement["Principal"]["CanonicalUser"]
29 )
30 ):
31 if "Condition" not in statement:
32 report.status = "FAIL"
33 report.status_extended = (
34 f"SNS topic {topic.name} policy with public access"
35 )
36 else:
37 report.status = "FAIL"
38 report.status_extended = f"SNS topic {topic.name} policy with public access but has a Condition"
39
40 findings.append(report)
41
42 return findings
43
[end of prowler/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/prowler/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible.py b/prowler/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible.py
--- a/prowler/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible.py
+++ b/prowler/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible.py
@@ -12,7 +12,7 @@
report.resource_arn = topic.arn
report.resource_tags = topic.tags
report.status = "PASS"
- report.status_extended = f"SNS topic {topic.name} without public access"
+ report.status_extended = f"SNS topic {topic.name} is not publicly accesible"
if topic.policy:
for statement in topic.policy["Statement"]:
# Only check allow statements
@@ -31,11 +31,11 @@
if "Condition" not in statement:
report.status = "FAIL"
report.status_extended = (
- f"SNS topic {topic.name} policy with public access"
+ f"SNS topic {topic.name} is publicly accesible"
)
else:
- report.status = "FAIL"
- report.status_extended = f"SNS topic {topic.name} policy with public access but has a Condition"
+ report.status = "PASS"
+ report.status_extended = f"SNS topic {topic.name} is publicly accesible but has a Condition that could filter it"
findings.append(report)
| {"golden_diff": "diff --git a/prowler/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible.py b/prowler/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible.py\n--- a/prowler/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible.py\n+++ b/prowler/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible.py\n@@ -12,7 +12,7 @@\n report.resource_arn = topic.arn\n report.resource_tags = topic.tags\n report.status = \"PASS\"\n- report.status_extended = f\"SNS topic {topic.name} without public access\"\n+ report.status_extended = f\"SNS topic {topic.name} is not publicly accesible\"\n if topic.policy:\n for statement in topic.policy[\"Statement\"]:\n # Only check allow statements\n@@ -31,11 +31,11 @@\n if \"Condition\" not in statement:\n report.status = \"FAIL\"\n report.status_extended = (\n- f\"SNS topic {topic.name} policy with public access\"\n+ f\"SNS topic {topic.name} is publicly accesible\"\n )\n else:\n- report.status = \"FAIL\"\n- report.status_extended = f\"SNS topic {topic.name} policy with public access but has a Condition\"\n+ report.status = \"PASS\"\n+ report.status_extended = f\"SNS topic {topic.name} is publicly accesible but has a Condition that could filter it\"\n \n findings.append(report)\n", "issue": "[Bug]: False positives on some checks?\n### Steps to Reproduce\n\nHi,\r\n\r\nit looks like some checks produce false positives (they are tagged as warning because I've allowlisted them):\r\n\r\n```\r\nCheck ID: ec2_ebs_snapshots_encrypted - ec2 [medium]\r\n WARNING eu-central-1: EBS Snapshot snap-112 is unencrypted.\r\n WARNING eu-central-1: EBS Snapshot snap-113 is encrypted. 
<<<<\r\n```\r\n\r\n\r\n```\r\nCheck ID: iam_policy_allows_privilege_escalation - iam [high]\r\n WARNING eu-central-1: Custom Policy arn:aws:iam::112:policy/aws_admin_access does not allow privilege escalation\r\n```\r\n\r\nAre you maybe simply overring the status (also \"PASS\") by WARNING in case of an allowlist match?\r\n\r\n\r\nAnother type of issue but more like a question:\r\n\r\n_sns_topics_not_publicly_accessible_ triggers with \r\n` WARNING eu-central-1: SNS topic cloudwatch-pagerduty-alarms-ec2-state-changes policy with public access but has a Condition`\r\nwhich is (from the User's perspective) a false positive as well because we have a condition, which prowler cannot evaluate?\r\n\r\n\r\n\n\n### Expected behavior\n\nnone\n\n### Actual Result with Screenshots or Logs\n\nnone\n\n### How did you install Prowler?\n\nCloning the repository from github.com (git clone)\n\n### Environment Resource\n\nlocally\n\n### OS used\n\nLinux\n\n### Prowler version\n\n3.4.1\n\n### Pip version\n\nnone\n\n### Context\n\n_No response_\n", "before_files": [{"content": "from prowler.lib.check.models import Check, Check_Report_AWS\nfrom prowler.providers.aws.services.sns.sns_client import sns_client\n\n\nclass sns_topics_not_publicly_accessible(Check):\n def execute(self):\n findings = []\n for topic in sns_client.topics:\n report = Check_Report_AWS(self.metadata())\n report.region = topic.region\n report.resource_id = topic.name\n report.resource_arn = topic.arn\n report.resource_tags = topic.tags\n report.status = \"PASS\"\n report.status_extended = f\"SNS topic {topic.name} without public access\"\n if topic.policy:\n for statement in topic.policy[\"Statement\"]:\n # Only check allow statements\n if statement[\"Effect\"] == \"Allow\":\n if (\n \"*\" in statement[\"Principal\"]\n or (\n \"AWS\" in statement[\"Principal\"]\n and \"*\" in statement[\"Principal\"][\"AWS\"]\n )\n or (\n \"CanonicalUser\" in statement[\"Principal\"]\n and \"*\" in statement[\"Principal\"][\"CanonicalUser\"]\n )\n ):\n if \"Condition\" not in statement:\n report.status = \"FAIL\"\n report.status_extended = (\n f\"SNS topic {topic.name} policy with public access\"\n )\n else:\n report.status = \"FAIL\"\n report.status_extended = f\"SNS topic {topic.name} policy with public access but has a Condition\"\n\n findings.append(report)\n\n return findings\n", "path": "prowler/providers/aws/services/sns/sns_topics_not_publicly_accessible/sns_topics_not_publicly_accessible.py"}]} | 1,281 | 344 |
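
A runnable sketch of the decision logic the golden diff arrives at, extracted into a standalone function: a public principal with no Condition fails, while a Condition that could restrict access keeps the finding at PASS with a note. The statement and topic name in the example are made up.

```python
def classify_statement(statement, topic_name):
    principal = statement.get("Principal", {})
    public = (
        principal == "*"
        or ("AWS" in principal and "*" in principal["AWS"])
        or ("CanonicalUser" in principal and "*" in principal["CanonicalUser"])
    )
    if statement.get("Effect") == "Allow" and public:
        if "Condition" not in statement:
            return "FAIL", f"SNS topic {topic_name} is publicly accessible"
        return ("PASS", f"SNS topic {topic_name} is publicly accessible "
                        "but has a Condition that could filter it")
    return "PASS", f"SNS topic {topic_name} is not publicly accessible"

print(classify_statement(
    {"Effect": "Allow", "Principal": {"AWS": "*"},
     "Condition": {"StringEquals": {"aws:SourceAccount": "123456789012"}}},
    "cloudwatch-alarms"))
```
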
gh_patches_debug_26160 | rasdani/github-patches | git_diff | buildbot__buildbot-1614 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix bytes/unicode issue to fix test on Python 3
</issue>
<code>
[start of master/buildbot/db/schedulers.py]
1 # This file is part of Buildbot. Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Copyright Buildbot Team Members
15
16 import sqlalchemy as sa
17 import sqlalchemy.exc
18
19 from buildbot.db import base
20
21
22 class SchedulersConnectorComponent(base.DBConnectorComponent):
23 # Documentation is in developer/database.rst
24
25 def classifyChanges(self, objectid, classifications):
26 def thd(conn):
27 transaction = conn.begin()
28 tbl = self.db.model.scheduler_changes
29 ins_q = tbl.insert()
30 upd_q = tbl.update(
31 ((tbl.c.objectid == objectid)
32 & (tbl.c.changeid == sa.bindparam('wc_changeid'))))
33 for changeid, important in classifications.items():
34 # convert the 'important' value into an integer, since that
35 # is the column type
36 imp_int = important and 1 or 0
37 try:
38 conn.execute(ins_q,
39 objectid=objectid,
40 changeid=changeid,
41 important=imp_int)
42 except (sqlalchemy.exc.ProgrammingError,
43 sqlalchemy.exc.IntegrityError):
44 transaction.rollback()
45 transaction = conn.begin()
46 # insert failed, so try an update
47 conn.execute(upd_q,
48 wc_changeid=changeid,
49 important=imp_int)
50
51 transaction.commit()
52 return self.db.pool.do(thd)
53
54 def flushChangeClassifications(self, objectid, less_than=None):
55 def thd(conn):
56 sch_ch_tbl = self.db.model.scheduler_changes
57 wc = (sch_ch_tbl.c.objectid == objectid)
58 if less_than is not None:
59 wc = wc & (sch_ch_tbl.c.changeid < less_than)
60 q = sch_ch_tbl.delete(whereclause=wc)
61 conn.execute(q)
62 return self.db.pool.do(thd)
63
64 class Thunk:
65 pass
66
67 def getChangeClassifications(self, objectid, branch=Thunk,
68 repository=Thunk, project=Thunk,
69 codebase=Thunk):
70 def thd(conn):
71 sch_ch_tbl = self.db.model.scheduler_changes
72 ch_tbl = self.db.model.changes
73
74 wc = (sch_ch_tbl.c.objectid == objectid)
75
76 # may need to filter further based on branch, etc
77 extra_wheres = []
78 if branch is not self.Thunk:
79 extra_wheres.append(ch_tbl.c.branch == branch)
80 if repository is not self.Thunk:
81 extra_wheres.append(ch_tbl.c.repository == repository)
82 if project is not self.Thunk:
83 extra_wheres.append(ch_tbl.c.project == project)
84 if codebase is not self.Thunk:
85 extra_wheres.append(ch_tbl.c.codebase == codebase)
86
87 # if we need to filter further append those, as well as a join
88 # on changeid (but just once for that one)
89 if extra_wheres:
90 wc &= (sch_ch_tbl.c.changeid == ch_tbl.c.changeid)
91 for w in extra_wheres:
92 wc &= w
93
94 q = sa.select(
95 [sch_ch_tbl.c.changeid, sch_ch_tbl.c.important],
96 whereclause=wc)
97 return dict([(r.changeid, [False, True][r.important])
98 for r in conn.execute(q)])
99 return self.db.pool.do(thd)
100
[end of master/buildbot/db/schedulers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/master/buildbot/db/schedulers.py b/master/buildbot/db/schedulers.py
--- a/master/buildbot/db/schedulers.py
+++ b/master/buildbot/db/schedulers.py
@@ -24,13 +24,13 @@
def classifyChanges(self, objectid, classifications):
def thd(conn):
- transaction = conn.begin()
tbl = self.db.model.scheduler_changes
ins_q = tbl.insert()
upd_q = tbl.update(
((tbl.c.objectid == objectid)
& (tbl.c.changeid == sa.bindparam('wc_changeid'))))
for changeid, important in classifications.items():
+ transaction = conn.begin()
# convert the 'important' value into an integer, since that
# is the column type
imp_int = important and 1 or 0
@@ -48,7 +48,7 @@
wc_changeid=changeid,
important=imp_int)
- transaction.commit()
+ transaction.commit()
return self.db.pool.do(thd)
def flushChangeClassifications(self, objectid, less_than=None):
| {"golden_diff": "diff --git a/master/buildbot/db/schedulers.py b/master/buildbot/db/schedulers.py\n--- a/master/buildbot/db/schedulers.py\n+++ b/master/buildbot/db/schedulers.py\n@@ -24,13 +24,13 @@\n \n def classifyChanges(self, objectid, classifications):\n def thd(conn):\n- transaction = conn.begin()\n tbl = self.db.model.scheduler_changes\n ins_q = tbl.insert()\n upd_q = tbl.update(\n ((tbl.c.objectid == objectid)\n & (tbl.c.changeid == sa.bindparam('wc_changeid'))))\n for changeid, important in classifications.items():\n+ transaction = conn.begin()\n # convert the 'important' value into an integer, since that\n # is the column type\n imp_int = important and 1 or 0\n@@ -48,7 +48,7 @@\n wc_changeid=changeid,\n important=imp_int)\n \n- transaction.commit()\n+ transaction.commit()\n return self.db.pool.do(thd)\n \n def flushChangeClassifications(self, objectid, less_than=None):\n", "issue": "Fix bytes/unicode issue to fix test on Python 3\n\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nimport sqlalchemy as sa\nimport sqlalchemy.exc\n\nfrom buildbot.db import base\n\n\nclass SchedulersConnectorComponent(base.DBConnectorComponent):\n # Documentation is in developer/database.rst\n\n def classifyChanges(self, objectid, classifications):\n def thd(conn):\n transaction = conn.begin()\n tbl = self.db.model.scheduler_changes\n ins_q = tbl.insert()\n upd_q = tbl.update(\n ((tbl.c.objectid == objectid)\n & (tbl.c.changeid == sa.bindparam('wc_changeid'))))\n for changeid, important in classifications.items():\n # convert the 'important' value into an integer, since that\n # is the column type\n imp_int = important and 1 or 0\n try:\n conn.execute(ins_q,\n objectid=objectid,\n changeid=changeid,\n important=imp_int)\n except (sqlalchemy.exc.ProgrammingError,\n sqlalchemy.exc.IntegrityError):\n transaction.rollback()\n transaction = conn.begin()\n # insert failed, so try an update\n conn.execute(upd_q,\n wc_changeid=changeid,\n important=imp_int)\n\n transaction.commit()\n return self.db.pool.do(thd)\n\n def flushChangeClassifications(self, objectid, less_than=None):\n def thd(conn):\n sch_ch_tbl = self.db.model.scheduler_changes\n wc = (sch_ch_tbl.c.objectid == objectid)\n if less_than is not None:\n wc = wc & (sch_ch_tbl.c.changeid < less_than)\n q = sch_ch_tbl.delete(whereclause=wc)\n conn.execute(q)\n return self.db.pool.do(thd)\n\n class Thunk:\n pass\n\n def getChangeClassifications(self, objectid, branch=Thunk,\n repository=Thunk, project=Thunk,\n codebase=Thunk):\n def thd(conn):\n sch_ch_tbl = self.db.model.scheduler_changes\n ch_tbl = self.db.model.changes\n\n wc = (sch_ch_tbl.c.objectid == objectid)\n\n # may need to filter further based on branch, etc\n extra_wheres = []\n if branch is not self.Thunk:\n extra_wheres.append(ch_tbl.c.branch == branch)\n if repository is not self.Thunk:\n 
extra_wheres.append(ch_tbl.c.repository == repository)\n if project is not self.Thunk:\n extra_wheres.append(ch_tbl.c.project == project)\n if codebase is not self.Thunk:\n extra_wheres.append(ch_tbl.c.codebase == codebase)\n\n # if we need to filter further append those, as well as a join\n # on changeid (but just once for that one)\n if extra_wheres:\n wc &= (sch_ch_tbl.c.changeid == ch_tbl.c.changeid)\n for w in extra_wheres:\n wc &= w\n\n q = sa.select(\n [sch_ch_tbl.c.changeid, sch_ch_tbl.c.important],\n whereclause=wc)\n return dict([(r.changeid, [False, True][r.important])\n for r in conn.execute(q)])\n return self.db.pool.do(thd)\n", "path": "master/buildbot/db/schedulers.py"}]} | 1,588 | 243 |
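
A runnable sketch of the transaction-per-change pattern the golden diff adopts, using sqlite3 as a stand-in for buildbot's SQLAlchemy connection; the schema and helper are illustrative only. The key point is that each change gets its own begin/commit, so a rollback on a failed INSERT does not discard work already done for earlier changes.

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE scheduler_changes "
             "(objectid INT, changeid INT, important INT, "
             "PRIMARY KEY (objectid, changeid))")

def classify(objectid, classifications):
    for changeid, important in classifications.items():
        imp = 1 if important else 0
        try:
            with conn:   # one transaction per change, as in the fix
                conn.execute("INSERT INTO scheduler_changes VALUES (?, ?, ?)",
                             (objectid, changeid, imp))
        except sqlite3.IntegrityError:
            with conn:   # insert failed and rolled back; retry as UPDATE
                conn.execute("UPDATE scheduler_changes SET important=? "
                             "WHERE objectid=? AND changeid=?",
                             (imp, objectid, changeid))

classify(1, {10: True, 11: False})
classify(1, {10: False})  # second call exercises the UPDATE branch
print(conn.execute("SELECT * FROM scheduler_changes").fetchall())
```
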
gh_patches_debug_23728 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-179 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Nested mappings raise an error
```cfn-lint 0.3.1```
We use nested maps in our templates:
```yaml
Mappings:
RegionAccountToAZ:
ap-northeast-1:
0123456789:
- ap-northeast-1a
- ap-northeast-1c
- none
9876543210:
- ap-northeast-1a
- ap-northeast-1b
- ap-northeast-1c
```
We'd access this data using a construction like `!FindInMap [RegionAccountToAZ, !Ref 'AWS::Region', !Ref 'AWS::AccountId']`. However cfn-lint says:
```
E7001 Mapping RegionAccountToAZ has invalid property at 9876543210
test.cfn.yaml:3:5
E7001 Mapping RegionAccountToAZ has invalid property at 0123456789
test.cfn.yaml:4:7
```
</issue>
<code>
[start of src/cfnlint/rules/mappings/Configuration.py]
1 """
2 Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 from cfnlint import CloudFormationLintRule
18 from cfnlint import RuleMatch
19
20
21 class Configuration(CloudFormationLintRule):
22 """Check if Mappings are configured correctly"""
23 id = 'E7001'
24 shortdesc = 'Mappings are appropriately configured'
25 description = 'Check if Mappings are properly configured'
26 tags = ['base', 'mappings']
27
28 def match(self, cfn):
29 """Check CloudFormation Parameters"""
30
31 matches = list()
32
33 mappings = cfn.template.get('Mappings', {})
34 if mappings:
35 for mapname, mapobj in mappings.items():
36 if not isinstance(mapobj, dict):
37 message = 'Mapping {0} has invalid property'
38 matches.append(RuleMatch(
39 ['Mappings', mapname],
40 message.format(mapname)
41 ))
42 else:
43 for firstkey in mapobj:
44 firstkeyobj = mapobj[firstkey]
45 if not isinstance(firstkeyobj, dict):
46 message = 'Mapping {0} has invalid property at {1}'
47 matches.append(RuleMatch(
48 ['Mappings', mapname, firstkey],
49 message.format(mapname, firstkeyobj)
50 ))
51 else:
52 for secondkey in firstkeyobj:
53 if isinstance(firstkeyobj[secondkey], (dict, list)):
54 message = 'Mapping {0} has invalid property at {1}'
55 matches.append(RuleMatch(
56 ['Mappings', mapname, firstkey, secondkey],
57 message.format(mapname, secondkey)
58 ))
59
60 return matches
61
[end of src/cfnlint/rules/mappings/Configuration.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cfnlint/rules/mappings/Configuration.py b/src/cfnlint/rules/mappings/Configuration.py
--- a/src/cfnlint/rules/mappings/Configuration.py
+++ b/src/cfnlint/rules/mappings/Configuration.py
@@ -14,6 +14,7 @@
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
+import six
from cfnlint import CloudFormationLintRule
from cfnlint import RuleMatch
@@ -50,7 +51,9 @@
))
else:
for secondkey in firstkeyobj:
- if isinstance(firstkeyobj[secondkey], (dict, list)):
+ if not isinstance(
+ firstkeyobj[secondkey],
+ (six.string_types, list, six.integer_types)):
message = 'Mapping {0} has invalid property at {1}'
matches.append(RuleMatch(
['Mappings', mapname, firstkey, secondkey],
| {"golden_diff": "diff --git a/src/cfnlint/rules/mappings/Configuration.py b/src/cfnlint/rules/mappings/Configuration.py\n--- a/src/cfnlint/rules/mappings/Configuration.py\n+++ b/src/cfnlint/rules/mappings/Configuration.py\n@@ -14,6 +14,7 @@\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n \"\"\"\n+import six\n from cfnlint import CloudFormationLintRule\n from cfnlint import RuleMatch\n \n@@ -50,7 +51,9 @@\n ))\n else:\n for secondkey in firstkeyobj:\n- if isinstance(firstkeyobj[secondkey], (dict, list)):\n+ if not isinstance(\n+ firstkeyobj[secondkey],\n+ (six.string_types, list, six.integer_types)):\n message = 'Mapping {0} has invalid property at {1}'\n matches.append(RuleMatch(\n ['Mappings', mapname, firstkey, secondkey],\n", "issue": "Nested mappings raise an error\n```cfn-lint 0.3.1```\r\n\r\nWe use nested maps in our templates:\r\n\r\n```yaml\r\nMappings:\r\n RegionAccountToAZ:\r\n ap-northeast-1:\r\n 0123456789:\r\n - ap-northeast-1a\r\n - ap-northeast-1c\r\n - none\r\n 9876543210:\r\n - ap-northeast-1a\r\n - ap-northeast-1b\r\n - ap-northeast-1c\r\n```\r\n\r\nWe'd access this data using a construction like `!FindInMap [RegionAccountToAZ, !Ref 'AWS::Region', !Ref 'AWS::AccountId']`. However cfn-lint says:\r\n\r\n```\r\nE7001 Mapping RegionAccountToAZ has invalid property at 9876543210\r\ntest.cfn.yaml:3:5\r\n\r\nE7001 Mapping RegionAccountToAZ has invalid property at 0123456789\r\ntest.cfn.yaml:4:7\r\n```\n", "before_files": [{"content": "\"\"\"\n Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass Configuration(CloudFormationLintRule):\n \"\"\"Check if Mappings are configured correctly\"\"\"\n id = 'E7001'\n shortdesc = 'Mappings are appropriately configured'\n description = 'Check if Mappings are properly configured'\n tags = ['base', 'mappings']\n\n def match(self, cfn):\n \"\"\"Check CloudFormation Parameters\"\"\"\n\n matches = list()\n\n mappings = cfn.template.get('Mappings', {})\n if mappings:\n for mapname, mapobj in mappings.items():\n if not isinstance(mapobj, dict):\n message = 'Mapping {0} has invalid property'\n matches.append(RuleMatch(\n ['Mappings', mapname],\n message.format(mapname)\n ))\n else:\n for firstkey in mapobj:\n firstkeyobj = mapobj[firstkey]\n if not isinstance(firstkeyobj, dict):\n message = 'Mapping {0} has invalid property at {1}'\n matches.append(RuleMatch(\n ['Mappings', mapname, firstkey],\n message.format(mapname, firstkeyobj)\n ))\n else:\n for secondkey in firstkeyobj:\n if isinstance(firstkeyobj[secondkey], (dict, list)):\n message = 'Mapping {0} has invalid property at {1}'\n matches.append(RuleMatch(\n ['Mappings', mapname, firstkey, secondkey],\n message.format(mapname, secondkey)\n ))\n\n return matches\n", "path": "src/cfnlint/rules/mappings/Configuration.py"}]} | 1,435 | 224 |
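
A runnable sketch of the relaxed leaf check, using plain `isinstance` instead of `six` (assuming Python 3 only) and the mapping from the issue itself: list-valued second-level keys now validate cleanly, and only values outside string/list/integer are flagged.

```python
def invalid_leaves(mapping_name, mapobj):
    matches = []
    for first_key, first_obj in mapobj.items():
        for second_key, value in first_obj.items():
            # Reject only values that are not strings, lists, or ints.
            if not isinstance(value, (str, list, int)):
                matches.append((mapping_name, first_key, second_key))
    return matches

region_map = {
    "ap-northeast-1": {
        "0123456789": ["ap-northeast-1a", "ap-northeast-1c", "none"],
        "9876543210": ["ap-northeast-1a", "ap-northeast-1b", "ap-northeast-1c"],
    }
}
print(invalid_leaves("RegionAccountToAZ", region_map))  # [] -- no findings
```
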
gh_patches_debug_20 | rasdani/github-patches | git_diff | google__pytype-251 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add test_data to MANIFEST.in
This PR also needs to be imported and re-exported rather than merged directly. I'm planning to use this one to test the import process fix I sent you.
Fixes https://github.com/google/pytype/issues/245.
</issue>
<code>
[start of pytype/__version__.py]
1 # pylint: skip-file
2 __version__ = '2019.02.13'
3
[end of pytype/__version__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pytype/__version__.py b/pytype/__version__.py
--- a/pytype/__version__.py
+++ b/pytype/__version__.py
@@ -1,2 +1,2 @@
# pylint: skip-file
-__version__ = '2019.02.13'
+__version__ = '2019.03.01'
| {"golden_diff": "diff --git a/pytype/__version__.py b/pytype/__version__.py\n--- a/pytype/__version__.py\n+++ b/pytype/__version__.py\n@@ -1,2 +1,2 @@\n # pylint: skip-file\n-__version__ = '2019.02.13'\n+__version__ = '2019.03.01'\n", "issue": "Add test_data to MANIFEST.in\nThis PR also needs to be imported and re-exported rather than merged directly. I'm planning to use this one to test the import process fix I sent you.\r\n\r\nFixes https://github.com/google/pytype/issues/245.\n", "before_files": [{"content": "# pylint: skip-file\n__version__ = '2019.02.13'\n", "path": "pytype/__version__.py"}]} | 619 | 87 |
gh_patches_debug_7061 | rasdani/github-patches | git_diff | mindsdb__lightwood-1051 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Lightwood pip packages creates tests module
Installing lightwood creates 'tests' module in python site-packages
Steps to reproduce:
- `pip install lightwood`
- in python
- `import tests`
- `print(tests.__file__) `
It will show that 'tests' is in site-packages
</issue>
<code>
[start of setup.py]
1 import sys
2 import setuptools
3 import os
4
5
6 def remove_requirements(requirements, name, replace=''):
7 new_requirements = []
8 for requirement in requirements:
9 if requirement.split(' ')[0] != name:
10 new_requirements.append(requirement)
11 elif replace is not None:
12 new_requirements.append(replace)
13 return new_requirements
14
15
16 sys_platform = sys.platform
17
18 about = {}
19 with open("lightwood/__about__.py") as fp:
20 exec(fp.read(), about)
21
22 with open("README.md", "r") as fh:
23 long_description = fh.read()
24
25 with open('requirements.txt') as req_file:
26 requirements = [req.strip() for req in req_file.read().splitlines()]
27
28 extra_requirements = {}
29 for fn in os.listdir('.'):
30 if fn.startswith('requirements_') and fn.endswith('.txt'):
31 extra_name = fn.replace('requirements_', '').replace('.txt', '')
32 with open(fn) as fp:
33 extra = [req.strip() for req in fp.read().splitlines()]
34 extra_requirements[extra_name] = extra
35 full_requirements = []
36 for v in extra_requirements.values():
37 full_requirements += v
38 extra_requirements['all_extras'] = list(set(full_requirements))
39
40 # Windows specific requirements
41 if sys_platform in ['win32', 'cygwin', 'windows']:
42 # These have to be installed manually or via the installers in windows
43 requirements = remove_requirements(requirements, 'torch')
44
45 setuptools.setup(
46 name=about['__title__'],
47 version=about['__version__'],
48 url=about['__github__'],
49 download_url=about['__pypi__'],
50 license=about['__license__'],
51 author=about['__author__'],
52 author_email=about['__email__'],
53 description=about['__description__'],
54 long_description=long_description,
55 long_description_content_type="text/markdown",
56 packages=setuptools.find_packages(),
57 package_data={'project': ['requirements.txt']},
58 install_requires=requirements,
59 extras_require=extra_requirements,
60 classifiers=[
61 "Programming Language :: Python :: 3",
62 "License :: OSI Approved :: MIT License",
63 "Operating System :: OS Independent",
64 ],
65 python_requires=">=3.7"
66 )
67
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -53,7 +53,7 @@
description=about['__description__'],
long_description=long_description,
long_description_content_type="text/markdown",
- packages=setuptools.find_packages(),
+ packages=setuptools.find_packages(exclude=["tests", "tests.*"]),
package_data={'project': ['requirements.txt']},
install_requires=requirements,
extras_require=extra_requirements,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -53,7 +53,7 @@\n description=about['__description__'],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n- packages=setuptools.find_packages(),\n+ packages=setuptools.find_packages(exclude=[\"tests\", \"tests.*\"]),\n package_data={'project': ['requirements.txt']},\n install_requires=requirements,\n extras_require=extra_requirements,\n", "issue": "Lightwood pip packages creates tests module\nInstalling lightwood creates 'tests' module in python site-packages\r\n\r\nSteps to reproduce:\r\n- `pip install lightwood`\r\n- in python\r\n - `import tests`\r\n - `print(tests.__file__) `\r\nIt will show that 'tests' is in site-packages\n", "before_files": [{"content": "import sys\nimport setuptools\nimport os\n\n\ndef remove_requirements(requirements, name, replace=''):\n new_requirements = []\n for requirement in requirements:\n if requirement.split(' ')[0] != name:\n new_requirements.append(requirement)\n elif replace is not None:\n new_requirements.append(replace)\n return new_requirements\n\n\nsys_platform = sys.platform\n\nabout = {}\nwith open(\"lightwood/__about__.py\") as fp:\n exec(fp.read(), about)\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nwith open('requirements.txt') as req_file:\n requirements = [req.strip() for req in req_file.read().splitlines()]\n\nextra_requirements = {}\nfor fn in os.listdir('.'):\n if fn.startswith('requirements_') and fn.endswith('.txt'):\n extra_name = fn.replace('requirements_', '').replace('.txt', '')\n with open(fn) as fp:\n extra = [req.strip() for req in fp.read().splitlines()]\n extra_requirements[extra_name] = extra\nfull_requirements = []\nfor v in extra_requirements.values():\n full_requirements += v\nextra_requirements['all_extras'] = list(set(full_requirements))\n\n# Windows specific requirements\nif sys_platform in ['win32', 'cygwin', 'windows']:\n # These have to be installed manually or via the installers in windows\n requirements = remove_requirements(requirements, 'torch')\n\nsetuptools.setup(\n name=about['__title__'],\n version=about['__version__'],\n url=about['__github__'],\n download_url=about['__pypi__'],\n license=about['__license__'],\n author=about['__author__'],\n author_email=about['__email__'],\n description=about['__description__'],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n packages=setuptools.find_packages(),\n package_data={'project': ['requirements.txt']},\n install_requires=requirements,\n extras_require=extra_requirements,\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires=\">=3.7\"\n)\n", "path": "setup.py"}]} | 1,183 | 105 |
gh_patches_debug_18477 | rasdani/github-patches | git_diff | saleor__saleor-1416 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Logging in does not redirect to ?next= link
### What I'm trying to achieve
Currently Saleor has an option to redirect user to particular URL after being asked to log in - which isn't working ATM, beacuse user gets redirected to storefront main page.
### Steps to reproduce the problem
1. Go to auth-protected URL (such as `/dashboard`)
2. Log in
### What I expected to happen
To redirect user to requested page.
### What happened instead/how it failed
User gets redirected to `/`
</issue>
<code>
[start of saleor/registration/views.py]
1 from __future__ import unicode_literals
2
3 from django.conf import settings
4 from django.contrib import auth, messages
5 from django.contrib.auth import views as django_views
6 from django.contrib.auth.decorators import login_required
7 from django.shortcuts import redirect
8 from django.template.response import TemplateResponse
9 from django.urls import reverse_lazy
10 from django.utils.translation import ugettext_lazy as _
11
12 from saleor.cart.utils import find_and_assign_anonymous_cart
13
14 from .forms import LoginForm, PasswordSetUpForm, SignupForm
15
16
17 @find_and_assign_anonymous_cart()
18 def login(request):
19 kwargs = {
20 'template_name': 'account/login.html', 'authentication_form': LoginForm}
21 return django_views.LoginView.as_view(**kwargs)(request, **kwargs)
22
23
24 @login_required
25 def logout(request):
26 auth.logout(request)
27 messages.success(request, _('You have been successfully logged out.'))
28 return redirect(settings.LOGIN_REDIRECT_URL)
29
30
31 def signup(request):
32 form = SignupForm(request.POST or None)
33 if form.is_valid():
34 form.save()
35 password = form.cleaned_data.get('password')
36 email = form.cleaned_data.get('email')
37 user = auth.authenticate(request=request, email=email,
38 password=password)
39 if user:
40 auth.login(request, user)
41 messages.success(request, _('User has been created'))
42 redirect_url = request.POST.get('next', '')
43 if redirect_url:
44 return redirect(redirect_url)
45 else:
46 return redirect(settings.LOGIN_REDIRECT_URL)
47 ctx = {'form': form}
48 return TemplateResponse(request, 'account/signup.html', ctx)
49
50
51 def password_reset(request):
52 kwargs = {
53 'template_name': 'account/password_reset.html',
54 'success_url': reverse_lazy('account_reset_password_done'),
55 'email_template_name': 'account/email/password_reset_message.txt',
56 'subject_template_name': 'account/email/password_reset_subject.txt'}
57 return django_views.PasswordResetView.as_view(**kwargs)(request, **kwargs)
58
59
60 class PasswordResetConfirm(django_views.PasswordResetConfirmView):
61 template_name = 'account/password_reset_from_key.html'
62 success_url = reverse_lazy('account_reset_password_complete')
63 set_password_form = PasswordSetUpForm
64 token = None
65 uidb64 = None
66
67
68 def password_reset_confirm(request, uidb64=None, token=None):
69 kwargs = {
70 'template_name': 'account/password_reset_from_key.html',
71 'success_url': reverse_lazy('account_reset_password_complete'),
72 'set_password_form': 'PasswordSetUpForm',
73 'token': token,
74 'uidb64': uidb64}
75 return PasswordResetConfirm.as_view(**kwargs)(
76 request, **kwargs)
77
[end of saleor/registration/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/saleor/registration/views.py b/saleor/registration/views.py
--- a/saleor/registration/views.py
+++ b/saleor/registration/views.py
@@ -34,16 +34,13 @@
form.save()
password = form.cleaned_data.get('password')
email = form.cleaned_data.get('email')
- user = auth.authenticate(request=request, email=email,
- password=password)
+ user = auth.authenticate(
+ request=request, email=email, password=password)
if user:
auth.login(request, user)
messages.success(request, _('User has been created'))
- redirect_url = request.POST.get('next', '')
- if redirect_url:
- return redirect(redirect_url)
- else:
- return redirect(settings.LOGIN_REDIRECT_URL)
+ redirect_url = request.POST.get('next', settings.LOGIN_REDIRECT_URL)
+ return redirect(redirect_url)
ctx = {'form': form}
return TemplateResponse(request, 'account/signup.html', ctx)
| {"golden_diff": "diff --git a/saleor/registration/views.py b/saleor/registration/views.py\n--- a/saleor/registration/views.py\n+++ b/saleor/registration/views.py\n@@ -34,16 +34,13 @@\n form.save()\n password = form.cleaned_data.get('password')\n email = form.cleaned_data.get('email')\n- user = auth.authenticate(request=request, email=email,\n- password=password)\n+ user = auth.authenticate(\n+ request=request, email=email, password=password)\n if user:\n auth.login(request, user)\n messages.success(request, _('User has been created'))\n- redirect_url = request.POST.get('next', '')\n- if redirect_url:\n- return redirect(redirect_url)\n- else:\n- return redirect(settings.LOGIN_REDIRECT_URL)\n+ redirect_url = request.POST.get('next', settings.LOGIN_REDIRECT_URL)\n+ return redirect(redirect_url)\n ctx = {'form': form}\n return TemplateResponse(request, 'account/signup.html', ctx)\n", "issue": "Logging does not redirect to ?next= link\n### What I'm trying to achieve\r\n\r\nCurrently Saleor has an option to redirect user to particular URL after being asked to log in - which isn't working ATM, beacuse user gets redirected to storefront main page.\r\n\r\n### Steps to reproduce the problem\r\n\r\n1. Go to auth-protected URL (such as `/dashboard`)\r\n2. Log in\r\n\r\n### What I expected to happen\r\n\r\nTo redirect user to requested page.\r\n\r\n### What happened instead/how it failed\r\n\r\nUser gets redirected to `/`\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.contrib import auth, messages\nfrom django.contrib.auth import views as django_views\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import redirect\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse_lazy\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom saleor.cart.utils import find_and_assign_anonymous_cart\n\nfrom .forms import LoginForm, PasswordSetUpForm, SignupForm\n\n\n@find_and_assign_anonymous_cart()\ndef login(request):\n kwargs = {\n 'template_name': 'account/login.html', 'authentication_form': LoginForm}\n return django_views.LoginView.as_view(**kwargs)(request, **kwargs)\n\n\n@login_required\ndef logout(request):\n auth.logout(request)\n messages.success(request, _('You have been successfully logged out.'))\n return redirect(settings.LOGIN_REDIRECT_URL)\n\n\ndef signup(request):\n form = SignupForm(request.POST or None)\n if form.is_valid():\n form.save()\n password = form.cleaned_data.get('password')\n email = form.cleaned_data.get('email')\n user = auth.authenticate(request=request, email=email,\n password=password)\n if user:\n auth.login(request, user)\n messages.success(request, _('User has been created'))\n redirect_url = request.POST.get('next', '')\n if redirect_url:\n return redirect(redirect_url)\n else:\n return redirect(settings.LOGIN_REDIRECT_URL)\n ctx = {'form': form}\n return TemplateResponse(request, 'account/signup.html', ctx)\n\n\ndef password_reset(request):\n kwargs = {\n 'template_name': 'account/password_reset.html',\n 'success_url': reverse_lazy('account_reset_password_done'),\n 'email_template_name': 'account/email/password_reset_message.txt',\n 'subject_template_name': 'account/email/password_reset_subject.txt'}\n return django_views.PasswordResetView.as_view(**kwargs)(request, **kwargs)\n\n\nclass PasswordResetConfirm(django_views.PasswordResetConfirmView):\n template_name = 'account/password_reset_from_key.html'\n success_url = 
reverse_lazy('account_reset_password_complete')\n set_password_form = PasswordSetUpForm\n token = None\n uidb64 = None\n\n\ndef password_reset_confirm(request, uidb64=None, token=None):\n kwargs = {\n 'template_name': 'account/password_reset_from_key.html',\n 'success_url': reverse_lazy('account_reset_password_complete'),\n 'set_password_form': 'PasswordSetUpForm',\n 'token': token,\n 'uidb64': uidb64}\n return PasswordResetConfirm.as_view(**kwargs)(\n request, **kwargs)\n", "path": "saleor/registration/views.py"}]} | 1,350 | 220 |
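The core of the fix above is collapsing the `if redirect_url:` branch into a single `dict.get()` with a default. Isolated as a sketch (the view name and surrounding wiring are illustrative, not Saleor's):

```python
from django.conf import settings
from django.shortcuts import redirect

def after_signup(request):
    # Honour ?next= when the form posted one; otherwise fall back to
    # LOGIN_REDIRECT_URL. The default argument replaces the old if/else.
    redirect_url = request.POST.get('next', settings.LOGIN_REDIRECT_URL)
    return redirect(redirect_url)
```

Production code usually also validates the posted target against an allow-list (Django ships `is_safe_url`, renamed `url_has_allowed_host_and_scheme` in later versions) so the parameter cannot be abused as an open redirect.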
gh_patches_debug_48346 | rasdani/github-patches | git_diff | interlegis__sapl-3164 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Do not allow the fields rotulo_prefixo_texto and rotulo_sufixo_texto to be changed via the admin interface
<!--- Provide a general summary of the issue in the title above -->

## Expected Behavior
<!--- If you are describing a bug, tell us what should happen. -->
<!--- If you are suggesting a change/improvement, tell us how it should work. -->

## Current Behavior
<!--- If you are describing a bug, tell us what happens instead of the expected behavior. -->
<!--- If you are suggesting a change/improvement, explain the difference from the current behavior. -->

## Possible Solution
<!--- Not required, but suggest a possible fix/reason for the bug -->
<!--- or ideas on how to implement the addition/change. -->

## Steps to Reproduce (for bugs)
<!--- Provide a link to an example, or an unambiguous set of steps -->
<!--- to reproduce this bug. Include code to reproduce, if relevant. -->
1.
2.
3.
4.

## Context
<!--- How does this problem affect you? What are you trying to accomplish? -->
<!--- Providing context helps us find a solution that is more useful in the real world -->

## Images of the Issue
<!--- Visual representation of the issue in video or image form -->
<!--- If you are describing a bug, post images or videos reproducing it, if applicable -->

## Your Environment
<!--- Include relevant details about the environment in which you experienced the bug. -->
* Version used (_Release_):
* Browser name and version:
* Operating system name and version (desktop or mobile):
* Link to your project (if this is a fork of the project):
</issue>
<code>
[start of sapl/compilacao/admin.py]
1 from sapl.utils import register_all_models_in_admin
2
3 register_all_models_in_admin(__name__)
4
[end of sapl/compilacao/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sapl/compilacao/admin.py b/sapl/compilacao/admin.py
--- a/sapl/compilacao/admin.py
+++ b/sapl/compilacao/admin.py
@@ -1,3 +1,12 @@
+from django.contrib import admin
+from sapl.compilacao.models import TipoDispositivo
from sapl.utils import register_all_models_in_admin
register_all_models_in_admin(__name__)
+admin.site.unregister(TipoDispositivo)
+
+
[email protected](TipoDispositivo)
+class TipoDispositivoAdmin(admin.ModelAdmin):
+ readonly_fields = ("rotulo_prefixo_texto", "rotulo_sufixo_texto",)
+ list_display = [f.name for f in TipoDispositivo._meta.fields if f.name != 'id']
| {"golden_diff": "diff --git a/sapl/compilacao/admin.py b/sapl/compilacao/admin.py\n--- a/sapl/compilacao/admin.py\n+++ b/sapl/compilacao/admin.py\n@@ -1,3 +1,12 @@\n+from django.contrib import admin\n+from sapl.compilacao.models import TipoDispositivo\n from sapl.utils import register_all_models_in_admin\n \n register_all_models_in_admin(__name__)\n+admin.site.unregister(TipoDispositivo)\n+\n+\[email protected](TipoDispositivo)\n+class TipoDispositivoAdmin(admin.ModelAdmin):\n+ readonly_fields = (\"rotulo_prefixo_texto\", \"rotulo_sufixo_texto\",)\n+ list_display = [f.name for f in TipoDispositivo._meta.fields if f.name != 'id']\n", "issue": "N\u00e3o permitir que se altere campos rotulo_prefixo_texto e rotulo_sufixo_texto via interface admin\n<!--- Forne\u00e7a um resumo geral da _issue_ no t\u00edtulo acima -->\r\n\r\n## Comportamento Esperado\r\n<!--- Se voc\u00ea est\u00e1 descrevendo um _bug_, conte-nos o que deveria acontecer. -->\r\n<!--- Se voc\u00ea est\u00e1 sugerindo uma mudan\u00e7a/melhoria, conte-nos como deve funcionar. -->\r\n\r\n## Comportamento Atual\r\n<!--- Se est\u00e1 descrevendo um bug, conte-nos o que acontece em vez do comportamento esperado. -->\r\n<!--- Se est\u00e1 sugerindo uma mudan\u00e7a/melhoria, explique a diferen\u00e7a com o comportamento atual. -->\r\n\r\n## Poss\u00edvel Solu\u00e7\u00e3o\r\n<!--- N\u00e3o \u00e9 obrigat\u00f3rio, mas sugira uma poss\u00edvel corre\u00e7\u00e3o/raz\u00e3o para o bug -->\r\n<!--- ou ideias de como implementar a adi\u00e7\u00e3o/mudan\u00e7a. -->\r\n\r\n## Passos para Reproduzir (para bugs)\r\n<!--- Forne\u00e7a um link para um exemplo, ou um conjunto de passos inequ\u00edvocos -->\r\n<!--- para reproduzir esse bug. Inclua c\u00f3digo para reproduzir, se relevante. -->\r\n1.\r\n2.\r\n3.\r\n4.\r\n\r\n## Contexto\r\n<!--- Como esse problema o afeta? O que voc\u00ea est\u00e1 tentando realizar? -->\r\n<!--- Fornecer o contexto nos ajuda a encontrar uma solu\u00e7\u00e3o que seja mais \u00fatil no mundo real -->\r\n\r\n## Imagens do Ocorrido\r\n<!--- Representa\u00e7\u00e3o visual em v\u00eddeo ou imagem do ocorrido -->\r\n<!--- Se est\u00e1 descrevendo um bug poste imagens ou v\u00eddeos na reprodu\u00e7\u00e3o do bug citado, caso se aplique -->\r\n\r\n## Seu Ambiente\r\n<!--- Inclua detalhes relevantes sobre o ambiente em que voc\u00ea presenciou/experienciou o bug. -->\r\n* Vers\u00e3o usada (_Release_):\r\n* Nome e vers\u00e3o do navegador:\r\n* Nome e vers\u00e3o do Sistema Operacional (desktop ou mobile):\r\n* Link para o seu projeto (Caso de fork deste projeto):\r\n\n", "before_files": [{"content": "from sapl.utils import register_all_models_in_admin\n\nregister_all_models_in_admin(__name__)\n", "path": "sapl/compilacao/admin.py"}]} | 1,001 | 176 |
gh_patches_debug_9994 | rasdani/github-patches | git_diff | urllib3__urllib3-603 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Really don't spam InsecurePlatformWarning
urllib3 should configure its warnings using append=True to avoid overriding the user's preferences as specified with python -W or PYTHONWARNINGS.
If this issue were fixed, the user could work around pypa/pip#2681 with
```
export PYTHONWARNINGS="ignore:A true SSLContext object is not available"
```
Additionally, the urllib3 docs are very unclear about why this is considered worth warning the end user about, particularly given that adding this strange message has effectively introduced a bug in hundreds of other projects.
</issue>
<code>
[start of urllib3/__init__.py]
1 """
2 urllib3 - Thread-safe connection pooling and re-using.
3 """
4
5 __author__ = 'Andrey Petrov ([email protected])'
6 __license__ = 'MIT'
7 __version__ = '1.10.2'
8
9
10 from .connectionpool import (
11 HTTPConnectionPool,
12 HTTPSConnectionPool,
13 connection_from_url
14 )
15
16 from . import exceptions
17 from .filepost import encode_multipart_formdata
18 from .poolmanager import PoolManager, ProxyManager, proxy_from_url
19 from .response import HTTPResponse
20 from .util.request import make_headers
21 from .util.url import get_host
22 from .util.timeout import Timeout
23 from .util.retry import Retry
24
25
26 # Set default logging handler to avoid "No handler found" warnings.
27 import logging
28 try: # Python 2.7+
29 from logging import NullHandler
30 except ImportError:
31 class NullHandler(logging.Handler):
32 def emit(self, record):
33 pass
34
35 logging.getLogger(__name__).addHandler(NullHandler())
36
37 def add_stderr_logger(level=logging.DEBUG):
38 """
39 Helper for quickly adding a StreamHandler to the logger. Useful for
40 debugging.
41
42 Returns the handler after adding it.
43 """
44 # This method needs to be in this __init__.py to get the __name__ correct
45 # even if urllib3 is vendored within another package.
46 logger = logging.getLogger(__name__)
47 handler = logging.StreamHandler()
48 handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
49 logger.addHandler(handler)
50 logger.setLevel(level)
51 logger.debug('Added a stderr logging handler to logger: %s' % __name__)
52 return handler
53
54 # ... Clean up.
55 del NullHandler
56
57
58 import warnings
59 # SecurityWarning's always go off by default.
60 warnings.simplefilter('always', exceptions.SecurityWarning)
61 # InsecurePlatformWarning's don't vary between requests, so we keep it default.
62 warnings.simplefilter('default', exceptions.InsecurePlatformWarning)
63
64 def disable_warnings(category=exceptions.HTTPWarning):
65 """
66 Helper for quickly disabling all urllib3 warnings.
67 """
68 warnings.simplefilter('ignore', category)
69
[end of urllib3/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/urllib3/__init__.py b/urllib3/__init__.py
--- a/urllib3/__init__.py
+++ b/urllib3/__init__.py
@@ -57,9 +57,10 @@
import warnings
# SecurityWarning's always go off by default.
-warnings.simplefilter('always', exceptions.SecurityWarning)
+warnings.simplefilter('always', exceptions.SecurityWarning, append=True)
# InsecurePlatformWarning's don't vary between requests, so we keep it default.
-warnings.simplefilter('default', exceptions.InsecurePlatformWarning)
+warnings.simplefilter('default', exceptions.InsecurePlatformWarning,
+ append=True)
def disable_warnings(category=exceptions.HTTPWarning):
"""
| {"golden_diff": "diff --git a/urllib3/__init__.py b/urllib3/__init__.py\n--- a/urllib3/__init__.py\n+++ b/urllib3/__init__.py\n@@ -57,9 +57,10 @@\n \n import warnings\n # SecurityWarning's always go off by default.\n-warnings.simplefilter('always', exceptions.SecurityWarning)\n+warnings.simplefilter('always', exceptions.SecurityWarning, append=True)\n # InsecurePlatformWarning's don't vary between requests, so we keep it default.\n-warnings.simplefilter('default', exceptions.InsecurePlatformWarning)\n+warnings.simplefilter('default', exceptions.InsecurePlatformWarning,\n+ append=True)\n \n def disable_warnings(category=exceptions.HTTPWarning):\n \"\"\"\n", "issue": "Really don't spam InsecurePlatformWarning\nurllib3 should configure its warnings using append=True to avoid overriding the user's preferences as specified with python -W or PYTHONWARNINGS.\n\nIf this issue were fixed, the user could work around pypa/pip#2681 with\n\n```\nexport PYTHONWARNINGS=\"ignore:A true SSLContext object is not available\"\n```\n\nAdditionally, the urllib3 docs are very unclear about why this is considered worth warning the end user about, particularly given that adding this strange message has effectively introduced a bug in hundreds of other projects.\n\n", "before_files": [{"content": "\"\"\"\nurllib3 - Thread-safe connection pooling and re-using.\n\"\"\"\n\n__author__ = 'Andrey Petrov ([email protected])'\n__license__ = 'MIT'\n__version__ = '1.10.2'\n\n\nfrom .connectionpool import (\n HTTPConnectionPool,\n HTTPSConnectionPool,\n connection_from_url\n)\n\nfrom . import exceptions\nfrom .filepost import encode_multipart_formdata\nfrom .poolmanager import PoolManager, ProxyManager, proxy_from_url\nfrom .response import HTTPResponse\nfrom .util.request import make_headers\nfrom .util.url import get_host\nfrom .util.timeout import Timeout\nfrom .util.retry import Retry\n\n\n# Set default logging handler to avoid \"No handler found\" warnings.\nimport logging\ntry: # Python 2.7+\n from logging import NullHandler\nexcept ImportError:\n class NullHandler(logging.Handler):\n def emit(self, record):\n pass\n\nlogging.getLogger(__name__).addHandler(NullHandler())\n\ndef add_stderr_logger(level=logging.DEBUG):\n \"\"\"\n Helper for quickly adding a StreamHandler to the logger. Useful for\n debugging.\n\n Returns the handler after adding it.\n \"\"\"\n # This method needs to be in this __init__.py to get the __name__ correct\n # even if urllib3 is vendored within another package.\n logger = logging.getLogger(__name__)\n handler = logging.StreamHandler()\n handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))\n logger.addHandler(handler)\n logger.setLevel(level)\n logger.debug('Added a stderr logging handler to logger: %s' % __name__)\n return handler\n\n# ... Clean up.\ndel NullHandler\n\n\nimport warnings\n# SecurityWarning's always go off by default.\nwarnings.simplefilter('always', exceptions.SecurityWarning)\n# InsecurePlatformWarning's don't vary between requests, so we keep it default.\nwarnings.simplefilter('default', exceptions.InsecurePlatformWarning)\n\ndef disable_warnings(category=exceptions.HTTPWarning):\n \"\"\"\n Helper for quickly disabling all urllib3 warnings.\n \"\"\"\n warnings.simplefilter('ignore', category)\n", "path": "urllib3/__init__.py"}]} | 1,237 | 158 |
gh_patches_debug_17919 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-4061 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spelling Error (_medical_likeliehood)
_medical_likeliehood -> _medical_likelihood
https://github.com/GoogleCloudPlatform/google-cloud-python/blob/b28a4eb667ae08c3f4dcf9af891ed4931884989c/vision/google/cloud/vision/safe_search.py#L43
</issue>
<code>
[start of vision/google/cloud/vision/safe_search.py]
1 # Copyright 2017 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Safe search class for information returned from annotating an image."""
16
17 from google.cloud.vision.likelihood import _get_pb_likelihood
18 from google.cloud.vision.likelihood import Likelihood
19
20
21 class SafeSearchAnnotation(object):
22 """Representation of a SafeSearchAnnotation.
23
24 :type adult_likelihood: :class:`~google.cloud.vision.likelihood.Likelihood`
25 :param adult_likelihood: Likelihood that image contains adult material.
26
27 :type spoof_likelihood: :class:`~google.cloud.vision.likelihood.Likelihood`
28 :param spoof_likelihood: Likelihood that image is a spoof.
29
30 :type medical_likelihood:
31 :class:`~google.cloud.vision.likelihood.Likelihood`
32 :param medical_likelihood: Likelihood that image contains medical material.
33
34 :type violence_likelihood:
35 :class:`~google.cloud.vision.likelihood.Likelihood`
36 :param violence_likelihood: Likelihood that image contains violence.
37 """
38
39 def __init__(self, adult_likelihood, spoof_likelihood, medical_likelihood,
40 violence_likelihood):
41 self._adult_likelihood = adult_likelihood
42 self._spoof_likelihood = spoof_likelihood
43 self._medical_likeliehood = medical_likelihood
44 self._violence_likelihood = violence_likelihood
45
46 @classmethod
47 def from_api_repr(cls, response):
48 """Factory: construct SafeSearchAnnotation from Vision API response.
49
50 :type response: dict
51 :param response: Dictionary response from Vision API with safe search
52 data.
53
54 :rtype: :class:`~google.cloud.vision.safe_search.SafeSearchAnnotation`
55 :returns: Instance of ``SafeSearchAnnotation``.
56 """
57 adult_likelihood = Likelihood[response['adult']]
58 spoof_likelihood = Likelihood[response['spoof']]
59 medical_likelihood = Likelihood[response['medical']]
60 violence_likelihood = Likelihood[response['violence']]
61
62 return cls(adult_likelihood, spoof_likelihood, medical_likelihood,
63 violence_likelihood)
64
65 @classmethod
66 def from_pb(cls, image):
67 """Factory: construct SafeSearchAnnotation from Vision API response.
68
69 :type image: :class:`~google.cloud.vision_v1.proto.\
70 image_annotator_pb2.SafeSearchAnnotation`
71 :param image: Protobuf response from Vision API with safe search data.
72
73 :rtype: :class:`~google.cloud.vision.safe_search.SafeSearchAnnotation`
74 :returns: Instance of ``SafeSearchAnnotation``.
75 """
76 values = [image.adult, image.spoof, image.medical, image.violence]
77 classifications = map(_get_pb_likelihood, values)
78 return cls(*classifications)
79
80 @property
81 def adult(self):
82 """Represents the adult contents likelihood for the image.
83
84 :rtype: :class:`~google.cloud.vision.likelihood.Likelihood`
85 :returns: ``Likelihood`` of the image containing adult content.
86 """
87 return self._adult_likelihood
88
89 @property
90 def spoof(self):
91 """The likelihood that an obvious modification was made to the image.
92
93 :rtype: :class:`~google.cloud.vision.likelihood.Likelihood`
94 :returns: The ``Likelihood`` that an obvious modification was made to
95 the image's canonical version to make it appear funny or
96 offensive.
97 """
98 return self._spoof_likelihood
99
100 @property
101 def medical(self):
102 """Likelihood this is a medical image.
103
104 :rtype: :class:`~google.cloud.vision.likelihood.Likelihood`
105 :returns: The ``Likelihood`` that the image is medical in origin.
106 """
107 return self._medical_likeliehood
108
109 @property
110 def violence(self):
111 """Likeliehood that this image contains violence.
112
113 :rtype: :class:`~google.cloud.vision.likelihood.Likelihood`
114 :returns: The ``Likelihood`` that the image contains violence.
115 """
116 return self._violence_likelihood
117
[end of vision/google/cloud/vision/safe_search.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/vision/google/cloud/vision/safe_search.py b/vision/google/cloud/vision/safe_search.py
--- a/vision/google/cloud/vision/safe_search.py
+++ b/vision/google/cloud/vision/safe_search.py
@@ -40,7 +40,7 @@
violence_likelihood):
self._adult_likelihood = adult_likelihood
self._spoof_likelihood = spoof_likelihood
- self._medical_likeliehood = medical_likelihood
+ self._medical_likelihood = medical_likelihood
self._violence_likelihood = violence_likelihood
@classmethod
@@ -104,7 +104,7 @@
:rtype: :class:`~google.cloud.vision.likelihood.Likelihood`
:returns: The ``Likelihood`` that the image is medical in origin.
"""
- return self._medical_likeliehood
+ return self._medical_likelihood
@property
def violence(self):
| {"golden_diff": "diff --git a/vision/google/cloud/vision/safe_search.py b/vision/google/cloud/vision/safe_search.py\n--- a/vision/google/cloud/vision/safe_search.py\n+++ b/vision/google/cloud/vision/safe_search.py\n@@ -40,7 +40,7 @@\n violence_likelihood):\n self._adult_likelihood = adult_likelihood\n self._spoof_likelihood = spoof_likelihood\n- self._medical_likeliehood = medical_likelihood\n+ self._medical_likelihood = medical_likelihood\n self._violence_likelihood = violence_likelihood\n \n @classmethod\n@@ -104,7 +104,7 @@\n :rtype: :class:`~google.cloud.vision.likelihood.Likelihood`\n :returns: The ``Likelihood`` that the image is medical in origin.\n \"\"\"\n- return self._medical_likeliehood\n+ return self._medical_likelihood\n \n @property\n def violence(self):\n", "issue": "Spelling Error (_medical_likeliehood)\n_medical_likeliehood -> _medical_likelihood\r\n\r\nhttps://github.com/GoogleCloudPlatform/google-cloud-python/blob/b28a4eb667ae08c3f4dcf9af891ed4931884989c/vision/google/cloud/vision/safe_search.py#L43\n", "before_files": [{"content": "# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Safe search class for information returned from annotating an image.\"\"\"\n\nfrom google.cloud.vision.likelihood import _get_pb_likelihood\nfrom google.cloud.vision.likelihood import Likelihood\n\n\nclass SafeSearchAnnotation(object):\n \"\"\"Representation of a SafeSearchAnnotation.\n\n :type adult_likelihood: :class:`~google.cloud.vision.likelihood.Likelihood`\n :param adult_likelihood: Likelihood that image contains adult material.\n\n :type spoof_likelihood: :class:`~google.cloud.vision.likelihood.Likelihood`\n :param spoof_likelihood: Likelihood that image is a spoof.\n\n :type medical_likelihood:\n :class:`~google.cloud.vision.likelihood.Likelihood`\n :param medical_likelihood: Likelihood that image contains medical material.\n\n :type violence_likelihood:\n :class:`~google.cloud.vision.likelihood.Likelihood`\n :param violence_likelihood: Likelihood that image contains violence.\n \"\"\"\n\n def __init__(self, adult_likelihood, spoof_likelihood, medical_likelihood,\n violence_likelihood):\n self._adult_likelihood = adult_likelihood\n self._spoof_likelihood = spoof_likelihood\n self._medical_likeliehood = medical_likelihood\n self._violence_likelihood = violence_likelihood\n\n @classmethod\n def from_api_repr(cls, response):\n \"\"\"Factory: construct SafeSearchAnnotation from Vision API response.\n\n :type response: dict\n :param response: Dictionary response from Vision API with safe search\n data.\n\n :rtype: :class:`~google.cloud.vision.safe_search.SafeSearchAnnotation`\n :returns: Instance of ``SafeSearchAnnotation``.\n \"\"\"\n adult_likelihood = Likelihood[response['adult']]\n spoof_likelihood = Likelihood[response['spoof']]\n medical_likelihood = Likelihood[response['medical']]\n violence_likelihood = Likelihood[response['violence']]\n\n return cls(adult_likelihood, spoof_likelihood, medical_likelihood,\n violence_likelihood)\n\n 
@classmethod\n def from_pb(cls, image):\n \"\"\"Factory: construct SafeSearchAnnotation from Vision API response.\n\n :type image: :class:`~google.cloud.vision_v1.proto.\\\n image_annotator_pb2.SafeSearchAnnotation`\n :param image: Protobuf response from Vision API with safe search data.\n\n :rtype: :class:`~google.cloud.vision.safe_search.SafeSearchAnnotation`\n :returns: Instance of ``SafeSearchAnnotation``.\n \"\"\"\n values = [image.adult, image.spoof, image.medical, image.violence]\n classifications = map(_get_pb_likelihood, values)\n return cls(*classifications)\n\n @property\n def adult(self):\n \"\"\"Represents the adult contents likelihood for the image.\n\n :rtype: :class:`~google.cloud.vision.likelihood.Likelihood`\n :returns: ``Likelihood`` of the image containing adult content.\n \"\"\"\n return self._adult_likelihood\n\n @property\n def spoof(self):\n \"\"\"The likelihood that an obvious modification was made to the image.\n\n :rtype: :class:`~google.cloud.vision.likelihood.Likelihood`\n :returns: The ``Likelihood`` that an obvious modification was made to\n the image's canonical version to make it appear funny or\n offensive.\n \"\"\"\n return self._spoof_likelihood\n\n @property\n def medical(self):\n \"\"\"Likelihood this is a medical image.\n\n :rtype: :class:`~google.cloud.vision.likelihood.Likelihood`\n :returns: The ``Likelihood`` that the image is medical in origin.\n \"\"\"\n return self._medical_likeliehood\n\n @property\n def violence(self):\n \"\"\"Likeliehood that this image contains violence.\n\n :rtype: :class:`~google.cloud.vision.likelihood.Likelihood`\n :returns: The ``Likelihood`` that the image contains violence.\n \"\"\"\n return self._violence_likelihood\n", "path": "vision/google/cloud/vision/safe_search.py"}]} | 1,833 | 205 |
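The misspelling in this record was internally consistent, so nothing broke at runtime; the risk sits in the rename itself, where updating only one of the two sites would raise `AttributeError`. A reduced sketch (not the real class) of a round-trip check that guards such a rename:

```python
class SafeSearchSketch:
    def __init__(self, medical_likelihood):
        self._medical_likelihood = medical_likelihood  # was *_likeliehood

    @property
    def medical(self):
        # The property and the attribute must use the same spelling;
        # renaming only one of them fails here at first access.
        return self._medical_likelihood

assert SafeSearchSketch("VERY_LIKELY").medical == "VERY_LIKELY"
```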
gh_patches_debug_64529 | rasdani/github-patches | git_diff | kartoza__prj.app-293 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
We need to support animated GIF's
Using licecap or silentcast it is easy to make animated GIFs. When images are uploaded to Django, though, they are resized and converted to PNG. We need to update the logic so thumbnails etc. can be created for animated GIFs without losing the animation.
</issue>
<code>
[start of django_project/base/templatetags/custom_markup.py]
1 import markdown
2 from django import template
3 from django.template.defaultfilters import stringfilter
4 from django.utils.encoding import force_unicode
5 from django.utils.safestring import mark_safe
6
7 register = template.Library()
8
9
10 @register.filter(name='base_markdown', is_safe=True)
11 @stringfilter
12 def base_markdown(value):
13 extensions = ["nl2br", ]
14
15 return mark_safe(markdown.markdown(force_unicode(value),
16 extensions,
17 safe_mode=True,
18 enable_attributes=False))
19
[end of django_project/base/templatetags/custom_markup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/django_project/base/templatetags/custom_markup.py b/django_project/base/templatetags/custom_markup.py
--- a/django_project/base/templatetags/custom_markup.py
+++ b/django_project/base/templatetags/custom_markup.py
@@ -16,3 +16,9 @@
extensions,
safe_mode=True,
enable_attributes=False))
+
+
[email protected](name='is_gif', is_safe=True)
+@stringfilter
+def is_gif(value):
+ return value[-4:] == '.gif'
| {"golden_diff": "diff --git a/django_project/base/templatetags/custom_markup.py b/django_project/base/templatetags/custom_markup.py\n--- a/django_project/base/templatetags/custom_markup.py\n+++ b/django_project/base/templatetags/custom_markup.py\n@@ -16,3 +16,9 @@\n extensions,\n safe_mode=True,\n enable_attributes=False))\n+\n+\[email protected](name='is_gif', is_safe=True)\n+@stringfilter\n+def is_gif(value):\n+ return value[-4:] == '.gif'\n", "issue": "We need to support animated GIF's\nUsing licecap or silentcast it is easy to make animated GIF's. When images are uploaded to django though they are resized and converted to PNG. We need to update the logic so thumbs etc. can be created for animate GIF's without losing the animation. \n\n", "before_files": [{"content": "import markdown\nfrom django import template\nfrom django.template.defaultfilters import stringfilter\nfrom django.utils.encoding import force_unicode\nfrom django.utils.safestring import mark_safe\n\nregister = template.Library()\n\n\[email protected](name='base_markdown', is_safe=True)\n@stringfilter\ndef base_markdown(value):\n extensions = [\"nl2br\", ]\n\n return mark_safe(markdown.markdown(force_unicode(value),\n extensions,\n safe_mode=True,\n enable_attributes=False))\n", "path": "django_project/base/templatetags/custom_markup.py"}]} | 740 | 125 |
gh_patches_debug_13360 | rasdani/github-patches | git_diff | urllib3__urllib3-60 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
nosetests crashes under IPv4 (error: getsockaddrarg: bad family)
Turns out tornado is really eager to use IPv6. Unless you expressly hand the server the address, it doesn't even check for socket IPv6 support. I'll submit a pull request for the one-line fix in dummyserver/server.py momentarily.
Source: https://groups.google.com/group/python-tornado/browse_thread/thread/3ec04536e57a2833?pli=1
</issue>
<code>
[start of dummyserver/server.py]
1 #!/usr/bin/env python
2
3 """
4 Dummy server used for unit testing.
5 """
6 from __future__ import print_function
7
8 import logging
9 import os
10 import sys
11 import threading
12 import socket
13
14 import tornado.wsgi
15 import tornado.httpserver
16 import tornado.ioloop
17
18 from dummyserver.handlers import TestingApp
19
20
21 log = logging.getLogger(__name__)
22
23 CERTS_PATH = os.path.join(os.path.dirname(__file__), 'certs')
24 DEFAULT_CERTS = {
25 'certfile': os.path.join(CERTS_PATH, 'server.crt'),
26 'keyfile': os.path.join(CERTS_PATH, 'server.key'),
27 }
28 DEFAULT_CA = os.path.join(CERTS_PATH, 'cacert.pem')
29 DEFAULT_CA_BAD = os.path.join(CERTS_PATH, 'client_bad.pem')
30
31
32 # Different types of servers we have:
33
34
35 class SocketServerThread(threading.Thread):
36 """
37 :param socket_handler: Callable which receives a socket argument for one
38 request.
39 :param ready_lock: Lock which gets released when the socket handler is
40 ready to receive requests.
41 """
42 def __init__(self, socket_handler, host='localhost', port=8081,
43 ready_lock=None):
44 threading.Thread.__init__(self)
45
46 self.socket_handler = socket_handler
47 self.host = host
48 self.port = port
49 self.ready_lock = ready_lock
50
51 def _start_server(self):
52 sock = socket.socket()
53 sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
54 sock.bind((self.host, self.port))
55
56 # Once listen() returns, the server socket is ready
57 sock.listen(1)
58
59 if self.ready_lock:
60 self.ready_lock.release()
61
62 self.socket_handler(sock)
63
64 def run(self):
65 self.server = self._start_server()
66
67
68 class TornadoServerThread(threading.Thread):
69 def __init__(self, host='localhost', port=8081, scheme='http', certs=None):
70 threading.Thread.__init__(self)
71
72 self.host = host
73 self.port = port
74 self.scheme = scheme
75 self.certs = certs
76
77 def _start_server(self):
78 container = tornado.wsgi.WSGIContainer(TestingApp())
79
80 if self.scheme == 'https':
81 http_server = tornado.httpserver.HTTPServer(container,
82 ssl_options=self.certs)
83 else:
84 http_server = tornado.httpserver.HTTPServer(container)
85
86 http_server.listen(self.port)
87 return http_server
88
89 def run(self):
90 self.server = self._start_server()
91 self.ioloop = tornado.ioloop.IOLoop.instance()
92 self.ioloop.start()
93
94 def stop(self):
95 self.server.stop()
96 self.ioloop.stop()
97
98
99 if __name__ == '__main__':
100 log.setLevel(logging.DEBUG)
101 log.addHandler(logging.StreamHandler(sys.stderr))
102
103 from urllib3 import get_host
104
105 url = "http://localhost:8081"
106 if len(sys.argv) > 1:
107 url = sys.argv[1]
108
109 print("Starting WGI server at: %s" % url)
110
111 scheme, host, port = get_host(url)
112 t = TornadoServerThread(scheme=scheme, host=host, port=port)
113 t.start()
114
[end of dummyserver/server.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dummyserver/server.py b/dummyserver/server.py
--- a/dummyserver/server.py
+++ b/dummyserver/server.py
@@ -83,7 +83,7 @@
else:
http_server = tornado.httpserver.HTTPServer(container)
- http_server.listen(self.port)
+ http_server.listen(self.port, address=self.host)
return http_server
def run(self):
@@ -106,7 +106,7 @@
if len(sys.argv) > 1:
url = sys.argv[1]
- print("Starting WGI server at: %s" % url)
+ print("Starting WSGI server at: %s" % url)
scheme, host, port = get_host(url)
t = TornadoServerThread(scheme=scheme, host=host, port=port)
| {"golden_diff": "diff --git a/dummyserver/server.py b/dummyserver/server.py\n--- a/dummyserver/server.py\n+++ b/dummyserver/server.py\n@@ -83,7 +83,7 @@\n else:\n http_server = tornado.httpserver.HTTPServer(container)\n \n- http_server.listen(self.port)\n+ http_server.listen(self.port, address=self.host)\n return http_server\n \n def run(self):\n@@ -106,7 +106,7 @@\n if len(sys.argv) > 1:\n url = sys.argv[1]\n \n- print(\"Starting WGI server at: %s\" % url)\n+ print(\"Starting WSGI server at: %s\" % url)\n \n scheme, host, port = get_host(url)\n t = TornadoServerThread(scheme=scheme, host=host, port=port)\n", "issue": "nosetests crashes under IPv4 (error: getsockaddrarg: bad family)\nTurns out tornado is really eager to use IPv6. Unless you expressly hand the server the address, it doesn't even check for socket IPv6 support. I'll submit a pull request for the one-line fix in dummyserver/server.py momentarily.\n\nSource: https://groups.google.com/group/python-tornado/browse_thread/thread/3ec04536e57a2833?pli=1\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\nDummy server used for unit testing.\n\"\"\"\nfrom __future__ import print_function\n\nimport logging\nimport os\nimport sys\nimport threading\nimport socket\n\nimport tornado.wsgi\nimport tornado.httpserver\nimport tornado.ioloop\n\nfrom dummyserver.handlers import TestingApp\n\n\nlog = logging.getLogger(__name__)\n\nCERTS_PATH = os.path.join(os.path.dirname(__file__), 'certs')\nDEFAULT_CERTS = {\n 'certfile': os.path.join(CERTS_PATH, 'server.crt'),\n 'keyfile': os.path.join(CERTS_PATH, 'server.key'),\n}\nDEFAULT_CA = os.path.join(CERTS_PATH, 'cacert.pem')\nDEFAULT_CA_BAD = os.path.join(CERTS_PATH, 'client_bad.pem')\n\n\n# Different types of servers we have:\n\n\nclass SocketServerThread(threading.Thread):\n \"\"\"\n :param socket_handler: Callable which receives a socket argument for one\n request.\n :param ready_lock: Lock which gets released when the socket handler is\n ready to receive requests.\n \"\"\"\n def __init__(self, socket_handler, host='localhost', port=8081,\n ready_lock=None):\n threading.Thread.__init__(self)\n\n self.socket_handler = socket_handler\n self.host = host\n self.port = port\n self.ready_lock = ready_lock\n\n def _start_server(self):\n sock = socket.socket()\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.bind((self.host, self.port))\n\n # Once listen() returns, the server socket is ready\n sock.listen(1)\n\n if self.ready_lock:\n self.ready_lock.release()\n\n self.socket_handler(sock)\n\n def run(self):\n self.server = self._start_server()\n\n\nclass TornadoServerThread(threading.Thread):\n def __init__(self, host='localhost', port=8081, scheme='http', certs=None):\n threading.Thread.__init__(self)\n\n self.host = host\n self.port = port\n self.scheme = scheme\n self.certs = certs\n\n def _start_server(self):\n container = tornado.wsgi.WSGIContainer(TestingApp())\n\n if self.scheme == 'https':\n http_server = tornado.httpserver.HTTPServer(container,\n ssl_options=self.certs)\n else:\n http_server = tornado.httpserver.HTTPServer(container)\n\n http_server.listen(self.port)\n return http_server\n\n def run(self):\n self.server = self._start_server()\n self.ioloop = tornado.ioloop.IOLoop.instance()\n self.ioloop.start()\n\n def stop(self):\n self.server.stop()\n self.ioloop.stop()\n\n\nif __name__ == '__main__':\n log.setLevel(logging.DEBUG)\n log.addHandler(logging.StreamHandler(sys.stderr))\n\n from urllib3 import get_host\n\n url = \"http://localhost:8081\"\n if 
len(sys.argv) > 1:\n url = sys.argv[1]\n\n print(\"Starting WGI server at: %s\" % url)\n\n scheme, host, port = get_host(url)\n t = TornadoServerThread(scheme=scheme, host=host, port=port)\n t.start()\n", "path": "dummyserver/server.py"}]} | 1,581 | 188 |
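`HTTPServer.listen()` takes the bind address as a keyword, and handing it an explicit IPv4 address is what sidesteps the `getsockaddrarg: bad family` crash. A hedged sketch against a modern Tornado (the handler is a placeholder; older releases spell the loop accessor `IOLoop.instance()`, as the record's code does):

```python
import tornado.httpserver
import tornado.ioloop
import tornado.web

class Ping(tornado.web.RequestHandler):
    def get(self):
        self.write("ok")

app = tornado.web.Application([(r"/", Ping)])
server = tornado.httpserver.HTTPServer(app)
# An explicit IPv4 address forces an AF_INET socket instead of letting
# Tornado pick a family the interpreter may not support.
server.listen(8081, address="127.0.0.1")
tornado.ioloop.IOLoop.current().start()
```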
gh_patches_debug_22208 | rasdani/github-patches | git_diff | wagtail__wagtail-1576 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Project template needs updating for search promotions changes
The provided view still references the EditorsPick model: https://github.com/torchbox/wagtail/blob/master/wagtail/project_template/search/views.py#L5
Shall we update this to use the new contrib module or remove it completely?
</issue>
<code>
[start of wagtail/project_template/search/views.py]
1 from django.shortcuts import render
2 from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
3
4 from wagtail.wagtailcore.models import Page
5 from wagtail.wagtailsearch.models import Query, EditorsPick
6
7
8 def search(request):
9 search_query = request.GET.get('query', None)
10 page = request.GET.get('page', 1)
11
12 # Search
13 if search_query:
14 search_results = Page.objects.live().search(search_query)
15 query = Query.get(search_query)
16
17 # Record hit
18 query.add_hit()
19
20 # Get search picks
21 search_picks = query.editors_picks.all()
22 else:
23 search_results = Page.objects.none()
24 search_picks = EditorsPick.objects.none()
25
26 # Pagination
27 paginator = Paginator(search_results, 10)
28 try:
29 search_results = paginator.page(page)
30 except PageNotAnInteger:
31 search_results = paginator.page(1)
32 except EmptyPage:
33 search_results = paginator.page(paginator.num_pages)
34
35 return render(request, 'search/search.html', {
36 'search_query': search_query,
37 'search_results': search_results,
38 'search_picks': search_picks,
39 })
40
[end of wagtail/project_template/search/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wagtail/project_template/search/views.py b/wagtail/project_template/search/views.py
--- a/wagtail/project_template/search/views.py
+++ b/wagtail/project_template/search/views.py
@@ -2,7 +2,7 @@
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from wagtail.wagtailcore.models import Page
-from wagtail.wagtailsearch.models import Query, EditorsPick
+from wagtail.wagtailsearch.models import Query
def search(request):
@@ -16,12 +16,8 @@
# Record hit
query.add_hit()
-
- # Get search picks
- search_picks = query.editors_picks.all()
else:
search_results = Page.objects.none()
- search_picks = EditorsPick.objects.none()
# Pagination
paginator = Paginator(search_results, 10)
@@ -35,5 +31,4 @@
return render(request, 'search/search.html', {
'search_query': search_query,
'search_results': search_results,
- 'search_picks': search_picks,
})
| {"golden_diff": "diff --git a/wagtail/project_template/search/views.py b/wagtail/project_template/search/views.py\n--- a/wagtail/project_template/search/views.py\n+++ b/wagtail/project_template/search/views.py\n@@ -2,7 +2,7 @@\n from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n \n from wagtail.wagtailcore.models import Page\n-from wagtail.wagtailsearch.models import Query, EditorsPick\n+from wagtail.wagtailsearch.models import Query\n \n \n def search(request):\n@@ -16,12 +16,8 @@\n \n # Record hit\n query.add_hit()\n-\n- # Get search picks\n- search_picks = query.editors_picks.all()\n else:\n search_results = Page.objects.none()\n- search_picks = EditorsPick.objects.none()\n \n # Pagination\n paginator = Paginator(search_results, 10)\n@@ -35,5 +31,4 @@\n return render(request, 'search/search.html', {\n 'search_query': search_query,\n 'search_results': search_results,\n- 'search_picks': search_picks,\n })\n", "issue": "Project template needs updating for search promotions changes\nThe provided view still references the EditorsPick model: https://github.com/torchbox/wagtail/blob/master/wagtail/project_template/search/views.py#L5\n\nShall we update this to use the new contrib module or remove it completely?\n\n", "before_files": [{"content": "from django.shortcuts import render\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\nfrom wagtail.wagtailcore.models import Page\nfrom wagtail.wagtailsearch.models import Query, EditorsPick\n\n\ndef search(request):\n search_query = request.GET.get('query', None)\n page = request.GET.get('page', 1)\n\n # Search\n if search_query:\n search_results = Page.objects.live().search(search_query)\n query = Query.get(search_query)\n\n # Record hit\n query.add_hit()\n\n # Get search picks\n search_picks = query.editors_picks.all()\n else:\n search_results = Page.objects.none()\n search_picks = EditorsPick.objects.none()\n\n # Pagination\n paginator = Paginator(search_results, 10)\n try:\n search_results = paginator.page(page)\n except PageNotAnInteger:\n search_results = paginator.page(1)\n except EmptyPage:\n search_results = paginator.page(paginator.num_pages)\n\n return render(request, 'search/search.html', {\n 'search_query': search_query,\n 'search_results': search_results,\n 'search_picks': search_picks,\n })\n", "path": "wagtail/project_template/search/views.py"}]} | 927 | 247 |
gh_patches_debug_23718 | rasdani/github-patches | git_diff | GeotrekCE__Geotrek-admin-1398 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SIGNAGES module / Type filter disappeared
I think the TYPE filter was available before.
A crucial filter, of course.
</issue>
<code>
[start of geotrek/infrastructure/filters.py]
1 from django.utils.translation import ugettext_lazy as _
2
3 from geotrek.common.filters import StructureRelatedFilterSet, YearFilter
4 from geotrek.maintenance.filters import InterventionYearSelect
5
6 from .models import INFRASTRUCTURE_TYPES, Infrastructure, Signage
7
8
9 class InfrastructureYearSelect(InterventionYearSelect):
10 label = _(u"Intervention year")
11
12
13 class InfrastructureFilterSet(StructureRelatedFilterSet):
14 intervention_year = YearFilter(name='interventions_set__date',
15 widget=InfrastructureYearSelect,
16 label=_(u"Intervention year"))
17
18 def __init__(self, *args, **kwargs):
19 super(InfrastructureFilterSet, self).__init__(*args, **kwargs)
20 field = self.form.fields['type']
21 field.queryset = field.queryset.exclude(type=INFRASTRUCTURE_TYPES.SIGNAGE)
22
23 field = self.form.fields['type__type']
24 all_choices = field.widget.choices
25 all_choices = [c for c in all_choices if c[0] != INFRASTRUCTURE_TYPES.SIGNAGE]
26 field.widget.choices = [('', _(u"Category"))] + all_choices
27
28 class Meta(StructureRelatedFilterSet.Meta):
29 model = Infrastructure
30 fields = StructureRelatedFilterSet.Meta.fields + ['type__type', 'type']
31
32
33 class SignageFilterSet(StructureRelatedFilterSet):
34 intervention_year = YearFilter(name='interventions_set__date',
35 widget=InfrastructureYearSelect)
36
37 class Meta(StructureRelatedFilterSet.Meta):
38 model = Signage
39 fields = StructureRelatedFilterSet.Meta.fields
40
[end of geotrek/infrastructure/filters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/geotrek/infrastructure/filters.py b/geotrek/infrastructure/filters.py
--- a/geotrek/infrastructure/filters.py
+++ b/geotrek/infrastructure/filters.py
@@ -12,8 +12,7 @@
class InfrastructureFilterSet(StructureRelatedFilterSet):
intervention_year = YearFilter(name='interventions_set__date',
- widget=InfrastructureYearSelect,
- label=_(u"Intervention year"))
+ widget=InfrastructureYearSelect)
def __init__(self, *args, **kwargs):
super(InfrastructureFilterSet, self).__init__(*args, **kwargs)
@@ -34,6 +33,11 @@
intervention_year = YearFilter(name='interventions_set__date',
widget=InfrastructureYearSelect)
+ def __init__(self, *args, **kwargs):
+ super(SignageFilterSet, self).__init__(*args, **kwargs)
+ field = self.form.fields['type']
+ field.queryset = field.queryset.filter(type=INFRASTRUCTURE_TYPES.SIGNAGE)
+
class Meta(StructureRelatedFilterSet.Meta):
model = Signage
- fields = StructureRelatedFilterSet.Meta.fields
+ fields = StructureRelatedFilterSet.Meta.fields + ['type']
| {"golden_diff": "diff --git a/geotrek/infrastructure/filters.py b/geotrek/infrastructure/filters.py\n--- a/geotrek/infrastructure/filters.py\n+++ b/geotrek/infrastructure/filters.py\n@@ -12,8 +12,7 @@\n \n class InfrastructureFilterSet(StructureRelatedFilterSet):\n intervention_year = YearFilter(name='interventions_set__date',\n- widget=InfrastructureYearSelect,\n- label=_(u\"Intervention year\"))\n+ widget=InfrastructureYearSelect)\n \n def __init__(self, *args, **kwargs):\n super(InfrastructureFilterSet, self).__init__(*args, **kwargs)\n@@ -34,6 +33,11 @@\n intervention_year = YearFilter(name='interventions_set__date',\n widget=InfrastructureYearSelect)\n \n+ def __init__(self, *args, **kwargs):\n+ super(SignageFilterSet, self).__init__(*args, **kwargs)\n+ field = self.form.fields['type']\n+ field.queryset = field.queryset.filter(type=INFRASTRUCTURE_TYPES.SIGNAGE)\n+\n class Meta(StructureRelatedFilterSet.Meta):\n model = Signage\n- fields = StructureRelatedFilterSet.Meta.fields\n+ fields = StructureRelatedFilterSet.Meta.fields + ['type']\n", "issue": "SIGNAGES module / Type filter disappeared\nI think TYPE filter was available before.\nCrucial filter of course.\n\n", "before_files": [{"content": "from django.utils.translation import ugettext_lazy as _\n\nfrom geotrek.common.filters import StructureRelatedFilterSet, YearFilter\nfrom geotrek.maintenance.filters import InterventionYearSelect\n\nfrom .models import INFRASTRUCTURE_TYPES, Infrastructure, Signage\n\n\nclass InfrastructureYearSelect(InterventionYearSelect):\n label = _(u\"Intervention year\")\n\n\nclass InfrastructureFilterSet(StructureRelatedFilterSet):\n intervention_year = YearFilter(name='interventions_set__date',\n widget=InfrastructureYearSelect,\n label=_(u\"Intervention year\"))\n\n def __init__(self, *args, **kwargs):\n super(InfrastructureFilterSet, self).__init__(*args, **kwargs)\n field = self.form.fields['type']\n field.queryset = field.queryset.exclude(type=INFRASTRUCTURE_TYPES.SIGNAGE)\n\n field = self.form.fields['type__type']\n all_choices = field.widget.choices\n all_choices = [c for c in all_choices if c[0] != INFRASTRUCTURE_TYPES.SIGNAGE]\n field.widget.choices = [('', _(u\"Category\"))] + all_choices\n\n class Meta(StructureRelatedFilterSet.Meta):\n model = Infrastructure\n fields = StructureRelatedFilterSet.Meta.fields + ['type__type', 'type']\n\n\nclass SignageFilterSet(StructureRelatedFilterSet):\n intervention_year = YearFilter(name='interventions_set__date',\n widget=InfrastructureYearSelect)\n\n class Meta(StructureRelatedFilterSet.Meta):\n model = Signage\n fields = StructureRelatedFilterSet.Meta.fields\n", "path": "geotrek/infrastructure/filters.py"}]} | 975 | 285 |
gh_patches_debug_61517 | rasdani/github-patches | git_diff | open-mmlab__mmpose-271 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pylint: R1710
```bash
mmpose/apis/test.py:142:0: R1710: Either all return statements in a function should return an expression, or none of them should. (inconsistent-return-statements)
mmpose/datasets/datasets/mesh/mesh_mix_dataset.py:38:4: R1710: Either all return statements in a function should return an expression, or none of them should. (inconsistent-return-statements)
```
</issue>
<code>
[start of mmpose/datasets/datasets/mesh/mesh_mix_dataset.py]
1 from abc import ABCMeta
2
3 import numpy as np
4 from torch.utils.data import Dataset
5
6 from mmpose.datasets.builder import DATASETS
7 from .mesh_base_dataset import MeshBaseDataset
8
9
10 @DATASETS.register_module()
11 class MeshMixDataset(Dataset, metaclass=ABCMeta):
12 """Mix Dataset for 3D human mesh estimation.
13
14 The dataset combines data from multiple datasets (MeshBaseDataset) and
15 sample the data from different datasets with the provided proportions.
16 The dataset loads raw features and apply specified transforms
17 to return a dict containing the image tensors and other information.
18
19 Args:
20 configs (list): List of configs for multiple datasets.
21 partition (list): Sample proportion of multiple datasets.
22 The the elements of it should be non-negative and the
23 sum of it should be 1.
24 """
25
26 def __init__(self, configs, partition):
27 """Load data from multiple datasets."""
28 assert min(partition) >= 0
29 assert sum(partition) == 1
30 self.partition = np.array(partition).cumsum()
31 self.datasets = [MeshBaseDataset(**cfg) for cfg in configs]
32 self.length = max(len(ds) for ds in self.datasets)
33
34 def __len__(self):
35 """Get the size of the dataset."""
36 return self.length
37
38 def __getitem__(self, idx):
39 """Given index, sample the data from multiple datasets with the given
40 proportion."""
41 p = np.random.rand()
42 for i in range(len(self.datasets)):
43 if p <= self.partition[i]:
44 index_new = (idx + np.random.rand()) * len(
45 self.datasets[i]) / self.length
46 index_new = int(np.round(index_new)) % (len(self.datasets[i]))
47 return self.datasets[i][index_new]
48
[end of mmpose/datasets/datasets/mesh/mesh_mix_dataset.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mmpose/datasets/datasets/mesh/mesh_mix_dataset.py b/mmpose/datasets/datasets/mesh/mesh_mix_dataset.py
--- a/mmpose/datasets/datasets/mesh/mesh_mix_dataset.py
+++ b/mmpose/datasets/datasets/mesh/mesh_mix_dataset.py
@@ -45,3 +45,4 @@
self.datasets[i]) / self.length
index_new = int(np.round(index_new)) % (len(self.datasets[i]))
return self.datasets[i][index_new]
+ return None
| {"golden_diff": "diff --git a/mmpose/datasets/datasets/mesh/mesh_mix_dataset.py b/mmpose/datasets/datasets/mesh/mesh_mix_dataset.py\n--- a/mmpose/datasets/datasets/mesh/mesh_mix_dataset.py\n+++ b/mmpose/datasets/datasets/mesh/mesh_mix_dataset.py\n@@ -45,3 +45,4 @@\n self.datasets[i]) / self.length\n index_new = int(np.round(index_new)) % (len(self.datasets[i]))\n return self.datasets[i][index_new]\n+ return None\n", "issue": "Pylint: R1710\n```bash\r\nmmpose/apis/test.py:142:0: R1710: Either all return statements in a function should return an expression, or none of them should. (inconsistent-return-statements)\r\nmmpose/datasets/datasets/mesh/mesh_mix_dataset.py:38:4: R1710: Either all return statements in a function should return an expression, or none of them should. (inconsistent-return-statements)\r\n```\n", "before_files": [{"content": "from abc import ABCMeta\n\nimport numpy as np\nfrom torch.utils.data import Dataset\n\nfrom mmpose.datasets.builder import DATASETS\nfrom .mesh_base_dataset import MeshBaseDataset\n\n\[email protected]_module()\nclass MeshMixDataset(Dataset, metaclass=ABCMeta):\n \"\"\"Mix Dataset for 3D human mesh estimation.\n\n The dataset combines data from multiple datasets (MeshBaseDataset) and\n sample the data from different datasets with the provided proportions.\n The dataset loads raw features and apply specified transforms\n to return a dict containing the image tensors and other information.\n\n Args:\n configs (list): List of configs for multiple datasets.\n partition (list): Sample proportion of multiple datasets.\n The the elements of it should be non-negative and the\n sum of it should be 1.\n \"\"\"\n\n def __init__(self, configs, partition):\n \"\"\"Load data from multiple datasets.\"\"\"\n assert min(partition) >= 0\n assert sum(partition) == 1\n self.partition = np.array(partition).cumsum()\n self.datasets = [MeshBaseDataset(**cfg) for cfg in configs]\n self.length = max(len(ds) for ds in self.datasets)\n\n def __len__(self):\n \"\"\"Get the size of the dataset.\"\"\"\n return self.length\n\n def __getitem__(self, idx):\n \"\"\"Given index, sample the data from multiple datasets with the given\n proportion.\"\"\"\n p = np.random.rand()\n for i in range(len(self.datasets)):\n if p <= self.partition[i]:\n index_new = (idx + np.random.rand()) * len(\n self.datasets[i]) / self.length\n index_new = int(np.round(index_new)) % (len(self.datasets[i]))\n return self.datasets[i][index_new]\n", "path": "mmpose/datasets/datasets/mesh/mesh_mix_dataset.py"}]} | 1,135 | 121 |
gh_patches_debug_16387 | rasdani/github-patches | git_diff | python__python-docs-es-106 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Review the `:role:!key` usages
When we did migration #27 we accepted `:role:!key` as `:role:key`.
The only difference between them is that the one with `!` does not link to the reference.
We need to review them so they are consistent again.
</issue>
<code>
[start of conf.py]
1 # Sphinx configuration file.
2 #
3 # - import original configurations from cpython/Doc/conf.py
4 # - append the path considering the cpython submodule is at ./cpython
5 # - create the symbolic links under ./cpython/locale/es/LC_MESSAGES
6 # - make the build to work under Read the Docs
7 #
8 # The git submodule was created using this Stack Overflow answer
9 # to fetch only the commit that I needed and avoid clonning the whole history
10 # https://stackoverflow.com/a/27445058
11 #
12 # This can be built locally using `sphinx-build` by running
13 #
14 # $ sphinx-build -b html -n -d _build/doctrees -D language=es . _build/html
15
16 import sys, os, time
17 sys.path.append(os.path.abspath('cpython/Doc/tools/extensions'))
18 sys.path.append(os.path.abspath('cpython/Doc/includes'))
19
20 # Import all the Sphinx settings from cpython
21 sys.path.append(os.path.abspath('cpython/Doc'))
22 from conf import *
23
24 # Call patchlevel with the proper path to get the version from
25 # instead of hardcoding it
26 import patchlevel
27 version, release = patchlevel.get_header_version_info(os.path.abspath('cpython/Doc'))
28
29 project = 'Python en Español'
30 copyright = '2001-%s, Python Software Foundation' % time.strftime('%Y')
31
32 html_theme_path = ['cpython/Doc/tools']
33 templates_path = ['cpython/Doc/tools/templates']
34 html_static_path = ['cpython/Doc/tools/static']
35
36 os.system('mkdir -p cpython/locales/es/')
37 os.system('ln -nfs `pwd` cpython/locales/es/LC_MESSAGES')
38
39
40 if not os.environ.get('SPHINX_GETTEXT') == 'True':
41 # Override all the files from ``.overrides`` directory
42 import glob
43 for root, dirs, files in os.walk('.overrides'):
44 for fname in files:
45 if fname == 'README.rst' and root == '.overrides':
46 continue
47 destroot = root.replace('.overrides', '').lstrip('/')
48 outputdir = os.path.join(
49 'cpython',
50 'Doc',
51 destroot,
52 fname,
53 )
54 os.system(f'ln -nfs `pwd`/{root}/{fname} {outputdir}')
55
56 gettext_compact = False
57 locale_dirs = ['../locales', 'cpython/locales'] # relative to the sourcedir
58
59
60 # NOTE: Read the Docs does not support "multi document output".
61 # So, we put all the documentation as a single file for now.
62 _stdauthor = r'Guido van Rossum\\and the Python development team'
63 latex_documents = [
64 ('contents', 'python-docs-es.tex', u'Documentación de Python en Español',
65 _stdauthor, 'manual'),
66 ]
67
68 def setup(app):
69
70 def add_contributing_banner(app, doctree):
71 """
72 Insert a banner at the top of the index.
73
74 This way, we can easily communicate people to help with the translation,
75 pointing them to different resources.
76 """
77
78 if app.builder.format != 'html':
79 # Do not include the banner when building with other formats
80 # (this is useful when using -b gettext)
81 return
82
83 from docutils import nodes, core
84
85 message = '¡Ayúdanos a traducir la documentación oficial de Python al Español! ' \
86 f'Puedes encontrar más información en `Como contribuir </es/{version}/CONTRIBUTING.html>`_. ' \
87 'Ayuda a acercar Python a más personas de habla hispana.'
88
89 paragraph = core.publish_doctree(message)[0]
90 banner = nodes.warning(ids=['contributing-banner'])
91 banner.append(paragraph)
92
93 for document in doctree.traverse(nodes.document):
94 document.insert(0, banner)
95
96 # Change the sourcedir programmatically because Read the Docs always call it with `.`
97 app.srcdir = 'cpython/Doc'
98
99 app.connect('doctree-read', add_contributing_banner)
100
[end of conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conf.py b/conf.py
--- a/conf.py
+++ b/conf.py
@@ -11,7 +11,7 @@
#
# This can be built locally using `sphinx-build` by running
#
-# $ sphinx-build -b html -n -d _build/doctrees -D language=es . _build/html
+# $ sphinx-build -b html -d _build/doctrees -D language=es . _build/html
import sys, os, time
sys.path.append(os.path.abspath('cpython/Doc/tools/extensions'))
@@ -37,6 +37,12 @@
os.system('ln -nfs `pwd` cpython/locales/es/LC_MESSAGES')
+exclude_patterns = [
+ # This file is not included and it not marked as :orphan:
+ 'distutils/_setuptools_disclaimer.rst',
+ 'README.rst',
+]
+
if not os.environ.get('SPHINX_GETTEXT') == 'True':
# Override all the files from ``.overrides`` directory
import glob
| {"golden_diff": "diff --git a/conf.py b/conf.py\n--- a/conf.py\n+++ b/conf.py\n@@ -11,7 +11,7 @@\n #\n # This can be built locally using `sphinx-build` by running\n #\n-# $ sphinx-build -b html -n -d _build/doctrees -D language=es . _build/html\n+# $ sphinx-build -b html -d _build/doctrees -D language=es . _build/html\n \n import sys, os, time\n sys.path.append(os.path.abspath('cpython/Doc/tools/extensions'))\n@@ -37,6 +37,12 @@\n os.system('ln -nfs `pwd` cpython/locales/es/LC_MESSAGES')\n \n \n+exclude_patterns = [\n+ # This file is not included and it not marked as :orphan:\n+ 'distutils/_setuptools_disclaimer.rst',\n+ 'README.rst',\n+]\n+\n if not os.environ.get('SPHINX_GETTEXT') == 'True':\n # Override all the files from ``.overrides`` directory\n import glob\n", "issue": "Revisar los `:role:!key`\nCuando hicimos la migraci\u00f3n #27 aceptamos `:role:!key` como `:role:key`.\r\n\r\nLa \u00fanica diferencia entre ellos es que el que tiene `!` no hace un link a la referencia.\r\n\r\nTenemos que revisar que queden consistentes nuevamente.\n", "before_files": [{"content": "# Sphinx configuration file.\n#\n# - import original configurations from cpython/Doc/conf.py\n# - append the path considering the cpython submodule is at ./cpython\n# - create the symbolic links under ./cpython/locale/es/LC_MESSAGES\n# - make the build to work under Read the Docs\n#\n# The git submodule was created using this Stack Overflow answer\n# to fetch only the commit that I needed and avoid clonning the whole history\n# https://stackoverflow.com/a/27445058\n#\n# This can be built locally using `sphinx-build` by running\n#\n# $ sphinx-build -b html -n -d _build/doctrees -D language=es . _build/html\n\nimport sys, os, time\nsys.path.append(os.path.abspath('cpython/Doc/tools/extensions'))\nsys.path.append(os.path.abspath('cpython/Doc/includes'))\n\n# Import all the Sphinx settings from cpython\nsys.path.append(os.path.abspath('cpython/Doc'))\nfrom conf import *\n\n# Call patchlevel with the proper path to get the version from\n# instead of hardcoding it\nimport patchlevel\nversion, release = patchlevel.get_header_version_info(os.path.abspath('cpython/Doc'))\n\nproject = 'Python en Espa\u00f1ol'\ncopyright = '2001-%s, Python Software Foundation' % time.strftime('%Y')\n\nhtml_theme_path = ['cpython/Doc/tools']\ntemplates_path = ['cpython/Doc/tools/templates']\nhtml_static_path = ['cpython/Doc/tools/static']\n\nos.system('mkdir -p cpython/locales/es/')\nos.system('ln -nfs `pwd` cpython/locales/es/LC_MESSAGES')\n\n\nif not os.environ.get('SPHINX_GETTEXT') == 'True':\n # Override all the files from ``.overrides`` directory\n import glob\n for root, dirs, files in os.walk('.overrides'):\n for fname in files:\n if fname == 'README.rst' and root == '.overrides':\n continue\n destroot = root.replace('.overrides', '').lstrip('/')\n outputdir = os.path.join(\n 'cpython',\n 'Doc',\n destroot,\n fname,\n )\n os.system(f'ln -nfs `pwd`/{root}/{fname} {outputdir}')\n\ngettext_compact = False\nlocale_dirs = ['../locales', 'cpython/locales'] # relative to the sourcedir\n\n\n# NOTE: Read the Docs does not support \"multi document output\".\n# So, we put all the documentation as a single file for now.\n_stdauthor = r'Guido van Rossum\\\\and the Python development team'\nlatex_documents = [\n ('contents', 'python-docs-es.tex', u'Documentaci\u00f3n de Python en Espa\u00f1ol',\n _stdauthor, 'manual'),\n]\n\ndef setup(app):\n\n def add_contributing_banner(app, doctree):\n \"\"\"\n Insert a banner at the top of the index.\n\n This way, we 
can easily communicate people to help with the translation,\n pointing them to different resources.\n \"\"\"\n\n if app.builder.format != 'html':\n # Do not include the banner when building with other formats\n # (this is useful when using -b gettext)\n return\n\n from docutils import nodes, core\n\n message = '\u00a1Ay\u00fadanos a traducir la documentaci\u00f3n oficial de Python al Espa\u00f1ol! ' \\\n f'Puedes encontrar m\u00e1s informaci\u00f3n en `Como contribuir </es/{version}/CONTRIBUTING.html>`_. ' \\\n 'Ayuda a acercar Python a m\u00e1s personas de habla hispana.'\n\n paragraph = core.publish_doctree(message)[0]\n banner = nodes.warning(ids=['contributing-banner'])\n banner.append(paragraph)\n\n for document in doctree.traverse(nodes.document):\n document.insert(0, banner)\n\n # Change the sourcedir programmatically because Read the Docs always call it with `.`\n app.srcdir = 'cpython/Doc'\n\n app.connect('doctree-read', add_contributing_banner)\n", "path": "conf.py"}]} | 1,669 | 238 |
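For the `:role:!key` review above, a small audit script can list every translated entry that suppresses its cross-reference link. This is a hypothetical helper, not part of the repository; the regex and the directory layout it scans are assumptions:

```python
import re
from pathlib import Path

# ":role:`!target`" renders without a link; ":role:`target`" links to the target.
ROLE = re.compile(r":([\w.+-]+):`(!?)([^`]+)`")

def audit(po_dir="."):
    for po_file in Path(po_dir).rglob("*.po"):
        for role, bang, target in ROLE.findall(po_file.read_text(encoding="utf-8")):
            if bang:
                print(f"{po_file}: unlinked :{role}:`!{target}`")

audit()
```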
gh_patches_debug_30419 | rasdani/github-patches | git_diff | uccser__cs-unplugged-731 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Implement automatic update of .po file
The .po file needs to be updated with all static strings for translation by running `python manage.py makemessages`. This needs to happen on any change to templates or database content.
This process should be automated on Travis to run when any such files are updated on `develop`.
</issue>
<code>
[start of infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_complete_translations.py]
1 """Script to print list of file paths of all completely translated files for a given language."""
2
3 import os
4 import argparse
5
6 from crowdin_bot import api
7
8 SOURCE_LANGUAGE = "en"
9
10 def get_language_info(language):
11 """Get xml tree from language info api call.
12
13 Args:
14 language: (str) crowdin language code
15
16 Returns:
17 lxml.etree object
18 """
19 return api.api_call_xml(
20 "language-status",
21 language=language
22 )
23
24 def process_item(item, parent_path=None, csu_language_code=None):
25 """Return list of completely translated file paths in a given directory tree node.
26
27 Args:
28 item: (etree.Element): itemm node in language-status xml tree
29 (see https://support.crowdin.com/api/language-status/)
30 parent_path: (str) path to the translated file node (None if the current item is
31 the root of the directory tree).
32 csu_language_code: (str) Language code (in locale format) on CSU end
33 (may differ from crowdin language code according to language mapping
34 in yaml file)
35
36 Returns:
37 (list) list of file paths that are completely translated
38 """
39 if item.find("node_type").text == "file":
40 filename = item.find("name").text
41 if parent_path:
42 path = os.path.join(parent_path, filename)
43 else:
44 path = filename
45
46 # Skip full translated check for *.po - they can always be included
47 if filename.endswith(".po"):
48 return [path]
49
50 if item.find("phrases").text == item.find("approved").text:
51 return [path]
52 else:
53 return []
54
55 else:
56 inner_nodes = item.find("files")
57 dirname = item.find("name").text
58 if dirname == SOURCE_LANGUAGE:
59 dirname = csu_language_code
60 if parent_path:
61 path = os.path.join(parent_path, dirname)
62 else:
63 path = dirname
64 completed = []
65 for inner_node in inner_nodes:
66 completed += process_item(inner_node, parent_path=path, csu_language_code=csu_language_code)
67 return completed
68
69
70 if __name__ == "__main__":
71 parser = argparse.ArgumentParser()
72 parser.add_argument('--crowdin-code', required=True,
73 help='Crowdin language code for target language')
74 parser.add_argument('--csu-code', required=True,
75 help='CSU language code for target language')
76 args = parser.parse_args()
77 lang_info = get_language_info(args.crowdin_code)
78 files = lang_info.find("files")
79 completed = []
80 for item in files:
81 completed += process_item(item, csu_language_code=args.csu_code)
82 print('\n'.join(completed))
83
[end of infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_complete_translations.py]
[start of infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_crowdin_languages.py]
1 """Script to print list of all crowdin language codes for project."""
2
3 from crowdin_bot import api
4
5 NS_DICT = {
6 'ns': "urn:oasis:names:tc:xliff:document:1.2"
7 }
8
9 def get_project_languages():
10 """Get list of crowdin language codes.
11
12 Returns:
13 (list) list of project crowdin language codes
14 """
15 info_xml = api.api_call_xml("info")
16 languages = info_xml.find('languages')
17 translatable_languages = []
18 for language in languages:
19 # Check it's not the incontext pseudo language
20 if language.find("can_translate").text == "1":
21 translatable_languages.append(language.find('code').text)
22 return translatable_languages
23
24 if __name__ == "__main__":
25 print('\n'.join(get_project_languages()))
26
[end of infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_crowdin_languages.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_complete_translations.py b/infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_complete_translations.py
--- a/infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_complete_translations.py
+++ b/infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_complete_translations.py
@@ -43,9 +43,9 @@
else:
path = filename
- # Skip full translated check for *.po - they can always be included
+ # Skip *.po - they are handled separately
if filename.endswith(".po"):
- return [path]
+ return []
if item.find("phrases").text == item.find("approved").text:
return [path]
@@ -79,4 +79,5 @@
completed = []
for item in files:
completed += process_item(item, csu_language_code=args.csu_code)
- print('\n'.join(completed))
+ for path in completed:
+ print(path)
diff --git a/infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_crowdin_languages.py b/infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_crowdin_languages.py
--- a/infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_crowdin_languages.py
+++ b/infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_crowdin_languages.py
@@ -12,14 +12,14 @@
Returns:
(list) list of project crowdin language codes
"""
- info_xml = api.api_call_xml("info")
- languages = info_xml.find('languages')
- translatable_languages = []
- for language in languages:
- # Check it's not the incontext pseudo language
- if language.find("can_translate").text == "1":
- translatable_languages.append(language.find('code').text)
- return translatable_languages
+ active_languages = []
+ trans_status = api.api_call_json("status")
+ for language in trans_status:
+ # Check language has actually had some translation done
+ if int(language["words_approved"]) > 0:
+ active_languages.append(language["code"])
+ return active_languages
if __name__ == "__main__":
- print('\n'.join(get_project_languages()))
+ for language in get_project_languages():
+ print(language)
| {"golden_diff": "diff --git a/infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_complete_translations.py b/infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_complete_translations.py\n--- a/infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_complete_translations.py\n+++ b/infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_complete_translations.py\n@@ -43,9 +43,9 @@\n else:\n path = filename\n \n- # Skip full translated check for *.po - they can always be included\n+ # Skip *.po - they are handled separately\n if filename.endswith(\".po\"):\n- return [path]\n+ return []\n \n if item.find(\"phrases\").text == item.find(\"approved\").text:\n return [path]\n@@ -79,4 +79,5 @@\n completed = []\n for item in files:\n completed += process_item(item, csu_language_code=args.csu_code)\n- print('\\n'.join(completed))\n+ for path in completed:\n+ print(path)\ndiff --git a/infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_crowdin_languages.py b/infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_crowdin_languages.py\n--- a/infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_crowdin_languages.py\n+++ b/infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_crowdin_languages.py\n@@ -12,14 +12,14 @@\n Returns:\n (list) list of project crowdin language codes\n \"\"\"\n- info_xml = api.api_call_xml(\"info\")\n- languages = info_xml.find('languages')\n- translatable_languages = []\n- for language in languages:\n- # Check it's not the incontext pseudo language\n- if language.find(\"can_translate\").text == \"1\":\n- translatable_languages.append(language.find('code').text)\n- return translatable_languages\n+ active_languages = []\n+ trans_status = api.api_call_json(\"status\")\n+ for language in trans_status:\n+ # Check language has actually had some translation done\n+ if int(language[\"words_approved\"]) > 0:\n+ active_languages.append(language[\"code\"])\n+ return active_languages\n \n if __name__ == \"__main__\":\n- print('\\n'.join(get_project_languages()))\n+ for language in get_project_languages():\n+ print(language)\n", "issue": "Implement automatic update of .po file \nThe .po file needs to be updated with all static strings for translation, by running the `python manage.py makemessages`. 
This needs to be run upon any change to templates or database content.\r\n\r\nThis process should be automated on Travis to run when any such files are updated on `develop`.\n", "before_files": [{"content": "\"\"\"Script to print list of file paths of all completely translated files for a given language.\"\"\"\n\nimport os\nimport argparse\n\nfrom crowdin_bot import api\n\nSOURCE_LANGUAGE = \"en\"\n\ndef get_language_info(language):\n \"\"\"Get xml tree from language info api call.\n\n Args:\n language: (str) crowdin language code\n\n Returns:\n lxml.etree object\n \"\"\"\n return api.api_call_xml(\n \"language-status\",\n language=language\n )\n\ndef process_item(item, parent_path=None, csu_language_code=None):\n \"\"\"Return list of completely translated file paths in a given directory tree node.\n\n Args:\n item: (etree.Element): itemm node in language-status xml tree\n (see https://support.crowdin.com/api/language-status/)\n parent_path: (str) path to the translated file node (None if the current item is\n the root of the directory tree).\n csu_language_code: (str) Language code (in locale format) on CSU end\n (may differ from crowdin language code according to language mapping\n in yaml file)\n\n Returns:\n (list) list of file paths that are completely translated\n \"\"\"\n if item.find(\"node_type\").text == \"file\":\n filename = item.find(\"name\").text\n if parent_path:\n path = os.path.join(parent_path, filename)\n else:\n path = filename\n\n # Skip full translated check for *.po - they can always be included\n if filename.endswith(\".po\"):\n return [path]\n\n if item.find(\"phrases\").text == item.find(\"approved\").text:\n return [path]\n else:\n return []\n\n else:\n inner_nodes = item.find(\"files\")\n dirname = item.find(\"name\").text\n if dirname == SOURCE_LANGUAGE:\n dirname = csu_language_code\n if parent_path:\n path = os.path.join(parent_path, dirname)\n else:\n path = dirname\n completed = []\n for inner_node in inner_nodes:\n completed += process_item(inner_node, parent_path=path, csu_language_code=csu_language_code)\n return completed\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--crowdin-code', required=True,\n help='Crowdin language code for target language')\n parser.add_argument('--csu-code', required=True,\n help='CSU language code for target language')\n args = parser.parse_args()\n lang_info = get_language_info(args.crowdin_code)\n files = lang_info.find(\"files\")\n completed = []\n for item in files:\n completed += process_item(item, csu_language_code=args.csu_code)\n print('\\n'.join(completed))\n", "path": "infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_complete_translations.py"}, {"content": "\"\"\"Script to print list of all crowdin language codes for project.\"\"\"\n\nfrom crowdin_bot import api\n\nNS_DICT = {\n 'ns': \"urn:oasis:names:tc:xliff:document:1.2\"\n}\n\ndef get_project_languages():\n \"\"\"Get list of crowdin language codes.\n\n Returns:\n (list) list of project crowdin language codes\n \"\"\"\n info_xml = api.api_call_xml(\"info\")\n languages = info_xml.find('languages')\n translatable_languages = []\n for language in languages:\n # Check it's not the incontext pseudo language\n if language.find(\"can_translate\").text == \"1\":\n translatable_languages.append(language.find('code').text)\n return translatable_languages\n\nif __name__ == \"__main__\":\n print('\\n'.join(get_project_languages()))\n", "path": 
"infrastructure/crowdin/crowdin_bot_python_package/crowdin_bot/get_crowdin_languages.py"}]} | 1,646 | 559 |
gh_patches_debug_3174 | rasdani/github-patches | git_diff | pre-commit__pre-commit-1614 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Can't use ruby 2.7.1 on MacOS
Hi,
Bumping my Ruby hooks to version 2.7.1 worked fine for me on Ubuntu but doesn't work for my colleagues on MacOS. Is there something to be done about bumping the rbenv archives?
Thanks
</issue>
<code>
[start of pre_commit/make_archives.py]
1 import argparse
2 import os.path
3 import tarfile
4 from typing import Optional
5 from typing import Sequence
6
7 from pre_commit import output
8 from pre_commit.util import cmd_output_b
9 from pre_commit.util import rmtree
10 from pre_commit.util import tmpdir
11
12
13 # This is a script for generating the tarred resources for git repo
14 # dependencies. Currently it's just for "vendoring" ruby support packages.
15
16
17 REPOS = (
18 ('rbenv', 'git://github.com/rbenv/rbenv', 'a3fa9b7'),
19 ('ruby-build', 'git://github.com/rbenv/ruby-build', '1a902f3'),
20 (
21 'ruby-download',
22 'git://github.com/garnieretienne/rvm-download',
23 '09bd7c6',
24 ),
25 )
26
27
28 def make_archive(name: str, repo: str, ref: str, destdir: str) -> str:
29 """Makes an archive of a repository in the given destdir.
30
31 :param text name: Name to give the archive. For instance foo. The file
32 that is created will be called foo.tar.gz.
33 :param text repo: Repository to clone.
34 :param text ref: Tag/SHA/branch to check out.
35 :param text destdir: Directory to place archives in.
36 """
37 output_path = os.path.join(destdir, f'{name}.tar.gz')
38 with tmpdir() as tempdir:
39 # Clone the repository to the temporary directory
40 cmd_output_b('git', 'clone', repo, tempdir)
41 cmd_output_b('git', 'checkout', ref, cwd=tempdir)
42
43 # We don't want the '.git' directory
44 # It adds a bunch of size to the archive and we don't use it at
45 # runtime
46 rmtree(os.path.join(tempdir, '.git'))
47
48 with tarfile.open(output_path, 'w|gz') as tf:
49 tf.add(tempdir, name)
50
51 return output_path
52
53
54 def main(argv: Optional[Sequence[str]] = None) -> int:
55 parser = argparse.ArgumentParser()
56 parser.add_argument('--dest', default='pre_commit/resources')
57 args = parser.parse_args(argv)
58 for archive_name, repo, ref in REPOS:
59 output.write_line(f'Making {archive_name}.tar.gz for {repo}@{ref}')
60 make_archive(archive_name, repo, ref, args.dest)
61 return 0
62
63
64 if __name__ == '__main__':
65 exit(main())
66
[end of pre_commit/make_archives.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/make_archives.py b/pre_commit/make_archives.py
--- a/pre_commit/make_archives.py
+++ b/pre_commit/make_archives.py
@@ -15,8 +15,8 @@
REPOS = (
- ('rbenv', 'git://github.com/rbenv/rbenv', 'a3fa9b7'),
- ('ruby-build', 'git://github.com/rbenv/ruby-build', '1a902f3'),
+ ('rbenv', 'git://github.com/rbenv/rbenv', '0843745'),
+ ('ruby-build', 'git://github.com/rbenv/ruby-build', '258455e'),
(
'ruby-download',
'git://github.com/garnieretienne/rvm-download',
| {"golden_diff": "diff --git a/pre_commit/make_archives.py b/pre_commit/make_archives.py\n--- a/pre_commit/make_archives.py\n+++ b/pre_commit/make_archives.py\n@@ -15,8 +15,8 @@\n \n \n REPOS = (\n- ('rbenv', 'git://github.com/rbenv/rbenv', 'a3fa9b7'),\n- ('ruby-build', 'git://github.com/rbenv/ruby-build', '1a902f3'),\n+ ('rbenv', 'git://github.com/rbenv/rbenv', '0843745'),\n+ ('ruby-build', 'git://github.com/rbenv/ruby-build', '258455e'),\n (\n 'ruby-download',\n 'git://github.com/garnieretienne/rvm-download',\n", "issue": "Can't use ruby 2.7.1 on MacOS\nHi, \r\n\r\nBumping my ruby hooks to version 2.7.1 worked fine for me on Ubuntu but doesn't work for my colleagues using MacOS, is there something to do about bumping rbenv archives ? \r\n\r\nThanks\n", "before_files": [{"content": "import argparse\nimport os.path\nimport tarfile\nfrom typing import Optional\nfrom typing import Sequence\n\nfrom pre_commit import output\nfrom pre_commit.util import cmd_output_b\nfrom pre_commit.util import rmtree\nfrom pre_commit.util import tmpdir\n\n\n# This is a script for generating the tarred resources for git repo\n# dependencies. Currently it's just for \"vendoring\" ruby support packages.\n\n\nREPOS = (\n ('rbenv', 'git://github.com/rbenv/rbenv', 'a3fa9b7'),\n ('ruby-build', 'git://github.com/rbenv/ruby-build', '1a902f3'),\n (\n 'ruby-download',\n 'git://github.com/garnieretienne/rvm-download',\n '09bd7c6',\n ),\n)\n\n\ndef make_archive(name: str, repo: str, ref: str, destdir: str) -> str:\n \"\"\"Makes an archive of a repository in the given destdir.\n\n :param text name: Name to give the archive. For instance foo. The file\n that is created will be called foo.tar.gz.\n :param text repo: Repository to clone.\n :param text ref: Tag/SHA/branch to check out.\n :param text destdir: Directory to place archives in.\n \"\"\"\n output_path = os.path.join(destdir, f'{name}.tar.gz')\n with tmpdir() as tempdir:\n # Clone the repository to the temporary directory\n cmd_output_b('git', 'clone', repo, tempdir)\n cmd_output_b('git', 'checkout', ref, cwd=tempdir)\n\n # We don't want the '.git' directory\n # It adds a bunch of size to the archive and we don't use it at\n # runtime\n rmtree(os.path.join(tempdir, '.git'))\n\n with tarfile.open(output_path, 'w|gz') as tf:\n tf.add(tempdir, name)\n\n return output_path\n\n\ndef main(argv: Optional[Sequence[str]] = None) -> int:\n parser = argparse.ArgumentParser()\n parser.add_argument('--dest', default='pre_commit/resources')\n args = parser.parse_args(argv)\n for archive_name, repo, ref in REPOS:\n output.write_line(f'Making {archive_name}.tar.gz for {repo}@{ref}')\n make_archive(archive_name, repo, ref, args.dest)\n return 0\n\n\nif __name__ == '__main__':\n exit(main())\n", "path": "pre_commit/make_archives.py"}]} | 1,273 | 187 |
gh_patches_debug_61831 | rasdani/github-patches | git_diff | pulp__pulpcore-3411 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
0077_move_remote_url_credentials.py fails on Remotes that have @ in path, not netloc
**Version**
3.18.10
**Describe the bug**
Migration 0077 fails when you have a remote that has an @ somewhere in the path
```
Applying core.0077_move_remote_url_credentials...Traceback (most recent call last):
File "/usr/bin/pulpcore-manager", line 33, in <module>
sys.exit(load_entry_point('pulpcore==3.18.10', 'console_scripts', 'pulpcore-manager')())
File "/usr/lib/python3.9/site-packages/pulpcore/app/manage.py", line 11, in manage
execute_from_command_line(sys.argv)
File "/usr/lib/python3.9/site-packages/django/core/management/__init__.py", line 419, in execute_from_command_line
utility.execute()
File "/usr/lib/python3.9/site-packages/django/core/management/__init__.py", line 413, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/usr/lib/python3.9/site-packages/django/core/management/base.py", line 354, in run_from_argv
self.execute(*args, **cmd_options)
File "/usr/lib/python3.9/site-packages/django/core/management/base.py", line 398, in execute
output = self.handle(*args, **options)
File "/usr/lib/python3.9/site-packages/django/core/management/base.py", line 89, in wrapped
res = handle_func(*args, **kwargs)
File "/usr/lib/python3.9/site-packages/django/core/management/commands/migrate.py", line 244, in handle
post_migrate_state = executor.migrate(
File "/usr/lib/python3.9/site-packages/django/db/migrations/executor.py", line 117, in migrate
state = self._migrate_all_forwards(state, plan, full_plan, fake=fake, fake_initial=fake_initial)
File "/usr/lib/python3.9/site-packages/django/db/migrations/executor.py", line 147, in _migrate_all_forwards
state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial)
File "/usr/lib/python3.9/site-packages/django/db/migrations/executor.py", line 227, in apply_migration
state = migration.apply(state, schema_editor)
File "/usr/lib/python3.9/site-packages/django/db/migrations/migration.py", line 126, in apply
operation.database_forwards(self.app_label, schema_editor, old_state, project_state)
File "/usr/lib/python3.9/site-packages/django/db/migrations/operations/special.py", line 190, in database_forwards
self.code(from_state.apps, schema_editor)
File "/usr/lib/python3.9/site-packages/pulpcore/app/migrations/0077_move_remote_url_credentials.py", line 19, in move_remote_url_credentials
_, url_split = url.netloc.rsplit("@", maxsplit=1)
ValueError: not enough values to unpack (expected 2, got 1)
```
**To Reproduce**
Steps to reproduce the behavior:
* Have a remote `https://download.copr.fedorainfracloud.org/results/@caddy/caddy/epel-8-x86_64/`
* Try to migrate 0077
**Expected behavior**
the migration applies
**Additional context**
https://community.theforeman.org/t/foreman-3-3-katello-4-5-upgrade-failed-pulpcore-manager-migrate-noinput/31088
</issue>
<code>
[start of pulpcore/app/migrations/0077_move_remote_url_credentials.py]
1 # Generated by Django 3.2.6 on 2021-09-29 14:00
2
3 from urllib.parse import urlparse, urlunparse
4
5 from django.db import migrations
6
7
8 def move_remote_url_credentials(apps, schema_editor):
9 Remote = apps.get_model("core", "Remote")
10
11 for remote in Remote.objects.filter(url__contains="@").iterator():
12 url = urlparse(remote.url)
13
14 if not remote.username:
15 remote.username = url.username
16 if not remote.password:
17 remote.password = url.password
18
19 _, url_split = url.netloc.rsplit("@", maxsplit=1)
20 remote.url = urlunparse(url._replace(netloc=url_split))
21 remote.save()
22
23
24 class Migration(migrations.Migration):
25
26 dependencies = [
27 ('core', '0076_remove_reserved_resource'),
28 ]
29
30 operations = [
31 migrations.RunPython(
32 code=move_remote_url_credentials,
33 reverse_code=migrations.RunPython.noop,
34 elidable=True,
35 )
36 ]
37
[end of pulpcore/app/migrations/0077_move_remote_url_credentials.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pulpcore/app/migrations/0077_move_remote_url_credentials.py b/pulpcore/app/migrations/0077_move_remote_url_credentials.py
--- a/pulpcore/app/migrations/0077_move_remote_url_credentials.py
+++ b/pulpcore/app/migrations/0077_move_remote_url_credentials.py
@@ -11,6 +11,11 @@
for remote in Remote.objects.filter(url__contains="@").iterator():
url = urlparse(remote.url)
+ if '@' not in url.netloc:
+ # URLs can have an @ in other places than the netloc,
+ # but those do not indicate credentials
+ continue
+
if not remote.username:
remote.username = url.username
if not remote.password:
| {"golden_diff": "diff --git a/pulpcore/app/migrations/0077_move_remote_url_credentials.py b/pulpcore/app/migrations/0077_move_remote_url_credentials.py\n--- a/pulpcore/app/migrations/0077_move_remote_url_credentials.py\n+++ b/pulpcore/app/migrations/0077_move_remote_url_credentials.py\n@@ -11,6 +11,11 @@\n for remote in Remote.objects.filter(url__contains=\"@\").iterator():\n url = urlparse(remote.url)\n \n+ if '@' not in url.netloc:\n+ # URLs can have an @ in other places than the netloc,\n+ # but those do not indicate credentials\n+ continue\n+\n if not remote.username:\n remote.username = url.username\n if not remote.password:\n", "issue": "0077_move_remote_url_credentials.py fails on Remotes that have @ in path, not netloc\n**Version**\r\n3.18.10\r\n\r\n**Describe the bug**\r\nMigration 0077 fails when you have a remote that has an @ somewhere in the path\r\n\r\n```\r\n Applying core.0077_move_remote_url_credentials...Traceback (most recent call last):\r\n File \"/usr/bin/pulpcore-manager\", line 33, in <module>\r\n sys.exit(load_entry_point('pulpcore==3.18.10', 'console_scripts', 'pulpcore-manager')())\r\n File \"/usr/lib/python3.9/site-packages/pulpcore/app/manage.py\", line 11, in manage\r\n execute_from_command_line(sys.argv)\r\n File \"/usr/lib/python3.9/site-packages/django/core/management/__init__.py\", line 419, in execute_from_command_line\r\n utility.execute()\r\n File \"/usr/lib/python3.9/site-packages/django/core/management/__init__.py\", line 413, in execute\r\n self.fetch_command(subcommand).run_from_argv(self.argv)\r\n File \"/usr/lib/python3.9/site-packages/django/core/management/base.py\", line 354, in run_from_argv\r\n self.execute(*args, **cmd_options)\r\n File \"/usr/lib/python3.9/site-packages/django/core/management/base.py\", line 398, in execute\r\n output = self.handle(*args, **options)\r\n File \"/usr/lib/python3.9/site-packages/django/core/management/base.py\", line 89, in wrapped\r\n res = handle_func(*args, **kwargs)\r\n File \"/usr/lib/python3.9/site-packages/django/core/management/commands/migrate.py\", line 244, in handle\r\n post_migrate_state = executor.migrate(\r\n File \"/usr/lib/python3.9/site-packages/django/db/migrations/executor.py\", line 117, in migrate\r\n state = self._migrate_all_forwards(state, plan, full_plan, fake=fake, fake_initial=fake_initial)\r\n File \"/usr/lib/python3.9/site-packages/django/db/migrations/executor.py\", line 147, in _migrate_all_forwards\r\n state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial)\r\n File \"/usr/lib/python3.9/site-packages/django/db/migrations/executor.py\", line 227, in apply_migration\r\n state = migration.apply(state, schema_editor)\r\n File \"/usr/lib/python3.9/site-packages/django/db/migrations/migration.py\", line 126, in apply\r\n operation.database_forwards(self.app_label, schema_editor, old_state, project_state)\r\n File \"/usr/lib/python3.9/site-packages/django/db/migrations/operations/special.py\", line 190, in database_forwards\r\n self.code(from_state.apps, schema_editor)\r\n File \"/usr/lib/python3.9/site-packages/pulpcore/app/migrations/0077_move_remote_url_credentials.py\", line 19, in move_remote_url_credentials\r\n _, url_split = url.netloc.rsplit(\"@\", maxsplit=1)\r\nValueError: not enough values to unpack (expected 2, got 1)\r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n* Have a remote `https://download.copr.fedorainfracloud.org/results/@caddy/caddy/epel-8-x86_64/`\r\n* Try to migrate 0077\r\n\r\n**Expected 
behavior**\r\nmigration aplies\r\n\r\n**Additional context**\r\nhttps://community.theforeman.org/t/foreman-3-3-katello-4-5-upgrade-failed-pulpcore-manager-migrate-noinput/31088\r\n\n", "before_files": [{"content": "# Generated by Django 3.2.6 on 2021-09-29 14:00\n\nfrom urllib.parse import urlparse, urlunparse\n\nfrom django.db import migrations\n\n\ndef move_remote_url_credentials(apps, schema_editor):\n Remote = apps.get_model(\"core\", \"Remote\")\n\n for remote in Remote.objects.filter(url__contains=\"@\").iterator():\n url = urlparse(remote.url)\n\n if not remote.username:\n remote.username = url.username\n if not remote.password:\n remote.password = url.password\n\n _, url_split = url.netloc.rsplit(\"@\", maxsplit=1)\n remote.url = urlunparse(url._replace(netloc=url_split))\n remote.save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0076_remove_reserved_resource'),\n ]\n\n operations = [\n migrations.RunPython(\n code=move_remote_url_credentials,\n reverse_code=migrations.RunPython.noop,\n elidable=True,\n )\n ]\n", "path": "pulpcore/app/migrations/0077_move_remote_url_credentials.py"}]} | 1,674 | 173 |
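The crash above comes straight from how `urlparse` splits that copr URL: the `@` lives in the path, so `netloc` contains no `@` and `rsplit("@", maxsplit=1)` yields a single element that cannot be unpacked into two names. A standalone illustration:

```python
from urllib.parse import urlparse

url = urlparse(
    "https://download.copr.fedorainfracloud.org/results/@caddy/caddy/epel-8-x86_64/"
)
print(url.netloc)   # download.copr.fedorainfracloud.org
print(url.path)     # /results/@caddy/caddy/epel-8-x86_64/
print(url.netloc.rsplit("@", maxsplit=1))  # one element -> the unpack fails
```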
gh_patches_debug_28439 | rasdani/github-patches | git_diff | iterative__dvc-10423 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Feature proposal: `dvc artifacts get --show-url`
DVC currently supports `dvc get --show-url` as a way to retrieve just the URL of a DVC-versioned object as opposed to the object itself.
However, there is no equivalent for `dvc artifacts get`. This came as a customer request (to allow easier sharing of results even to people who are not DVC/DVC Studio users). It also has advantages e.g. in model deployment to Sagemaker (which requires the artifact URL on S3).
</issue>
<code>
[start of dvc/commands/artifacts.py]
1 from dvc.cli import completion, formatter
2 from dvc.cli.command import CmdBaseNoRepo
3 from dvc.cli.utils import DictAction, append_doc_link
4 from dvc.exceptions import DvcException
5 from dvc.log import logger
6
7 logger = logger.getChild(__name__)
8
9
10 class CmdArtifactsGet(CmdBaseNoRepo):
11 def run(self):
12 from dvc.repo.artifacts import Artifacts
13 from dvc.scm import CloneError
14 from dvc.ui import ui
15
16 try:
17 count, out = Artifacts.get(
18 self.args.url,
19 name=self.args.name,
20 version=self.args.rev,
21 stage=self.args.stage,
22 force=self.args.force,
23 config=self.args.config,
24 remote=self.args.remote,
25 remote_config=self.args.remote_config,
26 out=self.args.out,
27 )
28 ui.write(f"Downloaded {count} file(s) to '{out}'")
29 return 0
30 except CloneError:
31 logger.exception("failed to get '%s'", self.args.name)
32 return 1
33 except DvcException:
34 logger.exception(
35 "failed to get '%s' from '%s'", self.args.name, self.args.url
36 )
37 return 1
38
39
40 def add_parser(subparsers, parent_parser):
41 ARTIFACTS_HELP = "DVC model registry artifact commands."
42
43 artifacts_parser = subparsers.add_parser(
44 "artifacts",
45 parents=[parent_parser],
46 description=append_doc_link(ARTIFACTS_HELP, "artifacts"),
47 help=ARTIFACTS_HELP,
48 formatter_class=formatter.RawDescriptionHelpFormatter,
49 )
50 artifacts_subparsers = artifacts_parser.add_subparsers(
51 dest="cmd",
52 help="Use `dvc artifacts CMD --help` to display command-specific help.",
53 required=True,
54 )
55
56 ARTIFACTS_GET_HELP = "Download an artifact from a DVC project."
57 get_parser = artifacts_subparsers.add_parser(
58 "get",
59 parents=[parent_parser],
60 description=append_doc_link(ARTIFACTS_GET_HELP, "artifacts/get"),
61 help=ARTIFACTS_HELP,
62 formatter_class=formatter.RawDescriptionHelpFormatter,
63 )
64 get_parser.add_argument("url", help="Location of DVC repository to download from")
65 get_parser.add_argument(
66 "name", help="Name of artifact in the repository"
67 ).complete = completion.FILE
68 get_parser.add_argument(
69 "--rev",
70 nargs="?",
71 help="Artifact version",
72 metavar="<version>",
73 )
74 get_parser.add_argument(
75 "--stage",
76 nargs="?",
77 help="Artifact stage",
78 metavar="<stage>",
79 )
80 get_parser.add_argument(
81 "-o",
82 "--out",
83 nargs="?",
84 help="Destination path to download artifact to",
85 metavar="<path>",
86 ).complete = completion.DIR
87 get_parser.add_argument(
88 "-j",
89 "--jobs",
90 type=int,
91 help=(
92 "Number of jobs to run simultaneously. "
93 "The default value is 4 * cpu_count(). "
94 ),
95 metavar="<number>",
96 )
97 get_parser.add_argument(
98 "-f",
99 "--force",
100 action="store_true",
101 default=False,
102 help="Override local file or folder if exists.",
103 )
104 get_parser.add_argument(
105 "--config",
106 type=str,
107 help=(
108 "Path to a config file that will be merged with the config "
109 "in the target repository."
110 ),
111 )
112 get_parser.add_argument(
113 "--remote",
114 type=str,
115 help=(
116 "Remote name to set as a default in the target repository "
117 "(only applicable when downloading from DVC remote)."
118 ),
119 )
120 get_parser.add_argument(
121 "--remote-config",
122 type=str,
123 nargs="*",
124 action=DictAction,
125 help=(
126 "Remote config options to merge with a remote's config (default or one "
127 "specified by '--remote') in the target repository (only applicable "
128 "when downloading from DVC remote)."
129 ),
130 )
131 get_parser.set_defaults(func=CmdArtifactsGet)
132
[end of dvc/commands/artifacts.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dvc/commands/artifacts.py b/dvc/commands/artifacts.py
--- a/dvc/commands/artifacts.py
+++ b/dvc/commands/artifacts.py
@@ -13,6 +13,9 @@
from dvc.scm import CloneError
from dvc.ui import ui
+ if self.args.show_url:
+ return self._show_url()
+
try:
count, out = Artifacts.get(
self.args.url,
@@ -36,6 +39,28 @@
)
return 1
+ def _show_url(self):
+ from dvc.api import artifacts_show, get_url
+ from dvc.ui import ui
+
+ artifact = artifacts_show(
+ self.args.name,
+ version=self.args.rev,
+ stage=self.args.stage,
+ repo=self.args.url,
+ )
+
+ url = get_url(
+ artifact["path"],
+ repo=self.args.url,
+ rev=artifact["rev"],
+ remote=self.args.remote,
+ remote_config=self.args.remote_config,
+ )
+ ui.write(url, force=True)
+
+ return 0
+
def add_parser(subparsers, parent_parser):
ARTIFACTS_HELP = "DVC model registry artifact commands."
@@ -84,6 +109,14 @@
help="Destination path to download artifact to",
metavar="<path>",
).complete = completion.DIR
+ get_parser.add_argument(
+ "--show-url",
+ action="store_true",
+ help=(
+ "Print the storage location (URL) the target data would be "
+ "downloaded from, and exit."
+ ),
+ )
get_parser.add_argument(
"-j",
"--jobs",
| {"golden_diff": "diff --git a/dvc/commands/artifacts.py b/dvc/commands/artifacts.py\n--- a/dvc/commands/artifacts.py\n+++ b/dvc/commands/artifacts.py\n@@ -13,6 +13,9 @@\n from dvc.scm import CloneError\n from dvc.ui import ui\n \n+ if self.args.show_url:\n+ return self._show_url()\n+\n try:\n count, out = Artifacts.get(\n self.args.url,\n@@ -36,6 +39,28 @@\n )\n return 1\n \n+ def _show_url(self):\n+ from dvc.api import artifacts_show, get_url\n+ from dvc.ui import ui\n+\n+ artifact = artifacts_show(\n+ self.args.name,\n+ version=self.args.rev,\n+ stage=self.args.stage,\n+ repo=self.args.url,\n+ )\n+\n+ url = get_url(\n+ artifact[\"path\"],\n+ repo=self.args.url,\n+ rev=artifact[\"rev\"],\n+ remote=self.args.remote,\n+ remote_config=self.args.remote_config,\n+ )\n+ ui.write(url, force=True)\n+\n+ return 0\n+\n \n def add_parser(subparsers, parent_parser):\n ARTIFACTS_HELP = \"DVC model registry artifact commands.\"\n@@ -84,6 +109,14 @@\n help=\"Destination path to download artifact to\",\n metavar=\"<path>\",\n ).complete = completion.DIR\n+ get_parser.add_argument(\n+ \"--show-url\",\n+ action=\"store_true\",\n+ help=(\n+ \"Print the storage location (URL) the target data would be \"\n+ \"downloaded from, and exit.\"\n+ ),\n+ )\n get_parser.add_argument(\n \"-j\",\n \"--jobs\",\n", "issue": "Feature proposal: `dvc artifacts get --show-url`\nDVC currently supports `dvc get --show-url` as a way to retrieve just the URL of a DVC-versioned object as opposed to the object itself.\r\n\r\nHowever, there is no equivalent for `dvc artifacts get`. This came as a customer request (to allow easier sharing of results even to people who are not DVC/DVC Studio users). It also has advantages e.g. in model deployment to Sagemaker (which requires the artifact URL on S3).\n", "before_files": [{"content": "from dvc.cli import completion, formatter\nfrom dvc.cli.command import CmdBaseNoRepo\nfrom dvc.cli.utils import DictAction, append_doc_link\nfrom dvc.exceptions import DvcException\nfrom dvc.log import logger\n\nlogger = logger.getChild(__name__)\n\n\nclass CmdArtifactsGet(CmdBaseNoRepo):\n def run(self):\n from dvc.repo.artifacts import Artifacts\n from dvc.scm import CloneError\n from dvc.ui import ui\n\n try:\n count, out = Artifacts.get(\n self.args.url,\n name=self.args.name,\n version=self.args.rev,\n stage=self.args.stage,\n force=self.args.force,\n config=self.args.config,\n remote=self.args.remote,\n remote_config=self.args.remote_config,\n out=self.args.out,\n )\n ui.write(f\"Downloaded {count} file(s) to '{out}'\")\n return 0\n except CloneError:\n logger.exception(\"failed to get '%s'\", self.args.name)\n return 1\n except DvcException:\n logger.exception(\n \"failed to get '%s' from '%s'\", self.args.name, self.args.url\n )\n return 1\n\n\ndef add_parser(subparsers, parent_parser):\n ARTIFACTS_HELP = \"DVC model registry artifact commands.\"\n\n artifacts_parser = subparsers.add_parser(\n \"artifacts\",\n parents=[parent_parser],\n description=append_doc_link(ARTIFACTS_HELP, \"artifacts\"),\n help=ARTIFACTS_HELP,\n formatter_class=formatter.RawDescriptionHelpFormatter,\n )\n artifacts_subparsers = artifacts_parser.add_subparsers(\n dest=\"cmd\",\n help=\"Use `dvc artifacts CMD --help` to display command-specific help.\",\n required=True,\n )\n\n ARTIFACTS_GET_HELP = \"Download an artifact from a DVC project.\"\n get_parser = artifacts_subparsers.add_parser(\n \"get\",\n parents=[parent_parser],\n description=append_doc_link(ARTIFACTS_GET_HELP, \"artifacts/get\"),\n help=ARTIFACTS_HELP,\n 
formatter_class=formatter.RawDescriptionHelpFormatter,\n )\n get_parser.add_argument(\"url\", help=\"Location of DVC repository to download from\")\n get_parser.add_argument(\n \"name\", help=\"Name of artifact in the repository\"\n ).complete = completion.FILE\n get_parser.add_argument(\n \"--rev\",\n nargs=\"?\",\n help=\"Artifact version\",\n metavar=\"<version>\",\n )\n get_parser.add_argument(\n \"--stage\",\n nargs=\"?\",\n help=\"Artifact stage\",\n metavar=\"<stage>\",\n )\n get_parser.add_argument(\n \"-o\",\n \"--out\",\n nargs=\"?\",\n help=\"Destination path to download artifact to\",\n metavar=\"<path>\",\n ).complete = completion.DIR\n get_parser.add_argument(\n \"-j\",\n \"--jobs\",\n type=int,\n help=(\n \"Number of jobs to run simultaneously. \"\n \"The default value is 4 * cpu_count(). \"\n ),\n metavar=\"<number>\",\n )\n get_parser.add_argument(\n \"-f\",\n \"--force\",\n action=\"store_true\",\n default=False,\n help=\"Override local file or folder if exists.\",\n )\n get_parser.add_argument(\n \"--config\",\n type=str,\n help=(\n \"Path to a config file that will be merged with the config \"\n \"in the target repository.\"\n ),\n )\n get_parser.add_argument(\n \"--remote\",\n type=str,\n help=(\n \"Remote name to set as a default in the target repository \"\n \"(only applicable when downloading from DVC remote).\"\n ),\n )\n get_parser.add_argument(\n \"--remote-config\",\n type=str,\n nargs=\"*\",\n action=DictAction,\n help=(\n \"Remote config options to merge with a remote's config (default or one \"\n \"specified by '--remote') in the target repository (only applicable \"\n \"when downloading from DVC remote).\"\n ),\n )\n get_parser.set_defaults(func=CmdArtifactsGet)\n", "path": "dvc/commands/artifacts.py"}]} | 1,806 | 390 |
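A rough Python equivalent of the new `--show-url` path, built from the API calls the diff itself introduces (`artifacts_show` and `get_url`); the repository URL, artifact name, and stage below are placeholders:

```python
from dvc.api import artifacts_show, get_url

repo = "https://github.com/org/repo"  # placeholder
artifact = artifacts_show("mymodel", stage="prod", repo=repo)
url = get_url(artifact["path"], repo=repo, rev=artifact["rev"])
print(url)  # storage location, e.g. an s3:// key, depending on the remote
```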