| problem_id<br>stringlengths 18-22 | source<br>stringclasses 1 value | task_type<br>stringclasses 1 value | in_source_id<br>stringlengths 13-58 | prompt<br>stringlengths 1.71k-9.01k | golden_diff<br>stringlengths 151-4.94k | verification_info<br>stringlengths 465-11.3k | num_tokens_prompt<br>int64 557-2.05k | num_tokens_diff<br>int64 48-1.02k |
|---|---|---|---|---|---|---|---|---|
gh_patches_debug_26260 | rasdani/github-patches | git_diff | genialis__resolwe-196 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Elasticserach returns paginated results when querying/mapping features using RESDK
In resolwe-bio tools/goea.py `org_features = res.feature.filter(source=args.source_db, query=genes)` should return all genes, not just the first 10.
</issue>
<code>
[start of resolwe/elastic/viewsets.py]
1 """.. Ignore pydocstyle D400.
2
3 ================
4 Elastic Viewsets
5 ================
6
7 .. autoclass:: resolwe.elastic.viewsets.ElasticSearchMixin
8 :members:
9
10 """
11 from __future__ import absolute_import, division, print_function, unicode_literals
12
13 from elasticsearch_dsl.query import Q
14
15 from django.conf import settings
16 from django.contrib.auth import get_user_model
17
18 from rest_framework.response import Response
19 from rest_framework.viewsets import GenericViewSet
20
21 __all__ = (
22 'ElasticSearchMixin',
23 'PaginationMixin',
24 'ElasticSearchBaseViewSet',
25 )
26
27
28 class ElasticSearchMixin(object):
29 """Mixin to use Django REST Framework with ElasticSearch based querysets.
30
31 This mixin adds following methods:
32 * :func:`~ElasticSearchMixin.order_search`
33 * :func:`~ElasticSearchMixin.filter_search`
34 * :func:`~ElasticSearchMixin.filter_permissions`
35
36 """
37
38 filtering_fields = []
39 ordering_fields = []
40 ordering = None
41
42 def get_query_param(self, key, default=None):
43 """Get query parameter uniformly for GET and POST requests."""
44 value = self.request.query_params.get(key, None)
45 if value is None:
46 value = self.request.data.get(key, None)
47 if value is None:
48 value = default
49 return value
50
51 def order_search(self, search):
52 """Order given search by the ordering parameter given in request.
53
54 :param search: ElasticSearch query object
55
56 """
57 ordering = self.get_query_param('ordering', self.ordering)
58
59 ordering_field = ordering.lstrip('-')
60 if ordering_field not in self.ordering_fields:
61 raise KeyError('Ordering by `{}` is not supported.'.format(ordering_field))
62
63 return search.sort(ordering)
64
65 def filter_search(self, search):
66 """Filter given search by the filter parameter given in request.
67
68 :param search: ElasticSearch query object
69
70 """
71 for field in self.filtering_fields:
72 value = self.get_query_param(field, None)
73 if value:
74 if isinstance(value, list):
75 filters = [Q('match', **{field: item}) for item in value]
76 search = search.query('bool', should=filters)
77 else:
78 search = search.query('wildcard', **{field: value})
79
80 return search
81
82 def filter_permissions(self, search):
83 """Filter given query based on permissions of the user in the request.
84
85 :param search: ElasticSearch query object
86
87 """
88 user = self.request.user
89 if user.is_superuser:
90 return search
91 if user.is_anonymous():
92 user_model = get_user_model()
93 user = user_model.objects.get(**{user_model.USERNAME_FIELD: settings.ANONYMOUS_USER_NAME})
94
95 filters = [Q('match', users_with_permissions=user.pk)]
96 filters.extend([
97 Q('match', groups_with_permissions=group.pk) for group in user.groups.all()
98 ])
99
100 # `minimum_should_match` is set to 1 by default
101 return search.query('bool', should=filters)
102
103
104 class PaginationMixin(object):
105 """Mixin for making paginated response in case pagination parameters are provided."""
106
107 def paginate_response(self, queryset):
108 """Optionally return paginated response.
109
110 If pagination parameters are provided in the request, then paginated response
111 is returned, otherwise response is not paginated.
112
113 """
114 page = self.paginate_queryset(queryset)
115 if page is not None:
116 serializer = self.get_serializer(page, many=True)
117 return self.get_paginated_response(serializer.data)
118
119 serializer = self.get_serializer(queryset, many=True)
120 return Response(serializer.data)
121
122
123 class ElasticSearchBaseViewSet(PaginationMixin, ElasticSearchMixin, GenericViewSet):
124 """Base ViewSet for ElasticSearch based views.
125
126 This ViewSet creates search based on ``document_class`` parameter, specified
127 in subclass and:
128
129 * filter it by query parameters
130 * apply ordering
131 * filter permissions
132 * apply pagination
133
134 .. IMPORTANT::
135
136 Both ``POST`` and ``GET`` requests are supported.
137 """
138
139 document_class = None
140
141 def custom_filter(self, search):
142 """Perform custom search filtering.
143
144 This method is intended to be overriden in subclasses.
145 """
146 return search
147
148 def list_with_post(self, request):
149 """Endpoint handler."""
150 search = self.document_class().search() # pylint: disable=not-callable
151
152 search = self.custom_filter(search)
153
154 search = self.filter_search(search)
155 search = self.order_search(search)
156 search = self.filter_permissions(search)
157
158 return self.paginate_response(search)
159
160 def list(self, request):
161 """Endpoint handler."""
162 return self.list_with_post(request)
163
[end of resolwe/elastic/viewsets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/resolwe/elastic/viewsets.py b/resolwe/elastic/viewsets.py
--- a/resolwe/elastic/viewsets.py
+++ b/resolwe/elastic/viewsets.py
@@ -15,6 +15,7 @@
from django.conf import settings
from django.contrib.auth import get_user_model
+from rest_framework.exceptions import APIException
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet
@@ -24,6 +25,16 @@
'ElasticSearchBaseViewSet',
)
+ELASTICSEARCH_SIZE = 10000 # maximum number of results returned by ElasticSearch
+
+
+class TooManyResults(APIException):
+ """Exception when elastic query returns more than ``ELASTICSEARCH_SIZE`` results."""
+
+ status_code = 400
+ default_detail = 'Query returned too many results. Please, add more filters or use pagination.'
+ default_code = 'bad_request'
+
class ElasticSearchMixin(object):
"""Mixin to use Django REST Framework with ElasticSearch based querysets.
@@ -155,6 +166,11 @@
search = self.order_search(search)
search = self.filter_permissions(search)
+ if search.count() > ELASTICSEARCH_SIZE:
+ raise TooManyResults()
+
+ search = search.extra(size=ELASTICSEARCH_SIZE)
+
return self.paginate_response(search)
def list(self, request):
| {"golden_diff": "diff --git a/resolwe/elastic/viewsets.py b/resolwe/elastic/viewsets.py\n--- a/resolwe/elastic/viewsets.py\n+++ b/resolwe/elastic/viewsets.py\n@@ -15,6 +15,7 @@\n from django.conf import settings\n from django.contrib.auth import get_user_model\n \n+from rest_framework.exceptions import APIException\n from rest_framework.response import Response\n from rest_framework.viewsets import GenericViewSet\n \n@@ -24,6 +25,16 @@\n 'ElasticSearchBaseViewSet',\n )\n \n+ELASTICSEARCH_SIZE = 10000 # maximum number of results returned by ElasticSearch\n+\n+\n+class TooManyResults(APIException):\n+ \"\"\"Exception when elastic query returns more than ``ELASTICSEARCH_SIZE`` results.\"\"\"\n+\n+ status_code = 400\n+ default_detail = 'Query returned too many results. Please, add more filters or use pagination.'\n+ default_code = 'bad_request'\n+\n \n class ElasticSearchMixin(object):\n \"\"\"Mixin to use Django REST Framework with ElasticSearch based querysets.\n@@ -155,6 +166,11 @@\n search = self.order_search(search)\n search = self.filter_permissions(search)\n \n+ if search.count() > ELASTICSEARCH_SIZE:\n+ raise TooManyResults()\n+\n+ search = search.extra(size=ELASTICSEARCH_SIZE)\n+\n return self.paginate_response(search)\n \n def list(self, request):\n", "issue": "Elasticserach returns paginated results when querying/mapping features using RESDK\nIn resolwe-bio tools/goea.py `org_features = res.feature.filter(source=args.source_db, query=genes)` should return all genes, not just the first 10.\n", "before_files": [{"content": "\"\"\".. Ignore pydocstyle D400.\n\n================\nElastic Viewsets\n================\n\n.. autoclass:: resolwe.elastic.viewsets.ElasticSearchMixin\n :members:\n\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom elasticsearch_dsl.query import Q\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\n\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import GenericViewSet\n\n__all__ = (\n 'ElasticSearchMixin',\n 'PaginationMixin',\n 'ElasticSearchBaseViewSet',\n)\n\n\nclass ElasticSearchMixin(object):\n \"\"\"Mixin to use Django REST Framework with ElasticSearch based querysets.\n\n This mixin adds following methods:\n * :func:`~ElasticSearchMixin.order_search`\n * :func:`~ElasticSearchMixin.filter_search`\n * :func:`~ElasticSearchMixin.filter_permissions`\n\n \"\"\"\n\n filtering_fields = []\n ordering_fields = []\n ordering = None\n\n def get_query_param(self, key, default=None):\n \"\"\"Get query parameter uniformly for GET and POST requests.\"\"\"\n value = self.request.query_params.get(key, None)\n if value is None:\n value = self.request.data.get(key, None)\n if value is None:\n value = default\n return value\n\n def order_search(self, search):\n \"\"\"Order given search by the ordering parameter given in request.\n\n :param search: ElasticSearch query object\n\n \"\"\"\n ordering = self.get_query_param('ordering', self.ordering)\n\n ordering_field = ordering.lstrip('-')\n if ordering_field not in self.ordering_fields:\n raise KeyError('Ordering by `{}` is not supported.'.format(ordering_field))\n\n return search.sort(ordering)\n\n def filter_search(self, search):\n \"\"\"Filter given search by the filter parameter given in request.\n\n :param search: ElasticSearch query object\n\n \"\"\"\n for field in self.filtering_fields:\n value = self.get_query_param(field, None)\n if value:\n if isinstance(value, list):\n filters = [Q('match', **{field: item}) 
for item in value]\n search = search.query('bool', should=filters)\n else:\n search = search.query('wildcard', **{field: value})\n\n return search\n\n def filter_permissions(self, search):\n \"\"\"Filter given query based on permissions of the user in the request.\n\n :param search: ElasticSearch query object\n\n \"\"\"\n user = self.request.user\n if user.is_superuser:\n return search\n if user.is_anonymous():\n user_model = get_user_model()\n user = user_model.objects.get(**{user_model.USERNAME_FIELD: settings.ANONYMOUS_USER_NAME})\n\n filters = [Q('match', users_with_permissions=user.pk)]\n filters.extend([\n Q('match', groups_with_permissions=group.pk) for group in user.groups.all()\n ])\n\n # `minimum_should_match` is set to 1 by default\n return search.query('bool', should=filters)\n\n\nclass PaginationMixin(object):\n \"\"\"Mixin for making paginated response in case pagination parameters are provided.\"\"\"\n\n def paginate_response(self, queryset):\n \"\"\"Optionally return paginated response.\n\n If pagination parameters are provided in the request, then paginated response\n is returned, otherwise response is not paginated.\n\n \"\"\"\n page = self.paginate_queryset(queryset)\n if page is not None:\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n\n serializer = self.get_serializer(queryset, many=True)\n return Response(serializer.data)\n\n\nclass ElasticSearchBaseViewSet(PaginationMixin, ElasticSearchMixin, GenericViewSet):\n \"\"\"Base ViewSet for ElasticSearch based views.\n\n This ViewSet creates search based on ``document_class`` parameter, specified\n in subclass and:\n\n * filter it by query parameters\n * apply ordering\n * filter permissions\n * apply pagination\n\n .. IMPORTANT::\n\n Both ``POST`` and ``GET`` requests are supported.\n \"\"\"\n\n document_class = None\n\n def custom_filter(self, search):\n \"\"\"Perform custom search filtering.\n\n This method is intended to be overriden in subclasses.\n \"\"\"\n return search\n\n def list_with_post(self, request):\n \"\"\"Endpoint handler.\"\"\"\n search = self.document_class().search() # pylint: disable=not-callable\n\n search = self.custom_filter(search)\n\n search = self.filter_search(search)\n search = self.order_search(search)\n search = self.filter_permissions(search)\n\n return self.paginate_response(search)\n\n def list(self, request):\n \"\"\"Endpoint handler.\"\"\"\n return self.list_with_post(request)\n", "path": "resolwe/elastic/viewsets.py"}]} | 2,006 | 321 |
gh_patches_debug_13499 | rasdani/github-patches | git_diff | lutris__lutris-488 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Specify a User Agent for HTTP requests
Right now it's python-urllib/someversion, and Cloudflare sites (tested on medium protection site) blocks it and returns 403 status code.
Testing the same url with curl works without it blocking, so I'm guessing Cloudflare checks the request UA.
</issue>
<code>
[start of lutris/util/http.py]
1 import json
2 import socket
3 import urllib.request
4 import urllib.error
5 import urllib.parse
6 from ssl import CertificateError
7
8 from lutris.settings import SITE_URL
9 from lutris.util.log import logger
10
11
12 class Request(object):
13 def __init__(self, url, timeout=5, stop_request=None,
14 thread_queue=None, headers={}):
15
16 if not url:
17 raise ValueError('An URL is required!')
18
19 if url.startswith('//'):
20 url = 'https:' + url
21
22 if url.startswith('/'):
23 url = SITE_URL + url
24
25 self.url = url
26 self.content = ''
27 self.timeout = timeout
28 self.stop_request = stop_request
29 self.thread_queue = thread_queue
30 self.buffer_size = 32 * 1024 # Bytes
31 self.downloaded_size = 0
32 self.headers = headers
33
34 def get(self, data=None):
35 req = urllib.request.Request(url=self.url, data=data, headers=self.headers)
36 try:
37 request = urllib.request.urlopen(req, timeout=self.timeout)
38 except (urllib.error.HTTPError, CertificateError) as e:
39 logger.error("Unavailable url (%s): %s", self.url, e)
40 except (socket.timeout, urllib.error.URLError) as e:
41 logger.error("Unable to connect to server (%s): %s", self.url, e)
42 else:
43 try:
44 total_size = request.info().get('Content-Length').strip()
45 total_size = int(total_size)
46 except AttributeError:
47 total_size = 0
48
49 chunks = []
50 while 1:
51 if self.stop_request and self.stop_request.is_set():
52 self.content = ''
53 return self
54 try:
55 chunk = request.read(self.buffer_size)
56 except socket.timeout as e:
57 logger.error("Request timed out")
58 self.content = ''
59 return self
60 self.downloaded_size += len(chunk)
61 if self.thread_queue:
62 self.thread_queue.put(
63 (chunk, self.downloaded_size, total_size)
64 )
65 else:
66 chunks.append(chunk)
67 if not chunk:
68 break
69 request.close()
70 self.content = b''.join(chunks)
71 return self
72
73 def post(self, data):
74 raise NotImplementedError
75
76 def write_to_file(self, path):
77 content = self.content
78 if content:
79 with open(path, 'wb') as dest_file:
80 dest_file.write(content)
81
82 @property
83 def json(self):
84 if self.content:
85 return json.loads(self.text)
86
87 @property
88 def text(self):
89 if self.content:
90 return self.content.decode()
91
[end of lutris/util/http.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lutris/util/http.py b/lutris/util/http.py
--- a/lutris/util/http.py
+++ b/lutris/util/http.py
@@ -5,6 +5,8 @@
import urllib.parse
from ssl import CertificateError
+from lutris.settings import PROJECT
+from lutris.settings import VERSION
from lutris.settings import SITE_URL
from lutris.util.log import logger
@@ -29,6 +31,8 @@
self.thread_queue = thread_queue
self.buffer_size = 32 * 1024 # Bytes
self.downloaded_size = 0
+ if not headers.get('User-Agent'):
+ headers['User-Agent'] = PROJECT + '/' + VERSION
self.headers = headers
def get(self, data=None):
| {"golden_diff": "diff --git a/lutris/util/http.py b/lutris/util/http.py\n--- a/lutris/util/http.py\n+++ b/lutris/util/http.py\n@@ -5,6 +5,8 @@\n import urllib.parse\n from ssl import CertificateError\n \n+from lutris.settings import PROJECT\n+from lutris.settings import VERSION\n from lutris.settings import SITE_URL\n from lutris.util.log import logger\n \n@@ -29,6 +31,8 @@\n self.thread_queue = thread_queue\n self.buffer_size = 32 * 1024 # Bytes\n self.downloaded_size = 0\n+ if not headers.get('User-Agent'):\n+ headers['User-Agent'] = PROJECT + '/' + VERSION\n self.headers = headers\n \n def get(self, data=None):\n", "issue": "Specify a User Agent for HTTP requests\nRight now it's python-urllib/someversion, and Cloudflare sites (tested on medium protection site) blocks it and returns 403 status code.\r\nTesting the same url with curl works without it blocking, so I'm guessing Cloudflare checks the request UA.\n", "before_files": [{"content": "import json\nimport socket\nimport urllib.request\nimport urllib.error\nimport urllib.parse\nfrom ssl import CertificateError\n\nfrom lutris.settings import SITE_URL\nfrom lutris.util.log import logger\n\n\nclass Request(object):\n def __init__(self, url, timeout=5, stop_request=None,\n thread_queue=None, headers={}):\n\n if not url:\n raise ValueError('An URL is required!')\n\n if url.startswith('//'):\n url = 'https:' + url\n\n if url.startswith('/'):\n url = SITE_URL + url\n\n self.url = url\n self.content = ''\n self.timeout = timeout\n self.stop_request = stop_request\n self.thread_queue = thread_queue\n self.buffer_size = 32 * 1024 # Bytes\n self.downloaded_size = 0\n self.headers = headers\n\n def get(self, data=None):\n req = urllib.request.Request(url=self.url, data=data, headers=self.headers)\n try:\n request = urllib.request.urlopen(req, timeout=self.timeout)\n except (urllib.error.HTTPError, CertificateError) as e:\n logger.error(\"Unavailable url (%s): %s\", self.url, e)\n except (socket.timeout, urllib.error.URLError) as e:\n logger.error(\"Unable to connect to server (%s): %s\", self.url, e)\n else:\n try:\n total_size = request.info().get('Content-Length').strip()\n total_size = int(total_size)\n except AttributeError:\n total_size = 0\n\n chunks = []\n while 1:\n if self.stop_request and self.stop_request.is_set():\n self.content = ''\n return self\n try:\n chunk = request.read(self.buffer_size)\n except socket.timeout as e:\n logger.error(\"Request timed out\")\n self.content = ''\n return self\n self.downloaded_size += len(chunk)\n if self.thread_queue:\n self.thread_queue.put(\n (chunk, self.downloaded_size, total_size)\n )\n else:\n chunks.append(chunk)\n if not chunk:\n break\n request.close()\n self.content = b''.join(chunks)\n return self\n\n def post(self, data):\n raise NotImplementedError\n\n def write_to_file(self, path):\n content = self.content\n if content:\n with open(path, 'wb') as dest_file:\n dest_file.write(content)\n\n @property\n def json(self):\n if self.content:\n return json.loads(self.text)\n\n @property\n def text(self):\n if self.content:\n return self.content.decode()\n", "path": "lutris/util/http.py"}]} | 1,333 | 174 |
gh_patches_debug_6933 | rasdani/github-patches | git_diff | Flexget__Flexget-3204 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
python 3.10 issue
I have an issue with python 3.10 and Flexget. Greenlet has been updated to 1.1.2 because the 1.0.0 version is not compatible with python 3.10. After that Flexget was installed successfully but I got the error message below.
- FlexGet version: 3.1.137
- Python version: 3.10
- Installation method: pip
- Using daemon (yes/no): no
- OS and version: Linux / Slackware / 5.14.8 kernel
Traceback (most recent call last):
File "/usr/bin/flexget", line 5, in <module>
from flexget import main
File "/usr/lib/python3.10/site-packages/flexget/__init__.py", line 11, in <module>
from flexget.manager import Manager # noqa
File "/usr/lib/python3.10/site-packages/flexget/manager.py", line 47, in <module>
from flexget.ipc import IPCClient, IPCServer # noqa
File "/usr/lib/python3.10/site-packages/flexget/ipc.py", line 14, in <module>
from flexget import terminal
File "/usr/lib/python3.10/site-packages/flexget/terminal.py", line 7, in <module>
from colorclass import Color, Windows
File "/usr/lib/python3.10/site-packages/colorclass/__init__.py", line 11, in <module>
from colorclass.codes import list_tags # noqa
File "/usr/lib/python3.10/site-packages/colorclass/codes.py", line 4, in <module>
from collections import Mapping
ImportError: cannot import name 'Mapping' from 'collections' (/usr/lib/python3.10/collections/__init__.py)
Thanks!
</issue>
<code>
[start of setup.py]
1 import sys
2 from pathlib import Path
3 from typing import List
4
5 from setuptools import find_packages, setup
6
7 long_description = Path('README.rst').read_text()
8
9 # Populates __version__ without importing the package
10 __version__ = None
11 with open('flexget/_version.py', encoding='utf-8') as ver_file:
12 exec(ver_file.read()) # pylint: disable=W0122
13 if not __version__:
14 print('Could not find __version__ from flexget/_version.py')
15 sys.exit(1)
16
17
18 def load_requirements(filename: str) -> List[str]:
19 return [
20 line.strip()
21 for line in Path(filename).read_text().splitlines()
22 if not line.startswith('#')
23 ]
24
25
26 setup(
27 name='FlexGet',
28 version=__version__,
29 description='FlexGet is a program aimed to automate downloading or processing content (torrents, podcasts, etc.) '
30 'from different sources like RSS-feeds, html-pages, various sites and more.',
31 long_description=long_description,
32 long_description_content_type='text/x-rst',
33 author='Marko Koivusalo',
34 author_email='[email protected]',
35 license='MIT',
36 url='https://flexget.com',
37 project_urls={
38 'Repository': 'https://github.com/Flexget/Flexget',
39 'Issue Tracker': 'https://github.com/Flexget/Flexget/issues',
40 'Forum': 'https://discuss.flexget.com',
41 },
42 packages=find_packages(exclude=['flexget.tests']),
43 include_package_data=True,
44 zip_safe=False,
45 install_requires=load_requirements('requirements.txt'),
46 tests_require=['pytest'],
47 extras_require={'dev': load_requirements('dev-requirements.txt')},
48 entry_points={
49 'console_scripts': ['flexget = flexget:main'],
50 'gui_scripts': [
51 'flexget-headless = flexget:main'
52 ], # This is useful on Windows to avoid a cmd popup
53 },
54 python_requires='>=3.6',
55 classifiers=[
56 "Development Status :: 5 - Production/Stable",
57 "License :: OSI Approved :: MIT License",
58 "Operating System :: OS Independent",
59 "Programming Language :: Python",
60 "Programming Language :: Python :: 3.6",
61 "Programming Language :: Python :: 3.7",
62 "Programming Language :: Python :: 3.8",
63 "Programming Language :: Python :: 3.9",
64 "Programming Language :: Python :: Implementation :: CPython",
65 "Programming Language :: Python :: Implementation :: PyPy",
66 ],
67 )
68
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -61,6 +61,7 @@
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -61,6 +61,7 @@\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n+ \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n", "issue": "python 3.10 issue\nI have an issue with python 3.10 and Flexget. Greenlet has been updated to 1.1.2 because the 1.0.0 version is not compatible with python 3.10. After that Flexget was installed successfully but I got the error message below.\r\n\r\n- FlexGet version: 3.1.137\r\n- Python version: 3.10\r\n- Installation method: pip\r\n- Using daemon (yes/no): no\r\n- OS and version: Linux / Slackware / 5.14.8 kernel\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/bin/flexget\", line 5, in <module>\r\n from flexget import main\r\n File \"/usr/lib/python3.10/site-packages/flexget/__init__.py\", line 11, in <module>\r\n from flexget.manager import Manager # noqa\r\n File \"/usr/lib/python3.10/site-packages/flexget/manager.py\", line 47, in <module>\r\n from flexget.ipc import IPCClient, IPCServer # noqa\r\n File \"/usr/lib/python3.10/site-packages/flexget/ipc.py\", line 14, in <module>\r\n from flexget import terminal\r\n File \"/usr/lib/python3.10/site-packages/flexget/terminal.py\", line 7, in <module>\r\n from colorclass import Color, Windows\r\n File \"/usr/lib/python3.10/site-packages/colorclass/__init__.py\", line 11, in <module>\r\n from colorclass.codes import list_tags # noqa\r\n File \"/usr/lib/python3.10/site-packages/colorclass/codes.py\", line 4, in <module>\r\n from collections import Mapping\r\nImportError: cannot import name 'Mapping' from 'collections' (/usr/lib/python3.10/collections/__init__.py)\r\n\r\nThanks!\n", "before_files": [{"content": "import sys\nfrom pathlib import Path\nfrom typing import List\n\nfrom setuptools import find_packages, setup\n\nlong_description = Path('README.rst').read_text()\n\n# Populates __version__ without importing the package\n__version__ = None\nwith open('flexget/_version.py', encoding='utf-8') as ver_file:\n exec(ver_file.read()) # pylint: disable=W0122\nif not __version__:\n print('Could not find __version__ from flexget/_version.py')\n sys.exit(1)\n\n\ndef load_requirements(filename: str) -> List[str]:\n return [\n line.strip()\n for line in Path(filename).read_text().splitlines()\n if not line.startswith('#')\n ]\n\n\nsetup(\n name='FlexGet',\n version=__version__,\n description='FlexGet is a program aimed to automate downloading or processing content (torrents, podcasts, etc.) 
'\n 'from different sources like RSS-feeds, html-pages, various sites and more.',\n long_description=long_description,\n long_description_content_type='text/x-rst',\n author='Marko Koivusalo',\n author_email='[email protected]',\n license='MIT',\n url='https://flexget.com',\n project_urls={\n 'Repository': 'https://github.com/Flexget/Flexget',\n 'Issue Tracker': 'https://github.com/Flexget/Flexget/issues',\n 'Forum': 'https://discuss.flexget.com',\n },\n packages=find_packages(exclude=['flexget.tests']),\n include_package_data=True,\n zip_safe=False,\n install_requires=load_requirements('requirements.txt'),\n tests_require=['pytest'],\n extras_require={'dev': load_requirements('dev-requirements.txt')},\n entry_points={\n 'console_scripts': ['flexget = flexget:main'],\n 'gui_scripts': [\n 'flexget-headless = flexget:main'\n ], # This is useful on Windows to avoid a cmd popup\n },\n python_requires='>=3.6',\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n)\n", "path": "setup.py"}]} | 1,629 | 109 |
gh_patches_debug_57793 | rasdani/github-patches | git_diff | catalyst-team__catalyst-855 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
EarlyStoppingCallback considers first epoch as bad
## 🐛 Bug Report
EarlyStoppingCallback considers first epoch as bad. This can lead for example to always stopping after first epoch if patience=1.
### How To Reproduce
You can train a model with early stopping and patience=1 and see that it always stops after first epoch. Or you can use the unit test below that I added to pull request.
#### Code sample
```python
from unittest.mock import MagicMock, PropertyMock
from catalyst.core import EarlyStoppingCallback
def test_patience1():
"""@TODO: Docs. Contribution is welcome."""
early_stop = EarlyStoppingCallback(1)
runner = MagicMock()
type(runner).stage_name = PropertyMock(return_value="training")
type(runner).valid_metrics = PropertyMock(return_value={"loss": 0.001})
stop_mock = PropertyMock(return_value=False)
type(runner).need_early_stop = stop_mock
early_stop.on_epoch_end(runner)
assert stop_mock.mock_calls == []
```
### Expected behavior
Training doesn't stop after first epoch. And the unit test passes.
### Environment
```bash
Catalyst version: 20.06
PyTorch version: 1.5.1
Is debug build: No
CUDA used to build PyTorch: None
TensorFlow version: N/A
TensorBoard version: 2.2.2
OS: Mac OSX 10.15.5
GCC version: Could not collect
CMake version: version 3.8.0
Python version: 3.7
Is CUDA available: No
CUDA runtime version: No CUDA
GPU models and configuration: No CUDA
Nvidia driver version: No CUDA
cuDNN version: No CUDA
Versions of relevant libraries:
[pip3] catalyst-codestyle==20.4
[pip3] catalyst-sphinx-theme==1.1.1
[pip3] efficientnet-pytorch==0.6.3
[pip3] numpy==1.18.5
[pip3] segmentation-models-pytorch==0.1.0
[pip3] tensorboard==2.2.2
[pip3] tensorboard-plugin-wit==1.6.0.post3
[pip3] tensorboardX==2.0
[pip3] torch==1.5.1
[pip3] torchvision==0.6.1
[conda] catalyst-codestyle 20.4 <pip>
[conda] catalyst-sphinx-theme 1.1.1 <pip>
[conda] efficientnet-pytorch 0.6.3 <pip>
[conda] numpy 1.18.5 <pip>
[conda] segmentation-models-pytorch 0.1.0 <pip>
[conda] tensorboard 2.2.2 <pip>
[conda] tensorboard-plugin-wit 1.6.0.post3 <pip>
[conda] tensorboardX 2.0 <pip>
[conda] torch 1.5.1 <pip>
[conda] torchvision 0.6.1 <pip>
```
</issue>
<code>
[start of catalyst/core/callbacks/early_stop.py]
1 from catalyst.core.callback import Callback, CallbackNode, CallbackOrder
2 from catalyst.core.runner import IRunner
3
4
5 class CheckRunCallback(Callback):
6 """@TODO: Docs. Contribution is welcome."""
7
8 def __init__(self, num_batch_steps: int = 3, num_epoch_steps: int = 2):
9 """@TODO: Docs. Contribution is welcome."""
10 super().__init__(order=CallbackOrder.external, node=CallbackNode.all)
11 self.num_batch_steps = num_batch_steps
12 self.num_epoch_steps = num_epoch_steps
13
14 def on_epoch_end(self, runner: IRunner):
15 """@TODO: Docs. Contribution is welcome."""
16 if runner.epoch >= self.num_epoch_steps:
17 runner.need_early_stop = True
18
19 def on_batch_end(self, runner: IRunner):
20 """@TODO: Docs. Contribution is welcome."""
21 if runner.loader_batch_step >= self.num_batch_steps:
22 runner.need_early_stop = True
23
24
25 class EarlyStoppingCallback(Callback):
26 """@TODO: Docs. Contribution is welcome."""
27
28 def __init__(
29 self,
30 patience: int,
31 metric: str = "loss",
32 minimize: bool = True,
33 min_delta: float = 1e-6,
34 ):
35 """@TODO: Docs. Contribution is welcome."""
36 super().__init__(order=CallbackOrder.external, node=CallbackNode.all)
37 self.best_score = None
38 self.metric = metric
39 self.patience = patience
40 self.num_bad_epochs = 0
41 self.is_better = None
42
43 if minimize:
44 self.is_better = lambda score, best: score <= (best - min_delta)
45 else:
46 self.is_better = lambda score, best: score >= (best + min_delta)
47
48 def on_epoch_end(self, runner: IRunner) -> None:
49 """@TODO: Docs. Contribution is welcome."""
50 if runner.stage_name.startswith("infer"):
51 return
52
53 score = runner.valid_metrics[self.metric]
54 if self.best_score is None:
55 self.best_score = score
56 if self.is_better(score, self.best_score):
57 self.num_bad_epochs = 0
58 self.best_score = score
59 else:
60 self.num_bad_epochs += 1
61
62 if self.num_bad_epochs >= self.patience:
63 print(f"Early stop at {runner.epoch} epoch")
64 runner.need_early_stop = True
65
[end of catalyst/core/callbacks/early_stop.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/catalyst/core/callbacks/early_stop.py b/catalyst/core/callbacks/early_stop.py
--- a/catalyst/core/callbacks/early_stop.py
+++ b/catalyst/core/callbacks/early_stop.py
@@ -51,9 +51,7 @@
return
score = runner.valid_metrics[self.metric]
- if self.best_score is None:
- self.best_score = score
- if self.is_better(score, self.best_score):
+ if self.best_score is None or self.is_better(score, self.best_score):
self.num_bad_epochs = 0
self.best_score = score
else:
| {"golden_diff": "diff --git a/catalyst/core/callbacks/early_stop.py b/catalyst/core/callbacks/early_stop.py\n--- a/catalyst/core/callbacks/early_stop.py\n+++ b/catalyst/core/callbacks/early_stop.py\n@@ -51,9 +51,7 @@\n return\n \n score = runner.valid_metrics[self.metric]\n- if self.best_score is None:\n- self.best_score = score\n- if self.is_better(score, self.best_score):\n+ if self.best_score is None or self.is_better(score, self.best_score):\n self.num_bad_epochs = 0\n self.best_score = score\n else:\n", "issue": "EarlyStoppingCallback considers first epoch as bad\n## \ud83d\udc1b Bug Report\r\nEarlyStoppingCallback considers first epoch as bad. This can lead for example to always stopping after first epoch if patience=1.\r\n\r\n\r\n### How To Reproduce\r\nYou can train a model with early stopping and patience=1 and see that it always stops after first epoch. Or you can use the unit test below that I added to pull request.\r\n\r\n#### Code sample\r\n```python\r\nfrom unittest.mock import MagicMock, PropertyMock\r\n\r\nfrom catalyst.core import EarlyStoppingCallback\r\n\r\n\r\ndef test_patience1():\r\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\r\n early_stop = EarlyStoppingCallback(1)\r\n runner = MagicMock()\r\n type(runner).stage_name = PropertyMock(return_value=\"training\")\r\n type(runner).valid_metrics = PropertyMock(return_value={\"loss\": 0.001})\r\n stop_mock = PropertyMock(return_value=False)\r\n type(runner).need_early_stop = stop_mock\r\n\r\n early_stop.on_epoch_end(runner)\r\n\r\n assert stop_mock.mock_calls == []\r\n```\r\n\r\n### Expected behavior\r\nTraining doesn't stop after first epoch. And the unit test passes.\r\n\r\n\r\n### Environment\r\n```bash\r\nCatalyst version: 20.06\r\nPyTorch version: 1.5.1\r\nIs debug build: No\r\nCUDA used to build PyTorch: None\r\nTensorFlow version: N/A\r\nTensorBoard version: 2.2.2\r\n\r\nOS: Mac OSX 10.15.5\r\nGCC version: Could not collect\r\nCMake version: version 3.8.0\r\n\r\nPython version: 3.7\r\nIs CUDA available: No\r\nCUDA runtime version: No CUDA\r\nGPU models and configuration: No CUDA\r\nNvidia driver version: No CUDA\r\ncuDNN version: No CUDA\r\n\r\nVersions of relevant libraries:\r\n[pip3] catalyst-codestyle==20.4\r\n[pip3] catalyst-sphinx-theme==1.1.1\r\n[pip3] efficientnet-pytorch==0.6.3\r\n[pip3] numpy==1.18.5\r\n[pip3] segmentation-models-pytorch==0.1.0\r\n[pip3] tensorboard==2.2.2\r\n[pip3] tensorboard-plugin-wit==1.6.0.post3\r\n[pip3] tensorboardX==2.0\r\n[pip3] torch==1.5.1\r\n[pip3] torchvision==0.6.1\r\n[conda] catalyst-codestyle 20.4 <pip>\r\n[conda] catalyst-sphinx-theme 1.1.1 <pip>\r\n[conda] efficientnet-pytorch 0.6.3 <pip>\r\n[conda] numpy 1.18.5 <pip>\r\n[conda] segmentation-models-pytorch 0.1.0 <pip>\r\n[conda] tensorboard 2.2.2 <pip>\r\n[conda] tensorboard-plugin-wit 1.6.0.post3 <pip>\r\n[conda] tensorboardX 2.0 <pip>\r\n[conda] torch 1.5.1 <pip>\r\n[conda] torchvision 0.6.1 <pip>\r\n```\r\n\n", "before_files": [{"content": "from catalyst.core.callback import Callback, CallbackNode, CallbackOrder\nfrom catalyst.core.runner import IRunner\n\n\nclass CheckRunCallback(Callback):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n\n def __init__(self, num_batch_steps: int = 3, num_epoch_steps: int = 2):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n super().__init__(order=CallbackOrder.external, node=CallbackNode.all)\n self.num_batch_steps = num_batch_steps\n self.num_epoch_steps = num_epoch_steps\n\n def on_epoch_end(self, runner: IRunner):\n \"\"\"@TODO: Docs. 
Contribution is welcome.\"\"\"\n if runner.epoch >= self.num_epoch_steps:\n runner.need_early_stop = True\n\n def on_batch_end(self, runner: IRunner):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n if runner.loader_batch_step >= self.num_batch_steps:\n runner.need_early_stop = True\n\n\nclass EarlyStoppingCallback(Callback):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n\n def __init__(\n self,\n patience: int,\n metric: str = \"loss\",\n minimize: bool = True,\n min_delta: float = 1e-6,\n ):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n super().__init__(order=CallbackOrder.external, node=CallbackNode.all)\n self.best_score = None\n self.metric = metric\n self.patience = patience\n self.num_bad_epochs = 0\n self.is_better = None\n\n if minimize:\n self.is_better = lambda score, best: score <= (best - min_delta)\n else:\n self.is_better = lambda score, best: score >= (best + min_delta)\n\n def on_epoch_end(self, runner: IRunner) -> None:\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n if runner.stage_name.startswith(\"infer\"):\n return\n\n score = runner.valid_metrics[self.metric]\n if self.best_score is None:\n self.best_score = score\n if self.is_better(score, self.best_score):\n self.num_bad_epochs = 0\n self.best_score = score\n else:\n self.num_bad_epochs += 1\n\n if self.num_bad_epochs >= self.patience:\n print(f\"Early stop at {runner.epoch} epoch\")\n runner.need_early_stop = True\n", "path": "catalyst/core/callbacks/early_stop.py"}]} | 1,883 | 145 |
gh_patches_debug_840 | rasdani/github-patches | git_diff | nilearn__nilearn-507 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add test for compatibility of old version of six
For the moment, we are compatible with the latest version of six. Recently, somebody pointed out that we did not support six 1.5.2. We should investigate, decide which version we should be compatible with and then add this to Travis.
</issue>
<code>
[start of continuous_integration/show-python-packages-versions.py]
1 import sys
2
3 DEPENDENCIES = ['numpy', 'scipy', 'sklearn', 'matplotlib', 'nibabel']
4
5
6 def print_package_version(package_name, indent=' '):
7 try:
8 package = __import__(package_name)
9 version = getattr(package, '__version__', None)
10 package_file = getattr(package, '__file__', )
11 provenance_info = '{0} from {1}'.format(version, package_file)
12 except ImportError:
13 provenance_info = 'not installed'
14
15 print('{0}{1}: {2}'.format(indent, package_name, provenance_info))
16
17 if __name__ == '__main__':
18 print('=' * 120)
19 print('Python %s' % str(sys.version))
20 print('from: %s\n' % sys.executable)
21
22 print('Dependencies versions')
23 for package_name in DEPENDENCIES:
24 print_package_version(package_name)
25 print('=' * 120)
26
[end of continuous_integration/show-python-packages-versions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/continuous_integration/show-python-packages-versions.py b/continuous_integration/show-python-packages-versions.py
--- a/continuous_integration/show-python-packages-versions.py
+++ b/continuous_integration/show-python-packages-versions.py
@@ -1,6 +1,6 @@
import sys
-DEPENDENCIES = ['numpy', 'scipy', 'sklearn', 'matplotlib', 'nibabel']
+DEPENDENCIES = ['six', 'numpy', 'scipy', 'sklearn', 'matplotlib', 'nibabel']
def print_package_version(package_name, indent=' '):
| {"golden_diff": "diff --git a/continuous_integration/show-python-packages-versions.py b/continuous_integration/show-python-packages-versions.py\n--- a/continuous_integration/show-python-packages-versions.py\n+++ b/continuous_integration/show-python-packages-versions.py\n@@ -1,6 +1,6 @@\n import sys\n \n-DEPENDENCIES = ['numpy', 'scipy', 'sklearn', 'matplotlib', 'nibabel']\n+DEPENDENCIES = ['six', 'numpy', 'scipy', 'sklearn', 'matplotlib', 'nibabel']\n \n \n def print_package_version(package_name, indent=' '):\n", "issue": "Add test for compatibility of old version of six\nFor the moment, we are compatible with the latest version of six. Recently, somebody pointed out that we did not support six 1.5.2. We should investigate, decide which version we should be compatible with and then add this to Travis.\n\n", "before_files": [{"content": "import sys\n\nDEPENDENCIES = ['numpy', 'scipy', 'sklearn', 'matplotlib', 'nibabel']\n\n\ndef print_package_version(package_name, indent=' '):\n try:\n package = __import__(package_name)\n version = getattr(package, '__version__', None)\n package_file = getattr(package, '__file__', )\n provenance_info = '{0} from {1}'.format(version, package_file)\n except ImportError:\n provenance_info = 'not installed'\n\n print('{0}{1}: {2}'.format(indent, package_name, provenance_info))\n\nif __name__ == '__main__':\n print('=' * 120)\n print('Python %s' % str(sys.version))\n print('from: %s\\n' % sys.executable)\n\n print('Dependencies versions')\n for package_name in DEPENDENCIES:\n print_package_version(package_name)\n print('=' * 120)\n", "path": "continuous_integration/show-python-packages-versions.py"}]} | 849 | 124 |
gh_patches_debug_11637 | rasdani/github-patches | git_diff | getsentry__sentry-59857 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Jira deprecation of glance panels
Notice from Atlassian Support team about glance panel deprecation.
AC:
- Review the deprecation plan
- Build a recommendation based on how we're impacted. If minor development work is required, complete that with this ticket. If significant work is required, notify EM/PM to share impact and come up with next steps together.
Email from Atlassian:
```
Hope you are having a good day!
As part of this deprecation notice (https://developer.atlassian.com/cloud/jira/platform/changelog/#CHANGE-897), we are reaching out because we have identified that your app, “Sentry,” will be affected by the deprecation of glance panels.
This was initially scheduled for the 6th of October, but we have delayed it until the 30th of November.
The jiraIssueGlances and jira:issueGlance modules in Forge (https://developer.atlassian.com/platform/forge/manifest-reference/modules/jira-issue-glance/) and Connect (https://developer.atlassian.com/cloud/jira/platform/modules/issue-glance/) are being deprecated and replaced with the issueContext module.
We recommend transitioning from the glance panel to the new issue context module before the 30th of November.
Please note, we will not be extending this deprecation date as we announced it on the 30th of March.
Let me know if you need any further assistance,
Ahmud
Product Manager-Jira Cloud
```
</issue>
<code>
[start of src/sentry/integrations/jira/endpoints/descriptor.py]
1 from django.conf import settings
2 from django.urls import reverse
3 from rest_framework.request import Request
4 from rest_framework.response import Response
5
6 from sentry.api.api_publish_status import ApiPublishStatus
7 from sentry.api.base import Endpoint, control_silo_endpoint
8 from sentry.utils.assets import get_frontend_app_asset_url
9 from sentry.utils.http import absolute_uri
10
11 from .. import JIRA_KEY
12
13 scopes = ["read", "write", "act_as_user"]
14 # For Jira, only approved apps can use the access_email_addresses scope
15 # This scope allows Sentry to use the email endpoint (https://developer.atlassian.com/cloud/jira/platform/rest/v3/#api-rest-api-3-user-email-get)
16 # We use the email with Jira 2-way sync in order to match the user
17 if settings.JIRA_USE_EMAIL_SCOPE:
18 scopes.append("access_email_addresses")
19
20
21 @control_silo_endpoint
22 class JiraDescriptorEndpoint(Endpoint):
23 publish_status = {
24 "GET": ApiPublishStatus.UNKNOWN,
25 }
26 """
27 Provides the metadata needed by Jira to setup an instance of the Sentry integration within Jira.
28 Only used by on-prem orgs and devs setting up local instances of the integration. (Sentry SAAS
29 already has an established, official instance of the Sentry integration registered with Jira.)
30 """
31
32 authentication_classes = ()
33 permission_classes = ()
34
35 def get(self, request: Request) -> Response:
36 sentry_logo = absolute_uri(
37 get_frontend_app_asset_url("sentry", "entrypoints/logo-sentry.svg")
38 )
39 return self.respond(
40 {
41 "name": "Sentry",
42 "description": "Connect your Sentry organization to one or more of your Jira cloud instances. Get started streamlining your bug-squashing workflow by allowing your Sentry and Jira instances to work together.",
43 "key": JIRA_KEY,
44 "baseUrl": absolute_uri(),
45 "vendor": {"name": "Sentry", "url": "https://sentry.io"},
46 "authentication": {"type": "jwt"},
47 "lifecycle": {
48 "installed": "/extensions/jira/installed/",
49 "uninstalled": "/extensions/jira/uninstalled/",
50 },
51 "apiVersion": 1,
52 "modules": {
53 "postInstallPage": {
54 "url": "/extensions/jira/ui-hook/",
55 "name": {"value": "Configure Sentry Add-on"},
56 "key": "post-install-sentry",
57 },
58 "configurePage": {
59 "url": "/extensions/jira/ui-hook/",
60 "name": {"value": "Configure Sentry Add-on"},
61 "key": "configure-sentry",
62 },
63 "jiraIssueGlances": [
64 {
65 "icon": {"width": 24, "height": 24, "url": sentry_logo},
66 "content": {"type": "label", "label": {"value": "Linked Issues"}},
67 "target": {
68 "type": "web_panel",
69 "url": "/extensions/jira/issue/{issue.key}/",
70 },
71 "name": {"value": "Sentry "},
72 "key": "sentry-issues-glance",
73 }
74 ],
75 "webhooks": [
76 {
77 "event": "jira:issue_updated",
78 "url": reverse("sentry-extensions-jira-issue-updated"),
79 "excludeBody": False,
80 }
81 ],
82 },
83 "apiMigrations": {"gdpr": True, "context-qsh": True, "signed-install": True},
84 "scopes": scopes,
85 }
86 )
87
[end of src/sentry/integrations/jira/endpoints/descriptor.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/sentry/integrations/jira/endpoints/descriptor.py b/src/sentry/integrations/jira/endpoints/descriptor.py
--- a/src/sentry/integrations/jira/endpoints/descriptor.py
+++ b/src/sentry/integrations/jira/endpoints/descriptor.py
@@ -60,7 +60,7 @@
"name": {"value": "Configure Sentry Add-on"},
"key": "configure-sentry",
},
- "jiraIssueGlances": [
+ "jiraIssueContexts": [
{
"icon": {"width": 24, "height": 24, "url": sentry_logo},
"content": {"type": "label", "label": {"value": "Linked Issues"}},
| {"golden_diff": "diff --git a/src/sentry/integrations/jira/endpoints/descriptor.py b/src/sentry/integrations/jira/endpoints/descriptor.py\n--- a/src/sentry/integrations/jira/endpoints/descriptor.py\n+++ b/src/sentry/integrations/jira/endpoints/descriptor.py\n@@ -60,7 +60,7 @@\n \"name\": {\"value\": \"Configure Sentry Add-on\"},\n \"key\": \"configure-sentry\",\n },\n- \"jiraIssueGlances\": [\n+ \"jiraIssueContexts\": [\n {\n \"icon\": {\"width\": 24, \"height\": 24, \"url\": sentry_logo},\n \"content\": {\"type\": \"label\", \"label\": {\"value\": \"Linked Issues\"}},\n", "issue": "Jira deprecation of glance panels\nNotice from Atlassian Support team about glance panel deprecation. \r\n\r\nAC:\r\n- Review the deprecation plan\r\n- Build a recommendation based on how we're impacted. If minor development work is required, complete that with this ticket. If significant work is required, notify EM/PM to share impact and come up with next steps together.\r\n\r\nEmail from Atlassian:\r\n```\r\nHope you are having a good day!\r\nAs part of this deprecation notice (https://developer.atlassian.com/cloud/jira/platform/changelog/#CHANGE-897), we are reaching out because we have identified that your app, \u201cSentry,\u201d will be affected by the deprecation of glance panels. \r\nThis was initially scheduled for the 6th of October, but we have delayed it until the 30th of November.\r\nThe jiraIssueGlances and jira:issueGlance modules in Forge (https://developer.atlassian.com/platform/forge/manifest-reference/modules/jira-issue-glance/) and Connect (https://developer.atlassian.com/cloud/jira/platform/modules/issue-glance/) are being deprecated and replaced with the issueContext module. \r\nWe recommend transitioning from the glance panel to the new issue context module before the 30th of November. \r\nPlease note, we will not be extending this deprecation date as we announced it on the 30th of March.\r\nLet me know if you need any further assistance,\r\nAhmud\r\nProduct Manager-Jira Cloud\r\n```\n", "before_files": [{"content": "from django.conf import settings\nfrom django.urls import reverse\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\n\nfrom sentry.api.api_publish_status import ApiPublishStatus\nfrom sentry.api.base import Endpoint, control_silo_endpoint\nfrom sentry.utils.assets import get_frontend_app_asset_url\nfrom sentry.utils.http import absolute_uri\n\nfrom .. import JIRA_KEY\n\nscopes = [\"read\", \"write\", \"act_as_user\"]\n# For Jira, only approved apps can use the access_email_addresses scope\n# This scope allows Sentry to use the email endpoint (https://developer.atlassian.com/cloud/jira/platform/rest/v3/#api-rest-api-3-user-email-get)\n# We use the email with Jira 2-way sync in order to match the user\nif settings.JIRA_USE_EMAIL_SCOPE:\n scopes.append(\"access_email_addresses\")\n\n\n@control_silo_endpoint\nclass JiraDescriptorEndpoint(Endpoint):\n publish_status = {\n \"GET\": ApiPublishStatus.UNKNOWN,\n }\n \"\"\"\n Provides the metadata needed by Jira to setup an instance of the Sentry integration within Jira.\n Only used by on-prem orgs and devs setting up local instances of the integration. 
(Sentry SAAS\n already has an established, official instance of the Sentry integration registered with Jira.)\n \"\"\"\n\n authentication_classes = ()\n permission_classes = ()\n\n def get(self, request: Request) -> Response:\n sentry_logo = absolute_uri(\n get_frontend_app_asset_url(\"sentry\", \"entrypoints/logo-sentry.svg\")\n )\n return self.respond(\n {\n \"name\": \"Sentry\",\n \"description\": \"Connect your Sentry organization to one or more of your Jira cloud instances. Get started streamlining your bug-squashing workflow by allowing your Sentry and Jira instances to work together.\",\n \"key\": JIRA_KEY,\n \"baseUrl\": absolute_uri(),\n \"vendor\": {\"name\": \"Sentry\", \"url\": \"https://sentry.io\"},\n \"authentication\": {\"type\": \"jwt\"},\n \"lifecycle\": {\n \"installed\": \"/extensions/jira/installed/\",\n \"uninstalled\": \"/extensions/jira/uninstalled/\",\n },\n \"apiVersion\": 1,\n \"modules\": {\n \"postInstallPage\": {\n \"url\": \"/extensions/jira/ui-hook/\",\n \"name\": {\"value\": \"Configure Sentry Add-on\"},\n \"key\": \"post-install-sentry\",\n },\n \"configurePage\": {\n \"url\": \"/extensions/jira/ui-hook/\",\n \"name\": {\"value\": \"Configure Sentry Add-on\"},\n \"key\": \"configure-sentry\",\n },\n \"jiraIssueGlances\": [\n {\n \"icon\": {\"width\": 24, \"height\": 24, \"url\": sentry_logo},\n \"content\": {\"type\": \"label\", \"label\": {\"value\": \"Linked Issues\"}},\n \"target\": {\n \"type\": \"web_panel\",\n \"url\": \"/extensions/jira/issue/{issue.key}/\",\n },\n \"name\": {\"value\": \"Sentry \"},\n \"key\": \"sentry-issues-glance\",\n }\n ],\n \"webhooks\": [\n {\n \"event\": \"jira:issue_updated\",\n \"url\": reverse(\"sentry-extensions-jira-issue-updated\"),\n \"excludeBody\": False,\n }\n ],\n },\n \"apiMigrations\": {\"gdpr\": True, \"context-qsh\": True, \"signed-install\": True},\n \"scopes\": scopes,\n }\n )\n", "path": "src/sentry/integrations/jira/endpoints/descriptor.py"}]} | 1,785 | 172 |
gh_patches_debug_21950 | rasdani/github-patches | git_diff | cornellius-gp__gpytorch-1670 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug] The Added Loss term for InducingKernel seems flipped in sign?
# 🐛 Bug
<!-- A clear and concise description of what the bug is. -->
```
def loss(self, *params):
prior_covar = self.prior_dist.lazy_covariance_matrix
variational_covar = self.variational_dist.lazy_covariance_matrix
diag = prior_covar.diag() - variational_covar.diag()
shape = prior_covar.shape[:-1]
noise_diag = self.likelihood._shaped_noise_covar(shape, *params).diag()
return 0.5 * (diag / noise_diag).sum()
```
This is the current code for InducingPointKernelAddedLossTerm.loss
From what I see, this "loss term" is added into the mll that is returned by the `ExactMarginalLogLikelihood` class. This in itself is misleading as the loss is usually the negative of the mll.
Moreover, the variational negative loss used to evaluate inducing points is given below

As can be seen, the above is the expression for the pseudo-mll that is maximized when optimizing the inducing points. In this expression, the `InducingPointKernelAddedLossTerm` component has the opposite sign to the value that is currently being added into the loss.
This is quite likely a significant bug. Please fix (just invert the sign of `diag` above)
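A minimal sketch of the sign flip described above (illustrative only, not a reviewed patch; it simply mirrors the existing `loss` body with the subtraction inverted):
```python
def loss(self, *params):
    prior_covar = self.prior_dist.lazy_covariance_matrix
    variational_covar = self.variational_dist.lazy_covariance_matrix
    # Sign inverted relative to the current code, so the added term
    # penalises rather than rewards the trace of (K_XX - Q_XX).
    diag = variational_covar.diag() - prior_covar.diag()
    shape = prior_covar.shape[:-1]
    noise_diag = self.likelihood._shaped_noise_covar(shape, *params).diag()
    return 0.5 * (diag / noise_diag).sum()
```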
</issue>
<code>
[start of gpytorch/mlls/inducing_point_kernel_added_loss_term.py]
1 #!/usr/bin/env python3
2
3 from .added_loss_term import AddedLossTerm
4
5
6 class InducingPointKernelAddedLossTerm(AddedLossTerm):
7 def __init__(self, variational_dist, prior_dist, likelihood):
8 self.prior_dist = prior_dist
9 self.variational_dist = variational_dist
10 self.likelihood = likelihood
11
12 def loss(self, *params):
13 prior_covar = self.prior_dist.lazy_covariance_matrix
14 variational_covar = self.variational_dist.lazy_covariance_matrix
15 diag = prior_covar.diag() - variational_covar.diag()
16 shape = prior_covar.shape[:-1]
17 noise_diag = self.likelihood._shaped_noise_covar(shape, *params).diag()
18 return 0.5 * (diag / noise_diag).sum()
19
[end of gpytorch/mlls/inducing_point_kernel_added_loss_term.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gpytorch/mlls/inducing_point_kernel_added_loss_term.py b/gpytorch/mlls/inducing_point_kernel_added_loss_term.py
--- a/gpytorch/mlls/inducing_point_kernel_added_loss_term.py
+++ b/gpytorch/mlls/inducing_point_kernel_added_loss_term.py
@@ -4,7 +4,7 @@
class InducingPointKernelAddedLossTerm(AddedLossTerm):
- def __init__(self, variational_dist, prior_dist, likelihood):
+ def __init__(self, prior_dist, variational_dist, likelihood):
self.prior_dist = prior_dist
self.variational_dist = variational_dist
self.likelihood = likelihood
@@ -12,7 +12,7 @@
def loss(self, *params):
prior_covar = self.prior_dist.lazy_covariance_matrix
variational_covar = self.variational_dist.lazy_covariance_matrix
- diag = prior_covar.diag() - variational_covar.diag()
+ diag = variational_covar.diag() - prior_covar.diag()
shape = prior_covar.shape[:-1]
noise_diag = self.likelihood._shaped_noise_covar(shape, *params).diag()
return 0.5 * (diag / noise_diag).sum()
| {"golden_diff": "diff --git a/gpytorch/mlls/inducing_point_kernel_added_loss_term.py b/gpytorch/mlls/inducing_point_kernel_added_loss_term.py\n--- a/gpytorch/mlls/inducing_point_kernel_added_loss_term.py\n+++ b/gpytorch/mlls/inducing_point_kernel_added_loss_term.py\n@@ -4,7 +4,7 @@\n \n \n class InducingPointKernelAddedLossTerm(AddedLossTerm):\n- def __init__(self, variational_dist, prior_dist, likelihood):\n+ def __init__(self, prior_dist, variational_dist, likelihood):\n self.prior_dist = prior_dist\n self.variational_dist = variational_dist\n self.likelihood = likelihood\n@@ -12,7 +12,7 @@\n def loss(self, *params):\n prior_covar = self.prior_dist.lazy_covariance_matrix\n variational_covar = self.variational_dist.lazy_covariance_matrix\n- diag = prior_covar.diag() - variational_covar.diag()\n+ diag = variational_covar.diag() - prior_covar.diag()\n shape = prior_covar.shape[:-1]\n noise_diag = self.likelihood._shaped_noise_covar(shape, *params).diag()\n return 0.5 * (diag / noise_diag).sum()\n", "issue": "[Bug] The Added Loss term for InducingKernel seems flipped in sign?\n# \ud83d\udc1b Bug\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\n```\r\n def loss(self, *params):\r\n prior_covar = self.prior_dist.lazy_covariance_matrix\r\n variational_covar = self.variational_dist.lazy_covariance_matrix\r\n diag = prior_covar.diag() - variational_covar.diag()\r\n shape = prior_covar.shape[:-1]\r\n noise_diag = self.likelihood._shaped_noise_covar(shape, *params).diag()\r\n return 0.5 * (diag / noise_diag).sum()\r\n```\r\nThis is the current code for InducingPointKernelAddedLossTerm.loss\r\n\r\nFrom what I see, this \"loss term\" is added into the mll that is returned by the `ExactMarginalLogLikelihood` class. This in itself is misleading as the loss is usually the negative of the mll.\r\n\r\nMoreover, the variational negative loss used to evaluate inducing points is given below\r\n\r\n\r\n\r\nAs can be seen, the above is the expression for the pseudo-mll that is maximized when optimizing the inducing points. in this, the component of `InducingPointKernelAddedLossTerm` is negative to the value that is being added into the loss.\r\n\r\nThis is quite likely a significant bug. Please fix (just invert the sign of `diag` above)\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nfrom .added_loss_term import AddedLossTerm\n\n\nclass InducingPointKernelAddedLossTerm(AddedLossTerm):\n def __init__(self, variational_dist, prior_dist, likelihood):\n self.prior_dist = prior_dist\n self.variational_dist = variational_dist\n self.likelihood = likelihood\n\n def loss(self, *params):\n prior_covar = self.prior_dist.lazy_covariance_matrix\n variational_covar = self.variational_dist.lazy_covariance_matrix\n diag = prior_covar.diag() - variational_covar.diag()\n shape = prior_covar.shape[:-1]\n noise_diag = self.likelihood._shaped_noise_covar(shape, *params).diag()\n return 0.5 * (diag / noise_diag).sum()\n", "path": "gpytorch/mlls/inducing_point_kernel_added_loss_term.py"}]} | 1,111 | 285 |
gh_patches_debug_5537 | rasdani/github-patches | git_diff | nextcloud__appstore-619 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Verify email addresses after E-Mail change
When a user changes their email address, it should be verified. allauth provides some views for that which may or may not be useful. It is unclear whether email addresses are currently verified at signup, but the email-change flow should use the same mechanism.
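One possible approach, sketched below, routes the new address through allauth instead of saving it directly. This is only an illustration: `update_primary_email` is a hypothetical helper, and it assumes allauth's `EmailAddress.change()` sends a confirmation mail when `confirm=True`.
```python
from allauth.account.models import EmailAddress

def update_primary_email(request, user, new_email):
    # Hypothetical helper, not part of the current codebase.
    email = EmailAddress.objects.get_primary(user=user)
    # change() swaps the address and, with confirm=True, queues a
    # verification email, mirroring the signup verification flow.
    email.change(request, new_email, confirm=True)
```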
</issue>
<code>
[start of nextcloudappstore/user/views.py]
1 from allauth.account.models import EmailAddress
2 from allauth.account.views import PasswordChangeView
3 from django.contrib import messages
4 from django.contrib.auth.mixins import LoginRequiredMixin
5 from django.urls import reverse_lazy
6 from django.shortcuts import redirect, render, get_object_or_404
7 from django.urls import reverse
8 from django.views.generic import TemplateView
9 from django.views.generic import UpdateView
10
11 from nextcloudappstore.core.models import App
12 from nextcloudappstore.user.forms import DeleteAccountForm, AccountForm
13
14
15 class TransferAppsView(LoginRequiredMixin, TemplateView):
16 template_name = 'user/transfer-apps.html'
17
18 def post(self, request, pk):
19 app = get_object_or_404(App, pk=pk, owner=self.request.user)
20 app.ownership_transfer_enabled = not app.ownership_transfer_enabled
21 app.save()
22 return redirect(reverse('user:account-transfer-apps'))
23
24 def get_context_data(self, **kwargs):
25 context = super().get_context_data(**kwargs)
26 context['apps'] = App.objects.filter(owner=self.request.user)
27 context['acc_page'] = 'account-transfer-apps'
28 return context
29
30
31 class ChangeLanguageView(LoginRequiredMixin, TemplateView):
32 template_name = 'user/set-language.html'
33
34 def get_context_data(self, **kwargs):
35 context = super().get_context_data(**kwargs)
36 context['acc_page'] = 'account-change-language'
37 return context
38
39
40 class DeleteAccountView(LoginRequiredMixin, TemplateView):
41 template_name = 'user/delete-account.html'
42
43 def get_context_data(self, **kwargs):
44 context = super().get_context_data(**kwargs)
45 context['form'] = DeleteAccountForm()
46 context['acc_page'] = 'delete-account'
47 return context
48
49 def post(self, request, *args, **kwargs):
50 form = DeleteAccountForm(request.POST, user=request.user)
51 if form.is_valid():
52 request.user.delete()
53 return redirect(reverse_lazy('home'))
54 else:
55 return render(request, self.template_name, {'form': form})
56
57
58 class AccountView(LoginRequiredMixin, UpdateView):
59 """Display and allow changing of the user's name."""
60
61 template_name = 'user/account.html'
62 template_name_suffix = ''
63 form_class = AccountForm
64 success_url = reverse_lazy('user:account')
65
66 def get_context_data(self, **kwargs):
67 context = super().get_context_data(**kwargs)
68 context['acc_page'] = 'account'
69 return context
70
71 def form_valid(self, form):
72 email = EmailAddress.objects.get_primary(user=self.request.user)
73 email.email = form.cleaned_data['email']
74 email.save()
75 messages.success(self.request, 'Account details saved.')
76 return super().form_valid(form)
77
78 def get_object(self, queryset=None):
79 return self.request.user
80
81
82 class PasswordView(LoginRequiredMixin, PasswordChangeView):
83 """Allow the user to change their password."""
84
85 template_name = 'user/password.html'
86 success_url = reverse_lazy('user:account-password')
87
88 def get_context_data(self, **kwargs):
89 context = super().get_context_data(**kwargs)
90 context['acc_page'] = 'password'
91 return context
92
93
94 class APITokenView(LoginRequiredMixin, TemplateView):
95 """Display the user's API token, and allow it to be regenerated."""
96
97 template_name = 'user/api-token.html'
98
99 def get_context_data(self, **kwargs):
100 context = super().get_context_data(**kwargs)
101 context['acc_page'] = 'api-token'
102 return context
103
[end of nextcloudappstore/user/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nextcloudappstore/user/views.py b/nextcloudappstore/user/views.py
--- a/nextcloudappstore/user/views.py
+++ b/nextcloudappstore/user/views.py
@@ -70,8 +70,7 @@
def form_valid(self, form):
email = EmailAddress.objects.get_primary(user=self.request.user)
- email.email = form.cleaned_data['email']
- email.save()
+ email.change(None, form.cleaned_data['email'])
messages.success(self.request, 'Account details saved.')
return super().form_valid(form)
| {"golden_diff": "diff --git a/nextcloudappstore/user/views.py b/nextcloudappstore/user/views.py\n--- a/nextcloudappstore/user/views.py\n+++ b/nextcloudappstore/user/views.py\n@@ -70,8 +70,7 @@\n \n def form_valid(self, form):\n email = EmailAddress.objects.get_primary(user=self.request.user)\n- email.email = form.cleaned_data['email']\n- email.save()\n+ email.change(None, form.cleaned_data['email'])\n messages.success(self.request, 'Account details saved.')\n return super().form_valid(form)\n", "issue": "Verify email addresses after E-Mail change\nWhen a user changes their email address, it should be verified. allauth provides some views for that which may or may not be useful. Unsure whether email addresses currently are verified at signup, but it would be appropriate for it to use the same mechanism.\n\n", "before_files": [{"content": "from allauth.account.models import EmailAddress\nfrom allauth.account.views import PasswordChangeView\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.urls import reverse_lazy\nfrom django.shortcuts import redirect, render, get_object_or_404\nfrom django.urls import reverse\nfrom django.views.generic import TemplateView\nfrom django.views.generic import UpdateView\n\nfrom nextcloudappstore.core.models import App\nfrom nextcloudappstore.user.forms import DeleteAccountForm, AccountForm\n\n\nclass TransferAppsView(LoginRequiredMixin, TemplateView):\n template_name = 'user/transfer-apps.html'\n\n def post(self, request, pk):\n app = get_object_or_404(App, pk=pk, owner=self.request.user)\n app.ownership_transfer_enabled = not app.ownership_transfer_enabled\n app.save()\n return redirect(reverse('user:account-transfer-apps'))\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['apps'] = App.objects.filter(owner=self.request.user)\n context['acc_page'] = 'account-transfer-apps'\n return context\n\n\nclass ChangeLanguageView(LoginRequiredMixin, TemplateView):\n template_name = 'user/set-language.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['acc_page'] = 'account-change-language'\n return context\n\n\nclass DeleteAccountView(LoginRequiredMixin, TemplateView):\n template_name = 'user/delete-account.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['form'] = DeleteAccountForm()\n context['acc_page'] = 'delete-account'\n return context\n\n def post(self, request, *args, **kwargs):\n form = DeleteAccountForm(request.POST, user=request.user)\n if form.is_valid():\n request.user.delete()\n return redirect(reverse_lazy('home'))\n else:\n return render(request, self.template_name, {'form': form})\n\n\nclass AccountView(LoginRequiredMixin, UpdateView):\n \"\"\"Display and allow changing of the user's name.\"\"\"\n\n template_name = 'user/account.html'\n template_name_suffix = ''\n form_class = AccountForm\n success_url = reverse_lazy('user:account')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['acc_page'] = 'account'\n return context\n\n def form_valid(self, form):\n email = EmailAddress.objects.get_primary(user=self.request.user)\n email.email = form.cleaned_data['email']\n email.save()\n messages.success(self.request, 'Account details saved.')\n return super().form_valid(form)\n\n def get_object(self, queryset=None):\n return self.request.user\n\n\nclass PasswordView(LoginRequiredMixin, 
PasswordChangeView):\n \"\"\"Allow the user to change their password.\"\"\"\n\n template_name = 'user/password.html'\n success_url = reverse_lazy('user:account-password')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['acc_page'] = 'password'\n return context\n\n\nclass APITokenView(LoginRequiredMixin, TemplateView):\n \"\"\"Display the user's API token, and allow it to be regenerated.\"\"\"\n\n template_name = 'user/api-token.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['acc_page'] = 'api-token'\n return context\n", "path": "nextcloudappstore/user/views.py"}]} | 1,556 | 126 |
gh_patches_debug_11270 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-2886 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
E2520 raised for mutually exclusive properties when using Conditions
### CloudFormation Lint Version
cfn-lint 0.80.2
### What operating system are you using?
Windows
### Describe the bug
[E2520](https://github.com/aws-cloudformation/cfn-lint/blob/main/docs/rules.md#E2520) is raised for mutually exclusive properties when using Conditions
```
cfn-lint -t ./template.yaml
E2520 Property SourceSecurityGroupId should NOT exist with CidrIp for Resources/Ingress/Properties
.\template.yaml:13:7
```
The same template was passing prior to `0.79.11`. PR [2875](https://github.com/aws-cloudformation/cfn-lint/pull/2875) seems to be the cause.
```
> cfn-lint --version
cfn-lint 0.79.10
> cfn-lint -t ./template.yaml
> echo $lastexitcode
0
```
### Expected behavior
E2520 is ignored for mutually exclusive properties that are guarded by the same Condition via the Fn::If intrinsic function, which ensures that only one of the properties has a value.
### Reproduction template
```yaml
AWSTemplateFormatVersion: 2010-09-09
Parameters:
pCidr:
Type: String
Default: ''
Conditions:
cIsCidr: !Not [!Equals [!Ref pCidr, '']]
Resources:
Ingress:
Type: AWS::EC2::SecurityGroupIngress
Properties:
SourceSecurityGroupId: !If [ cIsCidr, !Ref AWS::NoValue, sg-abc12345 ]
CidrIp: !If [ cIsCidr, !Ref pCidr, !Ref AWS::NoValue ]
IpProtocol: "-1"
GroupId: sg-abc1234567
```
</issue>
<code>
[start of src/cfnlint/rules/resources/properties/Exclusive.py]
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 import cfnlint.helpers
6 from cfnlint.data import AdditionalSpecs
7 from cfnlint.rules import CloudFormationLintRule, RuleMatch
8
9
10 class Exclusive(CloudFormationLintRule):
11 """Check Properties Resource Configuration"""
12
13 id = "E2520"
14 shortdesc = "Check Properties that are mutually exclusive"
15 description = (
16 "Making sure CloudFormation properties that are exclusive are not defined"
17 )
18 source_url = "https://github.com/aws-cloudformation/cfn-python-lint"
19 tags = ["resources"]
20
21 def __init__(self):
22 """Init"""
23 super().__init__()
24 exclusivespec = cfnlint.helpers.load_resource(AdditionalSpecs, "Exclusive.json")
25 self.resource_types_specs = exclusivespec["ResourceTypes"]
26 self.property_types_specs = exclusivespec["PropertyTypes"]
27 for resource_type_spec in self.resource_types_specs:
28 self.resource_property_types.append(resource_type_spec)
29 for property_type_spec in self.property_types_specs:
30 self.resource_sub_property_types.append(property_type_spec)
31
32 def check(self, properties, exclusions, path, cfn):
33 """Check itself"""
34 matches = []
35 for p_value, p_path in properties.items_safe(path[:]):
36 for k, v in exclusions.items():
37 property_sets = cfn.get_object_without_conditions(p_value, [k] + v)
38 for property_set in property_sets:
39 obj = property_set["Object"].clean()
40 for prop in obj:
41 if prop in exclusions:
42 for excl_property in exclusions[prop]:
43 if excl_property in obj:
44 if property_set["Scenario"] is None:
45 message = "Property {0} should NOT exist with {1} for {2}"
46 matches.append(
47 RuleMatch(
48 p_path + [prop],
49 message.format(
50 excl_property,
51 prop,
52 "/".join(map(str, p_path)),
53 ),
54 )
55 )
56 else:
57 scenario_text = " and ".join(
58 [
59 f'when condition "{k}" is {v}'
60 for (k, v) in property_set[
61 "Scenario"
62 ].items()
63 ]
64 )
65 message = "Property {0} should NOT exist with {1} {2} for {3}"
66 matches.append(
67 RuleMatch(
68 p_path + [prop],
69 message.format(
70 excl_property,
71 prop,
72 scenario_text,
73 "/".join(map(str, p_path)),
74 ),
75 )
76 )
77
78 return matches
79
80 def match_resource_sub_properties(self, properties, property_type, path, cfn):
81 """Match for sub properties"""
82 matches = []
83
84 exclusions = self.property_types_specs.get(property_type, {})
85 matches.extend(self.check(properties, exclusions, path, cfn))
86
87 return matches
88
89 def match_resource_properties(self, properties, resource_type, path, cfn):
90 """Check CloudFormation Properties"""
91 matches = []
92
93 exclusions = self.resource_types_specs.get(resource_type, {})
94 matches.extend(self.check(properties, exclusions, path, cfn))
95
96 return matches
97
[end of src/cfnlint/rules/resources/properties/Exclusive.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cfnlint/rules/resources/properties/Exclusive.py b/src/cfnlint/rules/resources/properties/Exclusive.py
--- a/src/cfnlint/rules/resources/properties/Exclusive.py
+++ b/src/cfnlint/rules/resources/properties/Exclusive.py
@@ -38,7 +38,7 @@
for property_set in property_sets:
obj = property_set["Object"].clean()
for prop in obj:
- if prop in exclusions:
+ if prop == k:
for excl_property in exclusions[prop]:
if excl_property in obj:
if property_set["Scenario"] is None:
| {"golden_diff": "diff --git a/src/cfnlint/rules/resources/properties/Exclusive.py b/src/cfnlint/rules/resources/properties/Exclusive.py\n--- a/src/cfnlint/rules/resources/properties/Exclusive.py\n+++ b/src/cfnlint/rules/resources/properties/Exclusive.py\n@@ -38,7 +38,7 @@\n for property_set in property_sets:\n obj = property_set[\"Object\"].clean()\n for prop in obj:\n- if prop in exclusions:\n+ if prop == k:\n for excl_property in exclusions[prop]:\n if excl_property in obj:\n if property_set[\"Scenario\"] is None:\n", "issue": "E2520 raised for mutually exclusive properties when using Conditions\n### CloudFormation Lint Version\n\ncfn-lint 0.80.2\n\n### What operating system are you using?\n\nWindows\n\n### Describe the bug\n\n[E2520](https://github.com/aws-cloudformation/cfn-lint/blob/main/docs/rules.md#E2520) is raised for mutually exclusive properties when using Conditions\r\n\r\n```\r\ncfn-lint -t ./template.yaml\r\nE2520 Property SourceSecurityGroupId should NOT exist with CidrIp for Resources/Ingress/Properties\r\n.\\template.yaml:13:7\r\n```\r\n\r\nThe same was working prior `0.79.11`. PR [2875](https://github.com/aws-cloudformation/cfn-lint/pull/2875) seems to be the cause.\r\n\r\n```\r\n> cfn-lint --version \r\ncfn-lint 0.79.10\r\n> cfn-lint -t ./template.yaml \r\n> echo $lastexitcode\r\n0\r\n```\n\n### Expected behavior\n\nE2520 is ignored for mutually exclusive properties that use the same Condition and Fn::If intrinsic function which makes sure only one of the properties has value.\n\n### Reproduction template\n\n```yaml\r\nAWSTemplateFormatVersion: 2010-09-09\r\nParameters:\r\n pCidr:\r\n Type: String\r\n Default: ''\r\nConditions:\r\n cIsCidr: !Not [!Equals [!Ref pCidr, '']]\r\nResources:\r\n Ingress:\r\n Type: AWS::EC2::SecurityGroupIngress\r\n Properties:\r\n SourceSecurityGroupId: !If [ cIsCidr, !Ref AWS::NoValue, sg-abc12345 ]\r\n CidrIp: !If [ cIsCidr, !Ref pCidr, !Ref AWS::NoValue ]\r\n IpProtocol: \"-1\"\r\n GroupId: sg-abc1234567\r\n```\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport cfnlint.helpers\nfrom cfnlint.data import AdditionalSpecs\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass Exclusive(CloudFormationLintRule):\n \"\"\"Check Properties Resource Configuration\"\"\"\n\n id = \"E2520\"\n shortdesc = \"Check Properties that are mutually exclusive\"\n description = (\n \"Making sure CloudFormation properties that are exclusive are not defined\"\n )\n source_url = \"https://github.com/aws-cloudformation/cfn-python-lint\"\n tags = [\"resources\"]\n\n def __init__(self):\n \"\"\"Init\"\"\"\n super().__init__()\n exclusivespec = cfnlint.helpers.load_resource(AdditionalSpecs, \"Exclusive.json\")\n self.resource_types_specs = exclusivespec[\"ResourceTypes\"]\n self.property_types_specs = exclusivespec[\"PropertyTypes\"]\n for resource_type_spec in self.resource_types_specs:\n self.resource_property_types.append(resource_type_spec)\n for property_type_spec in self.property_types_specs:\n self.resource_sub_property_types.append(property_type_spec)\n\n def check(self, properties, exclusions, path, cfn):\n \"\"\"Check itself\"\"\"\n matches = []\n for p_value, p_path in properties.items_safe(path[:]):\n for k, v in exclusions.items():\n property_sets = cfn.get_object_without_conditions(p_value, [k] + v)\n for property_set in property_sets:\n obj = property_set[\"Object\"].clean()\n for prop in obj:\n if prop in exclusions:\n for excl_property in exclusions[prop]:\n if excl_property in obj:\n if property_set[\"Scenario\"] is None:\n message = \"Property {0} should NOT exist with {1} for {2}\"\n matches.append(\n RuleMatch(\n p_path + [prop],\n message.format(\n excl_property,\n prop,\n \"/\".join(map(str, p_path)),\n ),\n )\n )\n else:\n scenario_text = \" and \".join(\n [\n f'when condition \"{k}\" is {v}'\n for (k, v) in property_set[\n \"Scenario\"\n ].items()\n ]\n )\n message = \"Property {0} should NOT exist with {1} {2} for {3}\"\n matches.append(\n RuleMatch(\n p_path + [prop],\n message.format(\n excl_property,\n prop,\n scenario_text,\n \"/\".join(map(str, p_path)),\n ),\n )\n )\n\n return matches\n\n def match_resource_sub_properties(self, properties, property_type, path, cfn):\n \"\"\"Match for sub properties\"\"\"\n matches = []\n\n exclusions = self.property_types_specs.get(property_type, {})\n matches.extend(self.check(properties, exclusions, path, cfn))\n\n return matches\n\n def match_resource_properties(self, properties, resource_type, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n\n exclusions = self.resource_types_specs.get(resource_type, {})\n matches.extend(self.check(properties, exclusions, path, cfn))\n\n return matches\n", "path": "src/cfnlint/rules/resources/properties/Exclusive.py"}]} | 1,849 | 134 |
gh_patches_debug_7736 | rasdani/github-patches | git_diff | google__flax-2492 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve documentation for `Dropout` and `rngs` argument in `linen.Module.apply()`
Here is an example of `Dropout` in a model definition:
https://github.com/google/flax/blob/d068512a932da3e05b822790a591bac391aeab36/examples/nlp_seq/models.py#L211
Here is the `apply()`, where `rngs` is passed in
https://github.com/google/flax/blob/d068512a932da3e05b822790a591bac391aeab36/examples/nlp_seq/train.py#L206-L207
However, the `rngs` argument is not very clearly explained in `apply()`
https://github.com/google/flax/blob/615f40be774e7ed66fd344e8291ac0d48ebcef7d/flax/linen/module.py#L749
The `rngs` seems to be passed to `flax/core/scope.py`
Here is the code for `Dropout` (linen)
https://github.com/google/flax/blob/9b4807840c5cb26ef5e29028e3558d404aee00a0/flax/linen/stochastic.py#L56-L57
Here is the code for `make_rng()`
https://github.com/google/flax/blob/615f40be774e7ed66fd344e8291ac0d48ebcef7d/flax/core/scope.py#L441-L447
The documentation for `rngs` in `apply()` should include (a pointer to) a list of the possible rng names.
The documentation for `Dropout` should also mention how to pass in the rng via `apply()`, rather than passing it in directly like `Dropout()(x, rng=rng)`.
It should probably also mention that `make_rng()` folds in the rng, so each dropout layer will use a different rng when there are multiple dropout layers.
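For reference, the usage the docs could spell out looks roughly like the sketch below (the `MLP` module, shapes, and key names other than `'dropout'` are placeholders, not Flax requirements):
```python
import jax
import jax.numpy as jnp
import flax.linen as nn

class MLP(nn.Module):
    @nn.compact
    def __call__(self, x, train: bool):
        x = nn.Dense(128)(x)
        # Dropout draws its mask from the rng stream named 'dropout'.
        x = nn.Dropout(rate=0.5, deterministic=not train)(x)
        return nn.Dense(1)(x)

params_rng, dropout_rng = jax.random.split(jax.random.PRNGKey(0))
model = MLP()
x = jnp.ones((4, 16))
variables = model.init({'params': params_rng, 'dropout': dropout_rng}, x, train=True)
# At apply() time the 'dropout' stream must be supplied explicitly;
# make_rng('dropout') folds it in per call site, so each layer differs.
y = model.apply(variables, x, train=True, rngs={'dropout': dropout_rng})
```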
</issue>
<code>
[start of flax/linen/stochastic.py]
1 # Copyright 2022 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Stochastic modules."""
16
17 from typing import Optional, Sequence
18
19 from flax.linen.module import compact
20 from flax.linen.module import merge_param
21 from flax.linen.module import Module
22 from jax import lax
23 from jax import random
24 import jax.numpy as jnp
25
26
27 class Dropout(Module):
28 """Create a dropout layer.
29
30 Attributes:
31 rate: the dropout probability. (_not_ the keep rate!)
32 broadcast_dims: dimensions that will share the same dropout mask
33 deterministic: if false the inputs are scaled by `1 / (1 - rate)` and
34 masked, whereas if true, no mask is applied and the inputs are returned
35 as is.
36 """
37 rate: float
38 broadcast_dims: Sequence[int] = ()
39 deterministic: Optional[bool] = None
40
41 @compact
42 def __call__(self, inputs, deterministic: Optional[bool] = None):
43 """Applies a random dropout mask to the input.
44
45 Args:
46 inputs: the inputs that should be randomly masked.
47 deterministic: if false the inputs are scaled by `1 / (1 - rate)` and
48 masked, whereas if true, no mask is applied and the inputs are returned
49 as is.
50
51 Returns:
52 The masked inputs reweighted to preserve mean.
53 """
54 deterministic = merge_param(
55 'deterministic', self.deterministic, deterministic)
56 if self.rate == 0.:
57 return inputs
58 # Prevent gradient NaNs in 1.0 edge-case.
59 if self.rate == 1.0:
60 return jnp.zeros_like(inputs)
61 keep_prob = 1. - self.rate
62 if deterministic:
63 return inputs
64 else:
65 rng = self.make_rng('dropout')
66 broadcast_shape = list(inputs.shape)
67 for dim in self.broadcast_dims:
68 broadcast_shape[dim] = 1
69 mask = random.bernoulli(rng, p=keep_prob, shape=broadcast_shape)
70 mask = jnp.broadcast_to(mask, inputs.shape)
71 return lax.select(mask, inputs / keep_prob, jnp.zeros_like(inputs))
72
[end of flax/linen/stochastic.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flax/linen/stochastic.py b/flax/linen/stochastic.py
--- a/flax/linen/stochastic.py
+++ b/flax/linen/stochastic.py
@@ -27,6 +27,11 @@
class Dropout(Module):
"""Create a dropout layer.
+ Note: When using :meth:`Module.apply() <flax.linen.Module.apply>`, make sure
+ to include an RNG seed named `'dropout'`. For example::
+
+ model.apply({'params': params}, inputs=inputs, train=True, rngs={'dropout': dropout_rng})`
+
Attributes:
rate: the dropout probability. (_not_ the keep rate!)
broadcast_dims: dimensions that will share the same dropout mask
| {"golden_diff": "diff --git a/flax/linen/stochastic.py b/flax/linen/stochastic.py\n--- a/flax/linen/stochastic.py\n+++ b/flax/linen/stochastic.py\n@@ -27,6 +27,11 @@\n class Dropout(Module):\n \"\"\"Create a dropout layer.\n \n+ Note: When using :meth:`Module.apply() <flax.linen.Module.apply>`, make sure\n+ to include an RNG seed named `'dropout'`. For example::\n+ \n+ model.apply({'params': params}, inputs=inputs, train=True, rngs={'dropout': dropout_rng})`\n+\n Attributes:\n rate: the dropout probability. (_not_ the keep rate!)\n broadcast_dims: dimensions that will share the same dropout mask\n", "issue": "Improve documentation for `Dropout` and `rngs` argument in `linen.Module.apply()`\n\r\nHere is an example of `Dropout` in a model definition:\r\nhttps://github.com/google/flax/blob/d068512a932da3e05b822790a591bac391aeab36/examples/nlp_seq/models.py#L211\r\n\r\nHere is the `apply()`, where `rngs` is passed in\r\nhttps://github.com/google/flax/blob/d068512a932da3e05b822790a591bac391aeab36/examples/nlp_seq/train.py#L206-L207\r\nHowever the `rng` is not very clearly explained in `apply()`\r\nhttps://github.com/google/flax/blob/615f40be774e7ed66fd344e8291ac0d48ebcef7d/flax/linen/module.py#L749\r\nThe `rngs` seems to be passed to `flax/core/scope.py`\r\nHere is the code for `Dropout` (linen)\r\nhttps://github.com/google/flax/blob/9b4807840c5cb26ef5e29028e3558d404aee00a0/flax/linen/stochastic.py#L56-L57\r\nHere is the code for `make_rng()`\r\nhttps://github.com/google/flax/blob/615f40be774e7ed66fd344e8291ac0d48ebcef7d/flax/core/scope.py#L441-L447\r\n\r\nThe documentation for `rngs` in `apply()` should have a (pointer to) list of names of possible rngs\r\nAnd documentation for `Dropout` should mention how to pass in rng using `apply()`, without directly passing in like `Dropout()(x,rng=rng)`.\r\nAlso probably need to mention the `make_rng()` `fold_in` the rng so each dropout layer will use different rng if there are multiple dropout layers.\n", "before_files": [{"content": "# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Stochastic modules.\"\"\"\n\nfrom typing import Optional, Sequence\n\nfrom flax.linen.module import compact\nfrom flax.linen.module import merge_param\nfrom flax.linen.module import Module\nfrom jax import lax\nfrom jax import random\nimport jax.numpy as jnp\n\n\nclass Dropout(Module):\n \"\"\"Create a dropout layer.\n\n Attributes:\n rate: the dropout probability. 
(_not_ the keep rate!)\n broadcast_dims: dimensions that will share the same dropout mask\n deterministic: if false the inputs are scaled by `1 / (1 - rate)` and\n masked, whereas if true, no mask is applied and the inputs are returned\n as is.\n \"\"\"\n rate: float\n broadcast_dims: Sequence[int] = ()\n deterministic: Optional[bool] = None\n\n @compact\n def __call__(self, inputs, deterministic: Optional[bool] = None):\n \"\"\"Applies a random dropout mask to the input.\n\n Args:\n inputs: the inputs that should be randomly masked.\n deterministic: if false the inputs are scaled by `1 / (1 - rate)` and\n masked, whereas if true, no mask is applied and the inputs are returned\n as is.\n\n Returns:\n The masked inputs reweighted to preserve mean.\n \"\"\"\n deterministic = merge_param(\n 'deterministic', self.deterministic, deterministic)\n if self.rate == 0.:\n return inputs\n # Prevent gradient NaNs in 1.0 edge-case.\n if self.rate == 1.0:\n return jnp.zeros_like(inputs)\n keep_prob = 1. - self.rate\n if deterministic:\n return inputs\n else:\n rng = self.make_rng('dropout')\n broadcast_shape = list(inputs.shape)\n for dim in self.broadcast_dims:\n broadcast_shape[dim] = 1\n mask = random.bernoulli(rng, p=keep_prob, shape=broadcast_shape)\n mask = jnp.broadcast_to(mask, inputs.shape)\n return lax.select(mask, inputs / keep_prob, jnp.zeros_like(inputs))\n", "path": "flax/linen/stochastic.py"}]} | 1,751 | 168 |
gh_patches_debug_18985 | rasdani/github-patches | git_diff | oppia__oppia-6309 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
InteractiveMap interaction: in the rule editor, clicks on the map are not displayed correctly
Create an exploration with a map interaction. Add a rule and click on the map to choose the point the rule applies to. A marker should appear where you click, but it does not.
Save and close the rule, then re-open it. The marker is now displayed correctly.
Create a new rule. Before being clicked on, the map should be blank, but instead it displays the position of the marker from the previous rule.
</issue>
<code>
[start of extensions/dependencies/dependencies_config.py]
1 # coding: utf-8
2 #
3 # Copyright 2014 The Oppia Authors. All Rights Reserved.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS-IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 """Configuration for JavaScript library dependencies."""
18
19
20 # A dict mapping dependency ids to the Angular module names they
21 # should insert when the Angular app is first initialized.
22 DEPENDENCIES_TO_ANGULAR_MODULES_DICT = {
23 'codemirror': ['ui.codemirror'],
24 'google_maps': ['ui.map'],
25 'guppy': [],
26 'logic_proof': [],
27 'math_expressions': [],
28 'midijs': [],
29 'pencilcode': [],
30 'skulpt': [],
31 }
32
[end of extensions/dependencies/dependencies_config.py]
[start of extensions/interactions/InteractiveMap/InteractiveMap.py]
1 # coding: utf-8
2 #
3 # Copyright 2014 The Oppia Authors. All Rights Reserved.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, softwar
12 # distributed under the License is distributed on an "AS-IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 """Python configuration for InteractiveMap interaction."""
18
19 from extensions.interactions import base
20
21
22 class InteractiveMap(base.BaseInteraction):
23 """Interaction for pinpointing a location on a map."""
24
25 name = 'World Map'
26 description = 'Allows learners to specify a position on a world map.'
27 display_mode = base.DISPLAY_MODE_SUPPLEMENTAL
28 is_trainable = False
29 _dependency_ids = ['google_maps']
30 answer_type = 'CoordTwoDim'
31 instructions = 'Click on the map'
32 narrow_instructions = 'View map'
33 needs_summary = True
34 # There needs to be a way to pass marker location so that an answer can be
35 # conveyed meaningfully to the learner. Once this issue is fixed,
36 # InteractiveMap interaction can be supported by the solution feature.
37 can_have_solution = False
38 show_generic_submit_button = False
39
40 _customization_arg_specs = [{
41 'name': 'latitude',
42 'description': 'Starting center latitude (-90 to 90)',
43 'schema': {
44 'type': 'float',
45 'validators': [{
46 'id': 'is_at_least',
47 'min_value': -90.0,
48 }, {
49 'id': 'is_at_most',
50 'max_value': 90.0,
51 }]
52 },
53 'default_value': 0.0,
54 }, {
55 'name': 'longitude',
56 'description': 'Starting center longitude (-180 to 180)',
57 'schema': {
58 'type': 'float',
59 'validators': [{
60 'id': 'is_at_least',
61 'min_value': -180.0,
62 }, {
63 'id': 'is_at_most',
64 'max_value': 180.0,
65 }]
66 },
67 'default_value': 0.0,
68 }, {
69 'name': 'zoom',
70 'description': 'Starting zoom level (0 shows the entire earth)',
71 'schema': {
72 'type': 'float',
73 },
74 'default_value': 0.0,
75 }]
76
77 _answer_visualization_specs = [{
78 # Table with answer counts for top N answers.
79 'id': 'FrequencyTable',
80 'options': {
81 'column_headers': ['Answer', 'Count'],
82 'title': 'Top 10 answers',
83 },
84 'calculation_id': 'Top10AnswerFrequencies',
85 'addressed_info_is_supported': True,
86 }]
87
[end of extensions/interactions/InteractiveMap/InteractiveMap.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/extensions/dependencies/dependencies_config.py b/extensions/dependencies/dependencies_config.py
--- a/extensions/dependencies/dependencies_config.py
+++ b/extensions/dependencies/dependencies_config.py
@@ -21,7 +21,7 @@
# should insert when the Angular app is first initialized.
DEPENDENCIES_TO_ANGULAR_MODULES_DICT = {
'codemirror': ['ui.codemirror'],
- 'google_maps': ['ui.map'],
+ 'ui_leaflet': ['ui-leaflet'],
'guppy': [],
'logic_proof': [],
'math_expressions': [],
diff --git a/extensions/interactions/InteractiveMap/InteractiveMap.py b/extensions/interactions/InteractiveMap/InteractiveMap.py
--- a/extensions/interactions/InteractiveMap/InteractiveMap.py
+++ b/extensions/interactions/InteractiveMap/InteractiveMap.py
@@ -26,7 +26,7 @@
description = 'Allows learners to specify a position on a world map.'
display_mode = base.DISPLAY_MODE_SUPPLEMENTAL
is_trainable = False
- _dependency_ids = ['google_maps']
+ _dependency_ids = ['ui_leaflet']
answer_type = 'CoordTwoDim'
instructions = 'Click on the map'
narrow_instructions = 'View map'
| {"golden_diff": "diff --git a/extensions/dependencies/dependencies_config.py b/extensions/dependencies/dependencies_config.py\n--- a/extensions/dependencies/dependencies_config.py\n+++ b/extensions/dependencies/dependencies_config.py\n@@ -21,7 +21,7 @@\n # should insert when the Angular app is first initialized.\n DEPENDENCIES_TO_ANGULAR_MODULES_DICT = {\n 'codemirror': ['ui.codemirror'],\n- 'google_maps': ['ui.map'],\n+ 'ui_leaflet': ['ui-leaflet'],\n 'guppy': [],\n 'logic_proof': [],\n 'math_expressions': [],\ndiff --git a/extensions/interactions/InteractiveMap/InteractiveMap.py b/extensions/interactions/InteractiveMap/InteractiveMap.py\n--- a/extensions/interactions/InteractiveMap/InteractiveMap.py\n+++ b/extensions/interactions/InteractiveMap/InteractiveMap.py\n@@ -26,7 +26,7 @@\n description = 'Allows learners to specify a position on a world map.'\n display_mode = base.DISPLAY_MODE_SUPPLEMENTAL\n is_trainable = False\n- _dependency_ids = ['google_maps']\n+ _dependency_ids = ['ui_leaflet']\n answer_type = 'CoordTwoDim'\n instructions = 'Click on the map'\n narrow_instructions = 'View map'\n", "issue": "InteractiveMap interaction: in the rule editor, clicks on the map are not displayed correctly\nCreate an exploration with a map interaction. Add a rule and click on the map to choose the point the rule applies to. A marker should appear where you click, but it does not.\n\nSave and close the rule, then re-open it. The marker is now displayed correctly.\n\nCreate a new rule. Before being clicked on the map should be blank, but instead it displays the position of the marker from the previous rule.\n\n", "before_files": [{"content": "# coding: utf-8\n#\n# Copyright 2014 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Configuration for JavaScript library dependencies.\"\"\"\n\n\n# A dict mapping dependency ids to the Angular module names they\n# should insert when the Angular app is first initialized.\nDEPENDENCIES_TO_ANGULAR_MODULES_DICT = {\n 'codemirror': ['ui.codemirror'],\n 'google_maps': ['ui.map'],\n 'guppy': [],\n 'logic_proof': [],\n 'math_expressions': [],\n 'midijs': [],\n 'pencilcode': [],\n 'skulpt': [],\n}\n", "path": "extensions/dependencies/dependencies_config.py"}, {"content": "# coding: utf-8\n#\n# Copyright 2014 The Oppia Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, softwar\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Python configuration for InteractiveMap interaction.\"\"\"\n\nfrom extensions.interactions import base\n\n\nclass InteractiveMap(base.BaseInteraction):\n \"\"\"Interaction for pinpointing a location on a map.\"\"\"\n\n name = 'World Map'\n description = 'Allows learners to specify a position on a world map.'\n display_mode = base.DISPLAY_MODE_SUPPLEMENTAL\n is_trainable = False\n _dependency_ids = ['google_maps']\n answer_type = 'CoordTwoDim'\n instructions = 'Click on the map'\n narrow_instructions = 'View map'\n needs_summary = True\n # There needs to be a way to pass marker location so that an answer can be\n # conveyed meaningfully to the learner. Once this issue is fixed,\n # InteractiveMap interaction can be supported by the solution feature.\n can_have_solution = False\n show_generic_submit_button = False\n\n _customization_arg_specs = [{\n 'name': 'latitude',\n 'description': 'Starting center latitude (-90 to 90)',\n 'schema': {\n 'type': 'float',\n 'validators': [{\n 'id': 'is_at_least',\n 'min_value': -90.0,\n }, {\n 'id': 'is_at_most',\n 'max_value': 90.0,\n }]\n },\n 'default_value': 0.0,\n }, {\n 'name': 'longitude',\n 'description': 'Starting center longitude (-180 to 180)',\n 'schema': {\n 'type': 'float',\n 'validators': [{\n 'id': 'is_at_least',\n 'min_value': -180.0,\n }, {\n 'id': 'is_at_most',\n 'max_value': 180.0,\n }]\n },\n 'default_value': 0.0,\n }, {\n 'name': 'zoom',\n 'description': 'Starting zoom level (0 shows the entire earth)',\n 'schema': {\n 'type': 'float',\n },\n 'default_value': 0.0,\n }]\n\n _answer_visualization_specs = [{\n # Table with answer counts for top N answers.\n 'id': 'FrequencyTable',\n 'options': {\n 'column_headers': ['Answer', 'Count'],\n 'title': 'Top 10 answers',\n },\n 'calculation_id': 'Top10AnswerFrequencies',\n 'addressed_info_is_supported': True,\n }]\n", "path": "extensions/interactions/InteractiveMap/InteractiveMap.py"}]} | 1,813 | 277 |
gh_patches_debug_34246 | rasdani/github-patches | git_diff | uccser__cs-unplugged-318 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support multiple page resources
Currently the create image function for a resource returns a single image. Instead it should return a list of images, which would allow multiple-page resources.
For example, for 4 pages of a single page resource the content would be:
```
Image output: [A]
Final document: A, A, A, A
```
For 4 pages of a three page resource the content would be:
```
Image output: [A, B, C], [A, B, C], [A, B, C], [A, B, C]
Final document: A, B, C, A, B, C, A, B, C, A, B, C
```
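A sketch of what the per-copy generation could look like if generators returned a list (illustrative only; it follows the existing `generate_resource_image` helper and omits the paper-size resizing step for brevity):
```python
import base64
import importlib
from io import BytesIO

def generate_resource_image(get_request, resource, module_path):
    resource_image_generator = importlib.import_module(module_path)
    raw_images = resource_image_generator.resource_image(get_request, resource)
    # Accept both single-page (one PIL Image) and multi-page (list) generators.
    if not isinstance(raw_images, list):
        raw_images = [raw_images]
    images = []
    for image in raw_images:
        image_buffer = BytesIO()
        image.save(image_buffer, format="PNG")
        images.append(base64.b64encode(image_buffer.getvalue()))
    return images  # one base64 string per page of this copy
```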
</issue>
<code>
[start of csunplugged/resources/views/generate_resource_pdf.py]
1 """Module for generating custom resource PDFs."""
2
3 from django.http import HttpResponse
4 from django.template.loader import render_to_string
5 from django.contrib.staticfiles import finders
6 from django.conf import settings
7 from PIL import Image
8 from io import BytesIO
9 import importlib
10 import base64
11
12 RESPONSE_CONTENT_DISPOSITION = 'attachment; filename="{filename}.pdf"'
13 MM_TO_PIXEL_RATIO = 3.78
14
15
16 def generate_resource_pdf(request, resource, module_path):
17 """Return a response containing a generated PDF resource.
18
19 Args:
20 request: HTTP request object
21 resource: Object of resource data.
22 module_path: Path to module for generating resource.
23
24 Returns:
25 HTTP response containing generated resource PDF.
26 """
27 # TODO: Weasyprint handling in production
28 import environ
29 env = environ.Env(
30 DJANGO_PRODUCTION=(bool),
31 )
32 if env("DJANGO_PRODUCTION"):
33 return HttpResponse("<html><body>PDF generation is currently not supported in production.</body></html>")
34 else:
35 from weasyprint import HTML, CSS
36 context = dict()
37 get_request = request.GET
38 context["paper_size"] = get_request["paper_size"]
39 context["resource"] = resource
40 context["header_text"] = get_request["header_text"]
41
42 resource_image_generator = importlib.import_module(module_path)
43 filename = "{} ({})".format(resource.name, resource_image_generator.subtitle(get_request, resource))
44 context["filename"] = filename
45
46 num_copies = range(0, int(get_request["copies"]))
47 context["resource_images"] = []
48 for copy in num_copies:
49 context["resource_images"].append(
50 generate_resource_image(get_request, resource, module_path)
51 )
52
53 pdf_html = render_to_string("resources/base-resource-pdf.html", context)
54 html = HTML(string=pdf_html, base_url=settings.STATIC_ROOT)
55 css_file = finders.find("css/print-resource-pdf.css")
56 css_string = open(css_file, encoding="UTF-8").read()
57 base_css = CSS(string=css_string)
58 pdf_file = html.write_pdf(stylesheets=[base_css])
59
60 response = HttpResponse(pdf_file, content_type="application/pdf")
61 response["Content-Disposition"] = RESPONSE_CONTENT_DISPOSITION.format(filename=filename)
62 return response
63
64
65 def generate_resource_image(get_request, resource, module_path):
66 """Retrieve image from resource generator and resize to size.
67
68 Args:
69 get_request: HTTP request object
70 resource: Object of resource data.
71 module_path: Path to module for generating resource.
72
73 Returns:
74 Base64 string of a generated resource image.
75 """
76 # Get image from resource image creator
77 resource_image_generator = importlib.import_module(module_path)
78 image = resource_image_generator.resource_image(get_request, resource)
79
80 # Resize image to reduce file size
81 if get_request["paper_size"] == "a4":
82 max_pixel_height = 267 * MM_TO_PIXEL_RATIO
83 elif get_request["paper_size"] == "letter":
84 max_pixel_height = 249 * MM_TO_PIXEL_RATIO
85 (width, height) = image.size
86 if height > max_pixel_height:
87 ratio = max_pixel_height / height
88 width *= ratio
89 height *= ratio
90 image = image.resize((int(width), int(height)), Image.ANTIALIAS)
91
92 # Save image to buffer
93 image_buffer = BytesIO()
94 image.save(image_buffer, format="PNG")
95
96 # Return base64 of image
97 return base64.b64encode(image_buffer.getvalue())
98
[end of csunplugged/resources/views/generate_resource_pdf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/csunplugged/resources/views/generate_resource_pdf.py b/csunplugged/resources/views/generate_resource_pdf.py
--- a/csunplugged/resources/views/generate_resource_pdf.py
+++ b/csunplugged/resources/views/generate_resource_pdf.py
@@ -63,7 +63,9 @@
def generate_resource_image(get_request, resource, module_path):
- """Retrieve image from resource generator and resize to size.
+ """Retrieve image(s) for one copy of resource from resource generator.
+
+ Images are resized to size.
Args:
get_request: HTTP request object
@@ -71,27 +73,33 @@
module_path: Path to module for generating resource.
Returns:
- Base64 string of a generated resource image.
+ List of Base64 strings of a generated resource images for one copy.
"""
- # Get image from resource image creator
+ # Get images from resource image creator
resource_image_generator = importlib.import_module(module_path)
- image = resource_image_generator.resource_image(get_request, resource)
+ raw_images = resource_image_generator.resource_image(get_request, resource)
+ if not isinstance(raw_images, list):
+ raw_images = [raw_images]
- # Resize image to reduce file size
+ # Resize images to reduce file size
if get_request["paper_size"] == "a4":
max_pixel_height = 267 * MM_TO_PIXEL_RATIO
elif get_request["paper_size"] == "letter":
max_pixel_height = 249 * MM_TO_PIXEL_RATIO
- (width, height) = image.size
- if height > max_pixel_height:
- ratio = max_pixel_height / height
- width *= ratio
- height *= ratio
- image = image.resize((int(width), int(height)), Image.ANTIALIAS)
-
- # Save image to buffer
- image_buffer = BytesIO()
- image.save(image_buffer, format="PNG")
-
- # Return base64 of image
- return base64.b64encode(image_buffer.getvalue())
+
+ images = []
+ for image in raw_images:
+ (width, height) = image.size
+ if height > max_pixel_height:
+ ratio = max_pixel_height / height
+ width *= ratio
+ height *= ratio
+ image = image.resize((int(width), int(height)), Image.ANTIALIAS)
+
+ # Save image to buffer
+ image_buffer = BytesIO()
+ image.save(image_buffer, format="PNG")
+ # Add base64 of image to list of images
+ images.append(base64.b64encode(image_buffer.getvalue()))
+
+ return images
| {"golden_diff": "diff --git a/csunplugged/resources/views/generate_resource_pdf.py b/csunplugged/resources/views/generate_resource_pdf.py\n--- a/csunplugged/resources/views/generate_resource_pdf.py\n+++ b/csunplugged/resources/views/generate_resource_pdf.py\n@@ -63,7 +63,9 @@\n \n \n def generate_resource_image(get_request, resource, module_path):\n- \"\"\"Retrieve image from resource generator and resize to size.\n+ \"\"\"Retrieve image(s) for one copy of resource from resource generator.\n+\n+ Images are resized to size.\n \n Args:\n get_request: HTTP request object\n@@ -71,27 +73,33 @@\n module_path: Path to module for generating resource.\n \n Returns:\n- Base64 string of a generated resource image.\n+ List of Base64 strings of a generated resource images for one copy.\n \"\"\"\n- # Get image from resource image creator\n+ # Get images from resource image creator\n resource_image_generator = importlib.import_module(module_path)\n- image = resource_image_generator.resource_image(get_request, resource)\n+ raw_images = resource_image_generator.resource_image(get_request, resource)\n+ if not isinstance(raw_images, list):\n+ raw_images = [raw_images]\n \n- # Resize image to reduce file size\n+ # Resize images to reduce file size\n if get_request[\"paper_size\"] == \"a4\":\n max_pixel_height = 267 * MM_TO_PIXEL_RATIO\n elif get_request[\"paper_size\"] == \"letter\":\n max_pixel_height = 249 * MM_TO_PIXEL_RATIO\n- (width, height) = image.size\n- if height > max_pixel_height:\n- ratio = max_pixel_height / height\n- width *= ratio\n- height *= ratio\n- image = image.resize((int(width), int(height)), Image.ANTIALIAS)\n-\n- # Save image to buffer\n- image_buffer = BytesIO()\n- image.save(image_buffer, format=\"PNG\")\n-\n- # Return base64 of image\n- return base64.b64encode(image_buffer.getvalue())\n+\n+ images = []\n+ for image in raw_images:\n+ (width, height) = image.size\n+ if height > max_pixel_height:\n+ ratio = max_pixel_height / height\n+ width *= ratio\n+ height *= ratio\n+ image = image.resize((int(width), int(height)), Image.ANTIALIAS)\n+\n+ # Save image to buffer\n+ image_buffer = BytesIO()\n+ image.save(image_buffer, format=\"PNG\")\n+ # Add base64 of image to list of images\n+ images.append(base64.b64encode(image_buffer.getvalue()))\n+\n+ return images\n", "issue": "Support multiple page resources\nCurrently the create image function for a resource return a single image. 
Instead it should return a list of images, which would allow multiple page resources.\r\n\r\nFor example, for 4 pages of a single page resource the content would be:\r\n\r\n```\r\nImage output: [A]\r\nFinal document: A, A, A, A\r\n```\r\n\r\nFor 4 pages of a three page resource the content would be:\r\n\r\n```\r\nImage output: [A, B, C], [A, B, C], [A, B, C], [A, B, C] \r\nFinal document: A, B, C, A, B, C, A, B, C, A, B, C\r\n```\n", "before_files": [{"content": "\"\"\"Module for generating custom resource PDFs.\"\"\"\n\nfrom django.http import HttpResponse\nfrom django.template.loader import render_to_string\nfrom django.contrib.staticfiles import finders\nfrom django.conf import settings\nfrom PIL import Image\nfrom io import BytesIO\nimport importlib\nimport base64\n\nRESPONSE_CONTENT_DISPOSITION = 'attachment; filename=\"{filename}.pdf\"'\nMM_TO_PIXEL_RATIO = 3.78\n\n\ndef generate_resource_pdf(request, resource, module_path):\n \"\"\"Return a response containing a generated PDF resource.\n\n Args:\n request: HTTP request object\n resource: Object of resource data.\n module_path: Path to module for generating resource.\n\n Returns:\n HTTP response containing generated resource PDF.\n \"\"\"\n # TODO: Weasyprint handling in production\n import environ\n env = environ.Env(\n DJANGO_PRODUCTION=(bool),\n )\n if env(\"DJANGO_PRODUCTION\"):\n return HttpResponse(\"<html><body>PDF generation is currently not supported in production.</body></html>\")\n else:\n from weasyprint import HTML, CSS\n context = dict()\n get_request = request.GET\n context[\"paper_size\"] = get_request[\"paper_size\"]\n context[\"resource\"] = resource\n context[\"header_text\"] = get_request[\"header_text\"]\n\n resource_image_generator = importlib.import_module(module_path)\n filename = \"{} ({})\".format(resource.name, resource_image_generator.subtitle(get_request, resource))\n context[\"filename\"] = filename\n\n num_copies = range(0, int(get_request[\"copies\"]))\n context[\"resource_images\"] = []\n for copy in num_copies:\n context[\"resource_images\"].append(\n generate_resource_image(get_request, resource, module_path)\n )\n\n pdf_html = render_to_string(\"resources/base-resource-pdf.html\", context)\n html = HTML(string=pdf_html, base_url=settings.STATIC_ROOT)\n css_file = finders.find(\"css/print-resource-pdf.css\")\n css_string = open(css_file, encoding=\"UTF-8\").read()\n base_css = CSS(string=css_string)\n pdf_file = html.write_pdf(stylesheets=[base_css])\n\n response = HttpResponse(pdf_file, content_type=\"application/pdf\")\n response[\"Content-Disposition\"] = RESPONSE_CONTENT_DISPOSITION.format(filename=filename)\n return response\n\n\ndef generate_resource_image(get_request, resource, module_path):\n \"\"\"Retrieve image from resource generator and resize to size.\n\n Args:\n get_request: HTTP request object\n resource: Object of resource data.\n module_path: Path to module for generating resource.\n\n Returns:\n Base64 string of a generated resource image.\n \"\"\"\n # Get image from resource image creator\n resource_image_generator = importlib.import_module(module_path)\n image = resource_image_generator.resource_image(get_request, resource)\n\n # Resize image to reduce file size\n if get_request[\"paper_size\"] == \"a4\":\n max_pixel_height = 267 * MM_TO_PIXEL_RATIO\n elif get_request[\"paper_size\"] == \"letter\":\n max_pixel_height = 249 * MM_TO_PIXEL_RATIO\n (width, height) = image.size\n if height > max_pixel_height:\n ratio = max_pixel_height / height\n width *= ratio\n height *= ratio\n 
image = image.resize((int(width), int(height)), Image.ANTIALIAS)\n\n # Save image to buffer\n image_buffer = BytesIO()\n image.save(image_buffer, format=\"PNG\")\n\n # Return base64 of image\n return base64.b64encode(image_buffer.getvalue())\n", "path": "csunplugged/resources/views/generate_resource_pdf.py"}]} | 1,649 | 600 |
gh_patches_debug_34688 | rasdani/github-patches | git_diff | tensorflow__addons-271 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Automate Build Process
Currently we have no automated process for building Addons across Python versions and operating systems. Going forward we'll want this process to be automated, but it may be challenging for us to start builds without access to the Google internal tooling.
We could conceivably use Travis... but if we can keep consistent CI that would be ideal.
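One piece of that automation could live in `setup.py` itself, so a CI job can request a dated nightly wheel per Python version. A minimal sketch, assuming a `--nightly` flag and a `tfa-nightly` package name purely for illustration:
```python
# Illustrative sketch only: the flag name, package name and base version are
# assumptions for the example, not a decided design.
import sys
from datetime import datetime

base_version = "0.4.0"  # assumed base version

if "--nightly" in sys.argv:
    sys.argv.remove("--nightly")
    project_name = "tfa-nightly"
    version = base_version + datetime.today().strftime("%Y%m%d")
else:
    project_name = "tensorflow-addons"
    version = base_version

print(project_name, version)
```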
</issue>
<code>
[start of setup.py]
1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """TensorFlow Addons
16
17 TensorFlow Addons is a repository of contributions that conform to
18 well-established API patterns,but implement new functionality not available in
19 core TensorFlow.TensorFlow natively supports a large number of operators,
20 layers, metrics, losses, and optimizers. However, in a fast movingfield like
21 ML, there are many interesting new developments that cannot be integrated into
22 core TensorFlow (because their broad applicability is not yet clear, or it is
23 mostly used by a smallersubset of the community).
24 """
25
26 from __future__ import absolute_import
27 from __future__ import division
28 from __future__ import print_function
29
30 import os
31
32 from setuptools import find_packages
33 from setuptools import setup
34 from setuptools.dist import Distribution
35
36 DOCLINES = __doc__.split('\n')
37
38 version = {}
39 base_dir = os.path.dirname(os.path.abspath(__file__))
40 with open(os.path.join(base_dir, "tensorflow_addons", "version.py")) as fp:
41 # yapf: disable
42 exec(fp.read(), version)
43 # yapf: enable
44
45 REQUIRED_PACKAGES = [
46 'six >= 1.10.0',
47 ]
48
49 project_name = 'tensorflow-addons'
50
51
52 class BinaryDistribution(Distribution):
53 """This class is needed in order to create OS specific wheels."""
54
55 def has_ext_modules(self):
56 return True
57
58
59 setup(
60 name=project_name,
61 version=version['__version__'],
62 description=DOCLINES[0],
63 long_description='\n'.join(DOCLINES[2:]),
64 author='Google Inc.',
65 author_email='[email protected]',
66 packages=find_packages(),
67 install_requires=REQUIRED_PACKAGES,
68 include_package_data=True,
69 zip_safe=False,
70 distclass=BinaryDistribution,
71 classifiers=[
72 'Development Status :: 4 - Beta',
73 'Intended Audience :: Developers',
74 'Intended Audience :: Education',
75 'Intended Audience :: Science/Research',
76 'License :: OSI Approved :: Apache Software License',
77 'Programming Language :: Python :: 2.7',
78 'Programming Language :: Python :: 3.4',
79 'Programming Language :: Python :: 3.5',
80 'Programming Language :: Python :: 3.6',
81 'Programming Language :: Python :: 3.7',
82 'Topic :: Scientific/Engineering :: Mathematics',
83 'Topic :: Software Development :: Libraries :: Python Modules',
84 'Topic :: Software Development :: Libraries',
85 ],
86 license='Apache 2.0',
87 keywords='tensorflow addons machine learning',
88 )
89
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -17,10 +17,10 @@
TensorFlow Addons is a repository of contributions that conform to
well-established API patterns,but implement new functionality not available in
core TensorFlow.TensorFlow natively supports a large number of operators,
-layers, metrics, losses, and optimizers. However, in a fast movingfield like
+layers, metrics, losses, and optimizers. However, in a fast moving field like
ML, there are many interesting new developments that cannot be integrated into
core TensorFlow (because their broad applicability is not yet clear, or it is
-mostly used by a smallersubset of the community).
+mostly used by a smaller subset of the community).
"""
from __future__ import absolute_import
@@ -28,7 +28,9 @@
from __future__ import print_function
import os
+import sys
+from datetime import datetime
from setuptools import find_packages
from setuptools import setup
from setuptools.dist import Distribution
@@ -46,7 +48,13 @@
'six >= 1.10.0',
]
-project_name = 'tensorflow-addons'
+if '--nightly' in sys.argv:
+ project_name = 'tfa-nightly'
+ nightly_idx = sys.argv.index('--nightly')
+ sys.argv.pop(nightly_idx)
+ version['__version__'] += datetime.strftime(datetime.today(), "%Y%m%d")
+else:
+ project_name = 'tensorflow-addons'
class BinaryDistribution(Distribution):
@@ -78,7 +86,6 @@
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
- 'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -17,10 +17,10 @@\n TensorFlow Addons is a repository of contributions that conform to\n well-established API patterns,but implement new functionality not available in\n core TensorFlow.TensorFlow natively supports a large number of operators,\n-layers, metrics, losses, and optimizers. However, in a fast movingfield like\n+layers, metrics, losses, and optimizers. However, in a fast moving field like\n ML, there are many interesting new developments that cannot be integrated into\n core TensorFlow (because their broad applicability is not yet clear, or it is\n-mostly used by a smallersubset of the community).\n+mostly used by a smaller subset of the community).\n \"\"\"\n \n from __future__ import absolute_import\n@@ -28,7 +28,9 @@\n from __future__ import print_function\n \n import os\n+import sys\n \n+from datetime import datetime\n from setuptools import find_packages\n from setuptools import setup\n from setuptools.dist import Distribution\n@@ -46,7 +48,13 @@\n 'six >= 1.10.0',\n ]\n \n-project_name = 'tensorflow-addons'\n+if '--nightly' in sys.argv:\n+ project_name = 'tfa-nightly'\n+ nightly_idx = sys.argv.index('--nightly')\n+ sys.argv.pop(nightly_idx)\n+ version['__version__'] += datetime.strftime(datetime.today(), \"%Y%m%d\")\n+else:\n+ project_name = 'tensorflow-addons'\n \n \n class BinaryDistribution(Distribution):\n@@ -78,7 +86,6 @@\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n- 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Libraries',\n", "issue": "Automate Build Process\nCurrently we have no automated process for building Addons across python version and operating systems. Going forward we'll want this process to be automated.. but it may be challenging for us to start builds without access to the Google internal tooling.\r\n\r\nWe could conceivably use Travis... but if we can keep consistent CI that would be ideal.\r\n\r\n\n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"TensorFlow Addons \n\nTensorFlow Addons is a repository of contributions that conform to\nwell-established API patterns,but implement new functionality not available in\ncore TensorFlow.TensorFlow natively supports a large number of operators,\nlayers, metrics, losses, and optimizers. 
However, in a fast movingfield like\nML, there are many interesting new developments that cannot be integrated into\ncore TensorFlow (because their broad applicability is not yet clear, or it is\nmostly used by a smallersubset of the community).\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.dist import Distribution\n\nDOCLINES = __doc__.split('\\n')\n\nversion = {}\nbase_dir = os.path.dirname(os.path.abspath(__file__))\nwith open(os.path.join(base_dir, \"tensorflow_addons\", \"version.py\")) as fp:\n # yapf: disable\n exec(fp.read(), version)\n # yapf: enable\n\nREQUIRED_PACKAGES = [\n 'six >= 1.10.0',\n]\n\nproject_name = 'tensorflow-addons'\n\n\nclass BinaryDistribution(Distribution):\n \"\"\"This class is needed in order to create OS specific wheels.\"\"\"\n\n def has_ext_modules(self):\n return True\n\n\nsetup(\n name=project_name,\n version=version['__version__'],\n description=DOCLINES[0],\n long_description='\\n'.join(DOCLINES[2:]),\n author='Google Inc.',\n author_email='[email protected]',\n packages=find_packages(),\n install_requires=REQUIRED_PACKAGES,\n include_package_data=True,\n zip_safe=False,\n distclass=BinaryDistribution,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Libraries',\n ],\n license='Apache 2.0',\n keywords='tensorflow addons machine learning',\n)\n", "path": "setup.py"}]} | 1,443 | 432 |
gh_patches_debug_35290 | rasdani/github-patches | git_diff | docarray__docarray-979 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bug(v2): relative file paths in url types
Passing relative file paths gives a validation error:
```python
from docarray import Image
url = 'Test/05978.jpg'
img = Image(url=url)
```
```text
Test/05978.jpg
Traceback (most recent call last):
File "/home/johannes/.config/JetBrains/PyCharmCE2022.3/scratches/scratch_116.py", line 12, in <module>
img = Image(url=url)
File "pydantic/main.py", line 342, in pydantic.main.BaseModel.__init__
pydantic.error_wrappers.ValidationError: 1 validation error for Image
url
unsupported operand type(s) for +: 'NoneType' and 'str' (type=type_error)
```
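For reference, the behavior a fix would presumably allow (treating a scheme-less value as a plain local file path) looks roughly like this; a sketch of intended usage, not of the current release:
```python
from docarray import Image

# Expected once relative paths validate: a missing scheme is read as a local
# file path instead of raising a ValidationError.
img = Image(url='Test/05978.jpg')
assert str(img.url) == 'Test/05978.jpg'
```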
</issue>
<code>
[start of docarray/typing/url/any_url.py]
1 from typing import TYPE_CHECKING, Type, TypeVar
2
3 from pydantic import AnyUrl as BaseAnyUrl
4 from pydantic import errors, parse_obj_as
5
6 from docarray.typing.abstract_type import AbstractType
7
8 if TYPE_CHECKING:
9 from pydantic.networks import Parts
10
11 from docarray.proto import NodeProto
12
13 T = TypeVar('T', bound='AnyUrl')
14
15
16 class AnyUrl(BaseAnyUrl, AbstractType):
17 host_required = (
18 False # turn off host requirement to allow passing of local paths as URL
19 )
20
21 def _to_node_protobuf(self) -> 'NodeProto':
22 """Convert Document into a NodeProto protobuf message. This function should
23 be called when the Document is nested into another Document that need to
24 be converted into a protobuf
25
26 :return: the nested item protobuf message
27 """
28 from docarray.proto import NodeProto
29
30 return NodeProto(any_url=str(self))
31
32 @classmethod
33 def validate_parts(cls, parts: 'Parts', validate_port: bool = True) -> 'Parts':
34 """
35 A method used to validate parts of a URL.
36 Our URLs should be able to function both in local and remote settings.
37 Therefore, we allow missing `scheme`, making it possible to pass a file path.
38 """
39 scheme = parts['scheme']
40 if scheme is None:
41 pass # allow missing scheme, unlike pydantic
42
43 elif cls.allowed_schemes and scheme.lower() not in cls.allowed_schemes:
44 raise errors.UrlSchemePermittedError(set(cls.allowed_schemes))
45
46 if validate_port:
47 cls._validate_port(parts['port'])
48
49 user = parts['user']
50 if cls.user_required and user is None:
51 raise errors.UrlUserInfoError()
52
53 return parts
54
55 @classmethod
56 def from_protobuf(cls: Type[T], pb_msg: 'str') -> T:
57 """
58 read url from a proto msg
59 :param pb_msg:
60 :return: url
61 """
62 return parse_obj_as(cls, pb_msg)
63
[end of docarray/typing/url/any_url.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docarray/typing/url/any_url.py b/docarray/typing/url/any_url.py
--- a/docarray/typing/url/any_url.py
+++ b/docarray/typing/url/any_url.py
@@ -1,4 +1,4 @@
-from typing import TYPE_CHECKING, Type, TypeVar
+from typing import TYPE_CHECKING, Optional, Type, TypeVar
from pydantic import AnyUrl as BaseAnyUrl
from pydantic import errors, parse_obj_as
@@ -34,11 +34,14 @@
"""
A method used to validate parts of a URL.
Our URLs should be able to function both in local and remote settings.
- Therefore, we allow missing `scheme`, making it possible to pass a file path.
+ Therefore, we allow missing `scheme`, making it possible to pass a file
+ path without prefix.
+ If `scheme` is missing, we assume it is a local file path.
"""
scheme = parts['scheme']
if scheme is None:
- pass # allow missing scheme, unlike pydantic
+ # allow missing scheme, unlike pydantic
+ pass
elif cls.allowed_schemes and scheme.lower() not in cls.allowed_schemes:
raise errors.UrlSchemePermittedError(set(cls.allowed_schemes))
@@ -52,6 +55,44 @@
return parts
+ @classmethod
+ def build(
+ cls,
+ *,
+ scheme: str,
+ user: Optional[str] = None,
+ password: Optional[str] = None,
+ host: str,
+ port: Optional[str] = None,
+ path: Optional[str] = None,
+ query: Optional[str] = None,
+ fragment: Optional[str] = None,
+ **_kwargs: str,
+ ) -> str:
+ """
+ Build a URL from its parts.
+ The only difference from the pydantic implementation is that we allow
+ missing `scheme`, making it possible to pass a file path without prefix.
+ """
+
+ # allow missing scheme, unlike pydantic
+ scheme_ = scheme if scheme is not None else ''
+ url = super().build(
+ scheme=scheme_,
+ user=user,
+ password=password,
+ host=host,
+ port=port,
+ path=path,
+ query=query,
+ fragment=fragment,
+ **_kwargs,
+ )
+ if scheme is None and url.startswith('://'):
+ # remove the `://` prefix, since scheme is missing
+ url = url[3:]
+ return url
+
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'str') -> T:
"""
| {"golden_diff": "diff --git a/docarray/typing/url/any_url.py b/docarray/typing/url/any_url.py\n--- a/docarray/typing/url/any_url.py\n+++ b/docarray/typing/url/any_url.py\n@@ -1,4 +1,4 @@\n-from typing import TYPE_CHECKING, Type, TypeVar\n+from typing import TYPE_CHECKING, Optional, Type, TypeVar\n \n from pydantic import AnyUrl as BaseAnyUrl\n from pydantic import errors, parse_obj_as\n@@ -34,11 +34,14 @@\n \"\"\"\n A method used to validate parts of a URL.\n Our URLs should be able to function both in local and remote settings.\n- Therefore, we allow missing `scheme`, making it possible to pass a file path.\n+ Therefore, we allow missing `scheme`, making it possible to pass a file\n+ path without prefix.\n+ If `scheme` is missing, we assume it is a local file path.\n \"\"\"\n scheme = parts['scheme']\n if scheme is None:\n- pass # allow missing scheme, unlike pydantic\n+ # allow missing scheme, unlike pydantic\n+ pass\n \n elif cls.allowed_schemes and scheme.lower() not in cls.allowed_schemes:\n raise errors.UrlSchemePermittedError(set(cls.allowed_schemes))\n@@ -52,6 +55,44 @@\n \n return parts\n \n+ @classmethod\n+ def build(\n+ cls,\n+ *,\n+ scheme: str,\n+ user: Optional[str] = None,\n+ password: Optional[str] = None,\n+ host: str,\n+ port: Optional[str] = None,\n+ path: Optional[str] = None,\n+ query: Optional[str] = None,\n+ fragment: Optional[str] = None,\n+ **_kwargs: str,\n+ ) -> str:\n+ \"\"\"\n+ Build a URL from its parts.\n+ The only difference from the pydantic implementation is that we allow\n+ missing `scheme`, making it possible to pass a file path without prefix.\n+ \"\"\"\n+\n+ # allow missing scheme, unlike pydantic\n+ scheme_ = scheme if scheme is not None else ''\n+ url = super().build(\n+ scheme=scheme_,\n+ user=user,\n+ password=password,\n+ host=host,\n+ port=port,\n+ path=path,\n+ query=query,\n+ fragment=fragment,\n+ **_kwargs,\n+ )\n+ if scheme is None and url.startswith('://'):\n+ # remove the `://` prefix, since scheme is missing\n+ url = url[3:]\n+ return url\n+\n @classmethod\n def from_protobuf(cls: Type[T], pb_msg: 'str') -> T:\n \"\"\"\n", "issue": "bug(v2): relative file paths in url types\nPassing relative file paths gives a validation error:\n\n```python\nfrom docarray import Image\n\nurl = 'Test/05978.jpg'\nimg = Image(url=url)\n```\n\n```text\nTest/05978.jpg\nTraceback (most recent call last):\n File \"/home/johannes/.config/JetBrains/PyCharmCE2022.3/scratches/scratch_116.py\", line 12, in <module>\n img = Image(url=url)\n File \"pydantic/main.py\", line 342, in pydantic.main.BaseModel.__init__\npydantic.error_wrappers.ValidationError: 1 validation error for Image\nurl\n unsupported operand type(s) for +: 'NoneType' and 'str' (type=type_error)\n```\n\n\n", "before_files": [{"content": "from typing import TYPE_CHECKING, Type, TypeVar\n\nfrom pydantic import AnyUrl as BaseAnyUrl\nfrom pydantic import errors, parse_obj_as\n\nfrom docarray.typing.abstract_type import AbstractType\n\nif TYPE_CHECKING:\n from pydantic.networks import Parts\n\n from docarray.proto import NodeProto\n\nT = TypeVar('T', bound='AnyUrl')\n\n\nclass AnyUrl(BaseAnyUrl, AbstractType):\n host_required = (\n False # turn off host requirement to allow passing of local paths as URL\n )\n\n def _to_node_protobuf(self) -> 'NodeProto':\n \"\"\"Convert Document into a NodeProto protobuf message. 
This function should\n be called when the Document is nested into another Document that need to\n be converted into a protobuf\n\n :return: the nested item protobuf message\n \"\"\"\n from docarray.proto import NodeProto\n\n return NodeProto(any_url=str(self))\n\n @classmethod\n def validate_parts(cls, parts: 'Parts', validate_port: bool = True) -> 'Parts':\n \"\"\"\n A method used to validate parts of a URL.\n Our URLs should be able to function both in local and remote settings.\n Therefore, we allow missing `scheme`, making it possible to pass a file path.\n \"\"\"\n scheme = parts['scheme']\n if scheme is None:\n pass # allow missing scheme, unlike pydantic\n\n elif cls.allowed_schemes and scheme.lower() not in cls.allowed_schemes:\n raise errors.UrlSchemePermittedError(set(cls.allowed_schemes))\n\n if validate_port:\n cls._validate_port(parts['port'])\n\n user = parts['user']\n if cls.user_required and user is None:\n raise errors.UrlUserInfoError()\n\n return parts\n\n @classmethod\n def from_protobuf(cls: Type[T], pb_msg: 'str') -> T:\n \"\"\"\n read url from a proto msg\n :param pb_msg:\n :return: url\n \"\"\"\n return parse_obj_as(cls, pb_msg)\n", "path": "docarray/typing/url/any_url.py"}]} | 1,291 | 610 |
gh_patches_debug_34183 | rasdani/github-patches | git_diff | sonic-net__sonic-mgmt-4352 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Investigate RDMA nightly run failures on 202012
**Description**
RDMA test runs on TD2 with 202012 are quite flaky. A different set of test failures is seen daily, and sometimes the run fails at pretest.
09/09 run skipped all tgen tests with the following reason
SKIPPED [1] /azp/agent/_work/27/s/tests/common/helpers/assertions.py:13: Port is not mapped to the expected DUT
</issue>
<code>
[start of ansible/library/testbed_vm_info.py]
1 #!/usr/bin/env python
2
3 import re
4 import yaml
5 import os
6 import traceback
7 import subprocess
8 import ipaddr as ipaddress
9 from operator import itemgetter
10 from itertools import groupby
11 from collections import defaultdict
12 import re
13
14 from ansible.parsing.dataloader import DataLoader
15 from ansible.inventory.manager import InventoryManager
16
17 DOCUMENTATION = '''
18 module: testbed_vm_info.py
19 Ansible_version_added: 2.0.0.2
20 short_description: Gather all related VMs info
21 Description:
22 When deploy testbed topology with VM connected to SONiC, gather neighbor VMs info for generating SONiC minigraph file
23 options:
24 base_vm: base vm name defined in testbed.csv for the deployed topology; required: True
25 topo: topology name defined in testbed.csv for the deployed topology; required: True
26 vm_file: the virtual machine file path ; default: 'veos'
27
28 Ansible_facts:
29 'neighbor_eosvm_mgmt': all VM hosts management IPs
30 'topoall': topology information
31
32 '''
33
34 EXAMPLES = '''
35 - name: gather vm information
36 testbed_vm_info: base_vm='VM0100' topo='t1' vm_file='veos'
37 '''
38
39 ### Here are the assumption/expectation of files to gather VM informations, if the file location or name changes, please modify it here
40 TOPO_PATH = 'vars/'
41 VM_INV_FILE = 'veos'
42
43
44 class TestbedVMFacts():
45 """
46 Retrieve testbed VMs management information that for a specified toplogy defined in testbed.csv
47
48 """
49
50 def __init__(self, toponame, vmbase, vmfile):
51 CLET_SUFFIX = "-clet"
52 toponame = re.sub(CLET_SUFFIX + "$", "", toponame)
53 self.topofile = TOPO_PATH+'topo_'+toponame +'.yml'
54 self.start_index = int(re.findall('VM(\d+)', vmbase)[0])
55 self.vmhosts = {}
56 self.vmfile = vmfile
57 self.inv_mgr = InventoryManager(loader=DataLoader(), sources=self.vmfile)
58 return
59
60
61 def get_neighbor_eos(self):
62 eos = {}
63 with open(self.topofile) as f:
64 vm_topology = yaml.load(f)
65 self.topoall = vm_topology
66 for vm in vm_topology['topology']['VMs']:
67 vm_index = int(vm_topology['topology']['VMs'][vm]['vm_offset'])+self.start_index
68 eos[vm] = vm_index
69 return eos
70
71
72 def main():
73 module = AnsibleModule(
74 argument_spec=dict(
75 base_vm=dict(required=True, type='str'),
76 topo=dict(required=True, type='str'),
77 vm_file=dict(default=VM_INV_FILE, type='str')
78 ),
79 supports_check_mode=True
80 )
81 m_args = module.params
82 topo_type = m_args['topo']
83 if 'ptf' in topo_type:
84 module.exit_json(ansible_facts={'neighbor_eosvm_mgmt': {}})
85 try:
86 vmsall = TestbedVMFacts(m_args['topo'], m_args['base_vm'], m_args['vm_file'])
87 neighbor_eos = vmsall.get_neighbor_eos()
88 for eos in neighbor_eos:
89 vmname = 'VM'+format(neighbor_eos[eos], '04d')
90 if vmname in vmsall.inv_mgr.hosts:
91 vmsall.vmhosts[eos] = vmsall.inv_mgr.get_host(vmname).get_vars()['ansible_host']
92 else:
93 err_msg = "cannot find the vm " + vmname + " in VM inventory file, please make sure you have enough VMs for the topology you are using"
94 module.fail_json(msg=err_msg)
95 module.exit_json(ansible_facts={'neighbor_eosvm_mgmt':vmsall.vmhosts, 'topoall': vmsall.topoall})
96 except (IOError, OSError):
97 module.fail_json(msg="Can not find file "+vmsall.topofile+" or "+m_args['vm_file']+" or "+VM_INV_FILE)
98 except Exception as e:
99 module.fail_json(msg=traceback.format_exc())
100
101 from ansible.module_utils.basic import *
102 if __name__ == "__main__":
103 main()
104
105
[end of ansible/library/testbed_vm_info.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ansible/library/testbed_vm_info.py b/ansible/library/testbed_vm_info.py
--- a/ansible/library/testbed_vm_info.py
+++ b/ansible/library/testbed_vm_info.py
@@ -39,6 +39,7 @@
### Here are the assumption/expectation of files to gather VM informations, if the file location or name changes, please modify it here
TOPO_PATH = 'vars/'
VM_INV_FILE = 'veos'
+TGEN_MGMT_NETWORK = '10.65.32.0/24'
class TestbedVMFacts():
@@ -51,7 +52,10 @@
CLET_SUFFIX = "-clet"
toponame = re.sub(CLET_SUFFIX + "$", "", toponame)
self.topofile = TOPO_PATH+'topo_'+toponame +'.yml'
- self.start_index = int(re.findall('VM(\d+)', vmbase)[0])
+ if vmbase != '':
+ self.start_index = int(re.findall('VM(\d+)', vmbase)[0])
+ else:
+ self.start_index = 0
self.vmhosts = {}
self.vmfile = vmfile
self.inv_mgr = InventoryManager(loader=DataLoader(), sources=self.vmfile)
@@ -85,9 +89,12 @@
try:
vmsall = TestbedVMFacts(m_args['topo'], m_args['base_vm'], m_args['vm_file'])
neighbor_eos = vmsall.get_neighbor_eos()
- for eos in neighbor_eos:
+ tgen_mgmt_ips = list(ipaddress.IPNetwork(unicode(TGEN_MGMT_NETWORK)))
+ for index, eos in enumerate(neighbor_eos):
vmname = 'VM'+format(neighbor_eos[eos], '04d')
- if vmname in vmsall.inv_mgr.hosts:
+ if 'tgen' in topo_type:
+ vmsall.vmhosts[eos] = str(tgen_mgmt_ips[index])
+ elif vmname in vmsall.inv_mgr.hosts:
vmsall.vmhosts[eos] = vmsall.inv_mgr.get_host(vmname).get_vars()['ansible_host']
else:
err_msg = "cannot find the vm " + vmname + " in VM inventory file, please make sure you have enough VMs for the topology you are using"
| {"golden_diff": "diff --git a/ansible/library/testbed_vm_info.py b/ansible/library/testbed_vm_info.py\n--- a/ansible/library/testbed_vm_info.py\n+++ b/ansible/library/testbed_vm_info.py\n@@ -39,6 +39,7 @@\n ### Here are the assumption/expectation of files to gather VM informations, if the file location or name changes, please modify it here\n TOPO_PATH = 'vars/'\n VM_INV_FILE = 'veos'\n+TGEN_MGMT_NETWORK = '10.65.32.0/24'\n \n \n class TestbedVMFacts():\n@@ -51,7 +52,10 @@\n CLET_SUFFIX = \"-clet\"\n toponame = re.sub(CLET_SUFFIX + \"$\", \"\", toponame)\n self.topofile = TOPO_PATH+'topo_'+toponame +'.yml'\n- self.start_index = int(re.findall('VM(\\d+)', vmbase)[0])\n+ if vmbase != '':\n+ self.start_index = int(re.findall('VM(\\d+)', vmbase)[0])\n+ else:\n+ self.start_index = 0\n self.vmhosts = {}\n self.vmfile = vmfile\n self.inv_mgr = InventoryManager(loader=DataLoader(), sources=self.vmfile)\n@@ -85,9 +89,12 @@\n try:\n vmsall = TestbedVMFacts(m_args['topo'], m_args['base_vm'], m_args['vm_file'])\n neighbor_eos = vmsall.get_neighbor_eos()\n- for eos in neighbor_eos:\n+ tgen_mgmt_ips = list(ipaddress.IPNetwork(unicode(TGEN_MGMT_NETWORK)))\n+ for index, eos in enumerate(neighbor_eos):\n vmname = 'VM'+format(neighbor_eos[eos], '04d')\n- if vmname in vmsall.inv_mgr.hosts:\n+ if 'tgen' in topo_type:\n+ vmsall.vmhosts[eos] = str(tgen_mgmt_ips[index])\n+ elif vmname in vmsall.inv_mgr.hosts:\n vmsall.vmhosts[eos] = vmsall.inv_mgr.get_host(vmname).get_vars()['ansible_host']\n else:\n err_msg = \"cannot find the vm \" + vmname + \" in VM inventory file, please make sure you have enough VMs for the topology you are using\"\n", "issue": "Investigate RDMA nightly run failures on 202012\n<!--\r\nIf you are reporting a new issue, make sure that we do not have any duplicates\r\nalready open. You can ensure this by searching the issue list for this\r\nrepository. If there is a duplicate, please close your issue and add a comment\r\nto the existing issue instead.\r\n\r\nIf you suspect your issue is a bug, please edit your issue description to\r\ninclude the BUG REPORT INFORMATION shown below. If you fail to provide this\r\ninformation within 7 days, we cannot debug your issue and will close it. We\r\nwill, however, reopen it if you later provide the information.\r\n\r\nFor more information about reporting issues, see\r\nhttps://github.com/Azure/SONiC/wiki#report-issues\r\n\r\n---------------------------------------------------\r\nGENERAL SUPPORT INFORMATION\r\n---------------------------------------------------\r\n\r\nThe GitHub issue tracker is for bug reports and feature requests.\r\nGeneral support can be found at the following locations:\r\n\r\n- SONiC Support Forums - https://groups.google.com/forum/#!forum/sonicproject\r\n\r\n---------------------------------------------------\r\nBUG REPORT INFORMATION\r\n---------------------------------------------------\r\nUse the commands below to provide key information from your environment:\r\nYou do NOT have to include this information if this is a FEATURE REQUEST\r\n-->\r\n\r\n**Description**\r\nRDMA test runs on TD2 with 202012 are quite flaky. 
Different set of test failures are seen daily and sometimes test fails at pretest\r\n09/09 run skipped all tgen tests with the following reason\r\nSKIPPED [1] /azp/agent/_work/27/s/tests/common/helpers/assertions.py:13: Port is not mapped to the expected DUT\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport re\nimport yaml\nimport os\nimport traceback\nimport subprocess\nimport ipaddr as ipaddress\nfrom operator import itemgetter\nfrom itertools import groupby\nfrom collections import defaultdict\nimport re\n\nfrom ansible.parsing.dataloader import DataLoader\nfrom ansible.inventory.manager import InventoryManager\n\nDOCUMENTATION = '''\nmodule: testbed_vm_info.py\nAnsible_version_added: 2.0.0.2\nshort_description: Gather all related VMs info\nDescription:\n When deploy testbed topology with VM connected to SONiC, gather neighbor VMs info for generating SONiC minigraph file\n options:\n base_vm: base vm name defined in testbed.csv for the deployed topology; required: True\n topo: topology name defined in testbed.csv for the deployed topology; required: True\n vm_file: the virtual machine file path ; default: 'veos'\n\nAnsible_facts:\n 'neighbor_eosvm_mgmt': all VM hosts management IPs\n 'topoall': topology information\n\n'''\n\nEXAMPLES = '''\n - name: gather vm information\n testbed_vm_info: base_vm='VM0100' topo='t1' vm_file='veos'\n'''\n\n### Here are the assumption/expectation of files to gather VM informations, if the file location or name changes, please modify it here\nTOPO_PATH = 'vars/'\nVM_INV_FILE = 'veos'\n\n\nclass TestbedVMFacts():\n \"\"\"\n Retrieve testbed VMs management information that for a specified toplogy defined in testbed.csv\n\n \"\"\"\n\n def __init__(self, toponame, vmbase, vmfile):\n CLET_SUFFIX = \"-clet\"\n toponame = re.sub(CLET_SUFFIX + \"$\", \"\", toponame)\n self.topofile = TOPO_PATH+'topo_'+toponame +'.yml'\n self.start_index = int(re.findall('VM(\\d+)', vmbase)[0])\n self.vmhosts = {}\n self.vmfile = vmfile\n self.inv_mgr = InventoryManager(loader=DataLoader(), sources=self.vmfile)\n return\n\n\n def get_neighbor_eos(self):\n eos = {}\n with open(self.topofile) as f:\n vm_topology = yaml.load(f)\n self.topoall = vm_topology\n for vm in vm_topology['topology']['VMs']:\n vm_index = int(vm_topology['topology']['VMs'][vm]['vm_offset'])+self.start_index\n eos[vm] = vm_index\n return eos\n\n\ndef main():\n module = AnsibleModule(\n argument_spec=dict(\n base_vm=dict(required=True, type='str'),\n topo=dict(required=True, type='str'),\n vm_file=dict(default=VM_INV_FILE, type='str')\n ),\n supports_check_mode=True\n )\n m_args = module.params\n topo_type = m_args['topo']\n if 'ptf' in topo_type:\n module.exit_json(ansible_facts={'neighbor_eosvm_mgmt': {}})\n try:\n vmsall = TestbedVMFacts(m_args['topo'], m_args['base_vm'], m_args['vm_file'])\n neighbor_eos = vmsall.get_neighbor_eos()\n for eos in neighbor_eos:\n vmname = 'VM'+format(neighbor_eos[eos], '04d')\n if vmname in vmsall.inv_mgr.hosts:\n vmsall.vmhosts[eos] = vmsall.inv_mgr.get_host(vmname).get_vars()['ansible_host']\n else:\n err_msg = \"cannot find the vm \" + vmname + \" in VM inventory file, please make sure you have enough VMs for the topology you are using\"\n module.fail_json(msg=err_msg)\n module.exit_json(ansible_facts={'neighbor_eosvm_mgmt':vmsall.vmhosts, 'topoall': vmsall.topoall})\n except (IOError, OSError):\n module.fail_json(msg=\"Can not find file \"+vmsall.topofile+\" or \"+m_args['vm_file']+\" or \"+VM_INV_FILE)\n except Exception as e:\n 
module.fail_json(msg=traceback.format_exc())\n\nfrom ansible.module_utils.basic import *\nif __name__ == \"__main__\":\n main()\n\n", "path": "ansible/library/testbed_vm_info.py"}]} | 2,027 | 524 |
gh_patches_debug_32068 | rasdani/github-patches | git_diff | conan-io__conan-center-index-16242 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[package] libudev/system: Fails build for conan 2.0
### Description
libudev/system fails to download or build with conan 2.0 installed. It needs an update to use the conan 2.0 tools API, as it currently depends on conan 1.x code.
### Package and Environment Details
* Package Name/Version: **libudev/system**
* Operating System+version: **Linux Ubuntu 20.04**
### Conan profile
[settings]
arch=x86_64
build_type=Release
compiler=gcc
compiler.cppstd=gnu17
compiler.libcxx=libstdc++11
compiler.version=9
os=Linux
### Steps to reproduce
conan download -r conancenter libudev/system@
### Logs
ERROR: Error loading conanfile at '/home/tbitz/.conan2/p/libudadcb0d08572c6/e/conanfile.py': Unable to load conanfile in /home/tbitz/.conan2/p/libudadcb0d08572c6/e/conanfile.py
File "<frozen importlib._bootstrap_external>", line 848, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/home/tbitz/.conan2/p/libudadcb0d08572c6/e/conanfile.py", line 4, in <module>
from conans import tools
ImportError: cannot import name 'tools' from 'conans' (/home/tbitz/.local/lib/python3.8/site-packages/conans/__init__.py)
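The failing `from conans import tools` line is Conan 1.x API. Under Conan 2.x, the recipe's pkg-config lookup would instead go through `conan.tools.gnu.PkgConfig`, roughly as sketched below (an illustration of the direction only, mirroring the patch further down):
```python
from conan import ConanFile
from conan.tools.gnu import PkgConfig


class LibUDEVConan(ConanFile):
    name = "libudev"
    version = "system"
    settings = "os", "arch", "compiler", "build_type"

    def package_info(self):
        self.cpp_info.includedirs = []
        self.cpp_info.libdirs = []
        # Ask the system pkg-config for libudev and copy flags/libs over.
        pkg_config = PkgConfig(self, "libudev")
        pkg_config.fill_cpp_info(self.cpp_info)
```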
</issue>
<code>
[start of recipes/libudev/all/conanfile.py]
1 from conan import ConanFile
2 from conan.errors import ConanException, ConanInvalidConfiguration
3 from conan.tools.system import package_manager
4 from conans import tools
5
6 required_conan_version = ">=1.47"
7
8
9 class LibUDEVConan(ConanFile):
10 name = "libudev"
11 version = "system"
12 description = "API for enumerating and introspecting local devices"
13 topics = ("udev", "devices", "enumerating")
14 url = "https://github.com/conan-io/conan-center-index"
15 homepage = "https://www.freedesktop.org/software/systemd/man/udev.html"
16 license = "GPL-2.0-or-later", "LGPL-2.1-or-later"
17 settings = "os", "arch", "compiler", "build_type"
18
19 def validate(self):
20 if self.settings.os != "Linux":
21 raise ConanInvalidConfiguration("libudev is only supported on Linux.")
22
23 def package_id(self):
24 self.info.header_only()
25
26 def _fill_cppinfo_from_pkgconfig(self, name):
27 pkg_config = tools.PkgConfig(name)
28 if not pkg_config.provides:
29 raise ConanException("libudev development files aren't available, give up")
30 libs = [lib[2:] for lib in pkg_config.libs_only_l]
31 lib_dirs = [lib[2:] for lib in pkg_config.libs_only_L]
32 ldflags = [flag for flag in pkg_config.libs_only_other]
33 include_dirs = [include[2:] for include in pkg_config.cflags_only_I]
34 cflags = [flag for flag in pkg_config.cflags_only_other if not flag.startswith("-D")]
35 defines = [flag[2:] for flag in pkg_config.cflags_only_other if flag.startswith("-D")]
36
37 self.cpp_info.system_libs = libs
38 self.cpp_info.libdirs = lib_dirs
39 self.cpp_info.sharedlinkflags = ldflags
40 self.cpp_info.exelinkflags = ldflags
41 self.cpp_info.defines = defines
42 self.cpp_info.includedirs = include_dirs
43 self.cpp_info.cflags = cflags
44 self.cpp_info.cxxflags = cflags
45
46 def system_requirements(self):
47 dnf = package_manager.Dnf(self)
48 dnf.install(["systemd-devel"], update=True, check=True)
49
50 yum = package_manager.Yum(self)
51 yum.install(["systemd-devel"], update=True, check=True)
52
53 apt = package_manager.Apt(self)
54 apt.install(["libudev-dev"], update=True, check=True)
55
56 pacman = package_manager.PacMan(self)
57 pacman.install(["systemd-libs"], update=True, check=True)
58
59 zypper = package_manager.Zypper(self)
60 zypper.install(["libudev-devel"], update=True, check=True)
61
62 def package_info(self):
63 self.cpp_info.includedirs = []
64 self.cpp_info.libdirs = []
65 self._fill_cppinfo_from_pkgconfig("libudev")
66
[end of recipes/libudev/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/libudev/all/conanfile.py b/recipes/libudev/all/conanfile.py
--- a/recipes/libudev/all/conanfile.py
+++ b/recipes/libudev/all/conanfile.py
@@ -1,7 +1,7 @@
from conan import ConanFile
-from conan.errors import ConanException, ConanInvalidConfiguration
+from conan.errors import ConanInvalidConfiguration
from conan.tools.system import package_manager
-from conans import tools
+from conan.tools.gnu import PkgConfig
required_conan_version = ">=1.47"
@@ -21,27 +21,7 @@
raise ConanInvalidConfiguration("libudev is only supported on Linux.")
def package_id(self):
- self.info.header_only()
-
- def _fill_cppinfo_from_pkgconfig(self, name):
- pkg_config = tools.PkgConfig(name)
- if not pkg_config.provides:
- raise ConanException("libudev development files aren't available, give up")
- libs = [lib[2:] for lib in pkg_config.libs_only_l]
- lib_dirs = [lib[2:] for lib in pkg_config.libs_only_L]
- ldflags = [flag for flag in pkg_config.libs_only_other]
- include_dirs = [include[2:] for include in pkg_config.cflags_only_I]
- cflags = [flag for flag in pkg_config.cflags_only_other if not flag.startswith("-D")]
- defines = [flag[2:] for flag in pkg_config.cflags_only_other if flag.startswith("-D")]
-
- self.cpp_info.system_libs = libs
- self.cpp_info.libdirs = lib_dirs
- self.cpp_info.sharedlinkflags = ldflags
- self.cpp_info.exelinkflags = ldflags
- self.cpp_info.defines = defines
- self.cpp_info.includedirs = include_dirs
- self.cpp_info.cflags = cflags
- self.cpp_info.cxxflags = cflags
+ self.info.clear()
def system_requirements(self):
dnf = package_manager.Dnf(self)
@@ -62,4 +42,5 @@
def package_info(self):
self.cpp_info.includedirs = []
self.cpp_info.libdirs = []
- self._fill_cppinfo_from_pkgconfig("libudev")
+ pkg_config = PkgConfig(self, "libudev")
+ pkg_config.fill_cpp_info(self.cpp_info)
| {"golden_diff": "diff --git a/recipes/libudev/all/conanfile.py b/recipes/libudev/all/conanfile.py\n--- a/recipes/libudev/all/conanfile.py\n+++ b/recipes/libudev/all/conanfile.py\n@@ -1,7 +1,7 @@\n from conan import ConanFile\n-from conan.errors import ConanException, ConanInvalidConfiguration\n+from conan.errors import ConanInvalidConfiguration\n from conan.tools.system import package_manager\n-from conans import tools\n+from conan.tools.gnu import PkgConfig\n \n required_conan_version = \">=1.47\"\n \n@@ -21,27 +21,7 @@\n raise ConanInvalidConfiguration(\"libudev is only supported on Linux.\")\n \n def package_id(self):\n- self.info.header_only()\n-\n- def _fill_cppinfo_from_pkgconfig(self, name):\n- pkg_config = tools.PkgConfig(name)\n- if not pkg_config.provides:\n- raise ConanException(\"libudev development files aren't available, give up\")\n- libs = [lib[2:] for lib in pkg_config.libs_only_l]\n- lib_dirs = [lib[2:] for lib in pkg_config.libs_only_L]\n- ldflags = [flag for flag in pkg_config.libs_only_other]\n- include_dirs = [include[2:] for include in pkg_config.cflags_only_I]\n- cflags = [flag for flag in pkg_config.cflags_only_other if not flag.startswith(\"-D\")]\n- defines = [flag[2:] for flag in pkg_config.cflags_only_other if flag.startswith(\"-D\")]\n-\n- self.cpp_info.system_libs = libs\n- self.cpp_info.libdirs = lib_dirs\n- self.cpp_info.sharedlinkflags = ldflags\n- self.cpp_info.exelinkflags = ldflags\n- self.cpp_info.defines = defines\n- self.cpp_info.includedirs = include_dirs\n- self.cpp_info.cflags = cflags\n- self.cpp_info.cxxflags = cflags\n+ self.info.clear()\n \n def system_requirements(self):\n dnf = package_manager.Dnf(self)\n@@ -62,4 +42,5 @@\n def package_info(self):\n self.cpp_info.includedirs = []\n self.cpp_info.libdirs = []\n- self._fill_cppinfo_from_pkgconfig(\"libudev\")\n+ pkg_config = PkgConfig(self, \"libudev\")\n+ pkg_config.fill_cpp_info(self.cpp_info)\n", "issue": "[package] libudev/system: Fails build for conan 2.0\n### Description\n\nlibudev/system fails to download or build with conan 2.0 installed. it needs an update to use conan 2.0 code for conan tools as it currently is dependent on conan 1.x code. 
\n\n### Package and Environment Details\n\n* Package Name/Version: **libudev/system**\r\n* Operating System+version: **Linux Ubuntu 20.04**\r\n\n\n### Conan profile\n\n[settings]\r\narch=x86_64\r\nbuild_type=Release\r\ncompiler=gcc\r\ncompiler.cppstd=gnu17\r\ncompiler.libcxx=libstdc++11\r\ncompiler.version=9\r\nos=Linux\r\n\n\n### Steps to reproduce\n\nconan download -r conancenter libudev/system@\n\n### Logs\n\nERROR: Error loading conanfile at '/home/tbitz/.conan2/p/libudadcb0d08572c6/e/conanfile.py': Unable to load conanfile in /home/tbitz/.conan2/p/libudadcb0d08572c6/e/conanfile.py\r\n File \"<frozen importlib._bootstrap_external>\", line 848, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 219, in _call_with_frames_removed\r\n File \"/home/tbitz/.conan2/p/libudadcb0d08572c6/e/conanfile.py\", line 4, in <module>\r\n from conans import tools\r\nImportError: cannot import name 'tools' from 'conans' (/home/tbitz/.local/lib/python3.8/site-packages/conans/__init__.py)\r\n\r\n\n", "before_files": [{"content": "from conan import ConanFile\nfrom conan.errors import ConanException, ConanInvalidConfiguration\nfrom conan.tools.system import package_manager\nfrom conans import tools\n\nrequired_conan_version = \">=1.47\"\n\n\nclass LibUDEVConan(ConanFile):\n name = \"libudev\"\n version = \"system\"\n description = \"API for enumerating and introspecting local devices\"\n topics = (\"udev\", \"devices\", \"enumerating\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://www.freedesktop.org/software/systemd/man/udev.html\"\n license = \"GPL-2.0-or-later\", \"LGPL-2.1-or-later\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n\n def validate(self):\n if self.settings.os != \"Linux\":\n raise ConanInvalidConfiguration(\"libudev is only supported on Linux.\")\n\n def package_id(self):\n self.info.header_only()\n\n def _fill_cppinfo_from_pkgconfig(self, name):\n pkg_config = tools.PkgConfig(name)\n if not pkg_config.provides:\n raise ConanException(\"libudev development files aren't available, give up\")\n libs = [lib[2:] for lib in pkg_config.libs_only_l]\n lib_dirs = [lib[2:] for lib in pkg_config.libs_only_L]\n ldflags = [flag for flag in pkg_config.libs_only_other]\n include_dirs = [include[2:] for include in pkg_config.cflags_only_I]\n cflags = [flag for flag in pkg_config.cflags_only_other if not flag.startswith(\"-D\")]\n defines = [flag[2:] for flag in pkg_config.cflags_only_other if flag.startswith(\"-D\")]\n\n self.cpp_info.system_libs = libs\n self.cpp_info.libdirs = lib_dirs\n self.cpp_info.sharedlinkflags = ldflags\n self.cpp_info.exelinkflags = ldflags\n self.cpp_info.defines = defines\n self.cpp_info.includedirs = include_dirs\n self.cpp_info.cflags = cflags\n self.cpp_info.cxxflags = cflags\n\n def system_requirements(self):\n dnf = package_manager.Dnf(self)\n dnf.install([\"systemd-devel\"], update=True, check=True)\n\n yum = package_manager.Yum(self)\n yum.install([\"systemd-devel\"], update=True, check=True)\n\n apt = package_manager.Apt(self)\n apt.install([\"libudev-dev\"], update=True, check=True)\n\n pacman = package_manager.PacMan(self)\n pacman.install([\"systemd-libs\"], update=True, check=True)\n\n zypper = package_manager.Zypper(self)\n zypper.install([\"libudev-devel\"], update=True, check=True)\n\n def package_info(self):\n self.cpp_info.includedirs = []\n self.cpp_info.libdirs = []\n self._fill_cppinfo_from_pkgconfig(\"libudev\")\n", "path": "recipes/libudev/all/conanfile.py"}]} | 1,676 | 530 |
gh_patches_debug_32378 | rasdani/github-patches | git_diff | optuna__optuna-4684 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove experimental label from `_ProgressBar`
### Motivation
Several issues related to `_ProgressBar` have been already addressed (ref: https://github.com/optuna/optuna/issues/2892, https://github.com/optuna/optuna/issues/2957, https://github.com/optuna/optuna/issues/2958). Now we can remove the experimental label from `_ProgressBar`.
### Suggestion
Remove the `@experimental_func` decorator from `_ProgressBar`. Also, `_init_valid` method can be removed as explained in [TODO comment](https://github.com/optuna/optuna/blob/806448420863606c113aeb2e33457acf022be066/optuna/progress_bar.py#L57C28-L58).
### Additional context (optional)
_No response_
</issue>
<code>
[start of optuna/progress_bar.py]
1 import logging
2 from typing import Any
3 from typing import Optional
4 from typing import TYPE_CHECKING
5
6 from tqdm.auto import tqdm
7
8 from optuna import logging as optuna_logging
9 from optuna._experimental import experimental_func
10
11
12 if TYPE_CHECKING:
13 from optuna.study import Study
14
15 _tqdm_handler: Optional["_TqdmLoggingHandler"] = None
16
17
18 # Reference: https://gist.github.com/hvy/8b80c2cedf02b15c24f85d1fa17ebe02
19 class _TqdmLoggingHandler(logging.StreamHandler):
20 def emit(self, record: Any) -> None:
21 try:
22 msg = self.format(record)
23 tqdm.write(msg)
24 self.flush()
25 except (KeyboardInterrupt, SystemExit):
26 raise
27 except Exception:
28 self.handleError(record)
29
30
31 class _ProgressBar:
32 """Progress Bar implementation for :func:`~optuna.study.Study.optimize` on the top of `tqdm`.
33
34 Args:
35 is_valid:
36 Whether to show progress bars in :func:`~optuna.study.Study.optimize`.
37 n_trials:
38 The number of trials.
39 timeout:
40 Stop study after the given number of second(s).
41 """
42
43 def __init__(
44 self,
45 is_valid: bool,
46 n_trials: Optional[int] = None,
47 timeout: Optional[float] = None,
48 ) -> None:
49 self._is_valid = is_valid and (n_trials or timeout) is not None
50 self._n_trials = n_trials
51 self._timeout = timeout
52 self._last_elapsed_seconds = 0.0
53
54 if self._is_valid:
55 self._init_valid()
56
57 # TODO(hvy): Remove initialization indirection via this method when the progress bar is no
58 # longer experimental.
59 @experimental_func("1.2.0", name="Progress bar")
60 def _init_valid(self) -> None:
61 if self._n_trials is not None:
62 self._progress_bar = tqdm(total=self._n_trials)
63
64 elif self._timeout is not None:
65 total = tqdm.format_interval(self._timeout)
66 fmt = "{desc} {percentage:3.0f}%|{bar}| {elapsed}/" + total
67 self._progress_bar = tqdm(total=self._timeout, bar_format=fmt)
68 else:
69 assert False
70
71 global _tqdm_handler
72
73 _tqdm_handler = _TqdmLoggingHandler()
74 _tqdm_handler.setLevel(logging.INFO)
75 _tqdm_handler.setFormatter(optuna_logging.create_default_formatter())
76 optuna_logging.disable_default_handler()
77 optuna_logging._get_library_root_logger().addHandler(_tqdm_handler)
78
79 def update(self, elapsed_seconds: float, study: "Study") -> None:
80 """Update the progress bars if ``is_valid`` is :obj:`True`.
81
82 Args:
83 elapsed_seconds:
84 The time past since :func:`~optuna.study.Study.optimize` started.
85 study:
86 The current study object.
87 """
88
89 if self._is_valid:
90 if not study._is_multi_objective():
91 # Not updating the progress bar when there are no complete trial.
92 try:
93 msg = (
94 f"Best trial: {study.best_trial.number}. "
95 f"Best value: {study.best_value:.6g}"
96 )
97
98 self._progress_bar.set_description(msg)
99 except ValueError:
100 pass
101
102 if self._n_trials is not None:
103 self._progress_bar.update(1)
104 if self._timeout is not None:
105 self._progress_bar.set_postfix_str(
106 "{:.02f}/{} seconds".format(elapsed_seconds, self._timeout)
107 )
108
109 elif self._timeout is not None:
110 time_diff = elapsed_seconds - self._last_elapsed_seconds
111 if elapsed_seconds > self._timeout:
112 # Clip elapsed time to avoid tqdm warnings.
113 time_diff -= elapsed_seconds - self._timeout
114
115 self._progress_bar.update(time_diff)
116 self._last_elapsed_seconds = elapsed_seconds
117
118 else:
119 assert False
120
121 def close(self) -> None:
122 """Close progress bars."""
123
124 if self._is_valid:
125 self._progress_bar.close()
126 assert _tqdm_handler is not None
127 optuna_logging._get_library_root_logger().removeHandler(_tqdm_handler)
128 optuna_logging.enable_default_handler()
129
[end of optuna/progress_bar.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/optuna/progress_bar.py b/optuna/progress_bar.py
--- a/optuna/progress_bar.py
+++ b/optuna/progress_bar.py
@@ -6,7 +6,6 @@
from tqdm.auto import tqdm
from optuna import logging as optuna_logging
-from optuna._experimental import experimental_func
if TYPE_CHECKING:
@@ -52,29 +51,22 @@
self._last_elapsed_seconds = 0.0
if self._is_valid:
- self._init_valid()
-
- # TODO(hvy): Remove initialization indirection via this method when the progress bar is no
- # longer experimental.
- @experimental_func("1.2.0", name="Progress bar")
- def _init_valid(self) -> None:
- if self._n_trials is not None:
- self._progress_bar = tqdm(total=self._n_trials)
-
- elif self._timeout is not None:
- total = tqdm.format_interval(self._timeout)
- fmt = "{desc} {percentage:3.0f}%|{bar}| {elapsed}/" + total
- self._progress_bar = tqdm(total=self._timeout, bar_format=fmt)
- else:
- assert False
-
- global _tqdm_handler
-
- _tqdm_handler = _TqdmLoggingHandler()
- _tqdm_handler.setLevel(logging.INFO)
- _tqdm_handler.setFormatter(optuna_logging.create_default_formatter())
- optuna_logging.disable_default_handler()
- optuna_logging._get_library_root_logger().addHandler(_tqdm_handler)
+ if self._n_trials is not None:
+ self._progress_bar = tqdm(total=self._n_trials)
+ elif self._timeout is not None:
+ total = tqdm.format_interval(self._timeout)
+ fmt = "{desc} {percentage:3.0f}%|{bar}| {elapsed}/" + total
+ self._progress_bar = tqdm(total=self._timeout, bar_format=fmt)
+ else:
+ assert False
+
+ global _tqdm_handler
+
+ _tqdm_handler = _TqdmLoggingHandler()
+ _tqdm_handler.setLevel(logging.INFO)
+ _tqdm_handler.setFormatter(optuna_logging.create_default_formatter())
+ optuna_logging.disable_default_handler()
+ optuna_logging._get_library_root_logger().addHandler(_tqdm_handler)
def update(self, elapsed_seconds: float, study: "Study") -> None:
"""Update the progress bars if ``is_valid`` is :obj:`True`.
| {"golden_diff": "diff --git a/optuna/progress_bar.py b/optuna/progress_bar.py\n--- a/optuna/progress_bar.py\n+++ b/optuna/progress_bar.py\n@@ -6,7 +6,6 @@\n from tqdm.auto import tqdm\n \n from optuna import logging as optuna_logging\n-from optuna._experimental import experimental_func\n \n \n if TYPE_CHECKING:\n@@ -52,29 +51,22 @@\n self._last_elapsed_seconds = 0.0\n \n if self._is_valid:\n- self._init_valid()\n-\n- # TODO(hvy): Remove initialization indirection via this method when the progress bar is no\n- # longer experimental.\n- @experimental_func(\"1.2.0\", name=\"Progress bar\")\n- def _init_valid(self) -> None:\n- if self._n_trials is not None:\n- self._progress_bar = tqdm(total=self._n_trials)\n-\n- elif self._timeout is not None:\n- total = tqdm.format_interval(self._timeout)\n- fmt = \"{desc} {percentage:3.0f}%|{bar}| {elapsed}/\" + total\n- self._progress_bar = tqdm(total=self._timeout, bar_format=fmt)\n- else:\n- assert False\n-\n- global _tqdm_handler\n-\n- _tqdm_handler = _TqdmLoggingHandler()\n- _tqdm_handler.setLevel(logging.INFO)\n- _tqdm_handler.setFormatter(optuna_logging.create_default_formatter())\n- optuna_logging.disable_default_handler()\n- optuna_logging._get_library_root_logger().addHandler(_tqdm_handler)\n+ if self._n_trials is not None:\n+ self._progress_bar = tqdm(total=self._n_trials)\n+ elif self._timeout is not None:\n+ total = tqdm.format_interval(self._timeout)\n+ fmt = \"{desc} {percentage:3.0f}%|{bar}| {elapsed}/\" + total\n+ self._progress_bar = tqdm(total=self._timeout, bar_format=fmt)\n+ else:\n+ assert False\n+\n+ global _tqdm_handler\n+\n+ _tqdm_handler = _TqdmLoggingHandler()\n+ _tqdm_handler.setLevel(logging.INFO)\n+ _tqdm_handler.setFormatter(optuna_logging.create_default_formatter())\n+ optuna_logging.disable_default_handler()\n+ optuna_logging._get_library_root_logger().addHandler(_tqdm_handler)\n \n def update(self, elapsed_seconds: float, study: \"Study\") -> None:\n \"\"\"Update the progress bars if ``is_valid`` is :obj:`True`.\n", "issue": "Remove experimental label from `_ProgressBar`\n### Motivation\n\nSeveral issues related to `_ProgressBar` have been already addressed (ref: https://github.com/optuna/optuna/issues/2892, https://github.com/optuna/optuna/issues/2957, https://github.com/optuna/optuna/issues/2958). Now we can remove the experimental label from `_ProgressBar`.\n\n### Suggestion\n\nRemove the `@experimental_func` decorator from `_ProgressBar`. 
Also, `_init_valid` method can be removed as explained in [TODO comment](https://github.com/optuna/optuna/blob/806448420863606c113aeb2e33457acf022be066/optuna/progress_bar.py#L57C28-L58).\n\n### Additional context (optional)\n\n_No response_\n", "before_files": [{"content": "import logging\nfrom typing import Any\nfrom typing import Optional\nfrom typing import TYPE_CHECKING\n\nfrom tqdm.auto import tqdm\n\nfrom optuna import logging as optuna_logging\nfrom optuna._experimental import experimental_func\n\n\nif TYPE_CHECKING:\n from optuna.study import Study\n\n_tqdm_handler: Optional[\"_TqdmLoggingHandler\"] = None\n\n\n# Reference: https://gist.github.com/hvy/8b80c2cedf02b15c24f85d1fa17ebe02\nclass _TqdmLoggingHandler(logging.StreamHandler):\n def emit(self, record: Any) -> None:\n try:\n msg = self.format(record)\n tqdm.write(msg)\n self.flush()\n except (KeyboardInterrupt, SystemExit):\n raise\n except Exception:\n self.handleError(record)\n\n\nclass _ProgressBar:\n \"\"\"Progress Bar implementation for :func:`~optuna.study.Study.optimize` on the top of `tqdm`.\n\n Args:\n is_valid:\n Whether to show progress bars in :func:`~optuna.study.Study.optimize`.\n n_trials:\n The number of trials.\n timeout:\n Stop study after the given number of second(s).\n \"\"\"\n\n def __init__(\n self,\n is_valid: bool,\n n_trials: Optional[int] = None,\n timeout: Optional[float] = None,\n ) -> None:\n self._is_valid = is_valid and (n_trials or timeout) is not None\n self._n_trials = n_trials\n self._timeout = timeout\n self._last_elapsed_seconds = 0.0\n\n if self._is_valid:\n self._init_valid()\n\n # TODO(hvy): Remove initialization indirection via this method when the progress bar is no\n # longer experimental.\n @experimental_func(\"1.2.0\", name=\"Progress bar\")\n def _init_valid(self) -> None:\n if self._n_trials is not None:\n self._progress_bar = tqdm(total=self._n_trials)\n\n elif self._timeout is not None:\n total = tqdm.format_interval(self._timeout)\n fmt = \"{desc} {percentage:3.0f}%|{bar}| {elapsed}/\" + total\n self._progress_bar = tqdm(total=self._timeout, bar_format=fmt)\n else:\n assert False\n\n global _tqdm_handler\n\n _tqdm_handler = _TqdmLoggingHandler()\n _tqdm_handler.setLevel(logging.INFO)\n _tqdm_handler.setFormatter(optuna_logging.create_default_formatter())\n optuna_logging.disable_default_handler()\n optuna_logging._get_library_root_logger().addHandler(_tqdm_handler)\n\n def update(self, elapsed_seconds: float, study: \"Study\") -> None:\n \"\"\"Update the progress bars if ``is_valid`` is :obj:`True`.\n\n Args:\n elapsed_seconds:\n The time past since :func:`~optuna.study.Study.optimize` started.\n study:\n The current study object.\n \"\"\"\n\n if self._is_valid:\n if not study._is_multi_objective():\n # Not updating the progress bar when there are no complete trial.\n try:\n msg = (\n f\"Best trial: {study.best_trial.number}. 
\"\n f\"Best value: {study.best_value:.6g}\"\n )\n\n self._progress_bar.set_description(msg)\n except ValueError:\n pass\n\n if self._n_trials is not None:\n self._progress_bar.update(1)\n if self._timeout is not None:\n self._progress_bar.set_postfix_str(\n \"{:.02f}/{} seconds\".format(elapsed_seconds, self._timeout)\n )\n\n elif self._timeout is not None:\n time_diff = elapsed_seconds - self._last_elapsed_seconds\n if elapsed_seconds > self._timeout:\n # Clip elapsed time to avoid tqdm warnings.\n time_diff -= elapsed_seconds - self._timeout\n\n self._progress_bar.update(time_diff)\n self._last_elapsed_seconds = elapsed_seconds\n\n else:\n assert False\n\n def close(self) -> None:\n \"\"\"Close progress bars.\"\"\"\n\n if self._is_valid:\n self._progress_bar.close()\n assert _tqdm_handler is not None\n optuna_logging._get_library_root_logger().removeHandler(_tqdm_handler)\n optuna_logging.enable_default_handler()\n", "path": "optuna/progress_bar.py"}]} | 1,954 | 571 |
gh_patches_debug_20648 | rasdani/github-patches | git_diff | microsoft__ptvsd-1253 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PTVSD_LOG_DIR doesn't work with VS
No logs are generated even with the environment variable set. It looks like logging initialization is missing on the VS entry point (`debugger.py`).
</issue>
<code>
[start of src/ptvsd/debugger.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License. See LICENSE in the project root
3 # for license information.
4
5 import sys
6
7 from ptvsd._local import run_module, run_file, run_main
8
9
10 # TODO: not needed?
11 DONT_DEBUG = []
12
13 LOCALHOST = 'localhost'
14
15 RUNNERS = {
16 'module': run_module, # python -m spam
17 'script': run_file, # python spam.py
18 'code': run_file, # python -c 'print("spam")'
19 None: run_file, # catchall
20 }
21
22
23 def debug(filename, port_num, debug_id, debug_options, run_as,
24 _runners=RUNNERS, _extra=None, *args, **kwargs):
25 # TODO: docstring
26 if _extra is None:
27 _extra = sys.argv[1:]
28 address = (LOCALHOST, port_num)
29 try:
30 run = _runners[run_as]
31 except KeyError:
32 # TODO: fail?
33 run = _runners[None]
34 if _extra:
35 args = _extra + list(args)
36 kwargs.setdefault('singlesession', True)
37 run(address, filename, *args, **kwargs)
38
39
40 def run(filename, port_num, run_as,
41 *args, **kwargs):
42 address = (LOCALHOST, port_num)
43 run_main(address, filename, run_as, *args, **kwargs)
44
[end of src/ptvsd/debugger.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/ptvsd/debugger.py b/src/ptvsd/debugger.py
--- a/src/ptvsd/debugger.py
+++ b/src/ptvsd/debugger.py
@@ -4,6 +4,7 @@
import sys
+import ptvsd.log
from ptvsd._local import run_module, run_file, run_main
@@ -22,7 +23,10 @@
def debug(filename, port_num, debug_id, debug_options, run_as,
_runners=RUNNERS, _extra=None, *args, **kwargs):
- # TODO: docstring
+
+ ptvsd.log.to_file()
+ ptvsd.log.info('debug{0!r}', (filename, port_num, debug_id, debug_options, run_as))
+
if _extra is None:
_extra = sys.argv[1:]
address = (LOCALHOST, port_num)
@@ -39,5 +43,9 @@
def run(filename, port_num, run_as,
*args, **kwargs):
+
+ ptvsd.log.to_file()
+ ptvsd.log.info('run{0!r}', (filename, port_num, run_as))
+
address = (LOCALHOST, port_num)
run_main(address, filename, run_as, *args, **kwargs)
| {"golden_diff": "diff --git a/src/ptvsd/debugger.py b/src/ptvsd/debugger.py\n--- a/src/ptvsd/debugger.py\n+++ b/src/ptvsd/debugger.py\n@@ -4,6 +4,7 @@\n \n import sys\n \n+import ptvsd.log\n from ptvsd._local import run_module, run_file, run_main\n \n \n@@ -22,7 +23,10 @@\n \n def debug(filename, port_num, debug_id, debug_options, run_as,\n _runners=RUNNERS, _extra=None, *args, **kwargs):\n- # TODO: docstring\n+\n+ ptvsd.log.to_file()\n+ ptvsd.log.info('debug{0!r}', (filename, port_num, debug_id, debug_options, run_as))\n+\n if _extra is None:\n _extra = sys.argv[1:]\n address = (LOCALHOST, port_num)\n@@ -39,5 +43,9 @@\n \n def run(filename, port_num, run_as,\n *args, **kwargs):\n+\n+ ptvsd.log.to_file()\n+ ptvsd.log.info('run{0!r}', (filename, port_num, run_as))\n+\n address = (LOCALHOST, port_num)\n run_main(address, filename, run_as, *args, **kwargs)\n", "issue": "PTVSD_LOG_DIR doesn't work with VS\nNo logs are generated even with the environment variable set. It looks like logging initialization is missing on the VS entry point (`debugger.py`).\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nimport sys\n\nfrom ptvsd._local import run_module, run_file, run_main\n\n\n# TODO: not needed?\nDONT_DEBUG = []\n\nLOCALHOST = 'localhost'\n\nRUNNERS = {\n 'module': run_module, # python -m spam\n 'script': run_file, # python spam.py\n 'code': run_file, # python -c 'print(\"spam\")'\n None: run_file, # catchall\n}\n\n\ndef debug(filename, port_num, debug_id, debug_options, run_as,\n _runners=RUNNERS, _extra=None, *args, **kwargs):\n # TODO: docstring\n if _extra is None:\n _extra = sys.argv[1:]\n address = (LOCALHOST, port_num)\n try:\n run = _runners[run_as]\n except KeyError:\n # TODO: fail?\n run = _runners[None]\n if _extra:\n args = _extra + list(args)\n kwargs.setdefault('singlesession', True)\n run(address, filename, *args, **kwargs)\n\n\ndef run(filename, port_num, run_as,\n *args, **kwargs):\n address = (LOCALHOST, port_num)\n run_main(address, filename, run_as, *args, **kwargs)\n", "path": "src/ptvsd/debugger.py"}]} | 979 | 296 |
gh_patches_debug_36977 | rasdani/github-patches | git_diff | bridgecrewio__checkov-4942 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Failed to run check CKV_AWS_224: TemplateAttributeError: get is invalid
**Describe the issue**
Error occurs when checked ECS Cluster using terraform_plan framework.
**Examples**
```
module "cluster" {
source = "terraform-aws-modules/ecs/aws"
version = "4.1.3"
cluster_name = "foo"
fargate_capacity_providers = {
FARGATE = {}
}
}
```
**Version (please complete the following information):**
- checkov 2.3.165
- terraform 1.4.5
- aws provider 4.63.0
**Additional context**
traceback:
```
2023-04-18 09:53:09,676 [MainThread ] [ERROR] Failed to run check CKV_AWS_224 on /tfplan.json:aws_ecs_cluster.this
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/checkov/common/checks/base_check.py", line 73, in run
check_result["result"] = self.scan_entity_conf(entity_configuration, entity_type)
File "/usr/local/lib/python3.9/site-packages/checkov/terraform/checks/resource/base_resource_check.py", line 43, in scan_entity_conf
return self.scan_resource_conf(conf)
File "/usr/local/lib/python3.9/site-packages/checkov/terraform/checks/resource/aws/ECSClusterLoggingEncryptedWithCMK.py", line 21, in scan_resource_conf
if log_conf.get('cloud_watch_encryption_enabled') == [True] or \
File "/usr/local/lib/python3.9/site-packages/checkov/common/parsers/node.py", line 189, in __getattr__
raise TemplateAttributeError(f'{name} is invalid')
checkov.common.parsers.node.TemplateAttributeError: get is invalid
```
This only occurs when using the terraform_plan framework; it works without issue when using the vanilla terraform framework.
The plan is generated with `terraform plan -out tfplan.bin && terraform show -json tfplan.bin > tfplan.json`, and then checked by running `checkov -f tfplan.json`.
Here is my checkov config file in repo:
```
➜ cat .checkov.yaml
block-list-secret-scan: []
compact: true
download-external-modules: true
evaluate-variables: true
external-modules-download-path: .external_modules
file:
- tfplan.json
framework:
- terraform_plan
mask: []
quiet: true
repo-root-for-plan-enrichment:
- .
secrets-history-timeout: 12h
secrets-scan-file-type: []
skip-check:
- CKV2_AWS_34
summary-position: top
```
</issue>
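A sketch of the defensive handling this calls for; the exact pass/fail branching below is illustrative (the reference diff further down in this entry is authoritative). The point is that every nested lookup is guarded with `isinstance`, because `terraform_plan` output can wrap a nested block as a plain string node whose `.get()` raises `TemplateAttributeError`:

```python
from checkov.common.models.enums import CheckResult


def scan_resource_conf(conf):
    configuration = conf.get("configuration")
    if not (configuration and isinstance(configuration, list) and isinstance(configuration[0], dict)):
        return CheckResult.UNKNOWN

    execute_command = configuration[0].get("execute_command_configuration")
    if execute_command and isinstance(execute_command, list):
        execute_command = execute_command[0]
    if not isinstance(execute_command, dict) or execute_command.get("logging") == ["NONE"]:
        return CheckResult.UNKNOWN

    if execute_command.get("kms_key_id"):
        log_conf = execute_command.get("log_configuration")
        if log_conf and isinstance(log_conf, list) and isinstance(log_conf[0], dict):
            log_conf = log_conf[0]
            if log_conf.get("cloud_watch_encryption_enabled") == [True] or \
                    log_conf.get("s3_bucket_encryption_enabled") == [True]:
                return CheckResult.PASSED
    return CheckResult.FAILED
```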
<code>
[start of checkov/terraform/checks/resource/aws/ECSClusterLoggingEncryptedWithCMK.py]
1 from checkov.common.models.enums import CheckResult, CheckCategories
2 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
3
4
5 class ECSClusterLoggingEncryptedWithCMK(BaseResourceCheck):
6 def __init__(self):
7 name = "Ensure Cluster logging with CMK"
8 id = "CKV_AWS_224"
9 supported_resources = ['aws_ecs_cluster']
10 categories = [CheckCategories.ENCRYPTION]
11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
12
13 def scan_resource_conf(self, conf):
14 configuration = conf.get("configuration")
15 if configuration and isinstance(configuration[0], dict) and configuration[0].get('execute_command_configuration'):
16 command_conf = configuration[0].get('execute_command_configuration')[0]
17 if not command_conf.get('logging') == ['NONE']:
18 if command_conf.get('kms_key_id'):
19 if command_conf.get('log_configuration'):
20 log_conf = command_conf.get('log_configuration')[0]
21 if log_conf.get('cloud_watch_encryption_enabled') == [True] or \
22 log_conf.get('s3_bucket_encryption_enabled') == [True]:
23 return CheckResult.PASSED
24 return CheckResult.FAILED
25 else:
26 return CheckResult.FAILED
27
28 return CheckResult.UNKNOWN
29
30
31 check = ECSClusterLoggingEncryptedWithCMK()
32
[end of checkov/terraform/checks/resource/aws/ECSClusterLoggingEncryptedWithCMK.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/terraform/checks/resource/aws/ECSClusterLoggingEncryptedWithCMK.py b/checkov/terraform/checks/resource/aws/ECSClusterLoggingEncryptedWithCMK.py
--- a/checkov/terraform/checks/resource/aws/ECSClusterLoggingEncryptedWithCMK.py
+++ b/checkov/terraform/checks/resource/aws/ECSClusterLoggingEncryptedWithCMK.py
@@ -1,28 +1,36 @@
+from __future__ import annotations
+
+from typing import Any
+
from checkov.common.models.enums import CheckResult, CheckCategories
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
class ECSClusterLoggingEncryptedWithCMK(BaseResourceCheck):
- def __init__(self):
- name = "Ensure Cluster logging with CMK"
+ def __init__(self) -> None:
+ name = "Ensure ECS Cluster logging uses CMK"
id = "CKV_AWS_224"
- supported_resources = ['aws_ecs_cluster']
- categories = [CheckCategories.ENCRYPTION]
+ supported_resources = ("aws_ecs_cluster",)
+ categories = (CheckCategories.ENCRYPTION,)
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
- def scan_resource_conf(self, conf):
+ def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:
configuration = conf.get("configuration")
- if configuration and isinstance(configuration[0], dict) and configuration[0].get('execute_command_configuration'):
- command_conf = configuration[0].get('execute_command_configuration')[0]
- if not command_conf.get('logging') == ['NONE']:
- if command_conf.get('kms_key_id'):
- if command_conf.get('log_configuration'):
- log_conf = command_conf.get('log_configuration')[0]
- if log_conf.get('cloud_watch_encryption_enabled') == [True] or \
- log_conf.get('s3_bucket_encryption_enabled') == [True]:
- return CheckResult.PASSED
- return CheckResult.FAILED
- else:
+ if configuration and isinstance(configuration, list) and isinstance(configuration[0], dict):
+ execute_command = configuration[0].get("execute_command_configuration")
+ if execute_command and isinstance(execute_command, list):
+ execute_command = execute_command[0]
+ if isinstance(execute_command, dict) and not execute_command.get("logging") == ["NONE"]:
+ if execute_command.get("kms_key_id"):
+ log_conf = execute_command.get("log_configuration")
+ if log_conf and isinstance(log_conf, list):
+ log_conf = log_conf[0]
+ if isinstance(log_conf, dict) and (
+ log_conf.get("cloud_watch_encryption_enabled") == [True]
+ or log_conf.get("s3_bucket_encryption_enabled") == [True]
+ ):
+ return CheckResult.PASSED
+
return CheckResult.FAILED
return CheckResult.UNKNOWN
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/ECSClusterLoggingEncryptedWithCMK.py b/checkov/terraform/checks/resource/aws/ECSClusterLoggingEncryptedWithCMK.py\n--- a/checkov/terraform/checks/resource/aws/ECSClusterLoggingEncryptedWithCMK.py\n+++ b/checkov/terraform/checks/resource/aws/ECSClusterLoggingEncryptedWithCMK.py\n@@ -1,28 +1,36 @@\n+from __future__ import annotations\n+\n+from typing import Any\n+\n from checkov.common.models.enums import CheckResult, CheckCategories\n from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n \n \n class ECSClusterLoggingEncryptedWithCMK(BaseResourceCheck):\n- def __init__(self):\n- name = \"Ensure Cluster logging with CMK\"\n+ def __init__(self) -> None:\n+ name = \"Ensure ECS Cluster logging uses CMK\"\n id = \"CKV_AWS_224\"\n- supported_resources = ['aws_ecs_cluster']\n- categories = [CheckCategories.ENCRYPTION]\n+ supported_resources = (\"aws_ecs_cluster\",)\n+ categories = (CheckCategories.ENCRYPTION,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n- def scan_resource_conf(self, conf):\n+ def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:\n configuration = conf.get(\"configuration\")\n- if configuration and isinstance(configuration[0], dict) and configuration[0].get('execute_command_configuration'):\n- command_conf = configuration[0].get('execute_command_configuration')[0]\n- if not command_conf.get('logging') == ['NONE']:\n- if command_conf.get('kms_key_id'):\n- if command_conf.get('log_configuration'):\n- log_conf = command_conf.get('log_configuration')[0]\n- if log_conf.get('cloud_watch_encryption_enabled') == [True] or \\\n- log_conf.get('s3_bucket_encryption_enabled') == [True]:\n- return CheckResult.PASSED\n- return CheckResult.FAILED\n- else:\n+ if configuration and isinstance(configuration, list) and isinstance(configuration[0], dict):\n+ execute_command = configuration[0].get(\"execute_command_configuration\")\n+ if execute_command and isinstance(execute_command, list):\n+ execute_command = execute_command[0]\n+ if isinstance(execute_command, dict) and not execute_command.get(\"logging\") == [\"NONE\"]:\n+ if execute_command.get(\"kms_key_id\"):\n+ log_conf = execute_command.get(\"log_configuration\")\n+ if log_conf and isinstance(log_conf, list):\n+ log_conf = log_conf[0]\n+ if isinstance(log_conf, dict) and (\n+ log_conf.get(\"cloud_watch_encryption_enabled\") == [True]\n+ or log_conf.get(\"s3_bucket_encryption_enabled\") == [True]\n+ ):\n+ return CheckResult.PASSED\n+\n return CheckResult.FAILED\n \n return CheckResult.UNKNOWN\n", "issue": "Failed to run check CKV_AWS_224: TemplateAttributeError: get is invalid\n**Describe the issue**\r\nError occurs when checked ECS Cluster using terraform_plan framework.\r\n\r\n**Examples**\r\n```\r\nmodule \"cluster\" {\r\n source = \"terraform-aws-modules/ecs/aws\"\r\n version = \"4.1.3\"\r\n\r\n cluster_name = \"foo\"\r\n fargate_capacity_providers = {\r\n FARGATE = {}\r\n }\r\n}\r\n```\r\n\r\n**Version (please complete the following information):**\r\n- checkov 2.3.165\r\n- terraform 1.4.5\r\n- aws provider 4.63.0\r\n\r\n**Additional context**\r\ntraceback:\r\n```\r\n2023-04-18 09:53:09,676 [MainThread ] [ERROR] Failed to run check CKV_AWS_224 on /tfplan.json:aws_ecs_cluster.this\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.9/site-packages/checkov/common/checks/base_check.py\", line 73, in run\r\n check_result[\"result\"] = 
self.scan_entity_conf(entity_configuration, entity_type)\r\n File \"/usr/local/lib/python3.9/site-packages/checkov/terraform/checks/resource/base_resource_check.py\", line 43, in scan_entity_conf\r\n return self.scan_resource_conf(conf)\r\n File \"/usr/local/lib/python3.9/site-packages/checkov/terraform/checks/resource/aws/ECSClusterLoggingEncryptedWithCMK.py\", line 21, in scan_resource_conf\r\n if log_conf.get('cloud_watch_encryption_enabled') == [True] or \\\r\n File \"/usr/local/lib/python3.9/site-packages/checkov/common/parsers/node.py\", line 189, in __getattr__\r\n raise TemplateAttributeError(f'{name} is invalid')\r\ncheckov.common.parsers.node.TemplateAttributeError: get is invalid\r\n```\r\n\r\nThis only occurs when using terraform_plan framework. It works without issue when using vanilla terraform framework.\r\n\r\nThe plan generation is just `terraform plan -out tfplan.bin && terraform show -json tfplan.bin > tfplan.json` then running `checkof -f tfplan.json`.\r\n\r\nHere is my checkov config file in repo:\r\n```\r\n\u279c cat .checkov.yaml \r\nblock-list-secret-scan: []\r\ncompact: true\r\ndownload-external-modules: true\r\nevaluate-variables: true\r\nexternal-modules-download-path: .external_modules\r\nfile:\r\n- tfplan.json\r\nframework:\r\n- terraform_plan\r\nmask: []\r\nquiet: true\r\nrepo-root-for-plan-enrichment:\r\n- .\r\nsecrets-history-timeout: 12h\r\nsecrets-scan-file-type: []\r\nskip-check:\r\n- CKV2_AWS_34\r\nsummary-position: top\r\n```\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass ECSClusterLoggingEncryptedWithCMK(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure Cluster logging with CMK\"\n id = \"CKV_AWS_224\"\n supported_resources = ['aws_ecs_cluster']\n categories = [CheckCategories.ENCRYPTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n configuration = conf.get(\"configuration\")\n if configuration and isinstance(configuration[0], dict) and configuration[0].get('execute_command_configuration'):\n command_conf = configuration[0].get('execute_command_configuration')[0]\n if not command_conf.get('logging') == ['NONE']:\n if command_conf.get('kms_key_id'):\n if command_conf.get('log_configuration'):\n log_conf = command_conf.get('log_configuration')[0]\n if log_conf.get('cloud_watch_encryption_enabled') == [True] or \\\n log_conf.get('s3_bucket_encryption_enabled') == [True]:\n return CheckResult.PASSED\n return CheckResult.FAILED\n else:\n return CheckResult.FAILED\n\n return CheckResult.UNKNOWN\n\n\ncheck = ECSClusterLoggingEncryptedWithCMK()\n", "path": "checkov/terraform/checks/resource/aws/ECSClusterLoggingEncryptedWithCMK.py"}]} | 1,526 | 672 |
gh_patches_debug_16904 | rasdani/github-patches | git_diff | saleor__saleor-5443 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Creating a new sale raises error in Celery task
### Steps to reproduce the problem
1. Run the following mutation as an admin user (with `MANAGE_DISCOUNTS` permission):
```
mutation {
saleCreate(input: {name: "Test"}) {
errors {
field
message
}
sale {
id
name
}
}
}
```
The response from the API is successful, but in the Django server console I'm getting the following error:
```
ERROR celery.app.trace Task saleor.product.tasks.update_products_minimal_variant_prices_of_discount_task[4ec46245-d1f1-47ae-ab23-0c0ab73a9981] raised unexpected: ValueError('Provide at least one of the ID lists:\n\tproduct_ids,\n\tcategory_ids,\n\tcollection_ids.') [PID:31316:Thread-175]
Traceback (most recent call last):
File "/Users/marcin/.pyenv/versions/saleor3.8.1/lib/python3.8/site-packages/celery/app/trace.py", line 385, in trace_task
R = retval = fun(*args, **kwargs)
File "/Users/marcin/mirumee/saleor-platform/saleor/saleor/product/tasks.py", line 64, in update_products_minimal_variant_prices_of_discount_task
update_products_minimal_variant_prices_of_discount(discount)
File "/Users/marcin/mirumee/saleor-platform/saleor/saleor/product/utils/variant_prices.py", line 76, in update_products_minimal_variant_prices_of_discount
update_products_minimal_variant_prices_of_catalogues(
File "/Users/marcin/mirumee/saleor-platform/saleor/saleor/product/utils/variant_prices.py", line 62, in update_products_minimal_variant_prices_of_catalogues
raise ValueError(
ValueError: Provide at least one of the ID lists:
product_ids,
category_ids,
collection_ids.
```
I suppose that the Celery task that recalculates minimal variant prices is run even when there are no products to update. Probably an additional check needs to be added to avoid running the task in this case.
</issue>
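A minimal sketch of the guard this suggests; the reference diff further down in this entry achieves the same effect by nesting the query under `if q_list:`. When a sale is created with no product, category, or collection relations yet, there is nothing to recalculate, so the function can return quietly instead of raising:

```python
import operator
from functools import reduce

from django.db.models import Q


def update_products_minimal_variant_prices_of_catalogues(
    product_ids=None, category_ids=None, collection_ids=None
):
    # Product and update_products_minimal_variant_prices come from the
    # surrounding saleor.product.utils.variant_prices module.
    q_list = []
    if product_ids:
        q_list.append(Q(pk__in=product_ids))
    if category_ids:
        q_list.append(Q(category_id__in=category_ids))
    if collection_ids:
        q_list.append(Q(collectionproduct__collection_id__in=collection_ids))
    if not q_list:
        # A freshly created sale has no catalogue relations yet; skip quietly.
        return
    products = Product.objects.filter(reduce(operator.or_, q_list)).distinct()
    update_products_minimal_variant_prices(products)
```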
<code>
[start of saleor/product/utils/variant_prices.py]
1 import operator
2 from functools import reduce
3
4 from django.db.models.query_utils import Q
5 from prices import Money
6
7 from ...discount.utils import fetch_active_discounts
8 from ..models import Product
9
10
11 def _get_product_minimal_variant_price(product, discounts) -> Money:
12 # Start with the product's price as the minimal one
13 minimal_variant_price = product.price
14 for variant in product.variants.all():
15 variant_price = variant.get_price(discounts=discounts)
16 minimal_variant_price = min(minimal_variant_price, variant_price)
17 return minimal_variant_price
18
19
20 def update_product_minimal_variant_price(product, discounts=None, save=True):
21 if discounts is None:
22 discounts = fetch_active_discounts()
23 minimal_variant_price = _get_product_minimal_variant_price(product, discounts)
24 if product.minimal_variant_price != minimal_variant_price:
25 product.minimal_variant_price_amount = minimal_variant_price.amount
26 if save:
27 product.save(update_fields=["minimal_variant_price_amount", "updated_at"])
28 return product
29
30
31 def update_products_minimal_variant_prices(products, discounts=None):
32 if discounts is None:
33 discounts = fetch_active_discounts()
34 changed_products_to_update = []
35 for product in products:
36 old_minimal_variant_price = product.minimal_variant_price
37 updated_product = update_product_minimal_variant_price(
38 product, discounts, save=False
39 )
40 # Check if the "minimal_variant_price" has changed
41 if updated_product.minimal_variant_price != old_minimal_variant_price:
42 changed_products_to_update.append(updated_product)
43 # Bulk update the changed products
44 Product.objects.bulk_update(
45 changed_products_to_update, ["minimal_variant_price_amount"]
46 )
47
48
49 def update_products_minimal_variant_prices_of_catalogues(
50 product_ids=None, category_ids=None, collection_ids=None
51 ):
52 # Building the matching products query
53 q_list = []
54 if product_ids:
55 q_list.append(Q(pk__in=product_ids))
56 if category_ids:
57 q_list.append(Q(category_id__in=category_ids))
58 if collection_ids:
59 q_list.append(Q(collectionproduct__collection_id__in=collection_ids))
60 # Asserting that the function was called with some ids
61 if not q_list:
62 raise ValueError(
63 "Provide at least one of the ID lists:\n"
64 "\tproduct_ids,\n"
65 "\tcategory_ids,\n"
66 "\tcollection_ids."
67 )
68 # Querying the products
69 q_or = reduce(operator.or_, q_list)
70 products = Product.objects.filter(q_or).distinct()
71
72 update_products_minimal_variant_prices(products)
73
74
75 def update_products_minimal_variant_prices_of_discount(discount):
76 update_products_minimal_variant_prices_of_catalogues(
77 product_ids=discount.products.all().values_list("id", flat=True),
78 category_ids=discount.categories.all().values_list("id", flat=True),
79 collection_ids=discount.collections.all().values_list("id", flat=True),
80 )
81
[end of saleor/product/utils/variant_prices.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/saleor/product/utils/variant_prices.py b/saleor/product/utils/variant_prices.py
--- a/saleor/product/utils/variant_prices.py
+++ b/saleor/product/utils/variant_prices.py
@@ -58,18 +58,12 @@
if collection_ids:
q_list.append(Q(collectionproduct__collection_id__in=collection_ids))
# Asserting that the function was called with some ids
- if not q_list:
- raise ValueError(
- "Provide at least one of the ID lists:\n"
- "\tproduct_ids,\n"
- "\tcategory_ids,\n"
- "\tcollection_ids."
- )
- # Querying the products
- q_or = reduce(operator.or_, q_list)
- products = Product.objects.filter(q_or).distinct()
+ if q_list:
+ # Querying the products
+ q_or = reduce(operator.or_, q_list)
+ products = Product.objects.filter(q_or).distinct()
- update_products_minimal_variant_prices(products)
+ update_products_minimal_variant_prices(products)
def update_products_minimal_variant_prices_of_discount(discount):
| {"golden_diff": "diff --git a/saleor/product/utils/variant_prices.py b/saleor/product/utils/variant_prices.py\n--- a/saleor/product/utils/variant_prices.py\n+++ b/saleor/product/utils/variant_prices.py\n@@ -58,18 +58,12 @@\n if collection_ids:\n q_list.append(Q(collectionproduct__collection_id__in=collection_ids))\n # Asserting that the function was called with some ids\n- if not q_list:\n- raise ValueError(\n- \"Provide at least one of the ID lists:\\n\"\n- \"\\tproduct_ids,\\n\"\n- \"\\tcategory_ids,\\n\"\n- \"\\tcollection_ids.\"\n- )\n- # Querying the products\n- q_or = reduce(operator.or_, q_list)\n- products = Product.objects.filter(q_or).distinct()\n+ if q_list:\n+ # Querying the products\n+ q_or = reduce(operator.or_, q_list)\n+ products = Product.objects.filter(q_or).distinct()\n \n- update_products_minimal_variant_prices(products)\n+ update_products_minimal_variant_prices(products)\n \n \n def update_products_minimal_variant_prices_of_discount(discount):\n", "issue": "Creating a new sale raises error in Celery task\n### Steps to reproduce the problem\r\n1. Run the following mutation as an admin user (with `MANAGE_DISCOUNTS` permission):\r\n```\r\nmutation {\r\n saleCreate(input: {name: \"Test\"}) {\r\n errors {\r\n field\r\n message\r\n }\r\n sale {\r\n id\r\n name\r\n }\r\n }\r\n}\r\n```\r\n\r\nThe response from API is successful, but in the Django server console I'm getting the following error:\r\n\r\n```\r\nERROR celery.app.trace Task saleor.product.tasks.update_products_minimal_variant_prices_of_discount_task[4ec46245-d1f1-47ae-ab23-0c0ab73a9981] raised unexpected: ValueError('Provide at least one of the ID lists:\\n\\tproduct_ids,\\n\\tcategory_ids,\\n\\tcollection_ids.') [PID:31316:Thread-175]\r\nTraceback (most recent call last):\r\n File \"/Users/marcin/.pyenv/versions/saleor3.8.1/lib/python3.8/site-packages/celery/app/trace.py\", line 385, in trace_task\r\n R = retval = fun(*args, **kwargs)\r\n File \"/Users/marcin/mirumee/saleor-platform/saleor/saleor/product/tasks.py\", line 64, in update_products_minimal_variant_prices_of_discount_task\r\n update_products_minimal_variant_prices_of_discount(discount)\r\n File \"/Users/marcin/mirumee/saleor-platform/saleor/saleor/product/utils/variant_prices.py\", line 76, in update_products_minimal_variant_prices_of_discount\r\n update_products_minimal_variant_prices_of_catalogues(\r\n File \"/Users/marcin/mirumee/saleor-platform/saleor/saleor/product/utils/variant_prices.py\", line 62, in update_products_minimal_variant_prices_of_catalogues\r\n raise ValueError(\r\nValueError: Provide at least one of the ID lists:\r\n\tproduct_ids,\r\n\tcategory_ids,\r\n\tcollection_ids.\r\n```\r\n\r\nI suppose that the Celery task that recalculates minimal variant prices is run even there are no products to update. 
Probably an additional check needs to be added to not run the task in this case.\n", "before_files": [{"content": "import operator\nfrom functools import reduce\n\nfrom django.db.models.query_utils import Q\nfrom prices import Money\n\nfrom ...discount.utils import fetch_active_discounts\nfrom ..models import Product\n\n\ndef _get_product_minimal_variant_price(product, discounts) -> Money:\n # Start with the product's price as the minimal one\n minimal_variant_price = product.price\n for variant in product.variants.all():\n variant_price = variant.get_price(discounts=discounts)\n minimal_variant_price = min(minimal_variant_price, variant_price)\n return minimal_variant_price\n\n\ndef update_product_minimal_variant_price(product, discounts=None, save=True):\n if discounts is None:\n discounts = fetch_active_discounts()\n minimal_variant_price = _get_product_minimal_variant_price(product, discounts)\n if product.minimal_variant_price != minimal_variant_price:\n product.minimal_variant_price_amount = minimal_variant_price.amount\n if save:\n product.save(update_fields=[\"minimal_variant_price_amount\", \"updated_at\"])\n return product\n\n\ndef update_products_minimal_variant_prices(products, discounts=None):\n if discounts is None:\n discounts = fetch_active_discounts()\n changed_products_to_update = []\n for product in products:\n old_minimal_variant_price = product.minimal_variant_price\n updated_product = update_product_minimal_variant_price(\n product, discounts, save=False\n )\n # Check if the \"minimal_variant_price\" has changed\n if updated_product.minimal_variant_price != old_minimal_variant_price:\n changed_products_to_update.append(updated_product)\n # Bulk update the changed products\n Product.objects.bulk_update(\n changed_products_to_update, [\"minimal_variant_price_amount\"]\n )\n\n\ndef update_products_minimal_variant_prices_of_catalogues(\n product_ids=None, category_ids=None, collection_ids=None\n):\n # Building the matching products query\n q_list = []\n if product_ids:\n q_list.append(Q(pk__in=product_ids))\n if category_ids:\n q_list.append(Q(category_id__in=category_ids))\n if collection_ids:\n q_list.append(Q(collectionproduct__collection_id__in=collection_ids))\n # Asserting that the function was called with some ids\n if not q_list:\n raise ValueError(\n \"Provide at least one of the ID lists:\\n\"\n \"\\tproduct_ids,\\n\"\n \"\\tcategory_ids,\\n\"\n \"\\tcollection_ids.\"\n )\n # Querying the products\n q_or = reduce(operator.or_, q_list)\n products = Product.objects.filter(q_or).distinct()\n\n update_products_minimal_variant_prices(products)\n\n\ndef update_products_minimal_variant_prices_of_discount(discount):\n update_products_minimal_variant_prices_of_catalogues(\n product_ids=discount.products.all().values_list(\"id\", flat=True),\n category_ids=discount.categories.all().values_list(\"id\", flat=True),\n collection_ids=discount.collections.all().values_list(\"id\", flat=True),\n )\n", "path": "saleor/product/utils/variant_prices.py"}]} | 1,803 | 255 |
gh_patches_debug_24558 | rasdani/github-patches | git_diff | marshmallow-code__webargs-43 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pyramid parser use_kwargs throws exception when used
The following code using the pyramid parser throws an exception:
``` python
@parser.use_kwargs({'myvalue': Arg(int)})
def baz(request, myvalue):
return {'myvalue': myvalue}
```
The exception:
```
kwargs['as_kwargs'] = True
> return self.use_args(*args, **kwargs)
E TypeError: use_args() got an unexpected keyword argument 'as_kwargs'
```
Pyramid parser use_kwargs throws exception when used
The following code using the pyramid parser throws an exception:
``` python
@parser.use_kwargs({'myvalue': Arg(int)})
def baz(request, myvalue):
return {'myvalue': myvalue}
```
The exception:
```
kwargs['as_kwargs'] = True
> return self.use_args(*args, **kwargs)
E TypeError: use_args() got an unexpected keyword argument 'as_kwargs'
```
</issue>
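A minimal sketch of the missing piece, matching the reference diff further down in this entry: the traceback shows that core `use_kwargs` simply sets `as_kwargs=True` and calls `use_args`, so the Pyramid override of `use_args` has to accept and honor an `as_kwargs` flag. This is how it would sit on `PyramidParser` in `pyramidparser.py`, reusing that module's existing imports:

```python
    def use_args(self, argmap, req=None, locations=core.Parser.DEFAULT_LOCATIONS,
                 as_kwargs=False, validate=None):
        def decorator(func):
            @functools.wraps(func)
            def wrapper(obj, *args, **kwargs):
                try:                    # class-based view: request saved on self
                    request = obj.request
                except AttributeError:  # plain view: first argument is the request
                    request = obj
                parsed_args = self.parse(argmap, req=request, locations=locations,
                                         validate=None)  # kept as in the original
                if as_kwargs:
                    kwargs.update(parsed_args)
                    return func(obj, *args, **kwargs)
                return func(obj, parsed_args, *args, **kwargs)
            return wrapper
        return decorator
```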
<code>
[start of webargs/pyramidparser.py]
1 # -*- coding: utf-8 -*-
2 """Pyramid request argument parsing.
3
4 Example usage: ::
5
6 from wsgiref.simple_server import make_server
7 from pyramid.config import Configurator
8 from pyramid.response import Response
9 from webargs import Arg
10 from webargs.pyramidparser import use_args
11
12 hello_args = {
13 'name': Arg(str, default='World')
14 }
15
16 @use_args(hello_args)
17 def hello_world(request, args):
18 return Response('Hello ' + args['name'])
19
20 if __name__ == '__main__':
21 config = Configurator()
22 config.add_route('hello', '/')
23 config.add_view(hello_world, route_name='hello')
24 app = config.make_wsgi_app()
25 server = make_server('0.0.0.0', 6543, app)
26 server.serve_forever()
27 """
28 import functools
29 import logging
30
31 from webob.multidict import MultiDict
32 from pyramid.httpexceptions import exception_response
33
34 from webargs import core
35 from webargs.core import text_type
36
37 logger = logging.getLogger(__name__)
38
39 class PyramidParser(core.Parser):
40 """Pyramid request argument parser."""
41
42 def parse_querystring(self, req, name, arg):
43 """Pull a querystring value from the request."""
44 return core.get_value(req.GET, name, arg.multiple)
45
46 def parse_form(self, req, name, arg):
47 """Pull a form value from the request."""
48 return core.get_value(req.POST, name, arg.multiple)
49
50 def parse_json(self, req, name, arg):
51 """Pull a json value from the request."""
52 try:
53 json_data = req.json_body
54 except ValueError:
55 return core.Missing
56
57 return core.get_value(json_data, name, arg.multiple)
58
59 def parse_cookies(self, req, name, arg):
60 """Pull the value from the cookiejar."""
61 return core.get_value(req.cookies, name, arg.multiple)
62
63 def parse_headers(self, req, name, arg):
64 """Pull a value from the header data."""
65 return core.get_value(req.headers, name, arg.multiple)
66
67 def parse_files(self, req, name, arg):
68 """Pull a file from the request."""
69 files = ((k, v) for k, v in req.POST.items() if hasattr(v, 'file'))
70 return core.get_value(MultiDict(files), name, arg.multiple)
71
72 def handle_error(self, error):
73 """Handles errors during parsing. Aborts the current HTTP request and
74 responds with a 400 error.
75 """
76 logger.error(error)
77 status_code = getattr(error, 'status_code', 400)
78 data = getattr(error, 'data', {})
79 raise exception_response(status_code, detail=text_type(error), **data)
80
81 def use_args(self, argmap, req=None, locations=core.Parser.DEFAULT_LOCATIONS,
82 validate=None):
83 """Decorator that injects parsed arguments into a view callable.
84 Supports the *Class-based View* pattern where `request` is saved as an instance
85 attribute on a view class.
86
87 :param dict argmap: Dictionary of argument_name:Arg object pairs.
88 :param req: The request object to parse
89 :param tuple locations: Where on the request to search for values.
90 :param callable validate:
91 Validation function that receives the dictionary of parsed arguments.
92 If the function returns ``False``, the parser will raise a
93 :exc:`ValidationError`.
94 """
95 def decorator(func):
96 @functools.wraps(func)
97 def wrapper(obj, *args, **kwargs):
98 # The first argument is either `self` or `request`
99 try: # get self.request
100 request = obj.request
101 except AttributeError: # first arg is request
102 request = obj
103 parsed_args = self.parse(argmap, req=request, locations=locations,
104 validate=None)
105 return func(obj, parsed_args, *args, **kwargs)
106 return wrapper
107 return decorator
108
109 parser = PyramidParser()
110 use_args = parser.use_args
111 use_kwargs = parser.use_kwargs
112
[end of webargs/pyramidparser.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/webargs/pyramidparser.py b/webargs/pyramidparser.py
--- a/webargs/pyramidparser.py
+++ b/webargs/pyramidparser.py
@@ -79,7 +79,7 @@
raise exception_response(status_code, detail=text_type(error), **data)
def use_args(self, argmap, req=None, locations=core.Parser.DEFAULT_LOCATIONS,
- validate=None):
+ as_kwargs=False, validate=None):
"""Decorator that injects parsed arguments into a view callable.
Supports the *Class-based View* pattern where `request` is saved as an instance
attribute on a view class.
@@ -102,7 +102,11 @@
request = obj
parsed_args = self.parse(argmap, req=request, locations=locations,
validate=None)
- return func(obj, parsed_args, *args, **kwargs)
+ if as_kwargs:
+ kwargs.update(parsed_args)
+ return func(obj, *args, **kwargs)
+ else:
+ return func(obj, parsed_args, *args, **kwargs)
return wrapper
return decorator
| {"golden_diff": "diff --git a/webargs/pyramidparser.py b/webargs/pyramidparser.py\n--- a/webargs/pyramidparser.py\n+++ b/webargs/pyramidparser.py\n@@ -79,7 +79,7 @@\n raise exception_response(status_code, detail=text_type(error), **data)\n \n def use_args(self, argmap, req=None, locations=core.Parser.DEFAULT_LOCATIONS,\n- validate=None):\n+ as_kwargs=False, validate=None):\n \"\"\"Decorator that injects parsed arguments into a view callable.\n Supports the *Class-based View* pattern where `request` is saved as an instance\n attribute on a view class.\n@@ -102,7 +102,11 @@\n request = obj\n parsed_args = self.parse(argmap, req=request, locations=locations,\n validate=None)\n- return func(obj, parsed_args, *args, **kwargs)\n+ if as_kwargs:\n+ kwargs.update(parsed_args)\n+ return func(obj, *args, **kwargs)\n+ else:\n+ return func(obj, parsed_args, *args, **kwargs)\n return wrapper\n return decorator\n", "issue": "Pyramid parser use_kwargs throws exception when used\nThe following code using the pyramid parser throws an exception:\n\n``` python\[email protected]_kwargs({'myvalue': Arg(int)})\ndef baz(request, myvalue):\n return {'myvalue': myvalue}\n```\n\nThe exception:\n\n```\n kwargs['as_kwargs'] = True\n> return self.use_args(*args, **kwargs)\nE TypeError: use_args() got an unexpected keyword argument 'as_kwargs'\n```\n\nPyramid parser use_kwargs throws exception when used\nThe following code using the pyramid parser throws an exception:\n\n``` python\[email protected]_kwargs({'myvalue': Arg(int)})\ndef baz(request, myvalue):\n return {'myvalue': myvalue}\n```\n\nThe exception:\n\n```\n kwargs['as_kwargs'] = True\n> return self.use_args(*args, **kwargs)\nE TypeError: use_args() got an unexpected keyword argument 'as_kwargs'\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Pyramid request argument parsing.\n\nExample usage: ::\n\n from wsgiref.simple_server import make_server\n from pyramid.config import Configurator\n from pyramid.response import Response\n from webargs import Arg\n from webargs.pyramidparser import use_args\n\n hello_args = {\n 'name': Arg(str, default='World')\n }\n\n @use_args(hello_args)\n def hello_world(request, args):\n return Response('Hello ' + args['name'])\n\n if __name__ == '__main__':\n config = Configurator()\n config.add_route('hello', '/')\n config.add_view(hello_world, route_name='hello')\n app = config.make_wsgi_app()\n server = make_server('0.0.0.0', 6543, app)\n server.serve_forever()\n\"\"\"\nimport functools\nimport logging\n\nfrom webob.multidict import MultiDict\nfrom pyramid.httpexceptions import exception_response\n\nfrom webargs import core\nfrom webargs.core import text_type\n\nlogger = logging.getLogger(__name__)\n\nclass PyramidParser(core.Parser):\n \"\"\"Pyramid request argument parser.\"\"\"\n\n def parse_querystring(self, req, name, arg):\n \"\"\"Pull a querystring value from the request.\"\"\"\n return core.get_value(req.GET, name, arg.multiple)\n\n def parse_form(self, req, name, arg):\n \"\"\"Pull a form value from the request.\"\"\"\n return core.get_value(req.POST, name, arg.multiple)\n\n def parse_json(self, req, name, arg):\n \"\"\"Pull a json value from the request.\"\"\"\n try:\n json_data = req.json_body\n except ValueError:\n return core.Missing\n\n return core.get_value(json_data, name, arg.multiple)\n\n def parse_cookies(self, req, name, arg):\n \"\"\"Pull the value from the cookiejar.\"\"\"\n return core.get_value(req.cookies, name, arg.multiple)\n\n def parse_headers(self, req, name, arg):\n 
\"\"\"Pull a value from the header data.\"\"\"\n return core.get_value(req.headers, name, arg.multiple)\n\n def parse_files(self, req, name, arg):\n \"\"\"Pull a file from the request.\"\"\"\n files = ((k, v) for k, v in req.POST.items() if hasattr(v, 'file'))\n return core.get_value(MultiDict(files), name, arg.multiple)\n\n def handle_error(self, error):\n \"\"\"Handles errors during parsing. Aborts the current HTTP request and\n responds with a 400 error.\n \"\"\"\n logger.error(error)\n status_code = getattr(error, 'status_code', 400)\n data = getattr(error, 'data', {})\n raise exception_response(status_code, detail=text_type(error), **data)\n\n def use_args(self, argmap, req=None, locations=core.Parser.DEFAULT_LOCATIONS,\n validate=None):\n \"\"\"Decorator that injects parsed arguments into a view callable.\n Supports the *Class-based View* pattern where `request` is saved as an instance\n attribute on a view class.\n\n :param dict argmap: Dictionary of argument_name:Arg object pairs.\n :param req: The request object to parse\n :param tuple locations: Where on the request to search for values.\n :param callable validate:\n Validation function that receives the dictionary of parsed arguments.\n If the function returns ``False``, the parser will raise a\n :exc:`ValidationError`.\n \"\"\"\n def decorator(func):\n @functools.wraps(func)\n def wrapper(obj, *args, **kwargs):\n # The first argument is either `self` or `request`\n try: # get self.request\n request = obj.request\n except AttributeError: # first arg is request\n request = obj\n parsed_args = self.parse(argmap, req=request, locations=locations,\n validate=None)\n return func(obj, parsed_args, *args, **kwargs)\n return wrapper\n return decorator\n\nparser = PyramidParser()\nuse_args = parser.use_args\nuse_kwargs = parser.use_kwargs\n", "path": "webargs/pyramidparser.py"}]} | 1,843 | 245 |
gh_patches_debug_10387 | rasdani/github-patches | git_diff | WordPress__openverse-api-727 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Possibly make `thumbnail` null for audio files without artwork
## Description
<!-- Concisely describe the bug. -->
Currently the frontend tries to fetch thumbnails for all audio files regardless of whether the audio file in question has one or not.
I noticed that the API returns the thumbnail URL for all tracks. That makes sense, but could we improve this to be `null` for audio tracks without artwork? Then we could check the field in the frontend before making a network request.
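One common DRF pattern for this (just a sketch — the mixin name is made up and this is not the project's actual change): null out the field at serialization time when the instance has no artwork.

```
class NullableThumbnailMixin:
    """Serializer mixin: report `thumbnail` as None when the track has no artwork."""

    def to_representation(self, instance):
        data = super().to_representation(instance)
        if not getattr(instance, "thumbnail", None):
            data["thumbnail"] = None
        return data
```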
</issue>
<code>
[start of api/catalog/api/serializers/audio_serializers.py]
1 from rest_framework import serializers
2
3 from elasticsearch_dsl.response import Hit
4
5 from catalog.api.constants.field_order import field_position_map
6 from catalog.api.constants.field_values import AUDIO_CATEGORIES, LENGTHS
7 from catalog.api.docs.media_docs import fields_to_md
8 from catalog.api.models import Audio, AudioReport, AudioSet
9 from catalog.api.serializers.fields import (
10 EnumCharField,
11 SchemableHyperlinkedIdentityField,
12 )
13 from catalog.api.serializers.media_serializers import (
14 MediaReportRequestSerializer,
15 MediaSearchRequestSerializer,
16 MediaSearchSerializer,
17 MediaSerializer,
18 get_hyperlinks_serializer,
19 get_search_request_source_serializer,
20 )
21
22
23 #######################
24 # Request serializers #
25 #######################
26
27
28 AudioSearchRequestSourceSerializer = get_search_request_source_serializer("audio")
29
30
31 class AudioSearchRequestSerializer(
32 AudioSearchRequestSourceSerializer,
33 MediaSearchRequestSerializer,
34 ):
35 """Parse and validate search query string parameters."""
36
37 fields_names = [
38 *MediaSearchRequestSerializer.fields_names,
39 *AudioSearchRequestSourceSerializer.field_names,
40 "category",
41 "length",
42 ]
43 """
44 Keep the fields names in sync with the actual fields below as this list is
45 used to generate Swagger documentation.
46 """
47
48 category = EnumCharField(
49 plural="categories",
50 enum_class=AUDIO_CATEGORIES,
51 required=False,
52 )
53 length = EnumCharField(
54 plural="lengths",
55 enum_class=LENGTHS,
56 required=False,
57 )
58
59
60 class AudioReportRequestSerializer(MediaReportRequestSerializer):
61 class Meta(MediaReportRequestSerializer.Meta):
62 model = AudioReport
63
64
65 ########################
66 # Response serializers #
67 ########################
68
69
70 class AudioSetSerializer(serializers.ModelSerializer):
71 """An audio set, rendered as a part of the ``AudioSerializer`` output."""
72
73 class Meta:
74 model = AudioSet
75 fields = [
76 "title",
77 "foreign_landing_url",
78 "creator",
79 "creator_url",
80 "url",
81 "filesize",
82 "filetype",
83 ]
84
85
86 AudioHyperlinksSerializer = get_hyperlinks_serializer("audio")
87
88
89 class AudioSerializer(AudioHyperlinksSerializer, MediaSerializer):
90 """A single audio file. Used in search results."""
91
92 class Meta:
93 model = Audio
94 fields = sorted( # keep this list ordered logically
95 [
96 *MediaSerializer.Meta.fields,
97 *AudioHyperlinksSerializer.field_names,
98 "genres",
99 "alt_files",
100 "audio_set",
101 "duration",
102 "bit_rate",
103 "sample_rate",
104 "waveform", # hyperlink to the endpoint that generates the waveform
105 "peaks", # waveform peaks, if they have already been generated
106 ],
107 key=lambda val: field_position_map.get(val, 999),
108 )
109 """
110 Keep the fields names in sync with the actual fields below as this list is
111 used to generate Swagger documentation.
112 """
113
114 audio_set = AudioSetSerializer(
115 allow_null=True,
116 help_text="Reference to set of which this track is a part.",
117 read_only=True,
118 )
119
120 waveform = SchemableHyperlinkedIdentityField(
121 read_only=True,
122 view_name="audio-waveform",
123 lookup_field="identifier",
124 help_text="A direct link to the waveform peaks.",
125 )
126
127 # Add-on data
128 peaks = serializers.SerializerMethodField(
129 help_text="The list of peaks used to generate the waveform for the audio."
130 )
131
132 @staticmethod
133 def get_peaks(obj) -> list[int]:
134 if isinstance(obj, Hit):
135 obj = Audio.objects.get(identifier=obj.identifier)
136 return obj.get_waveform()
137
138
139 class AudioSearchSerializer(MediaSearchSerializer):
140 """
141 The full audio search response.
142 This serializer is purely representational and not actually used to
143 serialize the response.
144 """
145
146 results = AudioSerializer(
147 many=True,
148 help_text=(
149 "An array of audios and their details such as "
150 f"{fields_to_md(AudioSerializer.Meta.fields)}."
151 ),
152 )
153
154
155 ##########################
156 # Additional serializers #
157 ##########################
158
159
160 class AudioWaveformSerializer(serializers.Serializer):
161 len = serializers.SerializerMethodField()
162 points = serializers.ListField(
163 child=serializers.FloatField(min_value=0, max_value=1)
164 )
165
166 @staticmethod
167 def get_len(obj) -> int:
168 return len(obj.get("points", []))
169
[end of api/catalog/api/serializers/audio_serializers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/api/catalog/api/serializers/audio_serializers.py b/api/catalog/api/serializers/audio_serializers.py
--- a/api/catalog/api/serializers/audio_serializers.py
+++ b/api/catalog/api/serializers/audio_serializers.py
@@ -135,6 +135,18 @@
obj = Audio.objects.get(identifier=obj.identifier)
return obj.get_waveform()
+ def to_representation(self, instance):
+ # Get the original representation
+ output = super().to_representation(instance)
+
+ if isinstance(instance, Hit):
+ # TODO: Remove when updating ES indexes
+ audio = Audio.objects.get(identifier=instance.identifier)
+ if not audio.thumbnail:
+ output["thumbnail"] = None
+
+ return output
+
class AudioSearchSerializer(MediaSearchSerializer):
"""
| {"golden_diff": "diff --git a/api/catalog/api/serializers/audio_serializers.py b/api/catalog/api/serializers/audio_serializers.py\n--- a/api/catalog/api/serializers/audio_serializers.py\n+++ b/api/catalog/api/serializers/audio_serializers.py\n@@ -135,6 +135,18 @@\n obj = Audio.objects.get(identifier=obj.identifier)\n return obj.get_waveform()\n \n+ def to_representation(self, instance):\n+ # Get the original representation\n+ output = super().to_representation(instance)\n+\n+ if isinstance(instance, Hit):\n+ # TODO: Remove when updating ES indexes\n+ audio = Audio.objects.get(identifier=instance.identifier)\n+ if not audio.thumbnail:\n+ output[\"thumbnail\"] = None\n+\n+ return output\n+\n \n class AudioSearchSerializer(MediaSearchSerializer):\n \"\"\"\n", "issue": "Possibly make `thumbnail` null for audio files without artwork\n## Description\r\n<!-- Concisely describe the bug. -->\r\n\r\nCurrently the frontend tries to fetch thumbnails for all audio files regardless of whether the audio file in question has one or not. \r\nI noticed that the API returns the thumbnail URL for all tracks. That makes sense, but could we improve this to be `null` for audio tracks without artwork? Then we could check the field in the frontend before making a network request.\n", "before_files": [{"content": "from rest_framework import serializers\n\nfrom elasticsearch_dsl.response import Hit\n\nfrom catalog.api.constants.field_order import field_position_map\nfrom catalog.api.constants.field_values import AUDIO_CATEGORIES, LENGTHS\nfrom catalog.api.docs.media_docs import fields_to_md\nfrom catalog.api.models import Audio, AudioReport, AudioSet\nfrom catalog.api.serializers.fields import (\n EnumCharField,\n SchemableHyperlinkedIdentityField,\n)\nfrom catalog.api.serializers.media_serializers import (\n MediaReportRequestSerializer,\n MediaSearchRequestSerializer,\n MediaSearchSerializer,\n MediaSerializer,\n get_hyperlinks_serializer,\n get_search_request_source_serializer,\n)\n\n\n#######################\n# Request serializers #\n#######################\n\n\nAudioSearchRequestSourceSerializer = get_search_request_source_serializer(\"audio\")\n\n\nclass AudioSearchRequestSerializer(\n AudioSearchRequestSourceSerializer,\n MediaSearchRequestSerializer,\n):\n \"\"\"Parse and validate search query string parameters.\"\"\"\n\n fields_names = [\n *MediaSearchRequestSerializer.fields_names,\n *AudioSearchRequestSourceSerializer.field_names,\n \"category\",\n \"length\",\n ]\n \"\"\"\n Keep the fields names in sync with the actual fields below as this list is\n used to generate Swagger documentation.\n \"\"\"\n\n category = EnumCharField(\n plural=\"categories\",\n enum_class=AUDIO_CATEGORIES,\n required=False,\n )\n length = EnumCharField(\n plural=\"lengths\",\n enum_class=LENGTHS,\n required=False,\n )\n\n\nclass AudioReportRequestSerializer(MediaReportRequestSerializer):\n class Meta(MediaReportRequestSerializer.Meta):\n model = AudioReport\n\n\n########################\n# Response serializers #\n########################\n\n\nclass AudioSetSerializer(serializers.ModelSerializer):\n \"\"\"An audio set, rendered as a part of the ``AudioSerializer`` output.\"\"\"\n\n class Meta:\n model = AudioSet\n fields = [\n \"title\",\n \"foreign_landing_url\",\n \"creator\",\n \"creator_url\",\n \"url\",\n \"filesize\",\n \"filetype\",\n ]\n\n\nAudioHyperlinksSerializer = get_hyperlinks_serializer(\"audio\")\n\n\nclass AudioSerializer(AudioHyperlinksSerializer, MediaSerializer):\n \"\"\"A single audio file. 
Used in search results.\"\"\"\n\n class Meta:\n model = Audio\n fields = sorted( # keep this list ordered logically\n [\n *MediaSerializer.Meta.fields,\n *AudioHyperlinksSerializer.field_names,\n \"genres\",\n \"alt_files\",\n \"audio_set\",\n \"duration\",\n \"bit_rate\",\n \"sample_rate\",\n \"waveform\", # hyperlink to the endpoint that generates the waveform\n \"peaks\", # waveform peaks, if they have already been generated\n ],\n key=lambda val: field_position_map.get(val, 999),\n )\n \"\"\"\n Keep the fields names in sync with the actual fields below as this list is\n used to generate Swagger documentation.\n \"\"\"\n\n audio_set = AudioSetSerializer(\n allow_null=True,\n help_text=\"Reference to set of which this track is a part.\",\n read_only=True,\n )\n\n waveform = SchemableHyperlinkedIdentityField(\n read_only=True,\n view_name=\"audio-waveform\",\n lookup_field=\"identifier\",\n help_text=\"A direct link to the waveform peaks.\",\n )\n\n # Add-on data\n peaks = serializers.SerializerMethodField(\n help_text=\"The list of peaks used to generate the waveform for the audio.\"\n )\n\n @staticmethod\n def get_peaks(obj) -> list[int]:\n if isinstance(obj, Hit):\n obj = Audio.objects.get(identifier=obj.identifier)\n return obj.get_waveform()\n\n\nclass AudioSearchSerializer(MediaSearchSerializer):\n \"\"\"\n The full audio search response.\n This serializer is purely representational and not actually used to\n serialize the response.\n \"\"\"\n\n results = AudioSerializer(\n many=True,\n help_text=(\n \"An array of audios and their details such as \"\n f\"{fields_to_md(AudioSerializer.Meta.fields)}.\"\n ),\n )\n\n\n##########################\n# Additional serializers #\n##########################\n\n\nclass AudioWaveformSerializer(serializers.Serializer):\n len = serializers.SerializerMethodField()\n points = serializers.ListField(\n child=serializers.FloatField(min_value=0, max_value=1)\n )\n\n @staticmethod\n def get_len(obj) -> int:\n return len(obj.get(\"points\", []))\n", "path": "api/catalog/api/serializers/audio_serializers.py"}]} | 1,969 | 179 |
gh_patches_debug_25250 | rasdani/github-patches | git_diff | pre-commit__pre-commit-193 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
^C^C during installation may leave pre-commit in a bad state
There's code which handles the first ^C; however, I think the second one (during execution of the finally block) may not be handled well. I probably need to make the cleanup atomic somehow...
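A sketch of one way to do that (`do_install` here is a hypothetical callback, not pre-commit's actual API): record successful installs with a marker file, so a later run can detect and wipe a half-finished environment.

```
import os
import shutil

def install_if_needed(env_dir, do_install):
    marker = os.path.join(env_dir, '.installed')
    if os.path.exists(marker):
        return  # a previous run completed the install
    if os.path.exists(env_dir):
        shutil.rmtree(env_dir)  # leftover from an interrupted run; start over
    do_install(env_dir)
    # Create the marker only after the install finished; file creation is atomic.
    open(marker, 'w').close()
```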
</issue>
<code>
[start of pre_commit/repository.py]
1 from __future__ import unicode_literals
2
3 from cached_property import cached_property
4
5 from pre_commit.languages.all import languages
6 from pre_commit.manifest import Manifest
7 from pre_commit.prefixed_command_runner import PrefixedCommandRunner
8
9
10 class Repository(object):
11 def __init__(self, repo_config, repo_path_getter):
12 self.repo_config = repo_config
13 self.repo_path_getter = repo_path_getter
14 self.__installed = False
15
16 @classmethod
17 def create(cls, config, store):
18 repo_path_getter = store.get_repo_path_getter(
19 config['repo'], config['sha']
20 )
21 return cls(config, repo_path_getter)
22
23 @cached_property
24 def repo_url(self):
25 return self.repo_config['repo']
26
27 @cached_property
28 def sha(self):
29 return self.repo_config['sha']
30
31 @cached_property
32 def languages(self):
33 return set(
34 (hook['language'], hook['language_version'])
35 for _, hook in self.hooks
36 )
37
38 @cached_property
39 def hooks(self):
40 # TODO: merging in manifest dicts is a smell imo
41 return tuple(
42 (hook['id'], dict(self.manifest.hooks[hook['id']], **hook))
43 for hook in self.repo_config['hooks']
44 )
45
46 @cached_property
47 def manifest(self):
48 return Manifest(self.repo_path_getter)
49
50 @cached_property
51 def cmd_runner(self):
52 return PrefixedCommandRunner(self.repo_path_getter.repo_path)
53
54 def require_installed(self):
55 if self.__installed:
56 return
57
58 self.install()
59 self.__installed = True
60
61 def install(self):
62 """Install the hook repository."""
63 for language_name, language_version in self.languages:
64 language = languages[language_name]
65 if (
66 language.ENVIRONMENT_DIR is None or
67 self.cmd_runner.exists(language.ENVIRONMENT_DIR)
68 ):
69 # The language is already installed
70 continue
71 language.install_environment(self.cmd_runner, language_version)
72
73 def run_hook(self, hook, file_args):
74 """Run a hook.
75
76 Args:
77 hook - Hook dictionary
78 file_args - List of files to run
79 """
80 self.require_installed()
81 return languages[hook['language']].run_hook(
82 self.cmd_runner, hook, file_args,
83 )
84
[end of pre_commit/repository.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/repository.py b/pre_commit/repository.py
--- a/pre_commit/repository.py
+++ b/pre_commit/repository.py
@@ -1,5 +1,7 @@
from __future__ import unicode_literals
+import shutil
+
from cached_property import cached_property
from pre_commit.languages.all import languages
@@ -64,11 +66,21 @@
language = languages[language_name]
if (
language.ENVIRONMENT_DIR is None or
- self.cmd_runner.exists(language.ENVIRONMENT_DIR)
+ self.cmd_runner.exists(language.ENVIRONMENT_DIR, '.installed')
):
# The language is already installed
continue
+ # There's potentially incomplete cleanup from previous runs
+ # Clean it up!
+ if self.cmd_runner.exists(language.ENVIRONMENT_DIR):
+ shutil.rmtree(self.cmd_runner.path(language.ENVIRONMENT_DIR))
+
language.install_environment(self.cmd_runner, language_version)
+ # Touch the .installed file (atomic) to indicate we've installed
+ open(
+ self.cmd_runner.path(language.ENVIRONMENT_DIR, '.installed'),
+ 'w',
+ ).close()
def run_hook(self, hook, file_args):
"""Run a hook.
| {"golden_diff": "diff --git a/pre_commit/repository.py b/pre_commit/repository.py\n--- a/pre_commit/repository.py\n+++ b/pre_commit/repository.py\n@@ -1,5 +1,7 @@\n from __future__ import unicode_literals\n \n+import shutil\n+\n from cached_property import cached_property\n \n from pre_commit.languages.all import languages\n@@ -64,11 +66,21 @@\n language = languages[language_name]\n if (\n language.ENVIRONMENT_DIR is None or\n- self.cmd_runner.exists(language.ENVIRONMENT_DIR)\n+ self.cmd_runner.exists(language.ENVIRONMENT_DIR, '.installed')\n ):\n # The language is already installed\n continue\n+ # There's potentially incomplete cleanup from previous runs\n+ # Clean it up!\n+ if self.cmd_runner.exists(language.ENVIRONMENT_DIR):\n+ shutil.rmtree(self.cmd_runner.path(language.ENVIRONMENT_DIR))\n+\n language.install_environment(self.cmd_runner, language_version)\n+ # Touch the .installed file (atomic) to indicate we've installed\n+ open(\n+ self.cmd_runner.path(language.ENVIRONMENT_DIR, '.installed'),\n+ 'w',\n+ ).close()\n \n def run_hook(self, hook, file_args):\n \"\"\"Run a hook.\n", "issue": "^C^C during installation may leave pre-commit in a bad state\nThere's code which handles the first ^C, however I think the second one (during execution of the finally block) may not be handled well. I probably need to make the cleanup atomic somehow...\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom cached_property import cached_property\n\nfrom pre_commit.languages.all import languages\nfrom pre_commit.manifest import Manifest\nfrom pre_commit.prefixed_command_runner import PrefixedCommandRunner\n\n\nclass Repository(object):\n def __init__(self, repo_config, repo_path_getter):\n self.repo_config = repo_config\n self.repo_path_getter = repo_path_getter\n self.__installed = False\n\n @classmethod\n def create(cls, config, store):\n repo_path_getter = store.get_repo_path_getter(\n config['repo'], config['sha']\n )\n return cls(config, repo_path_getter)\n\n @cached_property\n def repo_url(self):\n return self.repo_config['repo']\n\n @cached_property\n def sha(self):\n return self.repo_config['sha']\n\n @cached_property\n def languages(self):\n return set(\n (hook['language'], hook['language_version'])\n for _, hook in self.hooks\n )\n\n @cached_property\n def hooks(self):\n # TODO: merging in manifest dicts is a smell imo\n return tuple(\n (hook['id'], dict(self.manifest.hooks[hook['id']], **hook))\n for hook in self.repo_config['hooks']\n )\n\n @cached_property\n def manifest(self):\n return Manifest(self.repo_path_getter)\n\n @cached_property\n def cmd_runner(self):\n return PrefixedCommandRunner(self.repo_path_getter.repo_path)\n\n def require_installed(self):\n if self.__installed:\n return\n\n self.install()\n self.__installed = True\n\n def install(self):\n \"\"\"Install the hook repository.\"\"\"\n for language_name, language_version in self.languages:\n language = languages[language_name]\n if (\n language.ENVIRONMENT_DIR is None or\n self.cmd_runner.exists(language.ENVIRONMENT_DIR)\n ):\n # The language is already installed\n continue\n language.install_environment(self.cmd_runner, language_version)\n\n def run_hook(self, hook, file_args):\n \"\"\"Run a hook.\n\n Args:\n hook - Hook dictionary\n file_args - List of files to run\n \"\"\"\n self.require_installed()\n return languages[hook['language']].run_hook(\n self.cmd_runner, hook, file_args,\n )\n", "path": "pre_commit/repository.py"}]} | 1,250 | 264 |
gh_patches_debug_27672 | rasdani/github-patches | git_diff | bids-standard__pybids-589 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
model: JSON to dict modified key values for transformation
In the `Replace` transformation, you specify which values to replace as a dict.
e.g.:
```
{'LIKELY': "5"}
```
However, the parser that converts BIDS Stats Models from JSON to dict lower-cases keys, which changes the transformation itself when the mapping contains case-sensitive keys such as `'LIKELY'`.
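For illustration, a minimal sketch of the effect (the surrounding model structure here is made up; only the key handling matters):

```
from bids.utils import convert_JSON

model = {"Transformations": [
    {"Name": "Replace", "Input": ["trial_type"], "Replace": {"LIKELY": "5"}}
]}

converted = convert_JSON(model)
# The case-sensitive key 'LIKELY' has been lower-cased, silently changing the mapping
print(converted["transformations"][0]["replace"])  # {'likely': '5'}
```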
</issue>
<code>
[start of bids/utils.py]
1 """ Utility functions. """
2
3 import re
4 import os
5
6
7 def listify(obj):
8 ''' Wraps all non-list or tuple objects in a list; provides a simple way
9 to accept flexible arguments. '''
10 return obj if isinstance(obj, (list, tuple, type(None))) else [obj]
11
12
13 def matches_entities(obj, entities, strict=False):
14 ''' Checks whether an object's entities match the input. '''
15 if strict and set(obj.entities.keys()) != set(entities.keys()):
16 return False
17
18 comm_ents = list(set(obj.entities.keys()) & set(entities.keys()))
19 for k in comm_ents:
20 current = obj.entities[k]
21 target = entities[k]
22 if isinstance(target, (list, tuple)):
23 if current not in target:
24 return False
25 elif current != target:
26 return False
27 return True
28
29
30 def natural_sort(l, field=None):
31 '''
32 based on snippet found at http://stackoverflow.com/a/4836734/2445984
33 '''
34 convert = lambda text: int(text) if text.isdigit() else text.lower()
35
36 def alphanum_key(key):
37 if field is not None:
38 key = getattr(key, field)
39 if not isinstance(key, str):
40 key = str(key)
41 return [convert(c) for c in re.split('([0-9]+)', key)]
42 return sorted(l, key=alphanum_key)
43
44
45 def convert_JSON(j):
46 """ Recursively convert CamelCase keys to snake_case.
47 From: https://stackoverflow.com/questions/17156078/converting-identifier-naming-between-camelcase-and-underscores-during-json-seria
48 """
49
50 def camel_to_snake(s):
51 a = re.compile('((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))')
52 return a.sub(r'_\1', s).lower()
53
54 def convertArray(a):
55 newArr = []
56 for i in a:
57 if isinstance(i,list):
58 newArr.append(convertArray(i))
59 elif isinstance(i, dict):
60 newArr.append(convert_JSON(i))
61 else:
62 newArr.append(i)
63 return newArr
64
65 out = {}
66 for k, value in j.items():
67 newK = camel_to_snake(k)
68
69 if isinstance(value, dict):
70 out[newK] = convert_JSON(value)
71 elif isinstance(value, list):
72 out[newK] = convertArray(value)
73 else:
74 out[newK] = value
75
76 return out
77
78
79 def splitext(path):
80 """splitext for paths with directories that may contain dots.
81 From https://stackoverflow.com/questions/5930036/separating-file-extensions-using-python-os-path-module"""
82 li = []
83 path_without_extensions = os.path.join(os.path.dirname(path),
84 os.path.basename(path).split(os.extsep)[0])
85 extensions = os.path.basename(path).split(os.extsep)[1:]
86 li.append(path_without_extensions)
87 # li.append(extensions) if you want extensions in another list inside the list that is returned.
88 li.extend(extensions)
89 return li
90
91
92 def make_bidsfile(filename):
93 """Create a BIDSFile instance of the appropriate class. """
94 from .layout import models
95
96 patt = re.compile("[._]*[a-zA-Z0-9]*?\\.([^/\\\\]+)$")
97 m = re.search(patt, filename)
98
99 ext = None if not m else m.group(1)
100
101 if ext in ['nii', 'nii.gz']:
102 cls = 'BIDSImageFile'
103 elif ext in ['tsv', 'tsv.gz']:
104 cls = 'BIDSDataFile'
105 elif ext == 'json':
106 cls = 'BIDSJSONFile'
107 else:
108 cls = 'BIDSFile'
109
110 Cls = getattr(models, cls)
111 return Cls(filename)
112
[end of bids/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bids/utils.py b/bids/utils.py
--- a/bids/utils.py
+++ b/bids/utils.py
@@ -44,9 +44,10 @@
def convert_JSON(j):
""" Recursively convert CamelCase keys to snake_case.
- From: https://stackoverflow.com/questions/17156078/converting-identifier-naming-between-camelcase-and-underscores-during-json-seria
+ From: https://stackoverflow.com/questions/17156078/
+ converting-identifier-naming-between-camelcase-and-
+ underscores-during-json-seria
"""
-
def camel_to_snake(s):
a = re.compile('((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))')
return a.sub(r'_\1', s).lower()
@@ -54,7 +55,7 @@
def convertArray(a):
newArr = []
for i in a:
- if isinstance(i,list):
+ if isinstance(i, list):
newArr.append(convertArray(i))
elif isinstance(i, dict):
newArr.append(convert_JSON(i))
@@ -66,7 +67,8 @@
for k, value in j.items():
newK = camel_to_snake(k)
- if isinstance(value, dict):
+ # Replace transformation uses a dict, so skip lower-casing
+ if isinstance(value, dict) and k != 'Replace':
out[newK] = convert_JSON(value)
elif isinstance(value, list):
out[newK] = convertArray(value)
| {"golden_diff": "diff --git a/bids/utils.py b/bids/utils.py\n--- a/bids/utils.py\n+++ b/bids/utils.py\n@@ -44,9 +44,10 @@\n \n def convert_JSON(j):\n \"\"\" Recursively convert CamelCase keys to snake_case.\n- From: https://stackoverflow.com/questions/17156078/converting-identifier-naming-between-camelcase-and-underscores-during-json-seria\n+ From: https://stackoverflow.com/questions/17156078/\n+ converting-identifier-naming-between-camelcase-and-\n+ underscores-during-json-seria\n \"\"\"\n-\n def camel_to_snake(s):\n a = re.compile('((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))')\n return a.sub(r'_\\1', s).lower()\n@@ -54,7 +55,7 @@\n def convertArray(a):\n newArr = []\n for i in a:\n- if isinstance(i,list):\n+ if isinstance(i, list):\n newArr.append(convertArray(i))\n elif isinstance(i, dict):\n newArr.append(convert_JSON(i))\n@@ -66,7 +67,8 @@\n for k, value in j.items():\n newK = camel_to_snake(k)\n \n- if isinstance(value, dict):\n+ # Replace transformation uses a dict, so skip lower-casing\n+ if isinstance(value, dict) and k != 'Replace':\n out[newK] = convert_JSON(value)\n elif isinstance(value, list):\n out[newK] = convertArray(value)\n", "issue": "model: JSON to dict modified key values for transformation\nIn ` Replace` transformation, you specify as a dict which variables to transform.\r\n\r\ne.g.:\r\n\r\n```\r\n{'LIKELY': \"5\"}\r\n```\r\n\r\nHowever, the parser from JSON to dict to convert BIDS Stats Models modifies keys to lower case, which in the case of specific case sensitive values modifies the transformation itself.\n", "before_files": [{"content": "\"\"\" Utility functions. \"\"\"\n\nimport re\nimport os\n\n\ndef listify(obj):\n ''' Wraps all non-list or tuple objects in a list; provides a simple way\n to accept flexible arguments. '''\n return obj if isinstance(obj, (list, tuple, type(None))) else [obj]\n\n\ndef matches_entities(obj, entities, strict=False):\n ''' Checks whether an object's entities match the input. 
'''\n if strict and set(obj.entities.keys()) != set(entities.keys()):\n return False\n\n comm_ents = list(set(obj.entities.keys()) & set(entities.keys()))\n for k in comm_ents:\n current = obj.entities[k]\n target = entities[k]\n if isinstance(target, (list, tuple)):\n if current not in target:\n return False\n elif current != target:\n return False\n return True\n\n\ndef natural_sort(l, field=None):\n '''\n based on snippet found at http://stackoverflow.com/a/4836734/2445984\n '''\n convert = lambda text: int(text) if text.isdigit() else text.lower()\n\n def alphanum_key(key):\n if field is not None:\n key = getattr(key, field)\n if not isinstance(key, str):\n key = str(key)\n return [convert(c) for c in re.split('([0-9]+)', key)]\n return sorted(l, key=alphanum_key)\n\n\ndef convert_JSON(j):\n \"\"\" Recursively convert CamelCase keys to snake_case.\n From: https://stackoverflow.com/questions/17156078/converting-identifier-naming-between-camelcase-and-underscores-during-json-seria\n \"\"\"\n\n def camel_to_snake(s):\n a = re.compile('((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))')\n return a.sub(r'_\\1', s).lower()\n\n def convertArray(a):\n newArr = []\n for i in a:\n if isinstance(i,list):\n newArr.append(convertArray(i))\n elif isinstance(i, dict):\n newArr.append(convert_JSON(i))\n else:\n newArr.append(i)\n return newArr\n\n out = {}\n for k, value in j.items():\n newK = camel_to_snake(k)\n\n if isinstance(value, dict):\n out[newK] = convert_JSON(value)\n elif isinstance(value, list):\n out[newK] = convertArray(value)\n else:\n out[newK] = value\n\n return out\n\n\ndef splitext(path):\n \"\"\"splitext for paths with directories that may contain dots.\n From https://stackoverflow.com/questions/5930036/separating-file-extensions-using-python-os-path-module\"\"\"\n li = []\n path_without_extensions = os.path.join(os.path.dirname(path),\n os.path.basename(path).split(os.extsep)[0])\n extensions = os.path.basename(path).split(os.extsep)[1:]\n li.append(path_without_extensions)\n # li.append(extensions) if you want extensions in another list inside the list that is returned.\n li.extend(extensions)\n return li\n\n\ndef make_bidsfile(filename):\n \"\"\"Create a BIDSFile instance of the appropriate class. \"\"\"\n from .layout import models\n\n patt = re.compile(\"[._]*[a-zA-Z0-9]*?\\\\.([^/\\\\\\\\]+)$\")\n m = re.search(patt, filename)\n\n ext = None if not m else m.group(1)\n\n if ext in ['nii', 'nii.gz']:\n cls = 'BIDSImageFile'\n elif ext in ['tsv', 'tsv.gz']:\n cls = 'BIDSDataFile'\n elif ext == 'json':\n cls = 'BIDSJSONFile'\n else:\n cls = 'BIDSFile'\n\n Cls = getattr(models, cls)\n return Cls(filename)\n", "path": "bids/utils.py"}]} | 1,684 | 354 |
gh_patches_debug_20588 | rasdani/github-patches | git_diff | dotkom__onlineweb4-812 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Hide attendanceevent from django admin
https://online.ntnu.no/admin/events/attendanceevent/
This view should not be used by anyone and attendance info should be edited through the event directly.
Should be possible to hide this by removing
`admin.site.register(AttendanceEvent, AttendanceEventAdmin)`
in events/admin.py (untested)
</issue>
<code>
[start of apps/events/admin.py]
1 # -*- coding: utf-8 -*-
2
3 from django import forms
4 from django.contrib import admin
5 from django.core import validators
6 from django.utils.translation import ugettext as _
7
8 from apps.events.models import Event
9 from apps.events.models import AttendanceEvent
10 from apps.events.models import Attendee
11 from apps.events.models import CompanyEvent
12 from apps.events.models import RuleBundle
13 from apps.events.models import FieldOfStudyRule
14 from apps.events.models import GradeRule
15 from apps.events.models import UserGroupRule
16 from apps.feedback.admin import FeedbackRelationInline
17
18
19
20 class AttendeeInline(admin.TabularInline):
21 model = Attendee
22 extra = 1
23 classes = ('grp-collapse grp-open',) # style
24 inline_classes = ('grp-collapse grp-open',) # style
25
26
27 class CompanyInline(admin.TabularInline):
28 model = CompanyEvent
29 max_num = 20
30 extra = 0
31 classes = ('grp-collapse grp-open',) # style
32 inline_classes = ('grp-collapse grp-open',) # style
33
34
35 class RuleBundleInline(admin.TabularInline):
36 model = RuleBundle
37 extra = 1
38 max_num = 20
39 classes = ('grp-collapse grp-open',) # style
40 inline_classes = ('grp-collapse grp-open',) # style
41
42
43 class AttendanceEventAdmin(admin.ModelAdmin):
44 model = AttendanceEvent
45 inlines = (AttendeeInline, RuleBundleInline)
46
47
48 class AttendeeAdmin(admin.ModelAdmin):
49 model = Attendee
50 list_display = ('user', 'event', 'paid')
51 actions = None
52
53 def delete_model(self, request, obj):
54 event = obj.event.event
55 event.notify_waiting_list(host=request.META['HTTP_HOST'], unattended_user=obj.user)
56 obj.delete()
57
58
59 class CompanyEventAdmin(admin.ModelAdmin):
60 model = CompanyEvent
61 inlines = (CompanyInline,)
62
63
64 class RuleBundleAdmin(admin.ModelAdmin):
65 model = RuleBundle
66
67
68 class FieldOfStudyRuleAdmin(admin.ModelAdmin):
69 model = FieldOfStudyRule
70
71
72 class GradeRuleAdmin(admin.ModelAdmin):
73 model = GradeRule
74
75
76 class UserGroupRuleAdmin(admin.ModelAdmin):
77 model = UserGroupRule
78
79
80 class AttendanceEventInline(admin.StackedInline):
81 model = AttendanceEvent
82 max_num = 1
83 extra = 0
84 filter_horizontal = ('rule_bundles',)
85 classes = ('grp-collapse grp-open',) # style
86 inline_classes = ('grp-collapse grp-open',) # style
87
88
89 class EventAdmin(admin.ModelAdmin):
90 inlines = (AttendanceEventInline, FeedbackRelationInline, CompanyInline)
91 exclude = ("author", )
92 search_fields = ('title',)
93
94 def save_model(self, request, obj, form, change):
95 if not change: # created
96 obj.author = request.user
97 else:
98 # If attendance max capacity changed we will notify users that they are now on the attend list
99 old_event = Event.objects.get(id=obj.id)
100 if old_event.is_attendance_event() and old_event.wait_list:
101 diff_capacity = obj.attendance_event.max_capacity - old_event.attendance_event.max_capacity
102 if diff_capacity > 0:
103 if diff_capacity > len(old_event.wait_list):
104 diff_capacity = len(old_event.wait_list)
105 # Using old_event because max_capacity has already been changed in obj
106 old_event.notify_waiting_list(host=request.META['HTTP_HOST'], extra_capacity=diff_capacity)
107 obj.save()
108
109 def save_formset(self, request, form, formset, change):
110 instances = formset.save(commit=False)
111 for instance in instances:
112 instance.save()
113 formset.save_m2m()
114
115 def get_form(self, request, obj=None, **kwargs):
116 form = super(EventAdmin, self).get_form(request, obj, **kwargs)
117 form.base_fields['ingress_short'].validators=[validators.MinLengthValidator(50)]
118 form.base_fields['ingress'].validators=[validators.MinLengthValidator(75)]
119 form.base_fields['description'].validators=[validators.MinLengthValidator(140)]
120 return form
121
122 admin.site.register(Event, EventAdmin)
123 admin.site.register(Attendee, AttendeeAdmin)
124 admin.site.register(AttendanceEvent, AttendanceEventAdmin)
125 admin.site.register(RuleBundle, RuleBundleAdmin)
126 admin.site.register(GradeRule, GradeRuleAdmin)
127 admin.site.register(UserGroupRule, UserGroupRuleAdmin)
128 admin.site.register(FieldOfStudyRule, FieldOfStudyRuleAdmin)
129
[end of apps/events/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/events/admin.py b/apps/events/admin.py
--- a/apps/events/admin.py
+++ b/apps/events/admin.py
@@ -40,11 +40,6 @@
inline_classes = ('grp-collapse grp-open',) # style
-class AttendanceEventAdmin(admin.ModelAdmin):
- model = AttendanceEvent
- inlines = (AttendeeInline, RuleBundleInline)
-
-
class AttendeeAdmin(admin.ModelAdmin):
model = Attendee
list_display = ('user', 'event', 'paid')
@@ -119,9 +114,9 @@
form.base_fields['description'].validators=[validators.MinLengthValidator(140)]
return form
+
admin.site.register(Event, EventAdmin)
admin.site.register(Attendee, AttendeeAdmin)
-admin.site.register(AttendanceEvent, AttendanceEventAdmin)
admin.site.register(RuleBundle, RuleBundleAdmin)
admin.site.register(GradeRule, GradeRuleAdmin)
admin.site.register(UserGroupRule, UserGroupRuleAdmin)
| {"golden_diff": "diff --git a/apps/events/admin.py b/apps/events/admin.py\n--- a/apps/events/admin.py\n+++ b/apps/events/admin.py\n@@ -40,11 +40,6 @@\n inline_classes = ('grp-collapse grp-open',) # style\n \n \n-class AttendanceEventAdmin(admin.ModelAdmin):\n- model = AttendanceEvent\n- inlines = (AttendeeInline, RuleBundleInline)\n-\n-\n class AttendeeAdmin(admin.ModelAdmin):\n model = Attendee\n list_display = ('user', 'event', 'paid')\n@@ -119,9 +114,9 @@\n form.base_fields['description'].validators=[validators.MinLengthValidator(140)]\n return form\n \n+\n admin.site.register(Event, EventAdmin)\n admin.site.register(Attendee, AttendeeAdmin)\n-admin.site.register(AttendanceEvent, AttendanceEventAdmin)\n admin.site.register(RuleBundle, RuleBundleAdmin)\n admin.site.register(GradeRule, GradeRuleAdmin)\n admin.site.register(UserGroupRule, UserGroupRuleAdmin)\n", "issue": "Hide attendanceevent from django admin\nhttps://online.ntnu.no/admin/events/attendanceevent/\n\nThis view should not be used by anyone and attendance info should be edited through the event directly. \n\nShould be possible to hide this by removing \n`admin.site.register(AttendanceEvent, AttendanceEventAdmin)`\n in events/admin.py (untested)\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom django import forms\nfrom django.contrib import admin\nfrom django.core import validators\nfrom django.utils.translation import ugettext as _\n\nfrom apps.events.models import Event\nfrom apps.events.models import AttendanceEvent\nfrom apps.events.models import Attendee\nfrom apps.events.models import CompanyEvent\nfrom apps.events.models import RuleBundle\nfrom apps.events.models import FieldOfStudyRule\nfrom apps.events.models import GradeRule\nfrom apps.events.models import UserGroupRule\nfrom apps.feedback.admin import FeedbackRelationInline\n\n\n\nclass AttendeeInline(admin.TabularInline):\n model = Attendee\n extra = 1\n classes = ('grp-collapse grp-open',) # style\n inline_classes = ('grp-collapse grp-open',) # style\n\n\nclass CompanyInline(admin.TabularInline):\n model = CompanyEvent\n max_num = 20\n extra = 0\n classes = ('grp-collapse grp-open',) # style\n inline_classes = ('grp-collapse grp-open',) # style\n\n\nclass RuleBundleInline(admin.TabularInline):\n model = RuleBundle\n extra = 1\n max_num = 20\n classes = ('grp-collapse grp-open',) # style\n inline_classes = ('grp-collapse grp-open',) # style\n\n\nclass AttendanceEventAdmin(admin.ModelAdmin):\n model = AttendanceEvent\n inlines = (AttendeeInline, RuleBundleInline)\n\n\nclass AttendeeAdmin(admin.ModelAdmin):\n model = Attendee\n list_display = ('user', 'event', 'paid')\n actions = None\n\n def delete_model(self, request, obj):\n event = obj.event.event\n event.notify_waiting_list(host=request.META['HTTP_HOST'], unattended_user=obj.user)\n obj.delete()\n\n\nclass CompanyEventAdmin(admin.ModelAdmin):\n model = CompanyEvent\n inlines = (CompanyInline,)\n\n\nclass RuleBundleAdmin(admin.ModelAdmin):\n model = RuleBundle\n\n\nclass FieldOfStudyRuleAdmin(admin.ModelAdmin):\n model = FieldOfStudyRule\n\n\nclass GradeRuleAdmin(admin.ModelAdmin):\n model = GradeRule\n\n\nclass UserGroupRuleAdmin(admin.ModelAdmin):\n model = UserGroupRule\n\n\nclass AttendanceEventInline(admin.StackedInline):\n model = AttendanceEvent\n max_num = 1\n extra = 0\n filter_horizontal = ('rule_bundles',)\n classes = ('grp-collapse grp-open',) # style\n inline_classes = ('grp-collapse grp-open',) # style\n\n\nclass EventAdmin(admin.ModelAdmin):\n inlines = (AttendanceEventInline, 
FeedbackRelationInline, CompanyInline)\n exclude = (\"author\", )\n search_fields = ('title',)\n\n def save_model(self, request, obj, form, change):\n if not change: # created\n obj.author = request.user\n else:\n # If attendance max capacity changed we will notify users that they are now on the attend list\n old_event = Event.objects.get(id=obj.id)\n if old_event.is_attendance_event() and old_event.wait_list:\n diff_capacity = obj.attendance_event.max_capacity - old_event.attendance_event.max_capacity\n if diff_capacity > 0:\n if diff_capacity > len(old_event.wait_list):\n diff_capacity = len(old_event.wait_list)\n # Using old_event because max_capacity has already been changed in obj\n old_event.notify_waiting_list(host=request.META['HTTP_HOST'], extra_capacity=diff_capacity)\n obj.save()\n\n def save_formset(self, request, form, formset, change):\n instances = formset.save(commit=False)\n for instance in instances:\n instance.save()\n formset.save_m2m()\n\n def get_form(self, request, obj=None, **kwargs):\n form = super(EventAdmin, self).get_form(request, obj, **kwargs)\n form.base_fields['ingress_short'].validators=[validators.MinLengthValidator(50)]\n form.base_fields['ingress'].validators=[validators.MinLengthValidator(75)]\n form.base_fields['description'].validators=[validators.MinLengthValidator(140)]\n return form\n\nadmin.site.register(Event, EventAdmin)\nadmin.site.register(Attendee, AttendeeAdmin)\nadmin.site.register(AttendanceEvent, AttendanceEventAdmin)\nadmin.site.register(RuleBundle, RuleBundleAdmin)\nadmin.site.register(GradeRule, GradeRuleAdmin)\nadmin.site.register(UserGroupRule, UserGroupRuleAdmin)\nadmin.site.register(FieldOfStudyRule, FieldOfStudyRuleAdmin)\n", "path": "apps/events/admin.py"}]} | 1,843 | 215 |
gh_patches_debug_13193 | rasdani/github-patches | git_diff | opensearch-project__opensearch-build-499 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make plugin integtest.sh run against non-snapshot build
The plugin integtest.sh picks up the OpenSearch version provided in build.gradle, which is 1.1.0-SNAPSHOT. Since the release candidates are non-snapshot build artifacts, make this configurable in the integ test job.
</issue>
<code>
[start of bundle-workflow/src/paths/script_finder.py]
1 # SPDX-License-Identifier: Apache-2.0
2 #
3 # The OpenSearch Contributors require contributions made to
4 # this file be licensed under the Apache-2.0 license or a
5 # compatible open source license.
6
7 import os
8
9
10 class ScriptFinder:
11 class ScriptNotFoundError(Exception):
12 def __init__(self, kind, paths):
13 self.kind = kind
14 self.paths = paths
15 super().__init__(f"Could not find {kind} script. Looked in {paths}.")
16
17 component_scripts_path = os.path.realpath(
18 os.path.join(
19 os.path.dirname(os.path.abspath(__file__)), "../../scripts/components"
20 )
21 )
22
23 default_scripts_path = os.path.realpath(
24 os.path.join(
25 os.path.dirname(os.path.abspath(__file__)), "../../scripts/default"
26 )
27 )
28
29 """
30 ScriptFinder is a helper that abstracts away the details of where to look for build, test and install scripts.
31
32 For build.sh and integtest.sh scripts, given a component name and a checked-out Git repository,
33 it will look in the following locations, in order:
34 * Root of the Git repository
35 * /scripts/<script-name> in the Git repository
36 * <component_scripts_path>/<component_name>/<script-name>
37 * <default_scripts_path>/<script-name>
38
39 For install.sh scripts, given a component name, it will look in the following locations, in order:
40 * <component_scripts_path>/<component_name>/<script-name>
41 * <default_scripts_path>/<script-name>
42 """
43
44 @classmethod
45 def __find_script(cls, name, paths):
46 script = next(filter(lambda path: os.path.exists(path), paths), None)
47 if script is None:
48 raise ScriptFinder.ScriptNotFoundError(name, paths)
49 return script
50
51 @classmethod
52 def find_build_script(cls, component_name, git_dir):
53 paths = [
54 os.path.realpath(os.path.join(git_dir, "build.sh")),
55 os.path.realpath(os.path.join(git_dir, "scripts/build.sh")),
56 os.path.realpath(
57 os.path.join(cls.component_scripts_path, component_name, "build.sh")
58 ),
59 os.path.realpath(os.path.join(cls.default_scripts_path, "build.sh")),
60 ]
61
62 return cls.__find_script("build.sh", paths)
63
64 @classmethod
65 def find_integ_test_script(cls, component_name, git_dir):
66 paths = [
67 os.path.realpath(os.path.join(git_dir, "integtest.sh")),
68 os.path.realpath(os.path.join(git_dir, "scripts/integtest.sh")),
69 os.path.realpath(
70 os.path.join(cls.component_scripts_path, component_name, "integtest.sh")
71 ),
72 os.path.realpath(os.path.join(cls.default_scripts_path, "integtest.sh")),
73 ]
74
75 return cls.__find_script("integtest.sh", paths)
76
77 @classmethod
78 def find_install_script(cls, component_name):
79 paths = [
80 os.path.realpath(
81 os.path.join(cls.component_scripts_path, component_name, "install.sh")
82 ),
83 os.path.realpath(os.path.join(cls.default_scripts_path, "install.sh")),
84 ]
85
86 return cls.__find_script("install.sh", paths)
87
[end of bundle-workflow/src/paths/script_finder.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bundle-workflow/src/paths/script_finder.py b/bundle-workflow/src/paths/script_finder.py
--- a/bundle-workflow/src/paths/script_finder.py
+++ b/bundle-workflow/src/paths/script_finder.py
@@ -64,8 +64,9 @@
@classmethod
def find_integ_test_script(cls, component_name, git_dir):
paths = [
- os.path.realpath(os.path.join(git_dir, "integtest.sh")),
- os.path.realpath(os.path.join(git_dir, "scripts/integtest.sh")),
+ # TODO: Uncomment this after the integtest.sh tool is removed from plugin repos. See issue #497
+ # os.path.realpath(os.path.join(git_dir, "integtest.sh")),
+ # os.path.realpath(os.path.join(git_dir, "scripts/integtest.sh")),
os.path.realpath(
os.path.join(cls.component_scripts_path, component_name, "integtest.sh")
),
| {"golden_diff": "diff --git a/bundle-workflow/src/paths/script_finder.py b/bundle-workflow/src/paths/script_finder.py\n--- a/bundle-workflow/src/paths/script_finder.py\n+++ b/bundle-workflow/src/paths/script_finder.py\n@@ -64,8 +64,9 @@\n @classmethod\n def find_integ_test_script(cls, component_name, git_dir):\n paths = [\n- os.path.realpath(os.path.join(git_dir, \"integtest.sh\")),\n- os.path.realpath(os.path.join(git_dir, \"scripts/integtest.sh\")),\n+ # TODO: Uncomment this after the integtest.sh tool is removed from plugin repos. See issue #497\n+ # os.path.realpath(os.path.join(git_dir, \"integtest.sh\")),\n+ # os.path.realpath(os.path.join(git_dir, \"scripts/integtest.sh\")),\n os.path.realpath(\n os.path.join(cls.component_scripts_path, component_name, \"integtest.sh\")\n ),\n", "issue": "Make plugin integtest.sh run against non-snapshot build\nThe plugin integtest.sh picks up the opensearch version provided in build.gradle, which is 1.1.0-SNAPSHOT. Since the release candidates are non snapshot built artifacts, make this configurable in integ test job\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport os\n\n\nclass ScriptFinder:\n class ScriptNotFoundError(Exception):\n def __init__(self, kind, paths):\n self.kind = kind\n self.paths = paths\n super().__init__(f\"Could not find {kind} script. Looked in {paths}.\")\n\n component_scripts_path = os.path.realpath(\n os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"../../scripts/components\"\n )\n )\n\n default_scripts_path = os.path.realpath(\n os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"../../scripts/default\"\n )\n )\n\n \"\"\"\n ScriptFinder is a helper that abstracts away the details of where to look for build, test and install scripts.\n\n For build.sh and integtest.sh scripts, given a component name and a checked-out Git repository,\n it will look in the following locations, in order:\n * Root of the Git repository\n * /scripts/<script-name> in the Git repository\n * <component_scripts_path>/<component_name>/<script-name>\n * <default_scripts_path>/<script-name>\n\n For install.sh scripts, given a component name, it will look in the following locations, in order:\n * <component_scripts_path>/<component_name>/<script-name>\n * <default_scripts_path>/<script-name>\n \"\"\"\n\n @classmethod\n def __find_script(cls, name, paths):\n script = next(filter(lambda path: os.path.exists(path), paths), None)\n if script is None:\n raise ScriptFinder.ScriptNotFoundError(name, paths)\n return script\n\n @classmethod\n def find_build_script(cls, component_name, git_dir):\n paths = [\n os.path.realpath(os.path.join(git_dir, \"build.sh\")),\n os.path.realpath(os.path.join(git_dir, \"scripts/build.sh\")),\n os.path.realpath(\n os.path.join(cls.component_scripts_path, component_name, \"build.sh\")\n ),\n os.path.realpath(os.path.join(cls.default_scripts_path, \"build.sh\")),\n ]\n\n return cls.__find_script(\"build.sh\", paths)\n\n @classmethod\n def find_integ_test_script(cls, component_name, git_dir):\n paths = [\n os.path.realpath(os.path.join(git_dir, \"integtest.sh\")),\n os.path.realpath(os.path.join(git_dir, \"scripts/integtest.sh\")),\n os.path.realpath(\n os.path.join(cls.component_scripts_path, component_name, \"integtest.sh\")\n ),\n os.path.realpath(os.path.join(cls.default_scripts_path, \"integtest.sh\")),\n ]\n\n 
return cls.__find_script(\"integtest.sh\", paths)\n\n @classmethod\n def find_install_script(cls, component_name):\n paths = [\n os.path.realpath(\n os.path.join(cls.component_scripts_path, component_name, \"install.sh\")\n ),\n os.path.realpath(os.path.join(cls.default_scripts_path, \"install.sh\")),\n ]\n\n return cls.__find_script(\"install.sh\", paths)\n", "path": "bundle-workflow/src/paths/script_finder.py"}]} | 1,446 | 215 |
gh_patches_debug_15180 | rasdani/github-patches | git_diff | pre-commit__pre-commit-38 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Consider using --no-checkout for cloning
I'd assume it is faster...
</issue>
<code>
[start of pre_commit/repository.py]
1
2 import contextlib
3 from plumbum import local
4
5 import pre_commit.constants as C
6 from pre_commit.clientlib.validate_manifest import validate_manifest
7 from pre_commit.hooks_workspace import in_hooks_workspace
8 from pre_commit.languages.all import languages
9 from pre_commit.util import cached_property
10
11
12 class Repository(object):
13 def __init__(self, repo_config):
14 self.repo_config = repo_config
15
16 @cached_property
17 def repo_url(self):
18 return self.repo_config['repo']
19
20 @cached_property
21 def sha(self):
22 return self.repo_config['sha']
23
24 @cached_property
25 def languages(self):
26 return set(filter(None, (
27 hook.get('language') for hook in self.hooks.values()
28 )))
29
30 @cached_property
31 def hooks(self):
32 return dict(
33 (hook['id'], dict(hook, **self.manifest[hook['id']]))
34 for hook in self.repo_config['hooks']
35 )
36
37 @cached_property
38 def manifest(self):
39 with self.in_checkout():
40 return dict(
41 (hook['id'], hook)
42 for hook in validate_manifest(C.MANIFEST_FILE)
43 )
44
45 @contextlib.contextmanager
46 def in_checkout(self):
47 with in_hooks_workspace():
48 # SMELL:
49 self.create()
50 with local.cwd(self.sha):
51 yield
52
53 def create(self):
54 with in_hooks_workspace():
55 if local.path(self.sha).exists():
56 # Project already exists, no reason to re-create it
57 return
58
59 local['git']['clone', self.repo_url, self.sha]()
60 with self.in_checkout():
61 local['git']['checkout', self.sha]()
62
63 def install(self):
64 with self.in_checkout():
65 for language in C.SUPPORTED_LANGUAGES:
66 if language in self.languages:
67 languages[language].install_environment()
68
69 def run_hook(self, hook_id, file_args):
70 with self.in_checkout():
71 hook = self.hooks[hook_id]
72 return languages[hook['language']].run_hook(hook, file_args)
[end of pre_commit/repository.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/repository.py b/pre_commit/repository.py
--- a/pre_commit/repository.py
+++ b/pre_commit/repository.py
@@ -56,7 +56,7 @@
# Project already exists, no reason to re-create it
return
- local['git']['clone', self.repo_url, self.sha]()
+ local['git']['clone', '--no-checkout', self.repo_url, self.sha]()
with self.in_checkout():
local['git']['checkout', self.sha]()
@@ -69,4 +69,4 @@
def run_hook(self, hook_id, file_args):
with self.in_checkout():
hook = self.hooks[hook_id]
- return languages[hook['language']].run_hook(hook, file_args)
\ No newline at end of file
+ return languages[hook['language']].run_hook(hook, file_args)
| {"golden_diff": "diff --git a/pre_commit/repository.py b/pre_commit/repository.py\n--- a/pre_commit/repository.py\n+++ b/pre_commit/repository.py\n@@ -56,7 +56,7 @@\n # Project already exists, no reason to re-create it\n return\n \n- local['git']['clone', self.repo_url, self.sha]()\n+ local['git']['clone', '--no-checkout', self.repo_url, self.sha]()\n with self.in_checkout():\n local['git']['checkout', self.sha]()\n \n@@ -69,4 +69,4 @@\n def run_hook(self, hook_id, file_args):\n with self.in_checkout():\n hook = self.hooks[hook_id]\n- return languages[hook['language']].run_hook(hook, file_args)\n\\ No newline at end of file\n+ return languages[hook['language']].run_hook(hook, file_args)\n", "issue": "Consider using --no-checkout for cloning\nI'd assume it is faster...\n\n", "before_files": [{"content": "\nimport contextlib\nfrom plumbum import local\n\nimport pre_commit.constants as C\nfrom pre_commit.clientlib.validate_manifest import validate_manifest\nfrom pre_commit.hooks_workspace import in_hooks_workspace\nfrom pre_commit.languages.all import languages\nfrom pre_commit.util import cached_property\n\n\nclass Repository(object):\n def __init__(self, repo_config):\n self.repo_config = repo_config\n\n @cached_property\n def repo_url(self):\n return self.repo_config['repo']\n\n @cached_property\n def sha(self):\n return self.repo_config['sha']\n\n @cached_property\n def languages(self):\n return set(filter(None, (\n hook.get('language') for hook in self.hooks.values()\n )))\n\n @cached_property\n def hooks(self):\n return dict(\n (hook['id'], dict(hook, **self.manifest[hook['id']]))\n for hook in self.repo_config['hooks']\n )\n\n @cached_property\n def manifest(self):\n with self.in_checkout():\n return dict(\n (hook['id'], hook)\n for hook in validate_manifest(C.MANIFEST_FILE)\n )\n\n @contextlib.contextmanager\n def in_checkout(self):\n with in_hooks_workspace():\n # SMELL:\n self.create()\n with local.cwd(self.sha):\n yield\n\n def create(self):\n with in_hooks_workspace():\n if local.path(self.sha).exists():\n # Project already exists, no reason to re-create it\n return\n\n local['git']['clone', self.repo_url, self.sha]()\n with self.in_checkout():\n local['git']['checkout', self.sha]()\n\n def install(self):\n with self.in_checkout():\n for language in C.SUPPORTED_LANGUAGES:\n if language in self.languages:\n languages[language].install_environment()\n\n def run_hook(self, hook_id, file_args):\n with self.in_checkout():\n hook = self.hooks[hook_id]\n return languages[hook['language']].run_hook(hook, file_args)", "path": "pre_commit/repository.py"}]} | 1,120 | 191 |
gh_patches_debug_12470 | rasdani/github-patches | git_diff | joke2k__faker-759 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Generating invalid cpf (brazillian ssn)
Faker is generating invalid checksum digits for cpf (brazillian ssn).
### Steps to reproduce
1. Create fake instance using localization "pt_BR"
1. Call fake.cpf()
### Expected behavior
It should generate a valid CPF.
### Actual behavior
It is generating a CPF with invalid checksum digits, in some cases.
</issue>
<code>
[start of faker/providers/ssn/pt_BR/__init__.py]
1 # -*- coding: utf-8 -*-
2
3 from __future__ import unicode_literals
4 from .. import Provider as SsnProvider
5
6
7 def checksum(digits):
8 s = 0
9 p = len(digits) + 1
10 for i in range(0, len(digits)):
11 s += digits[i] * p
12 p -= 1
13
14 reminder = s % 11
15 if reminder == 0 or reminder == 1:
16 return 1
17 else:
18 return 11 - reminder
19
20
21 class Provider(SsnProvider):
22 """
23 Provider for Brazilian SSN also known in Brazil as CPF.
24 There are two methods Provider.ssn and Provider.cpf
25 The snn returns a valid number with numbers only
26 The cpf return a valid number formatted with brazilian mask. eg nnn.nnn.nnn-nn
27 """
28
29 def ssn(self):
30 digits = self.generator.random.sample(range(10), 9)
31
32 dv = checksum(digits)
33 digits.append(dv)
34 digits.append(checksum(digits))
35
36 return ''.join(map(str, digits))
37
38 def cpf(self):
39 c = self.ssn()
40 return c[:3] + '.' + c[3:6] + '.' + c[6:9] + '-' + c[9:]
41
[end of faker/providers/ssn/pt_BR/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/faker/providers/ssn/pt_BR/__init__.py b/faker/providers/ssn/pt_BR/__init__.py
--- a/faker/providers/ssn/pt_BR/__init__.py
+++ b/faker/providers/ssn/pt_BR/__init__.py
@@ -5,6 +5,12 @@
def checksum(digits):
+ """
+ Returns the checksum of CPF digits.
+ References to the algorithm:
+ https://pt.wikipedia.org/wiki/Cadastro_de_pessoas_f%C3%ADsicas#Algoritmo
+ https://metacpan.org/source/MAMAWE/Algorithm-CheckDigits-v1.3.0/lib/Algorithm/CheckDigits/M11_004.pm
+ """
s = 0
p = len(digits) + 1
for i in range(0, len(digits)):
@@ -13,7 +19,7 @@
reminder = s % 11
if reminder == 0 or reminder == 1:
- return 1
+ return 0
else:
return 11 - reminder
| {"golden_diff": "diff --git a/faker/providers/ssn/pt_BR/__init__.py b/faker/providers/ssn/pt_BR/__init__.py\n--- a/faker/providers/ssn/pt_BR/__init__.py\n+++ b/faker/providers/ssn/pt_BR/__init__.py\n@@ -5,6 +5,12 @@\n \n \n def checksum(digits):\n+ \"\"\"\n+ Returns the checksum of CPF digits.\n+ References to the algorithm:\n+ https://pt.wikipedia.org/wiki/Cadastro_de_pessoas_f%C3%ADsicas#Algoritmo\n+ https://metacpan.org/source/MAMAWE/Algorithm-CheckDigits-v1.3.0/lib/Algorithm/CheckDigits/M11_004.pm\n+ \"\"\"\n s = 0\n p = len(digits) + 1\n for i in range(0, len(digits)):\n@@ -13,7 +19,7 @@\n \n reminder = s % 11\n if reminder == 0 or reminder == 1:\n- return 1\n+ return 0\n else:\n return 11 - reminder\n", "issue": "Generating invalid cpf (brazillian ssn)\nFaker is generating invalid checksum digits for cpf (brazillian ssn).\r\n\r\n### Steps to reproduce\r\n\r\n1. Create fake instance using localization \"pt_BR\"\r\n1. Call fake.cpf()\r\n\r\n### Expected behavior\r\n\r\nIt should generate a valid CPF.\r\n\r\n### Actual behavior\r\n\r\nIt is generating a CPF with invalid checksum digits, in some cases.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\nfrom .. import Provider as SsnProvider\n\n\ndef checksum(digits):\n s = 0\n p = len(digits) + 1\n for i in range(0, len(digits)):\n s += digits[i] * p\n p -= 1\n\n reminder = s % 11\n if reminder == 0 or reminder == 1:\n return 1\n else:\n return 11 - reminder\n\n\nclass Provider(SsnProvider):\n \"\"\"\n Provider for Brazilian SSN also known in Brazil as CPF.\n There are two methods Provider.ssn and Provider.cpf\n The snn returns a valid number with numbers only\n The cpf return a valid number formatted with brazilian mask. eg nnn.nnn.nnn-nn\n \"\"\"\n\n def ssn(self):\n digits = self.generator.random.sample(range(10), 9)\n\n dv = checksum(digits)\n digits.append(dv)\n digits.append(checksum(digits))\n\n return ''.join(map(str, digits))\n\n def cpf(self):\n c = self.ssn()\n return c[:3] + '.' + c[3:6] + '.' + c[6:9] + '-' + c[9:]\n", "path": "faker/providers/ssn/pt_BR/__init__.py"}]} | 984 | 247 |
gh_patches_debug_63106 | rasdani/github-patches | git_diff | kornia__kornia-1263 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug] save pointcloud not updates num_points when inf
## 🐛 Bug
The function `K.utils.save_pointcloud_ply` doesn't update the final number of points to be serialized when one of the values contain an infinite value.
How to fix:
update this line https://github.com/kornia/kornia/blob/master/kornia/utils/pointcloud_io.py#L34
```python
if not bool(torch.isfinite(xyz).any()):
continue
```
by
```python
if not bool(torch.isfinite(xyz).any()):
num_points -= 1
continue
```
</issue>
<code>
[start of kornia/utils/pointcloud_io.py]
1 import os
2 from typing import Optional
3
4 import torch
5
6
7 def save_pointcloud_ply(filename: str, pointcloud: torch.Tensor) -> None:
8 r"""Utility function to save to disk a pointcloud in PLY format.
9
10 Args:
11 filename: the path to save the pointcloud.
12 pointcloud: tensor containing the pointcloud to save.
13 The tensor must be in the shape of :math:`(*, 3)` where the last
14 component is assumed to be a 3d point coordinate :math:`(X, Y, Z)`.
15 """
16 if not isinstance(filename, str) and filename[-3:] == '.ply':
17 raise TypeError("Input filename must be a string in with the .ply " "extension. Got {}".format(filename))
18
19 if not torch.is_tensor(pointcloud):
20 raise TypeError(f"Input pointcloud type is not a torch.Tensor. Got {type(pointcloud)}")
21
22 if not len(pointcloud.shape) == 3 and pointcloud.shape[-1] == 3:
23 raise TypeError("Input pointcloud must be in the following shape " "HxWx3. Got {}.".format(pointcloud.shape))
24
25 # flatten the input pointcloud in a vector to iterate points
26 xyz_vec: torch.Tensor = pointcloud.reshape(-1, 3)
27
28 with open(filename, 'w') as f:
29 data_str: str = ''
30 num_points: int = xyz_vec.shape[0]
31 for idx in range(num_points):
32 xyz = xyz_vec[idx]
33 if not bool(torch.isfinite(xyz).any()):
34 continue
35 x: float = xyz[0].item()
36 y: float = xyz[1].item()
37 z: float = xyz[2].item()
38 data_str += f'{x} {y} {z}\n'
39
40 f.write("ply\n")
41 f.write("format ascii 1.0\n")
42 f.write("comment arraiy generated\n")
43 f.write("element vertex %d\n" % num_points)
44 f.write("property double x\n")
45 f.write("property double y\n")
46 f.write("property double z\n")
47 f.write("end_header\n")
48 f.write(data_str)
49
50
51 def load_pointcloud_ply(filename: str, header_size: int = 8) -> torch.Tensor:
52 r"""Utility function to load from disk a pointcloud in PLY format.
53
54 Args:
55 filename: the path to the pointcloud.
56 header_size: the size of the ply file header that will
57 be skipped during loading.
58
59 Return:
60 tensor containing the loaded point with shape :math:`(*, 3)` where
61 :math:`*` represents the number of points.
62 """
63 if not isinstance(filename, str) and filename[-3:] == '.ply':
64 raise TypeError("Input filename must be a string in with the .ply " "extension. Got {}".format(filename))
65 if not os.path.isfile(filename):
66 raise ValueError("Input filename is not an existing file.")
67 if not (isinstance(header_size, int) and header_size > 0):
68 raise TypeError(f"Input header_size must be a positive integer. Got {header_size}.")
69 # open the file and populate tensor
70 with open(filename) as f:
71 points = []
72
73 # skip header
74 lines = f.readlines()[header_size:]
75
76 # iterate over the points
77 for line in lines:
78 x_str, y_str, z_str = line.split()
79 points.append((torch.tensor(float(x_str)), torch.tensor(float(y_str)), torch.tensor(float(z_str))))
80
81 # create tensor from list
82 pointcloud: torch.Tensor = torch.tensor(points)
83 return pointcloud
84
[end of kornia/utils/pointcloud_io.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kornia/utils/pointcloud_io.py b/kornia/utils/pointcloud_io.py
--- a/kornia/utils/pointcloud_io.py
+++ b/kornia/utils/pointcloud_io.py
@@ -31,6 +31,7 @@
for idx in range(num_points):
xyz = xyz_vec[idx]
if not bool(torch.isfinite(xyz).any()):
+ num_points -= 1
continue
x: float = xyz[0].item()
y: float = xyz[1].item()
| {"golden_diff": "diff --git a/kornia/utils/pointcloud_io.py b/kornia/utils/pointcloud_io.py\n--- a/kornia/utils/pointcloud_io.py\n+++ b/kornia/utils/pointcloud_io.py\n@@ -31,6 +31,7 @@\n for idx in range(num_points):\n xyz = xyz_vec[idx]\n if not bool(torch.isfinite(xyz).any()):\n+ num_points -= 1\n continue\n x: float = xyz[0].item()\n y: float = xyz[1].item()\n", "issue": "[Bug] save pointcloud not updates num_points when inf\n## \ud83d\udc1b Bug\r\n\r\nThe function `K.utils.save_pointcloud_ply` doesn't update the final number of points to be serialized when one of the values contain an infinite value.\r\n\r\nHow to fix:\r\n\r\nupdate this line https://github.com/kornia/kornia/blob/master/kornia/utils/pointcloud_io.py#L34\r\n\r\n```python\r\n if not bool(torch.isfinite(xyz).any()):\r\n continue\r\n```\r\nby\r\n\r\n```python\r\n if not bool(torch.isfinite(xyz).any()):\r\n num_points -= 1\r\n continue\r\n```\n", "before_files": [{"content": "import os\nfrom typing import Optional\n\nimport torch\n\n\ndef save_pointcloud_ply(filename: str, pointcloud: torch.Tensor) -> None:\n r\"\"\"Utility function to save to disk a pointcloud in PLY format.\n\n Args:\n filename: the path to save the pointcloud.\n pointcloud: tensor containing the pointcloud to save.\n The tensor must be in the shape of :math:`(*, 3)` where the last\n component is assumed to be a 3d point coordinate :math:`(X, Y, Z)`.\n \"\"\"\n if not isinstance(filename, str) and filename[-3:] == '.ply':\n raise TypeError(\"Input filename must be a string in with the .ply \" \"extension. Got {}\".format(filename))\n\n if not torch.is_tensor(pointcloud):\n raise TypeError(f\"Input pointcloud type is not a torch.Tensor. Got {type(pointcloud)}\")\n\n if not len(pointcloud.shape) == 3 and pointcloud.shape[-1] == 3:\n raise TypeError(\"Input pointcloud must be in the following shape \" \"HxWx3. Got {}.\".format(pointcloud.shape))\n\n # flatten the input pointcloud in a vector to iterate points\n xyz_vec: torch.Tensor = pointcloud.reshape(-1, 3)\n\n with open(filename, 'w') as f:\n data_str: str = ''\n num_points: int = xyz_vec.shape[0]\n for idx in range(num_points):\n xyz = xyz_vec[idx]\n if not bool(torch.isfinite(xyz).any()):\n continue\n x: float = xyz[0].item()\n y: float = xyz[1].item()\n z: float = xyz[2].item()\n data_str += f'{x} {y} {z}\\n'\n\n f.write(\"ply\\n\")\n f.write(\"format ascii 1.0\\n\")\n f.write(\"comment arraiy generated\\n\")\n f.write(\"element vertex %d\\n\" % num_points)\n f.write(\"property double x\\n\")\n f.write(\"property double y\\n\")\n f.write(\"property double z\\n\")\n f.write(\"end_header\\n\")\n f.write(data_str)\n\n\ndef load_pointcloud_ply(filename: str, header_size: int = 8) -> torch.Tensor:\n r\"\"\"Utility function to load from disk a pointcloud in PLY format.\n\n Args:\n filename: the path to the pointcloud.\n header_size: the size of the ply file header that will\n be skipped during loading.\n\n Return:\n tensor containing the loaded point with shape :math:`(*, 3)` where\n :math:`*` represents the number of points.\n \"\"\"\n if not isinstance(filename, str) and filename[-3:] == '.ply':\n raise TypeError(\"Input filename must be a string in with the .ply \" \"extension. Got {}\".format(filename))\n if not os.path.isfile(filename):\n raise ValueError(\"Input filename is not an existing file.\")\n if not (isinstance(header_size, int) and header_size > 0):\n raise TypeError(f\"Input header_size must be a positive integer. 
Got {header_size}.\")\n # open the file and populate tensor\n with open(filename) as f:\n points = []\n\n # skip header\n lines = f.readlines()[header_size:]\n\n # iterate over the points\n for line in lines:\n x_str, y_str, z_str = line.split()\n points.append((torch.tensor(float(x_str)), torch.tensor(float(y_str)), torch.tensor(float(z_str))))\n\n # create tensor from list\n pointcloud: torch.Tensor = torch.tensor(points)\n return pointcloud\n", "path": "kornia/utils/pointcloud_io.py"}]} | 1,635 | 121 |
gh_patches_debug_42864 | rasdani/github-patches | git_diff | sunpy__sunpy-4129 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Maintain coherence between keycomments and the metadict
See #2748
This is probably best implemented by adding the functionality to our `MetaDict` object or something, so that we don't have to do it manually everywhere.
</issue>
<code>
[start of sunpy/util/metadata.py]
1 """
2 This module provides a generalized dictionary class that deals with header
3 parsing and normalization.
4 """
5 from collections import OrderedDict
6
7 __all__ = ['MetaDict']
8
9
10 class MetaDict(OrderedDict):
11 """
12 A class to hold metadata associated with a `sunpy.map.Map
13 <sunpy.map.map_factory.MapFactory.__call__>` derivative.
14
15 This class handles everything in lower case. This allows case
16 insensitive indexing.
17 """
18
19 def __init__(self, *args):
20 """
21 Creates a new MapHeader instance.
22 """
23 # Store all keys as upper-case to allow for case-insensitive indexing
24 # OrderedDict can be instantiated from a list of lists or a tuple of tuples
25 tags = dict()
26 if args:
27 args = list(args)
28 adict = args[0]
29 if isinstance(adict, list) or isinstance(adict, tuple):
30 tags = OrderedDict((k.upper(), v) for k, v in adict)
31 elif isinstance(adict, dict):
32 tags = OrderedDict((k.upper(), v) for k, v in adict.items())
33 else:
34 raise TypeError("Can not create a MetaDict from this type input")
35 args[0] = tags
36
37 super().__init__(*args)
38
39 def __contains__(self, key):
40 """
41 Override ``__contains__``.
42 """
43 return OrderedDict.__contains__(self, key.lower())
44
45 def __getitem__(self, key):
46 """
47 Override ``[]`` indexing.
48 """
49 return OrderedDict.__getitem__(self, key.lower())
50
51 def __setitem__(self, key, value):
52 """
53 Override ``[]`` indexing.
54 """
55 return OrderedDict.__setitem__(self, key.lower(), value)
56
57 def get(self, key, default=None):
58 """
59 Override ``.get()`` indexing.
60 """
61 return OrderedDict.get(self, key.lower(), default)
62
63 def has_key(self, key):
64 """
65 Override ``.has_key()`` to perform case-insensitively.
66 """
67 return key.lower() in self
68
69 def pop(self, key, default=None):
70 """
71 Override ``.pop()`` to perform case-insensitively.
72 """
73 return OrderedDict.pop(self, key.lower(), default)
74
75 def update(self, d2):
76 """
77 Override ``.update()`` to perform case-insensitively.
78 """
79 return OrderedDict.update(self, OrderedDict((k.lower(), v) for k, v in d2.items()))
80
81 def setdefault(self, key, default=None):
82 """
83 Override ``.setdefault()`` to perform case-insensitively.
84 """
85 return OrderedDict.setdefault(self, key.lower(), default)
86
[end of sunpy/util/metadata.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sunpy/util/metadata.py b/sunpy/util/metadata.py
--- a/sunpy/util/metadata.py
+++ b/sunpy/util/metadata.py
@@ -1,6 +1,6 @@
"""
This module provides a generalized dictionary class that deals with header
-parsing and normalization.
+parsing, normalization, and maintaining coherence between keys and keycomments.
"""
from collections import OrderedDict
@@ -14,28 +14,67 @@
This class handles everything in lower case. This allows case
insensitive indexing.
+
+ If the key 'keycomments' exists, its value must be a dictionary mapping
+ keys in the `MetaDict` to their comments. The casing of keys in the
+ keycomments dictionary is not significant. If a key is removed from the
+ `MetaDict`, it will also be removed from the keycomments dictionary.
+ Additionally, any extraneous keycomments will be removed when the
+ `MetaDict` is instantiated.
"""
def __init__(self, *args):
"""
- Creates a new MapHeader instance.
+ Creates a new MetaDict instance.
"""
- # Store all keys as upper-case to allow for case-insensitive indexing
+ # Store all keys as lower-case to allow for case-insensitive indexing
# OrderedDict can be instantiated from a list of lists or a tuple of tuples
tags = dict()
if args:
args = list(args)
adict = args[0]
if isinstance(adict, list) or isinstance(adict, tuple):
- tags = OrderedDict((k.upper(), v) for k, v in adict)
+ tags = OrderedDict((k.lower(), v) for k, v in adict)
elif isinstance(adict, dict):
- tags = OrderedDict((k.upper(), v) for k, v in adict.items())
+ tags = OrderedDict((k.lower(), v) for k, v in adict.items())
else:
raise TypeError("Can not create a MetaDict from this type input")
args[0] = tags
super().__init__(*args)
+ # Use `copy=True` to avoid mutating the caller's keycomments
+ # dictionary (if they provided one).
+ self._prune_keycomments(copy=True)
+
+ def _prune_keycomments(self, copy=False):
+ """
+ Remove keycomments for keys that are not contained in the MetaDict.
+
+ Parameters
+ ----------
+ copy : `bool`, optional
+ Make a copy of the current keycomments dict before removing keys.
+ """
+ if 'keycomments' not in self:
+ return
+
+ keycomments = self['keycomments']
+
+ if not isinstance(keycomments, dict):
+ raise TypeError(
+ "'keycomments' key must have a value of type `dict`. Found "
+ "the following type: %r" % type(keycomments))
+
+ if copy:
+ keycomments = keycomments.copy()
+
+ for key in list(keycomments.keys()):
+ if key not in self:
+ del keycomments[key]
+
+ self['keycomments'] = keycomments
+
def __contains__(self, key):
"""
Override ``__contains__``.
@@ -54,6 +93,15 @@
"""
return OrderedDict.__setitem__(self, key.lower(), value)
+ # Note: `OrderedDict.popitem()` does not need to be overridden to prune
+ # keycomments because it calls `__delitem__` internally.
+ def __delitem__(self, key):
+ """
+ Override ``del dict[key]`` key deletion.
+ """
+ OrderedDict.__delitem__(self, key.lower())
+ self._prune_keycomments()
+
def get(self, key, default=None):
"""
Override ``.get()`` indexing.
@@ -70,7 +118,11 @@
"""
Override ``.pop()`` to perform case-insensitively.
"""
- return OrderedDict.pop(self, key.lower(), default)
+ has_key = key in self
+ result = OrderedDict.pop(self, key.lower(), default)
+ if has_key:
+ self._prune_keycomments()
+ return result
def update(self, d2):
"""
| {"golden_diff": "diff --git a/sunpy/util/metadata.py b/sunpy/util/metadata.py\n--- a/sunpy/util/metadata.py\n+++ b/sunpy/util/metadata.py\n@@ -1,6 +1,6 @@\n \"\"\"\n This module provides a generalized dictionary class that deals with header\n-parsing and normalization.\n+parsing, normalization, and maintaining coherence between keys and keycomments.\n \"\"\"\n from collections import OrderedDict\n \n@@ -14,28 +14,67 @@\n \n This class handles everything in lower case. This allows case\n insensitive indexing.\n+\n+ If the key 'keycomments' exists, its value must be a dictionary mapping\n+ keys in the `MetaDict` to their comments. The casing of keys in the\n+ keycomments dictionary is not significant. If a key is removed from the\n+ `MetaDict`, it will also be removed from the keycomments dictionary.\n+ Additionally, any extraneous keycomments will be removed when the\n+ `MetaDict` is instantiated.\n \"\"\"\n \n def __init__(self, *args):\n \"\"\"\n- Creates a new MapHeader instance.\n+ Creates a new MetaDict instance.\n \"\"\"\n- # Store all keys as upper-case to allow for case-insensitive indexing\n+ # Store all keys as lower-case to allow for case-insensitive indexing\n # OrderedDict can be instantiated from a list of lists or a tuple of tuples\n tags = dict()\n if args:\n args = list(args)\n adict = args[0]\n if isinstance(adict, list) or isinstance(adict, tuple):\n- tags = OrderedDict((k.upper(), v) for k, v in adict)\n+ tags = OrderedDict((k.lower(), v) for k, v in adict)\n elif isinstance(adict, dict):\n- tags = OrderedDict((k.upper(), v) for k, v in adict.items())\n+ tags = OrderedDict((k.lower(), v) for k, v in adict.items())\n else:\n raise TypeError(\"Can not create a MetaDict from this type input\")\n args[0] = tags\n \n super().__init__(*args)\n \n+ # Use `copy=True` to avoid mutating the caller's keycomments\n+ # dictionary (if they provided one).\n+ self._prune_keycomments(copy=True)\n+\n+ def _prune_keycomments(self, copy=False):\n+ \"\"\"\n+ Remove keycomments for keys that are not contained in the MetaDict.\n+\n+ Parameters\n+ ----------\n+ copy : `bool`, optional\n+ Make a copy of the current keycomments dict before removing keys.\n+ \"\"\"\n+ if 'keycomments' not in self:\n+ return\n+\n+ keycomments = self['keycomments']\n+\n+ if not isinstance(keycomments, dict):\n+ raise TypeError(\n+ \"'keycomments' key must have a value of type `dict`. 
Found \"\n+ \"the following type: %r\" % type(keycomments))\n+\n+ if copy:\n+ keycomments = keycomments.copy()\n+\n+ for key in list(keycomments.keys()):\n+ if key not in self:\n+ del keycomments[key]\n+\n+ self['keycomments'] = keycomments\n+\n def __contains__(self, key):\n \"\"\"\n Override ``__contains__``.\n@@ -54,6 +93,15 @@\n \"\"\"\n return OrderedDict.__setitem__(self, key.lower(), value)\n \n+ # Note: `OrderedDict.popitem()` does not need to be overridden to prune\n+ # keycomments because it calls `__delitem__` internally.\n+ def __delitem__(self, key):\n+ \"\"\"\n+ Override ``del dict[key]`` key deletion.\n+ \"\"\"\n+ OrderedDict.__delitem__(self, key.lower())\n+ self._prune_keycomments()\n+\n def get(self, key, default=None):\n \"\"\"\n Override ``.get()`` indexing.\n@@ -70,7 +118,11 @@\n \"\"\"\n Override ``.pop()`` to perform case-insensitively.\n \"\"\"\n- return OrderedDict.pop(self, key.lower(), default)\n+ has_key = key in self\n+ result = OrderedDict.pop(self, key.lower(), default)\n+ if has_key:\n+ self._prune_keycomments()\n+ return result\n \n def update(self, d2):\n \"\"\"\n", "issue": "Maintain coherence between keycomments and the metadict\nSee #2748 \r\n\r\nThis is probably best implemented by adding the functionality to our `MetaDict` object or something, so that we don't have to do it manually everywhere.\n", "before_files": [{"content": "\"\"\"\nThis module provides a generalized dictionary class that deals with header\nparsing and normalization.\n\"\"\"\nfrom collections import OrderedDict\n\n__all__ = ['MetaDict']\n\n\nclass MetaDict(OrderedDict):\n \"\"\"\n A class to hold metadata associated with a `sunpy.map.Map\n <sunpy.map.map_factory.MapFactory.__call__>` derivative.\n\n This class handles everything in lower case. 
This allows case\n insensitive indexing.\n \"\"\"\n\n def __init__(self, *args):\n \"\"\"\n Creates a new MapHeader instance.\n \"\"\"\n # Store all keys as upper-case to allow for case-insensitive indexing\n # OrderedDict can be instantiated from a list of lists or a tuple of tuples\n tags = dict()\n if args:\n args = list(args)\n adict = args[0]\n if isinstance(adict, list) or isinstance(adict, tuple):\n tags = OrderedDict((k.upper(), v) for k, v in adict)\n elif isinstance(adict, dict):\n tags = OrderedDict((k.upper(), v) for k, v in adict.items())\n else:\n raise TypeError(\"Can not create a MetaDict from this type input\")\n args[0] = tags\n\n super().__init__(*args)\n\n def __contains__(self, key):\n \"\"\"\n Override ``__contains__``.\n \"\"\"\n return OrderedDict.__contains__(self, key.lower())\n\n def __getitem__(self, key):\n \"\"\"\n Override ``[]`` indexing.\n \"\"\"\n return OrderedDict.__getitem__(self, key.lower())\n\n def __setitem__(self, key, value):\n \"\"\"\n Override ``[]`` indexing.\n \"\"\"\n return OrderedDict.__setitem__(self, key.lower(), value)\n\n def get(self, key, default=None):\n \"\"\"\n Override ``.get()`` indexing.\n \"\"\"\n return OrderedDict.get(self, key.lower(), default)\n\n def has_key(self, key):\n \"\"\"\n Override ``.has_key()`` to perform case-insensitively.\n \"\"\"\n return key.lower() in self\n\n def pop(self, key, default=None):\n \"\"\"\n Override ``.pop()`` to perform case-insensitively.\n \"\"\"\n return OrderedDict.pop(self, key.lower(), default)\n\n def update(self, d2):\n \"\"\"\n Override ``.update()`` to perform case-insensitively.\n \"\"\"\n return OrderedDict.update(self, OrderedDict((k.lower(), v) for k, v in d2.items()))\n\n def setdefault(self, key, default=None):\n \"\"\"\n Override ``.setdefault()`` to perform case-insensitively.\n \"\"\"\n return OrderedDict.setdefault(self, key.lower(), default)\n", "path": "sunpy/util/metadata.py"}]} | 1,326 | 952 |
gh_patches_debug_16288 | rasdani/github-patches | git_diff | pytorch__vision-7702 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
to_grayscale gives non-actionable deprecation warning
_Originally reported in the [user forum](https://discuss.pytorch.org/t/cannot-find-convert-color-space/182591) by `@function2`._
> When I use to_grayscale, there’s a deprecation warning:
> ```
> UserWarning: The function `to_grayscale(...)` is deprecated in will be removed in a future release. Instead, please use `convert_color_space(..., color_space=datapoints.ColorSpace.GRAY)`.
> ```
> However, I can’t find this function in the current code base
---
Note that this only applies to `torchvision.transforms.v2.function`
https://github.com/pytorch/vision/blob/52eb5039bed1a23eee14014ff4cd6fd9cc9b2b08/torchvision/transforms/v2/functional/_deprecated.py#L12-L22
since the v1 version, i.e. `torchvision.transforms.functional` does not emit the warning
https://github.com/pytorch/vision/blob/52eb5039bed1a23eee14014ff4cd6fd9cc9b2b08/torchvision/transforms/functional.py#L1249-L1253
Fixing the v2 warning was forgotten in #7120.
cc @vfdev-5
</issue>
<code>
[start of torchvision/transforms/v2/functional/_deprecated.py]
1 import warnings
2 from typing import Any, List, Union
3
4 import PIL.Image
5 import torch
6
7 from torchvision import datapoints
8 from torchvision.transforms import functional as _F
9
10
11 @torch.jit.unused
12 def to_grayscale(inpt: PIL.Image.Image, num_output_channels: int = 1) -> PIL.Image.Image:
13 call = ", num_output_channels=3" if num_output_channels == 3 else ""
14 replacement = "convert_color_space(..., color_space=datapoints.ColorSpace.GRAY)"
15 if num_output_channels == 3:
16 replacement = f"convert_color_space({replacement}, color_space=datapoints.ColorSpace.RGB)"
17 warnings.warn(
18 f"The function `to_grayscale(...{call})` is deprecated in will be removed in a future release. "
19 f"Instead, please use `{replacement}`.",
20 )
21
22 return _F.to_grayscale(inpt, num_output_channels=num_output_channels)
23
24
25 @torch.jit.unused
26 def to_tensor(inpt: Any) -> torch.Tensor:
27 warnings.warn(
28 "The function `to_tensor(...)` is deprecated and will be removed in a future release. "
29 "Instead, please use `to_image_tensor(...)` followed by `convert_image_dtype(...)`."
30 )
31 return _F.to_tensor(inpt)
32
33
34 def get_image_size(inpt: Union[datapoints._ImageTypeJIT, datapoints._VideoTypeJIT]) -> List[int]:
35 warnings.warn(
36 "The function `get_image_size(...)` is deprecated and will be removed in a future release. "
37 "Instead, please use `get_spatial_size(...)` which returns `[h, w]` instead of `[w, h]`."
38 )
39 return _F.get_image_size(inpt)
40
[end of torchvision/transforms/v2/functional/_deprecated.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torchvision/transforms/v2/functional/_deprecated.py b/torchvision/transforms/v2/functional/_deprecated.py
--- a/torchvision/transforms/v2/functional/_deprecated.py
+++ b/torchvision/transforms/v2/functional/_deprecated.py
@@ -10,15 +10,10 @@
@torch.jit.unused
def to_grayscale(inpt: PIL.Image.Image, num_output_channels: int = 1) -> PIL.Image.Image:
- call = ", num_output_channels=3" if num_output_channels == 3 else ""
- replacement = "convert_color_space(..., color_space=datapoints.ColorSpace.GRAY)"
- if num_output_channels == 3:
- replacement = f"convert_color_space({replacement}, color_space=datapoints.ColorSpace.RGB)"
warnings.warn(
- f"The function `to_grayscale(...{call})` is deprecated in will be removed in a future release. "
- f"Instead, please use `{replacement}`.",
+ "The function `to_grayscale` is deprecated in will be removed in a future release. "
+ "Instead, please use `rgb_to_grayscale`.",
)
-
return _F.to_grayscale(inpt, num_output_channels=num_output_channels)
| {"golden_diff": "diff --git a/torchvision/transforms/v2/functional/_deprecated.py b/torchvision/transforms/v2/functional/_deprecated.py\n--- a/torchvision/transforms/v2/functional/_deprecated.py\n+++ b/torchvision/transforms/v2/functional/_deprecated.py\n@@ -10,15 +10,10 @@\n \n @torch.jit.unused\n def to_grayscale(inpt: PIL.Image.Image, num_output_channels: int = 1) -> PIL.Image.Image:\n- call = \", num_output_channels=3\" if num_output_channels == 3 else \"\"\n- replacement = \"convert_color_space(..., color_space=datapoints.ColorSpace.GRAY)\"\n- if num_output_channels == 3:\n- replacement = f\"convert_color_space({replacement}, color_space=datapoints.ColorSpace.RGB)\"\n warnings.warn(\n- f\"The function `to_grayscale(...{call})` is deprecated in will be removed in a future release. \"\n- f\"Instead, please use `{replacement}`.\",\n+ \"The function `to_grayscale` is deprecated in will be removed in a future release. \"\n+ \"Instead, please use `rgb_to_grayscale`.\",\n )\n-\n return _F.to_grayscale(inpt, num_output_channels=num_output_channels)\n", "issue": "to_grayscale gives non-actionable deprecation warning\n_Originally reported in the [user forum](https://discuss.pytorch.org/t/cannot-find-convert-color-space/182591) by `@function2`._\r\n\r\n> When I use to_grayscale, there\u2019s a deprecation warning:\r\n> ```\r\n> UserWarning: The function `to_grayscale(...)` is deprecated in will be removed in a future release. Instead, please use `convert_color_space(..., color_space=datapoints.ColorSpace.GRAY)`.\r\n> ```\r\n> However, I can\u2019t find this function in the current code base\r\n\r\n---\r\n\r\nNote that this only applies to `torchvision.transforms.v2.function`\r\n\r\nhttps://github.com/pytorch/vision/blob/52eb5039bed1a23eee14014ff4cd6fd9cc9b2b08/torchvision/transforms/v2/functional/_deprecated.py#L12-L22\r\n\r\nsince the v1 version, i.e. `torchvision.transforms.functional` does not emit the warning\r\n\r\nhttps://github.com/pytorch/vision/blob/52eb5039bed1a23eee14014ff4cd6fd9cc9b2b08/torchvision/transforms/functional.py#L1249-L1253\r\n\r\nFixing the v2 warning was forgotten in #7120.\r\n\n\ncc @vfdev-5\n", "before_files": [{"content": "import warnings\nfrom typing import Any, List, Union\n\nimport PIL.Image\nimport torch\n\nfrom torchvision import datapoints\nfrom torchvision.transforms import functional as _F\n\n\[email protected]\ndef to_grayscale(inpt: PIL.Image.Image, num_output_channels: int = 1) -> PIL.Image.Image:\n call = \", num_output_channels=3\" if num_output_channels == 3 else \"\"\n replacement = \"convert_color_space(..., color_space=datapoints.ColorSpace.GRAY)\"\n if num_output_channels == 3:\n replacement = f\"convert_color_space({replacement}, color_space=datapoints.ColorSpace.RGB)\"\n warnings.warn(\n f\"The function `to_grayscale(...{call})` is deprecated in will be removed in a future release. \"\n f\"Instead, please use `{replacement}`.\",\n )\n\n return _F.to_grayscale(inpt, num_output_channels=num_output_channels)\n\n\[email protected]\ndef to_tensor(inpt: Any) -> torch.Tensor:\n warnings.warn(\n \"The function `to_tensor(...)` is deprecated and will be removed in a future release. \"\n \"Instead, please use `to_image_tensor(...)` followed by `convert_image_dtype(...)`.\"\n )\n return _F.to_tensor(inpt)\n\n\ndef get_image_size(inpt: Union[datapoints._ImageTypeJIT, datapoints._VideoTypeJIT]) -> List[int]:\n warnings.warn(\n \"The function `get_image_size(...)` is deprecated and will be removed in a future release. 
\"\n \"Instead, please use `get_spatial_size(...)` which returns `[h, w]` instead of `[w, h]`.\"\n )\n return _F.get_image_size(inpt)\n", "path": "torchvision/transforms/v2/functional/_deprecated.py"}]} | 1,306 | 277 |
gh_patches_debug_28038 | rasdani/github-patches | git_diff | TheAlgorithms__Python-796 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
math CheckPrime is wrong
The current implementation doesn't support:
* Negative values, -1 is not a prime number. Current implementation raise a TypeError due to `math.sqrt` on negative values.
The current implementation return the wrong value for:
* 0, 0 doesn't have any divider, primes must have two.
* 1, 1 just have one divider, primes must have two.
</issue>
<code>
[start of maths/PrimeCheck.py]
1 import math
2 def primeCheck(number):
3 if number % 2 == 0 and number > 2:
4 return False
5 return all(number % i for i in range(3, int(math.sqrt(number)) + 1, 2))
6
7 def main():
8 print(primeCheck(37))
9 print(primeCheck(100))
10 print(primeCheck(77))
11
12 if __name__ == '__main__':
13 main()
14
[end of maths/PrimeCheck.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/maths/PrimeCheck.py b/maths/PrimeCheck.py
--- a/maths/PrimeCheck.py
+++ b/maths/PrimeCheck.py
@@ -1,13 +1,54 @@
import math
+import unittest
+
+
def primeCheck(number):
- if number % 2 == 0 and number > 2:
+ """
+ A number is prime if it has exactly two dividers: 1 and itself.
+ """
+ if number < 2:
+ # Negatives, 0 and 1 are not primes
return False
- return all(number % i for i in range(3, int(math.sqrt(number)) + 1, 2))
+ if number < 4:
+ # 2 and 3 are primes
+ return True
+ if number % 2 == 0:
+ # Even values are not primes
+ return False
+
+ # Except 2, all primes are odd. If any odd value divide
+ # the number, then that number is not prime.
+ odd_numbers = range(3, int(math.sqrt(number)) + 1, 2)
+ return not any(number % i == 0 for i in odd_numbers)
+
+
+class Test(unittest.TestCase):
+ def test_primes(self):
+ self.assertTrue(primeCheck(2))
+ self.assertTrue(primeCheck(3))
+ self.assertTrue(primeCheck(5))
+ self.assertTrue(primeCheck(7))
+ self.assertTrue(primeCheck(11))
+ self.assertTrue(primeCheck(13))
+ self.assertTrue(primeCheck(17))
+ self.assertTrue(primeCheck(19))
+ self.assertTrue(primeCheck(23))
+ self.assertTrue(primeCheck(29))
+
+ def test_not_primes(self):
+ self.assertFalse(primeCheck(-19),
+ "Negative numbers are not prime.")
+ self.assertFalse(primeCheck(0),
+ "Zero doesn't have any divider, primes must have two")
+ self.assertFalse(primeCheck(1),
+ "One just have 1 divider, primes must have two.")
+ self.assertFalse(primeCheck(2 * 2))
+ self.assertFalse(primeCheck(2 * 3))
+ self.assertFalse(primeCheck(3 * 3))
+ self.assertFalse(primeCheck(3 * 5))
+ self.assertFalse(primeCheck(3 * 5 * 7))
-def main():
- print(primeCheck(37))
- print(primeCheck(100))
- print(primeCheck(77))
if __name__ == '__main__':
- main()
+ unittest.main()
+
| {"golden_diff": "diff --git a/maths/PrimeCheck.py b/maths/PrimeCheck.py\n--- a/maths/PrimeCheck.py\n+++ b/maths/PrimeCheck.py\n@@ -1,13 +1,54 @@\n import math\n+import unittest\n+\n+\n def primeCheck(number):\n- if number % 2 == 0 and number > 2: \n+ \"\"\"\n+ A number is prime if it has exactly two dividers: 1 and itself.\n+ \"\"\"\n+ if number < 2:\n+ # Negatives, 0 and 1 are not primes\n return False\n- return all(number % i for i in range(3, int(math.sqrt(number)) + 1, 2))\n+ if number < 4:\n+ # 2 and 3 are primes\n+ return True\n+ if number % 2 == 0:\n+ # Even values are not primes\n+ return False\n+\n+ # Except 2, all primes are odd. If any odd value divide\n+ # the number, then that number is not prime.\n+ odd_numbers = range(3, int(math.sqrt(number)) + 1, 2)\n+ return not any(number % i == 0 for i in odd_numbers)\n+\n+\n+class Test(unittest.TestCase):\n+ def test_primes(self):\n+ self.assertTrue(primeCheck(2))\n+ self.assertTrue(primeCheck(3))\n+ self.assertTrue(primeCheck(5))\n+ self.assertTrue(primeCheck(7))\n+ self.assertTrue(primeCheck(11))\n+ self.assertTrue(primeCheck(13))\n+ self.assertTrue(primeCheck(17))\n+ self.assertTrue(primeCheck(19))\n+ self.assertTrue(primeCheck(23))\n+ self.assertTrue(primeCheck(29))\n+\n+ def test_not_primes(self):\n+ self.assertFalse(primeCheck(-19),\n+ \"Negative numbers are not prime.\")\n+ self.assertFalse(primeCheck(0),\n+ \"Zero doesn't have any divider, primes must have two\")\n+ self.assertFalse(primeCheck(1),\n+ \"One just have 1 divider, primes must have two.\")\n+ self.assertFalse(primeCheck(2 * 2))\n+ self.assertFalse(primeCheck(2 * 3))\n+ self.assertFalse(primeCheck(3 * 3))\n+ self.assertFalse(primeCheck(3 * 5))\n+ self.assertFalse(primeCheck(3 * 5 * 7))\n \n-def main():\n- print(primeCheck(37))\n- print(primeCheck(100))\n- print(primeCheck(77))\n \n if __name__ == '__main__':\n-\tmain()\n+ unittest.main()\n+\n", "issue": "math CheckPrime is wrong\nThe current implementation doesn't support:\r\n\r\n* Negative values, -1 is not a prime number. Current implementation raise a TypeError due to `math.sqrt` on negative values.\r\n\r\nThe current implementation return the wrong value for:\r\n\r\n* 0, 0 doesn't have any divider, primes must have two.\r\n* 1, 1 just have one divider, primes must have two.\n", "before_files": [{"content": "import math\ndef primeCheck(number):\n if number % 2 == 0 and number > 2: \n return False\n return all(number % i for i in range(3, int(math.sqrt(number)) + 1, 2))\n\ndef main():\n print(primeCheck(37))\n print(primeCheck(100))\n print(primeCheck(77))\n\nif __name__ == '__main__':\n\tmain()\n", "path": "maths/PrimeCheck.py"}]} | 734 | 600 |
gh_patches_debug_655 | rasdani/github-patches | git_diff | pex-tool__pex-2104 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.130
On the docket:
+ [x] Pex fails to lock - missing artifact #2098
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.129"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.129"
+__version__ = "2.1.130"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.129\"\n+__version__ = \"2.1.130\"\n", "issue": "Release 2.1.130\nOn the docket:\r\n+ [x] Pex fails to lock - missing artifact #2098 \n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.129\"\n", "path": "pex/version.py"}]} | 617 | 99 |
gh_patches_debug_22746 | rasdani/github-patches | git_diff | pre-commit__pre-commit-346 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Windows: Terminal width support
We detect terminal width in unixlikes by running `tput cols`. This works fine for those platforms but doesn't work well for windows. Maybe find a package which does this logic for us and depend on that.
</issue>
<code>
[start of pre_commit/output.py]
1 from __future__ import unicode_literals
2
3 import os
4 import subprocess
5 import sys
6
7 from pre_commit import color
8 from pre_commit import five
9
10
11 # TODO: smell: import side-effects
12 try:
13 if not os.environ.get('TERM'): # pragma: no cover (dumb terminal)
14 raise OSError('Cannot determine width without TERM')
15 else: # pragma no cover (windows)
16 COLS = int(
17 subprocess.Popen(
18 ('tput', 'cols'), stdout=subprocess.PIPE,
19 ).communicate()[0] or
20 # Default in the case of no terminal
21 80
22 )
23 except OSError: # pragma: no cover (windows)
24 COLS = 80
25
26
27 def get_hook_message(
28 start,
29 postfix='',
30 end_msg=None,
31 end_len=0,
32 end_color=None,
33 use_color=None,
34 cols=COLS,
35 ):
36 """Prints a message for running a hook.
37
38 This currently supports three approaches:
39
40 # Print `start` followed by dots, leaving 6 characters at the end
41 >>> print_hook_message('start', end_len=6)
42 start...............................................................
43
44 # Print `start` followed by dots with the end message colored if coloring
45 # is specified and a newline afterwards
46 >>> print_hook_message(
47 'start',
48 end_msg='end',
49 end_color=color.RED,
50 use_color=True,
51 )
52 start...................................................................end
53
54 # Print `start` followed by dots, followed by the `postfix` message
55 # uncolored, followed by the `end_msg` colored if specified and a newline
56 # afterwards
57 >>> print_hook_message(
58 'start',
59 postfix='postfix ',
60 end_msg='end',
61 end_color=color.RED,
62 use_color=True,
63 )
64 start...........................................................postfix end
65 """
66 if bool(end_msg) == bool(end_len):
67 raise ValueError('Expected one of (`end_msg`, `end_len`)')
68 if end_msg is not None and (end_color is None or use_color is None):
69 raise ValueError(
70 '`end_color` and `use_color` are required with `end_msg`'
71 )
72
73 if end_len:
74 return start + '.' * (cols - len(start) - end_len - 1)
75 else:
76 return '{0}{1}{2}{3}\n'.format(
77 start,
78 '.' * (cols - len(start) - len(postfix) - len(end_msg) - 1),
79 postfix,
80 color.format_color(end_msg, end_color, use_color),
81 )
82
83
84 stdout_byte_stream = getattr(sys.stdout, 'buffer', sys.stdout)
85
86
87 def sys_stdout_write_wrapper(s, stream=stdout_byte_stream):
88 stream.write(five.to_bytes(s))
89
[end of pre_commit/output.py]
[start of setup.py]
1 from setuptools import find_packages
2 from setuptools import setup
3
4
5 setup(
6 name='pre_commit',
7 description=(
8 'A framework for managing and maintaining multi-language pre-commit '
9 'hooks.'
10 ),
11 url='https://github.com/pre-commit/pre-commit',
12 version='0.7.6',
13
14 author='Anthony Sottile',
15 author_email='[email protected]',
16
17 platforms='linux',
18 classifiers=[
19 'License :: OSI Approved :: MIT License',
20 'Programming Language :: Python :: 2',
21 'Programming Language :: Python :: 2.6',
22 'Programming Language :: Python :: 2.7',
23 'Programming Language :: Python :: 3',
24 'Programming Language :: Python :: 3.3',
25 'Programming Language :: Python :: 3.4',
26 'Programming Language :: Python :: 3.5',
27 'Programming Language :: Python :: Implementation :: CPython',
28 'Programming Language :: Python :: Implementation :: PyPy',
29 ],
30
31 packages=find_packages('.', exclude=('tests*', 'testing*')),
32 package_data={
33 'pre_commit': [
34 'resources/hook-tmpl',
35 'resources/pre-push-tmpl',
36 'resources/rbenv.tar.gz',
37 'resources/ruby-build.tar.gz',
38 'resources/ruby-download.tar.gz',
39 ]
40 },
41 install_requires=[
42 'argparse',
43 'aspy.yaml',
44 'cached-property',
45 'jsonschema',
46 'nodeenv>=0.11.1',
47 'ordereddict',
48 'pyyaml',
49 'virtualenv',
50 ],
51 entry_points={
52 'console_scripts': [
53 'pre-commit = pre_commit.main:main',
54 'pre-commit-validate-config = pre_commit.clientlib.validate_config:run', # noqa
55 'pre-commit-validate-manifest = pre_commit.clientlib.validate_manifest:run', # noqa
56 ],
57 },
58 )
59
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/output.py b/pre_commit/output.py
--- a/pre_commit/output.py
+++ b/pre_commit/output.py
@@ -1,27 +1,15 @@
from __future__ import unicode_literals
-import os
-import subprocess
import sys
+from backports.shutil_get_terminal_size import get_terminal_size
+
from pre_commit import color
from pre_commit import five
-
# TODO: smell: import side-effects
-try:
- if not os.environ.get('TERM'): # pragma: no cover (dumb terminal)
- raise OSError('Cannot determine width without TERM')
- else: # pragma no cover (windows)
- COLS = int(
- subprocess.Popen(
- ('tput', 'cols'), stdout=subprocess.PIPE,
- ).communicate()[0] or
- # Default in the case of no terminal
- 80
- )
-except OSError: # pragma: no cover (windows)
- COLS = 80
+# TODO: https://github.com/chrippa/backports.shutil_get_terminal_size/issues/4
+COLS = get_terminal_size().columns or 80
def get_hook_message(
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -41,6 +41,7 @@
install_requires=[
'argparse',
'aspy.yaml',
+ 'backports.shutil_get_terminal_size',
'cached-property',
'jsonschema',
'nodeenv>=0.11.1',
| {"golden_diff": "diff --git a/pre_commit/output.py b/pre_commit/output.py\n--- a/pre_commit/output.py\n+++ b/pre_commit/output.py\n@@ -1,27 +1,15 @@\n from __future__ import unicode_literals\n \n-import os\n-import subprocess\n import sys\n \n+from backports.shutil_get_terminal_size import get_terminal_size\n+\n from pre_commit import color\n from pre_commit import five\n \n-\n # TODO: smell: import side-effects\n-try:\n- if not os.environ.get('TERM'): # pragma: no cover (dumb terminal)\n- raise OSError('Cannot determine width without TERM')\n- else: # pragma no cover (windows)\n- COLS = int(\n- subprocess.Popen(\n- ('tput', 'cols'), stdout=subprocess.PIPE,\n- ).communicate()[0] or\n- # Default in the case of no terminal\n- 80\n- )\n-except OSError: # pragma: no cover (windows)\n- COLS = 80\n+# TODO: https://github.com/chrippa/backports.shutil_get_terminal_size/issues/4\n+COLS = get_terminal_size().columns or 80\n \n \n def get_hook_message(\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -41,6 +41,7 @@\n install_requires=[\n 'argparse',\n 'aspy.yaml',\n+ 'backports.shutil_get_terminal_size',\n 'cached-property',\n 'jsonschema',\n 'nodeenv>=0.11.1',\n", "issue": "Windows: Terminal width support\nWe detect terminal width in unixlikes by running `tput cols`. This works fine for those platforms but doesn't work well for windows. Maybe find a package which does this logic for us and depend on that.\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport os\nimport subprocess\nimport sys\n\nfrom pre_commit import color\nfrom pre_commit import five\n\n\n# TODO: smell: import side-effects\ntry:\n if not os.environ.get('TERM'): # pragma: no cover (dumb terminal)\n raise OSError('Cannot determine width without TERM')\n else: # pragma no cover (windows)\n COLS = int(\n subprocess.Popen(\n ('tput', 'cols'), stdout=subprocess.PIPE,\n ).communicate()[0] or\n # Default in the case of no terminal\n 80\n )\nexcept OSError: # pragma: no cover (windows)\n COLS = 80\n\n\ndef get_hook_message(\n start,\n postfix='',\n end_msg=None,\n end_len=0,\n end_color=None,\n use_color=None,\n cols=COLS,\n):\n \"\"\"Prints a message for running a hook.\n\n This currently supports three approaches:\n\n # Print `start` followed by dots, leaving 6 characters at the end\n >>> print_hook_message('start', end_len=6)\n start...............................................................\n\n # Print `start` followed by dots with the end message colored if coloring\n # is specified and a newline afterwards\n >>> print_hook_message(\n 'start',\n end_msg='end',\n end_color=color.RED,\n use_color=True,\n )\n start...................................................................end\n\n # Print `start` followed by dots, followed by the `postfix` message\n # uncolored, followed by the `end_msg` colored if specified and a newline\n # afterwards\n >>> print_hook_message(\n 'start',\n postfix='postfix ',\n end_msg='end',\n end_color=color.RED,\n use_color=True,\n )\n start...........................................................postfix end\n \"\"\"\n if bool(end_msg) == bool(end_len):\n raise ValueError('Expected one of (`end_msg`, `end_len`)')\n if end_msg is not None and (end_color is None or use_color is None):\n raise ValueError(\n '`end_color` and `use_color` are required with `end_msg`'\n )\n\n if end_len:\n return start + '.' * (cols - len(start) - end_len - 1)\n else:\n return '{0}{1}{2}{3}\\n'.format(\n start,\n '.' 
* (cols - len(start) - len(postfix) - len(end_msg) - 1),\n postfix,\n color.format_color(end_msg, end_color, use_color),\n )\n\n\nstdout_byte_stream = getattr(sys.stdout, 'buffer', sys.stdout)\n\n\ndef sys_stdout_write_wrapper(s, stream=stdout_byte_stream):\n stream.write(five.to_bytes(s))\n", "path": "pre_commit/output.py"}, {"content": "from setuptools import find_packages\nfrom setuptools import setup\n\n\nsetup(\n name='pre_commit',\n description=(\n 'A framework for managing and maintaining multi-language pre-commit '\n 'hooks.'\n ),\n url='https://github.com/pre-commit/pre-commit',\n version='0.7.6',\n\n author='Anthony Sottile',\n author_email='[email protected]',\n\n platforms='linux',\n classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n ],\n\n packages=find_packages('.', exclude=('tests*', 'testing*')),\n package_data={\n 'pre_commit': [\n 'resources/hook-tmpl',\n 'resources/pre-push-tmpl',\n 'resources/rbenv.tar.gz',\n 'resources/ruby-build.tar.gz',\n 'resources/ruby-download.tar.gz',\n ]\n },\n install_requires=[\n 'argparse',\n 'aspy.yaml',\n 'cached-property',\n 'jsonschema',\n 'nodeenv>=0.11.1',\n 'ordereddict',\n 'pyyaml',\n 'virtualenv',\n ],\n entry_points={\n 'console_scripts': [\n 'pre-commit = pre_commit.main:main',\n 'pre-commit-validate-config = pre_commit.clientlib.validate_config:run', # noqa\n 'pre-commit-validate-manifest = pre_commit.clientlib.validate_manifest:run', # noqa\n ],\n },\n)\n", "path": "setup.py"}]} | 1,876 | 340 |
gh_patches_debug_60612 | rasdani/github-patches | git_diff | cloudtools__troposphere-2037 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add support for additional Flink runtimes in Kinesis Data Analytics.
Kinesis supports additional Flink runtimes (FLINK-1_13, ZEPPELIN-FLINK-1_0, ZEPPELIN-FLINK-2_0), see https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-application.html.
</issue>
<code>
[start of troposphere/validators/kinesisanalyticsv2.py]
1 # Copyright (c) 2012-2022, Mark Peek <[email protected]>
2 # All rights reserved.
3 #
4 # See LICENSE file for full license.
5
6
7 def validate_runtime_environment(runtime_environment):
8 """
9 Validate RuntimeEnvironment for Application
10 Property: Application.RuntimeEnvironment
11 """
12
13 VALID_RUNTIME_ENVIRONMENTS = ("SQL-1_0", "FLINK-1_6", "FLINK-1_8", "FLINK-1_11")
14
15 if runtime_environment not in VALID_RUNTIME_ENVIRONMENTS:
16 raise ValueError(
17 "Application RuntimeEnvironment must be one of: %s"
18 % ", ".join(VALID_RUNTIME_ENVIRONMENTS)
19 )
20 return runtime_environment
21
[end of troposphere/validators/kinesisanalyticsv2.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/troposphere/validators/kinesisanalyticsv2.py b/troposphere/validators/kinesisanalyticsv2.py
--- a/troposphere/validators/kinesisanalyticsv2.py
+++ b/troposphere/validators/kinesisanalyticsv2.py
@@ -10,7 +10,15 @@
Property: Application.RuntimeEnvironment
"""
- VALID_RUNTIME_ENVIRONMENTS = ("SQL-1_0", "FLINK-1_6", "FLINK-1_8", "FLINK-1_11")
+ VALID_RUNTIME_ENVIRONMENTS = (
+ "FLINK-1_6",
+ "FLINK-1_8",
+ "FLINK-1_11",
+ "FLINK-1_13",
+ "SQL-1_0",
+ "ZEPPELIN-FLINK-1_0",
+ "ZEPPELIN-FLINK-2_0",
+ )
if runtime_environment not in VALID_RUNTIME_ENVIRONMENTS:
raise ValueError(
| {"golden_diff": "diff --git a/troposphere/validators/kinesisanalyticsv2.py b/troposphere/validators/kinesisanalyticsv2.py\n--- a/troposphere/validators/kinesisanalyticsv2.py\n+++ b/troposphere/validators/kinesisanalyticsv2.py\n@@ -10,7 +10,15 @@\n Property: Application.RuntimeEnvironment\n \"\"\"\n \n- VALID_RUNTIME_ENVIRONMENTS = (\"SQL-1_0\", \"FLINK-1_6\", \"FLINK-1_8\", \"FLINK-1_11\")\n+ VALID_RUNTIME_ENVIRONMENTS = (\n+ \"FLINK-1_6\",\n+ \"FLINK-1_8\",\n+ \"FLINK-1_11\",\n+ \"FLINK-1_13\",\n+ \"SQL-1_0\",\n+ \"ZEPPELIN-FLINK-1_0\",\n+ \"ZEPPELIN-FLINK-2_0\",\n+ )\n \n if runtime_environment not in VALID_RUNTIME_ENVIRONMENTS:\n raise ValueError(\n", "issue": "Add support for additional Flink runtimes in Kinesis Data Analytics.\nKinesis supports additional Flink runtimes (FLINK-1_13, ZEPPELIN-FLINK-1_0, ZEPPELIN-FLINK-2_0), see https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-application.html.\n", "before_files": [{"content": "# Copyright (c) 2012-2022, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\n\ndef validate_runtime_environment(runtime_environment):\n \"\"\"\n Validate RuntimeEnvironment for Application\n Property: Application.RuntimeEnvironment\n \"\"\"\n\n VALID_RUNTIME_ENVIRONMENTS = (\"SQL-1_0\", \"FLINK-1_6\", \"FLINK-1_8\", \"FLINK-1_11\")\n\n if runtime_environment not in VALID_RUNTIME_ENVIRONMENTS:\n raise ValueError(\n \"Application RuntimeEnvironment must be one of: %s\"\n % \", \".join(VALID_RUNTIME_ENVIRONMENTS)\n )\n return runtime_environment\n", "path": "troposphere/validators/kinesisanalyticsv2.py"}]} | 816 | 234 |
gh_patches_debug_6154 | rasdani/github-patches | git_diff | litestar-org__litestar-1659 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
StaticFilesConfig and virtual directories
I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem.
This assumption does not hold in general, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.
https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
</issue>
<code>
[start of litestar/contrib/repository/filters.py]
1 """Collection filter datastructures."""
2 from __future__ import annotations
3
4 from dataclasses import dataclass
5 from datetime import datetime # noqa: TCH003
6 from typing import TYPE_CHECKING, Generic, Literal, TypeVar
7
8 if TYPE_CHECKING:
9 from collections import abc
10
11
12 T = TypeVar("T")
13
14 __all__ = ["BeforeAfter", "CollectionFilter", "LimitOffset", "OrderBy", "SearchFilter"]
15
16
17 @dataclass
18 class BeforeAfter:
19 """Data required to filter a query on a ``datetime`` column."""
20
21 field_name: str
22 """Name of the model attribute to filter on."""
23 before: datetime | None
24 """Filter results where field earlier than this."""
25 after: datetime | None
26 """Filter results where field later than this."""
27
28
29 @dataclass
30 class CollectionFilter(Generic[T]):
31 """Data required to construct a ``WHERE ... IN (...)`` clause."""
32
33 field_name: str
34 """Name of the model attribute to filter on."""
35 values: abc.Collection[T]
36 """Values for ``IN`` clause."""
37
38
39 @dataclass
40 class LimitOffset:
41 """Data required to add limit/offset filtering to a query."""
42
43 limit: int
44 """Value for ``LIMIT`` clause of query."""
45 offset: int
46 """Value for ``OFFSET`` clause of query."""
47
48
49 @dataclass
50 class OrderBy:
51 """Data required to construct a ``ORDER BY ...`` clause."""
52
53 field_name: str
54 """Name of the model attribute to sort on."""
55 sort_order: Literal["asc", "desc"] = "asc"
56 """Sort ascending or descending"""
57
58
59 @dataclass
60 class SearchFilter:
61 """Data required to construct a ``WHERE field_name LIKE '%' || :value || '%'`` clause."""
62
63 field_name: str
64 """Name of the model attribute to sort on."""
65 value: str
66 """Values for ``LIKE`` clause."""
67 ignore_case: bool | None = False
68 """Should the search be case insensitive."""
69
[end of litestar/contrib/repository/filters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/litestar/contrib/repository/filters.py b/litestar/contrib/repository/filters.py
--- a/litestar/contrib/repository/filters.py
+++ b/litestar/contrib/repository/filters.py
@@ -1,13 +1,10 @@
"""Collection filter datastructures."""
from __future__ import annotations
+from collections import abc # noqa: TCH003
from dataclasses import dataclass
from datetime import datetime # noqa: TCH003
-from typing import TYPE_CHECKING, Generic, Literal, TypeVar
-
-if TYPE_CHECKING:
- from collections import abc
-
+from typing import Generic, Literal, TypeVar
T = TypeVar("T")
| {"golden_diff": "diff --git a/litestar/contrib/repository/filters.py b/litestar/contrib/repository/filters.py\n--- a/litestar/contrib/repository/filters.py\n+++ b/litestar/contrib/repository/filters.py\n@@ -1,13 +1,10 @@\n \"\"\"Collection filter datastructures.\"\"\"\n from __future__ import annotations\n \n+from collections import abc # noqa: TCH003\n from dataclasses import dataclass\n from datetime import datetime # noqa: TCH003\n-from typing import TYPE_CHECKING, Generic, Literal, TypeVar\n-\n-if TYPE_CHECKING:\n- from collections import abc\n-\n+from typing import Generic, Literal, TypeVar\n \n T = TypeVar(\"T\")\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "\"\"\"Collection filter datastructures.\"\"\"\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom datetime import datetime # noqa: TCH003\nfrom typing import TYPE_CHECKING, Generic, Literal, TypeVar\n\nif TYPE_CHECKING:\n from collections import abc\n\n\nT = TypeVar(\"T\")\n\n__all__ = [\"BeforeAfter\", \"CollectionFilter\", \"LimitOffset\", \"OrderBy\", \"SearchFilter\"]\n\n\n@dataclass\nclass BeforeAfter:\n \"\"\"Data required to filter a query on a ``datetime`` column.\"\"\"\n\n field_name: str\n \"\"\"Name of the model attribute to filter on.\"\"\"\n before: datetime | None\n \"\"\"Filter results where field earlier than this.\"\"\"\n after: datetime | None\n \"\"\"Filter results where field later than this.\"\"\"\n\n\n@dataclass\nclass CollectionFilter(Generic[T]):\n \"\"\"Data required to construct a ``WHERE ... IN (...)`` clause.\"\"\"\n\n field_name: str\n \"\"\"Name of the model attribute to filter on.\"\"\"\n values: abc.Collection[T]\n \"\"\"Values for ``IN`` clause.\"\"\"\n\n\n@dataclass\nclass LimitOffset:\n \"\"\"Data required to add limit/offset filtering to a query.\"\"\"\n\n limit: int\n \"\"\"Value for ``LIMIT`` clause of query.\"\"\"\n offset: int\n \"\"\"Value for ``OFFSET`` clause of query.\"\"\"\n\n\n@dataclass\nclass OrderBy:\n \"\"\"Data required to construct a ``ORDER BY ...`` clause.\"\"\"\n\n field_name: str\n \"\"\"Name of the model attribute to sort on.\"\"\"\n sort_order: Literal[\"asc\", \"desc\"] = \"asc\"\n \"\"\"Sort ascending or descending\"\"\"\n\n\n@dataclass\nclass SearchFilter:\n \"\"\"Data required to construct a ``WHERE field_name LIKE '%' || :value || '%'`` clause.\"\"\"\n\n field_name: str\n \"\"\"Name of the model attribute to sort on.\"\"\"\n value: str\n \"\"\"Values for ``LIKE`` clause.\"\"\"\n ignore_case: bool | None = False\n \"\"\"Should the search be case insensitive.\"\"\"\n", "path": "litestar/contrib/repository/filters.py"}]} | 1,274 | 156 |
gh_patches_debug_26852 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-1999 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
tiles on plans and container: blue corner missing for external projects
For external projects, the little blue corner is missing.
Observed on macOS, in both Chrome and Firefox.
<img width="400" alt="bildschirmfoto 2019-02-11 um 16 45 01" src="https://user-images.githubusercontent.com/35491681/52574395-7d708980-2e1c-11e9-8cfd-b9f8be74ea16.png">
</issue>
<code>
[start of meinberlin/apps/dashboard/__init__.py]
1 from adhocracy4.dashboard import components
2 from adhocracy4.dashboard import ProjectDashboard
3 from meinberlin.apps.projects import get_project_type
4
5
6 default_app_config = 'meinberlin.apps.dashboard.apps.Config'
7
8
9 class TypedProjectDashboard(ProjectDashboard):
10 def __init__(self, project):
11 self.project_type = get_project_type(project)
12 if self.project_type == 'bplan':
13 project = project.externalproject.bplan
14 elif self.project_type == 'external':
15 project = project.externalproject
16 elif self.project_type == 'container':
17 project = project.projectcontainer
18 super().__init__(project)
19
20 def get_project_components(self):
21 if self.project_type == 'bplan':
22 return [components.projects.get('bplan'),
23 components.projects.get('adminlog')]
24 elif self.project_type == 'external':
25 return [components.projects.get('external'),
26 components.projects.get('adminlog')]
27 elif self.project_type == 'container':
28 return [components.projects.get('container-basic'),
29 components.projects.get('container-information'),
30 components.projects.get('topics'),
31 components.projects.get('point'),
32 components.projects.get('container-projects')]
33
34 return [component for component in components.get_project_components()
35 if component.is_effective(self.project)]
36
37 def get_module_components(self):
38 if self.project_type == 'bplan':
39 return []
40 elif self.project_type == 'external':
41 return []
42 elif self.project_type == 'container':
43 return []
44
45 return components.get_module_components()
46
[end of meinberlin/apps/dashboard/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/meinberlin/apps/dashboard/__init__.py b/meinberlin/apps/dashboard/__init__.py
--- a/meinberlin/apps/dashboard/__init__.py
+++ b/meinberlin/apps/dashboard/__init__.py
@@ -20,15 +20,20 @@
def get_project_components(self):
if self.project_type == 'bplan':
return [components.projects.get('bplan'),
+ components.projects.get('plans'),
components.projects.get('adminlog')]
elif self.project_type == 'external':
return [components.projects.get('external'),
+ components.projects.get('topics'),
+ components.projects.get('point'),
+ components.projects.get('plans'),
components.projects.get('adminlog')]
elif self.project_type == 'container':
return [components.projects.get('container-basic'),
components.projects.get('container-information'),
components.projects.get('topics'),
components.projects.get('point'),
+ components.projects.get('plans'),
components.projects.get('container-projects')]
return [component for component in components.get_project_components()
| {"golden_diff": "diff --git a/meinberlin/apps/dashboard/__init__.py b/meinberlin/apps/dashboard/__init__.py\n--- a/meinberlin/apps/dashboard/__init__.py\n+++ b/meinberlin/apps/dashboard/__init__.py\n@@ -20,15 +20,20 @@\n def get_project_components(self):\n if self.project_type == 'bplan':\n return [components.projects.get('bplan'),\n+ components.projects.get('plans'),\n components.projects.get('adminlog')]\n elif self.project_type == 'external':\n return [components.projects.get('external'),\n+ components.projects.get('topics'),\n+ components.projects.get('point'),\n+ components.projects.get('plans'),\n components.projects.get('adminlog')]\n elif self.project_type == 'container':\n return [components.projects.get('container-basic'),\n components.projects.get('container-information'),\n components.projects.get('topics'),\n components.projects.get('point'),\n+ components.projects.get('plans'),\n components.projects.get('container-projects')]\n \n return [component for component in components.get_project_components()\n", "issue": "tiles on plans and container: blue corner missing for external projects\nfor external projects the little blue corner is missing\r\n\r\nmac on chrome and firefox\r\n\r\n<img width=\"400\" alt=\"bildschirmfoto 2019-02-11 um 16 45 01\" src=\"https://user-images.githubusercontent.com/35491681/52574395-7d708980-2e1c-11e9-8cfd-b9f8be74ea16.png\">\r\n\n", "before_files": [{"content": "from adhocracy4.dashboard import components\nfrom adhocracy4.dashboard import ProjectDashboard\nfrom meinberlin.apps.projects import get_project_type\n\n\ndefault_app_config = 'meinberlin.apps.dashboard.apps.Config'\n\n\nclass TypedProjectDashboard(ProjectDashboard):\n def __init__(self, project):\n self.project_type = get_project_type(project)\n if self.project_type == 'bplan':\n project = project.externalproject.bplan\n elif self.project_type == 'external':\n project = project.externalproject\n elif self.project_type == 'container':\n project = project.projectcontainer\n super().__init__(project)\n\n def get_project_components(self):\n if self.project_type == 'bplan':\n return [components.projects.get('bplan'),\n components.projects.get('adminlog')]\n elif self.project_type == 'external':\n return [components.projects.get('external'),\n components.projects.get('adminlog')]\n elif self.project_type == 'container':\n return [components.projects.get('container-basic'),\n components.projects.get('container-information'),\n components.projects.get('topics'),\n components.projects.get('point'),\n components.projects.get('container-projects')]\n\n return [component for component in components.get_project_components()\n if component.is_effective(self.project)]\n\n def get_module_components(self):\n if self.project_type == 'bplan':\n return []\n elif self.project_type == 'external':\n return []\n elif self.project_type == 'container':\n return []\n\n return components.get_module_components()\n", "path": "meinberlin/apps/dashboard/__init__.py"}]} | 1,073 | 231 |
gh_patches_debug_4607 | rasdani/github-patches | git_diff | CTFd__CTFd-1726 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Incorrect update alert in Admin panel
<!--
If this is a bug report please fill out the template below.
If this is a feature request please describe the behavior that you'd like to see.
-->
**Environment**:
- CTFd Version/Commit: 3.1.1
- Operating System: Ubuntu 20.4
- Web Browser and Version: Chrome 85
**What happened?**
The admin panel shows an alert: "A new CTFd version is available!", which links to "https://github.com/CTFd/CTFd/releases/tag/2.4.2". I encountered the issue with version 3.0.2 as well. After a complete reinstall and upgrade to version 3.1.1, the problem persisted.
**What did you expect to happen?**
I expected no alert, as my CTFd version is the newest, and certainly newer than 2.4.2.
**How to reproduce your issue**
Go to the admin pages.
**Any associated stack traces or error logs**
No
</issue>
<code>
[start of CTFd/utils/updates/__init__.py]
1 import sys
2 import time
3 from distutils.version import StrictVersion
4 from platform import python_version
5
6 import requests
7 from flask import current_app as app
8
9 from CTFd.models import Challenges, Teams, Users, db
10 from CTFd.utils import get_app_config, get_config, set_config
11 from CTFd.utils.config import is_setup
12 from CTFd.utils.crypto import sha256
13
14
15 def update_check(force=False):
16 """
17 Makes a request to ctfd.io to check if there is a new version of CTFd available. The service is provided in return
18 for users opting in to anonymous usage data collection. Users can opt-out of update checks by specifying
19 UPDATE_CHECK = False in config.py
20
21 :param force:
22 :return:
23 """
24 # If UPDATE_CHECK is disabled don't check for updates at all.
25 if app.config.get("UPDATE_CHECK") is False:
26 return
27
28 # Don't do an update check if not setup
29 if is_setup() is False:
30 return
31
32 # Get when we should check for updates next.
33 next_update_check = get_config("next_update_check") or 0
34
35 # If we have passed our saved time or we are forcing we should check.
36 update = (next_update_check < time.time()) or force
37
38 if update:
39 try:
40 name = str(get_config("ctf_name")) or ""
41 params = {
42 "ctf_id": sha256(name),
43 "current": app.VERSION,
44 "python_version_raw": sys.hexversion,
45 "python_version": python_version(),
46 "db_driver": db.session.bind.dialect.name,
47 "challenge_count": Challenges.query.count(),
48 "user_mode": get_config("user_mode"),
49 "user_count": Users.query.count(),
50 "team_count": Teams.query.count(),
51 "theme": get_config("ctf_theme"),
52 "upload_provider": get_app_config("UPLOAD_PROVIDER"),
53 "channel": app.CHANNEL,
54 }
55 check = requests.get(
56 "https://versioning.ctfd.io/check", params=params, timeout=0.1
57 ).json()
58 except requests.exceptions.RequestException:
59 pass
60 except ValueError:
61 pass
62 else:
63 try:
64 latest = check["resource"]["tag"]
65 html_url = check["resource"]["html_url"]
66 if StrictVersion(latest) > StrictVersion(app.VERSION):
67 set_config("version_latest", html_url)
68 elif StrictVersion(latest) <= StrictVersion(app.VERSION):
69 set_config("version_latest", None)
70 next_update_check_time = check["resource"].get(
71 "next", int(time.time() + 43200)
72 )
73 set_config("next_update_check", next_update_check_time)
74 except KeyError:
75 set_config("version_latest", None)
76
[end of CTFd/utils/updates/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/CTFd/utils/updates/__init__.py b/CTFd/utils/updates/__init__.py
--- a/CTFd/utils/updates/__init__.py
+++ b/CTFd/utils/updates/__init__.py
@@ -53,7 +53,7 @@
"channel": app.CHANNEL,
}
check = requests.get(
- "https://versioning.ctfd.io/check", params=params, timeout=0.1
+ "https://versioning.ctfd.io/check", params=params, timeout=3
).json()
except requests.exceptions.RequestException:
pass
| {"golden_diff": "diff --git a/CTFd/utils/updates/__init__.py b/CTFd/utils/updates/__init__.py\n--- a/CTFd/utils/updates/__init__.py\n+++ b/CTFd/utils/updates/__init__.py\n@@ -53,7 +53,7 @@\n \"channel\": app.CHANNEL,\n }\n check = requests.get(\n- \"https://versioning.ctfd.io/check\", params=params, timeout=0.1\n+ \"https://versioning.ctfd.io/check\", params=params, timeout=3\n ).json()\n except requests.exceptions.RequestException:\n pass\n", "issue": "Incorrect update alert in Admin panel\n<!--\r\nIf this is a bug report please fill out the template below.\r\n\r\nIf this is a feature request please describe the behavior that you'd like to see.\r\n-->\r\n\r\n**Environment**:\r\n\r\n- CTFd Version/Commit: 3.1.1\r\n- Operating System: Ubuntu 20.4\r\n- Web Browser and Version: Chrome 85\r\n\r\n**What happened?**\r\nThe admin panel shows an alert: \"A new CTFd version is available!\", which links to \"https://github.com/CTFd/CTFd/releases/tag/2.4.2\". I encountered the issue with version 3.0.2. as well. After complete reinstall and upgrade to version 3.1.1 the problem persisted\r\n\r\n**What did you expect to happen?**\r\nI expected no alert, as my CTFd version is the newest, and certainly newer than 2.4.2.\r\n\r\n**How to reproduce your issue**\r\nGo to the admin pages.\r\n\r\n**Any associated stack traces or error logs**\r\nNo\n", "before_files": [{"content": "import sys\nimport time\nfrom distutils.version import StrictVersion\nfrom platform import python_version\n\nimport requests\nfrom flask import current_app as app\n\nfrom CTFd.models import Challenges, Teams, Users, db\nfrom CTFd.utils import get_app_config, get_config, set_config\nfrom CTFd.utils.config import is_setup\nfrom CTFd.utils.crypto import sha256\n\n\ndef update_check(force=False):\n \"\"\"\n Makes a request to ctfd.io to check if there is a new version of CTFd available. The service is provided in return\n for users opting in to anonymous usage data collection. 
Users can opt-out of update checks by specifying\n UPDATE_CHECK = False in config.py\n\n :param force:\n :return:\n \"\"\"\n # If UPDATE_CHECK is disabled don't check for updates at all.\n if app.config.get(\"UPDATE_CHECK\") is False:\n return\n\n # Don't do an update check if not setup\n if is_setup() is False:\n return\n\n # Get when we should check for updates next.\n next_update_check = get_config(\"next_update_check\") or 0\n\n # If we have passed our saved time or we are forcing we should check.\n update = (next_update_check < time.time()) or force\n\n if update:\n try:\n name = str(get_config(\"ctf_name\")) or \"\"\n params = {\n \"ctf_id\": sha256(name),\n \"current\": app.VERSION,\n \"python_version_raw\": sys.hexversion,\n \"python_version\": python_version(),\n \"db_driver\": db.session.bind.dialect.name,\n \"challenge_count\": Challenges.query.count(),\n \"user_mode\": get_config(\"user_mode\"),\n \"user_count\": Users.query.count(),\n \"team_count\": Teams.query.count(),\n \"theme\": get_config(\"ctf_theme\"),\n \"upload_provider\": get_app_config(\"UPLOAD_PROVIDER\"),\n \"channel\": app.CHANNEL,\n }\n check = requests.get(\n \"https://versioning.ctfd.io/check\", params=params, timeout=0.1\n ).json()\n except requests.exceptions.RequestException:\n pass\n except ValueError:\n pass\n else:\n try:\n latest = check[\"resource\"][\"tag\"]\n html_url = check[\"resource\"][\"html_url\"]\n if StrictVersion(latest) > StrictVersion(app.VERSION):\n set_config(\"version_latest\", html_url)\n elif StrictVersion(latest) <= StrictVersion(app.VERSION):\n set_config(\"version_latest\", None)\n next_update_check_time = check[\"resource\"].get(\n \"next\", int(time.time() + 43200)\n )\n set_config(\"next_update_check\", next_update_check_time)\n except KeyError:\n set_config(\"version_latest\", None)\n", "path": "CTFd/utils/updates/__init__.py"}]} | 1,507 | 135 |
gh_patches_debug_1657 | rasdani/github-patches | git_diff | kubeflow__pipelines-5054 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TypeError occurs in gcp/automl/create_dataset_for_tables component
### What steps did you take:
[A clear and concise description of what the bug is.]
[gcp/automl/create_dataset_for_tables component](https://github.com/kubeflow/pipelines/tree/master/components/gcp/automl/create_dataset_for_tables)'s `create_time` output is declared as a string:
https://github.com/kubeflow/pipelines/blob/ecb14f40bb819c0678589b6458892ece5369fa71/components/gcp/automl/create_dataset_for_tables/component.yaml#L15
however, `google.protobuf.timestamp_pb2.Timestamp` is returned in actual fact:
https://github.com/kubeflow/pipelines/blob/ecb14f40bb819c0678589b6458892ece5369fa71/components/gcp/automl/create_dataset_for_tables/component.py#L54
FYI: The `dataset` object is an instance of `google.cloud.automl_v1beta1.types.Dataset` class and its [document](https://googleapis.dev/python/automl/0.4.0/gapic/v1beta1/types.html#google.cloud.automl_v1beta1.types.Dataset.create_time) says:
> **create_time**
> Output only. Timestamp when this dataset was created.
### What happened:
`TypeError` occurs

### What did you expect to happen:
Work.
### Environment:
<!-- Please fill in those that seem relevant. -->
How did you deploy Kubeflow Pipelines (KFP)? AI Platform Pipelines
<!-- If you are not sure, here's [an introduction of all options](https://www.kubeflow.org/docs/pipelines/installation/overview/). -->
KFP version: 1.0.4 <!-- If you are not sure, build commit shows on bottom of KFP UI left sidenav. -->
KFP SDK version: 1.3.0 <!-- Please attach the output of this shell command: $pip list | grep kfp -->
### Anything else you would like to add:
[Miscellaneous information that will assist in solving the issue.]
/kind bug
<!-- Please include labels by uncommenting them to help us better triage issues, choose from the following -->
<!--
// /area frontend
// /area backend
// /area sdk
// /area testing
// /area engprod
-->
</issue>
<code>
[start of components/gcp/automl/create_dataset_for_tables/component.py]
1 # Copyright 2019 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import NamedTuple
16
17
18 def automl_create_dataset_for_tables(
19 gcp_project_id: str,
20 gcp_region: str,
21 display_name: str,
22 description: str = None,
23 tables_dataset_metadata: dict = {},
24 retry=None, #=google.api_core.gapic_v1.method.DEFAULT,
25 timeout: float = None, #=google.api_core.gapic_v1.method.DEFAULT,
26 metadata: dict = None,
27 ) -> NamedTuple('Outputs', [('dataset_path', str), ('create_time', str), ('dataset_id', str), ('dataset_url', 'URI')]):
28 '''automl_create_dataset_for_tables creates an empty Dataset for AutoML tables
29 '''
30 import google
31 from google.cloud import automl
32 client = automl.AutoMlClient()
33
34 location_path = client.location_path(gcp_project_id, gcp_region)
35 dataset_dict = {
36 'display_name': display_name,
37 'description': description,
38 'tables_dataset_metadata': tables_dataset_metadata,
39 }
40 dataset = client.create_dataset(
41 location_path,
42 dataset_dict,
43 retry or google.api_core.gapic_v1.method.DEFAULT,
44 timeout or google.api_core.gapic_v1.method.DEFAULT,
45 metadata,
46 )
47 print(dataset)
48 dataset_id = dataset.name.rsplit('/', 1)[-1]
49 dataset_url = 'https://console.cloud.google.com/automl-tables/locations/{region}/datasets/{dataset_id}/schemav2?project={project_id}'.format(
50 project_id=gcp_project_id,
51 region=gcp_region,
52 dataset_id=dataset_id,
53 )
54 return (dataset.name, dataset.create_time, dataset_id, dataset_url)
55
56
57 if __name__ == '__main__':
58 import kfp
59 kfp.components.func_to_container_op(
60 automl_create_dataset_for_tables,
61 output_component_file='component.yaml',
62 base_image='python:3.7',
63 packages_to_install=['google-cloud-automl==0.4.0']
64 )
65
[end of components/gcp/automl/create_dataset_for_tables/component.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/components/gcp/automl/create_dataset_for_tables/component.py b/components/gcp/automl/create_dataset_for_tables/component.py
--- a/components/gcp/automl/create_dataset_for_tables/component.py
+++ b/components/gcp/automl/create_dataset_for_tables/component.py
@@ -51,7 +51,7 @@
region=gcp_region,
dataset_id=dataset_id,
)
- return (dataset.name, dataset.create_time, dataset_id, dataset_url)
+ return (dataset.name, str(dataset.create_time), dataset_id, dataset_url)
if __name__ == '__main__':
| {"golden_diff": "diff --git a/components/gcp/automl/create_dataset_for_tables/component.py b/components/gcp/automl/create_dataset_for_tables/component.py\n--- a/components/gcp/automl/create_dataset_for_tables/component.py\n+++ b/components/gcp/automl/create_dataset_for_tables/component.py\n@@ -51,7 +51,7 @@\n region=gcp_region,\n dataset_id=dataset_id,\n )\n- return (dataset.name, dataset.create_time, dataset_id, dataset_url)\n+ return (dataset.name, str(dataset.create_time), dataset_id, dataset_url)\n \n \n if __name__ == '__main__':\n", "issue": "TypeErro occurs in gcp/automl/create_dataset_for_tables component\n### What steps did you take:\r\n[A clear and concise description of what the bug is.]\r\n\r\n[gcp/automl/create_dataset_for_tables component](https://github.com/kubeflow/pipelines/tree/master/components/gcp/automl/create_dataset_for_tables)'s `create_time` output is declared as a string:\r\n\r\nhttps://github.com/kubeflow/pipelines/blob/ecb14f40bb819c0678589b6458892ece5369fa71/components/gcp/automl/create_dataset_for_tables/component.yaml#L15\r\n\r\nhowever, `google.protobuf.timestamp_pb2.Timestamp` is returned in actual fact:\r\n\r\nhttps://github.com/kubeflow/pipelines/blob/ecb14f40bb819c0678589b6458892ece5369fa71/components/gcp/automl/create_dataset_for_tables/component.py#L54\r\n\r\nFYI: The `dataset` object is an instance of `google.cloud.automl_v1beta1.types.Dataset` class and its [document](https://googleapis.dev/python/automl/0.4.0/gapic/v1beta1/types.html#google.cloud.automl_v1beta1.types.Dataset.create_time) says:\r\n\r\n> **create_time**\r\n> Output only. Timestamp when this dataset was created.\r\n\r\n### What happened:\r\n\r\n`TypeError` occurs\r\n\r\n\r\n\r\n### What did you expect to happen:\r\n\r\nWork.\r\n\r\n### Environment:\r\n<!-- Please fill in those that seem relevant. -->\r\n\r\nHow did you deploy Kubeflow Pipelines (KFP)? AI Platform Pipelines\r\n<!-- If you are not sure, here's [an introduction of all options](https://www.kubeflow.org/docs/pipelines/installation/overview/). -->\r\n\r\nKFP version: 1.0.4 <!-- If you are not sure, build commit shows on bottom of KFP UI left sidenav. 
-->\r\n\r\nKFP SDK version: 1.3.0 <!-- Please attach the output of this shell command: $pip list | grep kfp -->\r\n\r\n\r\n### Anything else you would like to add:\r\n[Miscellaneous information that will assist in solving the issue.]\r\n\r\n/kind bug\r\n<!-- Please include labels by uncommenting them to help us better triage issues, choose from the following -->\r\n<!--\r\n// /area frontend\r\n// /area backend\r\n// /area sdk\r\n// /area testing\r\n// /area engprod\r\n-->\r\n\n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import NamedTuple\n\n\ndef automl_create_dataset_for_tables(\n gcp_project_id: str,\n gcp_region: str,\n display_name: str,\n description: str = None,\n tables_dataset_metadata: dict = {},\n retry=None, #=google.api_core.gapic_v1.method.DEFAULT,\n timeout: float = None, #=google.api_core.gapic_v1.method.DEFAULT,\n metadata: dict = None,\n) -> NamedTuple('Outputs', [('dataset_path', str), ('create_time', str), ('dataset_id', str), ('dataset_url', 'URI')]):\n '''automl_create_dataset_for_tables creates an empty Dataset for AutoML tables\n '''\n import google\n from google.cloud import automl\n client = automl.AutoMlClient()\n\n location_path = client.location_path(gcp_project_id, gcp_region)\n dataset_dict = {\n 'display_name': display_name,\n 'description': description,\n 'tables_dataset_metadata': tables_dataset_metadata,\n }\n dataset = client.create_dataset(\n location_path,\n dataset_dict,\n retry or google.api_core.gapic_v1.method.DEFAULT,\n timeout or google.api_core.gapic_v1.method.DEFAULT,\n metadata,\n )\n print(dataset)\n dataset_id = dataset.name.rsplit('/', 1)[-1]\n dataset_url = 'https://console.cloud.google.com/automl-tables/locations/{region}/datasets/{dataset_id}/schemav2?project={project_id}'.format(\n project_id=gcp_project_id,\n region=gcp_region,\n dataset_id=dataset_id,\n )\n return (dataset.name, dataset.create_time, dataset_id, dataset_url)\n\n\nif __name__ == '__main__':\n import kfp\n kfp.components.func_to_container_op(\n automl_create_dataset_for_tables,\n output_component_file='component.yaml',\n base_image='python:3.7',\n packages_to_install=['google-cloud-automl==0.4.0']\n )\n", "path": "components/gcp/automl/create_dataset_for_tables/component.py"}]} | 1,827 | 132 |
gh_patches_debug_7432 | rasdani/github-patches | git_diff | pulp__pulpcore-3412 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
0077_move_remote_url_credentials.py fails on Remotes that have @ in path, not netloc
**Version**
3.18.10
**Describe the bug**
Migration 0077 fails when you have a remote that has an @ somewhere in the path
```
Applying core.0077_move_remote_url_credentials...Traceback (most recent call last):
File "/usr/bin/pulpcore-manager", line 33, in <module>
sys.exit(load_entry_point('pulpcore==3.18.10', 'console_scripts', 'pulpcore-manager')())
File "/usr/lib/python3.9/site-packages/pulpcore/app/manage.py", line 11, in manage
execute_from_command_line(sys.argv)
File "/usr/lib/python3.9/site-packages/django/core/management/__init__.py", line 419, in execute_from_command_line
utility.execute()
File "/usr/lib/python3.9/site-packages/django/core/management/__init__.py", line 413, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/usr/lib/python3.9/site-packages/django/core/management/base.py", line 354, in run_from_argv
self.execute(*args, **cmd_options)
File "/usr/lib/python3.9/site-packages/django/core/management/base.py", line 398, in execute
output = self.handle(*args, **options)
File "/usr/lib/python3.9/site-packages/django/core/management/base.py", line 89, in wrapped
res = handle_func(*args, **kwargs)
File "/usr/lib/python3.9/site-packages/django/core/management/commands/migrate.py", line 244, in handle
post_migrate_state = executor.migrate(
File "/usr/lib/python3.9/site-packages/django/db/migrations/executor.py", line 117, in migrate
state = self._migrate_all_forwards(state, plan, full_plan, fake=fake, fake_initial=fake_initial)
File "/usr/lib/python3.9/site-packages/django/db/migrations/executor.py", line 147, in _migrate_all_forwards
state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial)
File "/usr/lib/python3.9/site-packages/django/db/migrations/executor.py", line 227, in apply_migration
state = migration.apply(state, schema_editor)
File "/usr/lib/python3.9/site-packages/django/db/migrations/migration.py", line 126, in apply
operation.database_forwards(self.app_label, schema_editor, old_state, project_state)
File "/usr/lib/python3.9/site-packages/django/db/migrations/operations/special.py", line 190, in database_forwards
self.code(from_state.apps, schema_editor)
File "/usr/lib/python3.9/site-packages/pulpcore/app/migrations/0077_move_remote_url_credentials.py", line 19, in move_remote_url_credentials
_, url_split = url.netloc.rsplit("@", maxsplit=1)
ValueError: not enough values to unpack (expected 2, got 1)
```
**To Reproduce**
Steps to reproduce the behavior:
* Have a remote `https://download.copr.fedorainfracloud.org/results/@caddy/caddy/epel-8-x86_64/`
* Try to migrate 0077
**Expected behavior**
The migration applies.
**Additional context**
https://community.theforeman.org/t/foreman-3-3-katello-4-5-upgrade-failed-pulpcore-manager-migrate-noinput/31088
</issue>
<code>
[start of pulpcore/app/migrations/0077_move_remote_url_credentials.py]
1 # Generated by Django 3.2.6 on 2021-09-29 14:00
2
3 from urllib.parse import urlparse, urlunparse
4
5 from django.db import migrations
6
7
8 def move_remote_url_credentials(apps, schema_editor):
9 Remote = apps.get_model("core", "Remote")
10
11 for remote in Remote.objects.filter(url__contains="@").iterator():
12 url = urlparse(remote.url)
13
14 if not remote.username:
15 remote.username = url.username
16 if not remote.password:
17 remote.password = url.password
18
19 _, url_split = url.netloc.rsplit("@", maxsplit=1)
20 remote.url = urlunparse(url._replace(netloc=url_split))
21 remote.save()
22
23
24 class Migration(migrations.Migration):
25
26 dependencies = [
27 ('core', '0076_remove_reserved_resource'),
28 ]
29
30 operations = [
31 migrations.RunPython(
32 code=move_remote_url_credentials,
33 reverse_code=migrations.RunPython.noop,
34 elidable=True,
35 )
36 ]
37
[end of pulpcore/app/migrations/0077_move_remote_url_credentials.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pulpcore/app/migrations/0077_move_remote_url_credentials.py b/pulpcore/app/migrations/0077_move_remote_url_credentials.py
--- a/pulpcore/app/migrations/0077_move_remote_url_credentials.py
+++ b/pulpcore/app/migrations/0077_move_remote_url_credentials.py
@@ -11,6 +11,11 @@
for remote in Remote.objects.filter(url__contains="@").iterator():
url = urlparse(remote.url)
+ if '@' not in url.netloc:
+ # URLs can have an @ in other places than the netloc,
+ # but those do not indicate credentials
+ continue
+
if not remote.username:
remote.username = url.username
if not remote.password:
| {"golden_diff": "diff --git a/pulpcore/app/migrations/0077_move_remote_url_credentials.py b/pulpcore/app/migrations/0077_move_remote_url_credentials.py\n--- a/pulpcore/app/migrations/0077_move_remote_url_credentials.py\n+++ b/pulpcore/app/migrations/0077_move_remote_url_credentials.py\n@@ -11,6 +11,11 @@\n for remote in Remote.objects.filter(url__contains=\"@\").iterator():\n url = urlparse(remote.url)\n \n+ if '@' not in url.netloc:\n+ # URLs can have an @ in other places than the netloc,\n+ # but those do not indicate credentials\n+ continue\n+\n if not remote.username:\n remote.username = url.username\n if not remote.password:\n", "issue": "0077_move_remote_url_credentials.py fails on Remotes that have @ in path, not netloc\n**Version**\r\n3.18.10\r\n\r\n**Describe the bug**\r\nMigration 0077 fails when you have a remote that has an @ somewhere in the path\r\n\r\n```\r\n Applying core.0077_move_remote_url_credentials...Traceback (most recent call last):\r\n File \"/usr/bin/pulpcore-manager\", line 33, in <module>\r\n sys.exit(load_entry_point('pulpcore==3.18.10', 'console_scripts', 'pulpcore-manager')())\r\n File \"/usr/lib/python3.9/site-packages/pulpcore/app/manage.py\", line 11, in manage\r\n execute_from_command_line(sys.argv)\r\n File \"/usr/lib/python3.9/site-packages/django/core/management/__init__.py\", line 419, in execute_from_command_line\r\n utility.execute()\r\n File \"/usr/lib/python3.9/site-packages/django/core/management/__init__.py\", line 413, in execute\r\n self.fetch_command(subcommand).run_from_argv(self.argv)\r\n File \"/usr/lib/python3.9/site-packages/django/core/management/base.py\", line 354, in run_from_argv\r\n self.execute(*args, **cmd_options)\r\n File \"/usr/lib/python3.9/site-packages/django/core/management/base.py\", line 398, in execute\r\n output = self.handle(*args, **options)\r\n File \"/usr/lib/python3.9/site-packages/django/core/management/base.py\", line 89, in wrapped\r\n res = handle_func(*args, **kwargs)\r\n File \"/usr/lib/python3.9/site-packages/django/core/management/commands/migrate.py\", line 244, in handle\r\n post_migrate_state = executor.migrate(\r\n File \"/usr/lib/python3.9/site-packages/django/db/migrations/executor.py\", line 117, in migrate\r\n state = self._migrate_all_forwards(state, plan, full_plan, fake=fake, fake_initial=fake_initial)\r\n File \"/usr/lib/python3.9/site-packages/django/db/migrations/executor.py\", line 147, in _migrate_all_forwards\r\n state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial)\r\n File \"/usr/lib/python3.9/site-packages/django/db/migrations/executor.py\", line 227, in apply_migration\r\n state = migration.apply(state, schema_editor)\r\n File \"/usr/lib/python3.9/site-packages/django/db/migrations/migration.py\", line 126, in apply\r\n operation.database_forwards(self.app_label, schema_editor, old_state, project_state)\r\n File \"/usr/lib/python3.9/site-packages/django/db/migrations/operations/special.py\", line 190, in database_forwards\r\n self.code(from_state.apps, schema_editor)\r\n File \"/usr/lib/python3.9/site-packages/pulpcore/app/migrations/0077_move_remote_url_credentials.py\", line 19, in move_remote_url_credentials\r\n _, url_split = url.netloc.rsplit(\"@\", maxsplit=1)\r\nValueError: not enough values to unpack (expected 2, got 1)\r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n* Have a remote `https://download.copr.fedorainfracloud.org/results/@caddy/caddy/epel-8-x86_64/`\r\n* Try to migrate 0077\r\n\r\n**Expected 
behavior**\r\nmigration aplies\r\n\r\n**Additional context**\r\nhttps://community.theforeman.org/t/foreman-3-3-katello-4-5-upgrade-failed-pulpcore-manager-migrate-noinput/31088\r\n\n", "before_files": [{"content": "# Generated by Django 3.2.6 on 2021-09-29 14:00\n\nfrom urllib.parse import urlparse, urlunparse\n\nfrom django.db import migrations\n\n\ndef move_remote_url_credentials(apps, schema_editor):\n Remote = apps.get_model(\"core\", \"Remote\")\n\n for remote in Remote.objects.filter(url__contains=\"@\").iterator():\n url = urlparse(remote.url)\n\n if not remote.username:\n remote.username = url.username\n if not remote.password:\n remote.password = url.password\n\n _, url_split = url.netloc.rsplit(\"@\", maxsplit=1)\n remote.url = urlunparse(url._replace(netloc=url_split))\n remote.save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0076_remove_reserved_resource'),\n ]\n\n operations = [\n migrations.RunPython(\n code=move_remote_url_credentials,\n reverse_code=migrations.RunPython.noop,\n elidable=True,\n )\n ]\n", "path": "pulpcore/app/migrations/0077_move_remote_url_credentials.py"}]} | 1,674 | 173 |
gh_patches_debug_112 | rasdani/github-patches | git_diff | InstaPy__InstaPy-4046 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Instapy-chromedriver not supporting latest Chrome browser version
The Instapy-chromedriver only supports Chrome up to version 71, and since the update the whole program quits with an error telling you to ensure chromedriver is installed at .../insta-py/chromedriver_linux64.
</issue>
<code>
[start of instapy/__init__.py]
1 # flake8: noqa
2
3 from .instapy import InstaPy
4 from .util import smart_run
5 from .settings import Settings
6 from .file_manager import set_workspace
7 from .file_manager import get_workspace
8
9
10 # __variables__ with double-quoted values will be available in setup.py
11 __version__ = "0.2.1"
12
13
[end of instapy/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/instapy/__init__.py b/instapy/__init__.py
--- a/instapy/__init__.py
+++ b/instapy/__init__.py
@@ -8,5 +8,5 @@
# __variables__ with double-quoted values will be available in setup.py
-__version__ = "0.2.1"
+__version__ = "0.2.2"
| {"golden_diff": "diff --git a/instapy/__init__.py b/instapy/__init__.py\n--- a/instapy/__init__.py\n+++ b/instapy/__init__.py\n@@ -8,5 +8,5 @@\n \n \n # __variables__ with double-quoted values will be available in setup.py\n-__version__ = \"0.2.1\"\n+__version__ = \"0.2.2\"\n", "issue": "Instapy-chromedriver not supporting latest Chrome browser version\nThe Instapy-chrome driver only supports Chrome upto versions 71 and since the update, the whole program quits with the error of ensure chromedriver is installed at .../insta-py/chromedriver_linux64..\n", "before_files": [{"content": "# flake8: noqa\n\nfrom .instapy import InstaPy\nfrom .util import smart_run\nfrom .settings import Settings\nfrom .file_manager import set_workspace\nfrom .file_manager import get_workspace\n\n\n# __variables__ with double-quoted values will be available in setup.py\n__version__ = \"0.2.1\"\n\n", "path": "instapy/__init__.py"}]} | 690 | 92 |
gh_patches_debug_25769 | rasdani/github-patches | git_diff | encode__starlette-1401 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
templating: jinja2: pass kwargs for environment
I think it would be good to pass something like `env_kwargs` via https://github.com/blueyed/starlette/blob/24c135de71ac56a73f7f797258115941579155bf/starlette/templating.py#L51-L53.
While you can change the env afterwards, it would allow Jinja2 to validate e.g. `enable_async`, and call `load_extensions` etc.
</issue>
<code>
[start of starlette/templating.py]
1 import typing
2 from os import PathLike
3
4 from starlette.background import BackgroundTask
5 from starlette.responses import Response
6 from starlette.types import Receive, Scope, Send
7
8 try:
9 import jinja2
10
11 # @contextfunction renamed to @pass_context in Jinja 3.0, to be removed in 3.1
12 if hasattr(jinja2, "pass_context"):
13 pass_context = jinja2.pass_context
14 else: # pragma: nocover
15 pass_context = jinja2.contextfunction
16 except ImportError: # pragma: nocover
17 jinja2 = None # type: ignore
18
19
20 class _TemplateResponse(Response):
21 media_type = "text/html"
22
23 def __init__(
24 self,
25 template: typing.Any,
26 context: dict,
27 status_code: int = 200,
28 headers: dict = None,
29 media_type: str = None,
30 background: BackgroundTask = None,
31 ):
32 self.template = template
33 self.context = context
34 content = template.render(context)
35 super().__init__(content, status_code, headers, media_type, background)
36
37 async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
38 request = self.context.get("request", {})
39 extensions = request.get("extensions", {})
40 if "http.response.template" in extensions:
41 await send(
42 {
43 "type": "http.response.template",
44 "template": self.template,
45 "context": self.context,
46 }
47 )
48 await super().__call__(scope, receive, send)
49
50
51 class Jinja2Templates:
52 """
53 templates = Jinja2Templates("templates")
54
55 return templates.TemplateResponse("index.html", {"request": request})
56 """
57
58 def __init__(self, directory: typing.Union[str, PathLike]) -> None:
59 assert jinja2 is not None, "jinja2 must be installed to use Jinja2Templates"
60 self.env = self._create_env(directory)
61
62 def _create_env(
63 self, directory: typing.Union[str, PathLike]
64 ) -> "jinja2.Environment":
65 @pass_context
66 def url_for(context: dict, name: str, **path_params: typing.Any) -> str:
67 request = context["request"]
68 return request.url_for(name, **path_params)
69
70 loader = jinja2.FileSystemLoader(directory)
71 env = jinja2.Environment(loader=loader, autoescape=True)
72 env.globals["url_for"] = url_for
73 return env
74
75 def get_template(self, name: str) -> "jinja2.Template":
76 return self.env.get_template(name)
77
78 def TemplateResponse(
79 self,
80 name: str,
81 context: dict,
82 status_code: int = 200,
83 headers: dict = None,
84 media_type: str = None,
85 background: BackgroundTask = None,
86 ) -> _TemplateResponse:
87 if "request" not in context:
88 raise ValueError('context must include a "request" key')
89 template = self.get_template(name)
90 return _TemplateResponse(
91 template,
92 context,
93 status_code=status_code,
94 headers=headers,
95 media_type=media_type,
96 background=background,
97 )
98
[end of starlette/templating.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/starlette/templating.py b/starlette/templating.py
--- a/starlette/templating.py
+++ b/starlette/templating.py
@@ -55,12 +55,14 @@
return templates.TemplateResponse("index.html", {"request": request})
"""
- def __init__(self, directory: typing.Union[str, PathLike]) -> None:
+ def __init__(
+ self, directory: typing.Union[str, PathLike], **env_options: typing.Any
+ ) -> None:
assert jinja2 is not None, "jinja2 must be installed to use Jinja2Templates"
- self.env = self._create_env(directory)
+ self.env = self._create_env(directory, **env_options)
def _create_env(
- self, directory: typing.Union[str, PathLike]
+ self, directory: typing.Union[str, PathLike], **env_options: typing.Any
) -> "jinja2.Environment":
@pass_context
def url_for(context: dict, name: str, **path_params: typing.Any) -> str:
@@ -68,7 +70,10 @@
return request.url_for(name, **path_params)
loader = jinja2.FileSystemLoader(directory)
- env = jinja2.Environment(loader=loader, autoescape=True)
+ env_options.setdefault("loader", loader)
+ env_options.setdefault("autoescape", True)
+
+ env = jinja2.Environment(**env_options)
env.globals["url_for"] = url_for
return env
| {"golden_diff": "diff --git a/starlette/templating.py b/starlette/templating.py\n--- a/starlette/templating.py\n+++ b/starlette/templating.py\n@@ -55,12 +55,14 @@\n return templates.TemplateResponse(\"index.html\", {\"request\": request})\n \"\"\"\n \n- def __init__(self, directory: typing.Union[str, PathLike]) -> None:\n+ def __init__(\n+ self, directory: typing.Union[str, PathLike], **env_options: typing.Any\n+ ) -> None:\n assert jinja2 is not None, \"jinja2 must be installed to use Jinja2Templates\"\n- self.env = self._create_env(directory)\n+ self.env = self._create_env(directory, **env_options)\n \n def _create_env(\n- self, directory: typing.Union[str, PathLike]\n+ self, directory: typing.Union[str, PathLike], **env_options: typing.Any\n ) -> \"jinja2.Environment\":\n @pass_context\n def url_for(context: dict, name: str, **path_params: typing.Any) -> str:\n@@ -68,7 +70,10 @@\n return request.url_for(name, **path_params)\n \n loader = jinja2.FileSystemLoader(directory)\n- env = jinja2.Environment(loader=loader, autoescape=True)\n+ env_options.setdefault(\"loader\", loader)\n+ env_options.setdefault(\"autoescape\", True)\n+\n+ env = jinja2.Environment(**env_options)\n env.globals[\"url_for\"] = url_for\n return env\n", "issue": "templateing: jinja2: pass kwargs for environment\nI think it would be good to pass something like `env_kwargs` via https://github.com/blueyed/starlette/blob/24c135de71ac56a73f7f797258115941579155bf/starlette/templating.py#L51-L53.\r\n\r\nWhile you can change the env afterwards, it would allow Jinja2 to validate e.g. `enable_async`, and call `load_extensions` etc.\n", "before_files": [{"content": "import typing\nfrom os import PathLike\n\nfrom starlette.background import BackgroundTask\nfrom starlette.responses import Response\nfrom starlette.types import Receive, Scope, Send\n\ntry:\n import jinja2\n\n # @contextfunction renamed to @pass_context in Jinja 3.0, to be removed in 3.1\n if hasattr(jinja2, \"pass_context\"):\n pass_context = jinja2.pass_context\n else: # pragma: nocover\n pass_context = jinja2.contextfunction\nexcept ImportError: # pragma: nocover\n jinja2 = None # type: ignore\n\n\nclass _TemplateResponse(Response):\n media_type = \"text/html\"\n\n def __init__(\n self,\n template: typing.Any,\n context: dict,\n status_code: int = 200,\n headers: dict = None,\n media_type: str = None,\n background: BackgroundTask = None,\n ):\n self.template = template\n self.context = context\n content = template.render(context)\n super().__init__(content, status_code, headers, media_type, background)\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n request = self.context.get(\"request\", {})\n extensions = request.get(\"extensions\", {})\n if \"http.response.template\" in extensions:\n await send(\n {\n \"type\": \"http.response.template\",\n \"template\": self.template,\n \"context\": self.context,\n }\n )\n await super().__call__(scope, receive, send)\n\n\nclass Jinja2Templates:\n \"\"\"\n templates = Jinja2Templates(\"templates\")\n\n return templates.TemplateResponse(\"index.html\", {\"request\": request})\n \"\"\"\n\n def __init__(self, directory: typing.Union[str, PathLike]) -> None:\n assert jinja2 is not None, \"jinja2 must be installed to use Jinja2Templates\"\n self.env = self._create_env(directory)\n\n def _create_env(\n self, directory: typing.Union[str, PathLike]\n ) -> \"jinja2.Environment\":\n @pass_context\n def url_for(context: dict, name: str, **path_params: typing.Any) -> str:\n request = 
context[\"request\"]\n return request.url_for(name, **path_params)\n\n loader = jinja2.FileSystemLoader(directory)\n env = jinja2.Environment(loader=loader, autoescape=True)\n env.globals[\"url_for\"] = url_for\n return env\n\n def get_template(self, name: str) -> \"jinja2.Template\":\n return self.env.get_template(name)\n\n def TemplateResponse(\n self,\n name: str,\n context: dict,\n status_code: int = 200,\n headers: dict = None,\n media_type: str = None,\n background: BackgroundTask = None,\n ) -> _TemplateResponse:\n if \"request\" not in context:\n raise ValueError('context must include a \"request\" key')\n template = self.get_template(name)\n return _TemplateResponse(\n template,\n context,\n status_code=status_code,\n headers=headers,\n media_type=media_type,\n background=background,\n )\n", "path": "starlette/templating.py"}]} | 1,546 | 350 |
gh_patches_debug_18185 | rasdani/github-patches | git_diff | mozilla__bugbug-214 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use the bug snapshot transform in the "uplift" model
Depends on #5.
</issue>
<code>
[start of bugbug/models/uplift.py]
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import xgboost
7 from imblearn.under_sampling import RandomUnderSampler
8 from sklearn.compose import ColumnTransformer
9 from sklearn.feature_extraction import DictVectorizer
10 from sklearn.pipeline import Pipeline
11
12 from bugbug import bug_features
13 from bugbug import bugzilla
14 from bugbug.model import Model
15
16
17 class UpliftModel(Model):
18 def __init__(self, lemmatization=False):
19 Model.__init__(self, lemmatization)
20
21 self.sampler = RandomUnderSampler(random_state=0)
22
23 feature_extractors = [
24 bug_features.has_str(),
25 bug_features.has_regression_range(),
26 bug_features.severity(),
27 bug_features.keywords(),
28 bug_features.is_coverity_issue(),
29 bug_features.has_crash_signature(),
30 bug_features.has_url(),
31 bug_features.has_w3c_url(),
32 bug_features.has_github_url(),
33 bug_features.whiteboard(),
34 bug_features.patches(),
35 bug_features.landings(),
36 bug_features.title(),
37 ]
38
39 cleanup_functions = [
40 bug_features.cleanup_fileref,
41 bug_features.cleanup_url,
42 bug_features.cleanup_synonyms,
43 ]
44
45 self.extraction_pipeline = Pipeline([
46 ('bug_extractor', bug_features.BugExtractor(feature_extractors, cleanup_functions)),
47 ('union', ColumnTransformer([
48 ('data', DictVectorizer(), 'data'),
49
50 ('title', self.text_vectorizer(), 'title'),
51
52 ('comments', self.text_vectorizer(), 'comments'),
53 ])),
54 ])
55
56 self.clf = xgboost.XGBClassifier(n_jobs=16)
57 self.clf.set_params(predictor='cpu_predictor')
58
59 def get_labels(self):
60 classes = {}
61
62 for bug_data in bugzilla.get_bugs():
63 bug_id = int(bug_data['id'])
64
65 for attachment in bug_data['attachments']:
66 for flag in attachment['flags']:
67 if not flag['name'].startswith('approval-mozilla-') or flag['status'] not in ['+', '-']:
68 continue
69
70 if flag['status'] == '+':
71 classes[bug_id] = 1
72 elif flag['status'] == '-':
73 classes[bug_id] = 0
74
75 return classes
76
77 def get_feature_names(self):
78 return self.extraction_pipeline.named_steps['union'].get_feature_names()
79
[end of bugbug/models/uplift.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bugbug/models/uplift.py b/bugbug/models/uplift.py
--- a/bugbug/models/uplift.py
+++ b/bugbug/models/uplift.py
@@ -43,7 +43,7 @@
]
self.extraction_pipeline = Pipeline([
- ('bug_extractor', bug_features.BugExtractor(feature_extractors, cleanup_functions)),
+ ('bug_extractor', bug_features.BugExtractor(feature_extractors, cleanup_functions, rollback=True, rollback_when=self.rollback)),
('union', ColumnTransformer([
('data', DictVectorizer(), 'data'),
@@ -56,6 +56,9 @@
self.clf = xgboost.XGBClassifier(n_jobs=16)
self.clf.set_params(predictor='cpu_predictor')
+ def rollback(self, change):
+ return (change['field_name'] == 'flagtypes.name' and change['added'].startswith('approval-mozilla-') and (change['added'].endswith('+') or change['added'].endswith('-')))
+
def get_labels(self):
classes = {}
| {"golden_diff": "diff --git a/bugbug/models/uplift.py b/bugbug/models/uplift.py\n--- a/bugbug/models/uplift.py\n+++ b/bugbug/models/uplift.py\n@@ -43,7 +43,7 @@\n ]\n \n self.extraction_pipeline = Pipeline([\n- ('bug_extractor', bug_features.BugExtractor(feature_extractors, cleanup_functions)),\n+ ('bug_extractor', bug_features.BugExtractor(feature_extractors, cleanup_functions, rollback=True, rollback_when=self.rollback)),\n ('union', ColumnTransformer([\n ('data', DictVectorizer(), 'data'),\n \n@@ -56,6 +56,9 @@\n self.clf = xgboost.XGBClassifier(n_jobs=16)\n self.clf.set_params(predictor='cpu_predictor')\n \n+ def rollback(self, change):\n+ return (change['field_name'] == 'flagtypes.name' and change['added'].startswith('approval-mozilla-') and (change['added'].endswith('+') or change['added'].endswith('-')))\n+\n def get_labels(self):\n classes = {}\n", "issue": "Use the bug snapshot transform in the \"uplift\" model\nDepends on #5.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport xgboost\nfrom imblearn.under_sampling import RandomUnderSampler\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features\nfrom bugbug import bugzilla\nfrom bugbug.model import Model\n\n\nclass UpliftModel(Model):\n def __init__(self, lemmatization=False):\n Model.__init__(self, lemmatization)\n\n self.sampler = RandomUnderSampler(random_state=0)\n\n feature_extractors = [\n bug_features.has_str(),\n bug_features.has_regression_range(),\n bug_features.severity(),\n bug_features.keywords(),\n bug_features.is_coverity_issue(),\n bug_features.has_crash_signature(),\n bug_features.has_url(),\n bug_features.has_w3c_url(),\n bug_features.has_github_url(),\n bug_features.whiteboard(),\n bug_features.patches(),\n bug_features.landings(),\n bug_features.title(),\n ]\n\n cleanup_functions = [\n bug_features.cleanup_fileref,\n bug_features.cleanup_url,\n bug_features.cleanup_synonyms,\n ]\n\n self.extraction_pipeline = Pipeline([\n ('bug_extractor', bug_features.BugExtractor(feature_extractors, cleanup_functions)),\n ('union', ColumnTransformer([\n ('data', DictVectorizer(), 'data'),\n\n ('title', self.text_vectorizer(), 'title'),\n\n ('comments', self.text_vectorizer(), 'comments'),\n ])),\n ])\n\n self.clf = xgboost.XGBClassifier(n_jobs=16)\n self.clf.set_params(predictor='cpu_predictor')\n\n def get_labels(self):\n classes = {}\n\n for bug_data in bugzilla.get_bugs():\n bug_id = int(bug_data['id'])\n\n for attachment in bug_data['attachments']:\n for flag in attachment['flags']:\n if not flag['name'].startswith('approval-mozilla-') or flag['status'] not in ['+', '-']:\n continue\n\n if flag['status'] == '+':\n classes[bug_id] = 1\n elif flag['status'] == '-':\n classes[bug_id] = 0\n\n return classes\n\n def get_feature_names(self):\n return self.extraction_pipeline.named_steps['union'].get_feature_names()\n", "path": "bugbug/models/uplift.py"}]} | 1,250 | 232 |
gh_patches_debug_349 | rasdani/github-patches | git_diff | google__turbinia-1070 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Missing sys module import in logger.py
Logger module is missing an import statement for 'sys'
</issue>
<code>
[start of turbinia/config/logger.py]
1 # -*- coding: utf-8 -*-
2 # Copyright 2017 Google Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Sets up logging."""
16
17 from __future__ import unicode_literals
18 import logging
19
20 import warnings
21 import logging.handlers
22 import os
23
24 from turbinia import config
25 from turbinia import TurbiniaException
26
27 # Environment variable to look for node name in
28 ENVNODENAME = 'NODE_NAME'
29
30
31 def setup(need_file_handler=True, need_stream_handler=True, log_file_path=None):
32 """Set up logging parameters.
33
34 This will also set the root logger, which is the default logger when a named
35 logger is not specified. We currently use 'turbinia' as the named logger,
36 however some external modules that are called by Turbinia can use the root
37 logger, so we want to be able to optionally configure that as well.
38 """
39 # Remove known warning about credentials
40 warnings.filterwarnings(
41 'ignore', 'Your application has authenticated using end user credentials')
42
43 logger = logging.getLogger('turbinia')
44 # Eliminate double logging from root logger
45 logger.propagate = False
46
47 # We only need a handler if one of that type doesn't exist already
48 if logger.handlers:
49 for handler in logger.handlers:
50 # Want to do strict type-checking here because is instance will include
51 # subclasses and so won't distinguish between StreamHandlers and
52 # FileHandlers.
53 # pylint: disable=unidiomatic-typecheck
54 if type(handler) == logging.FileHandler:
55 need_file_handler = False
56
57 # pylint: disable=unidiomatic-typecheck
58 if type(handler) == logging.StreamHandler:
59 need_stream_handler = False
60
61 if need_file_handler:
62 try:
63 config.LoadConfig()
64 except TurbiniaException as exception:
65 print(
66 'Could not load config file ({0!s}).\n{1:s}'.format(
67 exception, config.CONFIG_MSG))
68 sys.exit(1)
69
70 # Check if a user specified log path was provided else create default path
71 if not log_file_path:
72 log_name = os.uname().nodename
73 # Check if NODE_NAME available for GKE setups
74 if ENVNODENAME in os.environ:
75 log_name = log_name + '.{0!s}'.format(os.environ[ENVNODENAME])
76 log_file_path = os.path.join(config.LOG_DIR, log_name) + '.log'
77
78 file_handler = logging.FileHandler(log_file_path)
79 formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')
80 file_handler.setFormatter(formatter)
81 file_handler.setLevel(logging.DEBUG)
82 logger.addHandler(file_handler)
83
84 console_handler = logging.StreamHandler()
85 formatter = logging.Formatter(
86 '%(asctime)s [%(levelname)s] %(message)s', "%Y-%m-%d %H:%M:%S")
87 console_handler.setFormatter(formatter)
88 if need_stream_handler:
89 logger.addHandler(console_handler)
90
91 # Configure the root logger to use exactly our handlers because other modules
92 # like PSQ use this, and we want to see log messages from it when executing
93 # from CLI.
94 root_log = logging.getLogger()
95 for handler in root_log.handlers:
96 root_log.removeHandler(handler)
97 root_log.addHandler(console_handler)
98 if need_file_handler:
99 root_log.addHandler(file_handler)
100
101 # Set filelock logging to ERROR due to log spam
102 logging.getLogger("filelock").setLevel(logging.ERROR)
103
[end of turbinia/config/logger.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/turbinia/config/logger.py b/turbinia/config/logger.py
--- a/turbinia/config/logger.py
+++ b/turbinia/config/logger.py
@@ -20,6 +20,7 @@
import warnings
import logging.handlers
import os
+import sys
from turbinia import config
from turbinia import TurbiniaException
| {"golden_diff": "diff --git a/turbinia/config/logger.py b/turbinia/config/logger.py\n--- a/turbinia/config/logger.py\n+++ b/turbinia/config/logger.py\n@@ -20,6 +20,7 @@\n import warnings\n import logging.handlers\n import os\n+import sys\n \n from turbinia import config\n from turbinia import TurbiniaException\n", "issue": "Missing sys module import in logger.py\nLogger module is missing an import statement for 'sys'\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Sets up logging.\"\"\"\n\nfrom __future__ import unicode_literals\nimport logging\n\nimport warnings\nimport logging.handlers\nimport os\n\nfrom turbinia import config\nfrom turbinia import TurbiniaException\n\n# Environment variable to look for node name in\nENVNODENAME = 'NODE_NAME'\n\n\ndef setup(need_file_handler=True, need_stream_handler=True, log_file_path=None):\n \"\"\"Set up logging parameters.\n\n This will also set the root logger, which is the default logger when a named\n logger is not specified. We currently use 'turbinia' as the named logger,\n however some external modules that are called by Turbinia can use the root\n logger, so we want to be able to optionally configure that as well.\n \"\"\"\n # Remove known warning about credentials\n warnings.filterwarnings(\n 'ignore', 'Your application has authenticated using end user credentials')\n\n logger = logging.getLogger('turbinia')\n # Eliminate double logging from root logger\n logger.propagate = False\n\n # We only need a handler if one of that type doesn't exist already\n if logger.handlers:\n for handler in logger.handlers:\n # Want to do strict type-checking here because is instance will include\n # subclasses and so won't distinguish between StreamHandlers and\n # FileHandlers.\n # pylint: disable=unidiomatic-typecheck\n if type(handler) == logging.FileHandler:\n need_file_handler = False\n\n # pylint: disable=unidiomatic-typecheck\n if type(handler) == logging.StreamHandler:\n need_stream_handler = False\n\n if need_file_handler:\n try:\n config.LoadConfig()\n except TurbiniaException as exception:\n print(\n 'Could not load config file ({0!s}).\\n{1:s}'.format(\n exception, config.CONFIG_MSG))\n sys.exit(1)\n\n # Check if a user specified log path was provided else create default path\n if not log_file_path:\n log_name = os.uname().nodename\n # Check if NODE_NAME available for GKE setups\n if ENVNODENAME in os.environ:\n log_name = log_name + '.{0!s}'.format(os.environ[ENVNODENAME])\n log_file_path = os.path.join(config.LOG_DIR, log_name) + '.log'\n\n file_handler = logging.FileHandler(log_file_path)\n formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')\n file_handler.setFormatter(formatter)\n file_handler.setLevel(logging.DEBUG)\n logger.addHandler(file_handler)\n\n console_handler = logging.StreamHandler()\n formatter = logging.Formatter(\n '%(asctime)s [%(levelname)s] %(message)s', \"%Y-%m-%d %H:%M:%S\")\n 
console_handler.setFormatter(formatter)\n if need_stream_handler:\n logger.addHandler(console_handler)\n\n # Configure the root logger to use exactly our handlers because other modules\n # like PSQ use this, and we want to see log messages from it when executing\n # from CLI.\n root_log = logging.getLogger()\n for handler in root_log.handlers:\n root_log.removeHandler(handler)\n root_log.addHandler(console_handler)\n if need_file_handler:\n root_log.addHandler(file_handler)\n\n # Set filelock logging to ERROR due to log spam\n logging.getLogger(\"filelock\").setLevel(logging.ERROR)\n", "path": "turbinia/config/logger.py"}]} | 1,619 | 84 |
gh_patches_debug_23568 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-2921 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Clean up dependencies
Spring cleaning! We currently declare some dependencies which are either unused or can easily be substituted:
- h11 - not used at all?
- requests - tests + examples only.
We should IMHO also eventually consider removing the following dependencies, although that involves a bit of work and shouldn't be in scope for this issue:
- pyasn1 - replace with asn1crypto, which is used by cryptography/pyOpenSSL
- ldap3 - only used for ldap proxy auth, which should probably live outside of the core once we have a healthy addon system.
</issue>
<code>
[start of setup.py]
1 import os
2 from codecs import open
3
4 import re
5 from setuptools import setup, find_packages
6
7 # Based on https://github.com/pypa/sampleproject/blob/master/setup.py
8 # and https://python-packaging-user-guide.readthedocs.org/
9
10 here = os.path.abspath(os.path.dirname(__file__))
11
12 with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
13 long_description = f.read()
14
15 with open(os.path.join(here, "mitmproxy", "version.py")) as f:
16 VERSION = re.search(r'VERSION = "(.+?)(?:-0x|")', f.read()).group(1)
17
18 setup(
19 name="mitmproxy",
20 version=VERSION,
21 description="An interactive, SSL-capable, man-in-the-middle HTTP proxy for penetration testers and software developers.",
22 long_description=long_description,
23 url="http://mitmproxy.org",
24 author="Aldo Cortesi",
25 author_email="[email protected]",
26 license="MIT",
27 classifiers=[
28 "License :: OSI Approved :: MIT License",
29 "Development Status :: 5 - Production/Stable",
30 "Environment :: Console",
31 "Environment :: Console :: Curses",
32 "Operating System :: MacOS :: MacOS X",
33 "Operating System :: POSIX",
34 "Operating System :: Microsoft :: Windows",
35 "Programming Language :: Python",
36 "Programming Language :: Python :: 3",
37 "Programming Language :: Python :: 3 :: Only",
38 "Programming Language :: Python :: 3.5",
39 "Programming Language :: Python :: 3.6",
40 "Programming Language :: Python :: Implementation :: CPython",
41 "Topic :: Security",
42 "Topic :: Internet",
43 "Topic :: Internet :: WWW/HTTP",
44 "Topic :: Internet :: Proxy Servers",
45 "Topic :: Software Development :: Testing"
46 ],
47 packages=find_packages(include=[
48 "mitmproxy", "mitmproxy.*",
49 "pathod", "pathod.*",
50 ]),
51 include_package_data=True,
52 entry_points={
53 'console_scripts': [
54 "mitmproxy = mitmproxy.tools.main:mitmproxy",
55 "mitmdump = mitmproxy.tools.main:mitmdump",
56 "mitmweb = mitmproxy.tools.main:mitmweb",
57 "pathod = pathod.pathod_cmdline:go_pathod",
58 "pathoc = pathod.pathoc_cmdline:go_pathoc"
59 ]
60 },
61 # https://packaging.python.org/en/latest/requirements/#install-requires
62 # It is not considered best practice to use install_requires to pin dependencies to specific versions.
63 install_requires=[
64 "blinker>=1.4, <1.5",
65 "brotlipy>=0.7.0,<0.8",
66 "certifi>=2015.11.20.1", # no semver here - this should always be on the last release!
67 "click>=6.2, <7",
68 "cryptography>=2.1.4,<2.2",
69 'h11>=0.7.0,<0.8',
70 "h2>=3.0.1,<4",
71 "hyperframe>=5.1.0,<6",
72 "kaitaistruct>=0.7,<0.9",
73 "ldap3>=2.4,<2.5",
74 "passlib>=1.6.5, <1.8",
75 "pyasn1>=0.3.1,<0.5",
76 "pyOpenSSL>=17.5,<17.6",
77 "pyparsing>=2.1.3, <2.3",
78 "pyperclip>=1.6.0, <1.7",
79 "requests>=2.9.1, <3",
80 "ruamel.yaml>=0.13.2, <0.16",
81 "sortedcontainers>=1.5.4, <1.6",
82 "tornado>=4.3, <4.6",
83 "urwid>=2.0.1,<2.1",
84 "wsproto>=0.11.0,<0.12.0",
85 ],
86 extras_require={
87 ':sys_platform == "win32"': [
88 "pydivert>=2.0.3,<2.2",
89 ],
90 'dev': [
91 "flake8>=3.5, <3.6",
92 "Flask>=0.10.1, <0.13",
93 "mypy>=0.560,<0.561",
94 "pytest-cov>=2.5.1,<3",
95 "pytest-faulthandler>=1.3.1,<2",
96 "pytest-timeout>=1.2.1,<2",
97 "pytest-xdist>=1.22,<2",
98 "pytest>=3.3,<4",
99 "tox>=2.3, <3",
100 "rstcheck>=2.2, <4.0",
101 ],
102 'examples': [
103 "beautifulsoup4>=4.4.1, <4.7",
104 "Pillow>=4.3,<5.1",
105 ]
106 }
107 )
108
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -66,7 +66,6 @@
"certifi>=2015.11.20.1", # no semver here - this should always be on the last release!
"click>=6.2, <7",
"cryptography>=2.1.4,<2.2",
- 'h11>=0.7.0,<0.8',
"h2>=3.0.1,<4",
"hyperframe>=5.1.0,<6",
"kaitaistruct>=0.7,<0.9",
@@ -76,7 +75,6 @@
"pyOpenSSL>=17.5,<17.6",
"pyparsing>=2.1.3, <2.3",
"pyperclip>=1.6.0, <1.7",
- "requests>=2.9.1, <3",
"ruamel.yaml>=0.13.2, <0.16",
"sortedcontainers>=1.5.4, <1.6",
"tornado>=4.3, <4.6",
@@ -96,6 +94,7 @@
"pytest-timeout>=1.2.1,<2",
"pytest-xdist>=1.22,<2",
"pytest>=3.3,<4",
+ "requests>=2.9.1, <3",
"tox>=2.3, <3",
"rstcheck>=2.2, <4.0",
],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -66,7 +66,6 @@\n \"certifi>=2015.11.20.1\", # no semver here - this should always be on the last release!\n \"click>=6.2, <7\",\n \"cryptography>=2.1.4,<2.2\",\n- 'h11>=0.7.0,<0.8',\n \"h2>=3.0.1,<4\",\n \"hyperframe>=5.1.0,<6\",\n \"kaitaistruct>=0.7,<0.9\",\n@@ -76,7 +75,6 @@\n \"pyOpenSSL>=17.5,<17.6\",\n \"pyparsing>=2.1.3, <2.3\",\n \"pyperclip>=1.6.0, <1.7\",\n- \"requests>=2.9.1, <3\",\n \"ruamel.yaml>=0.13.2, <0.16\",\n \"sortedcontainers>=1.5.4, <1.6\",\n \"tornado>=4.3, <4.6\",\n@@ -96,6 +94,7 @@\n \"pytest-timeout>=1.2.1,<2\",\n \"pytest-xdist>=1.22,<2\",\n \"pytest>=3.3,<4\",\n+ \"requests>=2.9.1, <3\",\n \"tox>=2.3, <3\",\n \"rstcheck>=2.2, <4.0\",\n ],\n", "issue": "Clean up dependencies\nSpring cleaning! We currently declare some dependencies which are either unused or can easily be substituted:\r\n\r\n - h11 - not used at all?\r\n - requests - tests + examples only.\r\n\r\nWe should IMHO also eventually consider removing the following dependencies, although that involves a bit of work and shouldn't be in scope for this issue:\r\n\r\n - pyasn1 - replace with asn1crypto, which is used by cryptography/pyOpenSSL\r\n - ldap3 - only used for ldap proxy auth, which should probably live outside of the core once we have a healthy addon system.\n", "before_files": [{"content": "import os\nfrom codecs import open\n\nimport re\nfrom setuptools import setup, find_packages\n\n# Based on https://github.com/pypa/sampleproject/blob/master/setup.py\n# and https://python-packaging-user-guide.readthedocs.org/\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:\n long_description = f.read()\n\nwith open(os.path.join(here, \"mitmproxy\", \"version.py\")) as f:\n VERSION = re.search(r'VERSION = \"(.+?)(?:-0x|\")', f.read()).group(1)\n\nsetup(\n name=\"mitmproxy\",\n version=VERSION,\n description=\"An interactive, SSL-capable, man-in-the-middle HTTP proxy for penetration testers and software developers.\",\n long_description=long_description,\n url=\"http://mitmproxy.org\",\n author=\"Aldo Cortesi\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Environment :: Console :: Curses\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Topic :: Security\",\n \"Topic :: Internet\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: Proxy Servers\",\n \"Topic :: Software Development :: Testing\"\n ],\n packages=find_packages(include=[\n \"mitmproxy\", \"mitmproxy.*\",\n \"pathod\", \"pathod.*\",\n ]),\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n \"mitmproxy = mitmproxy.tools.main:mitmproxy\",\n \"mitmdump = mitmproxy.tools.main:mitmdump\",\n \"mitmweb = mitmproxy.tools.main:mitmweb\",\n \"pathod = pathod.pathod_cmdline:go_pathod\",\n \"pathoc = pathod.pathoc_cmdline:go_pathoc\"\n ]\n },\n # https://packaging.python.org/en/latest/requirements/#install-requires\n # It is not considered best 
practice to use install_requires to pin dependencies to specific versions.\n install_requires=[\n \"blinker>=1.4, <1.5\",\n \"brotlipy>=0.7.0,<0.8\",\n \"certifi>=2015.11.20.1\", # no semver here - this should always be on the last release!\n \"click>=6.2, <7\",\n \"cryptography>=2.1.4,<2.2\",\n 'h11>=0.7.0,<0.8',\n \"h2>=3.0.1,<4\",\n \"hyperframe>=5.1.0,<6\",\n \"kaitaistruct>=0.7,<0.9\",\n \"ldap3>=2.4,<2.5\",\n \"passlib>=1.6.5, <1.8\",\n \"pyasn1>=0.3.1,<0.5\",\n \"pyOpenSSL>=17.5,<17.6\",\n \"pyparsing>=2.1.3, <2.3\",\n \"pyperclip>=1.6.0, <1.7\",\n \"requests>=2.9.1, <3\",\n \"ruamel.yaml>=0.13.2, <0.16\",\n \"sortedcontainers>=1.5.4, <1.6\",\n \"tornado>=4.3, <4.6\",\n \"urwid>=2.0.1,<2.1\",\n \"wsproto>=0.11.0,<0.12.0\",\n ],\n extras_require={\n ':sys_platform == \"win32\"': [\n \"pydivert>=2.0.3,<2.2\",\n ],\n 'dev': [\n \"flake8>=3.5, <3.6\",\n \"Flask>=0.10.1, <0.13\",\n \"mypy>=0.560,<0.561\",\n \"pytest-cov>=2.5.1,<3\",\n \"pytest-faulthandler>=1.3.1,<2\",\n \"pytest-timeout>=1.2.1,<2\",\n \"pytest-xdist>=1.22,<2\",\n \"pytest>=3.3,<4\",\n \"tox>=2.3, <3\",\n \"rstcheck>=2.2, <4.0\",\n ],\n 'examples': [\n \"beautifulsoup4>=4.4.1, <4.7\",\n \"Pillow>=4.3,<5.1\",\n ]\n }\n)\n", "path": "setup.py"}]} | 1,992 | 368 |
gh_patches_debug_33273 | rasdani/github-patches | git_diff | GeotrekCE__Geotrek-admin-1377 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Should not disable edit button if having bypass structure permission
Workaround: write url by hand (eg. "/trek/edit/1/").
</issue>
<code>
[start of geotrek/authent/models.py]
1 # -*- coding: utf-8 -*-
2
3 """
4 Models to manage users and profiles
5 """
6 from django.db import models
7 from django.contrib.auth.models import User
8 from django.conf import settings
9 from django.utils.translation import ugettext_lazy as _
10 from django.dispatch import receiver
11 from django.contrib.auth.signals import user_logged_in
12
13 from geotrek.common.utils import reify
14
15
16 class Structure(models.Model):
17 """
18 Represents an organisational structure, to which users are related.
19 """
20 name = models.CharField(max_length=256, verbose_name=_(u"Nom"))
21
22 def __unicode__(self):
23 return self.name
24
25 class Meta:
26 verbose_name = _(u"Structure")
27 verbose_name_plural = _(u"Structures")
28 ordering = ['name']
29 permissions = (("can_bypass_structure", _("Can by structure")),)
30
31
32 def default_structure():
33 """ Create default structure if necessary """
34 return Structure.objects.get_or_create(name=settings.DEFAULT_STRUCTURE_NAME)[0]
35
36
37 class StructureRelatedQuerySet(models.query.QuerySet):
38 def for_user(self, user):
39 return StructureRelatedQuerySet.queryset_for_user(self, user)
40
41 @staticmethod
42 def queryset_for_user(queryset, user):
43 return queryset.filter(structure=user.profile.structure)
44
45
46 class StructureRelatedManager(models.Manager):
47 """ A simple manager to manage structure related objects"""
48 def get_queryset(self):
49 return StructureRelatedQuerySet(self.model, using=self._db)
50
51 def for_user(self, user):
52 """ Filter by user's structure """
53 return self.get_queryset().for_user(user)
54
55
56 class StructureRelated(models.Model):
57 """
58 A mixin used for any entities that belong to a structure
59 """
60 structure = models.ForeignKey(Structure, default=default_structure,
61 verbose_name=_(u"Related structure"), db_column='structure')
62
63 objects = models.Manager()
64 in_structure = StructureRelatedManager()
65
66 @classmethod
67 def for_user(cls, user):
68 """ Shortcut to manager's filter by user """
69 return cls.in_structure.for_user(user)
70
71 def same_structure(self, user):
72 """ Returns True if the user is in the same structure, False otherwise. """
73 return user.profile.structure == self.structure
74
75 class Meta:
76 abstract = True
77 verbose_name = _(u"Related structures")
78 verbose_name_plural = _(u"Related structure")
79
80
81 class UserProfile(StructureRelated):
82 """
83 A custom user profile
84 """
85 user = models.OneToOneField(User, unique=True)
86
87 language = models.CharField(_(u"Language"), max_length=10,
88 choices=settings.LANGUAGES,
89 default=settings.LANGUAGE_CODE)
90
91 class Meta:
92 verbose_name = _(u"User's profile")
93 verbose_name_plural = _(u"User's profiles")
94
95 def __unicode__(self):
96 return _("Profile for %s") % self.user
97
98 User.profile = reify(lambda u: UserProfile.objects.get_or_create(user=u)[0])
99
100
101 @receiver(user_logged_in)
102 def lang(sender, **kwargs):
103 """ Set user's language in session when he logs in. """
104 lang_code = kwargs['user'].profile.language
105 kwargs['request'].session['django_language'] = lang_code
106
[end of geotrek/authent/models.py]
[start of geotrek/authent/decorators.py]
1 from functools import wraps
2
3 from django.shortcuts import redirect
4 from django.contrib import messages
5 from django.http import HttpResponseRedirect
6 from django.utils.translation import ugettext_lazy as _
7 from django.utils.decorators import available_attrs
8
9
10 def same_structure_required(redirect_to):
11 """
12 A decorator for class-based views. It relies on ``self.get_object()``
13 method object, and assumes decorated views to handle ``StructureRelated``
14 objects.
15 """
16 def decorator(view_func):
17 @wraps(view_func, assigned=available_attrs(view_func))
18 def _wrapped_view(self, request, *args, **kwargs):
19 result = view_func(self, request, *args, **kwargs)
20
21 # Superuser is always allowed
22 if request.user.is_superuser:
23 return result
24
25 if isinstance(result, HttpResponseRedirect):
26 return result
27
28 can_bypass_structure = request.user.has_perm('can_bypass_structure')
29 obj = hasattr(self, 'get_object') and self.get_object() or getattr(self, 'object', None)
30 if can_bypass_structure or (obj and obj.same_structure(request.user)):
31 return result
32 messages.warning(request, _(u'Access to the requested resource is restricted by structure. You have been redirected.'))
33
34 return redirect(redirect_to, *args, **kwargs)
35 return _wrapped_view
36 return decorator
37
[end of geotrek/authent/decorators.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/geotrek/authent/decorators.py b/geotrek/authent/decorators.py
--- a/geotrek/authent/decorators.py
+++ b/geotrek/authent/decorators.py
@@ -18,16 +18,11 @@
def _wrapped_view(self, request, *args, **kwargs):
result = view_func(self, request, *args, **kwargs)
- # Superuser is always allowed
- if request.user.is_superuser:
- return result
-
if isinstance(result, HttpResponseRedirect):
return result
- can_bypass_structure = request.user.has_perm('can_bypass_structure')
obj = hasattr(self, 'get_object') and self.get_object() or getattr(self, 'object', None)
- if can_bypass_structure or (obj and obj.same_structure(request.user)):
+ if obj.same_structure(request.user):
return result
messages.warning(request, _(u'Access to the requested resource is restricted by structure. You have been redirected.'))
diff --git a/geotrek/authent/models.py b/geotrek/authent/models.py
--- a/geotrek/authent/models.py
+++ b/geotrek/authent/models.py
@@ -26,7 +26,7 @@
verbose_name = _(u"Structure")
verbose_name_plural = _(u"Structures")
ordering = ['name']
- permissions = (("can_bypass_structure", _("Can by structure")),)
+ permissions = (("can_bypass_structure", _("Can bypass structure")),)
def default_structure():
@@ -69,8 +69,11 @@
return cls.in_structure.for_user(user)
def same_structure(self, user):
- """ Returns True if the user is in the same structure, False otherwise. """
- return user.profile.structure == self.structure
+ """ Returns True if the user is in the same structure or has
+ bypass_structure permission, False otherwise. """
+ return (user.profile.structure == self.structure or
+ user.is_superuser or
+ user.has_perm('authent.can_bypass_structure'))
class Meta:
abstract = True
| {"golden_diff": "diff --git a/geotrek/authent/decorators.py b/geotrek/authent/decorators.py\n--- a/geotrek/authent/decorators.py\n+++ b/geotrek/authent/decorators.py\n@@ -18,16 +18,11 @@\n def _wrapped_view(self, request, *args, **kwargs):\n result = view_func(self, request, *args, **kwargs)\n \n- # Superuser is always allowed\n- if request.user.is_superuser:\n- return result\n-\n if isinstance(result, HttpResponseRedirect):\n return result\n \n- can_bypass_structure = request.user.has_perm('can_bypass_structure')\n obj = hasattr(self, 'get_object') and self.get_object() or getattr(self, 'object', None)\n- if can_bypass_structure or (obj and obj.same_structure(request.user)):\n+ if obj.same_structure(request.user):\n return result\n messages.warning(request, _(u'Access to the requested resource is restricted by structure. You have been redirected.'))\n \ndiff --git a/geotrek/authent/models.py b/geotrek/authent/models.py\n--- a/geotrek/authent/models.py\n+++ b/geotrek/authent/models.py\n@@ -26,7 +26,7 @@\n verbose_name = _(u\"Structure\")\n verbose_name_plural = _(u\"Structures\")\n ordering = ['name']\n- permissions = ((\"can_bypass_structure\", _(\"Can by structure\")),)\n+ permissions = ((\"can_bypass_structure\", _(\"Can bypass structure\")),)\n \n \n def default_structure():\n@@ -69,8 +69,11 @@\n return cls.in_structure.for_user(user)\n \n def same_structure(self, user):\n- \"\"\" Returns True if the user is in the same structure, False otherwise. \"\"\"\n- return user.profile.structure == self.structure\n+ \"\"\" Returns True if the user is in the same structure or has\n+ bypass_structure permission, False otherwise. \"\"\"\n+ return (user.profile.structure == self.structure or\n+ user.is_superuser or\n+ user.has_perm('authent.can_bypass_structure'))\n \n class Meta:\n abstract = True\n", "issue": "Should not disable edit button if having bypass structure permission\nWorkaround: write url by hand (eg. 
\"/trek/edit/1/\").\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\n Models to manage users and profiles\n\"\"\"\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.dispatch import receiver\nfrom django.contrib.auth.signals import user_logged_in\n\nfrom geotrek.common.utils import reify\n\n\nclass Structure(models.Model):\n \"\"\"\n Represents an organisational structure, to which users are related.\n \"\"\"\n name = models.CharField(max_length=256, verbose_name=_(u\"Nom\"))\n\n def __unicode__(self):\n return self.name\n\n class Meta:\n verbose_name = _(u\"Structure\")\n verbose_name_plural = _(u\"Structures\")\n ordering = ['name']\n permissions = ((\"can_bypass_structure\", _(\"Can by structure\")),)\n\n\ndef default_structure():\n \"\"\" Create default structure if necessary \"\"\"\n return Structure.objects.get_or_create(name=settings.DEFAULT_STRUCTURE_NAME)[0]\n\n\nclass StructureRelatedQuerySet(models.query.QuerySet):\n def for_user(self, user):\n return StructureRelatedQuerySet.queryset_for_user(self, user)\n\n @staticmethod\n def queryset_for_user(queryset, user):\n return queryset.filter(structure=user.profile.structure)\n\n\nclass StructureRelatedManager(models.Manager):\n \"\"\" A simple manager to manage structure related objects\"\"\"\n def get_queryset(self):\n return StructureRelatedQuerySet(self.model, using=self._db)\n\n def for_user(self, user):\n \"\"\" Filter by user's structure \"\"\"\n return self.get_queryset().for_user(user)\n\n\nclass StructureRelated(models.Model):\n \"\"\"\n A mixin used for any entities that belong to a structure\n \"\"\"\n structure = models.ForeignKey(Structure, default=default_structure,\n verbose_name=_(u\"Related structure\"), db_column='structure')\n\n objects = models.Manager()\n in_structure = StructureRelatedManager()\n\n @classmethod\n def for_user(cls, user):\n \"\"\" Shortcut to manager's filter by user \"\"\"\n return cls.in_structure.for_user(user)\n\n def same_structure(self, user):\n \"\"\" Returns True if the user is in the same structure, False otherwise. \"\"\"\n return user.profile.structure == self.structure\n\n class Meta:\n abstract = True\n verbose_name = _(u\"Related structures\")\n verbose_name_plural = _(u\"Related structure\")\n\n\nclass UserProfile(StructureRelated):\n \"\"\"\n A custom user profile\n \"\"\"\n user = models.OneToOneField(User, unique=True)\n\n language = models.CharField(_(u\"Language\"), max_length=10,\n choices=settings.LANGUAGES,\n default=settings.LANGUAGE_CODE)\n\n class Meta:\n verbose_name = _(u\"User's profile\")\n verbose_name_plural = _(u\"User's profiles\")\n\n def __unicode__(self):\n return _(\"Profile for %s\") % self.user\n\nUser.profile = reify(lambda u: UserProfile.objects.get_or_create(user=u)[0])\n\n\n@receiver(user_logged_in)\ndef lang(sender, **kwargs):\n \"\"\" Set user's language in session when he logs in. \"\"\"\n lang_code = kwargs['user'].profile.language\n kwargs['request'].session['django_language'] = lang_code\n", "path": "geotrek/authent/models.py"}, {"content": "from functools import wraps\n\nfrom django.shortcuts import redirect\nfrom django.contrib import messages\nfrom django.http import HttpResponseRedirect\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils.decorators import available_attrs\n\n\ndef same_structure_required(redirect_to):\n \"\"\"\n A decorator for class-based views. 
It relies on ``self.get_object()``\n method object, and assumes decorated views to handle ``StructureRelated``\n objects.\n \"\"\"\n def decorator(view_func):\n @wraps(view_func, assigned=available_attrs(view_func))\n def _wrapped_view(self, request, *args, **kwargs):\n result = view_func(self, request, *args, **kwargs)\n\n # Superuser is always allowed\n if request.user.is_superuser:\n return result\n\n if isinstance(result, HttpResponseRedirect):\n return result\n\n can_bypass_structure = request.user.has_perm('can_bypass_structure')\n obj = hasattr(self, 'get_object') and self.get_object() or getattr(self, 'object', None)\n if can_bypass_structure or (obj and obj.same_structure(request.user)):\n return result\n messages.warning(request, _(u'Access to the requested resource is restricted by structure. You have been redirected.'))\n\n return redirect(redirect_to, *args, **kwargs)\n return _wrapped_view\n return decorator\n", "path": "geotrek/authent/decorators.py"}]} | 1,832 | 466 |
gh_patches_debug_19323 | rasdani/github-patches | git_diff | PokemonGoF__PokemonGo-Bot-5036 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Crash on Level Up
I'm gonna guess an issue with:
https://github.com/PokemonGoF/PokemonGo-Bot/pull/5016
which is also the version im on
```
Traceback (most recent call last):
File "pokecli.py", line 781, in <module>
main()
File "pokecli.py", line 139, in main
bot.tick()
File "C:\Users\Steve\Downloads\PokemonGo-Bot\pokemongo_bot\__init__.py", line 658, in tick
if worker.work() == WorkerResult.RUNNING:
File "C:\Users\Steve\Downloads\PokemonGo-Bot\pokemongo_bot\cell_workers\collect_level_up_reward.py", line 37, in work
self._collect_level_reward()
File "C:\Users\Steve\Downloads\PokemonGo-Bot\pokemongo_bot\cell_workers\collect_level_up_reward.py", line 70, in _collect_level_reward
'items': ', '.join(["{}x {}".format(data[x], x) for x in data])
TypeError: list indices must be integers, not dict
```
</issue>
<code>
[start of pokemongo_bot/cell_workers/collect_level_up_reward.py]
1 import sys
2
3 from pokemongo_bot.base_task import BaseTask
4 from pokemongo_bot import inventory
5
6
7 class CollectLevelUpReward(BaseTask):
8 SUPPORTED_TASK_API_VERSION = 1
9
10 current_level = 0
11 previous_level = 0
12
13 def initialize(self):
14 self._process_config()
15 self.current_level = inventory.player().level
16 self.previous_level = 0
17
18 def work(self):
19 if self._should_run():
20 self.current_level = inventory.player().level
21
22 if self.collect_reward:
23 # let's check level reward on bot initialization
24 # to be able get rewards for old bots
25 if self.previous_level == 0:
26 self._collect_level_reward()
27 # level up situation
28 elif self.current_level > self.previous_level:
29 self.emit_event(
30 'level_up',
31 formatted='Level up from {previous_level} to {current_level}',
32 data={
33 'previous_level': self.previous_level,
34 'current_level': self.current_level
35 }
36 )
37 self._collect_level_reward()
38
39 if self.level_limit != -1 and self.current_level >= self.level_limit:
40 sys.exit("You have reached your target level! Exiting now.")
41
42 self.previous_level = self.current_level
43
44 def _process_config(self):
45 self.level_limit = self.config.get('level_limit', -1)
46 self.collect_reward = self.config.get('collect_reward', True)
47
48 def _should_run(self):
49 return self.level_limit != -1 or self.collect_reward
50
51 def _collect_level_reward(self):
52 response_dict = self.bot.api.level_up_rewards(level=self.current_level)
53 if 'status_code' in response_dict and response_dict['status_code'] == 1:
54 data = (response_dict
55 .get('responses', {})
56 .get('LEVEL_UP_REWARDS', {})
57 .get('items_awarded', []))
58
59 for item in data:
60 if 'item_id' in item and str(item['item_id']) in self.bot.item_list:
61 got_item = self.bot.item_list[str(item['item_id'])]
62 item['name'] = got_item
63 count = 'item_count' in item and item['item_count'] or 0
64 inventory.items().get(item['item_id']).add(count)
65 try:
66 self.emit_event(
67 'level_up_reward',
68 formatted='Received level up reward: {items}',
69 data={
70 'items': ', '.join(["{}x {}".format(data[x], x) for x in data])
71 }
72 )
73 except TypeError:
74 pass
75
[end of pokemongo_bot/cell_workers/collect_level_up_reward.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pokemongo_bot/cell_workers/collect_level_up_reward.py b/pokemongo_bot/cell_workers/collect_level_up_reward.py
--- a/pokemongo_bot/cell_workers/collect_level_up_reward.py
+++ b/pokemongo_bot/cell_workers/collect_level_up_reward.py
@@ -62,13 +62,11 @@
item['name'] = got_item
count = 'item_count' in item and item['item_count'] or 0
inventory.items().get(item['item_id']).add(count)
- try:
- self.emit_event(
- 'level_up_reward',
- formatted='Received level up reward: {items}',
- data={
- 'items': ', '.join(["{}x {}".format(data[x], x) for x in data])
- }
- )
- except TypeError:
- pass
+ self.emit_event(
+ 'level_up_reward',
+ formatted='Received level up reward: {items}',
+ data={
+ # [{'item_id': 3, 'name': u'Ultraball', 'item_count': 10}, {'item_id': 103, 'name': u'Hyper Potion', 'item_count': 10}]
+ 'items': ', '.join(["{}x {}".format(x['item_count'], x['name']) for x in data])
+ }
+ )
| {"golden_diff": "diff --git a/pokemongo_bot/cell_workers/collect_level_up_reward.py b/pokemongo_bot/cell_workers/collect_level_up_reward.py\n--- a/pokemongo_bot/cell_workers/collect_level_up_reward.py\n+++ b/pokemongo_bot/cell_workers/collect_level_up_reward.py\n@@ -62,13 +62,11 @@\n item['name'] = got_item\n count = 'item_count' in item and item['item_count'] or 0\n inventory.items().get(item['item_id']).add(count)\n- try:\n- self.emit_event(\n- 'level_up_reward',\n- formatted='Received level up reward: {items}',\n- data={\n- 'items': ', '.join([\"{}x {}\".format(data[x], x) for x in data])\n- }\n- )\n- except TypeError:\n- pass\n+ self.emit_event(\n+ 'level_up_reward',\n+ formatted='Received level up reward: {items}',\n+ data={\n+ # [{'item_id': 3, 'name': u'Ultraball', 'item_count': 10}, {'item_id': 103, 'name': u'Hyper Potion', 'item_count': 10}]\n+ 'items': ', '.join([\"{}x {}\".format(x['item_count'], x['name']) for x in data])\n+ }\n+ )\n", "issue": "Crash on Level Up\nI'm gonna guess an issue with:\nhttps://github.com/PokemonGoF/PokemonGo-Bot/pull/5016\n\nwhich is also the version im on\n\n```\nTraceback (most recent call last):\n File \"pokecli.py\", line 781, in <module>\n main()\n File \"pokecli.py\", line 139, in main\n bot.tick()\n File \"C:\\Users\\Steve\\Downloads\\PokemonGo-Bot\\pokemongo_bot\\__init__.py\", line 658, in tick\n if worker.work() == WorkerResult.RUNNING:\n File \"C:\\Users\\Steve\\Downloads\\PokemonGo-Bot\\pokemongo_bot\\cell_workers\\collect_level_up_reward.py\", line 37, in work\n self._collect_level_reward()\n File \"C:\\Users\\Steve\\Downloads\\PokemonGo-Bot\\pokemongo_bot\\cell_workers\\collect_level_up_reward.py\", line 70, in _collect_level_reward\n 'items': ', '.join([\"{}x {}\".format(data[x], x) for x in data])\nTypeError: list indices must be integers, not dict\n```\n\n", "before_files": [{"content": "import sys\n\nfrom pokemongo_bot.base_task import BaseTask\nfrom pokemongo_bot import inventory\n\n\nclass CollectLevelUpReward(BaseTask):\n SUPPORTED_TASK_API_VERSION = 1\n\n current_level = 0\n previous_level = 0\n\n def initialize(self):\n self._process_config()\n self.current_level = inventory.player().level\n self.previous_level = 0\n\n def work(self):\n if self._should_run():\n self.current_level = inventory.player().level\n\n if self.collect_reward:\n # let's check level reward on bot initialization\n # to be able get rewards for old bots\n if self.previous_level == 0:\n self._collect_level_reward()\n # level up situation\n elif self.current_level > self.previous_level:\n self.emit_event(\n 'level_up',\n formatted='Level up from {previous_level} to {current_level}',\n data={\n 'previous_level': self.previous_level,\n 'current_level': self.current_level\n }\n )\n self._collect_level_reward()\n\n if self.level_limit != -1 and self.current_level >= self.level_limit:\n sys.exit(\"You have reached your target level! 
Exiting now.\")\n\n self.previous_level = self.current_level\n\n def _process_config(self):\n self.level_limit = self.config.get('level_limit', -1)\n self.collect_reward = self.config.get('collect_reward', True)\n\n def _should_run(self):\n return self.level_limit != -1 or self.collect_reward\n\n def _collect_level_reward(self):\n response_dict = self.bot.api.level_up_rewards(level=self.current_level)\n if 'status_code' in response_dict and response_dict['status_code'] == 1:\n data = (response_dict\n .get('responses', {})\n .get('LEVEL_UP_REWARDS', {})\n .get('items_awarded', []))\n\n for item in data:\n if 'item_id' in item and str(item['item_id']) in self.bot.item_list:\n got_item = self.bot.item_list[str(item['item_id'])]\n item['name'] = got_item\n count = 'item_count' in item and item['item_count'] or 0\n inventory.items().get(item['item_id']).add(count)\n try:\n self.emit_event(\n 'level_up_reward',\n formatted='Received level up reward: {items}',\n data={\n 'items': ', '.join([\"{}x {}\".format(data[x], x) for x in data])\n }\n )\n except TypeError:\n pass\n", "path": "pokemongo_bot/cell_workers/collect_level_up_reward.py"}]} | 1,508 | 310 |
gh_patches_debug_664 | rasdani/github-patches | git_diff | fedora-infra__bodhi-507 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
setup.py test doesn't include extra_requires from fedmsg deps
```
======================================================================
ERROR: Failure: ImportError (No module named psutil)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/decause/.virtualenvs/bodhi-python2.7/lib/python2.7/site-packages/nose/loader.py", line 418, in loadTestsFromName
addr.filename, addr.module)
File "/home/decause/.virtualenvs/bodhi-python2.7/lib/python2.7/site-packages/nose/importer.py", line 47, in importFromPath
return self.importFromDir(dir_path, fqname)
File "/home/decause/.virtualenvs/bodhi-python2.7/lib/python2.7/site-packages/nose/importer.py", line 94, in importFromDir
mod = load_module(part_fqname, fh, filename, desc)
File "/home/decause/code/bodhi/bodhi/tests/test_masher.py", line 27, in <module>
from bodhi.consumers.masher import Masher, MasherThread
File "/home/decause/code/bodhi/bodhi/consumers/masher.py", line 30, in <module>
import fedmsg.consumers
File "/home/decause/code/bodhi/.eggs/fedmsg-0.16.0-py2.7.egg/fedmsg/consumers/__init__.py", line 25, in <module>
import psutil
ImportError: No module named psutil
----------------------------------------------------------------------
Ran 335 tests in 138.787s
FAILED (errors=1)
```
</issue>
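For context, a minimal sketch of the dependency declaration the traceback above points at: `fedmsg.consumers` imports `psutil`, which only gets installed when fedmsg is requested together with its `consumers` extra. The surrounding entries are abbreviated, not the full bodhi requirement list.

```python
# Abbreviated sketch of the install_requires list; only the fedmsg entry is
# the relevant change. Requesting the "consumers" extra makes setuptools pull
# in psutil, which "python setup.py test" needs in order to import
# fedmsg.consumers.
requires = [
    'python-bugzilla',
    'simplemediawiki',
    'fedmsg[consumers]',
    'Sphinx',
]
```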
<code>
[start of setup.py]
1 import __main__
2 __requires__ = __main__.__requires__ = 'WebOb>=1.4.1'
3 import pkg_resources
4
5 # The following two imports are required to shut up an
6 # atexit error when running tests with python 2.7
7 import logging
8 import multiprocessing
9
10 import os
11 import sys
12
13 from setuptools import setup, find_packages
14
15 here = os.path.abspath(os.path.dirname(__file__))
16 README = open(os.path.join(here, 'README.rst')).read()
17 CHANGES = open(os.path.join(here, 'CHANGES.txt')).read()
18
19 requires = [
20 'pyramid',
21 'pyramid_mako',
22 'pyramid_debugtoolbar',
23 'pyramid_tm',
24 'waitress',
25 'colander',
26 'cornice',
27
28 'python-openid',
29 'pyramid_fas_openid',
30 'packagedb-cli',
31
32 'sqlalchemy',
33 'zope.sqlalchemy',
34
35 'webhelpers',
36 'progressbar',
37
38 'bunch',
39
40 # for captchas
41 'cryptography',
42 'Pillow',
43
44 # Useful tools
45 'kitchen',
46 'python-fedora',
47 'pylibravatar',
48 'pyDNS',
49 'dogpile.cache',
50 'arrow',
51 'markdown',
52
53 # i18n, that we're not actually doing yet.
54 #'Babel',
55 #'lingua',
56
57 # External resources
58 'python-bugzilla',
59 'simplemediawiki',
60 'fedmsg',
61
62 'Sphinx',
63
64 # For the bodhi-client
65 'click',
66
67 'WebOb>=1.4.1',
68 ]
69
70 if sys.version_info[:3] < (2,7,0):
71 requires.append('importlib')
72
73 if sys.version_info[:3] < (2,5,0):
74 requires.append('pysqlite')
75
76 setup(name='bodhi',
77 version='2.0',
78 description='bodhi',
79 long_description=README + '\n\n' + CHANGES,
80 classifiers=[
81 "Programming Language :: Python",
82 "Framework :: Pyramid",
83 "Topic :: Internet :: WWW/HTTP",
84 "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
85 ],
86 author='',
87 author_email='',
88 url='',
89 keywords='web fedora pyramid',
90 packages=find_packages(),
91 include_package_data=True,
92 zip_safe=False,
93 install_requires = requires,
94 tests_require = [
95 'nose',
96 'nose-cov',
97 'webtest',
98 'mock'
99 ],
100 test_suite="nose.collector",
101 message_extractors = { '.': [
102 #('**.py', 'lingua_python', None),
103 #('**.mak', 'lingua_xml', None),
104 ]},
105 entry_points = """\
106 [paste.app_factory]
107 main = bodhi:main
108 [console_scripts]
109 initialize_bodhi_db = bodhi.scripts.initializedb:main
110 bodhi = bodhi.cli:cli
111 bodhi-push = bodhi.push:push
112 bodhi-expire-overrides = bodhi.scripts.expire_overrides:main
113 [moksha.consumer]
114 masher = bodhi.consumers.masher:Masher
115 updates = bodhi.consumers.updates:UpdatesHandler
116 """,
117 paster_plugins=['pyramid'],
118 )
119
120
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -57,7 +57,9 @@
# External resources
'python-bugzilla',
'simplemediawiki',
- 'fedmsg',
+
+ # "python setup.py test" needs one of fedmsg's setup.py extra_requires
+ 'fedmsg[consumers]',
'Sphinx',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -57,7 +57,9 @@\n # External resources\n 'python-bugzilla',\n 'simplemediawiki',\n- 'fedmsg',\n+\n+ # \"python setup.py test\" needs one of fedmsg's setup.py extra_requires\n+ 'fedmsg[consumers]',\n \n 'Sphinx',\n", "issue": "setup.py test doesn't include extra_requires from fedmsg deps\n```\n======================================================================\nERROR: Failure: ImportError (No module named psutil)\n----------------------------------------------------------------------\nTraceback (most recent call last):\n File \"/home/decause/.virtualenvs/bodhi-python2.7/lib/python2.7/site-packages/nose/loader.py\", line 418, in loadTestsFromName\n addr.filename, addr.module)\n File \"/home/decause/.virtualenvs/bodhi-python2.7/lib/python2.7/site-packages/nose/importer.py\", line 47, in importFromPath\n return self.importFromDir(dir_path, fqname)\n File \"/home/decause/.virtualenvs/bodhi-python2.7/lib/python2.7/site-packages/nose/importer.py\", line 94, in importFromDir\n mod = load_module(part_fqname, fh, filename, desc)\n File \"/home/decause/code/bodhi/bodhi/tests/test_masher.py\", line 27, in <module>\n from bodhi.consumers.masher import Masher, MasherThread\n File \"/home/decause/code/bodhi/bodhi/consumers/masher.py\", line 30, in <module>\n import fedmsg.consumers\n File \"/home/decause/code/bodhi/.eggs/fedmsg-0.16.0-py2.7.egg/fedmsg/consumers/__init__.py\", line 25, in <module>\n import psutil\nImportError: No module named psutil\n\n----------------------------------------------------------------------\nRan 335 tests in 138.787s\n\nFAILED (errors=1)\n```\n\n", "before_files": [{"content": "import __main__\n__requires__ = __main__.__requires__ = 'WebOb>=1.4.1'\nimport pkg_resources\n\n# The following two imports are required to shut up an\n# atexit error when running tests with python 2.7\nimport logging\nimport multiprocessing\n\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(here, 'README.rst')).read()\nCHANGES = open(os.path.join(here, 'CHANGES.txt')).read()\n\nrequires = [\n 'pyramid',\n 'pyramid_mako',\n 'pyramid_debugtoolbar',\n 'pyramid_tm',\n 'waitress',\n 'colander',\n 'cornice',\n\n 'python-openid',\n 'pyramid_fas_openid',\n 'packagedb-cli',\n\n 'sqlalchemy',\n 'zope.sqlalchemy',\n\n 'webhelpers',\n 'progressbar',\n\n 'bunch',\n\n # for captchas\n 'cryptography',\n 'Pillow',\n\n # Useful tools\n 'kitchen',\n 'python-fedora',\n 'pylibravatar',\n 'pyDNS',\n 'dogpile.cache',\n 'arrow',\n 'markdown',\n\n # i18n, that we're not actually doing yet.\n #'Babel',\n #'lingua',\n\n # External resources\n 'python-bugzilla',\n 'simplemediawiki',\n 'fedmsg',\n\n 'Sphinx',\n\n # For the bodhi-client\n 'click',\n\n 'WebOb>=1.4.1',\n ]\n\nif sys.version_info[:3] < (2,7,0):\n requires.append('importlib')\n\nif sys.version_info[:3] < (2,5,0):\n requires.append('pysqlite')\n\nsetup(name='bodhi',\n version='2.0',\n description='bodhi',\n long_description=README + '\\n\\n' + CHANGES,\n classifiers=[\n \"Programming Language :: Python\",\n \"Framework :: Pyramid\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n ],\n author='',\n author_email='',\n url='',\n keywords='web fedora pyramid',\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires = requires,\n tests_require = [\n 'nose',\n 'nose-cov',\n 'webtest',\n 'mock'\n 
],\n test_suite=\"nose.collector\",\n message_extractors = { '.': [\n #('**.py', 'lingua_python', None),\n #('**.mak', 'lingua_xml', None),\n ]},\n entry_points = \"\"\"\\\n [paste.app_factory]\n main = bodhi:main\n [console_scripts]\n initialize_bodhi_db = bodhi.scripts.initializedb:main\n bodhi = bodhi.cli:cli\n bodhi-push = bodhi.push:push\n bodhi-expire-overrides = bodhi.scripts.expire_overrides:main\n [moksha.consumer]\n masher = bodhi.consumers.masher:Masher\n updates = bodhi.consumers.updates:UpdatesHandler\n \"\"\",\n paster_plugins=['pyramid'],\n )\n\n", "path": "setup.py"}]} | 1,887 | 94 |
gh_patches_debug_25493 | rasdani/github-patches | git_diff | liqd__adhocracy4-211 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Subject with new line crashes email sending
Subject with new line crashes email sending
</issue>
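As a minimal sketch of the sanitisation involved: carriage returns and newlines have to be stripped from the subject before it is handed to Django's `EmailMultiAlternatives`, because bare CR/LF characters are not allowed in mail headers. The sample subject string is made up.

```python
import re


def clean_subject(subject):
    # Mail headers must not contain bare CR/LF characters, so drop them
    # before constructing the message.
    return re.sub(r'[\r\n]', '', subject).strip()


print(clean_subject("Weekly update\n"))
# -> "Weekly update"
```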
<code>
[start of adhocracy4/emails/mixins.py]
1 from email.mime.image import MIMEImage
2
3 from django.contrib.staticfiles import finders
4 from .base import EmailBase
5
6
7 class PlatformEmailMixin:
8 """
9 Attaches the static file images/logo.png so it can be used in an html
10 email.
11 """
12 def get_attachments(self):
13 attachments = super().get_attachments()
14 filename = (
15 finders.find('images/email_logo.png')
16 or finders.find('images/email_logo.svg')
17 )
18 if filename:
19 if filename.endswith('.png'):
20 imagetype = 'png'
21 else:
22 imagetype = 'svg+xml'
23
24 with open(filename, 'rb') as f:
25 logo = MIMEImage(f.read(), imagetype)
26
27 logo.add_header('Content-ID', '<{}>'.format('logo'))
28 return attachments + [logo]
29 return attachments
30
31
32 class SyncEmailMixin(EmailBase):
33 """Send Emails synchronously."""
34
35 @classmethod
36 def send(cls, object, *args, **kwargs):
37 """Call dispatch immediately"""
38 return cls().dispatch(object, *args, **kwargs)
39
[end of adhocracy4/emails/mixins.py]
[start of adhocracy4/emails/base.py]
1 from django.conf import settings
2 from django.contrib.contenttypes.models import ContentType
3 from django.contrib.sites import models as site_models
4 from django.core.mail.message import EmailMultiAlternatives
5 from django.template.loader import select_template
6 from django.utils import translation
7
8 from . import tasks
9
10
11 class EmailBase:
12 site_id = 1
13 object = None
14 template_name = None
15 fallback_language = 'en'
16 for_moderator = False
17
18 def get_site(self):
19 return site_models.Site.objects.get(pk=self.site_id)
20
21 def get_host(self):
22 site = self.get_site()
23 ssl_enabled = True
24 if site.domain.startswith('localhost:'):
25 ssl_enabled = False
26
27 url = 'http{ssl_flag}://{domain}'.format(
28 ssl_flag='s' if ssl_enabled else '',
29 domain=site.domain,
30 )
31 return url
32
33 def get_context(self):
34 object_context_key = self.object.__class__.__name__.lower()
35 return {
36 'email': self,
37 'site': self.get_site(),
38 object_context_key: self.object
39 }
40
41 def get_receivers(self):
42 return []
43
44 def get_attachments(self):
45 return []
46
47 def get_languages(self, receiver):
48 return [translation.get_language(), self.fallback_language]
49
50 def get_reply_to(self):
51 return None
52
53 @classmethod
54 def send(cls, object, *args, **kwargs):
55 """Send email asynchronously.
56
57 NOTE: args and kwargs must be JSON serializable.
58 """
59 ct = ContentType.objects.get_for_model(object)
60 tasks.send_async(
61 cls.__module__, cls.__name__,
62 ct.app_label, ct.model, object.pk,
63 args, kwargs)
64 return []
65
66 def render(self, template_name, context):
67 languages = self.get_languages(context['receiver'])
68 template = select_template([
69 '{}.{}.email'.format(template_name, lang)
70 for lang in languages
71 ])
72
73 # Get the actually chosen language from the template name
74 language = template.template.name.split('.', 2)[-2]
75
76 with translation.override(language):
77 parts = []
78 for part_type in ('subject', 'txt', 'html'):
79 context['part_type'] = part_type
80 parts.append(template.render(context))
81 context.pop('part_type')
82
83 return tuple(parts)
84
85 def dispatch(self, object, *args, **kwargs):
86 self.object = object
87 self.kwargs = kwargs
88 receivers = self.get_receivers()
89 context = self.get_context()
90 context.update(kwargs)
91 attachments = self.get_attachments()
92 template = self.template_name
93
94 mails = []
95 for receiver in receivers:
96 context['receiver'] = receiver
97 (subject, text, html) = self.render(template, context)
98 context.pop('receiver')
99
100 if hasattr(receiver, 'email'):
101 to_address = receiver.email
102 else:
103 to_address = receiver
104
105 mail = EmailMultiAlternatives(
106 subject=subject.strip(),
107 body=text,
108 from_email=settings.DEFAULT_FROM_EMAIL,
109 to=[to_address],
110 reply_to=self.get_reply_to(),
111 )
112
113 if len(attachments) > 0:
114 mail.mixed_subtype = 'related'
115
116 for attachment in attachments:
117 mail.attach(attachment)
118
119 mail.attach_alternative(html, 'text/html')
120 mail.send()
121 mails.append(mail)
122 return mails
123
[end of adhocracy4/emails/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/adhocracy4/emails/base.py b/adhocracy4/emails/base.py
--- a/adhocracy4/emails/base.py
+++ b/adhocracy4/emails/base.py
@@ -1,3 +1,5 @@
+import re
+
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites import models as site_models
@@ -102,8 +104,10 @@
else:
to_address = receiver
+ subject_clean = re.sub(r'[\r\n]', '', subject).strip()
+
mail = EmailMultiAlternatives(
- subject=subject.strip(),
+ subject=subject_clean,
body=text,
from_email=settings.DEFAULT_FROM_EMAIL,
to=[to_address],
diff --git a/adhocracy4/emails/mixins.py b/adhocracy4/emails/mixins.py
--- a/adhocracy4/emails/mixins.py
+++ b/adhocracy4/emails/mixins.py
@@ -1,7 +1,6 @@
from email.mime.image import MIMEImage
from django.contrib.staticfiles import finders
-from .base import EmailBase
class PlatformEmailMixin:
@@ -29,7 +28,7 @@
return attachments
-class SyncEmailMixin(EmailBase):
+class SyncEmailMixin:
"""Send Emails synchronously."""
@classmethod
| {"golden_diff": "diff --git a/adhocracy4/emails/base.py b/adhocracy4/emails/base.py\n--- a/adhocracy4/emails/base.py\n+++ b/adhocracy4/emails/base.py\n@@ -1,3 +1,5 @@\n+import re\n+\n from django.conf import settings\n from django.contrib.contenttypes.models import ContentType\n from django.contrib.sites import models as site_models\n@@ -102,8 +104,10 @@\n else:\n to_address = receiver\n \n+ subject_clean = re.sub(r'[\\r\\n]', '', subject).strip()\n+\n mail = EmailMultiAlternatives(\n- subject=subject.strip(),\n+ subject=subject_clean,\n body=text,\n from_email=settings.DEFAULT_FROM_EMAIL,\n to=[to_address],\ndiff --git a/adhocracy4/emails/mixins.py b/adhocracy4/emails/mixins.py\n--- a/adhocracy4/emails/mixins.py\n+++ b/adhocracy4/emails/mixins.py\n@@ -1,7 +1,6 @@\n from email.mime.image import MIMEImage\n \n from django.contrib.staticfiles import finders\n-from .base import EmailBase\n \n \n class PlatformEmailMixin:\n@@ -29,7 +28,7 @@\n return attachments\n \n \n-class SyncEmailMixin(EmailBase):\n+class SyncEmailMixin:\n \"\"\"Send Emails synchronously.\"\"\"\n \n @classmethod\n", "issue": "Subject with new line crashes email sending\n\nSubject with new line crashes email sending\n\n", "before_files": [{"content": "from email.mime.image import MIMEImage\n\nfrom django.contrib.staticfiles import finders\nfrom .base import EmailBase\n\n\nclass PlatformEmailMixin:\n \"\"\"\n Attaches the static file images/logo.png so it can be used in an html\n email.\n \"\"\"\n def get_attachments(self):\n attachments = super().get_attachments()\n filename = (\n finders.find('images/email_logo.png')\n or finders.find('images/email_logo.svg')\n )\n if filename:\n if filename.endswith('.png'):\n imagetype = 'png'\n else:\n imagetype = 'svg+xml'\n\n with open(filename, 'rb') as f:\n logo = MIMEImage(f.read(), imagetype)\n\n logo.add_header('Content-ID', '<{}>'.format('logo'))\n return attachments + [logo]\n return attachments\n\n\nclass SyncEmailMixin(EmailBase):\n \"\"\"Send Emails synchronously.\"\"\"\n\n @classmethod\n def send(cls, object, *args, **kwargs):\n \"\"\"Call dispatch immediately\"\"\"\n return cls().dispatch(object, *args, **kwargs)\n", "path": "adhocracy4/emails/mixins.py"}, {"content": "from django.conf import settings\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.sites import models as site_models\nfrom django.core.mail.message import EmailMultiAlternatives\nfrom django.template.loader import select_template\nfrom django.utils import translation\n\nfrom . 
import tasks\n\n\nclass EmailBase:\n site_id = 1\n object = None\n template_name = None\n fallback_language = 'en'\n for_moderator = False\n\n def get_site(self):\n return site_models.Site.objects.get(pk=self.site_id)\n\n def get_host(self):\n site = self.get_site()\n ssl_enabled = True\n if site.domain.startswith('localhost:'):\n ssl_enabled = False\n\n url = 'http{ssl_flag}://{domain}'.format(\n ssl_flag='s' if ssl_enabled else '',\n domain=site.domain,\n )\n return url\n\n def get_context(self):\n object_context_key = self.object.__class__.__name__.lower()\n return {\n 'email': self,\n 'site': self.get_site(),\n object_context_key: self.object\n }\n\n def get_receivers(self):\n return []\n\n def get_attachments(self):\n return []\n\n def get_languages(self, receiver):\n return [translation.get_language(), self.fallback_language]\n\n def get_reply_to(self):\n return None\n\n @classmethod\n def send(cls, object, *args, **kwargs):\n \"\"\"Send email asynchronously.\n\n NOTE: args and kwargs must be JSON serializable.\n \"\"\"\n ct = ContentType.objects.get_for_model(object)\n tasks.send_async(\n cls.__module__, cls.__name__,\n ct.app_label, ct.model, object.pk,\n args, kwargs)\n return []\n\n def render(self, template_name, context):\n languages = self.get_languages(context['receiver'])\n template = select_template([\n '{}.{}.email'.format(template_name, lang)\n for lang in languages\n ])\n\n # Get the actually chosen language from the template name\n language = template.template.name.split('.', 2)[-2]\n\n with translation.override(language):\n parts = []\n for part_type in ('subject', 'txt', 'html'):\n context['part_type'] = part_type\n parts.append(template.render(context))\n context.pop('part_type')\n\n return tuple(parts)\n\n def dispatch(self, object, *args, **kwargs):\n self.object = object\n self.kwargs = kwargs\n receivers = self.get_receivers()\n context = self.get_context()\n context.update(kwargs)\n attachments = self.get_attachments()\n template = self.template_name\n\n mails = []\n for receiver in receivers:\n context['receiver'] = receiver\n (subject, text, html) = self.render(template, context)\n context.pop('receiver')\n\n if hasattr(receiver, 'email'):\n to_address = receiver.email\n else:\n to_address = receiver\n\n mail = EmailMultiAlternatives(\n subject=subject.strip(),\n body=text,\n from_email=settings.DEFAULT_FROM_EMAIL,\n to=[to_address],\n reply_to=self.get_reply_to(),\n )\n\n if len(attachments) > 0:\n mail.mixed_subtype = 'related'\n\n for attachment in attachments:\n mail.attach(attachment)\n\n mail.attach_alternative(html, 'text/html')\n mail.send()\n mails.append(mail)\n return mails\n", "path": "adhocracy4/emails/base.py"}]} | 1,874 | 304 |
gh_patches_debug_28410 | rasdani/github-patches | git_diff | mne-tools__mne-python-9092 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
split code block in examples/preprocessing/plot_virtual_evoked
right now, because all plots come from a single code block, they are plotted at the top of the example in a group of 4 (and consequently the plots are really small). By splitting the 4 plotting calls into different code blocks, they will plot larger / be easier to see & compare, without increasing run time of the example. Code blocks can be split with a line of 79 `#` marks (adding a bit of explanatory text too is usually a good idea)
</issue>
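For reference, a tiny self-contained illustration of the sphinx-gallery convention the issue describes: a comment line of 79 `#` characters ends one rendered cell and starts the next, and comment text right after it becomes prose in the built example. The `print` calls stand in for the plotting calls.

```python
"""Docstring that becomes the example's title and intro."""

print("first cell: original topomap would be plotted here")

###############################################################################
# Text after a line of 79 "#" characters is rendered as prose, and the code
# below it runs as a separate block, so its figure is displayed on its own.

print("second cell: interpolated topomap would be plotted here")
```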
<code>
[start of examples/preprocessing/plot_virtual_evoked.py]
1 """
2 =======================
3 Remap MEG channel types
4 =======================
5
6 In this example, MEG data are remapped from one channel type to another.
7 This is useful to:
8
9 - visualize combined magnetometers and gradiometers as magnetometers
10 or gradiometers.
11 - run statistics from both magnetometers and gradiometers while
12 working with a single type of channels.
13 """
14
15 # Author: Mainak Jas <[email protected]>
16
17 # License: BSD (3-clause)
18
19 import mne
20 from mne.datasets import sample
21
22 print(__doc__)
23
24 # read the evoked
25 data_path = sample.data_path()
26 fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
27 evoked = mne.read_evokeds(fname, condition='Left Auditory', baseline=(None, 0))
28
29 # go from grad + mag to mag
30 virt_evoked = evoked.as_type('mag')
31 evoked.plot_topomap(ch_type='mag', title='mag (original)', time_unit='s')
32 virt_evoked.plot_topomap(ch_type='mag', time_unit='s',
33 title='mag (interpolated from mag + grad)')
34
35 # go from grad + mag to grad
36 virt_evoked = evoked.as_type('grad')
37 evoked.plot_topomap(ch_type='grad', title='grad (original)', time_unit='s')
38 virt_evoked.plot_topomap(ch_type='grad', time_unit='s',
39 title='grad (interpolated from mag + grad)')
40
[end of examples/preprocessing/plot_virtual_evoked.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/preprocessing/plot_virtual_evoked.py b/examples/preprocessing/plot_virtual_evoked.py
--- a/examples/preprocessing/plot_virtual_evoked.py
+++ b/examples/preprocessing/plot_virtual_evoked.py
@@ -26,14 +26,30 @@
fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
evoked = mne.read_evokeds(fname, condition='Left Auditory', baseline=(None, 0))
-# go from grad + mag to mag
+###############################################################################
+# First, let's call remap gradiometers to magnometers, and plot
+# the original and remapped topomaps of the magnetometers.
+
+# go from grad + mag to mag and plot original mag
virt_evoked = evoked.as_type('mag')
evoked.plot_topomap(ch_type='mag', title='mag (original)', time_unit='s')
+
+###############################################################################
+
+# plot interpolated grad + mag
virt_evoked.plot_topomap(ch_type='mag', time_unit='s',
title='mag (interpolated from mag + grad)')
-# go from grad + mag to grad
+###############################################################################
+# Now, we remap magnometers to gradiometers, and plot
+# the original and remapped topomaps of the gradiometers
+
+# go from grad + mag to grad and plot original grad
virt_evoked = evoked.as_type('grad')
evoked.plot_topomap(ch_type='grad', title='grad (original)', time_unit='s')
+
+###############################################################################
+
+# plot interpolated grad + mag
virt_evoked.plot_topomap(ch_type='grad', time_unit='s',
title='grad (interpolated from mag + grad)')
| {"golden_diff": "diff --git a/examples/preprocessing/plot_virtual_evoked.py b/examples/preprocessing/plot_virtual_evoked.py\n--- a/examples/preprocessing/plot_virtual_evoked.py\n+++ b/examples/preprocessing/plot_virtual_evoked.py\n@@ -26,14 +26,30 @@\n fname = data_path + '/MEG/sample/sample_audvis-ave.fif'\n evoked = mne.read_evokeds(fname, condition='Left Auditory', baseline=(None, 0))\n \n-# go from grad + mag to mag\n+###############################################################################\n+# First, let's call remap gradiometers to magnometers, and plot\n+# the original and remapped topomaps of the magnetometers.\n+\n+# go from grad + mag to mag and plot original mag\n virt_evoked = evoked.as_type('mag')\n evoked.plot_topomap(ch_type='mag', title='mag (original)', time_unit='s')\n+\n+###############################################################################\n+\n+# plot interpolated grad + mag\n virt_evoked.plot_topomap(ch_type='mag', time_unit='s',\n title='mag (interpolated from mag + grad)')\n \n-# go from grad + mag to grad\n+###############################################################################\n+# Now, we remap magnometers to gradiometers, and plot\n+# the original and remapped topomaps of the gradiometers\n+\n+# go from grad + mag to grad and plot original grad\n virt_evoked = evoked.as_type('grad')\n evoked.plot_topomap(ch_type='grad', title='grad (original)', time_unit='s')\n+\n+###############################################################################\n+\n+# plot interpolated grad + mag\n virt_evoked.plot_topomap(ch_type='grad', time_unit='s',\n title='grad (interpolated from mag + grad)')\n", "issue": "split code block in examples/preprocessing/plot_virtual_evoked\nright now, because all plots come from a single code block, they are plotted at the top of the example in a group of 4 (and consequently the plots are really small). By splitting the 4 plotting calls into different code blocks, they will plot larger / be easier to see & compare, without increasing run time of the example. Code blocks can be split with a line of 79 `#` marks (adding a bit of explanatory text too is usually a good idea)\n", "before_files": [{"content": "\"\"\"\n=======================\nRemap MEG channel types\n=======================\n\nIn this example, MEG data are remapped from one channel type to another.\nThis is useful to:\n\n - visualize combined magnetometers and gradiometers as magnetometers\n or gradiometers.\n - run statistics from both magnetometers and gradiometers while\n working with a single type of channels.\n\"\"\"\n\n# Author: Mainak Jas <[email protected]>\n\n# License: BSD (3-clause)\n\nimport mne\nfrom mne.datasets import sample\n\nprint(__doc__)\n\n# read the evoked\ndata_path = sample.data_path()\nfname = data_path + '/MEG/sample/sample_audvis-ave.fif'\nevoked = mne.read_evokeds(fname, condition='Left Auditory', baseline=(None, 0))\n\n# go from grad + mag to mag\nvirt_evoked = evoked.as_type('mag')\nevoked.plot_topomap(ch_type='mag', title='mag (original)', time_unit='s')\nvirt_evoked.plot_topomap(ch_type='mag', time_unit='s',\n title='mag (interpolated from mag + grad)')\n\n# go from grad + mag to grad\nvirt_evoked = evoked.as_type('grad')\nevoked.plot_topomap(ch_type='grad', title='grad (original)', time_unit='s')\nvirt_evoked.plot_topomap(ch_type='grad', time_unit='s',\n title='grad (interpolated from mag + grad)')\n", "path": "examples/preprocessing/plot_virtual_evoked.py"}]} | 1,054 | 366 |
gh_patches_debug_157 | rasdani/github-patches | git_diff | doccano__doccano-1907 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot access Django admin panel in a Heroku deployment
How to reproduce the behaviour
---------
The FAQ describes how to [create a user via the Django admin panel](https://github.com/doccano/doccano/blob/master/docs/faq.md#how-to-create-a-user) for a locally hosted Doccano. When run locally, I have no problem to reach the admin panel on `http://localhost:8000/admin/`, in Heroku however it is not working.
I have tried to reach it on
- `https://mydeployment.herokuapp.com/admin/`
- `https://mydeployment.herokuapp.com/admin/login`
- `https://mydeployment.herokuapp.com/admin/login/`
- `http://mydeployment.herokuapp.com/admin/`
Those urls all result in a `500 Internal Server Error`.
Am I missing something here, or is this perhaps a bug?
Your Environment
---------
<!-- Include details of your environment. -->
* Operating System: -
* Python Version Used: -
* When did you install doccano: A few days ago
* How did you install doccano (Heroku button etc): Heroku button
</issue>
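For reference, a sketch of the Heroku settings module with the one-flag change that resolves the 500 errors reported here: asking `django-heroku` to leave static-file handling alone. Whether `staticfiles=False` is right for other deployments depends on how static assets are served there.

```python
import django_heroku

from .base import *  # noqa: F401,F403

# Configure database/logging from the Heroku environment, but keep the
# project's own static-files setup instead of the defaults django-heroku
# would inject.
django_heroku.settings(locals(), test_runner=False, staticfiles=False)
```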
<code>
[start of backend/config/settings/heroku.py]
1 import django_heroku
2
3 from .base import * # noqa: F401,F403
4
5 django_heroku.settings(locals(), test_runner=False)
6
[end of backend/config/settings/heroku.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/backend/config/settings/heroku.py b/backend/config/settings/heroku.py
--- a/backend/config/settings/heroku.py
+++ b/backend/config/settings/heroku.py
@@ -2,4 +2,4 @@
from .base import * # noqa: F401,F403
-django_heroku.settings(locals(), test_runner=False)
+django_heroku.settings(locals(), test_runner=False, staticfiles=False)
| {"golden_diff": "diff --git a/backend/config/settings/heroku.py b/backend/config/settings/heroku.py\n--- a/backend/config/settings/heroku.py\n+++ b/backend/config/settings/heroku.py\n@@ -2,4 +2,4 @@\n \n from .base import * # noqa: F401,F403\n \n-django_heroku.settings(locals(), test_runner=False)\n+django_heroku.settings(locals(), test_runner=False, staticfiles=False)\n", "issue": "Cannot access Django admin panel in a Heroku deployment\nHow to reproduce the behaviour\r\n---------\r\nThe FAQ describes how to [create a user via the Django admin panel](https://github.com/doccano/doccano/blob/master/docs/faq.md#how-to-create-a-user) for a locally hosted Doccano. When run locally, I have no problem to reach the admin panel on `http://localhost:8000/admin/`, in Heroku however it is not working.\r\n\r\nI have tried to reach it on\r\n- `https://mydeployment.herokuapp.com/admin/`\r\n- `https://mydeployment.herokuapp.com/admin/login`\r\n- `https://mydeployment.herokuapp.com/admin/login/`\r\n- `http://mydeployment.herokuapp.com/admin/`\r\n\r\nThose urls all result in a `500 Internal Server Error`.\r\nAm I missing something here, or is this perhaps a bug?\r\n\r\nYour Environment\r\n---------\r\n<!-- Include details of your environment. -->\r\n\r\n* Operating System: -\r\n* Python Version Used: -\r\n* When did you install doccano: A few days ago\r\n* How did you install doccano (Heroku button etc): Heroku button\r\n\n", "before_files": [{"content": "import django_heroku\n\nfrom .base import * # noqa: F401,F403\n\ndjango_heroku.settings(locals(), test_runner=False)\n", "path": "backend/config/settings/heroku.py"}]} | 823 | 96 |
gh_patches_debug_30592 | rasdani/github-patches | git_diff | mne-tools__mne-python-4380 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove deprecated imp module
Currently, `mne/commands/utils.py` still uses the deprecated `imp` module, which has long been replaced with `importlib`. According to [this answer on SO](https://stackoverflow.com/a/67692/1112283), the current solution works only on Python 3.5/3.6, and there is a (deprecated) alternative for Python 3.3/3.4. All versions < 3.3 need to use `imp`.
How should this be handled in MNE?
</issue>
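For Python 3.5 and later, the `importlib` replacement for `imp.load_source`/`imp.load_compiled` looks roughly like this; the module name and path in the usage comment are placeholders.

```python
from importlib.util import module_from_spec, spec_from_file_location


def load_module(name, path):
    """Load a module from an explicit .py/.pyc path (Python >= 3.5)."""
    spec = spec_from_file_location(name, path)
    mod = module_from_spec(spec)
    spec.loader.exec_module(mod)
    return mod


# Usage (path is illustrative):
# mod = load_module('__temp', '/path/to/some_command.py')
# print(mod.__doc__)
```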
<code>
[start of mne/commands/utils.py]
1 """Some utility functions for commands (e.g. for cmdline handling)."""
2
3 # Authors: Yaroslav Halchenko <[email protected]>
4 #
5 # License: BSD (3-clause)
6
7 import imp
8 import os
9 import re
10 from optparse import OptionParser
11
12 import mne
13
14
15 def get_optparser(cmdpath, usage=None):
16 """Create OptionParser with cmd specific settings (e.g. prog value)."""
17 command = os.path.basename(cmdpath)
18 if re.match('mne_(.*).py', command):
19 command = command[4:-3]
20 elif re.match('mne_(.*).pyc', command):
21 command = command[4:-4]
22
23 # Fetch description
24 if cmdpath.endswith('.pyc'):
25 mod = imp.load_compiled('__temp', cmdpath)
26 else:
27 mod = imp.load_source('__temp', cmdpath)
28 if mod.__doc__:
29 doc, description, epilog = mod.__doc__, None, None
30
31 doc_lines = doc.split('\n')
32 description = doc_lines[0]
33 if len(doc_lines) > 1:
34 epilog = '\n'.join(doc_lines[1:])
35
36 # monkey patch OptionParser to not wrap epilog
37 OptionParser.format_epilog = lambda self, formatter: self.epilog
38 parser = OptionParser(prog="mne %s" % command,
39 version=mne.__version__,
40 description=description,
41 epilog=epilog, usage=usage)
42
43 return parser
44
[end of mne/commands/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mne/commands/utils.py b/mne/commands/utils.py
--- a/mne/commands/utils.py
+++ b/mne/commands/utils.py
@@ -4,7 +4,7 @@
#
# License: BSD (3-clause)
-import imp
+import sys
import os
import re
from optparse import OptionParser
@@ -12,6 +12,42 @@
import mne
+def load_module(name, path):
+ """Load module from .py/.pyc file.
+
+ Parameters
+ ----------
+ name : str
+ Name of the module.
+ path : str
+ Path to .py/.pyc file.
+
+ Returns
+ -------
+ mod : module
+ Imported module.
+ """
+ if sys.version_info < (3, 3):
+ import imp
+ if path.endswith('.pyc'):
+ return imp.load_compiled(name, path)
+ else:
+ return imp.load_source(name, path)
+ elif sys.version_info < (3, 5):
+ if path.endswith('.pyc'):
+ from importlib.machinery import SourcelessFileLoader
+ return SourcelessFileLoader(name, path).load_module()
+ else:
+ from importlib.machinery import SourceFileLoader
+ return SourceFileLoader(name, path).load_module()
+ else: # Python 3.5 or greater
+ from importlib.util import spec_from_file_location, module_from_spec
+ spec = spec_from_file_location(name, path)
+ mod = module_from_spec(spec)
+ spec.loader.exec_module(mod)
+ return mod
+
+
def get_optparser(cmdpath, usage=None):
"""Create OptionParser with cmd specific settings (e.g. prog value)."""
command = os.path.basename(cmdpath)
@@ -21,10 +57,7 @@
command = command[4:-4]
# Fetch description
- if cmdpath.endswith('.pyc'):
- mod = imp.load_compiled('__temp', cmdpath)
- else:
- mod = imp.load_source('__temp', cmdpath)
+ mod = load_module('__temp', cmdpath)
if mod.__doc__:
doc, description, epilog = mod.__doc__, None, None
| {"golden_diff": "diff --git a/mne/commands/utils.py b/mne/commands/utils.py\n--- a/mne/commands/utils.py\n+++ b/mne/commands/utils.py\n@@ -4,7 +4,7 @@\n #\n # License: BSD (3-clause)\n \n-import imp\n+import sys\n import os\n import re\n from optparse import OptionParser\n@@ -12,6 +12,42 @@\n import mne\n \n \n+def load_module(name, path):\n+ \"\"\"Load module from .py/.pyc file.\n+\n+ Parameters\n+ ----------\n+ name : str\n+ Name of the module.\n+ path : str\n+ Path to .py/.pyc file.\n+\n+ Returns\n+ -------\n+ mod : module\n+ Imported module.\n+ \"\"\"\n+ if sys.version_info < (3, 3):\n+ import imp\n+ if path.endswith('.pyc'):\n+ return imp.load_compiled(name, path)\n+ else:\n+ return imp.load_source(name, path)\n+ elif sys.version_info < (3, 5):\n+ if path.endswith('.pyc'):\n+ from importlib.machinery import SourcelessFileLoader\n+ return SourcelessFileLoader(name, path).load_module()\n+ else:\n+ from importlib.machinery import SourceFileLoader\n+ return SourceFileLoader(name, path).load_module()\n+ else: # Python 3.5 or greater\n+ from importlib.util import spec_from_file_location, module_from_spec\n+ spec = spec_from_file_location(name, path)\n+ mod = module_from_spec(spec)\n+ spec.loader.exec_module(mod)\n+ return mod\n+\n+\n def get_optparser(cmdpath, usage=None):\n \"\"\"Create OptionParser with cmd specific settings (e.g. prog value).\"\"\"\n command = os.path.basename(cmdpath)\n@@ -21,10 +57,7 @@\n command = command[4:-4]\n \n # Fetch description\n- if cmdpath.endswith('.pyc'):\n- mod = imp.load_compiled('__temp', cmdpath)\n- else:\n- mod = imp.load_source('__temp', cmdpath)\n+ mod = load_module('__temp', cmdpath)\n if mod.__doc__:\n doc, description, epilog = mod.__doc__, None, None\n", "issue": "Remove deprecated imp module\nCurrently, `mne/commands/utils.py` still uses the deprecated `imp` module, which has long been replaced with `importlib`. According to [this answer on SO](https://stackoverflow.com/a/67692/1112283), the current solution works only on Python 3.5/3.6, and there is a (deprecated) alternative for Python 3.3/3.4. All versions < 3.3 need to use `imp`.\r\n\r\nHow should this be handled in MNE?\n", "before_files": [{"content": "\"\"\"Some utility functions for commands (e.g. for cmdline handling).\"\"\"\n\n# Authors: Yaroslav Halchenko <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport imp\nimport os\nimport re\nfrom optparse import OptionParser\n\nimport mne\n\n\ndef get_optparser(cmdpath, usage=None):\n \"\"\"Create OptionParser with cmd specific settings (e.g. prog value).\"\"\"\n command = os.path.basename(cmdpath)\n if re.match('mne_(.*).py', command):\n command = command[4:-3]\n elif re.match('mne_(.*).pyc', command):\n command = command[4:-4]\n\n # Fetch description\n if cmdpath.endswith('.pyc'):\n mod = imp.load_compiled('__temp', cmdpath)\n else:\n mod = imp.load_source('__temp', cmdpath)\n if mod.__doc__:\n doc, description, epilog = mod.__doc__, None, None\n\n doc_lines = doc.split('\\n')\n description = doc_lines[0]\n if len(doc_lines) > 1:\n epilog = '\\n'.join(doc_lines[1:])\n\n # monkey patch OptionParser to not wrap epilog\n OptionParser.format_epilog = lambda self, formatter: self.epilog\n parser = OptionParser(prog=\"mne %s\" % command,\n version=mne.__version__,\n description=description,\n epilog=epilog, usage=usage)\n\n return parser\n", "path": "mne/commands/utils.py"}]} | 1,066 | 513 |
gh_patches_debug_37097 | rasdani/github-patches | git_diff | AUTOMATIC1111__stable-diffusion-webui-12975 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Feature Request]: Where is the save style button?
### Is there an existing issue for this?
- [X] I have searched the existing issues and checked the recent builds/commits
### What would your feature do ?
Is it possible to make the old implementation of save style as well?
Not being able to save the currently typed prompt is very troublesome.
Why do we have to open the edit screen and copy/paste the prompt?
### Proposed workflow
Restore old implementation of save styles button
### Additional information
_No response_
</issue>
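A rough sketch of one way to cover the request without restoring the old button: an extra tool button in the style editor that copies the currently typed prompts into the editor's fields, so they can be saved from there. It assumes the Gradio components defined in `modules/ui_prompt_styles.py` below (`self.prompt`, `self.neg_prompt`, `main_ui_prompt`, `main_ui_negative_prompt`) and is wiring only, not a drop-in patch.

```python
# Inside UiPromptStyles.__init__, next to the existing "apply" tool button.
styles_copy_symbol = '\U0001f4dd'

self.copy = ui_components.ToolButton(
    value=styles_copy_symbol,
    elem_id=f"{tabname}_style_copy",
    tooltip="Copy main UI prompt to style.",
)

self.copy.click(
    fn=lambda p, n: (p, n),                         # pass both prompts through
    inputs=[main_ui_prompt, main_ui_negative_prompt],
    outputs=[self.prompt, self.neg_prompt],          # fill the style editor fields
    show_progress=False,
)
```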
<code>
[start of modules/ui_prompt_styles.py]
1 import gradio as gr
2
3 from modules import shared, ui_common, ui_components, styles
4
5 styles_edit_symbol = '\U0001f58c\uFE0F' # 🖌️
6 styles_materialize_symbol = '\U0001f4cb' # 📋
7
8
9 def select_style(name):
10 style = shared.prompt_styles.styles.get(name)
11 existing = style is not None
12 empty = not name
13
14 prompt = style.prompt if style else gr.update()
15 negative_prompt = style.negative_prompt if style else gr.update()
16
17 return prompt, negative_prompt, gr.update(visible=existing), gr.update(visible=not empty)
18
19
20 def save_style(name, prompt, negative_prompt):
21 if not name:
22 return gr.update(visible=False)
23
24 style = styles.PromptStyle(name, prompt, negative_prompt)
25 shared.prompt_styles.styles[style.name] = style
26 shared.prompt_styles.save_styles(shared.styles_filename)
27
28 return gr.update(visible=True)
29
30
31 def delete_style(name):
32 if name == "":
33 return
34
35 shared.prompt_styles.styles.pop(name, None)
36 shared.prompt_styles.save_styles(shared.styles_filename)
37
38 return '', '', ''
39
40
41 def materialize_styles(prompt, negative_prompt, styles):
42 prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, styles)
43 negative_prompt = shared.prompt_styles.apply_negative_styles_to_prompt(negative_prompt, styles)
44
45 return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=negative_prompt), gr.Dropdown.update(value=[])]
46
47
48 def refresh_styles():
49 return gr.update(choices=list(shared.prompt_styles.styles)), gr.update(choices=list(shared.prompt_styles.styles))
50
51
52 class UiPromptStyles:
53 def __init__(self, tabname, main_ui_prompt, main_ui_negative_prompt):
54 self.tabname = tabname
55
56 with gr.Row(elem_id=f"{tabname}_styles_row"):
57 self.dropdown = gr.Dropdown(label="Styles", show_label=False, elem_id=f"{tabname}_styles", choices=list(shared.prompt_styles.styles), value=[], multiselect=True, tooltip="Styles")
58 edit_button = ui_components.ToolButton(value=styles_edit_symbol, elem_id=f"{tabname}_styles_edit_button", tooltip="Edit styles")
59
60 with gr.Box(elem_id=f"{tabname}_styles_dialog", elem_classes="popup-dialog") as styles_dialog:
61 with gr.Row():
62 self.selection = gr.Dropdown(label="Styles", elem_id=f"{tabname}_styles_edit_select", choices=list(shared.prompt_styles.styles), value=[], allow_custom_value=True, info="Styles allow you to add custom text to prompt. Use the {prompt} token in style text, and it will be replaced with user's prompt when applying style. Otherwise, style's text will be added to the end of the prompt.")
63 ui_common.create_refresh_button([self.dropdown, self.selection], shared.prompt_styles.reload, lambda: {"choices": list(shared.prompt_styles.styles)}, f"refresh_{tabname}_styles")
64 self.materialize = ui_components.ToolButton(value=styles_materialize_symbol, elem_id=f"{tabname}_style_apply", tooltip="Apply all selected styles from the style selction dropdown in main UI to the prompt.")
65
66 with gr.Row():
67 self.prompt = gr.Textbox(label="Prompt", show_label=True, elem_id=f"{tabname}_edit_style_prompt", lines=3)
68
69 with gr.Row():
70 self.neg_prompt = gr.Textbox(label="Negative prompt", show_label=True, elem_id=f"{tabname}_edit_style_neg_prompt", lines=3)
71
72 with gr.Row():
73 self.save = gr.Button('Save', variant='primary', elem_id=f'{tabname}_edit_style_save', visible=False)
74 self.delete = gr.Button('Delete', variant='primary', elem_id=f'{tabname}_edit_style_delete', visible=False)
75 self.close = gr.Button('Close', variant='secondary', elem_id=f'{tabname}_edit_style_close')
76
77 self.selection.change(
78 fn=select_style,
79 inputs=[self.selection],
80 outputs=[self.prompt, self.neg_prompt, self.delete, self.save],
81 show_progress=False,
82 )
83
84 self.save.click(
85 fn=save_style,
86 inputs=[self.selection, self.prompt, self.neg_prompt],
87 outputs=[self.delete],
88 show_progress=False,
89 ).then(refresh_styles, outputs=[self.dropdown, self.selection], show_progress=False)
90
91 self.delete.click(
92 fn=delete_style,
93 _js='function(name){ if(name == "") return ""; return confirm("Delete style " + name + "?") ? name : ""; }',
94 inputs=[self.selection],
95 outputs=[self.selection, self.prompt, self.neg_prompt],
96 show_progress=False,
97 ).then(refresh_styles, outputs=[self.dropdown, self.selection], show_progress=False)
98
99 self.materialize.click(
100 fn=materialize_styles,
101 inputs=[main_ui_prompt, main_ui_negative_prompt, self.dropdown],
102 outputs=[main_ui_prompt, main_ui_negative_prompt, self.dropdown],
103 show_progress=False,
104 ).then(fn=None, _js="function(){update_"+tabname+"_tokens(); closePopup();}", show_progress=False)
105
106 ui_common.setup_dialog(button_show=edit_button, dialog=styles_dialog, button_close=self.close)
107
108
109
110
111
[end of modules/ui_prompt_styles.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/modules/ui_prompt_styles.py b/modules/ui_prompt_styles.py
--- a/modules/ui_prompt_styles.py
+++ b/modules/ui_prompt_styles.py
@@ -4,6 +4,7 @@
styles_edit_symbol = '\U0001f58c\uFE0F' # 🖌️
styles_materialize_symbol = '\U0001f4cb' # 📋
+styles_copy_symbol = '\U0001f4dd' # 📝
def select_style(name):
@@ -62,6 +63,7 @@
self.selection = gr.Dropdown(label="Styles", elem_id=f"{tabname}_styles_edit_select", choices=list(shared.prompt_styles.styles), value=[], allow_custom_value=True, info="Styles allow you to add custom text to prompt. Use the {prompt} token in style text, and it will be replaced with user's prompt when applying style. Otherwise, style's text will be added to the end of the prompt.")
ui_common.create_refresh_button([self.dropdown, self.selection], shared.prompt_styles.reload, lambda: {"choices": list(shared.prompt_styles.styles)}, f"refresh_{tabname}_styles")
self.materialize = ui_components.ToolButton(value=styles_materialize_symbol, elem_id=f"{tabname}_style_apply", tooltip="Apply all selected styles from the style selction dropdown in main UI to the prompt.")
+ self.copy = ui_components.ToolButton(value=styles_copy_symbol, elem_id=f"{tabname}_style_copy", tooltip="Copy main UI prompt to style.")
with gr.Row():
self.prompt = gr.Textbox(label="Prompt", show_label=True, elem_id=f"{tabname}_edit_style_prompt", lines=3)
@@ -103,6 +105,13 @@
show_progress=False,
).then(fn=None, _js="function(){update_"+tabname+"_tokens(); closePopup();}", show_progress=False)
+ self.copy.click(
+ fn=lambda p, n: (p, n),
+ inputs=[main_ui_prompt, main_ui_negative_prompt],
+ outputs=[self.prompt, self.neg_prompt],
+ show_progress=False,
+ )
+
ui_common.setup_dialog(button_show=edit_button, dialog=styles_dialog, button_close=self.close)
| {"golden_diff": "diff --git a/modules/ui_prompt_styles.py b/modules/ui_prompt_styles.py\n--- a/modules/ui_prompt_styles.py\n+++ b/modules/ui_prompt_styles.py\n@@ -4,6 +4,7 @@\n \r\n styles_edit_symbol = '\\U0001f58c\\uFE0F' # \ud83d\udd8c\ufe0f\r\n styles_materialize_symbol = '\\U0001f4cb' # \ud83d\udccb\r\n+styles_copy_symbol = '\\U0001f4dd' # \ud83d\udcdd\r\n \r\n \r\n def select_style(name):\r\n@@ -62,6 +63,7 @@\n self.selection = gr.Dropdown(label=\"Styles\", elem_id=f\"{tabname}_styles_edit_select\", choices=list(shared.prompt_styles.styles), value=[], allow_custom_value=True, info=\"Styles allow you to add custom text to prompt. Use the {prompt} token in style text, and it will be replaced with user's prompt when applying style. Otherwise, style's text will be added to the end of the prompt.\")\r\n ui_common.create_refresh_button([self.dropdown, self.selection], shared.prompt_styles.reload, lambda: {\"choices\": list(shared.prompt_styles.styles)}, f\"refresh_{tabname}_styles\")\r\n self.materialize = ui_components.ToolButton(value=styles_materialize_symbol, elem_id=f\"{tabname}_style_apply\", tooltip=\"Apply all selected styles from the style selction dropdown in main UI to the prompt.\")\r\n+ self.copy = ui_components.ToolButton(value=styles_copy_symbol, elem_id=f\"{tabname}_style_copy\", tooltip=\"Copy main UI prompt to style.\")\r\n \r\n with gr.Row():\r\n self.prompt = gr.Textbox(label=\"Prompt\", show_label=True, elem_id=f\"{tabname}_edit_style_prompt\", lines=3)\r\n@@ -103,6 +105,13 @@\n show_progress=False,\r\n ).then(fn=None, _js=\"function(){update_\"+tabname+\"_tokens(); closePopup();}\", show_progress=False)\r\n \r\n+ self.copy.click(\r\n+ fn=lambda p, n: (p, n),\r\n+ inputs=[main_ui_prompt, main_ui_negative_prompt],\r\n+ outputs=[self.prompt, self.neg_prompt],\r\n+ show_progress=False,\r\n+ )\r\n+\r\n ui_common.setup_dialog(button_show=edit_button, dialog=styles_dialog, button_close=self.close)\n", "issue": "[Feature Request]: Where is the save style button?\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues and checked the recent builds/commits\n\n### What would your feature do ?\n\nIs it possible to make the old implementation of save style as well?\r\nNot being able to save the currently typed prompt is very troublesome.\r\nWhy do we have to open the edit screen and copy/paste the prompt?\n\n### Proposed workflow\n\nRestore old implementation of save styles button\n\n### Additional information\n\n_No response_\n", "before_files": [{"content": "import gradio as gr\r\n\r\nfrom modules import shared, ui_common, ui_components, styles\r\n\r\nstyles_edit_symbol = '\\U0001f58c\\uFE0F' # \ud83d\udd8c\ufe0f\r\nstyles_materialize_symbol = '\\U0001f4cb' # \ud83d\udccb\r\n\r\n\r\ndef select_style(name):\r\n style = shared.prompt_styles.styles.get(name)\r\n existing = style is not None\r\n empty = not name\r\n\r\n prompt = style.prompt if style else gr.update()\r\n negative_prompt = style.negative_prompt if style else gr.update()\r\n\r\n return prompt, negative_prompt, gr.update(visible=existing), gr.update(visible=not empty)\r\n\r\n\r\ndef save_style(name, prompt, negative_prompt):\r\n if not name:\r\n return gr.update(visible=False)\r\n\r\n style = styles.PromptStyle(name, prompt, negative_prompt)\r\n shared.prompt_styles.styles[style.name] = style\r\n shared.prompt_styles.save_styles(shared.styles_filename)\r\n\r\n return gr.update(visible=True)\r\n\r\n\r\ndef delete_style(name):\r\n if name == \"\":\r\n return\r\n\r\n 
shared.prompt_styles.styles.pop(name, None)\r\n shared.prompt_styles.save_styles(shared.styles_filename)\r\n\r\n return '', '', ''\r\n\r\n\r\ndef materialize_styles(prompt, negative_prompt, styles):\r\n prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, styles)\r\n negative_prompt = shared.prompt_styles.apply_negative_styles_to_prompt(negative_prompt, styles)\r\n\r\n return [gr.Textbox.update(value=prompt), gr.Textbox.update(value=negative_prompt), gr.Dropdown.update(value=[])]\r\n\r\n\r\ndef refresh_styles():\r\n return gr.update(choices=list(shared.prompt_styles.styles)), gr.update(choices=list(shared.prompt_styles.styles))\r\n\r\n\r\nclass UiPromptStyles:\r\n def __init__(self, tabname, main_ui_prompt, main_ui_negative_prompt):\r\n self.tabname = tabname\r\n\r\n with gr.Row(elem_id=f\"{tabname}_styles_row\"):\r\n self.dropdown = gr.Dropdown(label=\"Styles\", show_label=False, elem_id=f\"{tabname}_styles\", choices=list(shared.prompt_styles.styles), value=[], multiselect=True, tooltip=\"Styles\")\r\n edit_button = ui_components.ToolButton(value=styles_edit_symbol, elem_id=f\"{tabname}_styles_edit_button\", tooltip=\"Edit styles\")\r\n\r\n with gr.Box(elem_id=f\"{tabname}_styles_dialog\", elem_classes=\"popup-dialog\") as styles_dialog:\r\n with gr.Row():\r\n self.selection = gr.Dropdown(label=\"Styles\", elem_id=f\"{tabname}_styles_edit_select\", choices=list(shared.prompt_styles.styles), value=[], allow_custom_value=True, info=\"Styles allow you to add custom text to prompt. Use the {prompt} token in style text, and it will be replaced with user's prompt when applying style. Otherwise, style's text will be added to the end of the prompt.\")\r\n ui_common.create_refresh_button([self.dropdown, self.selection], shared.prompt_styles.reload, lambda: {\"choices\": list(shared.prompt_styles.styles)}, f\"refresh_{tabname}_styles\")\r\n self.materialize = ui_components.ToolButton(value=styles_materialize_symbol, elem_id=f\"{tabname}_style_apply\", tooltip=\"Apply all selected styles from the style selction dropdown in main UI to the prompt.\")\r\n\r\n with gr.Row():\r\n self.prompt = gr.Textbox(label=\"Prompt\", show_label=True, elem_id=f\"{tabname}_edit_style_prompt\", lines=3)\r\n\r\n with gr.Row():\r\n self.neg_prompt = gr.Textbox(label=\"Negative prompt\", show_label=True, elem_id=f\"{tabname}_edit_style_neg_prompt\", lines=3)\r\n\r\n with gr.Row():\r\n self.save = gr.Button('Save', variant='primary', elem_id=f'{tabname}_edit_style_save', visible=False)\r\n self.delete = gr.Button('Delete', variant='primary', elem_id=f'{tabname}_edit_style_delete', visible=False)\r\n self.close = gr.Button('Close', variant='secondary', elem_id=f'{tabname}_edit_style_close')\r\n\r\n self.selection.change(\r\n fn=select_style,\r\n inputs=[self.selection],\r\n outputs=[self.prompt, self.neg_prompt, self.delete, self.save],\r\n show_progress=False,\r\n )\r\n\r\n self.save.click(\r\n fn=save_style,\r\n inputs=[self.selection, self.prompt, self.neg_prompt],\r\n outputs=[self.delete],\r\n show_progress=False,\r\n ).then(refresh_styles, outputs=[self.dropdown, self.selection], show_progress=False)\r\n\r\n self.delete.click(\r\n fn=delete_style,\r\n _js='function(name){ if(name == \"\") return \"\"; return confirm(\"Delete style \" + name + \"?\") ? 
name : \"\"; }',\r\n inputs=[self.selection],\r\n outputs=[self.selection, self.prompt, self.neg_prompt],\r\n show_progress=False,\r\n ).then(refresh_styles, outputs=[self.dropdown, self.selection], show_progress=False)\r\n\r\n self.materialize.click(\r\n fn=materialize_styles,\r\n inputs=[main_ui_prompt, main_ui_negative_prompt, self.dropdown],\r\n outputs=[main_ui_prompt, main_ui_negative_prompt, self.dropdown],\r\n show_progress=False,\r\n ).then(fn=None, _js=\"function(){update_\"+tabname+\"_tokens(); closePopup();}\", show_progress=False)\r\n\r\n ui_common.setup_dialog(button_show=edit_button, dialog=styles_dialog, button_close=self.close)\r\n\r\n\r\n\r\n\r\n", "path": "modules/ui_prompt_styles.py"}]} | 1,987 | 491 |
gh_patches_debug_379 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-3650 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Non-executable files with shebangs in the repository
**Describe your environment**
(Nothing relevant to describe)
**Steps to reproduce**
```
$ rg -l '^#!' | xargs ls -l
-rwxr-xr-x. 1 ben ben 1420 Jul 5 2023 docs/examples/django/manage.py
-rw-r--r--. 1 ben ben 1300 Jul 5 2023 docs/examples/opencensus-exporter-tracer/collector.py
-rwxr-xr-x. 1 ben ben 1485 Jul 5 2023 docs/examples/opentracing/main.py
-rwxr-xr-x. 1 ben ben 853 Jul 13 2023 scripts/build.sh
-rwxr-xr-x. 1 ben ben 1163 Jan 22 10:06 scripts/coverage.sh
-rwxr-xr-x. 1 ben ben 20741 Jul 13 2023 scripts/eachdist.py
-rwxr-xr-x. 1 ben ben 215 Jul 5 2023 scripts/generate_website_docs.sh
-rwxr-xr-x. 1 ben ben 2377 Jan 22 10:06 scripts/proto_codegen.sh
-rwxr-xr-x. 1 ben ben 1928 Jan 22 10:06 scripts/semconv/generate.sh
-rwxr-xr-x. 1 ben ben 945 Jul 5 2023 scripts/tracecontext-integration-test.sh
-rw-r--r--. 1 ben ben 2519 Jan 22 11:43 tests/w3c_tracecontext_validation_server.py
```
Note that two files have shebang lines (`#!`) but do not have the executable bit set, which makes the shebang lines useless.
**What is the expected behavior?**
Files should either be non-executable and have no shebang line, or be executable and have a shebang line.
**What is the actual behavior?**
The following files are not executable and have useless shebang lines:
- `docs/examples/opencensus-exporter-tracer/collector.py`
- `tests/w3c_tracecontext_validation_server.py`
**Additional context**
This is a trivial thing, but I would like to fix it in a PR – either by setting the executable bit on these two files, or by removing the useless shebang lines. Both files are “script-like,” i.e. they have `if __name__ == "__main__"` or have useful side effects. Which approach would you prefer?
</issue>
<code>
[start of docs/examples/opencensus-exporter-tracer/collector.py]
1 #!/usr/bin/env python3
2 #
3 # Copyright The OpenTelemetry Authors
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 from opentelemetry import trace
18 from opentelemetry.exporter.opencensus.trace_exporter import (
19 OpenCensusSpanExporter,
20 )
21 from opentelemetry.sdk.trace import TracerProvider
22 from opentelemetry.sdk.trace.export import BatchSpanProcessor
23
24 exporter = OpenCensusSpanExporter(endpoint="localhost:55678")
25
26 trace.set_tracer_provider(TracerProvider())
27 tracer = trace.get_tracer(__name__)
28 span_processor = BatchSpanProcessor(exporter)
29
30 trace.get_tracer_provider().add_span_processor(span_processor)
31 with tracer.start_as_current_span("foo"):
32 with tracer.start_as_current_span("bar"):
33 with tracer.start_as_current_span("baz"):
34 print("Hello world from OpenTelemetry Python!")
35
[end of docs/examples/opencensus-exporter-tracer/collector.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/examples/opencensus-exporter-tracer/collector.py b/docs/examples/opencensus-exporter-tracer/collector.py
--- a/docs/examples/opencensus-exporter-tracer/collector.py
+++ b/docs/examples/opencensus-exporter-tracer/collector.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python3
-#
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
| {"golden_diff": "diff --git a/docs/examples/opencensus-exporter-tracer/collector.py b/docs/examples/opencensus-exporter-tracer/collector.py\n--- a/docs/examples/opencensus-exporter-tracer/collector.py\n+++ b/docs/examples/opencensus-exporter-tracer/collector.py\n@@ -1,5 +1,3 @@\n-#!/usr/bin/env python3\n-#\n # Copyright The OpenTelemetry Authors\n #\n # Licensed under the Apache License, Version 2.0 (the \"License\");\n", "issue": "Non-executable files with shebangs in the repository\n**Describe your environment**\r\n\r\n(Nothing relevant to describe)\r\n\r\n**Steps to reproduce**\r\n\r\n```\r\n$ rg -l '^#!' | xargs ls -l\r\n-rwxr-xr-x. 1 ben ben 1420 Jul 5 2023 docs/examples/django/manage.py\r\n-rw-r--r--. 1 ben ben 1300 Jul 5 2023 docs/examples/opencensus-exporter-tracer/collector.py\r\n-rwxr-xr-x. 1 ben ben 1485 Jul 5 2023 docs/examples/opentracing/main.py\r\n-rwxr-xr-x. 1 ben ben 853 Jul 13 2023 scripts/build.sh\r\n-rwxr-xr-x. 1 ben ben 1163 Jan 22 10:06 scripts/coverage.sh\r\n-rwxr-xr-x. 1 ben ben 20741 Jul 13 2023 scripts/eachdist.py\r\n-rwxr-xr-x. 1 ben ben 215 Jul 5 2023 scripts/generate_website_docs.sh\r\n-rwxr-xr-x. 1 ben ben 2377 Jan 22 10:06 scripts/proto_codegen.sh\r\n-rwxr-xr-x. 1 ben ben 1928 Jan 22 10:06 scripts/semconv/generate.sh\r\n-rwxr-xr-x. 1 ben ben 945 Jul 5 2023 scripts/tracecontext-integration-test.sh\r\n-rw-r--r--. 1 ben ben 2519 Jan 22 11:43 tests/w3c_tracecontext_validation_server.py\r\n```\r\n\r\nNote that two files have shebang lines (`#!`) but do not have the executable bit set, which makes the shebang lines useless.\r\n\r\n**What is the expected behavior?**\r\n\r\nFiles should either be non-executable and have no shebang line, or be executable and have a shebang line.\r\n\r\n**What is the actual behavior?**\r\n\r\nThe following files are not executable and have useless shebang lines:\r\n\r\n- `docs/examples/opencensus-exporter-tracer/collector.py`\r\n- `tests/w3c_tracecontext_validation_server.py`\r\n\r\n**Additional context**\r\n\r\nThis is a trivial thing, but I would like to fix it in a PR \u2013 either by setting the executable bit on these two files, or by removing the useless shebang lines. Both files are \u201cscript-like,\u201d i.e. they have `if __name__ == \"__main__\"` or have useful side effects. 
Which approach would you prefer?\n", "before_files": [{"content": "#!/usr/bin/env python3\n#\n# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom opentelemetry import trace\nfrom opentelemetry.exporter.opencensus.trace_exporter import (\n OpenCensusSpanExporter,\n)\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import BatchSpanProcessor\n\nexporter = OpenCensusSpanExporter(endpoint=\"localhost:55678\")\n\ntrace.set_tracer_provider(TracerProvider())\ntracer = trace.get_tracer(__name__)\nspan_processor = BatchSpanProcessor(exporter)\n\ntrace.get_tracer_provider().add_span_processor(span_processor)\nwith tracer.start_as_current_span(\"foo\"):\n with tracer.start_as_current_span(\"bar\"):\n with tracer.start_as_current_span(\"baz\"):\n print(\"Hello world from OpenTelemetry Python!\")\n", "path": "docs/examples/opencensus-exporter-tracer/collector.py"}]} | 1,513 | 107 |
gh_patches_debug_9537 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-1452 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SMAPE formula typo
## 📚 Documentation
There's a typo in the [SMAPE formula](https://torchmetrics.readthedocs.io/en/stable/regression/symmetric_mean_absolute_percentage_error.html). It should be `{SMAPE} = \frac{2}{n}\sum_1^n\frac{| y_i - \hat{y_i} |}{\max(| y_i | + | \hat{y_i} |, \epsilon)}` instead of `{SMAPE} = \frac{2}{n}\sum_1^n max(\frac{| y_i - \hat{y_i} |}{| y_i | + | \hat{y_i} |, \epsilon})`. The attached screenshot shows the typo and its correction.

</issue>
<code>
[start of src/torchmetrics/regression/symmetric_mape.py]
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Any
15
16 from torch import Tensor, tensor
17
18 from torchmetrics.functional.regression.symmetric_mape import (
19 _symmetric_mean_absolute_percentage_error_compute,
20 _symmetric_mean_absolute_percentage_error_update,
21 )
22 from torchmetrics.metric import Metric
23
24
25 class SymmetricMeanAbsolutePercentageError(Metric):
26 r"""Computes symmetric mean absolute percentage error (`SMAPE`_).
27
28 .. math:: \text{SMAPE} = \frac{2}{n}\sum_1^n max(\frac{| y_i - \hat{y_i} |}{| y_i | + | \hat{y_i} |, \epsilon})
29
30 Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.
31
32 As input to ``forward`` and ``update`` the metric accepts the following input:
33
34 - ``preds`` (:class:`~torch.Tensor`): Predictions from model
35 - ``target`` (:class:`~torch.Tensor`): Ground truth values
36
37 As output of ``forward`` and ``compute`` the metric returns the following output:
38
39 - ``smape`` (:class:`~torch.Tensor`): A tensor with non-negative floating point smape value between 0 and 1
40
41 Args:
42 kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.
43
44 Example:
45 >>> from torchmetrics import SymmetricMeanAbsolutePercentageError
46 >>> target = tensor([1, 10, 1e6])
47 >>> preds = tensor([0.9, 15, 1.2e6])
48 >>> smape = SymmetricMeanAbsolutePercentageError()
49 >>> smape(preds, target)
50 tensor(0.2290)
51 """
52 is_differentiable: bool = True
53 higher_is_better: bool = False
54 full_state_update: bool = False
55 sum_abs_per_error: Tensor
56 total: Tensor
57
58 def __init__(
59 self,
60 **kwargs: Any,
61 ) -> None:
62 super().__init__(**kwargs)
63
64 self.add_state("sum_abs_per_error", default=tensor(0.0), dist_reduce_fx="sum")
65 self.add_state("total", default=tensor(0.0), dist_reduce_fx="sum")
66
67 def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore
68 """Update state with predictions and targets."""
69 sum_abs_per_error, num_obs = _symmetric_mean_absolute_percentage_error_update(preds, target)
70
71 self.sum_abs_per_error += sum_abs_per_error
72 self.total += num_obs
73
74 def compute(self) -> Tensor:
75 """Computes mean absolute percentage error over state."""
76 return _symmetric_mean_absolute_percentage_error_compute(self.sum_abs_per_error, self.total)
77
[end of src/torchmetrics/regression/symmetric_mape.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/torchmetrics/regression/symmetric_mape.py b/src/torchmetrics/regression/symmetric_mape.py
--- a/src/torchmetrics/regression/symmetric_mape.py
+++ b/src/torchmetrics/regression/symmetric_mape.py
@@ -25,7 +25,7 @@
class SymmetricMeanAbsolutePercentageError(Metric):
r"""Computes symmetric mean absolute percentage error (`SMAPE`_).
- .. math:: \text{SMAPE} = \frac{2}{n}\sum_1^n max(\frac{| y_i - \hat{y_i} |}{| y_i | + | \hat{y_i} |, \epsilon})
+ .. math:: \text{SMAPE} = \frac{2}{n}\sum_1^n\frac{| y_i - \hat{y_i} |}{\max(| y_i | + | \hat{y_i} |, \epsilon)}
Where :math:`y` is a tensor of target values, and :math:`\hat{y}` is a tensor of predictions.
| {"golden_diff": "diff --git a/src/torchmetrics/regression/symmetric_mape.py b/src/torchmetrics/regression/symmetric_mape.py\n--- a/src/torchmetrics/regression/symmetric_mape.py\n+++ b/src/torchmetrics/regression/symmetric_mape.py\n@@ -25,7 +25,7 @@\n class SymmetricMeanAbsolutePercentageError(Metric):\n r\"\"\"Computes symmetric mean absolute percentage error (`SMAPE`_).\n \n- .. math:: \\text{SMAPE} = \\frac{2}{n}\\sum_1^n max(\\frac{| y_i - \\hat{y_i} |}{| y_i | + | \\hat{y_i} |, \\epsilon})\n+ .. math:: \\text{SMAPE} = \\frac{2}{n}\\sum_1^n\\frac{| y_i - \\hat{y_i} |}{\\max(| y_i | + | \\hat{y_i} |, \\epsilon)}\n \n Where :math:`y` is a tensor of target values, and :math:`\\hat{y}` is a tensor of predictions.\n", "issue": "SMAPE formula typo\n## \ud83d\udcda Documentation\r\n\r\n\r\nThere's a typo in the [SMAPE formula](https://torchmetrics.readthedocs.io/en/stable/regression/symmetric_mean_absolute_percentage_error.html). It should be `{SMAPE} = \\frac{2}{n}\\sum_1^n\\frac{| y_i - \\hat{y_i} |}{\\max(| y_i | + | \\hat{y_i} |, \\epsilon)}` instead of `{SMAPE} = \\frac{2}{n}\\sum_1^n max(\\frac{| y_i - \\hat{y_i} |}{| y_i | + | \\hat{y_i} |, \\epsilon})`. The attached screenshot shows the typo and its correction.\r\n\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any\n\nfrom torch import Tensor, tensor\n\nfrom torchmetrics.functional.regression.symmetric_mape import (\n _symmetric_mean_absolute_percentage_error_compute,\n _symmetric_mean_absolute_percentage_error_update,\n)\nfrom torchmetrics.metric import Metric\n\n\nclass SymmetricMeanAbsolutePercentageError(Metric):\n r\"\"\"Computes symmetric mean absolute percentage error (`SMAPE`_).\n\n .. 
math:: \\text{SMAPE} = \\frac{2}{n}\\sum_1^n max(\\frac{| y_i - \\hat{y_i} |}{| y_i | + | \\hat{y_i} |, \\epsilon})\n\n Where :math:`y` is a tensor of target values, and :math:`\\hat{y}` is a tensor of predictions.\n\n As input to ``forward`` and ``update`` the metric accepts the following input:\n\n - ``preds`` (:class:`~torch.Tensor`): Predictions from model\n - ``target`` (:class:`~torch.Tensor`): Ground truth values\n\n As output of ``forward`` and ``compute`` the metric returns the following output:\n\n - ``smape`` (:class:`~torch.Tensor`): A tensor with non-negative floating point smape value between 0 and 1\n\n Args:\n kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.\n\n Example:\n >>> from torchmetrics import SymmetricMeanAbsolutePercentageError\n >>> target = tensor([1, 10, 1e6])\n >>> preds = tensor([0.9, 15, 1.2e6])\n >>> smape = SymmetricMeanAbsolutePercentageError()\n >>> smape(preds, target)\n tensor(0.2290)\n \"\"\"\n is_differentiable: bool = True\n higher_is_better: bool = False\n full_state_update: bool = False\n sum_abs_per_error: Tensor\n total: Tensor\n\n def __init__(\n self,\n **kwargs: Any,\n ) -> None:\n super().__init__(**kwargs)\n\n self.add_state(\"sum_abs_per_error\", default=tensor(0.0), dist_reduce_fx=\"sum\")\n self.add_state(\"total\", default=tensor(0.0), dist_reduce_fx=\"sum\")\n\n def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore\n \"\"\"Update state with predictions and targets.\"\"\"\n sum_abs_per_error, num_obs = _symmetric_mean_absolute_percentage_error_update(preds, target)\n\n self.sum_abs_per_error += sum_abs_per_error\n self.total += num_obs\n\n def compute(self) -> Tensor:\n \"\"\"Computes mean absolute percentage error over state.\"\"\"\n return _symmetric_mean_absolute_percentage_error_compute(self.sum_abs_per_error, self.total)\n", "path": "src/torchmetrics/regression/symmetric_mape.py"}]} | 1,662 | 242 |
gh_patches_debug_11153 | rasdani/github-patches | git_diff | open-mmlab__mmsegmentation-19 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
FileNotFoundError: [Errno 2] No such file or directory: 'VOCdevkit/VOCaug/dataset/trainval.txt'
https://github.com/open-mmlab/mmsegmentation/blob/1c3f54765981ba352d4cf6582edb1c8915e51d71/tools/convert_datasets/voc_aug.py#L53
Directory `VOCdevkit/VOCaug/dataset` does not exist `trainval.txt`, `trainval.txt` is the merger of `train.txt` and `val.txt`?
</issue>
<code>
[start of tools/convert_datasets/voc_aug.py]
1 import argparse
2 import os.path as osp
3 from functools import partial
4
5 import mmcv
6 import numpy as np
7 from PIL import Image
8 from scipy.io import loadmat
9
10 AUG_LEN = 10582
11
12
13 def convert_mat(mat_file, in_dir, out_dir):
14 data = loadmat(osp.join(in_dir, mat_file))
15 mask = data['GTcls'][0]['Segmentation'][0].astype(np.uint8)
16 seg_filename = osp.join(out_dir, mat_file.replace('.mat', '.png'))
17 Image.fromarray(mask).save(seg_filename, 'PNG')
18
19
20 def generate_aug_list(merged_list, excluded_list):
21 return list(set(merged_list) - set(excluded_list))
22
23
24 def parse_args():
25 parser = argparse.ArgumentParser(
26 description='Convert PASCAL VOC annotations to mmsegmentation format')
27 parser.add_argument('devkit_path', help='pascal voc devkit path')
28 parser.add_argument('aug_path', help='pascal voc aug path')
29 parser.add_argument('-o', '--out_dir', help='output path')
30 parser.add_argument(
31 '--nproc', default=1, type=int, help='number of process')
32 args = parser.parse_args()
33 return args
34
35
36 def main():
37 args = parse_args()
38 devkit_path = args.devkit_path
39 aug_path = args.aug_path
40 nproc = args.nproc
41 if args.out_dir is None:
42 out_dir = osp.join(devkit_path, 'VOC2012', 'SegmentationClassAug')
43 else:
44 out_dir = args.out_dir
45 mmcv.mkdir_or_exist(out_dir)
46 in_dir = osp.join(aug_path, 'dataset', 'cls')
47
48 mmcv.track_parallel_progress(
49 partial(convert_mat, in_dir=in_dir, out_dir=out_dir),
50 list(mmcv.scandir(in_dir, suffix='.mat')),
51 nproc=nproc)
52
53 with open(osp.join(aug_path, 'dataset', 'trainval.txt')) as f:
54 full_aug_list = [line.strip() for line in f]
55 with open(
56 osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation',
57 'train.txt')) as f:
58 ori_train_list = [line.strip() for line in f]
59 with open(
60 osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation',
61 'val.txt')) as f:
62 val_list = [line.strip() for line in f]
63
64 aug_train_list = generate_aug_list(ori_train_list + full_aug_list,
65 val_list)
66 assert len(aug_train_list) == AUG_LEN, 'len(aug_train_list) != {}'.format(
67 AUG_LEN)
68
69 with open(
70 osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation',
71 'trainaug.txt'), 'w') as f:
72 f.writelines(line + '\n' for line in aug_train_list)
73
74 aug_list = generate_aug_list(full_aug_list, ori_train_list + val_list)
75 assert len(aug_list) == AUG_LEN - len(
76 ori_train_list), 'len(aug_list) != {}'.format(AUG_LEN -
77 len(ori_train_list))
78 with open(
79 osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation', 'aug.txt'),
80 'w') as f:
81 f.writelines(line + '\n' for line in aug_list)
82
83 print('Done!')
84
85
86 if __name__ == '__main__':
87 main()
88
[end of tools/convert_datasets/voc_aug.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tools/convert_datasets/voc_aug.py b/tools/convert_datasets/voc_aug.py
--- a/tools/convert_datasets/voc_aug.py
+++ b/tools/convert_datasets/voc_aug.py
@@ -50,8 +50,12 @@
list(mmcv.scandir(in_dir, suffix='.mat')),
nproc=nproc)
- with open(osp.join(aug_path, 'dataset', 'trainval.txt')) as f:
- full_aug_list = [line.strip() for line in f]
+ full_aug_list = []
+ with open(osp.join(aug_path, 'dataset', 'train.txt')) as f:
+ full_aug_list += [line.strip() for line in f]
+ with open(osp.join(aug_path, 'dataset', 'val.txt')) as f:
+ full_aug_list += [line.strip() for line in f]
+
with open(
osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation',
'train.txt')) as f:
| {"golden_diff": "diff --git a/tools/convert_datasets/voc_aug.py b/tools/convert_datasets/voc_aug.py\n--- a/tools/convert_datasets/voc_aug.py\n+++ b/tools/convert_datasets/voc_aug.py\n@@ -50,8 +50,12 @@\n list(mmcv.scandir(in_dir, suffix='.mat')),\n nproc=nproc)\n \n- with open(osp.join(aug_path, 'dataset', 'trainval.txt')) as f:\n- full_aug_list = [line.strip() for line in f]\n+ full_aug_list = []\n+ with open(osp.join(aug_path, 'dataset', 'train.txt')) as f:\n+ full_aug_list += [line.strip() for line in f]\n+ with open(osp.join(aug_path, 'dataset', 'val.txt')) as f:\n+ full_aug_list += [line.strip() for line in f]\n+\n with open(\n osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation',\n 'train.txt')) as f:\n", "issue": "FileNotFoundError: [Errno 2] No such file or directory: 'VOCdevkit/VOCaug/dataset/trainval.txt'\nhttps://github.com/open-mmlab/mmsegmentation/blob/1c3f54765981ba352d4cf6582edb1c8915e51d71/tools/convert_datasets/voc_aug.py#L53\r\n\r\nDirectory `VOCdevkit/VOCaug/dataset` does not exist `trainval.txt`, `trainval.txt` is the merger of `train.txt` and `val.txt`?\n", "before_files": [{"content": "import argparse\nimport os.path as osp\nfrom functools import partial\n\nimport mmcv\nimport numpy as np\nfrom PIL import Image\nfrom scipy.io import loadmat\n\nAUG_LEN = 10582\n\n\ndef convert_mat(mat_file, in_dir, out_dir):\n data = loadmat(osp.join(in_dir, mat_file))\n mask = data['GTcls'][0]['Segmentation'][0].astype(np.uint8)\n seg_filename = osp.join(out_dir, mat_file.replace('.mat', '.png'))\n Image.fromarray(mask).save(seg_filename, 'PNG')\n\n\ndef generate_aug_list(merged_list, excluded_list):\n return list(set(merged_list) - set(excluded_list))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='Convert PASCAL VOC annotations to mmsegmentation format')\n parser.add_argument('devkit_path', help='pascal voc devkit path')\n parser.add_argument('aug_path', help='pascal voc aug path')\n parser.add_argument('-o', '--out_dir', help='output path')\n parser.add_argument(\n '--nproc', default=1, type=int, help='number of process')\n args = parser.parse_args()\n return args\n\n\ndef main():\n args = parse_args()\n devkit_path = args.devkit_path\n aug_path = args.aug_path\n nproc = args.nproc\n if args.out_dir is None:\n out_dir = osp.join(devkit_path, 'VOC2012', 'SegmentationClassAug')\n else:\n out_dir = args.out_dir\n mmcv.mkdir_or_exist(out_dir)\n in_dir = osp.join(aug_path, 'dataset', 'cls')\n\n mmcv.track_parallel_progress(\n partial(convert_mat, in_dir=in_dir, out_dir=out_dir),\n list(mmcv.scandir(in_dir, suffix='.mat')),\n nproc=nproc)\n\n with open(osp.join(aug_path, 'dataset', 'trainval.txt')) as f:\n full_aug_list = [line.strip() for line in f]\n with open(\n osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation',\n 'train.txt')) as f:\n ori_train_list = [line.strip() for line in f]\n with open(\n osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation',\n 'val.txt')) as f:\n val_list = [line.strip() for line in f]\n\n aug_train_list = generate_aug_list(ori_train_list + full_aug_list,\n val_list)\n assert len(aug_train_list) == AUG_LEN, 'len(aug_train_list) != {}'.format(\n AUG_LEN)\n\n with open(\n osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation',\n 'trainaug.txt'), 'w') as f:\n f.writelines(line + '\\n' for line in aug_train_list)\n\n aug_list = generate_aug_list(full_aug_list, ori_train_list + val_list)\n assert len(aug_list) == AUG_LEN - len(\n ori_train_list), 'len(aug_list) != {}'.format(AUG_LEN -\n len(ori_train_list))\n with open(\n 
osp.join(devkit_path, 'VOC2012/ImageSets/Segmentation', 'aug.txt'),\n 'w') as f:\n f.writelines(line + '\\n' for line in aug_list)\n\n print('Done!')\n\n\nif __name__ == '__main__':\n main()\n", "path": "tools/convert_datasets/voc_aug.py"}]} | 1,608 | 229 |
gh_patches_debug_10054 | rasdani/github-patches | git_diff | acl-org__acl-anthology-990 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Recaser bug: adding fixed-case inside tex-math markup
`<tex-math><fixed-case>O</fixed-case>(<fixed-case>M</fixed-case>(n^2))</tex-math>` caused the build to fail in #892
</issue>
<code>
[start of bin/fixedcase/protect.py]
1 #!/usr/bin/env python3
2
3 # protect.py <infile> <outfile>
4 # looks for file "truelist" in current dir
5
6 # cd data/xml
7 # for i in *xml ; do (cd ../../tools/fixedcase/ ; python3 ./protect.py ../../data/xml/$i /tmp/$i ; echo $i ); done > log
8
9
10 import lxml.etree as ET
11 import os
12 import sys
13 import copy
14 import itertools
15 import inspect
16
17 from collections import defaultdict
18
19 if __name__ == "__main__":
20 from common import *
21 else:
22 from .common import *
23
24 # recursive helper called by protect
25 # protect text of "node", including children, and tails of children
26 def protect_recurse(node, recased):
27 if node.tag == "fixed-case": # already protected
28 newnode = copy.deepcopy(node) # don't need to modify descendents
29 newnode.tail = None # tail will be protected by caller
30 return newnode
31 newnode = ET.Element(node.tag, node.attrib)
32
33 def process(text, rc):
34 i = 0
35 for upper, chars in itertools.groupby(rc[: len(text)], lambda c: c.isupper()):
36 charstr = "".join(chars)
37 if upper:
38 p = ET.Element("fixed-case")
39 p.text = charstr
40 newnode.append(p)
41 else:
42 append_text(newnode, text[i : i + len(charstr)])
43
44 assert text[i : i + len(charstr)].lower() == charstr.lower(), (
45 i,
46 text,
47 charstr,
48 )
49 i += len(charstr)
50
51 if node.text:
52 process(node.text, recased)
53 recased = recased[len(node.text) :]
54 for child in node:
55 protected_child = protect_recurse(child, recased)
56 recased = recased[len(get_text(protected_child)) :]
57 newnode.append(protected_child)
58 if child.tail:
59 process(child.tail, recased)
60 recased = recased[len(child.tail) :]
61
62 return newnode
63
64
65 def protect(node):
66 rawtext = get_text(node).strip()
67 recased = None
68 if rawtext.lower() in special_titles:
69 recased = special_titles[rawtext.lower()]
70 else:
71 text = tokenize(rawtext)
72 fixed = fixedcase_title(
73 text,
74 truelist=truelist,
75 phrase_truelist=phrase_truelist,
76 amodifiers=amodifiers,
77 ndescriptors=ndescriptors,
78 )
79 if any(fixed):
80 # Generate the recased string so we know where to look in the XML
81 # to apply fixed-case
82 recasedtoks = [(w if b else w.lower()) for w, b in zip(text, fixed)]
83 recased = TreebankWordDetokenizer().detokenize(recasedtoks)
84 # PTB (de)tokenizer doesn't think of hyphens as separate tokens,
85 # so we need to manually detokenize them.
86 # Assuming the only edits that need to be made are adding/deleting
87 # spaces, the following will work:
88 i = 0
89 while i < len(rawtext):
90 # scan rawtext from left to right and adjust recased by adding/removing
91 # spaces until it matches
92 t = rawtext[i]
93 assert i < len(recased), ((i, t), rawtext, recased)
94 c = recased[i]
95 if t.isspace() and not c.isspace(): # may be ' ' or '\n'
96 # add space to recased
97 recased = recased[:i] + t + recased[i:]
98 i += 1
99 elif c.isspace() and not t.isspace():
100 # remove space from recased
101 recased = recased[:i] + recased[i + 1 :]
102 # don't increment i
103 elif t != c and t.isspace() and c.isspace():
104 recased = recased[:i] + t + recased[i + 1 :]
105 i += 1
106 else:
107 assert t == c or t.lower() == c.lower(), (
108 (i, t, c),
109 rawtext,
110 recased,
111 text,
112 )
113 i += 1
114 if len(recased) > len(rawtext):
115 recased = recased[: len(rawtext)]
116 assert rawtext.lower() == recased.lower(), (rawtext, recased)
117
118 if recased:
119 newnode = protect_recurse(node, recased)
120 newnode.tail = node.tail # tail of top level is not protected
121 replace_node(node, newnode)
122
123
124 # Read in the truelist (list of words that should always be protected)
125 truelist, phrase_truelist, special_titles, amodifiers, ndescriptors = load_lists()
126
127 if __name__ == "__main__":
128 infile, outfile = sys.argv[1:]
129
130 tree = ET.parse(infile)
131 if not tree.getroot().tail:
132 tree.getroot().tail = "\n"
133 for paper in tree.getroot().findall(".//paper"):
134 for title in paper.xpath("./title|./booktitle"):
135 protect(title)
136 tree.write(outfile, encoding="UTF-8", xml_declaration=True)
137
[end of bin/fixedcase/protect.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bin/fixedcase/protect.py b/bin/fixedcase/protect.py
--- a/bin/fixedcase/protect.py
+++ b/bin/fixedcase/protect.py
@@ -24,7 +24,7 @@
# recursive helper called by protect
# protect text of "node", including children, and tails of children
def protect_recurse(node, recased):
- if node.tag == "fixed-case": # already protected
+ if node.tag in ("fixed-case", "tex-math"): # already protected text, or math
newnode = copy.deepcopy(node) # don't need to modify descendents
newnode.tail = None # tail will be protected by caller
return newnode
| {"golden_diff": "diff --git a/bin/fixedcase/protect.py b/bin/fixedcase/protect.py\n--- a/bin/fixedcase/protect.py\n+++ b/bin/fixedcase/protect.py\n@@ -24,7 +24,7 @@\n # recursive helper called by protect\n # protect text of \"node\", including children, and tails of children\n def protect_recurse(node, recased):\n- if node.tag == \"fixed-case\": # already protected\n+ if node.tag in (\"fixed-case\", \"tex-math\"): # already protected text, or math\n newnode = copy.deepcopy(node) # don't need to modify descendents\n newnode.tail = None # tail will be protected by caller\n return newnode\n", "issue": "Recaser bug: adding fixed-case inside tex-math markup\n`<tex-math><fixed-case>O</fixed-case>(<fixed-case>M</fixed-case>(n^2))</tex-math>` caused the build to fail in #892\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# protect.py <infile> <outfile>\n# looks for file \"truelist\" in current dir\n\n# cd data/xml\n# for i in *xml ; do (cd ../../tools/fixedcase/ ; python3 ./protect.py ../../data/xml/$i /tmp/$i ; echo $i ); done > log\n\n\nimport lxml.etree as ET\nimport os\nimport sys\nimport copy\nimport itertools\nimport inspect\n\nfrom collections import defaultdict\n\nif __name__ == \"__main__\":\n from common import *\nelse:\n from .common import *\n\n# recursive helper called by protect\n# protect text of \"node\", including children, and tails of children\ndef protect_recurse(node, recased):\n if node.tag == \"fixed-case\": # already protected\n newnode = copy.deepcopy(node) # don't need to modify descendents\n newnode.tail = None # tail will be protected by caller\n return newnode\n newnode = ET.Element(node.tag, node.attrib)\n\n def process(text, rc):\n i = 0\n for upper, chars in itertools.groupby(rc[: len(text)], lambda c: c.isupper()):\n charstr = \"\".join(chars)\n if upper:\n p = ET.Element(\"fixed-case\")\n p.text = charstr\n newnode.append(p)\n else:\n append_text(newnode, text[i : i + len(charstr)])\n\n assert text[i : i + len(charstr)].lower() == charstr.lower(), (\n i,\n text,\n charstr,\n )\n i += len(charstr)\n\n if node.text:\n process(node.text, recased)\n recased = recased[len(node.text) :]\n for child in node:\n protected_child = protect_recurse(child, recased)\n recased = recased[len(get_text(protected_child)) :]\n newnode.append(protected_child)\n if child.tail:\n process(child.tail, recased)\n recased = recased[len(child.tail) :]\n\n return newnode\n\n\ndef protect(node):\n rawtext = get_text(node).strip()\n recased = None\n if rawtext.lower() in special_titles:\n recased = special_titles[rawtext.lower()]\n else:\n text = tokenize(rawtext)\n fixed = fixedcase_title(\n text,\n truelist=truelist,\n phrase_truelist=phrase_truelist,\n amodifiers=amodifiers,\n ndescriptors=ndescriptors,\n )\n if any(fixed):\n # Generate the recased string so we know where to look in the XML\n # to apply fixed-case\n recasedtoks = [(w if b else w.lower()) for w, b in zip(text, fixed)]\n recased = TreebankWordDetokenizer().detokenize(recasedtoks)\n # PTB (de)tokenizer doesn't think of hyphens as separate tokens,\n # so we need to manually detokenize them.\n # Assuming the only edits that need to be made are adding/deleting\n # spaces, the following will work:\n i = 0\n while i < len(rawtext):\n # scan rawtext from left to right and adjust recased by adding/removing\n # spaces until it matches\n t = rawtext[i]\n assert i < len(recased), ((i, t), rawtext, recased)\n c = recased[i]\n if t.isspace() and not c.isspace(): # may be ' ' or '\\n'\n # add space to recased\n recased = 
recased[:i] + t + recased[i:]\n i += 1\n elif c.isspace() and not t.isspace():\n # remove space from recased\n recased = recased[:i] + recased[i + 1 :]\n # don't increment i\n elif t != c and t.isspace() and c.isspace():\n recased = recased[:i] + t + recased[i + 1 :]\n i += 1\n else:\n assert t == c or t.lower() == c.lower(), (\n (i, t, c),\n rawtext,\n recased,\n text,\n )\n i += 1\n if len(recased) > len(rawtext):\n recased = recased[: len(rawtext)]\n assert rawtext.lower() == recased.lower(), (rawtext, recased)\n\n if recased:\n newnode = protect_recurse(node, recased)\n newnode.tail = node.tail # tail of top level is not protected\n replace_node(node, newnode)\n\n\n# Read in the truelist (list of words that should always be protected)\ntruelist, phrase_truelist, special_titles, amodifiers, ndescriptors = load_lists()\n\nif __name__ == \"__main__\":\n infile, outfile = sys.argv[1:]\n\n tree = ET.parse(infile)\n if not tree.getroot().tail:\n tree.getroot().tail = \"\\n\"\n for paper in tree.getroot().findall(\".//paper\"):\n for title in paper.xpath(\"./title|./booktitle\"):\n protect(title)\n tree.write(outfile, encoding=\"UTF-8\", xml_declaration=True)\n", "path": "bin/fixedcase/protect.py"}]} | 2,046 | 162 |
gh_patches_debug_13 | rasdani/github-patches | git_diff | OCHA-DAP__hdx-ckan-1779 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Ebola Page>Map: disable scroll wheel zoom
CJ - The specific property is here: https://github.com/OCHA-DAP/hdx-design/blob/gh-pages/js/country.js
line 111: map.scrollWheelZoom.disable();
</issue>
<code>
[start of ckanext-hdx_theme/ckanext/hdx_theme/version.py]
1 hdx_version = 'v0.5.1'
2
[end of ckanext-hdx_theme/ckanext/hdx_theme/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py
--- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py
+++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py
@@ -1 +1 @@
-hdx_version = 'v0.5.1'
+hdx_version = 'v0.5.2'
| {"golden_diff": "diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n--- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n+++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n@@ -1 +1 @@\n-hdx_version = 'v0.5.1'\n+hdx_version = 'v0.5.2'\n", "issue": "Ebola Page>Map: disable scroll wheel zoom\nCJ - The specific property is here: https://github.com/OCHA-DAP/hdx-design/blob/gh-pages/js/country.js\n\nline 111: map.scrollWheelZoom.disable();\n\n", "before_files": [{"content": "hdx_version = 'v0.5.1'\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py"}]} | 622 | 107 |
gh_patches_debug_40775 | rasdani/github-patches | git_diff | streamlink__streamlink-3662 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
plugins.bfmtv: No playable streams found
Hello. for few days, the plugin isn't working anymore
/usr/local/bin/streamlink --loglevel debug https://rmcdecouverte.bfmtv.com/mediaplayer-direct/ best
[cli][info] streamlink is running as root! Be careful!
[cli][debug] OS: Linux-5.8.0-44-generic-x86_64-with-glibc2.29
[cli][debug] Python: 3.8.5
[cli][debug] Streamlink: 2.1.1
[cli][debug] Requests(2.22.0), Socks(1.7.1), Websocket(0.58.0)
[cli][debug] Arguments:
[cli][debug] url=https://rmcdecouverte.bfmtv.com/mediaplayer-direct/
[cli][debug] stream=['best']
[cli][debug] --loglevel=debug
[cli][info] Found matching plugin bfmtv for URL https://rmcdecouverte.bfmtv.com/mediaplayer-direct/
error: No playable streams found on this URL: https://rmcdecouverte.bfmtv.com/mediaplayer-direct/
</issue>
<code>
[start of src/streamlink/plugins/bfmtv.py]
1 import logging
2 import re
3
4 from streamlink.plugin import Plugin
5 from streamlink.plugins.brightcove import BrightcovePlayer
6
7 log = logging.getLogger(__name__)
8
9
10 class BFMTV(Plugin):
11 _url_re = re.compile(r'https://.+\.(?:bfmtv|01net)\.com')
12 _dailymotion_url = 'https://www.dailymotion.com/embed/video/{}'
13 _brightcove_video_re = re.compile(
14 r'accountid="(?P<account_id>[0-9]+).*?videoid="(?P<video_id>[0-9]+)"',
15 re.DOTALL
16 )
17 _brightcove_video_alt_re = re.compile(
18 r'data-account="(?P<account_id>[0-9]+).*?data-video-id="(?P<video_id>[0-9]+)"',
19 re.DOTALL
20 )
21 _embed_video_id_re = re.compile(
22 r'<iframe.*?src=".*?/(?P<video_id>\w+)"',
23 re.DOTALL
24 )
25
26 @classmethod
27 def can_handle_url(cls, url):
28 return cls._url_re.match(url) is not None
29
30 def _get_streams(self):
31 # Retrieve URL page and search for Brightcove video data
32 res = self.session.http.get(self.url)
33 match = self._brightcove_video_re.search(res.text) or self._brightcove_video_alt_re.search(res.text)
34 if match is not None:
35 account_id = match.group('account_id')
36 log.debug(f'Account ID: {account_id}')
37 video_id = match.group('video_id')
38 log.debug(f'Video ID: {video_id}')
39 player = BrightcovePlayer(self.session, account_id)
40 yield from player.get_streams(video_id)
41 else:
42 # Try to find the Dailymotion video ID
43 match = self._embed_video_id_re.search(res.text)
44 if match is not None:
45 video_id = match.group('video_id')
46 log.debug(f'Video ID: {video_id}')
47 yield from self.session.streams(self._dailymotion_url.format(video_id)).items()
48
49
50 __plugin__ = BFMTV
51
[end of src/streamlink/plugins/bfmtv.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/plugins/bfmtv.py b/src/streamlink/plugins/bfmtv.py
--- a/src/streamlink/plugins/bfmtv.py
+++ b/src/streamlink/plugins/bfmtv.py
@@ -1,8 +1,11 @@
import logging
import re
+from urllib.parse import urljoin, urlparse
from streamlink.plugin import Plugin
+from streamlink.plugin.api.utils import itertags
from streamlink.plugins.brightcove import BrightcovePlayer
+from streamlink.stream import HTTPStream
log = logging.getLogger(__name__)
@@ -22,29 +25,68 @@
r'<iframe.*?src=".*?/(?P<video_id>\w+)"',
re.DOTALL
)
+ _main_js_url_re = re.compile(r'src="([\w/]+/main\.\w+\.js)"')
+ _js_brightcove_video_re = re.compile(
+ r'i\?\([A-Z]="[^"]+",y="(?P<video_id>[0-9]+).*"data-account"\s*:\s*"(?P<account_id>[0-9]+)',
+ )
@classmethod
def can_handle_url(cls, url):
return cls._url_re.match(url) is not None
def _get_streams(self):
- # Retrieve URL page and search for Brightcove video data
res = self.session.http.get(self.url)
- match = self._brightcove_video_re.search(res.text) or self._brightcove_video_alt_re.search(res.text)
- if match is not None:
- account_id = match.group('account_id')
+
+ m = self._brightcove_video_re.search(res.text) or self._brightcove_video_alt_re.search(res.text)
+ if m:
+ account_id = m.group('account_id')
log.debug(f'Account ID: {account_id}')
- video_id = match.group('video_id')
+ video_id = m.group('video_id')
log.debug(f'Video ID: {video_id}')
player = BrightcovePlayer(self.session, account_id)
yield from player.get_streams(video_id)
- else:
- # Try to find the Dailymotion video ID
- match = self._embed_video_id_re.search(res.text)
- if match is not None:
- video_id = match.group('video_id')
+ return
+
+ # Try to find the Dailymotion video ID
+ m = self._embed_video_id_re.search(res.text)
+ if m:
+ video_id = m.group('video_id')
+ log.debug(f'Video ID: {video_id}')
+ yield from self.session.streams(self._dailymotion_url.format(video_id)).items()
+ return
+
+ # Try the JS for Brightcove video data
+ m = self._main_js_url_re.search(res.text)
+ if m:
+ log.debug(f'JS URL: {urljoin(self.url, m.group(1))}')
+ res = self.session.http.get(urljoin(self.url, m.group(1)))
+ m = self._js_brightcove_video_re.search(res.text)
+ if m:
+ account_id = m.group('account_id')
+ log.debug(f'Account ID: {account_id}')
+ video_id = m.group('video_id')
log.debug(f'Video ID: {video_id}')
- yield from self.session.streams(self._dailymotion_url.format(video_id)).items()
+ player = BrightcovePlayer(self.session, account_id)
+ yield from player.get_streams(video_id)
+ return
+
+ # Audio Live
+ audio_url = None
+ for source in itertags(res.text, 'source'):
+ url = source.attributes.get('src')
+ if url:
+ p_url = urlparse(url)
+ if p_url.path.endswith(('.mp3')):
+ audio_url = url
+
+ # Audio VOD
+ for div in itertags(res.text, 'div'):
+ if div.attributes.get('class') == 'audio-player':
+ audio_url = div.attributes.get('data-media-url')
+
+ if audio_url:
+ yield 'audio', HTTPStream(self.session, audio_url)
+ return
__plugin__ = BFMTV
| {"golden_diff": "diff --git a/src/streamlink/plugins/bfmtv.py b/src/streamlink/plugins/bfmtv.py\n--- a/src/streamlink/plugins/bfmtv.py\n+++ b/src/streamlink/plugins/bfmtv.py\n@@ -1,8 +1,11 @@\n import logging\n import re\n+from urllib.parse import urljoin, urlparse\n \n from streamlink.plugin import Plugin\n+from streamlink.plugin.api.utils import itertags\n from streamlink.plugins.brightcove import BrightcovePlayer\n+from streamlink.stream import HTTPStream\n \n log = logging.getLogger(__name__)\n \n@@ -22,29 +25,68 @@\n r'<iframe.*?src=\".*?/(?P<video_id>\\w+)\"',\n re.DOTALL\n )\n+ _main_js_url_re = re.compile(r'src=\"([\\w/]+/main\\.\\w+\\.js)\"')\n+ _js_brightcove_video_re = re.compile(\n+ r'i\\?\\([A-Z]=\"[^\"]+\",y=\"(?P<video_id>[0-9]+).*\"data-account\"\\s*:\\s*\"(?P<account_id>[0-9]+)',\n+ )\n \n @classmethod\n def can_handle_url(cls, url):\n return cls._url_re.match(url) is not None\n \n def _get_streams(self):\n- # Retrieve URL page and search for Brightcove video data\n res = self.session.http.get(self.url)\n- match = self._brightcove_video_re.search(res.text) or self._brightcove_video_alt_re.search(res.text)\n- if match is not None:\n- account_id = match.group('account_id')\n+\n+ m = self._brightcove_video_re.search(res.text) or self._brightcove_video_alt_re.search(res.text)\n+ if m:\n+ account_id = m.group('account_id')\n log.debug(f'Account ID: {account_id}')\n- video_id = match.group('video_id')\n+ video_id = m.group('video_id')\n log.debug(f'Video ID: {video_id}')\n player = BrightcovePlayer(self.session, account_id)\n yield from player.get_streams(video_id)\n- else:\n- # Try to find the Dailymotion video ID\n- match = self._embed_video_id_re.search(res.text)\n- if match is not None:\n- video_id = match.group('video_id')\n+ return\n+\n+ # Try to find the Dailymotion video ID\n+ m = self._embed_video_id_re.search(res.text)\n+ if m:\n+ video_id = m.group('video_id')\n+ log.debug(f'Video ID: {video_id}')\n+ yield from self.session.streams(self._dailymotion_url.format(video_id)).items()\n+ return\n+\n+ # Try the JS for Brightcove video data\n+ m = self._main_js_url_re.search(res.text)\n+ if m:\n+ log.debug(f'JS URL: {urljoin(self.url, m.group(1))}')\n+ res = self.session.http.get(urljoin(self.url, m.group(1)))\n+ m = self._js_brightcove_video_re.search(res.text)\n+ if m:\n+ account_id = m.group('account_id')\n+ log.debug(f'Account ID: {account_id}')\n+ video_id = m.group('video_id')\n log.debug(f'Video ID: {video_id}')\n- yield from self.session.streams(self._dailymotion_url.format(video_id)).items()\n+ player = BrightcovePlayer(self.session, account_id)\n+ yield from player.get_streams(video_id)\n+ return\n+\n+ # Audio Live\n+ audio_url = None\n+ for source in itertags(res.text, 'source'):\n+ url = source.attributes.get('src')\n+ if url:\n+ p_url = urlparse(url)\n+ if p_url.path.endswith(('.mp3')):\n+ audio_url = url\n+\n+ # Audio VOD\n+ for div in itertags(res.text, 'div'):\n+ if div.attributes.get('class') == 'audio-player':\n+ audio_url = div.attributes.get('data-media-url')\n+\n+ if audio_url:\n+ yield 'audio', HTTPStream(self.session, audio_url)\n+ return\n \n \n __plugin__ = BFMTV\n", "issue": "plugins.bfmtv: No playable streams found\n Hello. for few days, the plugin isn't working anymore\r\n\r\n\r\n/usr/local/bin/streamlink --loglevel debug https://rmcdecouverte.bfmtv.com/mediaplayer-direct/ best\r\n[cli][info] streamlink is running as root! 
Be careful!\r\n[cli][debug] OS: Linux-5.8.0-44-generic-x86_64-with-glibc2.29\r\n[cli][debug] Python: 3.8.5\r\n[cli][debug] Streamlink: 2.1.1\r\n[cli][debug] Requests(2.22.0), Socks(1.7.1), Websocket(0.58.0)\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://rmcdecouverte.bfmtv.com/mediaplayer-direct/\r\n[cli][debug] stream=['best']\r\n[cli][debug] --loglevel=debug\r\n[cli][info] Found matching plugin bfmtv for URL https://rmcdecouverte.bfmtv.com/mediaplayer-direct/\r\nerror: No playable streams found on this URL: https://rmcdecouverte.bfmtv.com/mediaplayer-direct/\n", "before_files": [{"content": "import logging\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugins.brightcove import BrightcovePlayer\n\nlog = logging.getLogger(__name__)\n\n\nclass BFMTV(Plugin):\n _url_re = re.compile(r'https://.+\\.(?:bfmtv|01net)\\.com')\n _dailymotion_url = 'https://www.dailymotion.com/embed/video/{}'\n _brightcove_video_re = re.compile(\n r'accountid=\"(?P<account_id>[0-9]+).*?videoid=\"(?P<video_id>[0-9]+)\"',\n re.DOTALL\n )\n _brightcove_video_alt_re = re.compile(\n r'data-account=\"(?P<account_id>[0-9]+).*?data-video-id=\"(?P<video_id>[0-9]+)\"',\n re.DOTALL\n )\n _embed_video_id_re = re.compile(\n r'<iframe.*?src=\".*?/(?P<video_id>\\w+)\"',\n re.DOTALL\n )\n\n @classmethod\n def can_handle_url(cls, url):\n return cls._url_re.match(url) is not None\n\n def _get_streams(self):\n # Retrieve URL page and search for Brightcove video data\n res = self.session.http.get(self.url)\n match = self._brightcove_video_re.search(res.text) or self._brightcove_video_alt_re.search(res.text)\n if match is not None:\n account_id = match.group('account_id')\n log.debug(f'Account ID: {account_id}')\n video_id = match.group('video_id')\n log.debug(f'Video ID: {video_id}')\n player = BrightcovePlayer(self.session, account_id)\n yield from player.get_streams(video_id)\n else:\n # Try to find the Dailymotion video ID\n match = self._embed_video_id_re.search(res.text)\n if match is not None:\n video_id = match.group('video_id')\n log.debug(f'Video ID: {video_id}')\n yield from self.session.streams(self._dailymotion_url.format(video_id)).items()\n\n\n__plugin__ = BFMTV\n", "path": "src/streamlink/plugins/bfmtv.py"}]} | 1,391 | 956 |
gh_patches_debug_486 | rasdani/github-patches | git_diff | DDMAL__CantusDB-228 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove the "Users Online" section in footer.
</issue>
<code>
[start of django/cantusdb_project/main_app/templatetags/helper_tags.py]
1 import calendar
2 from typing import Union, Optional
3 from django.utils.http import urlencode
4 from django import template
5 from main_app.models import Source
6 from django.utils.safestring import mark_safe
7
8 register = template.Library()
9
10
11 @register.filter(name="month_to_string")
12 def month_to_string(value: Optional[Union[str, int]]) -> Optional[Union[str, int]]:
13 """Converts month number to textual representation, 3 letters (Jan, Mar, etc)"""
14 if type(value) == int and value in range(1, 13):
15 return calendar.month_abbr[value]
16 else:
17 return value
18
19
20 @register.simple_tag(takes_context=True)
21 def url_add_get_params(context, **kwargs):
22 query = context["request"].GET.copy()
23 query.pop("page", None)
24 query.update(kwargs)
25 return query.urlencode()
26
27
28 @register.simple_tag(takes_context=False)
29 def source_links():
30 sources = (
31 Source.objects.filter(public=True, visible=True, segment__id=4063)
32 .exclude(siglum=None)
33 .values("siglum", "id")
34 .order_by("siglum")
35 )
36 options = ""
37 # <option value="source1">Source 1</option>
38 # <option value="source2">Source 2</option>
39 # <option value="source3">Source 3</option>
40 for source in sources:
41 option_str = (
42 f"<option value=source/{source['id']}>{source['siglum']}</option>\n"
43 )
44 options += option_str
45
46 return mark_safe(options)
47
[end of django/cantusdb_project/main_app/templatetags/helper_tags.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/django/cantusdb_project/main_app/templatetags/helper_tags.py b/django/cantusdb_project/main_app/templatetags/helper_tags.py
--- a/django/cantusdb_project/main_app/templatetags/helper_tags.py
+++ b/django/cantusdb_project/main_app/templatetags/helper_tags.py
@@ -44,3 +44,7 @@
options += option_str
return mark_safe(options)
+
[email protected](name='has_group')
+def has_group(user, group_name):
+ return user.groups.filter(name=group_name).exists()
| {"golden_diff": "diff --git a/django/cantusdb_project/main_app/templatetags/helper_tags.py b/django/cantusdb_project/main_app/templatetags/helper_tags.py\n--- a/django/cantusdb_project/main_app/templatetags/helper_tags.py\n+++ b/django/cantusdb_project/main_app/templatetags/helper_tags.py\n@@ -44,3 +44,7 @@\n options += option_str\n \n return mark_safe(options)\n+\[email protected](name='has_group') \n+def has_group(user, group_name):\n+ return user.groups.filter(name=group_name).exists()\n", "issue": "Remove the \"Users Online\" section in footer.\n\n", "before_files": [{"content": "import calendar\nfrom typing import Union, Optional\nfrom django.utils.http import urlencode\nfrom django import template\nfrom main_app.models import Source\nfrom django.utils.safestring import mark_safe\n\nregister = template.Library()\n\n\[email protected](name=\"month_to_string\")\ndef month_to_string(value: Optional[Union[str, int]]) -> Optional[Union[str, int]]:\n \"\"\"Converts month number to textual representation, 3 letters (Jan, Mar, etc)\"\"\"\n if type(value) == int and value in range(1, 13):\n return calendar.month_abbr[value]\n else:\n return value\n\n\[email protected]_tag(takes_context=True)\ndef url_add_get_params(context, **kwargs):\n query = context[\"request\"].GET.copy()\n query.pop(\"page\", None)\n query.update(kwargs)\n return query.urlencode()\n\n\[email protected]_tag(takes_context=False)\ndef source_links():\n sources = (\n Source.objects.filter(public=True, visible=True, segment__id=4063)\n .exclude(siglum=None)\n .values(\"siglum\", \"id\")\n .order_by(\"siglum\")\n )\n options = \"\"\n # <option value=\"source1\">Source 1</option>\n # <option value=\"source2\">Source 2</option>\n # <option value=\"source3\">Source 3</option>\n for source in sources:\n option_str = (\n f\"<option value=source/{source['id']}>{source['siglum']}</option>\\n\"\n )\n options += option_str\n\n return mark_safe(options)\n", "path": "django/cantusdb_project/main_app/templatetags/helper_tags.py"}]} | 1,000 | 138 |
gh_patches_debug_13979 | rasdani/github-patches | git_diff | facebookresearch__fairscale-975 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
the main branch is not compatible with python 3.6, but setup.py only requires ">=3.6"
python 3.6 can pip install latest fairscale
https://github.com/facebookresearch/fairscale/blob/1bc96fa8c69def6d990e42bfbd75f86146ce29bd/setup.py#L67
but, some code is not compatible with python 3.6
https://github.com/facebookresearch/fairscale/blob/1bc96fa8c69def6d990e42bfbd75f86146ce29bd/fairscale/experimental/nn/ssd_offload.py#L6
and python<3.7 has no dataclasses
https://github.com/facebookresearch/fairscale/blob/1bc96fa8c69def6d990e42bfbd75f86146ce29bd/fairscale/nn/data_parallel/fully_sharded_data_parallel.py#L8
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
4 #
5 # This source code is licensed under the BSD license found in the
6 # LICENSE file in the root directory of this source tree.
7
8 import os
9 import re
10
11 import setuptools
12
13 this_dir = os.path.dirname(os.path.abspath(__file__))
14
15
16 def fetch_requirements():
17 with open("requirements.txt") as f:
18 reqs = f.read().strip().split("\n")
19 return reqs
20
21
22 # https://packaging.python.org/guides/single-sourcing-package-version/
23 def find_version(version_file_path) -> str:
24 with open(version_file_path) as version_file:
25 version_match = re.search(r"^__version_tuple__ = (.*)", version_file.read(), re.M)
26 if version_match:
27 ver_tup = eval(version_match.group(1))
28 ver_str = ".".join([str(x) for x in ver_tup])
29 return ver_str
30 raise RuntimeError("Unable to find version tuple.")
31
32
33 extensions = []
34 cmdclass = {}
35
36 if os.getenv("BUILD_CUDA_EXTENSIONS", "0") == "1":
37 from torch.utils.cpp_extension import BuildExtension, CUDAExtension
38
39 extensions.extend(
40 [
41 CUDAExtension(
42 name="fairscale.fused_adam_cuda",
43 include_dirs=[os.path.join(this_dir, "fairscale/clib/fused_adam_cuda")],
44 sources=[
45 "fairscale/clib/fused_adam_cuda/fused_adam_cuda.cpp",
46 "fairscale/clib/fused_adam_cuda/fused_adam_cuda_kernel.cu",
47 ],
48 extra_compile_args={"cxx": ["-O3"], "nvcc": ["-O3", "--use_fast_math"]},
49 )
50 ]
51 )
52
53 cmdclass["build_ext"] = BuildExtension
54
55
56 if __name__ == "__main__":
57 setuptools.setup(
58 name="fairscale",
59 description="FairScale: A PyTorch library for large-scale and high-performance training.",
60 version=find_version("fairscale/version.py"),
61 setup_requires=["ninja"], # ninja is required to build extensions
62 install_requires=fetch_requirements(),
63 include_package_data=True,
64 packages=setuptools.find_packages(exclude=("tests", "tests.*")),
65 ext_modules=extensions,
66 cmdclass=cmdclass,
67 python_requires=">=3.6",
68 author="Facebook AI Research",
69 author_email="[email protected]",
70 long_description="FairScale is a PyTorch extension library for high performance and large scale training on one or multiple machines/nodes. This library extends basic PyTorch capabilities while adding new experimental ones.",
71 long_description_content_type="text/markdown",
72 classifiers=[
73 "Programming Language :: Python :: 3.7",
74 "Programming Language :: Python :: 3.8",
75 "Programming Language :: Python :: 3.9",
76 "License :: OSI Approved :: BSD License",
77 "Topic :: Scientific/Engineering :: Artificial Intelligence",
78 "Operating System :: OS Independent",
79 ],
80 )
81
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -64,7 +64,7 @@
packages=setuptools.find_packages(exclude=("tests", "tests.*")),
ext_modules=extensions,
cmdclass=cmdclass,
- python_requires=">=3.6",
+ python_requires=">=3.7",
author="Facebook AI Research",
author_email="[email protected]",
long_description="FairScale is a PyTorch extension library for high performance and large scale training on one or multiple machines/nodes. This library extends basic PyTorch capabilities while adding new experimental ones.",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -64,7 +64,7 @@\n packages=setuptools.find_packages(exclude=(\"tests\", \"tests.*\")),\n ext_modules=extensions,\n cmdclass=cmdclass,\n- python_requires=\">=3.6\",\n+ python_requires=\">=3.7\",\n author=\"Facebook AI Research\",\n author_email=\"[email protected]\",\n long_description=\"FairScale is a PyTorch extension library for high performance and large scale training on one or multiple machines/nodes. This library extends basic PyTorch capabilities while adding new experimental ones.\",\n", "issue": "the main branch is not compatible with python 3.6, but setup.py only requires \">=3.6\"\npython 3.6 can pip install latest fairscale\r\nhttps://github.com/facebookresearch/fairscale/blob/1bc96fa8c69def6d990e42bfbd75f86146ce29bd/setup.py#L67\r\n\r\nbut, some code is not compatible with python 3.6\r\nhttps://github.com/facebookresearch/fairscale/blob/1bc96fa8c69def6d990e42bfbd75f86146ce29bd/fairscale/experimental/nn/ssd_offload.py#L6\r\nand python<3.7 has no dataclasses\r\nhttps://github.com/facebookresearch/fairscale/blob/1bc96fa8c69def6d990e42bfbd75f86146ce29bd/fairscale/nn/data_parallel/fully_sharded_data_parallel.py#L8\r\n\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport os\nimport re\n\nimport setuptools\n\nthis_dir = os.path.dirname(os.path.abspath(__file__))\n\n\ndef fetch_requirements():\n with open(\"requirements.txt\") as f:\n reqs = f.read().strip().split(\"\\n\")\n return reqs\n\n\n# https://packaging.python.org/guides/single-sourcing-package-version/\ndef find_version(version_file_path) -> str:\n with open(version_file_path) as version_file:\n version_match = re.search(r\"^__version_tuple__ = (.*)\", version_file.read(), re.M)\n if version_match:\n ver_tup = eval(version_match.group(1))\n ver_str = \".\".join([str(x) for x in ver_tup])\n return ver_str\n raise RuntimeError(\"Unable to find version tuple.\")\n\n\nextensions = []\ncmdclass = {}\n\nif os.getenv(\"BUILD_CUDA_EXTENSIONS\", \"0\") == \"1\":\n from torch.utils.cpp_extension import BuildExtension, CUDAExtension\n\n extensions.extend(\n [\n CUDAExtension(\n name=\"fairscale.fused_adam_cuda\",\n include_dirs=[os.path.join(this_dir, \"fairscale/clib/fused_adam_cuda\")],\n sources=[\n \"fairscale/clib/fused_adam_cuda/fused_adam_cuda.cpp\",\n \"fairscale/clib/fused_adam_cuda/fused_adam_cuda_kernel.cu\",\n ],\n extra_compile_args={\"cxx\": [\"-O3\"], \"nvcc\": [\"-O3\", \"--use_fast_math\"]},\n )\n ]\n )\n\n cmdclass[\"build_ext\"] = BuildExtension\n\n\nif __name__ == \"__main__\":\n setuptools.setup(\n name=\"fairscale\",\n description=\"FairScale: A PyTorch library for large-scale and high-performance training.\",\n version=find_version(\"fairscale/version.py\"),\n setup_requires=[\"ninja\"], # ninja is required to build extensions\n install_requires=fetch_requirements(),\n include_package_data=True,\n packages=setuptools.find_packages(exclude=(\"tests\", \"tests.*\")),\n ext_modules=extensions,\n cmdclass=cmdclass,\n python_requires=\">=3.6\",\n author=\"Facebook AI Research\",\n author_email=\"[email protected]\",\n long_description=\"FairScale is a PyTorch extension library for high performance and large scale training on one or multiple machines/nodes. 
This library extends basic PyTorch capabilities while adding new experimental ones.\",\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"License :: OSI Approved :: BSD License\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Operating System :: OS Independent\",\n ],\n )\n", "path": "setup.py"}]} | 1,571 | 139 |
gh_patches_debug_16300 | rasdani/github-patches | git_diff | pre-commit__pre-commit-399 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Drop python2.6?
Is it worth attempting to continue to support python2.6?
</issue>
<code>
[start of setup.py]
1 from setuptools import find_packages
2 from setuptools import setup
3
4
5 setup(
6 name='pre_commit',
7 description=(
8 'A framework for managing and maintaining multi-language pre-commit '
9 'hooks.'
10 ),
11 url='https://github.com/pre-commit/pre-commit',
12 version='0.8.2',
13
14 author='Anthony Sottile',
15 author_email='[email protected]',
16
17 platforms='linux',
18 classifiers=[
19 'License :: OSI Approved :: MIT License',
20 'Programming Language :: Python :: 2',
21 'Programming Language :: Python :: 2.6',
22 'Programming Language :: Python :: 2.7',
23 'Programming Language :: Python :: 3',
24 'Programming Language :: Python :: 3.4',
25 'Programming Language :: Python :: 3.5',
26 'Programming Language :: Python :: Implementation :: CPython',
27 'Programming Language :: Python :: Implementation :: PyPy',
28 ],
29
30 packages=find_packages('.', exclude=('tests*', 'testing*')),
31 package_data={
32 'pre_commit': [
33 'resources/hook-tmpl',
34 'resources/pre-push-tmpl',
35 'resources/rbenv.tar.gz',
36 'resources/ruby-build.tar.gz',
37 'resources/ruby-download.tar.gz',
38 ]
39 },
40 install_requires=[
41 'aspy.yaml',
42 'cached-property',
43 'jsonschema',
44 'nodeenv>=0.11.1',
45 'pyterminalsize',
46 'pyyaml',
47 'virtualenv',
48 ],
49 extras_require={
50 ':python_version=="2.6"': ['argparse', 'ordereddict'],
51 },
52 entry_points={
53 'console_scripts': [
54 'pre-commit = pre_commit.main:main',
55 'pre-commit-validate-config = pre_commit.clientlib.validate_config:run', # noqa
56 'pre-commit-validate-manifest = pre_commit.clientlib.validate_manifest:run', # noqa
57 ],
58 },
59 )
60
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,7 +18,6 @@
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
- 'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
@@ -46,9 +45,6 @@
'pyyaml',
'virtualenv',
],
- extras_require={
- ':python_version=="2.6"': ['argparse', 'ordereddict'],
- },
entry_points={
'console_scripts': [
'pre-commit = pre_commit.main:main',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -18,7 +18,6 @@\n classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2',\n- 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n@@ -46,9 +45,6 @@\n 'pyyaml',\n 'virtualenv',\n ],\n- extras_require={\n- ':python_version==\"2.6\"': ['argparse', 'ordereddict'],\n- },\n entry_points={\n 'console_scripts': [\n 'pre-commit = pre_commit.main:main',\n", "issue": "Drop python2.6?\nIs it worth attempting to continue to support python2.6?\n\n", "before_files": [{"content": "from setuptools import find_packages\nfrom setuptools import setup\n\n\nsetup(\n name='pre_commit',\n description=(\n 'A framework for managing and maintaining multi-language pre-commit '\n 'hooks.'\n ),\n url='https://github.com/pre-commit/pre-commit',\n version='0.8.2',\n\n author='Anthony Sottile',\n author_email='[email protected]',\n\n platforms='linux',\n classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n ],\n\n packages=find_packages('.', exclude=('tests*', 'testing*')),\n package_data={\n 'pre_commit': [\n 'resources/hook-tmpl',\n 'resources/pre-push-tmpl',\n 'resources/rbenv.tar.gz',\n 'resources/ruby-build.tar.gz',\n 'resources/ruby-download.tar.gz',\n ]\n },\n install_requires=[\n 'aspy.yaml',\n 'cached-property',\n 'jsonschema',\n 'nodeenv>=0.11.1',\n 'pyterminalsize',\n 'pyyaml',\n 'virtualenv',\n ],\n extras_require={\n ':python_version==\"2.6\"': ['argparse', 'ordereddict'],\n },\n entry_points={\n 'console_scripts': [\n 'pre-commit = pre_commit.main:main',\n 'pre-commit-validate-config = pre_commit.clientlib.validate_config:run', # noqa\n 'pre-commit-validate-manifest = pre_commit.clientlib.validate_manifest:run', # noqa\n ],\n },\n)\n", "path": "setup.py"}]} | 1,070 | 175 |
gh_patches_debug_21753 | rasdani/github-patches | git_diff | Flexget__Flexget-1600 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
nyaa changed TLD
hi peeps. it seems they switched TLD from .eu to .se
i changed my local flexget/plugins/sites/nyaa.py, removed the pyc & reloaded the daemon. its pulling stuff. but i aint got the skills to send a pull request, so i thought i'd do the next best thing and say something
if you don't want to do anything, i guess thats fine too. the old is redirecting to the new
</issue>
<code>
[start of flexget/plugins/sites/nyaa.py]
1 from __future__ import unicode_literals, division, absolute_import
2 from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
3 from future.moves.urllib.parse import quote
4
5 import logging
6
7 import feedparser
8
9 from flexget import plugin
10 from flexget.entry import Entry
11 from flexget.event import event
12 from flexget.utils.search import normalize_unicode
13
14 log = logging.getLogger('nyaa')
15
16 # TODO: Other categories
17 CATEGORIES = {'all': '0_0',
18 'anime': '1_0',
19 'anime eng': '1_37',
20 'anime non-eng': '1_38',
21 'anime raw': '1_11'}
22 FILTERS = ['all', 'filter remakes', 'trusted only', 'a+ only']
23
24
25 class UrlRewriteNyaa(object):
26 """Nyaa urlrewriter and search plugin."""
27
28 schema = {
29 'oneOf': [
30 {'type': 'string', 'enum': list(CATEGORIES)},
31 {
32 'type': 'object',
33 'properties': {
34 'category': {'type': 'string', 'enum': list(CATEGORIES)},
35 'filter': {'type': 'string', 'enum': list(FILTERS)}
36 },
37 'additionalProperties': False
38 }
39 ]
40 }
41
42 def search(self, task, entry, config):
43 if not isinstance(config, dict):
44 config = {'category': config}
45 config.setdefault('category', 'anime eng')
46 config.setdefault('filter', 'all')
47 entries = set()
48 for search_string in entry.get('search_strings', [entry['title']]):
49 name = normalize_unicode(search_string)
50 url = 'http://www.nyaa.eu/?page=rss&cats=%s&filter=%s&term=%s' % (
51 CATEGORIES[config['category']], FILTERS.index(config['filter']), quote(name.encode('utf-8')))
52
53 log.debug('requesting: %s' % url)
54 rss = feedparser.parse(url)
55
56 status = rss.get('status', False)
57 if status != 200:
58 log.debug('Search result not 200 (OK), received %s' % status)
59 if status >= 400:
60 continue
61
62 ex = rss.get('bozo_exception', False)
63 if ex:
64 log.error('Got bozo_exception (bad feed) on %s' % url)
65 continue
66
67 for item in rss.entries:
68 entry = Entry()
69 entry['title'] = item.title
70 entry['url'] = item.link
71 # TODO: parse some shit
72 # entry['torrent_seeds'] = int(item.seeds)
73 # entry['torrent_leeches'] = int(item.leechs)
74 # entry['search_sort'] = torrent_availability(entry['torrent_seeds'], entry['torrent_leeches'])
75 # entry['content_size'] = int(item.size) / 1024 / 1024
76
77 entries.add(entry)
78
79 return entries
80
81 def url_rewritable(self, task, entry):
82 return entry['url'].startswith('http://www.nyaa.eu/?page=torrentinfo&tid=')
83
84 def url_rewrite(self, task, entry):
85 entry['url'] = entry['url'].replace('torrentinfo', 'download')
86
87
88 @event('plugin.register')
89 def register_plugin():
90 plugin.register(UrlRewriteNyaa, 'nyaa', groups=['search', 'urlrewriter'], api_ver=2)
91
[end of flexget/plugins/sites/nyaa.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flexget/plugins/sites/nyaa.py b/flexget/plugins/sites/nyaa.py
--- a/flexget/plugins/sites/nyaa.py
+++ b/flexget/plugins/sites/nyaa.py
@@ -47,7 +47,7 @@
entries = set()
for search_string in entry.get('search_strings', [entry['title']]):
name = normalize_unicode(search_string)
- url = 'http://www.nyaa.eu/?page=rss&cats=%s&filter=%s&term=%s' % (
+ url = 'http://www.nyaa.se/?page=rss&cats=%s&filter=%s&term=%s' % (
CATEGORIES[config['category']], FILTERS.index(config['filter']), quote(name.encode('utf-8')))
log.debug('requesting: %s' % url)
@@ -79,7 +79,7 @@
return entries
def url_rewritable(self, task, entry):
- return entry['url'].startswith('http://www.nyaa.eu/?page=torrentinfo&tid=')
+ return entry['url'].startswith('http://www.nyaa.se/?page=torrentinfo&tid=')
def url_rewrite(self, task, entry):
entry['url'] = entry['url'].replace('torrentinfo', 'download')
| {"golden_diff": "diff --git a/flexget/plugins/sites/nyaa.py b/flexget/plugins/sites/nyaa.py\n--- a/flexget/plugins/sites/nyaa.py\n+++ b/flexget/plugins/sites/nyaa.py\n@@ -47,7 +47,7 @@\n entries = set()\n for search_string in entry.get('search_strings', [entry['title']]):\n name = normalize_unicode(search_string)\n- url = 'http://www.nyaa.eu/?page=rss&cats=%s&filter=%s&term=%s' % (\n+ url = 'http://www.nyaa.se/?page=rss&cats=%s&filter=%s&term=%s' % (\n CATEGORIES[config['category']], FILTERS.index(config['filter']), quote(name.encode('utf-8')))\n \n log.debug('requesting: %s' % url)\n@@ -79,7 +79,7 @@\n return entries\n \n def url_rewritable(self, task, entry):\n- return entry['url'].startswith('http://www.nyaa.eu/?page=torrentinfo&tid=')\n+ return entry['url'].startswith('http://www.nyaa.se/?page=torrentinfo&tid=')\n \n def url_rewrite(self, task, entry):\n entry['url'] = entry['url'].replace('torrentinfo', 'download')\n", "issue": "nyaa changed TLD\nhi peeps. it seems they switched TLD from .eu to .se\r\n\r\ni changed my local flexget/plugins/sites/nyaa.py, removed the pyc & reloaded the daemon. its pulling stuff. but i aint got the skills to send a pull request, so i thought i'd do the next best thing and say something\r\n\r\nif you don't want to do anything, i guess thats fine too. the old is redirecting to the new\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\nfrom future.moves.urllib.parse import quote\n\nimport logging\n\nimport feedparser\n\nfrom flexget import plugin\nfrom flexget.entry import Entry\nfrom flexget.event import event\nfrom flexget.utils.search import normalize_unicode\n\nlog = logging.getLogger('nyaa')\n\n# TODO: Other categories\nCATEGORIES = {'all': '0_0',\n 'anime': '1_0',\n 'anime eng': '1_37',\n 'anime non-eng': '1_38',\n 'anime raw': '1_11'}\nFILTERS = ['all', 'filter remakes', 'trusted only', 'a+ only']\n\n\nclass UrlRewriteNyaa(object):\n \"\"\"Nyaa urlrewriter and search plugin.\"\"\"\n\n schema = {\n 'oneOf': [\n {'type': 'string', 'enum': list(CATEGORIES)},\n {\n 'type': 'object',\n 'properties': {\n 'category': {'type': 'string', 'enum': list(CATEGORIES)},\n 'filter': {'type': 'string', 'enum': list(FILTERS)}\n },\n 'additionalProperties': False\n }\n ]\n }\n\n def search(self, task, entry, config):\n if not isinstance(config, dict):\n config = {'category': config}\n config.setdefault('category', 'anime eng')\n config.setdefault('filter', 'all')\n entries = set()\n for search_string in entry.get('search_strings', [entry['title']]):\n name = normalize_unicode(search_string)\n url = 'http://www.nyaa.eu/?page=rss&cats=%s&filter=%s&term=%s' % (\n CATEGORIES[config['category']], FILTERS.index(config['filter']), quote(name.encode('utf-8')))\n\n log.debug('requesting: %s' % url)\n rss = feedparser.parse(url)\n\n status = rss.get('status', False)\n if status != 200:\n log.debug('Search result not 200 (OK), received %s' % status)\n if status >= 400:\n continue\n\n ex = rss.get('bozo_exception', False)\n if ex:\n log.error('Got bozo_exception (bad feed) on %s' % url)\n continue\n\n for item in rss.entries:\n entry = Entry()\n entry['title'] = item.title\n entry['url'] = item.link\n # TODO: parse some shit\n # entry['torrent_seeds'] = int(item.seeds)\n # entry['torrent_leeches'] = int(item.leechs)\n # entry['search_sort'] = torrent_availability(entry['torrent_seeds'], entry['torrent_leeches'])\n # entry['content_size'] = 
int(item.size) / 1024 / 1024\n\n entries.add(entry)\n\n return entries\n\n def url_rewritable(self, task, entry):\n return entry['url'].startswith('http://www.nyaa.eu/?page=torrentinfo&tid=')\n\n def url_rewrite(self, task, entry):\n entry['url'] = entry['url'].replace('torrentinfo', 'download')\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(UrlRewriteNyaa, 'nyaa', groups=['search', 'urlrewriter'], api_ver=2)\n", "path": "flexget/plugins/sites/nyaa.py"}]} | 1,574 | 293 |
gh_patches_debug_5302 | rasdani/github-patches | git_diff | searx__searx-2991 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Only a lower case "ip" displays the IP address
When the feature is enabled to show a user's IP address when "ip" is entered into the search bar, it only does so when it is all lowercase. Querying "IP" does not return an IP. This seems like a bug, apologies if this was intended.
Thanks
</issue>
<code>
[start of searx/plugins/self_info.py]
1 '''
2 searx is free software: you can redistribute it and/or modify
3 it under the terms of the GNU Affero General Public License as published by
4 the Free Software Foundation, either version 3 of the License, or
5 (at your option) any later version.
6
7 searx is distributed in the hope that it will be useful,
8 but WITHOUT ANY WARRANTY; without even the implied warranty of
9 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 GNU Affero General Public License for more details.
11
12 You should have received a copy of the GNU Affero General Public License
13 along with searx. If not, see < http://www.gnu.org/licenses/ >.
14
15 (C) 2015 by Adam Tauber, <[email protected]>
16 '''
17 from flask_babel import gettext
18 import re
19 name = gettext('Self Informations')
20 description = gettext('Displays your IP if the query is "ip" and your user agent if the query contains "user agent".')
21 default_on = True
22
23
24 # Self User Agent regex
25 p = re.compile('.*user[ -]agent.*', re.IGNORECASE)
26
27
28 # attach callback to the post search hook
29 # request: flask request object
30 # ctx: the whole local context of the pre search hook
31 def post_search(request, search):
32 if search.search_query.pageno > 1:
33 return True
34 if search.search_query.query == 'ip':
35 x_forwarded_for = request.headers.getlist("X-Forwarded-For")
36 if x_forwarded_for:
37 ip = x_forwarded_for[0]
38 else:
39 ip = request.remote_addr
40 search.result_container.answers['ip'] = {'answer': ip}
41 elif p.match(search.search_query.query):
42 ua = request.user_agent
43 search.result_container.answers['user-agent'] = {'answer': ua}
44 return True
45
[end of searx/plugins/self_info.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/searx/plugins/self_info.py b/searx/plugins/self_info.py
--- a/searx/plugins/self_info.py
+++ b/searx/plugins/self_info.py
@@ -31,7 +31,7 @@
def post_search(request, search):
if search.search_query.pageno > 1:
return True
- if search.search_query.query == 'ip':
+ if search.search_query.query.lower() == 'ip':
x_forwarded_for = request.headers.getlist("X-Forwarded-For")
if x_forwarded_for:
ip = x_forwarded_for[0]
| {"golden_diff": "diff --git a/searx/plugins/self_info.py b/searx/plugins/self_info.py\n--- a/searx/plugins/self_info.py\n+++ b/searx/plugins/self_info.py\n@@ -31,7 +31,7 @@\n def post_search(request, search):\n if search.search_query.pageno > 1:\n return True\n- if search.search_query.query == 'ip':\n+ if search.search_query.query.lower() == 'ip':\n x_forwarded_for = request.headers.getlist(\"X-Forwarded-For\")\n if x_forwarded_for:\n ip = x_forwarded_for[0]\n", "issue": "Only a lower case \"ip\" displays the IP address\nWhen the feature is enabled to show a user's IP address when \"ip\" is entered into the search bar, it only does so when it is all lowercase. Querying \"IP\" does not return an IP. This seems like a bug, apologies if this was intended.\r\n\r\nThanks\n", "before_files": [{"content": "'''\nsearx is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nsearx is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with searx. If not, see < http://www.gnu.org/licenses/ >.\n\n(C) 2015 by Adam Tauber, <[email protected]>\n'''\nfrom flask_babel import gettext\nimport re\nname = gettext('Self Informations')\ndescription = gettext('Displays your IP if the query is \"ip\" and your user agent if the query contains \"user agent\".')\ndefault_on = True\n\n\n# Self User Agent regex\np = re.compile('.*user[ -]agent.*', re.IGNORECASE)\n\n\n# attach callback to the post search hook\n# request: flask request object\n# ctx: the whole local context of the pre search hook\ndef post_search(request, search):\n if search.search_query.pageno > 1:\n return True\n if search.search_query.query == 'ip':\n x_forwarded_for = request.headers.getlist(\"X-Forwarded-For\")\n if x_forwarded_for:\n ip = x_forwarded_for[0]\n else:\n ip = request.remote_addr\n search.result_container.answers['ip'] = {'answer': ip}\n elif p.match(search.search_query.query):\n ua = request.user_agent\n search.result_container.answers['user-agent'] = {'answer': ua}\n return True\n", "path": "searx/plugins/self_info.py"}]} | 1,087 | 135 |
gh_patches_debug_24158 | rasdani/github-patches | git_diff | pystiche__pystiche-9 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
propagate_guide() of Encoder raises a TypeError
When running the replication of [Gatys et al. 2017](https://github.com/pmeier/pystiche/blob/3260b68ea8dd88de433777ad3750d7abe3894743/replication/gatys_et_al_2017.py#L254), the following error is raised:
```
TypeError: Unions cannot be used with isinstance().
```
This points towards the [Encoder](https://github.com/pmeier/pystiche/blob/3260b68ea8dd88de433777ad3750d7abe3894743/pystiche/encoding/encoder.py#L12), specifically these `if` statements in the `propagate_guide()` method:
https://github.com/pmeier/pystiche/blob/3260b68ea8dd88de433777ad3750d7abe3894743/pystiche/encoding/encoder.py#L50-L53
`PoolModule` and `ConvModule` are defined in `pystiche.typing`:
https://github.com/pmeier/pystiche/blob/3260b68ea8dd88de433777ad3750d7abe3894743/pystiche/typing.py#L18-L23
</issue>
<code>
[start of pystiche/typing.py]
1 from typing import Union, Sequence
2 import torch
3 from torch import nn
4
5 __all__ = [
6 "Numeric",
7 "TensorMeta",
8 "ConvModule",
9 "ConvModuleMeta",
10 "PoolModule",
11 "PoolModuleMeta",
12 ]
13
14 Numeric = Union[int, float]
15
16 TensorMeta = Union[torch.device, torch.dtype]
17
18 ConvModule = Union[nn.Conv1d, nn.Conv2d, nn.Conv2d]
19 ConvModuleMeta = Union[int, Sequence[int]]
20
21 PoolModule = Union[
22 nn.AvgPool1d, nn.AvgPool2d, nn.AvgPool3d, nn.MaxPool1d, nn.MaxPool2d, nn.MaxPool3d
23 ]
24 PoolModuleMeta = Union[int, Sequence[int]]
25
[end of pystiche/typing.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pystiche/typing.py b/pystiche/typing.py
--- a/pystiche/typing.py
+++ b/pystiche/typing.py
@@ -1,4 +1,4 @@
-from typing import Union, Sequence
+from typing import Union, Any, Sequence
import torch
from torch import nn
@@ -6,8 +6,10 @@
"Numeric",
"TensorMeta",
"ConvModule",
+ "is_conv_module",
"ConvModuleMeta",
"PoolModule",
+ "is_pool_module",
"PoolModuleMeta",
]
@@ -15,10 +17,32 @@
TensorMeta = Union[torch.device, torch.dtype]
-ConvModule = Union[nn.Conv1d, nn.Conv2d, nn.Conv2d]
+ConvModule = Union[nn.Conv1d, nn.Conv2d, nn.Conv3d]
+
+
+def is_conv_module(x: Any) -> bool:
+ return isinstance(x, (nn.Conv1d, nn.Conv2d, nn.Conv3d))
+
+
ConvModuleMeta = Union[int, Sequence[int]]
PoolModule = Union[
nn.AvgPool1d, nn.AvgPool2d, nn.AvgPool3d, nn.MaxPool1d, nn.MaxPool2d, nn.MaxPool3d
]
+
+
+def is_pool_module(x: Any) -> bool:
+ return isinstance(
+ x,
+ (
+ nn.AvgPool1d,
+ nn.AvgPool2d,
+ nn.AvgPool3d,
+ nn.MaxPool1d,
+ nn.MaxPool2d,
+ nn.MaxPool3d,
+ ),
+ )
+
+
PoolModuleMeta = Union[int, Sequence[int]]
| {"golden_diff": "diff --git a/pystiche/typing.py b/pystiche/typing.py\n--- a/pystiche/typing.py\n+++ b/pystiche/typing.py\n@@ -1,4 +1,4 @@\n-from typing import Union, Sequence\n+from typing import Union, Any, Sequence\n import torch\n from torch import nn\n \n@@ -6,8 +6,10 @@\n \"Numeric\",\n \"TensorMeta\",\n \"ConvModule\",\n+ \"is_conv_module\",\n \"ConvModuleMeta\",\n \"PoolModule\",\n+ \"is_pool_module\",\n \"PoolModuleMeta\",\n ]\n \n@@ -15,10 +17,32 @@\n \n TensorMeta = Union[torch.device, torch.dtype]\n \n-ConvModule = Union[nn.Conv1d, nn.Conv2d, nn.Conv2d]\n+ConvModule = Union[nn.Conv1d, nn.Conv2d, nn.Conv3d]\n+\n+\n+def is_conv_module(x: Any) -> bool:\n+ return isinstance(x, (nn.Conv1d, nn.Conv2d, nn.Conv3d))\n+\n+\n ConvModuleMeta = Union[int, Sequence[int]]\n \n PoolModule = Union[\n nn.AvgPool1d, nn.AvgPool2d, nn.AvgPool3d, nn.MaxPool1d, nn.MaxPool2d, nn.MaxPool3d\n ]\n+\n+\n+def is_pool_module(x: Any) -> bool:\n+ return isinstance(\n+ x,\n+ (\n+ nn.AvgPool1d,\n+ nn.AvgPool2d,\n+ nn.AvgPool3d,\n+ nn.MaxPool1d,\n+ nn.MaxPool2d,\n+ nn.MaxPool3d,\n+ ),\n+ )\n+\n+\n PoolModuleMeta = Union[int, Sequence[int]]\n", "issue": "propagate_guide() of Encoder raises a TypeError\nWhen running the replication of [Gatys et al. 2017](https://github.com/pmeier/pystiche/blob/3260b68ea8dd88de433777ad3750d7abe3894743/replication/gatys_et_al_2017.py#L254), the following error is raised:\r\n\r\n```\r\nTypeError: Unions cannot be used with isinstance().\r\n```\r\n\r\nThis points towards the [Encoder](https://github.com/pmeier/pystiche/blob/3260b68ea8dd88de433777ad3750d7abe3894743/pystiche/encoding/encoder.py#L12), specifically these `if` statements in the `propagate_guide()` method:\r\n\r\nhttps://github.com/pmeier/pystiche/blob/3260b68ea8dd88de433777ad3750d7abe3894743/pystiche/encoding/encoder.py#L50-L53\r\n\r\n`PoolModule` and `ConvModule` are defined in `pystiche.typing`:\r\n\r\nhttps://github.com/pmeier/pystiche/blob/3260b68ea8dd88de433777ad3750d7abe3894743/pystiche/typing.py#L18-L23\r\n\n", "before_files": [{"content": "from typing import Union, Sequence\nimport torch\nfrom torch import nn\n\n__all__ = [\n \"Numeric\",\n \"TensorMeta\",\n \"ConvModule\",\n \"ConvModuleMeta\",\n \"PoolModule\",\n \"PoolModuleMeta\",\n]\n\nNumeric = Union[int, float]\n\nTensorMeta = Union[torch.device, torch.dtype]\n\nConvModule = Union[nn.Conv1d, nn.Conv2d, nn.Conv2d]\nConvModuleMeta = Union[int, Sequence[int]]\n\nPoolModule = Union[\n nn.AvgPool1d, nn.AvgPool2d, nn.AvgPool3d, nn.MaxPool1d, nn.MaxPool2d, nn.MaxPool3d\n]\nPoolModuleMeta = Union[int, Sequence[int]]\n", "path": "pystiche/typing.py"}]} | 1,079 | 389 |
gh_patches_debug_1220 | rasdani/github-patches | git_diff | DataBiosphere__toil-239 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Jenkins should only deploy to PyPI when building off the master branch
</issue>
<code>
[start of setup.py]
1 from setuptools import setup, find_packages
2
3 setup(
4 name='toil',
5 version='3.0.4',
6 description='Pipeline management software for clusters.',
7 author='Benedict Paten',
8 author_email='[email protected]',
9 url="https://github.com/BD2KGenomics/toil",
10 install_requires=['bd2k-python-lib>=1.7.dev1'],
11 extras_require={
12 'mesos': [
13 'mesos.interface==0.22.0',
14 'psutil==3.0.1' ],
15 'aws': [
16 'boto==2.38.0' ] },
17 package_dir={ '': 'src' },
18 packages=find_packages( 'src', exclude=[ '*.test' ] ),
19 entry_points={
20 'console_scripts': [
21 'toilKill = toil.utils.toilKill:main',
22 'toilStatus = toil.utils.toilStatus:main',
23 'toilStats = toil.utils.toilStats:main',
24 'toilRestarts = toil.utils.toilRestarts:main',
25 'multijob = toil.batchSystems.multijob:main',
26 'toil-mesos-executor = toil.batchSystems.mesos.executor:main [mesos]'] } )
27
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -2,7 +2,7 @@
setup(
name='toil',
- version='3.0.4',
+ version='3.0.5.dev1',
description='Pipeline management software for clusters.',
author='Benedict Paten',
author_email='[email protected]',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -2,7 +2,7 @@\n \n setup(\n name='toil',\n- version='3.0.4',\n+ version='3.0.5.dev1',\n description='Pipeline management software for clusters.',\n author='Benedict Paten',\n author_email='[email protected]',\n", "issue": "Jenkins should only deploy to PyPI when building off the master branch\n\n", "before_files": [{"content": "from setuptools import setup, find_packages\n\nsetup(\n name='toil',\n version='3.0.4',\n description='Pipeline management software for clusters.',\n author='Benedict Paten',\n author_email='[email protected]',\n url=\"https://github.com/BD2KGenomics/toil\",\n install_requires=['bd2k-python-lib>=1.7.dev1'],\n extras_require={\n 'mesos': [\n 'mesos.interface==0.22.0',\n 'psutil==3.0.1' ],\n 'aws': [\n 'boto==2.38.0' ] },\n package_dir={ '': 'src' },\n packages=find_packages( 'src', exclude=[ '*.test' ] ),\n entry_points={\n 'console_scripts': [\n 'toilKill = toil.utils.toilKill:main',\n 'toilStatus = toil.utils.toilStatus:main',\n 'toilStats = toil.utils.toilStats:main',\n 'toilRestarts = toil.utils.toilRestarts:main',\n 'multijob = toil.batchSystems.multijob:main',\n 'toil-mesos-executor = toil.batchSystems.mesos.executor:main [mesos]'] } )\n", "path": "setup.py"}]} | 874 | 94 |
gh_patches_debug_59245 | rasdani/github-patches | git_diff | facebookresearch__hydra-287 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug] example of override fail in multirun
This fails
`python examples/tutorial/5_composition/my_app.py -m db=mysql,postgresql db.user=omry`
</issue>
<code>
[start of setup.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 import codecs
3 import distutils
4 import os
5 import re
6 import shutil
7 from os.path import join, exists, isdir
8
9 from setuptools import setup, find_packages
10
11 here = os.path.abspath(os.path.dirname(__file__))
12
13
14 def read(*parts):
15 with codecs.open(os.path.join(here, *parts), "r") as fp:
16 return fp.read()
17
18
19 def find_version(*file_paths):
20 version_file = read(*file_paths)
21 version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
22 if version_match:
23 return version_match.group(1)
24 raise RuntimeError("Unable to find version string.")
25
26
27 class CleanCommand(distutils.cmd.Command):
28 """
29 Our custom command to clean out junk files.
30 """
31
32 description = "Cleans out junk files we don't want in the repo"
33 user_options = []
34
35 def initialize_options(self):
36 pass
37
38 def finalize_options(self):
39 pass
40
41 @staticmethod
42 def find(root, includes, excludes=[]):
43 res = []
44 for parent, dirs, files in os.walk(root):
45 for f in dirs + files:
46 add = list()
47 for include in includes:
48 if re.findall(include, f):
49 add.append(join(parent, f))
50 res.extend(add)
51 final_list = []
52 # Exclude things that matches an exclude pattern
53 for ex in excludes:
54 for file in res:
55 if not re.findall(ex, file):
56 final_list.append(file)
57 return final_list
58
59 def run(self):
60 delete_patterns = [
61 ".eggs",
62 ".egg-info",
63 ".pytest_cache",
64 "build",
65 "dist",
66 "__pycache__",
67 ".pyc",
68 ]
69 deletion_list = CleanCommand.find(
70 ".", includes=delete_patterns, excludes=["\\.nox/.*"]
71 )
72
73 for f in deletion_list:
74 if exists(f):
75 if isdir(f):
76 shutil.rmtree(f, ignore_errors=True)
77 else:
78 os.unlink(f)
79
80
81 with open("README.md", "r") as fh:
82 LONG_DESC = fh.read()
83 setup(
84 cmdclass={"clean": CleanCommand},
85 name="hydra-core",
86 version=find_version("hydra", "__init__.py"),
87 author="Omry Yadan",
88 author_email="[email protected]",
89 description="Hydra is a library for writing flexible command line applications",
90 long_description=LONG_DESC,
91 long_description_content_type="text/markdown",
92 url="https://github.com/facebookresearch/hydra",
93 keywords="command-line configuration yaml tab-completion",
94 packages=find_packages(),
95 include_package_data=True,
96 classifiers=[
97 "License :: OSI Approved :: MIT License",
98 "Development Status :: 4 - Beta",
99 "Programming Language :: Python :: 2.7",
100 "Programming Language :: Python :: 3.6",
101 "Programming Language :: Python :: 3.7",
102 "Operating System :: POSIX :: Linux",
103 "Operating System :: MacOS",
104 "Operating System :: Microsoft :: Windows",
105 ],
106 install_requires=[
107 "omegaconf>=1.4.0rc2",
108 'pathlib2>=2.2.0;python_version<"3.0"',
109 ],
110 # Install development dependencies with
111 # pip install -e .[dev]
112 extras_require={
113 "dev": [
114 "black",
115 "coverage",
116 "flake8",
117 "flake8-copyright",
118 "nox",
119 "pre-commit",
120 "pytest",
121 "setuptools",
122 "towncrier",
123 "twine",
124 ]
125 },
126 )
127
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -104,7 +104,7 @@
"Operating System :: Microsoft :: Windows",
],
install_requires=[
- "omegaconf>=1.4.0rc2",
+ "omegaconf>=1.4.0rc3",
'pathlib2>=2.2.0;python_version<"3.0"',
],
# Install development dependencies with
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -104,7 +104,7 @@\n \"Operating System :: Microsoft :: Windows\",\n ],\n install_requires=[\n- \"omegaconf>=1.4.0rc2\",\n+ \"omegaconf>=1.4.0rc3\",\n 'pathlib2>=2.2.0;python_version<\"3.0\"',\n ],\n # Install development dependencies with\n", "issue": "[Bug] example of override fail in multirun\nThis fails\r\n\r\n`python examples/tutorial/5_composition/my_app.py -m db=mysql,postgresql db.user=omry`\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport codecs\nimport distutils\nimport os\nimport re\nimport shutil\nfrom os.path import join, exists, isdir\n\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(*parts):\n with codecs.open(os.path.join(here, *parts), \"r\") as fp:\n return fp.read()\n\n\ndef find_version(*file_paths):\n version_file = read(*file_paths)\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nclass CleanCommand(distutils.cmd.Command):\n \"\"\"\n Our custom command to clean out junk files.\n \"\"\"\n\n description = \"Cleans out junk files we don't want in the repo\"\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n @staticmethod\n def find(root, includes, excludes=[]):\n res = []\n for parent, dirs, files in os.walk(root):\n for f in dirs + files:\n add = list()\n for include in includes:\n if re.findall(include, f):\n add.append(join(parent, f))\n res.extend(add)\n final_list = []\n # Exclude things that matches an exclude pattern\n for ex in excludes:\n for file in res:\n if not re.findall(ex, file):\n final_list.append(file)\n return final_list\n\n def run(self):\n delete_patterns = [\n \".eggs\",\n \".egg-info\",\n \".pytest_cache\",\n \"build\",\n \"dist\",\n \"__pycache__\",\n \".pyc\",\n ]\n deletion_list = CleanCommand.find(\n \".\", includes=delete_patterns, excludes=[\"\\\\.nox/.*\"]\n )\n\n for f in deletion_list:\n if exists(f):\n if isdir(f):\n shutil.rmtree(f, ignore_errors=True)\n else:\n os.unlink(f)\n\n\nwith open(\"README.md\", \"r\") as fh:\n LONG_DESC = fh.read()\n setup(\n cmdclass={\"clean\": CleanCommand},\n name=\"hydra-core\",\n version=find_version(\"hydra\", \"__init__.py\"),\n author=\"Omry Yadan\",\n author_email=\"[email protected]\",\n description=\"Hydra is a library for writing flexible command line applications\",\n long_description=LONG_DESC,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/facebookresearch/hydra\",\n keywords=\"command-line configuration yaml tab-completion\",\n packages=find_packages(),\n include_package_data=True,\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Development Status :: 4 - Beta\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: MacOS\",\n \"Operating System :: Microsoft :: Windows\",\n ],\n install_requires=[\n \"omegaconf>=1.4.0rc2\",\n 'pathlib2>=2.2.0;python_version<\"3.0\"',\n ],\n # Install development dependencies with\n # pip install -e .[dev]\n extras_require={\n \"dev\": [\n \"black\",\n \"coverage\",\n \"flake8\",\n \"flake8-copyright\",\n \"nox\",\n \"pre-commit\",\n \"pytest\",\n 
\"setuptools\",\n \"towncrier\",\n \"twine\",\n ]\n },\n )\n", "path": "setup.py"}]} | 1,640 | 106 |
gh_patches_debug_5110 | rasdani/github-patches | git_diff | mindsdb__mindsdb-177 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError: 'PredictTransactionOutputData' object has no attribute 'predicted_values'
**Describe the bug**
After running predict.py in the example mindsdb/docs/examples/time_series/ I got the following AttributeError:
```
Traceback (most recent call last):
File "predict.py", line 12, in <module>
print(result.predicted_values)
AttributeError: 'PredictTransactionOutputData' object has no attribute 'predicted_values'
```
**To Reproduce**
Steps to reproduce the behavior:
1. First run train.py, with python3 train.py
2. When training is finished, run predict.py with python3 predict.py
3. See error
**Expected behavior**
I expected to see the predicted values.
**Desktop (please complete the following information):**
- OS: Ubuntu 18.04.2 LTS
- mindsdb 1.0.5
- pip 19.1
- python 3.6.7
- virtualenv 15.1.0
- urllib3 1.24
**Additional context**
Before the Traceback I got the following warning many times:
```
WARNING:mindsdb-logger-core-logger:libs/backends/ludwig.py:141 - ('Missing previous predicted values for output column: '
'Main_Engine_Fuel_Consumption_MT_day, these should be included in your input '
'under the name: previous_Main_Engine_Fuel_Consumption_MT_day')
```
Finally, I've installed mindsdb using pip3 inside a virtualenvironment.
</issue>
<code>
[start of docs/examples/time_series/predict.py]
1 """
2
3 """
4
5 from mindsdb import Predictor
6
7 # Here we use the model to make predictions (NOTE: You need to run train.py first)
8 result = Predictor(name='fuel').predict(when_data = 'fuel_predict.csv')
9
10 # you can now print the results
11 print('The predicted main engine fuel consumption')
12 print(result.predicted_values)
[end of docs/examples/time_series/predict.py]
[start of docs/examples/nlp/predict.py]
1 from mindsdb import *
2
3 mdb = Predictor(name='real_estate_desc')
4
5 # Here we use the model to make predictions (NOTE: You need to run train.py first)
6 result = mdb.predict(
7 when={
8 "description": """A true gem
9 rooms: 2
10 bathrooms: 0
11 neighboorhood: thowsand_oaks
12 amenities: parking
13 area: 84.0291068642868
14 condition: great !
15 """
16 }
17 )
18
19 # you can now print the results
20 print('The predicted number of rooms')
21 print(result.predicted_values)
22
[end of docs/examples/nlp/predict.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/examples/nlp/predict.py b/docs/examples/nlp/predict.py
--- a/docs/examples/nlp/predict.py
+++ b/docs/examples/nlp/predict.py
@@ -18,4 +18,4 @@
# you can now print the results
print('The predicted number of rooms')
-print(result.predicted_values)
+print(result)
diff --git a/docs/examples/time_series/predict.py b/docs/examples/time_series/predict.py
--- a/docs/examples/time_series/predict.py
+++ b/docs/examples/time_series/predict.py
@@ -9,4 +9,5 @@
# you can now print the results
print('The predicted main engine fuel consumption')
-print(result.predicted_values)
\ No newline at end of file
+for row in result:
+ print(row)
| {"golden_diff": "diff --git a/docs/examples/nlp/predict.py b/docs/examples/nlp/predict.py\n--- a/docs/examples/nlp/predict.py\n+++ b/docs/examples/nlp/predict.py\n@@ -18,4 +18,4 @@\n \n # you can now print the results\n print('The predicted number of rooms')\n-print(result.predicted_values)\n+print(result)\ndiff --git a/docs/examples/time_series/predict.py b/docs/examples/time_series/predict.py\n--- a/docs/examples/time_series/predict.py\n+++ b/docs/examples/time_series/predict.py\n@@ -9,4 +9,5 @@\n \n # you can now print the results\n print('The predicted main engine fuel consumption')\n-print(result.predicted_values)\n\\ No newline at end of file\n+for row in result:\n+ print(row)\n", "issue": "AttributeError: 'PredictTransactionOutputData' object has no attribute 'predicted_values'\n**Describe the bug**\r\nAfter running predict.py in the example mindsdb/docs/examples/time_series/ I got the following AttributeError:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"predict.py\", line 12, in <module>\r\n print(result.predicted_values)\r\nAttributeError: 'PredictTransactionOutputData' object has no attribute 'predicted_values'\r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. First run train.py, with python3 train.py\r\n2. When training is finished, run predict.py with python3 predict.py\r\n3. See error\r\n\r\n**Expected behavior**\r\nI expected to see the predicted values.\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Ubuntu 18.04.2 LTS\r\n- mindsdb 1.0.5\r\n- pip 19.1\r\n- python 3.6.7\r\n- virtualenv 15.1.0\r\n- urllib3 1.24\r\n\r\n**Additional context**\r\nBefore the Traceback I got the following warning many times:\r\n\r\n```\r\nWARNING:mindsdb-logger-core-logger:libs/backends/ludwig.py:141 - ('Missing previous predicted values for output column: '\r\n 'Main_Engine_Fuel_Consumption_MT_day, these should be included in your input '\r\n 'under the name: previous_Main_Engine_Fuel_Consumption_MT_day')\r\n```\r\nFinally, I've installed mindsdb using pip3 inside a virtualenvironment.\r\n\n", "before_files": [{"content": "\"\"\"\n\n\"\"\"\n\nfrom mindsdb import Predictor\n\n# Here we use the model to make predictions (NOTE: You need to run train.py first)\nresult = Predictor(name='fuel').predict(when_data = 'fuel_predict.csv')\n\n# you can now print the results\nprint('The predicted main engine fuel consumption')\nprint(result.predicted_values)", "path": "docs/examples/time_series/predict.py"}, {"content": "from mindsdb import *\n\nmdb = Predictor(name='real_estate_desc')\n\n# Here we use the model to make predictions (NOTE: You need to run train.py first)\nresult = mdb.predict(\n when={\n \"description\": \"\"\"A true gem\n rooms: 2\n bathrooms: 0\n neighboorhood: thowsand_oaks\n amenities: parking\n area: 84.0291068642868\n condition: great !\n \"\"\"\n }\n)\n\n# you can now print the results\nprint('The predicted number of rooms')\nprint(result.predicted_values)\n", "path": "docs/examples/nlp/predict.py"}]} | 1,146 | 173 |
gh_patches_debug_58021 | rasdani/github-patches | git_diff | sopel-irc__sopel-949 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Problem in (at least) Wikipedia module: possibly Unicode related
Hi,
observe the following use case:
https://en.wikipedia.org/wiki/Hir%C5%8D_Onoda
@willie_5.4.1 KeyError: u'extract' (file "/usr/local/lib/python2.7/dist-packages/willie-5.4.1-py2.7.egg/willie/modules/wikipedia.py", line 89, in mw_snippet)
</issue>
<code>
[start of sopel/modules/wikipedia.py]
1 # coding=utf-8
2 """
3 wikipedia.py - Sopel Wikipedia Module
4 Copyright 2013 Edward Powell - embolalia.net
5 Licensed under the Eiffel Forum License 2.
6
7 http://sopel.chat
8 """
9 from __future__ import unicode_literals, absolute_import, print_function, division
10 from sopel import web, tools
11 from sopel.config.types import StaticSection, ValidatedAttribute
12 from sopel.module import NOLIMIT, commands, example, rule
13 import json
14 import re
15
16 import sys
17 if sys.version_info.major < 3:
18 from urlparse import unquote
19 else:
20 from urllib.parse import unquote
21
22 REDIRECT = re.compile(r'^REDIRECT (.*)')
23
24
25 class WikipediaSection(StaticSection):
26 default_lang = ValidatedAttribute('default_lang', default='en')
27 """The default language to find articles from."""
28 lang_per_channel = ValidatedAttribute('lang_per_channel')
29
30
31 def setup(bot):
32 bot.config.define_section('wikipedia', WikipediaSection)
33
34 regex = re.compile('([a-z]+).(wikipedia.org/wiki/)([^ ]+)')
35 if not bot.memory.contains('url_callbacks'):
36 bot.memory['url_callbacks'] = tools.SopelMemory()
37 bot.memory['url_callbacks'][regex] = mw_info
38
39
40 def configure(config):
41 config.define_section('wikipedia', WikipediaSection)
42 config.wikipedia.configure_setting(
43 'default_lang',
44 "Enter the default language to find articles from."
45 )
46
47
48 def mw_search(server, query, num):
49 """
50 Searches the specified MediaWiki server for the given query, and returns
51 the specified number of results.
52 """
53 search_url = ('http://%s/w/api.php?format=json&action=query'
54 '&list=search&srlimit=%d&srprop=timestamp&srwhat=text'
55 '&srsearch=') % (server, num)
56 search_url += query
57 query = json.loads(web.get(search_url))
58 if 'query' in query:
59 query = query['query']['search']
60 return [r['title'] for r in query]
61 else:
62 return None
63
64
65 def say_snippet(bot, server, query, show_url=True):
66 page_name = query.replace('_', ' ')
67 query = query.replace(' ', '_')
68 snippet = mw_snippet(server, query)
69 msg = '[WIKIPEDIA] {} | "{}"'.format(page_name, snippet)
70 if show_url:
71 msg = msg + ' | https://{}/wiki/{}'.format(server, query)
72 bot.say(msg)
73
74
75 def mw_snippet(server, query):
76 """
77 Retrives a snippet of the specified length from the given page on the given
78 server.
79 """
80 snippet_url = ('https://' + server + '/w/api.php?format=json'
81 '&action=query&prop=extracts&exintro&explaintext'
82 '&exchars=300&redirects&titles=')
83 snippet_url += query
84 snippet = json.loads(web.get(snippet_url))
85 snippet = snippet['query']['pages']
86
87 # For some reason, the API gives the page *number* as the key, so we just
88 # grab the first page number in the results.
89 snippet = snippet[list(snippet.keys())[0]]
90
91 return snippet['extract']
92
93
94 @rule('.*/([a-z]+\.wikipedia.org)/wiki/([^ ]+).*')
95 def mw_info(bot, trigger, found_match=None):
96 """
97 Retrives a snippet of the specified length from the given page on the given
98 server.
99 """
100 match = found_match or trigger
101 say_snippet(bot, match.group(1), unquote(match.group(2)), show_url=False)
102
103
104 @commands('w', 'wiki', 'wik')
105 @example('.w San Francisco')
106 def wikipedia(bot, trigger):
107 lang = bot.config.wikipedia.default_lang
108
109 #change lang if channel has custom language set
110 if (trigger.sender and not trigger.sender.is_nick() and
111 bot.config.wikipedia.lang_per_channel):
112 customlang = re.search('(' + trigger.sender + '):(\w+)',
113 bot.config.wikipedia.lang_per_channel)
114 if customlang is not None:
115 lang = customlang.group(2)
116
117 if trigger.group(2) is None:
118 bot.reply("What do you want me to look up?")
119 return NOLIMIT
120
121 query = trigger.group(2)
122 args = re.search(r'^-([a-z]{2,12})\s(.*)', query)
123 if args is not None:
124 lang = args.group(1)
125 query = args.group(2)
126
127 if not query:
128 bot.reply('What do you want me to look up?')
129 return NOLIMIT
130 server = lang + '.wikipedia.org'
131 query = mw_search(server, query, 1)
132 if not query:
133 bot.reply("I can't find any results for that.")
134 return NOLIMIT
135 else:
136 query = query[0]
137 say_snippet(bot, server, query)
138
[end of sopel/modules/wikipedia.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sopel/modules/wikipedia.py b/sopel/modules/wikipedia.py
--- a/sopel/modules/wikipedia.py
+++ b/sopel/modules/wikipedia.py
@@ -15,7 +15,8 @@
import sys
if sys.version_info.major < 3:
- from urlparse import unquote
+ from urlparse import unquote as _unquote
+ unquote = lambda s: _unquote(s.encode('utf-8')).decode('utf-8')
else:
from urllib.parse import unquote
| {"golden_diff": "diff --git a/sopel/modules/wikipedia.py b/sopel/modules/wikipedia.py\n--- a/sopel/modules/wikipedia.py\n+++ b/sopel/modules/wikipedia.py\n@@ -15,7 +15,8 @@\n \n import sys\n if sys.version_info.major < 3:\n- from urlparse import unquote\n+ from urlparse import unquote as _unquote\n+ unquote = lambda s: _unquote(s.encode('utf-8')).decode('utf-8')\n else:\n from urllib.parse import unquote\n", "issue": "Problem in (at least) Wikipedia module: possibly Unicode related\nHi,\nobserve the following use case:\n https://en.wikipedia.org/wiki/Hir%C5%8D_Onoda\n @willie_5.4.1 KeyError: u'extract' (file \"/usr/local/lib/python2.7/dist-packages/willie-5.4.1-py2.7.egg/willie/modules/wikipedia.py\", line 89, in mw_snippet)\n\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\nwikipedia.py - Sopel Wikipedia Module\nCopyright 2013 Edward Powell - embolalia.net\nLicensed under the Eiffel Forum License 2.\n\nhttp://sopel.chat\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\nfrom sopel import web, tools\nfrom sopel.config.types import StaticSection, ValidatedAttribute\nfrom sopel.module import NOLIMIT, commands, example, rule\nimport json\nimport re\n\nimport sys\nif sys.version_info.major < 3:\n from urlparse import unquote\nelse:\n from urllib.parse import unquote\n\nREDIRECT = re.compile(r'^REDIRECT (.*)')\n\n\nclass WikipediaSection(StaticSection):\n default_lang = ValidatedAttribute('default_lang', default='en')\n \"\"\"The default language to find articles from.\"\"\"\n lang_per_channel = ValidatedAttribute('lang_per_channel')\n\n\ndef setup(bot):\n bot.config.define_section('wikipedia', WikipediaSection)\n\n regex = re.compile('([a-z]+).(wikipedia.org/wiki/)([^ ]+)')\n if not bot.memory.contains('url_callbacks'):\n bot.memory['url_callbacks'] = tools.SopelMemory()\n bot.memory['url_callbacks'][regex] = mw_info\n\n\ndef configure(config):\n config.define_section('wikipedia', WikipediaSection)\n config.wikipedia.configure_setting(\n 'default_lang',\n \"Enter the default language to find articles from.\"\n )\n\n\ndef mw_search(server, query, num):\n \"\"\"\n Searches the specified MediaWiki server for the given query, and returns\n the specified number of results.\n \"\"\"\n search_url = ('http://%s/w/api.php?format=json&action=query'\n '&list=search&srlimit=%d&srprop=timestamp&srwhat=text'\n '&srsearch=') % (server, num)\n search_url += query\n query = json.loads(web.get(search_url))\n if 'query' in query:\n query = query['query']['search']\n return [r['title'] for r in query]\n else:\n return None\n\n\ndef say_snippet(bot, server, query, show_url=True):\n page_name = query.replace('_', ' ')\n query = query.replace(' ', '_')\n snippet = mw_snippet(server, query)\n msg = '[WIKIPEDIA] {} | \"{}\"'.format(page_name, snippet)\n if show_url:\n msg = msg + ' | https://{}/wiki/{}'.format(server, query)\n bot.say(msg)\n\n\ndef mw_snippet(server, query):\n \"\"\"\n Retrives a snippet of the specified length from the given page on the given\n server.\n \"\"\"\n snippet_url = ('https://' + server + '/w/api.php?format=json'\n '&action=query&prop=extracts&exintro&explaintext'\n '&exchars=300&redirects&titles=')\n snippet_url += query\n snippet = json.loads(web.get(snippet_url))\n snippet = snippet['query']['pages']\n\n # For some reason, the API gives the page *number* as the key, so we just\n # grab the first page number in the results.\n snippet = snippet[list(snippet.keys())[0]]\n\n return 
snippet['extract']\n\n\n@rule('.*/([a-z]+\\.wikipedia.org)/wiki/([^ ]+).*')\ndef mw_info(bot, trigger, found_match=None):\n \"\"\"\n Retrives a snippet of the specified length from the given page on the given\n server.\n \"\"\"\n match = found_match or trigger\n say_snippet(bot, match.group(1), unquote(match.group(2)), show_url=False)\n\n\n@commands('w', 'wiki', 'wik')\n@example('.w San Francisco')\ndef wikipedia(bot, trigger):\n lang = bot.config.wikipedia.default_lang\n\n #change lang if channel has custom language set\n if (trigger.sender and not trigger.sender.is_nick() and\n bot.config.wikipedia.lang_per_channel):\n customlang = re.search('(' + trigger.sender + '):(\\w+)',\n bot.config.wikipedia.lang_per_channel)\n if customlang is not None:\n lang = customlang.group(2)\n\n if trigger.group(2) is None:\n bot.reply(\"What do you want me to look up?\")\n return NOLIMIT\n\n query = trigger.group(2)\n args = re.search(r'^-([a-z]{2,12})\\s(.*)', query)\n if args is not None:\n lang = args.group(1)\n query = args.group(2)\n\n if not query:\n bot.reply('What do you want me to look up?')\n return NOLIMIT\n server = lang + '.wikipedia.org'\n query = mw_search(server, query, 1)\n if not query:\n bot.reply(\"I can't find any results for that.\")\n return NOLIMIT\n else:\n query = query[0]\n say_snippet(bot, server, query)\n", "path": "sopel/modules/wikipedia.py"}]} | 2,029 | 120 |
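The sopel patch above works around a Python 2 quirk: calling `urlparse.unquote()` on a unicode string decodes percent-escapes byte-by-byte into mojibake for multi-byte UTF-8 titles such as the Hirō Onoda page, which is likely why the API response carried no `'extract'` key. A rough sketch of the shim it installs, assuming Python 2 only:

```python
# Rough sketch of the Python 2 shim added by the patch: decode percent-escapes
# on UTF-8 bytes, then convert back to unicode text.
from urlparse import unquote as _unquote  # Python 2 only

def unquote(s):
    return _unquote(s.encode('utf-8')).decode('utf-8')

print(unquote(u'Hir%C5%8D_Onoda'))  # -> u'Hir\u014d_Onoda' ("Hirō_Onoda")
```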
gh_patches_debug_585 | rasdani/github-patches | git_diff | pex-tool__pex-1679 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.73
On the docket:
+ [x] Unexpected distribution hash #1683
+ [x] Pex fails to parse wheel tags correctly when resolving from a lock. #1676
+ [x] `pex3 lock create --style universal` does not fully patch ambient interpreter properties. #1681
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.72"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.72"
+__version__ = "2.1.73"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.72\"\n+__version__ = \"2.1.73\"\n", "issue": "Release 2.1.73\nOn the docket:\r\n+ [x] Unexpected distribution hash #1683 \r\n+ [x] Pex fails to parse wheel tags correctly when resolving from a lock. #1676 \r\n+ [x] `pex3 lock create --style universal` does not fully patch ambient interpreter properties. #1681 \n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.72\"\n", "path": "pex/version.py"}]} | 662 | 97 |
gh_patches_debug_577 | rasdani/github-patches | git_diff | numba__numba-1356 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use CPython allocator in NRT
NRT should optionally use the CPython memory allocation functions (when imported from CPython). This would allow Numba-allocated memory to be seen by other utilities such as `sys.getallocatedblocks()`, `sys.debugmallocstats()`, and `tracemalloc`.
</issue>
<code>
[start of numba/runtime/nrt.py]
1 from __future__ import print_function, absolute_import, division
2
3 from collections import namedtuple
4
5 from . import atomicops
6 from llvmlite import binding as ll
7
8 from numba.utils import finalize as _finalize
9 from . import _nrt_python as _nrt
10
11 _nrt_mstats = namedtuple("nrt_mstats", ["alloc", "free", "mi_alloc", "mi_free"])
12
13
14 class _Runtime(object):
15 def __init__(self):
16 self._init = False
17
18 def initialize(self, ctx):
19 """Initializes the NRT
20
21 Must be called before any actual call to the NRT API.
22 Safe to be called multiple times.
23 """
24 if self._init:
25 # Already initialized
26 return
27
28 # Register globals into the system
29 for py_name in _nrt.c_helpers:
30 c_name = "NRT_" + py_name
31 c_address = _nrt.c_helpers[py_name]
32 ll.add_symbol(c_name, c_address)
33
34 # Compile atomic operations
35 self._library = atomicops.compile_nrt_functions(ctx)
36
37 self._ptr_inc = self._library.get_pointer_to_function("nrt_atomic_add")
38 self._ptr_dec = self._library.get_pointer_to_function("nrt_atomic_sub")
39 self._ptr_cas = self._library.get_pointer_to_function("nrt_atomic_cas")
40
41 # Install atomic ops to NRT
42 _nrt.memsys_set_atomic_inc_dec(self._ptr_inc, self._ptr_dec)
43 _nrt.memsys_set_atomic_cas(self._ptr_cas)
44
45 self._init = True
46
47 @staticmethod
48 def shutdown():
49 """
50 Shutdown the NRT
51 Safe to be called without calling Runtime.initialize first
52 """
53 _nrt.memsys_shutdown()
54
55 @property
56 def library(self):
57 """
58 Return the Library object containing the various NRT functions.
59 """
60 return self._library
61
62 def meminfo_new(self, data, pyobj):
63 """
64 Returns a MemInfo object that tracks memory at `data` owned by `pyobj`.
65 MemInfo will acquire a reference on `pyobj`.
66 The release of MemInfo will release a reference on `pyobj`.
67 """
68 mi = _nrt.meminfo_new(data, pyobj)
69 return MemInfo(mi)
70
71 def meminfo_alloc(self, size, safe=False):
72 """
73 Allocate a new memory of `size` bytes and returns a MemInfo object
74 that tracks the allocation. When there is no more reference to the
75 MemInfo object, the underlying memory will be deallocated.
76
77 If `safe` flag is True, the memory is allocated using the `safe` scheme.
78 This is used for debugging and testing purposes.
79 See `NRT_MemInfo_alloc_safe()` in "nrt.h" for details.
80 """
81 if safe:
82 mi = _nrt.meminfo_alloc_safe(size)
83 else:
84 mi = _nrt.meminfo_alloc(size)
85 return MemInfo(mi)
86
87 def get_allocation_stats(self):
88 """
89 Returns a namedtuple of (alloc, free, mi_alloc, mi_free) for count of
90 each memory operations.
91 """
92 return _nrt_mstats(alloc=_nrt.memsys_get_stats_alloc(),
93 free=_nrt.memsys_get_stats_free(),
94 mi_alloc=_nrt.memsys_get_stats_mi_alloc(),
95 mi_free=_nrt.memsys_get_stats_mi_free())
96
97
98 # Alias to _nrt_python._MemInfo
99 MemInfo = _nrt._MemInfo
100
101 # Create uninitialized runtime
102 rtsys = _Runtime()
103
104 # Install finalizer
105 _finalize(rtsys, _Runtime.shutdown)
106
107 # Avoid future use of the class
108 del _Runtime
109
[end of numba/runtime/nrt.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/numba/runtime/nrt.py b/numba/runtime/nrt.py
--- a/numba/runtime/nrt.py
+++ b/numba/runtime/nrt.py
@@ -98,7 +98,8 @@
# Alias to _nrt_python._MemInfo
MemInfo = _nrt._MemInfo
-# Create uninitialized runtime
+# Create runtime
+_nrt.memsys_use_cpython_allocator()
rtsys = _Runtime()
# Install finalizer
| {"golden_diff": "diff --git a/numba/runtime/nrt.py b/numba/runtime/nrt.py\n--- a/numba/runtime/nrt.py\n+++ b/numba/runtime/nrt.py\n@@ -98,7 +98,8 @@\n # Alias to _nrt_python._MemInfo\n MemInfo = _nrt._MemInfo\n \n-# Create uninitialized runtime\n+# Create runtime\n+_nrt.memsys_use_cpython_allocator()\n rtsys = _Runtime()\n \n # Install finalizer\n", "issue": "Use CPython allocator in NRT\nNRT should optionally use the CPython memory allocation functions (when imported from CPython). This would allow Numba-allocated memory to be seen by other utilities such as `sys.getallocatedblocks()`, `sys.debugmallocstats()`, and `tracemalloc`.\n\n", "before_files": [{"content": "from __future__ import print_function, absolute_import, division\n\nfrom collections import namedtuple\n\nfrom . import atomicops\nfrom llvmlite import binding as ll\n\nfrom numba.utils import finalize as _finalize\nfrom . import _nrt_python as _nrt\n\n_nrt_mstats = namedtuple(\"nrt_mstats\", [\"alloc\", \"free\", \"mi_alloc\", \"mi_free\"])\n\n\nclass _Runtime(object):\n def __init__(self):\n self._init = False\n\n def initialize(self, ctx):\n \"\"\"Initializes the NRT\n\n Must be called before any actual call to the NRT API.\n Safe to be called multiple times.\n \"\"\"\n if self._init:\n # Already initialized\n return\n\n # Register globals into the system\n for py_name in _nrt.c_helpers:\n c_name = \"NRT_\" + py_name\n c_address = _nrt.c_helpers[py_name]\n ll.add_symbol(c_name, c_address)\n\n # Compile atomic operations\n self._library = atomicops.compile_nrt_functions(ctx)\n\n self._ptr_inc = self._library.get_pointer_to_function(\"nrt_atomic_add\")\n self._ptr_dec = self._library.get_pointer_to_function(\"nrt_atomic_sub\")\n self._ptr_cas = self._library.get_pointer_to_function(\"nrt_atomic_cas\")\n\n # Install atomic ops to NRT\n _nrt.memsys_set_atomic_inc_dec(self._ptr_inc, self._ptr_dec)\n _nrt.memsys_set_atomic_cas(self._ptr_cas)\n\n self._init = True\n\n @staticmethod\n def shutdown():\n \"\"\"\n Shutdown the NRT\n Safe to be called without calling Runtime.initialize first\n \"\"\"\n _nrt.memsys_shutdown()\n\n @property\n def library(self):\n \"\"\"\n Return the Library object containing the various NRT functions.\n \"\"\"\n return self._library\n\n def meminfo_new(self, data, pyobj):\n \"\"\"\n Returns a MemInfo object that tracks memory at `data` owned by `pyobj`.\n MemInfo will acquire a reference on `pyobj`.\n The release of MemInfo will release a reference on `pyobj`.\n \"\"\"\n mi = _nrt.meminfo_new(data, pyobj)\n return MemInfo(mi)\n\n def meminfo_alloc(self, size, safe=False):\n \"\"\"\n Allocate a new memory of `size` bytes and returns a MemInfo object\n that tracks the allocation. 
When there is no more reference to the\n MemInfo object, the underlying memory will be deallocated.\n\n If `safe` flag is True, the memory is allocated using the `safe` scheme.\n This is used for debugging and testing purposes.\n See `NRT_MemInfo_alloc_safe()` in \"nrt.h\" for details.\n \"\"\"\n if safe:\n mi = _nrt.meminfo_alloc_safe(size)\n else:\n mi = _nrt.meminfo_alloc(size)\n return MemInfo(mi)\n\n def get_allocation_stats(self):\n \"\"\"\n Returns a namedtuple of (alloc, free, mi_alloc, mi_free) for count of\n each memory operations.\n \"\"\"\n return _nrt_mstats(alloc=_nrt.memsys_get_stats_alloc(),\n free=_nrt.memsys_get_stats_free(),\n mi_alloc=_nrt.memsys_get_stats_mi_alloc(),\n mi_free=_nrt.memsys_get_stats_mi_free())\n\n\n# Alias to _nrt_python._MemInfo\nMemInfo = _nrt._MemInfo\n\n# Create uninitialized runtime\nrtsys = _Runtime()\n\n# Install finalizer\n_finalize(rtsys, _Runtime.shutdown)\n\n# Avoid future use of the class\ndel _Runtime\n", "path": "numba/runtime/nrt.py"}]} | 1,633 | 107 |
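The one-line change above routes NRT allocations through CPython's allocator before the runtime singleton is created. A hedged illustration of the effect described in the issue (exact numbers depend on the build and workload):

```python
# Illustrative only: with the CPython allocator in use, NRT allocations made
# by jitted code become visible to CPython-level tools such as tracemalloc.
import tracemalloc
import numpy as np
from numba import njit

@njit
def make_array(n):
    return np.ones(n)

make_array(10)               # warm up / compile outside the measurement
tracemalloc.start()
arr = make_array(1_000_000)  # ~8 MB of float64
current, peak = tracemalloc.get_traced_memory()
print(current, peak)         # the array should show up here when NRT uses the CPython allocator
```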
gh_patches_debug_36848 | rasdani/github-patches | git_diff | pwndbg__pwndbg-1920 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The `ctx threads` (or `threads`) should display all threads, regardless of the context threads limit
cc: @CptGibbon we should probably add this option for convenience :)
</issue>
<code>
[start of pwndbg/commands/tls.py]
1 """
2 Command to print the information of the current Thread Local Storage (TLS).
3 """
4 from __future__ import annotations
5
6 import argparse
7
8 import pwndbg.commands
9 import pwndbg.gdblib.tls
10 from pwndbg.color import message
11 from pwndbg.commands import CommandCategory
12
13 parser = argparse.ArgumentParser(
14 formatter_class=argparse.RawTextHelpFormatter,
15 description="Print out base address of the current Thread Local Storage (TLS).",
16 )
17
18 parser.add_argument(
19 "-p",
20 "--pthread-self",
21 action="store_true",
22 default=False,
23 help="Try to get the address of TLS by calling pthread_self().",
24 )
25
26
27 @pwndbg.commands.ArgparsedCommand(parser, category=CommandCategory.LINUX)
28 @pwndbg.commands.OnlyWhenRunning
29 @pwndbg.commands.OnlyWhenUserspace
30 def tls(pthread_self=False) -> None:
31 tls_base = (
32 pwndbg.gdblib.tls.find_address_with_register()
33 if not pthread_self
34 else pwndbg.gdblib.tls.find_address_with_pthread_self()
35 )
36 if pwndbg.gdblib.memory.is_readable_address(tls_base):
37 print(message.success("Thread Local Storage (TLS) base: %#x" % tls_base))
38 print(message.success("TLS is located at:"))
39 print(message.notice(pwndbg.gdblib.vmmap.find(tls_base)))
40 return
41 print(message.error("Couldn't find Thread Local Storage (TLS) base."))
42 if not pthread_self:
43 print(
44 message.notice(
45 "You can try to use -p/--pthread option to get the address of TLS by calling pthread_self().\n"
46 "(This might cause problems if the pthread_self() is not in libc or not initialized yet.)"
47 )
48 )
49
[end of pwndbg/commands/tls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pwndbg/commands/tls.py b/pwndbg/commands/tls.py
--- a/pwndbg/commands/tls.py
+++ b/pwndbg/commands/tls.py
@@ -5,6 +5,10 @@
import argparse
+import gdb
+from tabulate import tabulate
+
+import pwndbg.color.memory as M
import pwndbg.commands
import pwndbg.gdblib.tls
from pwndbg.color import message
@@ -46,3 +50,97 @@
"(This might cause problems if the pthread_self() is not in libc or not initialized yet.)"
)
)
+
+
+parser = argparse.ArgumentParser(
+ formatter_class=argparse.RawTextHelpFormatter,
+ description="List all threads belonging to the selected inferior.",
+)
+group = parser.add_mutually_exclusive_group()
+
+group.add_argument(
+ "num_threads",
+ type=int,
+ nargs="?",
+ default=None,
+ help="Number of threads to display. Omit to display all threads.",
+)
+
+group.add_argument(
+ "-c",
+ "--config",
+ action="store_true",
+ dest="respect_config",
+ help="Respect context-max-threads config to limit number of threads displayed.",
+)
+
+
[email protected](parser, category=CommandCategory.LINUX)
[email protected]
[email protected]
+def threads(num_threads, respect_config) -> None:
+ table = []
+ headers = ["global_num", "name", "status", "pc", "symbol"]
+ bold_green = lambda text: pwndbg.color.bold(pwndbg.color.green(text))
+
+ try:
+ original_thread = gdb.selected_thread()
+ except SystemError:
+ original_thread = None
+
+ all_threads = gdb.selected_inferior().threads()[::-1]
+
+ displayed_threads = []
+
+ if original_thread is not None and original_thread.is_valid():
+ displayed_threads.append(original_thread)
+
+ for thread in all_threads:
+ if respect_config and len(displayed_threads) >= int(
+ pwndbg.commands.context.config_max_threads_display
+ ):
+ break
+ elif num_threads is not None and len(displayed_threads) >= num_threads:
+ break
+
+ if thread.is_valid() and thread is not original_thread:
+ displayed_threads.append(thread)
+
+ for thread in displayed_threads:
+ name = thread.name or ""
+
+ if thread is original_thread:
+ row = [
+ bold_green(thread.global_num),
+ bold_green(name),
+ ]
+ else:
+ row = [
+ thread.global_num,
+ name,
+ ]
+
+ row.append(pwndbg.commands.context.get_thread_status(thread))
+
+ if thread.is_stopped():
+ thread.switch()
+ pc = gdb.selected_frame().pc()
+
+ pc_colored = M.get(pc)
+ symbol = pwndbg.gdblib.symbol.get(pc)
+
+ row.append(pc_colored)
+
+ if symbol:
+ if thread is original_thread:
+ row.append(bold_green(symbol))
+ else:
+ row.append(symbol)
+
+ table.append(row)
+
+ if original_thread is not None and original_thread.is_valid():
+ original_thread.switch()
+
+ print(tabulate(table, headers))
+ print(f"\nShowing {len(displayed_threads)} of {len(all_threads)} threads.")
| {"golden_diff": "diff --git a/pwndbg/commands/tls.py b/pwndbg/commands/tls.py\n--- a/pwndbg/commands/tls.py\n+++ b/pwndbg/commands/tls.py\n@@ -5,6 +5,10 @@\n \n import argparse\n \n+import gdb\n+from tabulate import tabulate\n+\n+import pwndbg.color.memory as M\n import pwndbg.commands\n import pwndbg.gdblib.tls\n from pwndbg.color import message\n@@ -46,3 +50,97 @@\n \"(This might cause problems if the pthread_self() is not in libc or not initialized yet.)\"\n )\n )\n+\n+\n+parser = argparse.ArgumentParser(\n+ formatter_class=argparse.RawTextHelpFormatter,\n+ description=\"List all threads belonging to the selected inferior.\",\n+)\n+group = parser.add_mutually_exclusive_group()\n+\n+group.add_argument(\n+ \"num_threads\",\n+ type=int,\n+ nargs=\"?\",\n+ default=None,\n+ help=\"Number of threads to display. Omit to display all threads.\",\n+)\n+\n+group.add_argument(\n+ \"-c\",\n+ \"--config\",\n+ action=\"store_true\",\n+ dest=\"respect_config\",\n+ help=\"Respect context-max-threads config to limit number of threads displayed.\",\n+)\n+\n+\[email protected](parser, category=CommandCategory.LINUX)\[email protected]\[email protected]\n+def threads(num_threads, respect_config) -> None:\n+ table = []\n+ headers = [\"global_num\", \"name\", \"status\", \"pc\", \"symbol\"]\n+ bold_green = lambda text: pwndbg.color.bold(pwndbg.color.green(text))\n+\n+ try:\n+ original_thread = gdb.selected_thread()\n+ except SystemError:\n+ original_thread = None\n+\n+ all_threads = gdb.selected_inferior().threads()[::-1]\n+\n+ displayed_threads = []\n+\n+ if original_thread is not None and original_thread.is_valid():\n+ displayed_threads.append(original_thread)\n+\n+ for thread in all_threads:\n+ if respect_config and len(displayed_threads) >= int(\n+ pwndbg.commands.context.config_max_threads_display\n+ ):\n+ break\n+ elif num_threads is not None and len(displayed_threads) >= num_threads:\n+ break\n+\n+ if thread.is_valid() and thread is not original_thread:\n+ displayed_threads.append(thread)\n+\n+ for thread in displayed_threads:\n+ name = thread.name or \"\"\n+\n+ if thread is original_thread:\n+ row = [\n+ bold_green(thread.global_num),\n+ bold_green(name),\n+ ]\n+ else:\n+ row = [\n+ thread.global_num,\n+ name,\n+ ]\n+\n+ row.append(pwndbg.commands.context.get_thread_status(thread))\n+\n+ if thread.is_stopped():\n+ thread.switch()\n+ pc = gdb.selected_frame().pc()\n+\n+ pc_colored = M.get(pc)\n+ symbol = pwndbg.gdblib.symbol.get(pc)\n+\n+ row.append(pc_colored)\n+\n+ if symbol:\n+ if thread is original_thread:\n+ row.append(bold_green(symbol))\n+ else:\n+ row.append(symbol)\n+\n+ table.append(row)\n+\n+ if original_thread is not None and original_thread.is_valid():\n+ original_thread.switch()\n+\n+ print(tabulate(table, headers))\n+ print(f\"\\nShowing {len(displayed_threads)} of {len(all_threads)} threads.\")\n", "issue": "The `ctx threads` (or `threads`) should display all threads no matter of context threads limit\ncc: @CptGibbon we should probably add this option for convenience :)\n", "before_files": [{"content": "\"\"\"\nCommand to print the information of the current Thread Local Storage (TLS).\n\"\"\"\nfrom __future__ import annotations\n\nimport argparse\n\nimport pwndbg.commands\nimport pwndbg.gdblib.tls\nfrom pwndbg.color import message\nfrom pwndbg.commands import CommandCategory\n\nparser = argparse.ArgumentParser(\n formatter_class=argparse.RawTextHelpFormatter,\n description=\"Print out base address of the current Thread Local Storage (TLS).\",\n)\n\nparser.add_argument(\n \"-p\",\n 
\"--pthread-self\",\n action=\"store_true\",\n default=False,\n help=\"Try to get the address of TLS by calling pthread_self().\",\n)\n\n\[email protected](parser, category=CommandCategory.LINUX)\[email protected]\[email protected]\ndef tls(pthread_self=False) -> None:\n tls_base = (\n pwndbg.gdblib.tls.find_address_with_register()\n if not pthread_self\n else pwndbg.gdblib.tls.find_address_with_pthread_self()\n )\n if pwndbg.gdblib.memory.is_readable_address(tls_base):\n print(message.success(\"Thread Local Storage (TLS) base: %#x\" % tls_base))\n print(message.success(\"TLS is located at:\"))\n print(message.notice(pwndbg.gdblib.vmmap.find(tls_base)))\n return\n print(message.error(\"Couldn't find Thread Local Storage (TLS) base.\"))\n if not pthread_self:\n print(\n message.notice(\n \"You can try to use -p/--pthread option to get the address of TLS by calling pthread_self().\\n\"\n \"(This might cause problems if the pthread_self() is not in libc or not initialized yet.)\"\n )\n )\n", "path": "pwndbg/commands/tls.py"}]} | 1,046 | 782 |
gh_patches_debug_8221 | rasdani/github-patches | git_diff | cisagov__manage.get.gov-1094 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Check Domain availability via epp-Testing
### Issue Description
When adding the /availability endpoint we will need to send a CheckDomain request to epp to see if the domain is available. This epp function is already implemented in domain.py and is called available(). It just needs to be tested, and updated if the tests show any problems with the implementation.
### AC
- [x] unit tests added for available
- [x] manually test via sandbox with OT&E to be sure that this is working as expected
- [x] update the implementation as needed or desired
- [x] in your tests, ensure that this function can be called by just doing Domain.available() and not by having an instance of a domain
### Additional Context (optional)
This must be tested by using Domain.available because the /availability endpoint (when implemented) will not have access to any particular domain object, so this function needs to work on its own.
### Issue Link
blocks: #1015
</issue>
<code>
[start of src/epplibwrapper/__init__.py]
1 import logging
2 from types import SimpleNamespace
3
4 try:
5 from epplib import constants
6 except ImportError:
7 # allow epplibwrapper to load without epplib, for testing and development
8 pass
9
10 logger = logging.getLogger(__name__)
11
12 NAMESPACE = SimpleNamespace(
13 EPP="urn:ietf:params:xml:ns:epp-1.0",
14 XSI="http://www.w3.org/2001/XMLSchema-instance",
15 FRED="noop",
16 NIC_CONTACT="urn:ietf:params:xml:ns:contact-1.0",
17 NIC_DOMAIN="urn:ietf:params:xml:ns:domain-1.0",
18 NIC_ENUMVAL="noop",
19 NIC_EXTRA_ADDR="noop",
20 NIC_HOST="urn:ietf:params:xml:ns:host-1.0",
21 NIC_KEYSET="noop",
22 NIC_NSSET="noop",
23 )
24
25 SCHEMA_LOCATION = SimpleNamespace(
26 XSI="urn:ietf:params:xml:ns:epp-1.0 epp-1.0.xsd",
27 FRED="noop fred-1.5.0.xsd",
28 NIC_CONTACT="urn:ietf:params:xml:ns:contact-1.0 contact-1.0.xsd",
29 NIC_DOMAIN="urn:ietf:params:xml:ns:domain-1.0 domain-1.0.xsd",
30 NIC_ENUMVAL="noop enumval-1.2.0.xsd",
31 NIC_EXTRA_ADDR="noop extra-addr-1.0.0.xsd",
32 NIC_HOST="urn:ietf:params:xml:ns:host-1.0 host-1.0.xsd",
33 NIC_KEYSET="noop keyset-1.3.2.xsd",
34 NIC_NSSET="noop nsset-1.2.2.xsd",
35 )
36
37 try:
38 constants.NAMESPACE = NAMESPACE
39 constants.SCHEMA_LOCATION = SCHEMA_LOCATION
40 except NameError:
41 pass
42
43 # Attn: these imports should NOT be at the top of the file
44 try:
45 from .client import CLIENT, commands
46 from .errors import RegistryError, ErrorCode
47 from epplib.models import common
48 except ImportError:
49 pass
50
51 __all__ = [
52 "CLIENT",
53 "commands",
54 "common",
55 "ErrorCode",
56 "RegistryError",
57 ]
58
[end of src/epplibwrapper/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/epplibwrapper/__init__.py b/src/epplibwrapper/__init__.py
--- a/src/epplibwrapper/__init__.py
+++ b/src/epplibwrapper/__init__.py
@@ -45,6 +45,7 @@
from .client import CLIENT, commands
from .errors import RegistryError, ErrorCode
from epplib.models import common
+ from epplib import responses
except ImportError:
pass
@@ -52,6 +53,7 @@
"CLIENT",
"commands",
"common",
+ "responses",
"ErrorCode",
"RegistryError",
]
| {"golden_diff": "diff --git a/src/epplibwrapper/__init__.py b/src/epplibwrapper/__init__.py\n--- a/src/epplibwrapper/__init__.py\n+++ b/src/epplibwrapper/__init__.py\n@@ -45,6 +45,7 @@\n from .client import CLIENT, commands\n from .errors import RegistryError, ErrorCode\n from epplib.models import common\n+ from epplib import responses\n except ImportError:\n pass\n \n@@ -52,6 +53,7 @@\n \"CLIENT\",\n \"commands\",\n \"common\",\n+ \"responses\",\n \"ErrorCode\",\n \"RegistryError\",\n ]\n", "issue": "Check Domain availability via epp-Testing\n### Issue Description\r\n\r\nWhen adding the /availability endpoint we will need to send a CheckDomain request to epp to see if the domain is available. This epp function is already implemented in domain.py and is called available(). It just needs to be tested and updated if the test show any problem with the implementation\r\n\r\n### AC\r\n\r\n- [x] unit tests added for available\r\n- [x] manually test via sandbox with OT&E to be sure that this is working as expected \r\n- [x] update the implementation as needed or desired\r\n- [x] in your tests, ensure that this function can be called by just doing Domain.available() and not by having an instance of a domain\r\n\r\n### Additional Context (optional)\r\n\r\nThis must be tested by using Domain.available because the /availability endpoint (when implemented) will not have access to any particular domain object and this function needs to be able to be performed on its own.\r\n\r\n### Issue Link\r\nblocks: #1015 \n", "before_files": [{"content": "import logging\nfrom types import SimpleNamespace\n\ntry:\n from epplib import constants\nexcept ImportError:\n # allow epplibwrapper to load without epplib, for testing and development\n pass\n\nlogger = logging.getLogger(__name__)\n\nNAMESPACE = SimpleNamespace(\n EPP=\"urn:ietf:params:xml:ns:epp-1.0\",\n XSI=\"http://www.w3.org/2001/XMLSchema-instance\",\n FRED=\"noop\",\n NIC_CONTACT=\"urn:ietf:params:xml:ns:contact-1.0\",\n NIC_DOMAIN=\"urn:ietf:params:xml:ns:domain-1.0\",\n NIC_ENUMVAL=\"noop\",\n NIC_EXTRA_ADDR=\"noop\",\n NIC_HOST=\"urn:ietf:params:xml:ns:host-1.0\",\n NIC_KEYSET=\"noop\",\n NIC_NSSET=\"noop\",\n)\n\nSCHEMA_LOCATION = SimpleNamespace(\n XSI=\"urn:ietf:params:xml:ns:epp-1.0 epp-1.0.xsd\",\n FRED=\"noop fred-1.5.0.xsd\",\n NIC_CONTACT=\"urn:ietf:params:xml:ns:contact-1.0 contact-1.0.xsd\",\n NIC_DOMAIN=\"urn:ietf:params:xml:ns:domain-1.0 domain-1.0.xsd\",\n NIC_ENUMVAL=\"noop enumval-1.2.0.xsd\",\n NIC_EXTRA_ADDR=\"noop extra-addr-1.0.0.xsd\",\n NIC_HOST=\"urn:ietf:params:xml:ns:host-1.0 host-1.0.xsd\",\n NIC_KEYSET=\"noop keyset-1.3.2.xsd\",\n NIC_NSSET=\"noop nsset-1.2.2.xsd\",\n)\n\ntry:\n constants.NAMESPACE = NAMESPACE\n constants.SCHEMA_LOCATION = SCHEMA_LOCATION\nexcept NameError:\n pass\n\n# Attn: these imports should NOT be at the top of the file\ntry:\n from .client import CLIENT, commands\n from .errors import RegistryError, ErrorCode\n from epplib.models import common\nexcept ImportError:\n pass\n\n__all__ = [\n \"CLIENT\",\n \"commands\",\n \"common\",\n \"ErrorCode\",\n \"RegistryError\",\n]\n", "path": "src/epplibwrapper/__init__.py"}]} | 1,348 | 141 |
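The export change above is small, but it is what lets callers work with epplib response objects through the wrapper module. Illustrative import after the patch (only names actually added to `__all__` are shown; anything beyond that is an assumption):

```python
# The wrapper now re-exports epplib's response models next to the client, so
# replies (e.g. the CheckDomain result used by Domain.available) can be
# inspected without importing epplib directly.
from epplibwrapper import CLIENT, commands, responses, RegistryError, ErrorCode
```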
gh_patches_debug_13162 | rasdani/github-patches | git_diff | chainer__chainer-2143 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Stop using ABC in Serializer
AbstractSerializer is currently written as an abstract base class. I don't think it is needed to support ABC.
</issue>
<code>
[start of chainer/serializer.py]
1 import abc
2
3 import six
4
5
6 @six.add_metaclass(abc.ABCMeta)
7 class AbstractSerializer(object):
8
9 """Abstract base class of all serializers and deserializers."""
10
11 @abc.abstractmethod
12 def __getitem__(self, key):
13 """Gets a child serializer.
14
15 This operator creates a _child_ serializer represented by the given
16 key.
17
18 Args:
19 key (str): Name of the child serializer.
20
21 """
22 raise NotImplementedError
23
24 @abc.abstractmethod
25 def __call__(self, key, value):
26 """Serializes or deserializes a value by given name.
27
28 This operator saves or loads a value by given name.
29
30 If this is a serializer, then the value is simply saved at the key.
31 Note that some type information might be missed depending on the
32 implementation (and the target file format).
33
34 If this is a deserializer, then the value is loaded by the key. The
35 deserialization differently works on scalars and arrays. For scalars,
36 the ``value`` argument is used just for determining the type of
37 restored value to be converted, and the converted value is returned.
38 For arrays, the restored elements are directly copied into the
39 ``value`` argument. String values are treated like scalars. If the
40 ``value`` argument is ``None``, the type of the restored value will
41 typically be a numpy array but can depend on the particular subclass
42 implementation.
43
44 Args:
45 key (str): Name of the serialization entry.
46 value (scalar, array, None, or str): Object to be (de)serialized.
47 ``None`` is only supported by deserializers.
48
49 Returns:
50 Serialized or deserialized value.
51
52 """
53 raise NotImplementedError
54
55
56 class Serializer(AbstractSerializer):
57
58 """Base class of all serializers."""
59
60 def save(self, obj):
61 """Saves an object by this serializer.
62
63 This is equivalent to ``obj.serialize(self)``.
64
65 Args:
66 obj: Target object to be serialized.
67
68 """
69 obj.serialize(self)
70
71
72 class Deserializer(AbstractSerializer):
73
74 """Base class of all deserializers."""
75
76 def load(self, obj):
77 """Loads an object from this deserializer.
78
79 This is equivalent to ``obj.serialize(self)``.
80
81 Args:
82 obj: Target object to be serialized.
83
84 """
85 obj.serialize(self)
86
[end of chainer/serializer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/chainer/serializer.py b/chainer/serializer.py
--- a/chainer/serializer.py
+++ b/chainer/serializer.py
@@ -1,14 +1,7 @@
-import abc
-
-import six
-
-
[email protected]_metaclass(abc.ABCMeta)
class AbstractSerializer(object):
"""Abstract base class of all serializers and deserializers."""
- @abc.abstractmethod
def __getitem__(self, key):
"""Gets a child serializer.
@@ -21,7 +14,6 @@
"""
raise NotImplementedError
- @abc.abstractmethod
def __call__(self, key, value):
"""Serializes or deserializes a value by given name.
| {"golden_diff": "diff --git a/chainer/serializer.py b/chainer/serializer.py\n--- a/chainer/serializer.py\n+++ b/chainer/serializer.py\n@@ -1,14 +1,7 @@\n-import abc\n-\n-import six\n-\n-\[email protected]_metaclass(abc.ABCMeta)\n class AbstractSerializer(object):\n \n \"\"\"Abstract base class of all serializers and deserializers.\"\"\"\n \n- @abc.abstractmethod\n def __getitem__(self, key):\n \"\"\"Gets a child serializer.\n \n@@ -21,7 +14,6 @@\n \"\"\"\n raise NotImplementedError\n \n- @abc.abstractmethod\n def __call__(self, key, value):\n \"\"\"Serializes or deserializes a value by given name.\n", "issue": "Stop using ABC in Serializer\nAbstractSerializer is currently written as an abstract base class. I don't think it is needed to support ABC.\n", "before_files": [{"content": "import abc\n\nimport six\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass AbstractSerializer(object):\n\n \"\"\"Abstract base class of all serializers and deserializers.\"\"\"\n\n @abc.abstractmethod\n def __getitem__(self, key):\n \"\"\"Gets a child serializer.\n\n This operator creates a _child_ serializer represented by the given\n key.\n\n Args:\n key (str): Name of the child serializer.\n\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def __call__(self, key, value):\n \"\"\"Serializes or deserializes a value by given name.\n\n This operator saves or loads a value by given name.\n\n If this is a serializer, then the value is simply saved at the key.\n Note that some type information might be missed depending on the\n implementation (and the target file format).\n\n If this is a deserializer, then the value is loaded by the key. The\n deserialization differently works on scalars and arrays. For scalars,\n the ``value`` argument is used just for determining the type of\n restored value to be converted, and the converted value is returned.\n For arrays, the restored elements are directly copied into the\n ``value`` argument. String values are treated like scalars. If the\n ``value`` argument is ``None``, the type of the restored value will\n typically be a numpy array but can depend on the particular subclass\n implementation.\n\n Args:\n key (str): Name of the serialization entry.\n value (scalar, array, None, or str): Object to be (de)serialized.\n ``None`` is only supported by deserializers.\n\n Returns:\n Serialized or deserialized value.\n\n \"\"\"\n raise NotImplementedError\n\n\nclass Serializer(AbstractSerializer):\n\n \"\"\"Base class of all serializers.\"\"\"\n\n def save(self, obj):\n \"\"\"Saves an object by this serializer.\n\n This is equivalent to ``obj.serialize(self)``.\n\n Args:\n obj: Target object to be serialized.\n\n \"\"\"\n obj.serialize(self)\n\n\nclass Deserializer(AbstractSerializer):\n\n \"\"\"Base class of all deserializers.\"\"\"\n\n def load(self, obj):\n \"\"\"Loads an object from this deserializer.\n\n This is equivalent to ``obj.serialize(self)``.\n\n Args:\n obj: Target object to be serialized.\n\n \"\"\"\n obj.serialize(self)\n", "path": "chainer/serializer.py"}]} | 1,246 | 159 |
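Dropping `ABCMeta` above keeps the documented contract, since both methods still raise `NotImplementedError`, while removing the `abc`/`six` machinery from this module; subclasses override the two methods exactly as before. A minimal illustrative serializer built on the plain base class (not part of Chainer itself):

```python
# Minimal illustrative serializer storing values in a nested dict.
class DictSerializer(Serializer):
    def __init__(self, target=None):
        self.target = {} if target is None else target

    def __getitem__(self, key):
        return DictSerializer(self.target.setdefault(key, {}))

    def __call__(self, key, value):
        self.target[key] = value
        return value
```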
gh_patches_debug_9091 | rasdani/github-patches | git_diff | pytorch__ignite-320 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ParamScheduler docs missing
No docs on `ParamScheduler` and related classes on the [site](https://pytorch.org/ignite/contrib/handlers.html).
</issue>
<code>
[start of ignite/contrib/handlers/__init__.py]
1
2 from ignite.contrib.handlers.param_scheduler import ParamScheduler, CyclicalScheduler, \
3 LinearCyclicalScheduler, CosineAnnealingScheduler
4
5 from ignite.contrib.handlers.tqdm_logger import ProgressBar
6
7 __all__ = ['ProgressBar']
8
[end of ignite/contrib/handlers/__init__.py]
[start of ignite/contrib/engines/__init__.py]
1 # coding: utf-8
2
3 from ignite.contrib.engines.tbptt import create_supervised_tbptt_trainer
4 from ignite.contrib.engines.tbptt import Tbptt_Events
5
6
7 __all__ = ["create_supervised_tbptt_trainer", "Tbptt_Events"]
8
[end of ignite/contrib/engines/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ignite/contrib/engines/__init__.py b/ignite/contrib/engines/__init__.py
--- a/ignite/contrib/engines/__init__.py
+++ b/ignite/contrib/engines/__init__.py
@@ -2,6 +2,3 @@
from ignite.contrib.engines.tbptt import create_supervised_tbptt_trainer
from ignite.contrib.engines.tbptt import Tbptt_Events
-
-
-__all__ = ["create_supervised_tbptt_trainer", "Tbptt_Events"]
diff --git a/ignite/contrib/handlers/__init__.py b/ignite/contrib/handlers/__init__.py
--- a/ignite/contrib/handlers/__init__.py
+++ b/ignite/contrib/handlers/__init__.py
@@ -3,5 +3,3 @@
LinearCyclicalScheduler, CosineAnnealingScheduler
from ignite.contrib.handlers.tqdm_logger import ProgressBar
-
-__all__ = ['ProgressBar']
| {"golden_diff": "diff --git a/ignite/contrib/engines/__init__.py b/ignite/contrib/engines/__init__.py\n--- a/ignite/contrib/engines/__init__.py\n+++ b/ignite/contrib/engines/__init__.py\n@@ -2,6 +2,3 @@\n \n from ignite.contrib.engines.tbptt import create_supervised_tbptt_trainer\n from ignite.contrib.engines.tbptt import Tbptt_Events\n-\n-\n-__all__ = [\"create_supervised_tbptt_trainer\", \"Tbptt_Events\"]\ndiff --git a/ignite/contrib/handlers/__init__.py b/ignite/contrib/handlers/__init__.py\n--- a/ignite/contrib/handlers/__init__.py\n+++ b/ignite/contrib/handlers/__init__.py\n@@ -3,5 +3,3 @@\n LinearCyclicalScheduler, CosineAnnealingScheduler\n \n from ignite.contrib.handlers.tqdm_logger import ProgressBar\n-\n-__all__ = ['ProgressBar']\n", "issue": "ParamScheduler docs missing\nNo docs on `ParamScheduler` and related classes on the [site](https://pytorch.org/ignite/contrib/handlers.html).\n", "before_files": [{"content": "\nfrom ignite.contrib.handlers.param_scheduler import ParamScheduler, CyclicalScheduler, \\\n LinearCyclicalScheduler, CosineAnnealingScheduler\n\nfrom ignite.contrib.handlers.tqdm_logger import ProgressBar\n\n__all__ = ['ProgressBar']\n", "path": "ignite/contrib/handlers/__init__.py"}, {"content": "# coding: utf-8\n\nfrom ignite.contrib.engines.tbptt import create_supervised_tbptt_trainer\nfrom ignite.contrib.engines.tbptt import Tbptt_Events\n\n\n__all__ = [\"create_supervised_tbptt_trainer\", \"Tbptt_Events\"]\n", "path": "ignite/contrib/engines/__init__.py"}]} | 737 | 225 |
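Removing the narrow `__all__` lists above is presumably what lets the autogenerated API docs pick up `ParamScheduler` and friends; the public imports themselves do not change. Illustrative check (class names as imported at the top of the module):

```python
# These imports work the same before and after the patch; the difference is
# that the scheduler classes are no longer hidden from wildcard imports and
# from the autogenerated API docs.
from ignite.contrib.handlers import (
    ParamScheduler,
    CyclicalScheduler,
    LinearCyclicalScheduler,
    CosineAnnealingScheduler,
    ProgressBar,
)
```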
gh_patches_debug_5758 | rasdani/github-patches | git_diff | fossasia__open-event-server-2489 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Propose attendees/ticketing API
With the orga app and the implementation of API endpoints in this PR https://github.com/fossasia/open-event-orga-server/pull/2379 we have the first steps toward an attendee API. To what extent would that overlap with a ticketing API?
What is the best way to implement this and keep it generic? Do we need two APIs - Attendees and Ticketing or would that be handled in one API?
Also related to https://github.com/fossasia/open-event-orga-server/issues/904
</issue>
<code>
[start of app/api/tickets.py]
1 from flask.ext.restplus import Namespace
2
3 from app.helpers.ticketing import TicketingManager
4
5 from .helpers.helpers import (
6 requires_auth,
7 can_access)
8 from .helpers.utils import POST_RESPONSES
9 from .helpers.utils import Resource
10 from .helpers import custom_fields as fields
11 from ..helpers.data_getter import DataGetter
12
13 api = Namespace('tickets', description='Tickets', path='/')
14
15 ORDER = api.model('Order', {
16 'id': fields.Integer(),
17 'identifier': fields.String(),
18 'amount': fields.Float(),
19 'paid_via': fields.String(),
20 'invoice_number': fields.String(),
21 'payment_mode': fields.String(),
22 'status': fields.String(),
23 'completed_at': fields.DateTime(),
24 })
25
26 TICKET = api.model('Ticket', {
27 'id': fields.Integer(),
28 'name': fields.String(),
29 'description': fields.String(),
30 'type': fields.String(),
31 'price': fields.Float(),
32 'quantity': fields.Integer(),
33 })
34
35
36 @api.route('/events/<int:event_id>/tickets/')
37 class TicketsList(Resource):
38 @requires_auth
39 @api.doc('tickets', responses=POST_RESPONSES)
40 @api.marshal_list_with(TICKET)
41 def get(self, event_id):
42 """Get tickets of the event"""
43 return DataGetter.get_sales_open_tickets(event_id=event_id).all()
44
45
46 @api.route('/events/<int:event_id>/tickets/<int:ticket_id>')
47 class Ticket(Resource):
48 @requires_auth
49 @api.doc('ticket', responses=POST_RESPONSES)
50 @api.marshal_with(TICKET)
51 def get(self, event_id, ticket_id):
52 """Get information about a ticket"""
53 return TicketingManager.get_ticket(ticket_id=ticket_id)
54
55
56
57
[end of app/api/tickets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/api/tickets.py b/app/api/tickets.py
--- a/app/api/tickets.py
+++ b/app/api/tickets.py
@@ -52,5 +52,13 @@
"""Get information about a ticket"""
return TicketingManager.get_ticket(ticket_id=ticket_id)
[email protected]('/events/<int:event_id>/orders/<string:identifier>')
+class Order(Resource):
+ @requires_auth
+ @api.doc('order', responses=POST_RESPONSES)
+ @api.marshal_with(ORDER)
+ def get(self, event_id, identifier):
+ """Get information about a ticket"""
+ return TicketingManager.get_order_by_identifier(identifier=identifier)
| {"golden_diff": "diff --git a/app/api/tickets.py b/app/api/tickets.py\n--- a/app/api/tickets.py\n+++ b/app/api/tickets.py\n@@ -52,5 +52,13 @@\n \"\"\"Get information about a ticket\"\"\"\n return TicketingManager.get_ticket(ticket_id=ticket_id)\n \[email protected]('/events/<int:event_id>/orders/<string:identifier>')\n+class Order(Resource):\n+ @requires_auth\n+ @api.doc('order', responses=POST_RESPONSES)\n+ @api.marshal_with(ORDER)\n+ def get(self, event_id, identifier):\n+ \"\"\"Get information about a ticket\"\"\"\n+ return TicketingManager.get_order_by_identifier(identifier=identifier)\n", "issue": "Propose attendees/ticketing API\nWith the orga app and the implementation of API endpoints in this PR https://github.com/fossasia/open-event-orga-server/pull/2379 we have the first steps to an attendee API. In how far would that overlap with a ticketing API?\n\nWhat is the best way to implement this and keep it generic? Do we need two APIs - Attendees and Ticketing or would that be handled in one API?\n\nAlso related to https://github.com/fossasia/open-event-orga-server/issues/904\n\n", "before_files": [{"content": "from flask.ext.restplus import Namespace\n\nfrom app.helpers.ticketing import TicketingManager\n\nfrom .helpers.helpers import (\n requires_auth,\n can_access)\nfrom .helpers.utils import POST_RESPONSES\nfrom .helpers.utils import Resource\nfrom .helpers import custom_fields as fields\nfrom ..helpers.data_getter import DataGetter\n\napi = Namespace('tickets', description='Tickets', path='/')\n\nORDER = api.model('Order', {\n 'id': fields.Integer(),\n 'identifier': fields.String(),\n 'amount': fields.Float(),\n 'paid_via': fields.String(),\n 'invoice_number': fields.String(),\n 'payment_mode': fields.String(),\n 'status': fields.String(),\n 'completed_at': fields.DateTime(),\n})\n\nTICKET = api.model('Ticket', {\n 'id': fields.Integer(),\n 'name': fields.String(),\n 'description': fields.String(),\n 'type': fields.String(),\n 'price': fields.Float(),\n 'quantity': fields.Integer(),\n})\n\n\[email protected]('/events/<int:event_id>/tickets/')\nclass TicketsList(Resource):\n @requires_auth\n @api.doc('tickets', responses=POST_RESPONSES)\n @api.marshal_list_with(TICKET)\n def get(self, event_id):\n \"\"\"Get tickets of the event\"\"\"\n return DataGetter.get_sales_open_tickets(event_id=event_id).all()\n\n\[email protected]('/events/<int:event_id>/tickets/<int:ticket_id>')\nclass Ticket(Resource):\n @requires_auth\n @api.doc('ticket', responses=POST_RESPONSES)\n @api.marshal_with(TICKET)\n def get(self, event_id, ticket_id):\n \"\"\"Get information about a ticket\"\"\"\n return TicketingManager.get_ticket(ticket_id=ticket_id)\n\n\n\n", "path": "app/api/tickets.py"}]} | 1,129 | 154 |
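The new route above exposes a single order lookup by its string identifier next to the existing ticket endpoints. A hypothetical client call (host, event id and identifier are made up; the real endpoint also requires authentication via `@requires_auth`):

```python
# Hypothetical request against the endpoint added by the patch; the response
# is shaped by the ORDER model (id, identifier, amount, status, ...).
import requests

resp = requests.get("https://example.test/events/42/orders/ab12cd34ef")
order = resp.json()
print(order["status"], order["amount"])
```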
gh_patches_debug_43093 | rasdani/github-patches | git_diff | python-telegram-bot__python-telegram-bot-3261 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Suggestion] Add chat(s) parameter to ChatJoinRequestHandler
This param should allow to filter out chats which will be handled by the ChatJoinRequestHandler, much like the pattern argument of the CallbackQueryHandler. It should allow "username" strings as well as ids and if set, the handler should check if the incoming update is from that chat.
For first time contributors, check how CallbackQueryHandler implements the pattern argument in check_update: https://github.com/python-telegram-bot/python-telegram-bot/blob/master/telegram/ext/_callbackqueryhandler.py#L123
</issue>
<code>
[start of telegram/ext/_chatjoinrequesthandler.py]
1 #!/usr/bin/env python
2 #
3 # A library that provides a Python interface to the Telegram Bot API
4 # Copyright (C) 2015-2022
5 # Leandro Toledo de Souza <[email protected]>
6 #
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Lesser Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Lesser Public License for more details.
16 #
17 # You should have received a copy of the GNU Lesser Public License
18 # along with this program. If not, see [http://www.gnu.org/licenses/].
19 """This module contains the ChatJoinRequestHandler class."""
20
21
22 from telegram import Update
23 from telegram.ext._handler import BaseHandler
24 from telegram.ext._utils.types import CCT
25
26
27 class ChatJoinRequestHandler(BaseHandler[Update, CCT]):
28 """BaseHandler class to handle Telegram updates that contain
29 :attr:`telegram.Update.chat_join_request`.
30
31 Warning:
32 When setting :paramref:`block` to :obj:`False`, you cannot rely on adding custom
33 attributes to :class:`telegram.ext.CallbackContext`. See its docs for more info.
34
35 .. versionadded:: 13.8
36
37 Args:
38 callback (:term:`coroutine function`): The callback function for this handler. Will be
39 called when :meth:`check_update` has determined that an update should be processed by
40 this handler. Callback signature::
41
42 async def callback(update: Update, context: CallbackContext)
43
44 The return value of the callback is usually ignored except for the special case of
45 :class:`telegram.ext.ConversationHandler`.
46 block (:obj:`bool`, optional): Determines whether the return value of the callback should
47 be awaited before processing the next handler in
48 :meth:`telegram.ext.Application.process_update`. Defaults to :obj:`True`.
49
50 Attributes:
51 callback (:term:`coroutine function`): The callback function for this handler.
52 block (:obj:`bool`): Determines whether the callback will run in a blocking way..
53
54 """
55
56 __slots__ = ()
57
58 def check_update(self, update: object) -> bool:
59 """Determines whether an update should be passed to this handler's :attr:`callback`.
60
61 Args:
62 update (:class:`telegram.Update` | :obj:`object`): Incoming update.
63
64 Returns:
65 :obj:`bool`
66
67 """
68 return isinstance(update, Update) and bool(update.chat_join_request)
69
[end of telegram/ext/_chatjoinrequesthandler.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/telegram/ext/_chatjoinrequesthandler.py b/telegram/ext/_chatjoinrequesthandler.py
--- a/telegram/ext/_chatjoinrequesthandler.py
+++ b/telegram/ext/_chatjoinrequesthandler.py
@@ -18,16 +18,27 @@
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains the ChatJoinRequestHandler class."""
+from typing import FrozenSet, Optional
from telegram import Update
+from telegram._utils.defaultvalue import DEFAULT_TRUE
+from telegram._utils.types import RT, SCT, DVInput
from telegram.ext._handler import BaseHandler
-from telegram.ext._utils.types import CCT
+from telegram.ext._utils.types import CCT, HandlerCallback
class ChatJoinRequestHandler(BaseHandler[Update, CCT]):
"""BaseHandler class to handle Telegram updates that contain
:attr:`telegram.Update.chat_join_request`.
+ Note:
+ If neither of :paramref:`username` and the :paramref:`chat_id` are passed, this handler
+ accepts *any* join request. Otherwise, this handler accepts all requests to join chats
+ for which the chat ID is listed in :paramref:`chat_id` or the username is listed in
+ :paramref:`username`, or both.
+
+ .. versionadded:: 20.0
+
Warning:
When setting :paramref:`block` to :obj:`False`, you cannot rely on adding custom
attributes to :class:`telegram.ext.CallbackContext`. See its docs for more info.
@@ -43,6 +54,14 @@
The return value of the callback is usually ignored except for the special case of
:class:`telegram.ext.ConversationHandler`.
+ chat_id (:obj:`int` | Collection[:obj:`int`], optional): Filters requests to allow only
+ those which are asking to join the specified chat ID(s).
+
+ .. versionadded:: 20.0
+ username (:obj:`str` | Collection[:obj:`str`], optional): Filters requests to allow only
+ those which are asking to join the specified username(s).
+
+ .. versionadded:: 20.0
block (:obj:`bool`, optional): Determines whether the return value of the callback should
be awaited before processing the next handler in
:meth:`telegram.ext.Application.process_update`. Defaults to :obj:`True`.
@@ -53,7 +72,38 @@
"""
- __slots__ = ()
+ __slots__ = (
+ "_chat_ids",
+ "_usernames",
+ )
+
+ def __init__(
+ self,
+ callback: HandlerCallback[Update, CCT, RT],
+ chat_id: SCT[int] = None,
+ username: SCT[str] = None,
+ block: DVInput[bool] = DEFAULT_TRUE,
+ ):
+ super().__init__(callback, block=block)
+
+ self._chat_ids = self._parse_chat_id(chat_id)
+ self._usernames = self._parse_username(username)
+
+ @staticmethod
+ def _parse_chat_id(chat_id: Optional[SCT[int]]) -> FrozenSet[int]:
+ if chat_id is None:
+ return frozenset()
+ if isinstance(chat_id, int):
+ return frozenset({chat_id})
+ return frozenset(chat_id)
+
+ @staticmethod
+ def _parse_username(username: Optional[SCT[str]]) -> FrozenSet[str]:
+ if username is None:
+ return frozenset()
+ if isinstance(username, str):
+ return frozenset({username[1:] if username.startswith("@") else username})
+ return frozenset({usr[1:] if usr.startswith("@") else usr for usr in username})
def check_update(self, update: object) -> bool:
"""Determines whether an update should be passed to this handler's :attr:`callback`.
@@ -65,4 +115,12 @@
:obj:`bool`
"""
- return isinstance(update, Update) and bool(update.chat_join_request)
+ if isinstance(update, Update) and update.chat_join_request:
+ if not self._chat_ids and not self._usernames:
+ return True
+ if update.chat_join_request.chat.id in self._chat_ids:
+ return True
+ if update.chat_join_request.from_user.username in self._usernames:
+ return True
+ return False
+ return False
| {"golden_diff": "diff --git a/telegram/ext/_chatjoinrequesthandler.py b/telegram/ext/_chatjoinrequesthandler.py\n--- a/telegram/ext/_chatjoinrequesthandler.py\n+++ b/telegram/ext/_chatjoinrequesthandler.py\n@@ -18,16 +18,27 @@\n # along with this program. If not, see [http://www.gnu.org/licenses/].\n \"\"\"This module contains the ChatJoinRequestHandler class.\"\"\"\n \n+from typing import FrozenSet, Optional\n \n from telegram import Update\n+from telegram._utils.defaultvalue import DEFAULT_TRUE\n+from telegram._utils.types import RT, SCT, DVInput\n from telegram.ext._handler import BaseHandler\n-from telegram.ext._utils.types import CCT\n+from telegram.ext._utils.types import CCT, HandlerCallback\n \n \n class ChatJoinRequestHandler(BaseHandler[Update, CCT]):\n \"\"\"BaseHandler class to handle Telegram updates that contain\n :attr:`telegram.Update.chat_join_request`.\n \n+ Note:\n+ If neither of :paramref:`username` and the :paramref:`chat_id` are passed, this handler\n+ accepts *any* join request. Otherwise, this handler accepts all requests to join chats\n+ for which the chat ID is listed in :paramref:`chat_id` or the username is listed in\n+ :paramref:`username`, or both.\n+\n+ .. versionadded:: 20.0\n+\n Warning:\n When setting :paramref:`block` to :obj:`False`, you cannot rely on adding custom\n attributes to :class:`telegram.ext.CallbackContext`. See its docs for more info.\n@@ -43,6 +54,14 @@\n \n The return value of the callback is usually ignored except for the special case of\n :class:`telegram.ext.ConversationHandler`.\n+ chat_id (:obj:`int` | Collection[:obj:`int`], optional): Filters requests to allow only\n+ those which are asking to join the specified chat ID(s).\n+\n+ .. versionadded:: 20.0\n+ username (:obj:`str` | Collection[:obj:`str`], optional): Filters requests to allow only\n+ those which are asking to join the specified username(s).\n+\n+ .. versionadded:: 20.0\n block (:obj:`bool`, optional): Determines whether the return value of the callback should\n be awaited before processing the next handler in\n :meth:`telegram.ext.Application.process_update`. 
Defaults to :obj:`True`.\n@@ -53,7 +72,38 @@\n \n \"\"\"\n \n- __slots__ = ()\n+ __slots__ = (\n+ \"_chat_ids\",\n+ \"_usernames\",\n+ )\n+\n+ def __init__(\n+ self,\n+ callback: HandlerCallback[Update, CCT, RT],\n+ chat_id: SCT[int] = None,\n+ username: SCT[str] = None,\n+ block: DVInput[bool] = DEFAULT_TRUE,\n+ ):\n+ super().__init__(callback, block=block)\n+\n+ self._chat_ids = self._parse_chat_id(chat_id)\n+ self._usernames = self._parse_username(username)\n+\n+ @staticmethod\n+ def _parse_chat_id(chat_id: Optional[SCT[int]]) -> FrozenSet[int]:\n+ if chat_id is None:\n+ return frozenset()\n+ if isinstance(chat_id, int):\n+ return frozenset({chat_id})\n+ return frozenset(chat_id)\n+\n+ @staticmethod\n+ def _parse_username(username: Optional[SCT[str]]) -> FrozenSet[str]:\n+ if username is None:\n+ return frozenset()\n+ if isinstance(username, str):\n+ return frozenset({username[1:] if username.startswith(\"@\") else username})\n+ return frozenset({usr[1:] if usr.startswith(\"@\") else usr for usr in username})\n \n def check_update(self, update: object) -> bool:\n \"\"\"Determines whether an update should be passed to this handler's :attr:`callback`.\n@@ -65,4 +115,12 @@\n :obj:`bool`\n \n \"\"\"\n- return isinstance(update, Update) and bool(update.chat_join_request)\n+ if isinstance(update, Update) and update.chat_join_request:\n+ if not self._chat_ids and not self._usernames:\n+ return True\n+ if update.chat_join_request.chat.id in self._chat_ids:\n+ return True\n+ if update.chat_join_request.from_user.username in self._usernames:\n+ return True\n+ return False\n+ return False\n", "issue": "[Suggestion] Add chat(s) parameter to ChatJoinRequestHandler\nThis param should allow to filter out chats which will be handled by the ChatJoinRequestHandler, much like the pattern argument of the CallbackQueryHandler. It should allow \"username\" strings as well as ids and if set, the handler should check if the incoming update is from that chat.\r\n\r\nFor first time contributors, check how CallbackQueryHandler implements the pattern argument in check_update: https://github.com/python-telegram-bot/python-telegram-bot/blob/master/telegram/ext/_callbackqueryhandler.py#L123\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2022\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. 
If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains the ChatJoinRequestHandler class.\"\"\"\n\n\nfrom telegram import Update\nfrom telegram.ext._handler import BaseHandler\nfrom telegram.ext._utils.types import CCT\n\n\nclass ChatJoinRequestHandler(BaseHandler[Update, CCT]):\n \"\"\"BaseHandler class to handle Telegram updates that contain\n :attr:`telegram.Update.chat_join_request`.\n\n Warning:\n When setting :paramref:`block` to :obj:`False`, you cannot rely on adding custom\n attributes to :class:`telegram.ext.CallbackContext`. See its docs for more info.\n\n .. versionadded:: 13.8\n\n Args:\n callback (:term:`coroutine function`): The callback function for this handler. Will be\n called when :meth:`check_update` has determined that an update should be processed by\n this handler. Callback signature::\n\n async def callback(update: Update, context: CallbackContext)\n\n The return value of the callback is usually ignored except for the special case of\n :class:`telegram.ext.ConversationHandler`.\n block (:obj:`bool`, optional): Determines whether the return value of the callback should\n be awaited before processing the next handler in\n :meth:`telegram.ext.Application.process_update`. Defaults to :obj:`True`.\n\n Attributes:\n callback (:term:`coroutine function`): The callback function for this handler.\n block (:obj:`bool`): Determines whether the callback will run in a blocking way..\n\n \"\"\"\n\n __slots__ = ()\n\n def check_update(self, update: object) -> bool:\n \"\"\"Determines whether an update should be passed to this handler's :attr:`callback`.\n\n Args:\n update (:class:`telegram.Update` | :obj:`object`): Incoming update.\n\n Returns:\n :obj:`bool`\n\n \"\"\"\n return isinstance(update, Update) and bool(update.chat_join_request)\n", "path": "telegram/ext/_chatjoinrequesthandler.py"}]} | 1,383 | 988 |
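Editor's note on the record above: the accepted patch gives `ChatJoinRequestHandler` optional `chat_id` and `username` filters. A hedged usage sketch follows — it assumes the python-telegram-bot v20 `Application` API referenced in the diff, and the bot token, chat ID and username are placeholders. Per the patched `check_update`, `chat_id` matches the chat being joined while `username` matches the requesting user's username (a leading `@` is stripped), and omitting both keeps the old accept-everything behaviour.

```python
# Sketch only: wiring up the extended handler. Token, chat ID and username are
# invented values; the handler/Application API follows the diff above.
from telegram import Update
from telegram.ext import Application, ChatJoinRequestHandler, ContextTypes


async def on_join_request(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    # Approve any request that made it through the handler's filters.
    await update.chat_join_request.approve()


application = Application.builder().token("BOT-TOKEN").build()

# Requests to join chat -1001234567890, or requests made by user "some_user",
# reach on_join_request; with no filters every join request would be handled.
application.add_handler(
    ChatJoinRequestHandler(on_join_request, chat_id=-1001234567890, username="some_user")
)

application.run_polling()
```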
gh_patches_debug_8876 | rasdani/github-patches | git_diff | microsoft__MLOS-211 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Sphinx Python API docs generation broken in recent nightly CI runs
For example: <https://github.com/microsoft/MLOS/runs/1635132574?check_suite_focus=true>
</issue>
<code>
[start of source/Mlos.Python/mlos/Spaces/Point.py]
1 #
2 # Copyright (c) Microsoft Corporation.
3 # Licensed under the MIT License.
4 #
5 import json
6 from numbers import Number
7
8 import pandas as pd
9 from mlos.Spaces.Dimensions.Dimension import Dimension
10
11
12 class Point:
13 """ Models a point in a Hypergrid.
14
15 """
16 def __init__(self, **kwargs):
17 self.dimension_value_dict = dict()
18 for dimension_name, value in kwargs.items():
19 self[dimension_name] = value
20
21 def copy(self):
22 return Point(**{key: value for key, value in self})
23
24 def flat_copy(self):
25 """ Creates a copy of the point but all dimension names are flattened.
26
27 :return:
28 """
29 flat_dict = {
30 Dimension.flatten_dimension_name(dimension_name): value
31 for dimension_name, value in self
32 }
33 return Point(**flat_dict)
34
35 def __eq__(self, other):
36 if not isinstance(other, Point):
37 return False
38 return \
39 all(other.get(dimension_name, None) == value for dimension_name, value in self) \
40 and \
41 all(self.get(dimension_name, None) == value for dimension_name, value in other)
42
43 def __ne__(self, other):
44 return not self == other
45
46 def __iter__(self):
47 for dimension_name, value in self.dimension_value_dict.items():
48 if not isinstance(value, Point):
49 yield dimension_name, value
50 else:
51 for sub_dimension_name, sub_dimension_value in value:
52 yield dimension_name + "." + sub_dimension_name, sub_dimension_value
53
54 def __getattr__(self, dimension_name):
55 if dimension_name == "__isabstractmethod__":
56 # A sad but necessary way to deal with ABC.
57 return False
58 return self[dimension_name]
59
60 def __setattr__(self, name, value):
61 if name == "dimension_value_dict":
62 self.__dict__[name] = value
63 else:
64 dimension_name = name
65 subgrid_name, dimension_name_without_subgrid_name = Dimension.split_dimension_name(dimension_name)
66 if subgrid_name is None:
67 self.dimension_value_dict[dimension_name] = value
68 else:
69 point_in_subgrid = self.dimension_value_dict.get(subgrid_name, Point())
70 point_in_subgrid[dimension_name_without_subgrid_name] = value
71 self.dimension_value_dict[subgrid_name] = point_in_subgrid
72
73 def __getitem__(self, dimension_name):
74 if dimension_name not in self:
75 raise KeyError(f"This Point does not have a value along dimension: {dimension_name}")
76 subgrid_name, dimension_name_without_subgrid_name = Dimension.split_dimension_name(dimension_name)
77 if subgrid_name is None:
78 return self.dimension_value_dict[dimension_name]
79 return self[subgrid_name][dimension_name_without_subgrid_name]
80
81 def get(self, dimension_name, default=None):
82 try:
83 return self[dimension_name]
84 except KeyError:
85 return default
86
87 def __setitem__(self, dimension_name, value):
88 subgrid_name, dimension_name_without_subgrid_name = Dimension.split_dimension_name(dimension_name)
89 if subgrid_name is None:
90 self.dimension_value_dict[dimension_name] = value
91 else:
92 point_in_subgrid = self.dimension_value_dict.get(subgrid_name, Point())
93 point_in_subgrid[dimension_name_without_subgrid_name] = value
94 self.dimension_value_dict[subgrid_name] = point_in_subgrid
95
96 def __contains__(self, dimension_name):
97 subgrid_name, dimension_name_without_subgrid_name = Dimension.split_dimension_name(dimension_name)
98 if subgrid_name is None:
99 return dimension_name in self.dimension_value_dict
100 if subgrid_name not in self.dimension_value_dict:
101 return False
102 return dimension_name_without_subgrid_name in self[subgrid_name]
103
104 def __repr__(self):
105 return self.__str__()
106
107 def __str__(self):
108 return str(self.to_json(indent=2))
109
110 def __getstate__(self):
111 return self.to_json()
112
113 def __setstate__(self, state):
114 temp_point = self.from_json(state)
115 self.dimension_value_dict = temp_point.dimension_value_dict
116
117 def to_json(self, indent=None):
118 if indent is not None:
119 return json.dumps(self.to_dict(), indent=indent)
120 return json.dumps(self.to_dict())
121
122 @classmethod
123 def from_json(cls, json_str):
124 coordinates = json.loads(json_str)
125 return Point(**coordinates)
126
127 def to_dict(self):
128 return_dict = {}
129 for param_name, value in self:
130 if isinstance(value, Number) and int(value) == value and not isinstance(value, bool):
131 value = int(value)
132 return_dict[param_name] = value
133 return return_dict
134
135 def to_dataframe(self):
136 return pd.DataFrame({param_name: [value] for param_name, value in self})
137
138 @classmethod
139 def from_dataframe(cls, dataframe: pd.DataFrame):
140 assert len(dataframe.index) == 1
141 dataframe = dataframe.dropna(axis=1)
142 dataframe_dict = dataframe.to_dict(orient='list')
143 point_dict = {key: values[0] for key, values in dataframe_dict.items()}
144 return Point(**point_dict)
145
[end of source/Mlos.Python/mlos/Spaces/Point.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/source/Mlos.Python/mlos/Spaces/Point.py b/source/Mlos.Python/mlos/Spaces/Point.py
--- a/source/Mlos.Python/mlos/Spaces/Point.py
+++ b/source/Mlos.Python/mlos/Spaces/Point.py
@@ -55,7 +55,10 @@
if dimension_name == "__isabstractmethod__":
# A sad but necessary way to deal with ABC.
return False
- return self[dimension_name]
+ try:
+ return self[dimension_name]
+ except KeyError:
+ raise AttributeError(f"This Point does not have a {dimension_name} attribute.")
def __setattr__(self, name, value):
if name == "dimension_value_dict":
| {"golden_diff": "diff --git a/source/Mlos.Python/mlos/Spaces/Point.py b/source/Mlos.Python/mlos/Spaces/Point.py\n--- a/source/Mlos.Python/mlos/Spaces/Point.py\n+++ b/source/Mlos.Python/mlos/Spaces/Point.py\n@@ -55,7 +55,10 @@\n if dimension_name == \"__isabstractmethod__\":\r\n # A sad but necessary way to deal with ABC.\r\n return False\r\n- return self[dimension_name]\r\n+ try:\r\n+ return self[dimension_name]\r\n+ except KeyError:\r\n+ raise AttributeError(f\"This Point does not have a {dimension_name} attribute.\")\r\n \r\n def __setattr__(self, name, value):\r\n if name == \"dimension_value_dict\":\n", "issue": "Sphinx Python API docs generation broken in recent nightly CI runs\nFor example: <https://github.com/microsoft/MLOS/runs/1635132574?check_suite_focus=true>\n", "before_files": [{"content": "#\r\n# Copyright (c) Microsoft Corporation.\r\n# Licensed under the MIT License.\r\n#\r\nimport json\r\nfrom numbers import Number\r\n\r\nimport pandas as pd\r\nfrom mlos.Spaces.Dimensions.Dimension import Dimension\r\n\r\n\r\nclass Point:\r\n \"\"\" Models a point in a Hypergrid.\r\n\r\n \"\"\"\r\n def __init__(self, **kwargs):\r\n self.dimension_value_dict = dict()\r\n for dimension_name, value in kwargs.items():\r\n self[dimension_name] = value\r\n\r\n def copy(self):\r\n return Point(**{key: value for key, value in self})\r\n\r\n def flat_copy(self):\r\n \"\"\" Creates a copy of the point but all dimension names are flattened.\r\n\r\n :return:\r\n \"\"\"\r\n flat_dict = {\r\n Dimension.flatten_dimension_name(dimension_name): value\r\n for dimension_name, value in self\r\n }\r\n return Point(**flat_dict)\r\n\r\n def __eq__(self, other):\r\n if not isinstance(other, Point):\r\n return False\r\n return \\\r\n all(other.get(dimension_name, None) == value for dimension_name, value in self) \\\r\n and \\\r\n all(self.get(dimension_name, None) == value for dimension_name, value in other)\r\n\r\n def __ne__(self, other):\r\n return not self == other\r\n\r\n def __iter__(self):\r\n for dimension_name, value in self.dimension_value_dict.items():\r\n if not isinstance(value, Point):\r\n yield dimension_name, value\r\n else:\r\n for sub_dimension_name, sub_dimension_value in value:\r\n yield dimension_name + \".\" + sub_dimension_name, sub_dimension_value\r\n\r\n def __getattr__(self, dimension_name):\r\n if dimension_name == \"__isabstractmethod__\":\r\n # A sad but necessary way to deal with ABC.\r\n return False\r\n return self[dimension_name]\r\n\r\n def __setattr__(self, name, value):\r\n if name == \"dimension_value_dict\":\r\n self.__dict__[name] = value\r\n else:\r\n dimension_name = name\r\n subgrid_name, dimension_name_without_subgrid_name = Dimension.split_dimension_name(dimension_name)\r\n if subgrid_name is None:\r\n self.dimension_value_dict[dimension_name] = value\r\n else:\r\n point_in_subgrid = self.dimension_value_dict.get(subgrid_name, Point())\r\n point_in_subgrid[dimension_name_without_subgrid_name] = value\r\n self.dimension_value_dict[subgrid_name] = point_in_subgrid\r\n\r\n def __getitem__(self, dimension_name):\r\n if dimension_name not in self:\r\n raise KeyError(f\"This Point does not have a value along dimension: {dimension_name}\")\r\n subgrid_name, dimension_name_without_subgrid_name = Dimension.split_dimension_name(dimension_name)\r\n if subgrid_name is None:\r\n return self.dimension_value_dict[dimension_name]\r\n return self[subgrid_name][dimension_name_without_subgrid_name]\r\n\r\n def get(self, dimension_name, default=None):\r\n try:\r\n return 
self[dimension_name]\r\n except KeyError:\r\n return default\r\n\r\n def __setitem__(self, dimension_name, value):\r\n subgrid_name, dimension_name_without_subgrid_name = Dimension.split_dimension_name(dimension_name)\r\n if subgrid_name is None:\r\n self.dimension_value_dict[dimension_name] = value\r\n else:\r\n point_in_subgrid = self.dimension_value_dict.get(subgrid_name, Point())\r\n point_in_subgrid[dimension_name_without_subgrid_name] = value\r\n self.dimension_value_dict[subgrid_name] = point_in_subgrid\r\n\r\n def __contains__(self, dimension_name):\r\n subgrid_name, dimension_name_without_subgrid_name = Dimension.split_dimension_name(dimension_name)\r\n if subgrid_name is None:\r\n return dimension_name in self.dimension_value_dict\r\n if subgrid_name not in self.dimension_value_dict:\r\n return False\r\n return dimension_name_without_subgrid_name in self[subgrid_name]\r\n\r\n def __repr__(self):\r\n return self.__str__()\r\n\r\n def __str__(self):\r\n return str(self.to_json(indent=2))\r\n\r\n def __getstate__(self):\r\n return self.to_json()\r\n\r\n def __setstate__(self, state):\r\n temp_point = self.from_json(state)\r\n self.dimension_value_dict = temp_point.dimension_value_dict\r\n\r\n def to_json(self, indent=None):\r\n if indent is not None:\r\n return json.dumps(self.to_dict(), indent=indent)\r\n return json.dumps(self.to_dict())\r\n\r\n @classmethod\r\n def from_json(cls, json_str):\r\n coordinates = json.loads(json_str)\r\n return Point(**coordinates)\r\n\r\n def to_dict(self):\r\n return_dict = {}\r\n for param_name, value in self:\r\n if isinstance(value, Number) and int(value) == value and not isinstance(value, bool):\r\n value = int(value)\r\n return_dict[param_name] = value\r\n return return_dict\r\n\r\n def to_dataframe(self):\r\n return pd.DataFrame({param_name: [value] for param_name, value in self})\r\n\r\n @classmethod\r\n def from_dataframe(cls, dataframe: pd.DataFrame):\r\n assert len(dataframe.index) == 1\r\n dataframe = dataframe.dropna(axis=1)\r\n dataframe_dict = dataframe.to_dict(orient='list')\r\n point_dict = {key: values[0] for key, values in dataframe_dict.items()}\r\n return Point(**point_dict)\r\n", "path": "source/Mlos.Python/mlos/Spaces/Point.py"}]} | 2,042 | 163 |
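Editor's note on the record above: the accepted patch converts the `KeyError` escaping from `Point.__getattr__` into an `AttributeError`. The likely reason this fixes the Sphinx build is that Python's attribute protocol — `hasattr`, `getattr` with a default, and introspection tools such as autodoc — only treats `AttributeError` as "attribute missing". A tiny self-contained demonstration (plain Python, no MLOS imports):

```python
# Why __getattr__ must raise AttributeError: hasattr()/getattr() swallow only
# AttributeError, so any other exception escapes and crashes introspection.
class Broken:
    def __getattr__(self, name):
        raise KeyError(name)        # what the pre-patch Point effectively did


class Fixed:
    def __getattr__(self, name):
        raise AttributeError(f"This object does not have a {name} attribute.")


try:
    hasattr(Broken(), "anything")
except KeyError as exc:
    print("hasattr() blew up with KeyError:", exc)

print(hasattr(Fixed(), "anything"))  # False -- missing attribute, no crash
```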
gh_patches_debug_37529 | rasdani/github-patches | git_diff | dmlc__dgl-5377 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Sparse] Support column-wise softmax.
## 🔨Work Item
**IMPORTANT:**
* This template is only for dev team to track project progress. For feature request or bug report, please use the corresponding issue templates.
* DO NOT create a new work item if the purpose is to fix an existing issue or feature request. We will directly use the issue in the project tracker.
Project tracker: https://github.com/orgs/dmlc/projects/2
## Description
<!-- short description of the work item -->
## Depending work items or issues
<!-- what must be done before this -->
</issue>
<code>
[start of python/dgl/sparse/softmax.py]
1 """Softmax op for SparseMatrix"""
2 # pylint: disable=invalid-name, W0622
3
4 import torch
5
6 from .sparse_matrix import SparseMatrix
7
8 __all__ = ["softmax"]
9
10
11 def softmax(input: SparseMatrix) -> SparseMatrix:
12 """Applies row-wise softmax to the non-zero elements of the sparse matrix.
13
14 Equivalently, applies softmax to the non-zero elements of the sparse
15 matrix along the column (``dim=1``) dimension.
16
17 If :attr:`input.val` takes shape ``(nnz, D)``, then the output matrix
18 :attr:`output` and :attr:`output.val` take the same shape as :attr:`input`
19 and :attr:`input.val`. :attr:`output.val[:, i]` is calculated based on
20 :attr:`input.val[:, i]`.
21
22 Parameters
23 ----------
24 input : SparseMatrix
25 The input sparse matrix
26
27 Returns
28 -------
29 SparseMatrix
30 The output sparse matrix
31
32 Examples
33 --------
34
35 Case1: matrix with values of shape (nnz)
36
37 >>> indices = torch.tensor([[0, 0, 1, 2], [1, 2, 2, 0]])
38 >>> nnz = len(row)
39 >>> val = torch.arange(nnz).float()
40 >>> A = dglsp.spmatrix(indices, val)
41 >>> dglsp.softmax(A)
42 SparseMatrix(indices=tensor([[0, 0, 1, 2],
43 [1, 2, 2, 0]]),
44 values=tensor([0.2689, 0.7311, 1.0000, 1.0000]),
45 shape=(3, 3), nnz=4)
46
47 Case2: matrix with values of shape (nnz, D)
48
49 >>> indices = torch.tensor([[0, 0, 1, 2], [1, 2, 2, 0]])
50 >>> val = torch.tensor([[0., 7.], [1., 3.], [2., 2.], [3., 1.]])
51 >>> A = dglsp.spmatrix(indices, val)
52 >>> dglsp.softmax(A)
53 SparseMatrix(indices=tensor([[0, 0, 1, 2],
54 [1, 2, 2, 0]]),
55 values=tensor([[0.2689, 0.9820],
56 [0.7311, 0.0180],
57 [1.0000, 1.0000],
58 [1.0000, 1.0000]]),
59 shape=(3, 3), nnz=4, val_size=(2,))
60 """
61 return SparseMatrix(torch.ops.dgl_sparse.softmax(input.c_sparse_matrix))
62
63
64 SparseMatrix.softmax = softmax
65
[end of python/dgl/sparse/softmax.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/dgl/sparse/softmax.py b/python/dgl/sparse/softmax.py
--- a/python/dgl/sparse/softmax.py
+++ b/python/dgl/sparse/softmax.py
@@ -8,11 +8,10 @@
__all__ = ["softmax"]
-def softmax(input: SparseMatrix) -> SparseMatrix:
- """Applies row-wise softmax to the non-zero elements of the sparse matrix.
-
- Equivalently, applies softmax to the non-zero elements of the sparse
- matrix along the column (``dim=1``) dimension.
+def softmax(input: SparseMatrix, dim: int = 1) -> SparseMatrix:
+ """Applies softmax to the non-zero elements of the sparse matrix on the
+ dimension :attr:``dim``. dim = 0 or 1 indicates column-wise or row-wise
+ softmax respectively.
If :attr:`input.val` takes shape ``(nnz, D)``, then the output matrix
:attr:`output` and :attr:`output.val` take the same shape as :attr:`input`
@@ -32,11 +31,10 @@
Examples
--------
- Case1: matrix with values of shape (nnz)
+ Case1: row-wise softmax on matrix with values of shape (nnz)
>>> indices = torch.tensor([[0, 0, 1, 2], [1, 2, 2, 0]])
- >>> nnz = len(row)
- >>> val = torch.arange(nnz).float()
+ >>> val = torch.tensor([0., 1., 2., 3.])
>>> A = dglsp.spmatrix(indices, val)
>>> dglsp.softmax(A)
SparseMatrix(indices=tensor([[0, 0, 1, 2],
@@ -44,7 +42,7 @@
values=tensor([0.2689, 0.7311, 1.0000, 1.0000]),
shape=(3, 3), nnz=4)
- Case2: matrix with values of shape (nnz, D)
+ Case2: row-wise softmax on matrix with values of shape (nnz, D)
>>> indices = torch.tensor([[0, 0, 1, 2], [1, 2, 2, 0]])
>>> val = torch.tensor([[0., 7.], [1., 3.], [2., 2.], [3., 1.]])
@@ -57,8 +55,21 @@
[1.0000, 1.0000],
[1.0000, 1.0000]]),
shape=(3, 3), nnz=4, val_size=(2,))
+
+ Case3: column-wise softmax on matrix with values of shape (nnz)
+
+ >>> indices = torch.tensor([[0, 0, 1, 2], [1, 2, 2, 0]])
+ >>> val = torch.tensor([0., 1., 2., 3.])
+ >>> A = dglsp.spmatrix(indices, val)
+ >>> dglsp.softmax(A, 0)
+ SparseMatrix(indices=tensor([[0, 0, 1, 2],
+ [1, 2, 2, 0]]),
+ values=tensor([1.0000, 0.2689, 0.7311, 1.0000]),
+ shape=(3, 3), nnz=4)
"""
- return SparseMatrix(torch.ops.dgl_sparse.softmax(input.c_sparse_matrix))
+ return SparseMatrix(
+ torch.ops.dgl_sparse.softmax(input.c_sparse_matrix, dim)
+ )
SparseMatrix.softmax = softmax
| {"golden_diff": "diff --git a/python/dgl/sparse/softmax.py b/python/dgl/sparse/softmax.py\n--- a/python/dgl/sparse/softmax.py\n+++ b/python/dgl/sparse/softmax.py\n@@ -8,11 +8,10 @@\n __all__ = [\"softmax\"]\n \n \n-def softmax(input: SparseMatrix) -> SparseMatrix:\n- \"\"\"Applies row-wise softmax to the non-zero elements of the sparse matrix.\n-\n- Equivalently, applies softmax to the non-zero elements of the sparse\n- matrix along the column (``dim=1``) dimension.\n+def softmax(input: SparseMatrix, dim: int = 1) -> SparseMatrix:\n+ \"\"\"Applies softmax to the non-zero elements of the sparse matrix on the\n+ dimension :attr:``dim``. dim = 0 or 1 indicates column-wise or row-wise\n+ softmax respectively.\n \n If :attr:`input.val` takes shape ``(nnz, D)``, then the output matrix\n :attr:`output` and :attr:`output.val` take the same shape as :attr:`input`\n@@ -32,11 +31,10 @@\n Examples\n --------\n \n- Case1: matrix with values of shape (nnz)\n+ Case1: row-wise softmax on matrix with values of shape (nnz)\n \n >>> indices = torch.tensor([[0, 0, 1, 2], [1, 2, 2, 0]])\n- >>> nnz = len(row)\n- >>> val = torch.arange(nnz).float()\n+ >>> val = torch.tensor([0., 1., 2., 3.])\n >>> A = dglsp.spmatrix(indices, val)\n >>> dglsp.softmax(A)\n SparseMatrix(indices=tensor([[0, 0, 1, 2],\n@@ -44,7 +42,7 @@\n values=tensor([0.2689, 0.7311, 1.0000, 1.0000]),\n shape=(3, 3), nnz=4)\n \n- Case2: matrix with values of shape (nnz, D)\n+ Case2: row-wise softmax on matrix with values of shape (nnz, D)\n \n >>> indices = torch.tensor([[0, 0, 1, 2], [1, 2, 2, 0]])\n >>> val = torch.tensor([[0., 7.], [1., 3.], [2., 2.], [3., 1.]])\n@@ -57,8 +55,21 @@\n [1.0000, 1.0000],\n [1.0000, 1.0000]]),\n shape=(3, 3), nnz=4, val_size=(2,))\n+\n+ Case3: column-wise softmax on matrix with values of shape (nnz)\n+\n+ >>> indices = torch.tensor([[0, 0, 1, 2], [1, 2, 2, 0]])\n+ >>> val = torch.tensor([0., 1., 2., 3.])\n+ >>> A = dglsp.spmatrix(indices, val)\n+ >>> dglsp.softmax(A, 0)\n+ SparseMatrix(indices=tensor([[0, 0, 1, 2],\n+ [1, 2, 2, 0]]),\n+ values=tensor([1.0000, 0.2689, 0.7311, 1.0000]),\n+ shape=(3, 3), nnz=4)\n \"\"\"\n- return SparseMatrix(torch.ops.dgl_sparse.softmax(input.c_sparse_matrix))\n+ return SparseMatrix(\n+ torch.ops.dgl_sparse.softmax(input.c_sparse_matrix, dim)\n+ )\n \n \n SparseMatrix.softmax = softmax\n", "issue": "[Sparse] Support column-wise softmax.\n## \ud83d\udd28Work Item\r\n\r\n**IMPORTANT:**\r\n* This template is only for dev team to track project progress. For feature request or bug report, please use the corresponding issue templates.\r\n* DO NOT create a new work item if the purpose is to fix an existing issue or feature request. 
We will directly use the issue in the project tracker.\r\n\r\nProject tracker: https://github.com/orgs/dmlc/projects/2\r\n\r\n## Description\r\n\r\n<!-- short description of the work item -->\r\n\r\n## Depending work items or issues\r\n\r\n<!-- what must be done before this -->\r\n\n", "before_files": [{"content": "\"\"\"Softmax op for SparseMatrix\"\"\"\n# pylint: disable=invalid-name, W0622\n\nimport torch\n\nfrom .sparse_matrix import SparseMatrix\n\n__all__ = [\"softmax\"]\n\n\ndef softmax(input: SparseMatrix) -> SparseMatrix:\n \"\"\"Applies row-wise softmax to the non-zero elements of the sparse matrix.\n\n Equivalently, applies softmax to the non-zero elements of the sparse\n matrix along the column (``dim=1``) dimension.\n\n If :attr:`input.val` takes shape ``(nnz, D)``, then the output matrix\n :attr:`output` and :attr:`output.val` take the same shape as :attr:`input`\n and :attr:`input.val`. :attr:`output.val[:, i]` is calculated based on\n :attr:`input.val[:, i]`.\n\n Parameters\n ----------\n input : SparseMatrix\n The input sparse matrix\n\n Returns\n -------\n SparseMatrix\n The output sparse matrix\n\n Examples\n --------\n\n Case1: matrix with values of shape (nnz)\n\n >>> indices = torch.tensor([[0, 0, 1, 2], [1, 2, 2, 0]])\n >>> nnz = len(row)\n >>> val = torch.arange(nnz).float()\n >>> A = dglsp.spmatrix(indices, val)\n >>> dglsp.softmax(A)\n SparseMatrix(indices=tensor([[0, 0, 1, 2],\n [1, 2, 2, 0]]),\n values=tensor([0.2689, 0.7311, 1.0000, 1.0000]),\n shape=(3, 3), nnz=4)\n\n Case2: matrix with values of shape (nnz, D)\n\n >>> indices = torch.tensor([[0, 0, 1, 2], [1, 2, 2, 0]])\n >>> val = torch.tensor([[0., 7.], [1., 3.], [2., 2.], [3., 1.]])\n >>> A = dglsp.spmatrix(indices, val)\n >>> dglsp.softmax(A)\n SparseMatrix(indices=tensor([[0, 0, 1, 2],\n [1, 2, 2, 0]]),\n values=tensor([[0.2689, 0.9820],\n [0.7311, 0.0180],\n [1.0000, 1.0000],\n [1.0000, 1.0000]]),\n shape=(3, 3), nnz=4, val_size=(2,))\n \"\"\"\n return SparseMatrix(torch.ops.dgl_sparse.softmax(input.c_sparse_matrix))\n\n\nSparseMatrix.softmax = softmax\n", "path": "python/dgl/sparse/softmax.py"}]} | 1,423 | 864 |
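Editor's note on the record above: the accepted patch adds a `dim` argument so `dglsp.softmax` can normalise over the non-zero entries of each column (`dim=0`) as well as each row (`dim=1`). To make that semantics concrete, here is a dense, plain-PyTorch sketch that reproduces the numbers in the patched docstring; it is only an illustration of the maths, not how `dgl_sparse` computes the result internally, and it does not handle rows or columns with no stored entries.

```python
# Dense reference for "softmax over the non-zero elements along dim".
# dim=1 normalises each row's stored entries, dim=0 each column's.
import torch


def masked_softmax(dense: torch.Tensor, mask: torch.Tensor, dim: int) -> torch.Tensor:
    logits = dense.masked_fill(~mask, float("-inf"))   # ignore absent entries
    out = torch.softmax(logits, dim=dim)
    return out.masked_fill(~mask, 0.0)                 # keep absent entries at zero


indices = torch.tensor([[0, 0, 1, 2], [1, 2, 2, 0]])
val = torch.tensor([0.0, 1.0, 2.0, 3.0])
dense = torch.zeros(3, 3)
mask = torch.zeros(3, 3, dtype=torch.bool)
dense[indices[0], indices[1]] = val
mask[indices[0], indices[1]] = True

print(masked_softmax(dense, mask, dim=1))  # row-wise, matches dglsp.softmax(A)
print(masked_softmax(dense, mask, dim=0))  # column-wise, matches dglsp.softmax(A, 0)
```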
gh_patches_debug_20857 | rasdani/github-patches | git_diff | bridgecrewio__checkov-3127 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
baseline output can change resource order for each run
If I generate a baseline file and I have then made some improvements to my Terraform code and I run the baseline again, what I am finding is that the order of the resources for each file can often change, which then shows up as a diff against the previous baseline file - when in reality nothing has changed but the order of the resources in the findings array in the baseline output file.
I was wondering could the findings array just be sorted before being output? Then the resource order should be fixed and any actual diffs should be real changes to check_ids (which is sorted already) or new resources being added?
e.g. this is a diff from two runs of generating a baseline file; nothing has actually changed, just resources moved around in the array.
```
@@ -100,13 +100,12 @@
"file": "/main.tf",
"findings": [
{
- "resource": "aws_s3_bucket.canary_artifacts",
+ "resource": "aws_s3_bucket.backups",
"check_ids": [
"CKV2_AWS_6",
"CKV_AWS_144",
"CKV_AWS_145",
- "CKV_AWS_18",
- "CKV_AWS_21"
+ "CKV_AWS_18"
]
},
{
@@ -119,12 +118,13 @@
]
},
{
- "resource": "aws_s3_bucket.lambdas",
+ "resource": "aws_s3_bucket.canary_artifacts",
"check_ids": [
"CKV2_AWS_6",
"CKV_AWS_144",
"CKV_AWS_145",
- "CKV_AWS_18"
+ "CKV_AWS_18",
+ "CKV_AWS_21"
]
},
{
@@ -137,7 +137,7 @@
]
},
{
- "resource": "aws_s3_bucket.backups",
+ "resource": "aws_s3_bucket.lambdas",
"check_ids": [
"CKV2_AWS_6",
"CKV_AWS_144",
```
</issue>
<code>
[start of checkov/common/output/baseline.py]
1 from __future__ import annotations
2
3 import json
4 from collections import defaultdict
5 from checkov.common.models.enums import CheckResult
6 from typing import Any, TYPE_CHECKING
7
8 if TYPE_CHECKING:
9 from checkov.common.output.record import Record
10 from checkov.common.output.report import Report
11 from checkov.common.typing import _BaselineFinding, _BaselineFailedChecks
12
13
14 class Baseline:
15 def __init__(self, output_skipped: bool = False) -> None:
16 self.path = ""
17 self.path_failed_checks_map: dict[str, list[_BaselineFinding]] = defaultdict(list)
18 self.failed_checks: list[_BaselineFailedChecks] = []
19 self.output_skipped = output_skipped
20
21 def add_findings_from_report(self, report: Report) -> None:
22 for check in report.failed_checks:
23 try:
24 existing = next(
25 x for x in self.path_failed_checks_map[check.file_path] if x["resource"] == check.resource
26 )
27 except StopIteration:
28 existing = {"resource": check.resource, "check_ids": []}
29 self.path_failed_checks_map[check.file_path].append(existing)
30 existing["check_ids"].append(check.check_id)
31 existing["check_ids"].sort() # Sort the check IDs to be nicer to the eye
32
33 def to_dict(self) -> dict[str, Any]:
34 """
35 The output of this class needs to be very explicit, hence the following structure of the dict:
36 {
37 "failed_checks": [
38 {
39 "file": "path/to/file",
40 "findings: [
41 {
42 "resource": "aws_s3_bucket.this",
43 "check_ids": [
44 "CKV_AWS_1",
45 "CKV_AWS_2",
46 "CKV_AWS_3"
47 ]
48 }
49 ]
50 }
51 ]
52 }
53 """
54 failed_checks_list = []
55 for file, findings in self.path_failed_checks_map.items():
56 formatted_findings = []
57 for finding in findings:
58 formatted_findings.append({"resource": finding["resource"], "check_ids": finding["check_ids"]})
59 failed_checks_list.append({"file": file, "findings": formatted_findings})
60
61 resp = {"failed_checks": failed_checks_list}
62 return resp
63
64 def compare_and_reduce_reports(self, scan_reports: list[Report]) -> None:
65 for scan_report in scan_reports:
66 scan_report.passed_checks = [
67 check for check in scan_report.passed_checks if self._is_check_in_baseline(check)
68 ]
69 scan_report.skipped_checks = [
70 check for check in scan_report.skipped_checks if self._is_check_in_baseline(check)
71 ]
72 if self.output_skipped:
73 for check in scan_report.failed_checks:
74 if self._is_check_in_baseline(check):
75 check.check_result["suppress_comment"] = "baseline-skipped"
76 check.check_result["result"] = CheckResult.SKIPPED
77 scan_report.skipped_checks.append(check)
78 scan_report.failed_checks = [
79 check for check in scan_report.failed_checks if not self._is_check_in_baseline(check)
80 ]
81
82 def _is_check_in_baseline(self, check: Record) -> bool:
83 failed_check_id = check.check_id
84 failed_check_resource = check.resource
85 for baseline_failed_check in self.failed_checks:
86 for finding in baseline_failed_check["findings"]:
87 if finding["resource"] == failed_check_resource and failed_check_id in finding["check_ids"]:
88 return True
89 return False
90
91 def from_json(self, file_path: str) -> None:
92 self.path = file_path
93 with open(file_path, "r") as f:
94 baseline_raw = json.load(f)
95 self.failed_checks = baseline_raw.get("failed_checks", {})
96
[end of checkov/common/output/baseline.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/common/output/baseline.py b/checkov/common/output/baseline.py
--- a/checkov/common/output/baseline.py
+++ b/checkov/common/output/baseline.py
@@ -2,6 +2,8 @@
import json
from collections import defaultdict
+from operator import itemgetter
+
from checkov.common.models.enums import CheckResult
from typing import Any, TYPE_CHECKING
@@ -56,9 +58,9 @@
formatted_findings = []
for finding in findings:
formatted_findings.append({"resource": finding["resource"], "check_ids": finding["check_ids"]})
- failed_checks_list.append({"file": file, "findings": formatted_findings})
+ failed_checks_list.append({"file": file, "findings": sorted(formatted_findings, key=itemgetter("resource"))})
- resp = {"failed_checks": failed_checks_list}
+ resp = {"failed_checks": sorted(failed_checks_list, key=itemgetter("file"))}
return resp
def compare_and_reduce_reports(self, scan_reports: list[Report]) -> None:
| {"golden_diff": "diff --git a/checkov/common/output/baseline.py b/checkov/common/output/baseline.py\n--- a/checkov/common/output/baseline.py\n+++ b/checkov/common/output/baseline.py\n@@ -2,6 +2,8 @@\n \n import json\n from collections import defaultdict\n+from operator import itemgetter\n+\n from checkov.common.models.enums import CheckResult\n from typing import Any, TYPE_CHECKING\n \n@@ -56,9 +58,9 @@\n formatted_findings = []\n for finding in findings:\n formatted_findings.append({\"resource\": finding[\"resource\"], \"check_ids\": finding[\"check_ids\"]})\n- failed_checks_list.append({\"file\": file, \"findings\": formatted_findings})\n+ failed_checks_list.append({\"file\": file, \"findings\": sorted(formatted_findings, key=itemgetter(\"resource\"))})\n \n- resp = {\"failed_checks\": failed_checks_list}\n+ resp = {\"failed_checks\": sorted(failed_checks_list, key=itemgetter(\"file\"))}\n return resp\n \n def compare_and_reduce_reports(self, scan_reports: list[Report]) -> None:\n", "issue": "baseline output can change resource order for each run\nIf I generate a baseline file and I have then made some improvements to my Terraform code and I run the baseline again. What I am finding is that the order of the resources for each file can often change which then shows up as a diff against the prevous baseline file - when in reality nothing has change but the order of the resources in the findings array in the baseline output file \r\n\r\nI was wondering could the findings array just be sorted before being output? Then the resource order should be fixed and any actual diffs should be real changes to check_ids (which is sorted already) or new resources being added?\r\n\r\ne.g. this is a diff from two runs of generating a baseline file nothing has actually change just resources moved around in the array.\r\n\r\n```\r\n@@ -100,13 +100,12 @@\r\n \"file\": \"/main.tf\",\r\n \"findings\": [\r\n {\r\n- \"resource\": \"aws_s3_bucket.canary_artifacts\",\r\n+ \"resource\": \"aws_s3_bucket.backups\",\r\n \"check_ids\": [\r\n \"CKV2_AWS_6\",\r\n \"CKV_AWS_144\",\r\n \"CKV_AWS_145\",\r\n- \"CKV_AWS_18\",\r\n- \"CKV_AWS_21\"\r\n+ \"CKV_AWS_18\"\r\n ]\r\n },\r\n {\r\n@@ -119,12 +118,13 @@\r\n ]\r\n },\r\n {\r\n- \"resource\": \"aws_s3_bucket.lambdas\",\r\n+ \"resource\": \"aws_s3_bucket.canary_artifacts\",\r\n \"check_ids\": [\r\n \"CKV2_AWS_6\",\r\n \"CKV_AWS_144\",\r\n \"CKV_AWS_145\",\r\n- \"CKV_AWS_18\"\r\n+ \"CKV_AWS_18\",\r\n+ \"CKV_AWS_21\"\r\n ]\r\n },\r\n {\r\n@@ -137,7 +137,7 @@\r\n ]\r\n },\r\n {\r\n- \"resource\": \"aws_s3_bucket.backups\",\r\n+ \"resource\": \"aws_s3_bucket.lambdas\",\r\n \"check_ids\": [\r\n \"CKV2_AWS_6\",\r\n \"CKV_AWS_144\",\r\n```\n", "before_files": [{"content": "from __future__ import annotations\n\nimport json\nfrom collections import defaultdict\nfrom checkov.common.models.enums import CheckResult\nfrom typing import Any, TYPE_CHECKING\n\nif TYPE_CHECKING:\n from checkov.common.output.record import Record\n from checkov.common.output.report import Report\n from checkov.common.typing import _BaselineFinding, _BaselineFailedChecks\n\n\nclass Baseline:\n def __init__(self, output_skipped: bool = False) -> None:\n self.path = \"\"\n self.path_failed_checks_map: dict[str, list[_BaselineFinding]] = defaultdict(list)\n self.failed_checks: list[_BaselineFailedChecks] = []\n self.output_skipped = output_skipped\n\n def add_findings_from_report(self, report: Report) -> None:\n for check in report.failed_checks:\n try:\n existing = next(\n x for x in 
self.path_failed_checks_map[check.file_path] if x[\"resource\"] == check.resource\n )\n except StopIteration:\n existing = {\"resource\": check.resource, \"check_ids\": []}\n self.path_failed_checks_map[check.file_path].append(existing)\n existing[\"check_ids\"].append(check.check_id)\n existing[\"check_ids\"].sort() # Sort the check IDs to be nicer to the eye\n\n def to_dict(self) -> dict[str, Any]:\n \"\"\"\n The output of this class needs to be very explicit, hence the following structure of the dict:\n {\n \"failed_checks\": [\n {\n \"file\": \"path/to/file\",\n \"findings: [\n {\n \"resource\": \"aws_s3_bucket.this\",\n \"check_ids\": [\n \"CKV_AWS_1\",\n \"CKV_AWS_2\",\n \"CKV_AWS_3\"\n ]\n }\n ]\n }\n ]\n }\n \"\"\"\n failed_checks_list = []\n for file, findings in self.path_failed_checks_map.items():\n formatted_findings = []\n for finding in findings:\n formatted_findings.append({\"resource\": finding[\"resource\"], \"check_ids\": finding[\"check_ids\"]})\n failed_checks_list.append({\"file\": file, \"findings\": formatted_findings})\n\n resp = {\"failed_checks\": failed_checks_list}\n return resp\n\n def compare_and_reduce_reports(self, scan_reports: list[Report]) -> None:\n for scan_report in scan_reports:\n scan_report.passed_checks = [\n check for check in scan_report.passed_checks if self._is_check_in_baseline(check)\n ]\n scan_report.skipped_checks = [\n check for check in scan_report.skipped_checks if self._is_check_in_baseline(check)\n ]\n if self.output_skipped:\n for check in scan_report.failed_checks:\n if self._is_check_in_baseline(check):\n check.check_result[\"suppress_comment\"] = \"baseline-skipped\"\n check.check_result[\"result\"] = CheckResult.SKIPPED\n scan_report.skipped_checks.append(check)\n scan_report.failed_checks = [\n check for check in scan_report.failed_checks if not self._is_check_in_baseline(check)\n ]\n\n def _is_check_in_baseline(self, check: Record) -> bool:\n failed_check_id = check.check_id\n failed_check_resource = check.resource\n for baseline_failed_check in self.failed_checks:\n for finding in baseline_failed_check[\"findings\"]:\n if finding[\"resource\"] == failed_check_resource and failed_check_id in finding[\"check_ids\"]:\n return True\n return False\n\n def from_json(self, file_path: str) -> None:\n self.path = file_path\n with open(file_path, \"r\") as f:\n baseline_raw = json.load(f)\n self.failed_checks = baseline_raw.get(\"failed_checks\", {})\n", "path": "checkov/common/output/baseline.py"}]} | 2,024 | 236 |
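Editor's note on the record above: the accepted patch makes the baseline deterministic simply by sorting the findings by `resource` and the files by `file` with `operator.itemgetter`. A minimal illustration of that idea, using resource names borrowed from the issue text:

```python
# Sorting with itemgetter gives a stable baseline regardless of the order in
# which checks happened to be collected.
from operator import itemgetter

findings = [
    {"resource": "aws_s3_bucket.lambdas", "check_ids": ["CKV2_AWS_6", "CKV_AWS_18"]},
    {"resource": "aws_s3_bucket.backups", "check_ids": ["CKV2_AWS_6", "CKV_AWS_18"]},
    {"resource": "aws_s3_bucket.canary_artifacts", "check_ids": ["CKV2_AWS_6", "CKV_AWS_21"]},
]

for finding in sorted(findings, key=itemgetter("resource")):
    print(finding["resource"], finding["check_ids"])
# aws_s3_bucket.backups ['CKV2_AWS_6', 'CKV_AWS_18']
# aws_s3_bucket.canary_artifacts ['CKV2_AWS_6', 'CKV_AWS_21']
# aws_s3_bucket.lambdas ['CKV2_AWS_6', 'CKV_AWS_18']
```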
gh_patches_debug_10241 | rasdani/github-patches | git_diff | rootpy__rootpy-748 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error when using root_open: 'TDirectory' object has no attribute 'func'
As above:
`AttributeError: 'TDirectory' object has no attribute 'func'`
</issue>
<code>
[start of rootpy/ROOT.py]
1 # Copyright 2012 the rootpy developers
2 # distributed under the terms of the GNU General Public License
3 """
4 :py:mod:`rootpy.ROOT`
5 =====================
6
7 This module is intended to be a drop-in replacement for ordinary
8 PyROOT imports by mimicking PyROOT's interface. If you find a case where it is
9 not, please report an issue to the rootpy developers.
10
11 Both ROOT and rootpy classes can be accessed in a harmonized way through this
12 module. This means you can take advantage of rootpy classes automatically by
13 replacing ``import ROOT`` with ``import rootpy.ROOT as ROOT`` or
14 ``from rootpy import ROOT`` in your code, while maintaining backward
15 compatibility with existing use of ROOT's classes.
16
17 ROOT classes are automatically "asrootpy'd" *after* the constructor in ROOT has
18 been called:
19
20 .. sourcecode:: python
21
22 >>> import rootpy.ROOT as ROOT
23 >>> h = ROOT.TH1F('name', 'title', 10, 0, 1)
24 >>> h
25 Hist('name')
26 >>> h.TYPE
27 'F'
28
29 Also access rootpy classes under this same module without needing to remember
30 where to import them from in rootpy:
31
32 .. sourcecode:: python
33
34 >>> import rootpy.ROOT as ROOT
35 >>> h = ROOT.Hist(10, 0, 1, name='name', type='F')
36 >>> h
37 Hist('name')
38 >>> h.TYPE
39 'F'
40
41 Plain old ROOT can still be accessed through the ``R`` property:
42
43 .. sourcecode:: python
44
45 >>> from rootpy import ROOT
46 >>> ROOT.R.TFile
47 <class 'ROOT.TFile'>
48
49 """
50 from __future__ import absolute_import
51
52 from copy import copy
53
54 import ROOT
55
56 from . import asrootpy, lookup_rootpy, ROOT_VERSION
57 from . import QROOT, stl
58 from .utils.module_facade import Facade
59
60 __all__ = []
61
62
63 def proxy_global(name, no_expand_macro=False):
64 """
65 Used to automatically asrootpy ROOT's thread local variables
66 """
67 if no_expand_macro: # pragma: no cover
68 # handle older ROOT versions without _ExpandMacroFunction wrapping
69 @property
70 def gSomething_no_func(self):
71 glob = self(getattr(ROOT, name))
72 # create a fake func() that just returns self
73 def func():
74 return glob
75 glob.func = func
76 return glob
77 return gSomething_no_func
78
79 @property
80 def gSomething(self):
81 glob = getattr(ROOT, name)
82 orig_func = glob.func
83
84 def asrootpy_izing_func():
85 return self(orig_func())
86
87 # new_glob = copy(glob)
88 new_glob = glob.__class__.__new__(glob.__class__)
89 new_glob.func = asrootpy_izing_func
90 # Memoize
91 setattr(type(self), name, new_glob)
92 return new_glob
93 return gSomething
94
95
96 @Facade(__name__, expose_internal=False)
97 class Module(object):
98
99 __version__ = ROOT_VERSION
100
101 def __call__(self, arg, after_init=False):
102 return asrootpy(arg, warn=False, after_init=after_init)
103
104 def __getattr__(self, what):
105 try:
106 # check ROOT
107 result = self(getattr(ROOT, what), after_init=True)
108 except AttributeError:
109 # check rootpy
110 result = lookup_rootpy(what)
111 if result is None:
112 raise AttributeError(
113 'ROOT does not have the attribute `{0}` '
114 'and rootpy does not contain the class `{0}`'.format(what))
115 return result
116
117 try:
118 # Memoize
119 setattr(self, what, result)
120 except AttributeError:
121 # Oops... Oh well. I tried.
122 pass
123
124 return result
125
126 @property
127 def R(self):
128 return ROOT
129
130 gPad = proxy_global("gPad")
131 gVirtualX = proxy_global("gVirtualX")
132
133 if ROOT_VERSION < (5, 32, 0): # pragma: no cover
134 # handle versions of ROOT older than 5.32.00
135 gDirectory = proxy_global("gDirectory", no_expand_macro=True)
136 gFile = proxy_global("gFile", no_expand_macro=True)
137 gInterpreter = proxy_global("gInterpreter", no_expand_macro=True)
138 else:
139 gDirectory = proxy_global("gDirectory")
140 gFile = proxy_global("gFile")
141 gInterpreter = proxy_global("gInterpreter")
142
143 # use the smart template STL types from rootpy.stl instead
144 for t in QROOT.std.stlclasses:
145 locals()[t] = getattr(stl, t)
146 del t
147
[end of rootpy/ROOT.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/rootpy/ROOT.py b/rootpy/ROOT.py
--- a/rootpy/ROOT.py
+++ b/rootpy/ROOT.py
@@ -130,8 +130,7 @@
gPad = proxy_global("gPad")
gVirtualX = proxy_global("gVirtualX")
- if ROOT_VERSION < (5, 32, 0): # pragma: no cover
- # handle versions of ROOT older than 5.32.00
+ if ROOT_VERSION < (5, 32, 0) or ROOT_VERSION >= (6, 9, 2): # pragma: no cover
gDirectory = proxy_global("gDirectory", no_expand_macro=True)
gFile = proxy_global("gFile", no_expand_macro=True)
gInterpreter = proxy_global("gInterpreter", no_expand_macro=True)
| {"golden_diff": "diff --git a/rootpy/ROOT.py b/rootpy/ROOT.py\n--- a/rootpy/ROOT.py\n+++ b/rootpy/ROOT.py\n@@ -130,8 +130,7 @@\n gPad = proxy_global(\"gPad\")\n gVirtualX = proxy_global(\"gVirtualX\")\n \n- if ROOT_VERSION < (5, 32, 0): # pragma: no cover\n- # handle versions of ROOT older than 5.32.00\n+ if ROOT_VERSION < (5, 32, 0) or ROOT_VERSION >= (6, 9, 2): # pragma: no cover\n gDirectory = proxy_global(\"gDirectory\", no_expand_macro=True)\n gFile = proxy_global(\"gFile\", no_expand_macro=True)\n gInterpreter = proxy_global(\"gInterpreter\", no_expand_macro=True)\n", "issue": "Error when using root_open: 'TDirectory' object has no attribute 'func'\nAs above:\r\n\r\n`AttributeError: 'TDirectory' object has no attribute 'func'`\n", "before_files": [{"content": "# Copyright 2012 the rootpy developers\n# distributed under the terms of the GNU General Public License\n\"\"\"\n:py:mod:`rootpy.ROOT`\n=====================\n\nThis module is intended to be a drop-in replacement for ordinary\nPyROOT imports by mimicking PyROOT's interface. If you find a case where it is\nnot, please report an issue to the rootpy developers.\n\nBoth ROOT and rootpy classes can be accessed in a harmonized way through this\nmodule. This means you can take advantage of rootpy classes automatically by\nreplacing ``import ROOT`` with ``import rootpy.ROOT as ROOT`` or\n``from rootpy import ROOT`` in your code, while maintaining backward\ncompatibility with existing use of ROOT's classes.\n\nROOT classes are automatically \"asrootpy'd\" *after* the constructor in ROOT has\nbeen called:\n\n.. sourcecode:: python\n\n >>> import rootpy.ROOT as ROOT\n >>> h = ROOT.TH1F('name', 'title', 10, 0, 1)\n >>> h\n Hist('name')\n >>> h.TYPE\n 'F'\n\nAlso access rootpy classes under this same module without needing to remember\nwhere to import them from in rootpy:\n\n.. sourcecode:: python\n\n >>> import rootpy.ROOT as ROOT\n >>> h = ROOT.Hist(10, 0, 1, name='name', type='F')\n >>> h\n Hist('name')\n >>> h.TYPE\n 'F'\n\nPlain old ROOT can still be accessed through the ``R`` property:\n\n.. sourcecode:: python\n\n >>> from rootpy import ROOT\n >>> ROOT.R.TFile\n <class 'ROOT.TFile'>\n\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom copy import copy\n\nimport ROOT\n\nfrom . import asrootpy, lookup_rootpy, ROOT_VERSION\nfrom . 
import QROOT, stl\nfrom .utils.module_facade import Facade\n\n__all__ = []\n\n\ndef proxy_global(name, no_expand_macro=False):\n \"\"\"\n Used to automatically asrootpy ROOT's thread local variables\n \"\"\"\n if no_expand_macro: # pragma: no cover\n # handle older ROOT versions without _ExpandMacroFunction wrapping\n @property\n def gSomething_no_func(self):\n glob = self(getattr(ROOT, name))\n # create a fake func() that just returns self\n def func():\n return glob\n glob.func = func\n return glob\n return gSomething_no_func\n\n @property\n def gSomething(self):\n glob = getattr(ROOT, name)\n orig_func = glob.func\n\n def asrootpy_izing_func():\n return self(orig_func())\n\n # new_glob = copy(glob)\n new_glob = glob.__class__.__new__(glob.__class__)\n new_glob.func = asrootpy_izing_func\n # Memoize\n setattr(type(self), name, new_glob)\n return new_glob\n return gSomething\n\n\n@Facade(__name__, expose_internal=False)\nclass Module(object):\n\n __version__ = ROOT_VERSION\n\n def __call__(self, arg, after_init=False):\n return asrootpy(arg, warn=False, after_init=after_init)\n\n def __getattr__(self, what):\n try:\n # check ROOT\n result = self(getattr(ROOT, what), after_init=True)\n except AttributeError:\n # check rootpy\n result = lookup_rootpy(what)\n if result is None:\n raise AttributeError(\n 'ROOT does not have the attribute `{0}` '\n 'and rootpy does not contain the class `{0}`'.format(what))\n return result\n\n try:\n # Memoize\n setattr(self, what, result)\n except AttributeError:\n # Oops... Oh well. I tried.\n pass\n\n return result\n\n @property\n def R(self):\n return ROOT\n\n gPad = proxy_global(\"gPad\")\n gVirtualX = proxy_global(\"gVirtualX\")\n\n if ROOT_VERSION < (5, 32, 0): # pragma: no cover\n # handle versions of ROOT older than 5.32.00\n gDirectory = proxy_global(\"gDirectory\", no_expand_macro=True)\n gFile = proxy_global(\"gFile\", no_expand_macro=True)\n gInterpreter = proxy_global(\"gInterpreter\", no_expand_macro=True)\n else:\n gDirectory = proxy_global(\"gDirectory\")\n gFile = proxy_global(\"gFile\")\n gInterpreter = proxy_global(\"gInterpreter\")\n\n # use the smart template STL types from rootpy.stl instead\n for t in QROOT.std.stlclasses:\n locals()[t] = getattr(stl, t)\n del t\n", "path": "rootpy/ROOT.py"}]} | 1,939 | 192 |
gh_patches_debug_22709 | rasdani/github-patches | git_diff | sopel-irc__sopel-2494 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Root module description is a mini-rant about LC_ALL rather than a description of the library
### Description
Looking at the `sopel` module with `pydoc` in an interactive prompt) exposes the user to [a short rant](https://github.com/sopel-irc/sopel/blob/c26914b68913bc25bdd1f5fed9c5942a87fdfee6/sopel/__init__.py#L1-L4) about the behavior of `LC_ALL` and instructions to use only ASCII in this module.
I'm sympathetic to the frustration over #984 that led to this, but it will be an improvement to add a docstring to the module with a short description.
### Reproduction steps
Run `python3 -m pydoc sopel` or `import sopel; help(sopel)` in an interactive prompt.
### Expected behavior
The user should see a short description of Sopel
### Relevant logs
_No response_
### Notes
_No response_
### Sopel version
c26914b
### Installation method
`pip install`
### Python version
_No response_
### Operating system
_No response_
### IRCd
_No response_
### Relevant plugins
_No response_
</issue>
<code>
[start of sopel/__init__.py]
1 # ASCII ONLY IN THIS FILE THOUGH!!!!!!!
2 # Python does some stupid bullshit of respecting LC_ALL over the encoding on the
3 # file, so in order to undo Python's ridiculous fucking idiocy, we have to have
4 # our own check.
5
6 # Copyright 2008, Sean B. Palmer, inamidst.com
7 # Copyright 2012, Elsie Powell, http://embolalia.com
8 # Copyright 2012, Elad Alfassa <[email protected]>
9 #
10 # Licensed under the Eiffel Forum License 2.
11
12 from __future__ import annotations
13
14 from collections import namedtuple
15 import locale
16 import re
17 import sys
18
19 # TODO: replace with stdlib importlib.metadata when dropping py3.7
20 # version info used in this module works from py3.8+
21 import importlib_metadata
22
23 __all__ = [
24 'bot',
25 'config',
26 'db',
27 'formatting',
28 'irc',
29 'loader',
30 'logger',
31 'module', # deprecated in 7.1, removed in 9.0
32 'plugin',
33 'tools',
34 'trigger',
35 'version_info',
36 ]
37
38 loc = locale.getlocale()
39 if not loc[1] or ('UTF-8' not in loc[1] and 'utf8' not in loc[1]):
40 print('WARNING!!! You are running with a non-UTF8 locale environment '
41 'variable (e.g. LC_ALL is set to "C"), which makes Python 3 do '
42 'stupid things. If you get strange errors, please set it to '
43 'something like "en_US.UTF-8".', file=sys.stderr)
44
45
46 __version__ = importlib_metadata.version('sopel')
47
48
49 def _version_info(version=__version__):
50 regex = re.compile(r'(\d+)\.(\d+)\.(\d+)(?:[\-\.]?(a|b|rc)(\d+))?.*')
51 version_match = regex.match(version)
52
53 if version_match is None:
54 raise RuntimeError("Can't parse version number!")
55
56 version_groups = version_match.groups()
57 major, minor, micro = (int(piece) for piece in version_groups[0:3])
58 level = version_groups[3]
59 serial = int(version_groups[4] or 0)
60 if level == 'a':
61 level = 'alpha'
62 elif level == 'b':
63 level = 'beta'
64 elif level == 'rc':
65 level = 'candidate'
66 elif not level and version_groups[4] is None:
67 level = 'final'
68 else:
69 level = 'alpha'
70
71 VersionInfo = namedtuple('VersionInfo',
72 'major, minor, micro, releaselevel, serial')
73 return VersionInfo(major, minor, micro, level, serial)
74
75
76 version_info = _version_info()
77
[end of sopel/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sopel/__init__.py b/sopel/__init__.py
--- a/sopel/__init__.py
+++ b/sopel/__init__.py
@@ -1,8 +1,9 @@
-# ASCII ONLY IN THIS FILE THOUGH!!!!!!!
-# Python does some stupid bullshit of respecting LC_ALL over the encoding on the
-# file, so in order to undo Python's ridiculous fucking idiocy, we have to have
-# our own check.
+"""
+Sopel is a simple, easy-to-use, open-source IRC utility bot, written in Python.
+It’s designed to be easy to use, easy to run, and easy to extend.
+"""
+#
# Copyright 2008, Sean B. Palmer, inamidst.com
# Copyright 2012, Elsie Powell, http://embolalia.com
# Copyright 2012, Elad Alfassa <[email protected]>
@@ -37,9 +38,8 @@
loc = locale.getlocale()
if not loc[1] or ('UTF-8' not in loc[1] and 'utf8' not in loc[1]):
- print('WARNING!!! You are running with a non-UTF8 locale environment '
- 'variable (e.g. LC_ALL is set to "C"), which makes Python 3 do '
- 'stupid things. If you get strange errors, please set it to '
+ print('Warning: Running with a non-UTF8 locale. If you see strange '
+ 'encoding errors, try setting the LC_ALL environment variable to '
'something like "en_US.UTF-8".', file=sys.stderr)
| {"golden_diff": "diff --git a/sopel/__init__.py b/sopel/__init__.py\n--- a/sopel/__init__.py\n+++ b/sopel/__init__.py\n@@ -1,8 +1,9 @@\n-# ASCII ONLY IN THIS FILE THOUGH!!!!!!!\n-# Python does some stupid bullshit of respecting LC_ALL over the encoding on the\n-# file, so in order to undo Python's ridiculous fucking idiocy, we have to have\n-# our own check.\n+\"\"\"\n+Sopel is a simple, easy-to-use, open-source IRC utility bot, written in Python.\n \n+It\u2019s designed to be easy to use, easy to run, and easy to extend.\n+\"\"\"\n+#\n # Copyright 2008, Sean B. Palmer, inamidst.com\n # Copyright 2012, Elsie Powell, http://embolalia.com\n # Copyright 2012, Elad Alfassa <[email protected]>\n@@ -37,9 +38,8 @@\n \n loc = locale.getlocale()\n if not loc[1] or ('UTF-8' not in loc[1] and 'utf8' not in loc[1]):\n- print('WARNING!!! You are running with a non-UTF8 locale environment '\n- 'variable (e.g. LC_ALL is set to \"C\"), which makes Python 3 do '\n- 'stupid things. If you get strange errors, please set it to '\n+ print('Warning: Running with a non-UTF8 locale. If you see strange '\n+ 'encoding errors, try setting the LC_ALL environment variable to '\n 'something like \"en_US.UTF-8\".', file=sys.stderr)\n", "issue": "Root module description is a mini-rant about LC_ALL rather than a description of the library\n### Description\n\nLooking at the `sopel` module with `pydoc` in an interactive prompt) exposes the user to [a short rant](https://github.com/sopel-irc/sopel/blob/c26914b68913bc25bdd1f5fed9c5942a87fdfee6/sopel/__init__.py#L1-L4) about the behavior of `LC_ALL` and instructions to use only ASCII in this module.\r\n\r\nI'm sympathetic to the frustration over #984 that led to this, but it will be an improvement to add a docstring to the module with a short description.\n\n### Reproduction steps\n\nRun `python3 -m pydoc sopel` or `import sopel; help(sopel)` in an interactive prompt.\n\n### Expected behavior\n\nThe user should see a short description of Sopel\n\n### Relevant logs\n\n_No response_\n\n### Notes\n\n_No response_\n\n### Sopel version\n\nc26914b\n\n### Installation method\n\n`pip install`\n\n### Python version\n\n_No response_\n\n### Operating system\n\n_No response_\n\n### IRCd\n\n_No response_\n\n### Relevant plugins\n\n_No response_\n", "before_files": [{"content": "# ASCII ONLY IN THIS FILE THOUGH!!!!!!!\n# Python does some stupid bullshit of respecting LC_ALL over the encoding on the\n# file, so in order to undo Python's ridiculous fucking idiocy, we have to have\n# our own check.\n\n# Copyright 2008, Sean B. Palmer, inamidst.com\n# Copyright 2012, Elsie Powell, http://embolalia.com\n# Copyright 2012, Elad Alfassa <[email protected]>\n#\n# Licensed under the Eiffel Forum License 2.\n\nfrom __future__ import annotations\n\nfrom collections import namedtuple\nimport locale\nimport re\nimport sys\n\n# TODO: replace with stdlib importlib.metadata when dropping py3.7\n# version info used in this module works from py3.8+\nimport importlib_metadata\n\n__all__ = [\n 'bot',\n 'config',\n 'db',\n 'formatting',\n 'irc',\n 'loader',\n 'logger',\n 'module', # deprecated in 7.1, removed in 9.0\n 'plugin',\n 'tools',\n 'trigger',\n 'version_info',\n]\n\nloc = locale.getlocale()\nif not loc[1] or ('UTF-8' not in loc[1] and 'utf8' not in loc[1]):\n print('WARNING!!! You are running with a non-UTF8 locale environment '\n 'variable (e.g. LC_ALL is set to \"C\"), which makes Python 3 do '\n 'stupid things. 
If you get strange errors, please set it to '\n 'something like \"en_US.UTF-8\".', file=sys.stderr)\n\n\n__version__ = importlib_metadata.version('sopel')\n\n\ndef _version_info(version=__version__):\n regex = re.compile(r'(\\d+)\\.(\\d+)\\.(\\d+)(?:[\\-\\.]?(a|b|rc)(\\d+))?.*')\n version_match = regex.match(version)\n\n if version_match is None:\n raise RuntimeError(\"Can't parse version number!\")\n\n version_groups = version_match.groups()\n major, minor, micro = (int(piece) for piece in version_groups[0:3])\n level = version_groups[3]\n serial = int(version_groups[4] or 0)\n if level == 'a':\n level = 'alpha'\n elif level == 'b':\n level = 'beta'\n elif level == 'rc':\n level = 'candidate'\n elif not level and version_groups[4] is None:\n level = 'final'\n else:\n level = 'alpha'\n\n VersionInfo = namedtuple('VersionInfo',\n 'major, minor, micro, releaselevel, serial')\n return VersionInfo(major, minor, micro, level, serial)\n\n\nversion_info = _version_info()\n", "path": "sopel/__init__.py"}]} | 1,576 | 371 |
gh_patches_debug_20164 | rasdani/github-patches | git_diff | pytorch__vision-2258 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Raise error if target boxes are degenerate in Faster R-CNN
We have had a number of reports with users saying that their training loss is nan after a few iterations.
Most of the time, this is due to degenerate boxes (i.e., boxes with negative sizes or zero area). We should improve the user experience in those situations.
I think that raising an error in `GeneralizedRCNN` if the target boxes are degenerate would be a good compromise.
Related issues: https://github.com/pytorch/vision/issues/2235 https://github.com/pytorch/vision/issues/1994 https://github.com/pytorch/vision/issues/1176 https://github.com/pytorch/vision/issues/1128 #1120 and #997
</issue>
<code>
[start of torchvision/models/detection/generalized_rcnn.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
2 """
3 Implements the Generalized R-CNN framework
4 """
5
6 from collections import OrderedDict
7 import torch
8 from torch import nn
9 import warnings
10 from torch.jit.annotations import Tuple, List, Dict, Optional
11 from torch import Tensor
12
13
14 class GeneralizedRCNN(nn.Module):
15 """
16 Main class for Generalized R-CNN.
17
18 Arguments:
19 backbone (nn.Module):
20 rpn (nn.Module):
21 roi_heads (nn.Module): takes the features + the proposals from the RPN and computes
22 detections / masks from it.
23 transform (nn.Module): performs the data transformation from the inputs to feed into
24 the model
25 """
26
27 def __init__(self, backbone, rpn, roi_heads, transform):
28 super(GeneralizedRCNN, self).__init__()
29 self.transform = transform
30 self.backbone = backbone
31 self.rpn = rpn
32 self.roi_heads = roi_heads
33 # used only on torchscript mode
34 self._has_warned = False
35
36 @torch.jit.unused
37 def eager_outputs(self, losses, detections):
38 # type: (Dict[str, Tensor], List[Dict[str, Tensor]]) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]
39 if self.training:
40 return losses
41
42 return detections
43
44 def forward(self, images, targets=None):
45 # type: (List[Tensor], Optional[List[Dict[str, Tensor]]]) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]
46 """
47 Arguments:
48 images (list[Tensor]): images to be processed
49 targets (list[Dict[Tensor]]): ground-truth boxes present in the image (optional)
50
51 Returns:
52 result (list[BoxList] or dict[Tensor]): the output from the model.
53 During training, it returns a dict[Tensor] which contains the losses.
54 During testing, it returns list[BoxList] contains additional fields
55 like `scores`, `labels` and `mask` (for Mask R-CNN models).
56
57 """
58 if self.training and targets is None:
59 raise ValueError("In training mode, targets should be passed")
60 if self.training:
61 assert targets is not None
62 for target in targets:
63 boxes = target["boxes"]
64 if isinstance(boxes, torch.Tensor):
65 if len(boxes.shape) != 2 or boxes.shape[-1] != 4:
66 raise ValueError("Expected target boxes to be a tensor"
67 "of shape [N, 4], got {:}.".format(
68 boxes.shape))
69 else:
70 raise ValueError("Expected target boxes to be of type "
71 "Tensor, got {:}.".format(type(boxes)))
72
73 original_image_sizes = torch.jit.annotate(List[Tuple[int, int]], [])
74 for img in images:
75 val = img.shape[-2:]
76 assert len(val) == 2
77 original_image_sizes.append((val[0], val[1]))
78
79 images, targets = self.transform(images, targets)
80 features = self.backbone(images.tensors)
81 if isinstance(features, torch.Tensor):
82 features = OrderedDict([('0', features)])
83 proposals, proposal_losses = self.rpn(images, features, targets)
84 detections, detector_losses = self.roi_heads(features, proposals, images.image_sizes, targets)
85 detections = self.transform.postprocess(detections, images.image_sizes, original_image_sizes)
86
87 losses = {}
88 losses.update(detector_losses)
89 losses.update(proposal_losses)
90
91 if torch.jit.is_scripting():
92 if not self._has_warned:
93 warnings.warn("RCNN always returns a (Losses, Detections) tuple in scripting")
94 self._has_warned = True
95 return (losses, detections)
96 else:
97 return self.eager_outputs(losses, detections)
98
[end of torchvision/models/detection/generalized_rcnn.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torchvision/models/detection/generalized_rcnn.py b/torchvision/models/detection/generalized_rcnn.py
--- a/torchvision/models/detection/generalized_rcnn.py
+++ b/torchvision/models/detection/generalized_rcnn.py
@@ -77,6 +77,21 @@
original_image_sizes.append((val[0], val[1]))
images, targets = self.transform(images, targets)
+
+ # Check for degenerate boxes
+ # TODO: Move this to a function
+ if targets is not None:
+ for target_idx, target in enumerate(targets):
+ boxes = target["boxes"]
+ degenerate_boxes = boxes[:, 2:] <= boxes[:, :2]
+ if degenerate_boxes.any():
+ # print the first degenrate box
+ bb_idx = degenerate_boxes.any(dim=1).nonzero().view(-1)[0]
+ degen_bb: List[float] = boxes[bb_idx].tolist()
+ raise ValueError("All bounding boxes should have positive height and width."
+ " Found invaid box {} for target at index {}."
+ .format(degen_bb, target_idx))
+
features = self.backbone(images.tensors)
if isinstance(features, torch.Tensor):
features = OrderedDict([('0', features)])
| {"golden_diff": "diff --git a/torchvision/models/detection/generalized_rcnn.py b/torchvision/models/detection/generalized_rcnn.py\n--- a/torchvision/models/detection/generalized_rcnn.py\n+++ b/torchvision/models/detection/generalized_rcnn.py\n@@ -77,6 +77,21 @@\n original_image_sizes.append((val[0], val[1]))\n \n images, targets = self.transform(images, targets)\n+\n+ # Check for degenerate boxes\n+ # TODO: Move this to a function\n+ if targets is not None:\n+ for target_idx, target in enumerate(targets):\n+ boxes = target[\"boxes\"]\n+ degenerate_boxes = boxes[:, 2:] <= boxes[:, :2]\n+ if degenerate_boxes.any():\n+ # print the first degenrate box\n+ bb_idx = degenerate_boxes.any(dim=1).nonzero().view(-1)[0]\n+ degen_bb: List[float] = boxes[bb_idx].tolist()\n+ raise ValueError(\"All bounding boxes should have positive height and width.\"\n+ \" Found invaid box {} for target at index {}.\"\n+ .format(degen_bb, target_idx))\n+\n features = self.backbone(images.tensors)\n if isinstance(features, torch.Tensor):\n features = OrderedDict([('0', features)])\n", "issue": "Raise error if target boxes are degenerate in Faster R-CNN\nWe have had a number of reports with users saying that their training loss is nan after a few iterations.\r\n\r\nMost of the time, this is due to degenerate boxes (i.e., boxes with negative sizes or zero area). We should improve the user experience in those situations.\r\n\r\nI think that raising an error in `GeneralizedRCNN` if the target boxes are degenerate would be a good compromise.\r\n\r\nRelated issues: https://github.com/pytorch/vision/issues/2235 https://github.com/pytorch/vision/issues/1994 https://github.com/pytorch/vision/issues/1176 https://github.com/pytorch/vision/issues/1128 #1120 and #997\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved.\n\"\"\"\nImplements the Generalized R-CNN framework\n\"\"\"\n\nfrom collections import OrderedDict\nimport torch\nfrom torch import nn\nimport warnings\nfrom torch.jit.annotations import Tuple, List, Dict, Optional\nfrom torch import Tensor\n\n\nclass GeneralizedRCNN(nn.Module):\n \"\"\"\n Main class for Generalized R-CNN.\n\n Arguments:\n backbone (nn.Module):\n rpn (nn.Module):\n roi_heads (nn.Module): takes the features + the proposals from the RPN and computes\n detections / masks from it.\n transform (nn.Module): performs the data transformation from the inputs to feed into\n the model\n \"\"\"\n\n def __init__(self, backbone, rpn, roi_heads, transform):\n super(GeneralizedRCNN, self).__init__()\n self.transform = transform\n self.backbone = backbone\n self.rpn = rpn\n self.roi_heads = roi_heads\n # used only on torchscript mode\n self._has_warned = False\n\n @torch.jit.unused\n def eager_outputs(self, losses, detections):\n # type: (Dict[str, Tensor], List[Dict[str, Tensor]]) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]\n if self.training:\n return losses\n\n return detections\n\n def forward(self, images, targets=None):\n # type: (List[Tensor], Optional[List[Dict[str, Tensor]]]) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]\n \"\"\"\n Arguments:\n images (list[Tensor]): images to be processed\n targets (list[Dict[Tensor]]): ground-truth boxes present in the image (optional)\n\n Returns:\n result (list[BoxList] or dict[Tensor]): the output from the model.\n During training, it returns a dict[Tensor] which contains the losses.\n During testing, it returns list[BoxList] contains additional fields\n like `scores`, `labels` and `mask` (for Mask R-CNN models).\n\n \"\"\"\n if self.training and targets is None:\n raise ValueError(\"In training mode, targets should be passed\")\n if self.training:\n assert targets is not None\n for target in targets:\n boxes = target[\"boxes\"]\n if isinstance(boxes, torch.Tensor):\n if len(boxes.shape) != 2 or boxes.shape[-1] != 4:\n raise ValueError(\"Expected target boxes to be a tensor\"\n \"of shape [N, 4], got {:}.\".format(\n boxes.shape))\n else:\n raise ValueError(\"Expected target boxes to be of type \"\n \"Tensor, got {:}.\".format(type(boxes)))\n\n original_image_sizes = torch.jit.annotate(List[Tuple[int, int]], [])\n for img in images:\n val = img.shape[-2:]\n assert len(val) == 2\n original_image_sizes.append((val[0], val[1]))\n\n images, targets = self.transform(images, targets)\n features = self.backbone(images.tensors)\n if isinstance(features, torch.Tensor):\n features = OrderedDict([('0', features)])\n proposals, proposal_losses = self.rpn(images, features, targets)\n detections, detector_losses = self.roi_heads(features, proposals, images.image_sizes, targets)\n detections = self.transform.postprocess(detections, images.image_sizes, original_image_sizes)\n\n losses = {}\n losses.update(detector_losses)\n losses.update(proposal_losses)\n\n if torch.jit.is_scripting():\n if not self._has_warned:\n warnings.warn(\"RCNN always returns a (Losses, Detections) tuple in scripting\")\n self._has_warned = True\n return (losses, detections)\n else:\n return self.eager_outputs(losses, detections)\n", "path": "torchvision/models/detection/generalized_rcnn.py"}]} | 1,725 | 287 |
gh_patches_debug_1514 | rasdani/github-patches | git_diff | ocadotechnology__aimmo-543 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Latest minikube not starting on Travis CI
Same issue and hopefully fix as this https://github.com/kubernetes/minikube/issues/2704
</issue>
<code>
[start of setup.py]
1 # -*- coding: utf-8 -*-
2 from setuptools import find_packages, setup
3
4 import versioneer
5
6 setup(
7 name='aimmo',
8 cmdclass=versioneer.get_cmdclass(),
9 packages=find_packages(),
10 include_package_data=True,
11 install_requires=[
12 'django >= 1.8.3, < 1.9.0',
13 'django-autoconfig >= 0.3.6, < 1.0.0',
14 'django-forms-bootstrap',
15 'django-js-reverse',
16 'eventlet',
17 'flask',
18 'flask-socketio',
19 'requests',
20 'six',
21 'pykube',
22 'hypothesis',
23 'flask-cors >= 3.0, < 3.1',
24 'psutil >= 5.4, < 5.5',
25 ],
26 tests_require=[
27 'django-setuptest',
28 'httmock',
29 'mock == 2.0.0',
30 'docker == 2.7.0',
31 'kubernetes == 4.0.0',
32 'PyYAML == 3.12',
33 ],
34 test_suite='setuptest.setuptest.SetupTestSuite',
35 version=versioneer.get_version(),
36 zip_safe=False,
37 )
38
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -28,7 +28,7 @@
'httmock',
'mock == 2.0.0',
'docker == 2.7.0',
- 'kubernetes == 4.0.0',
+ 'kubernetes == 5.0.0',
'PyYAML == 3.12',
],
test_suite='setuptest.setuptest.SetupTestSuite',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -28,7 +28,7 @@\n 'httmock',\n 'mock == 2.0.0',\n 'docker == 2.7.0',\n- 'kubernetes == 4.0.0',\n+ 'kubernetes == 5.0.0',\n 'PyYAML == 3.12',\n ],\n test_suite='setuptest.setuptest.SetupTestSuite',\n", "issue": "Latest minikube not starting on Travis CI\nSame issue and hopefully fix as this https://github.com/kubernetes/minikube/issues/2704\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom setuptools import find_packages, setup\n\nimport versioneer\n\nsetup(\n name='aimmo',\n cmdclass=versioneer.get_cmdclass(),\n packages=find_packages(),\n include_package_data=True,\n install_requires=[\n 'django >= 1.8.3, < 1.9.0',\n 'django-autoconfig >= 0.3.6, < 1.0.0',\n 'django-forms-bootstrap',\n 'django-js-reverse',\n 'eventlet',\n 'flask',\n 'flask-socketio',\n 'requests',\n 'six',\n 'pykube',\n 'hypothesis',\n 'flask-cors >= 3.0, < 3.1',\n 'psutil >= 5.4, < 5.5',\n ],\n tests_require=[\n 'django-setuptest',\n 'httmock',\n 'mock == 2.0.0',\n 'docker == 2.7.0',\n 'kubernetes == 4.0.0',\n 'PyYAML == 3.12',\n ],\n test_suite='setuptest.setuptest.SetupTestSuite',\n version=versioneer.get_version(),\n zip_safe=False,\n)\n", "path": "setup.py"}]} | 904 | 115 |
gh_patches_debug_1492 | rasdani/github-patches | git_diff | wright-group__WrightTools-590 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Change __version__ to match pep 440
Specifically, when a branch is specified, it should use a plus sign instead of minus
https://www.python.org/dev/peps/pep-0440/#local-version-identifiers
https://github.com/wright-group/WrightTools/blob/490a4a3d6fb6f016e7033d661b553b72c2d86fcb/WrightTools/__version__.py#L33
</issue>
<code>
[start of WrightTools/__version__.py]
1 """Define WrightTools version."""
2
3
4 # --- import --------------------------------------------------------------------------------------
5
6
7 import os
8
9
10 # ---- define -------------------------------------------------------------------------------------
11
12
13 here = os.path.abspath(os.path.dirname(__file__))
14
15
16 __all__ = ['__version__', '__branch__']
17
18
19 # --- version -------------------------------------------------------------------------------------
20
21
22 # read from VERSION file
23 with open(os.path.join(os.path.dirname(here), 'VERSION')) as f:
24 __version__ = f.read().strip()
25
26
27 # add git branch, if appropriate
28 p = os.path.join(os.path.dirname(here), '.git', 'HEAD')
29 if os.path.isfile(p):
30 with open(p) as f:
31 __branch__ = f.readline().rstrip().split(r'/')[-1]
32 if __branch__ != 'master':
33 __version__ += '-' + __branch__
34 else:
35 __branch__ = None
36
[end of WrightTools/__version__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/WrightTools/__version__.py b/WrightTools/__version__.py
--- a/WrightTools/__version__.py
+++ b/WrightTools/__version__.py
@@ -30,6 +30,6 @@
with open(p) as f:
__branch__ = f.readline().rstrip().split(r'/')[-1]
if __branch__ != 'master':
- __version__ += '-' + __branch__
+ __version__ += '+' + __branch__
else:
__branch__ = None
| {"golden_diff": "diff --git a/WrightTools/__version__.py b/WrightTools/__version__.py\n--- a/WrightTools/__version__.py\n+++ b/WrightTools/__version__.py\n@@ -30,6 +30,6 @@\n with open(p) as f:\n __branch__ = f.readline().rstrip().split(r'/')[-1]\n if __branch__ != 'master':\n- __version__ += '-' + __branch__\n+ __version__ += '+' + __branch__\n else:\n __branch__ = None\n", "issue": "Change __version__ to match pep 440\nSpecifically, when a branch is specified, it should use a plus sign instead of minus\r\n\r\nhttps://www.python.org/dev/peps/pep-0440/#local-version-identifiers\r\n\r\nhttps://github.com/wright-group/WrightTools/blob/490a4a3d6fb6f016e7033d661b553b72c2d86fcb/WrightTools/__version__.py#L33\n", "before_files": [{"content": "\"\"\"Define WrightTools version.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport os\n\n\n# ---- define -------------------------------------------------------------------------------------\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\n__all__ = ['__version__', '__branch__']\n\n\n# --- version -------------------------------------------------------------------------------------\n\n\n# read from VERSION file\nwith open(os.path.join(os.path.dirname(here), 'VERSION')) as f:\n __version__ = f.read().strip()\n\n\n# add git branch, if appropriate\np = os.path.join(os.path.dirname(here), '.git', 'HEAD')\nif os.path.isfile(p):\n with open(p) as f:\n __branch__ = f.readline().rstrip().split(r'/')[-1]\n if __branch__ != 'master':\n __version__ += '-' + __branch__\nelse:\n __branch__ = None\n", "path": "WrightTools/__version__.py"}]} | 898 | 118 |
gh_patches_debug_3450 | rasdani/github-patches | git_diff | astronomer__astro-sdk-176 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use standard AWS environment variables
**Context**
At the moment, Astro 0.6.x uses a custom environment variable `AIRFLOW__ASTRO__CONN_AWS_DEFAULT` to define AWS credentials. However, there are standard [AWS environment variables to define credentials](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html#using-environment-variables).
**Acceptance criteria**
* Replace any occurrence of `AIRFLOW__ASTRO__CONN_AWS_DEFAULT` by `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
</issue>
<code>
[start of src/astro/utils/cloud_storage_creds.py]
1 import json
2 import os
3 from urllib import parse
4
5 from airflow.hooks.base import BaseHook
6
7 from astro.utils.dependencies import (
8 AwsBaseHook,
9 BotoSession,
10 GCSClient,
11 GCSHook,
12 google_service_account,
13 )
14
15
16 def parse_s3_env_var():
17 raw_data = (
18 os.environ["AIRFLOW__ASTRO__CONN_AWS_DEFAULT"]
19 .replace("%2F", "/")
20 .replace("aws://", "")
21 .replace("@", "")
22 .split(":")
23 )
24 return [parse.unquote(r) for r in raw_data]
25
26
27 def s3fs_creds(conn_id=None):
28 """Structure s3fs credentials from Airflow connection.
29 s3fs enables pandas to write to s3
30 """
31 if conn_id:
32 # The following line raises a friendly exception
33 BaseHook.get_connection(conn_id)
34 aws_hook = AwsBaseHook(conn_id, client_type="S3")
35 session = aws_hook.get_session()
36 else:
37 key, secret = parse_s3_env_var()
38 session = BotoSession(
39 aws_access_key_id=key,
40 aws_secret_access_key=secret,
41 )
42 return dict(client=session.client("s3"))
43
44
45 def gcs_client(conn_id=None):
46 """
47 get GCS credentials for storage.
48 """
49 if conn_id:
50 gcs_hook = GCSHook(conn_id)
51 client = gcs_hook.get_conn()
52 else:
53 client = GCSClient()
54
55 return dict(client=client)
56
[end of src/astro/utils/cloud_storage_creds.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/astro/utils/cloud_storage_creds.py b/src/astro/utils/cloud_storage_creds.py
--- a/src/astro/utils/cloud_storage_creds.py
+++ b/src/astro/utils/cloud_storage_creds.py
@@ -14,14 +14,7 @@
def parse_s3_env_var():
- raw_data = (
- os.environ["AIRFLOW__ASTRO__CONN_AWS_DEFAULT"]
- .replace("%2F", "/")
- .replace("aws://", "")
- .replace("@", "")
- .split(":")
- )
- return [parse.unquote(r) for r in raw_data]
+ return os.environ["AWS_ACCESS_KEY_ID"], os.environ["AWS_SECRET_ACCESS_KEY"]
def s3fs_creds(conn_id=None):
| {"golden_diff": "diff --git a/src/astro/utils/cloud_storage_creds.py b/src/astro/utils/cloud_storage_creds.py\n--- a/src/astro/utils/cloud_storage_creds.py\n+++ b/src/astro/utils/cloud_storage_creds.py\n@@ -14,14 +14,7 @@\n \n \n def parse_s3_env_var():\n- raw_data = (\n- os.environ[\"AIRFLOW__ASTRO__CONN_AWS_DEFAULT\"]\n- .replace(\"%2F\", \"/\")\n- .replace(\"aws://\", \"\")\n- .replace(\"@\", \"\")\n- .split(\":\")\n- )\n- return [parse.unquote(r) for r in raw_data]\n+ return os.environ[\"AWS_ACCESS_KEY_ID\"], os.environ[\"AWS_SECRET_ACCESS_KEY\"]\n \n \n def s3fs_creds(conn_id=None):\n", "issue": "Use standard AWS environment variables\n**Context**\r\nAt the moment, Astro 0.6.x uses a custom environment variable `AIRFLOW__ASTRO__CONN_AWS_DEFAULT` to define AWS credentials. However, there are standard [AWS environment variables to define credentials](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html#using-environment-variables).\r\n\r\n**Acceptance criteria**\r\n* Replace any occurrence of `AIRFLOW__ASTRO__CONN_AWS_DEFAULT` by `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`\n", "before_files": [{"content": "import json\nimport os\nfrom urllib import parse\n\nfrom airflow.hooks.base import BaseHook\n\nfrom astro.utils.dependencies import (\n AwsBaseHook,\n BotoSession,\n GCSClient,\n GCSHook,\n google_service_account,\n)\n\n\ndef parse_s3_env_var():\n raw_data = (\n os.environ[\"AIRFLOW__ASTRO__CONN_AWS_DEFAULT\"]\n .replace(\"%2F\", \"/\")\n .replace(\"aws://\", \"\")\n .replace(\"@\", \"\")\n .split(\":\")\n )\n return [parse.unquote(r) for r in raw_data]\n\n\ndef s3fs_creds(conn_id=None):\n \"\"\"Structure s3fs credentials from Airflow connection.\n s3fs enables pandas to write to s3\n \"\"\"\n if conn_id:\n # The following line raises a friendly exception\n BaseHook.get_connection(conn_id)\n aws_hook = AwsBaseHook(conn_id, client_type=\"S3\")\n session = aws_hook.get_session()\n else:\n key, secret = parse_s3_env_var()\n session = BotoSession(\n aws_access_key_id=key,\n aws_secret_access_key=secret,\n )\n return dict(client=session.client(\"s3\"))\n\n\ndef gcs_client(conn_id=None):\n \"\"\"\n get GCS credentials for storage.\n \"\"\"\n if conn_id:\n gcs_hook = GCSHook(conn_id)\n client = gcs_hook.get_conn()\n else:\n client = GCSClient()\n\n return dict(client=client)\n", "path": "src/astro/utils/cloud_storage_creds.py"}]} | 1,087 | 172 |
gh_patches_debug_33725 | rasdani/github-patches | git_diff | modoboa__modoboa-1859 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Dashboard - server behind proxy
# Impacted versions
* Modoboa: 1.14.0
* Webserver: Nginx
# Steps to reproduce
Modoboa server is behind proxy, so no internet direct access
Acces dashboard via admin account
# Current behavior
504 Gateway Time-out
# Expected behavior
no error
</issue>
<code>
[start of modoboa/core/views/dashboard.py]
1 """Core dashboard views."""
2
3 import feedparser
4 import requests
5 from dateutil import parser
6 from requests.exceptions import RequestException
7
8 from django.contrib.auth import mixins as auth_mixins
9 from django.views import generic
10
11 from .. import signals
12
13 MODOBOA_WEBSITE_URL = "https://modoboa.org/"
14
15
16 class DashboardView(auth_mixins.AccessMixin, generic.TemplateView):
17 """Dashboard view."""
18
19 template_name = "core/dashboard.html"
20
21 def dispatch(self, request, *args, **kwargs):
22 """Check if user can access dashboard."""
23 if not request.user.is_authenticated or not request.user.is_admin:
24 return self.handle_no_permission()
25 return super(DashboardView, self).dispatch(request, *args, **kwargs)
26
27 def get_context_data(self, **kwargs):
28 """Add context variables."""
29 context = super(DashboardView, self).get_context_data(**kwargs)
30 context.update({
31 "selection": "dashboard", "widgets": {"left": [], "right": []}
32 })
33 # Fetch latest news
34 if self.request.user.language == "fr":
35 lang = "fr"
36 else:
37 lang = "en"
38 context.update({"selection": "dashboard"})
39
40 feed_url = "{}{}/weblog/feeds/".format(MODOBOA_WEBSITE_URL, lang)
41 if self.request.user.role != "SuperAdmins":
42 custom_feed_url = (
43 self.request.localconfig.parameters.get_value("rss_feed_url"))
44 if custom_feed_url:
45 feed_url = custom_feed_url
46 posts = feedparser.parse(feed_url)
47 entries = []
48 for entry in posts["entries"][:5]:
49 entry["published"] = parser.parse(entry["published"])
50 entries.append(entry)
51 context["widgets"]["left"].append("core/_latest_news_widget.html")
52 context.update({"news": entries})
53
54 hide_features_widget = self.request.localconfig.parameters.get_value(
55 "hide_features_widget")
56 if self.request.user.is_superuser or not hide_features_widget:
57 url = "{}{}/api/projects/?featured=true".format(
58 MODOBOA_WEBSITE_URL, lang)
59 features = []
60 try:
61 response = requests.get(url)
62 except RequestException:
63 pass
64 else:
65 if response.status_code == 200:
66 features = response.json()
67 context["widgets"]["right"].append("core/_current_features.html")
68 context.update({"features": features})
69
70 # Extra widgets
71 result = signals.extra_admin_dashboard_widgets.send(
72 sender=self.__class__, user=self.request.user)
73 for _receiver, widgets in result:
74 for widget in widgets:
75 context["widgets"][widget["column"]].append(
76 widget["template"])
77 # FIXME: can raise conflicts...
78 context.update(widget["context"])
79
80 return context
81
[end of modoboa/core/views/dashboard.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/modoboa/core/views/dashboard.py b/modoboa/core/views/dashboard.py
--- a/modoboa/core/views/dashboard.py
+++ b/modoboa/core/views/dashboard.py
@@ -8,6 +8,8 @@
from django.contrib.auth import mixins as auth_mixins
from django.views import generic
+from django.conf import settings
+
from .. import signals
MODOBOA_WEBSITE_URL = "https://modoboa.org/"
@@ -43,11 +45,12 @@
self.request.localconfig.parameters.get_value("rss_feed_url"))
if custom_feed_url:
feed_url = custom_feed_url
- posts = feedparser.parse(feed_url)
entries = []
- for entry in posts["entries"][:5]:
- entry["published"] = parser.parse(entry["published"])
- entries.append(entry)
+ if not settings.DISABLE_DASHBOARD_EXTERNAL_QUERIES:
+ posts = feedparser.parse(feed_url)
+ for entry in posts["entries"][:5]:
+ entry["published"] = parser.parse(entry["published"])
+ entries.append(entry)
context["widgets"]["left"].append("core/_latest_news_widget.html")
context.update({"news": entries})
@@ -57,13 +60,14 @@
url = "{}{}/api/projects/?featured=true".format(
MODOBOA_WEBSITE_URL, lang)
features = []
- try:
- response = requests.get(url)
- except RequestException:
- pass
- else:
- if response.status_code == 200:
- features = response.json()
+ if not settings.DISABLE_DASHBOARD_EXTERNAL_QUERIES:
+ try:
+ response = requests.get(url)
+ except RequestException:
+ pass
+ else:
+ if response.status_code == 200:
+ features = response.json()
context["widgets"]["right"].append("core/_current_features.html")
context.update({"features": features})
| {"golden_diff": "diff --git a/modoboa/core/views/dashboard.py b/modoboa/core/views/dashboard.py\n--- a/modoboa/core/views/dashboard.py\n+++ b/modoboa/core/views/dashboard.py\n@@ -8,6 +8,8 @@\n from django.contrib.auth import mixins as auth_mixins\n from django.views import generic\n \n+from django.conf import settings\n+\n from .. import signals\n \n MODOBOA_WEBSITE_URL = \"https://modoboa.org/\"\n@@ -43,11 +45,12 @@\n self.request.localconfig.parameters.get_value(\"rss_feed_url\"))\n if custom_feed_url:\n feed_url = custom_feed_url\n- posts = feedparser.parse(feed_url)\n entries = []\n- for entry in posts[\"entries\"][:5]:\n- entry[\"published\"] = parser.parse(entry[\"published\"])\n- entries.append(entry)\n+ if not settings.DISABLE_DASHBOARD_EXTERNAL_QUERIES:\n+ posts = feedparser.parse(feed_url)\n+ for entry in posts[\"entries\"][:5]:\n+ entry[\"published\"] = parser.parse(entry[\"published\"])\n+ entries.append(entry)\n context[\"widgets\"][\"left\"].append(\"core/_latest_news_widget.html\")\n context.update({\"news\": entries})\n \n@@ -57,13 +60,14 @@\n url = \"{}{}/api/projects/?featured=true\".format(\n MODOBOA_WEBSITE_URL, lang)\n features = []\n- try:\n- response = requests.get(url)\n- except RequestException:\n- pass\n- else:\n- if response.status_code == 200:\n- features = response.json()\n+ if not settings.DISABLE_DASHBOARD_EXTERNAL_QUERIES:\n+ try:\n+ response = requests.get(url)\n+ except RequestException:\n+ pass\n+ else:\n+ if response.status_code == 200:\n+ features = response.json()\n context[\"widgets\"][\"right\"].append(\"core/_current_features.html\")\n context.update({\"features\": features})\n", "issue": "Dashboard - server behind proxy\n# Impacted versions\r\n\r\n* Modoboa: 1.14.0\r\n* Webserver: Nginx\r\n\r\n# Steps to reproduce\r\nModoboa server is behind proxy, so no internet direct access\r\nAcces dashboard via admin account\r\n\r\n# Current behavior\r\n504 Gateway Time-out\r\n\r\n# Expected behavior\r\nno error\r\n\n", "before_files": [{"content": "\"\"\"Core dashboard views.\"\"\"\n\nimport feedparser\nimport requests\nfrom dateutil import parser\nfrom requests.exceptions import RequestException\n\nfrom django.contrib.auth import mixins as auth_mixins\nfrom django.views import generic\n\nfrom .. 
import signals\n\nMODOBOA_WEBSITE_URL = \"https://modoboa.org/\"\n\n\nclass DashboardView(auth_mixins.AccessMixin, generic.TemplateView):\n \"\"\"Dashboard view.\"\"\"\n\n template_name = \"core/dashboard.html\"\n\n def dispatch(self, request, *args, **kwargs):\n \"\"\"Check if user can access dashboard.\"\"\"\n if not request.user.is_authenticated or not request.user.is_admin:\n return self.handle_no_permission()\n return super(DashboardView, self).dispatch(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n \"\"\"Add context variables.\"\"\"\n context = super(DashboardView, self).get_context_data(**kwargs)\n context.update({\n \"selection\": \"dashboard\", \"widgets\": {\"left\": [], \"right\": []}\n })\n # Fetch latest news\n if self.request.user.language == \"fr\":\n lang = \"fr\"\n else:\n lang = \"en\"\n context.update({\"selection\": \"dashboard\"})\n\n feed_url = \"{}{}/weblog/feeds/\".format(MODOBOA_WEBSITE_URL, lang)\n if self.request.user.role != \"SuperAdmins\":\n custom_feed_url = (\n self.request.localconfig.parameters.get_value(\"rss_feed_url\"))\n if custom_feed_url:\n feed_url = custom_feed_url\n posts = feedparser.parse(feed_url)\n entries = []\n for entry in posts[\"entries\"][:5]:\n entry[\"published\"] = parser.parse(entry[\"published\"])\n entries.append(entry)\n context[\"widgets\"][\"left\"].append(\"core/_latest_news_widget.html\")\n context.update({\"news\": entries})\n\n hide_features_widget = self.request.localconfig.parameters.get_value(\n \"hide_features_widget\")\n if self.request.user.is_superuser or not hide_features_widget:\n url = \"{}{}/api/projects/?featured=true\".format(\n MODOBOA_WEBSITE_URL, lang)\n features = []\n try:\n response = requests.get(url)\n except RequestException:\n pass\n else:\n if response.status_code == 200:\n features = response.json()\n context[\"widgets\"][\"right\"].append(\"core/_current_features.html\")\n context.update({\"features\": features})\n\n # Extra widgets\n result = signals.extra_admin_dashboard_widgets.send(\n sender=self.__class__, user=self.request.user)\n for _receiver, widgets in result:\n for widget in widgets:\n context[\"widgets\"][widget[\"column\"]].append(\n widget[\"template\"])\n # FIXME: can raise conflicts...\n context.update(widget[\"context\"])\n\n return context\n", "path": "modoboa/core/views/dashboard.py"}]} | 1,364 | 433 |
gh_patches_debug_15542 | rasdani/github-patches | git_diff | replicate__cog-553 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Dear friend,please tell me why I can't run it from cog example.
I am a newbie.
I run the code from cog examples.
I can run "cog run python",but I can't run following command.
input:
sudo cog predict -i @input.jpg
resluts:

</issue>
<code>
[start of python/cog/json.py]
1 from enum import Enum
2 import io
3 from typing import Any
4
5 from pydantic import BaseModel
6
7 from .types import Path
8
9 try:
10 import numpy as np # type: ignore
11
12 has_numpy = True
13 except ImportError:
14 has_numpy = False
15
16
17 def encode_json(obj: Any, upload_file) -> Any:
18 """
19 Returns a JSON-compatible version of the object. It will encode any Pydantic models and custom types.
20
21 When a file is encountered, it will be passed to upload_file. Any paths will be opened and converted to files.
22
23 Somewhat based on FastAPI's jsonable_encoder().
24 """
25 if isinstance(obj, BaseModel):
26 return encode_json(obj.dict(exclude_unset=True), upload_file)
27 if isinstance(obj, dict):
28 return {key: encode_json(value, upload_file) for key, value in obj.items()}
29 if isinstance(obj, list):
30 return [encode_json(value, upload_file) for value in obj]
31 if isinstance(obj, Enum):
32 return obj.value
33 if isinstance(obj, Path):
34 with obj.open("rb") as f:
35 return upload_file(f)
36 if isinstance(obj, io.IOBase):
37 return upload_file(obj)
38 if has_numpy:
39 if isinstance(obj, np.integer):
40 return int(obj)
41 if isinstance(obj, np.floating):
42 return float(obj)
43 if isinstance(obj, np.ndarray):
44 return obj.tolist()
45 return obj
46
[end of python/cog/json.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/cog/json.py b/python/cog/json.py
--- a/python/cog/json.py
+++ b/python/cog/json.py
@@ -1,5 +1,6 @@
from enum import Enum
import io
+from types import GeneratorType
from typing import Any
from pydantic import BaseModel
@@ -26,7 +27,7 @@
return encode_json(obj.dict(exclude_unset=True), upload_file)
if isinstance(obj, dict):
return {key: encode_json(value, upload_file) for key, value in obj.items()}
- if isinstance(obj, list):
+ if isinstance(obj, (list, set, frozenset, GeneratorType, tuple)):
return [encode_json(value, upload_file) for value in obj]
if isinstance(obj, Enum):
return obj.value
| {"golden_diff": "diff --git a/python/cog/json.py b/python/cog/json.py\n--- a/python/cog/json.py\n+++ b/python/cog/json.py\n@@ -1,5 +1,6 @@\n from enum import Enum\n import io\n+from types import GeneratorType\n from typing import Any\n \n from pydantic import BaseModel\n@@ -26,7 +27,7 @@\n return encode_json(obj.dict(exclude_unset=True), upload_file)\n if isinstance(obj, dict):\n return {key: encode_json(value, upload_file) for key, value in obj.items()}\n- if isinstance(obj, list):\n+ if isinstance(obj, (list, set, frozenset, GeneratorType, tuple)):\n return [encode_json(value, upload_file) for value in obj]\n if isinstance(obj, Enum):\n return obj.value\n", "issue": "Dear friend,please tell me why I can't run it from cog example.\nI am a newbie.\r\nI run the code from cog examples.\r\nI can run \"cog run python\",but I can't run following command.\r\ninput:\r\nsudo cog predict -i @input.jpg\r\nresluts:\r\n\r\n\n", "before_files": [{"content": "from enum import Enum\nimport io\nfrom typing import Any\n\nfrom pydantic import BaseModel\n\nfrom .types import Path\n\ntry:\n import numpy as np # type: ignore\n\n has_numpy = True\nexcept ImportError:\n has_numpy = False\n\n\ndef encode_json(obj: Any, upload_file) -> Any:\n \"\"\"\n Returns a JSON-compatible version of the object. It will encode any Pydantic models and custom types.\n\n When a file is encountered, it will be passed to upload_file. Any paths will be opened and converted to files.\n\n Somewhat based on FastAPI's jsonable_encoder().\n \"\"\"\n if isinstance(obj, BaseModel):\n return encode_json(obj.dict(exclude_unset=True), upload_file)\n if isinstance(obj, dict):\n return {key: encode_json(value, upload_file) for key, value in obj.items()}\n if isinstance(obj, list):\n return [encode_json(value, upload_file) for value in obj]\n if isinstance(obj, Enum):\n return obj.value\n if isinstance(obj, Path):\n with obj.open(\"rb\") as f:\n return upload_file(f)\n if isinstance(obj, io.IOBase):\n return upload_file(obj)\n if has_numpy:\n if isinstance(obj, np.integer):\n return int(obj)\n if isinstance(obj, np.floating):\n return float(obj)\n if isinstance(obj, np.ndarray):\n return obj.tolist()\n return obj\n", "path": "python/cog/json.py"}]} | 1,052 | 178 |
gh_patches_debug_12833 | rasdani/github-patches | git_diff | mindee__doctr-219 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Demo app error when analyzing my first document
## 🐛 Bug
I tried to analyze a PNG and a PDF and got the same error. I tried changing the model, but it didn't change anything.
## To Reproduce
Steps to reproduce the behavior:
1. Upload a PNG
2. Click on analyze document
<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->
```
KeyError: 0
Traceback:
File "/Users/thibautmorla/opt/anaconda3/lib/python3.8/site-packages/streamlit/script_runner.py", line 337, in _run_script
exec(code, module.__dict__)
File "/Users/thibautmorla/Downloads/doctr/demo/app.py", line 93, in <module>
main()
File "/Users/thibautmorla/Downloads/doctr/demo/app.py", line 77, in main
seg_map = predictor.det_predictor.model(processed_batches[0])[0]
```
## Additional context
First image upload
</issue>
<code>
[start of demo/app.py]
1 # Copyright (C) 2021, Mindee.
2
3 # This program is licensed under the Apache License version 2.
4 # See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
5
6 import os
7 import streamlit as st
8 import matplotlib.pyplot as plt
9
10 os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
11
12 import tensorflow as tf
13 import cv2
14
15 gpu_devices = tf.config.experimental.list_physical_devices('GPU')
16 if any(gpu_devices):
17 tf.config.experimental.set_memory_growth(gpu_devices[0], True)
18
19 from doctr.documents import DocumentFile
20 from doctr.models import ocr_predictor
21 from doctr.utils.visualization import visualize_page
22
23 DET_ARCHS = ["db_resnet50"]
24 RECO_ARCHS = ["crnn_vgg16_bn", "crnn_resnet31", "sar_vgg16_bn", "sar_resnet31"]
25
26
27 def main():
28
29 # Wide mode
30 st.set_page_config(layout="wide")
31
32 # Designing the interface
33 st.title("DocTR: Document Text Recognition")
34 # For newline
35 st.write('\n')
36 # Set the columns
37 cols = st.beta_columns((1, 1, 1))
38 cols[0].header("Input document")
39 cols[1].header("Text segmentation")
40 cols[-1].header("OCR output")
41
42 # Sidebar
43 # File selection
44 st.sidebar.title("Document selection")
45 # Disabling warning
46 st.set_option('deprecation.showfileUploaderEncoding', False)
47 # Choose your own image
48 uploaded_file = st.sidebar.file_uploader("Upload files", type=['pdf', 'png', 'jpeg', 'jpg'])
49 if uploaded_file is not None:
50 if uploaded_file.name.endswith('.pdf'):
51 doc = DocumentFile.from_pdf(uploaded_file.read())
52 else:
53 doc = DocumentFile.from_images(uploaded_file.read())
54 cols[0].image(doc[0], "First page", use_column_width=True)
55
56 # Model selection
57 st.sidebar.title("Model selection")
58 det_arch = st.sidebar.selectbox("Text detection model", DET_ARCHS)
59 reco_arch = st.sidebar.selectbox("Text recognition model", RECO_ARCHS)
60
61 # For newline
62 st.sidebar.write('\n')
63
64 if st.sidebar.button("Analyze document"):
65
66 if uploaded_file is None:
67 st.sidebar.write("Please upload a document")
68
69 else:
70 with st.spinner('Loading model...'):
71 predictor = ocr_predictor(det_arch, reco_arch, pretrained=True)
72
73 with st.spinner('Analyzing...'):
74
75 # Forward the image to the model
76 processed_batches = predictor.det_predictor.pre_processor(doc)
77 seg_map = predictor.det_predictor.model(processed_batches[0])[0]
78 seg_map = cv2.resize(seg_map.numpy(), (doc[0].shape[1], doc[0].shape[0]),
79 interpolation=cv2.INTER_LINEAR)
80 # Plot the raw heatmap
81 fig, ax = plt.subplots()
82 ax.imshow(seg_map)
83 ax.axis('off')
84 cols[1].pyplot(fig)
85
86 # OCR
87 out = predictor(doc)
88 fig = visualize_page(out.pages[0].export(), doc[0], interactive=False)
89 cols[-1].pyplot(fig)
90
91
92 if __name__ == '__main__':
93 main()
94
[end of demo/app.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/demo/app.py b/demo/app.py
--- a/demo/app.py
+++ b/demo/app.py
@@ -74,7 +74,8 @@
# Forward the image to the model
processed_batches = predictor.det_predictor.pre_processor(doc)
- seg_map = predictor.det_predictor.model(processed_batches[0])[0]
+ seg_map = predictor.det_predictor.model(processed_batches[0])["proba_map"]
+ seg_map = tf.squeeze(seg_map, axis=[0, 3])
seg_map = cv2.resize(seg_map.numpy(), (doc[0].shape[1], doc[0].shape[0]),
interpolation=cv2.INTER_LINEAR)
# Plot the raw heatmap
| {"golden_diff": "diff --git a/demo/app.py b/demo/app.py\n--- a/demo/app.py\n+++ b/demo/app.py\n@@ -74,7 +74,8 @@\n \n # Forward the image to the model\n processed_batches = predictor.det_predictor.pre_processor(doc)\n- seg_map = predictor.det_predictor.model(processed_batches[0])[0]\n+ seg_map = predictor.det_predictor.model(processed_batches[0])[\"proba_map\"]\n+ seg_map = tf.squeeze(seg_map, axis=[0, 3])\n seg_map = cv2.resize(seg_map.numpy(), (doc[0].shape[1], doc[0].shape[0]),\n interpolation=cv2.INTER_LINEAR)\n # Plot the raw heatmap\n", "issue": "Demo app error when analyzing my first document\n## \ud83d\udc1b Bug\r\n\r\nI tried to analyze a PNG and a PDF, got the same error. I try to change the model, didn't change anything.\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n1. Upload a PNG\r\n2. Click on analyze document\r\n\r\n\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n```\r\nKeyError: 0\r\nTraceback:\r\nFile \"/Users/thibautmorla/opt/anaconda3/lib/python3.8/site-packages/streamlit/script_runner.py\", line 337, in _run_script\r\n exec(code, module.__dict__)\r\nFile \"/Users/thibautmorla/Downloads/doctr/demo/app.py\", line 93, in <module>\r\n main()\r\nFile \"/Users/thibautmorla/Downloads/doctr/demo/app.py\", line 77, in main\r\n seg_map = predictor.det_predictor.model(processed_batches[0])[0]\r\n```\r\n\r\n\r\n## Additional context\r\n\r\nFirst image upload\n", "before_files": [{"content": "# Copyright (C) 2021, Mindee.\n\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nimport os\nimport streamlit as st\nimport matplotlib.pyplot as plt\n\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\"\n\nimport tensorflow as tf\nimport cv2\n\ngpu_devices = tf.config.experimental.list_physical_devices('GPU')\nif any(gpu_devices):\n tf.config.experimental.set_memory_growth(gpu_devices[0], True)\n\nfrom doctr.documents import DocumentFile\nfrom doctr.models import ocr_predictor\nfrom doctr.utils.visualization import visualize_page\n\nDET_ARCHS = [\"db_resnet50\"]\nRECO_ARCHS = [\"crnn_vgg16_bn\", \"crnn_resnet31\", \"sar_vgg16_bn\", \"sar_resnet31\"]\n\n\ndef main():\n\n # Wide mode\n st.set_page_config(layout=\"wide\")\n\n # Designing the interface\n st.title(\"DocTR: Document Text Recognition\")\n # For newline\n st.write('\\n')\n # Set the columns\n cols = st.beta_columns((1, 1, 1))\n cols[0].header(\"Input document\")\n cols[1].header(\"Text segmentation\")\n cols[-1].header(\"OCR output\")\n\n # Sidebar\n # File selection\n st.sidebar.title(\"Document selection\")\n # Disabling warning\n st.set_option('deprecation.showfileUploaderEncoding', False)\n # Choose your own image\n uploaded_file = st.sidebar.file_uploader(\"Upload files\", type=['pdf', 'png', 'jpeg', 'jpg'])\n if uploaded_file is not None:\n if uploaded_file.name.endswith('.pdf'):\n doc = DocumentFile.from_pdf(uploaded_file.read())\n else:\n doc = DocumentFile.from_images(uploaded_file.read())\n cols[0].image(doc[0], \"First page\", use_column_width=True)\n\n # Model selection\n st.sidebar.title(\"Model selection\")\n det_arch = st.sidebar.selectbox(\"Text detection model\", DET_ARCHS)\n reco_arch = st.sidebar.selectbox(\"Text recognition model\", RECO_ARCHS)\n\n # For newline\n st.sidebar.write('\\n')\n\n if st.sidebar.button(\"Analyze document\"):\n\n if uploaded_file is None:\n st.sidebar.write(\"Please upload a document\")\n\n else:\n 
with st.spinner('Loading model...'):\n predictor = ocr_predictor(det_arch, reco_arch, pretrained=True)\n\n with st.spinner('Analyzing...'):\n\n # Forward the image to the model\n processed_batches = predictor.det_predictor.pre_processor(doc)\n seg_map = predictor.det_predictor.model(processed_batches[0])[0]\n seg_map = cv2.resize(seg_map.numpy(), (doc[0].shape[1], doc[0].shape[0]),\n interpolation=cv2.INTER_LINEAR)\n # Plot the raw heatmap\n fig, ax = plt.subplots()\n ax.imshow(seg_map)\n ax.axis('off')\n cols[1].pyplot(fig)\n\n # OCR\n out = predictor(doc)\n fig = visualize_page(out.pages[0].export(), doc[0], interactive=False)\n cols[-1].pyplot(fig)\n\n\nif __name__ == '__main__':\n main()\n", "path": "demo/app.py"}]} | 1,662 | 157 |
gh_patches_debug_3507 | rasdani/github-patches | git_diff | jazzband__pip-tools-1039 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
setup.py install_requires should have `"click>=7"` not `"click>=6"`
Thank you for all the work on this tool, it's very useful.
Issue:
As of 4.4.0 pip-tools now depends on version 7.0 of click, not 6.0.
The argument `show_envvar` is now being passed to `click.option()`
https://github.com/jazzband/pip-tools/compare/4.3.0...4.4.0#diff-c8673e93c598354ab4a9aa8dd090e913R183
That argument was added in click 7.0
https://click.palletsprojects.com/en/7.x/api/#click.Option
compared to
https://click.palletsprojects.com/en/6.x/api/#click.Option
Fix: setup.py install_requires should have `"click>=7"` not `"click>=6"`
</issue>
<code>
[start of setup.py]
1 """
2 pip-tools keeps your pinned dependencies fresh.
3 """
4 from os.path import abspath, dirname, join
5
6 from setuptools import find_packages, setup
7
8
9 def read_file(filename):
10 """Read the contents of a file located relative to setup.py"""
11 with open(join(abspath(dirname(__file__)), filename)) as thefile:
12 return thefile.read()
13
14
15 setup(
16 name="pip-tools",
17 use_scm_version=True,
18 url="https://github.com/jazzband/pip-tools/",
19 license="BSD",
20 author="Vincent Driessen",
21 author_email="[email protected]",
22 description=__doc__.strip(),
23 long_description=read_file("README.rst"),
24 long_description_content_type="text/x-rst",
25 packages=find_packages(exclude=["tests"]),
26 package_data={},
27 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*",
28 setup_requires=["setuptools_scm"],
29 install_requires=["click>=6", "six"],
30 zip_safe=False,
31 entry_points={
32 "console_scripts": [
33 "pip-compile = piptools.scripts.compile:cli",
34 "pip-sync = piptools.scripts.sync:cli",
35 ]
36 },
37 platforms="any",
38 classifiers=[
39 "Development Status :: 5 - Production/Stable",
40 "Intended Audience :: Developers",
41 "Intended Audience :: System Administrators",
42 "License :: OSI Approved :: BSD License",
43 "Operating System :: OS Independent",
44 "Programming Language :: Python",
45 "Programming Language :: Python :: 2",
46 "Programming Language :: Python :: 2.7",
47 "Programming Language :: Python :: 3",
48 "Programming Language :: Python :: 3.5",
49 "Programming Language :: Python :: 3.6",
50 "Programming Language :: Python :: 3.7",
51 "Programming Language :: Python :: 3.8",
52 "Programming Language :: Python :: Implementation :: CPython",
53 "Programming Language :: Python :: Implementation :: PyPy",
54 "Topic :: System :: Systems Administration",
55 ],
56 )
57
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -26,7 +26,7 @@
package_data={},
python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*",
setup_requires=["setuptools_scm"],
- install_requires=["click>=6", "six"],
+ install_requires=["click>=7", "six"],
zip_safe=False,
entry_points={
"console_scripts": [
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -26,7 +26,7 @@\n package_data={},\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*\",\n setup_requires=[\"setuptools_scm\"],\n- install_requires=[\"click>=6\", \"six\"],\n+ install_requires=[\"click>=7\", \"six\"],\n zip_safe=False,\n entry_points={\n \"console_scripts\": [\n", "issue": "setup.py install_requires should have `\"click>=7\"` not `\"click>=6\"`\nThank you for all the work on this tool, it's very useful.\r\n\r\nIssue:\r\nAs of 4.4.0 pip-tools now depends on version 7.0 of click, not 6.0.\r\n\r\nThe argument `show_envvar` is now being passed to `click.option()`\r\nhttps://github.com/jazzband/pip-tools/compare/4.3.0...4.4.0#diff-c8673e93c598354ab4a9aa8dd090e913R183\r\n\r\nThat argument was added in click 7.0\r\nhttps://click.palletsprojects.com/en/7.x/api/#click.Option\r\ncompared to \r\nhttps://click.palletsprojects.com/en/6.x/api/#click.Option\r\n\r\nFix: setup.py install_requires should have `\"click>=7\"` not `\"click>=6\"`\n", "before_files": [{"content": "\"\"\"\npip-tools keeps your pinned dependencies fresh.\n\"\"\"\nfrom os.path import abspath, dirname, join\n\nfrom setuptools import find_packages, setup\n\n\ndef read_file(filename):\n \"\"\"Read the contents of a file located relative to setup.py\"\"\"\n with open(join(abspath(dirname(__file__)), filename)) as thefile:\n return thefile.read()\n\n\nsetup(\n name=\"pip-tools\",\n use_scm_version=True,\n url=\"https://github.com/jazzband/pip-tools/\",\n license=\"BSD\",\n author=\"Vincent Driessen\",\n author_email=\"[email protected]\",\n description=__doc__.strip(),\n long_description=read_file(\"README.rst\"),\n long_description_content_type=\"text/x-rst\",\n packages=find_packages(exclude=[\"tests\"]),\n package_data={},\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*\",\n setup_requires=[\"setuptools_scm\"],\n install_requires=[\"click>=6\", \"six\"],\n zip_safe=False,\n entry_points={\n \"console_scripts\": [\n \"pip-compile = piptools.scripts.compile:cli\",\n \"pip-sync = piptools.scripts.sync:cli\",\n ]\n },\n platforms=\"any\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: System :: Systems Administration\",\n ],\n)\n", "path": "setup.py"}]} | 1,294 | 121 |
gh_patches_debug_5846 | rasdani/github-patches | git_diff | pwr-Solaar__Solaar-1447 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Too high PyYAML and psutil version requirements for RHEL/CentOS 8 base OS
**Information**
<!-- Please update to Solaar from this repository before asking for a new feature. -->
- Solaar version (`solaar --version` and `git describe --tags`): **1.1.1**
- Distribution: **RHEL 8.5**
- Kernel version (ex. `uname -srmo`): `Linux 4.18.0-348.7.1.el8_5.x86_64 x86_64 GNU/Linux`
- Output of `solaar show` for the target device (if applicable): **N/A**
**Is your feature request related to a problem? Please describe.**
[setup.py](https://github.com/pwr-Solaar/Solaar/blob/master/setup.py#L63) lists requirements for PyYAML >= 5.1 and psutil >= 5.6.0. Unfortunately, RHEL8 and derivatives carry PyYAML 3.12 and psutil 5.4.3 built for the default python 3.6 in the base OS repository. Consequently, solaar is not installable without installing `python38` or `python39` stack (called module in RHEL/Fedora) that carry newer versions of these two. I've had a request from RHEL8 user to provide a package for RHEL8 in Fedora EPEL, hence this issue.
**Describe the solution you'd like**
If there's no specific reason for the higher versions, I'd like the requirements to be lowered to the versions listed above. I will provide a patch if agreed. I made a build for RHEL8 with these dependencies patched to lower versions and a user tested it and confirmed it seems to be working fine for him.
**Describe alternatives you've considered**
Regular packages in Fedora/EPEL may not depend on modular packages, only base OS. I considered maintaining solaar as a modular package, but this seems very complicated and I don't really have time for this. On the other hand, building it as a regular package adds minimal overhead to my Fedora package maintenance.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python3
2
3 from glob import glob as _glob
4
5 try:
6 from setuptools import setup
7 except ImportError:
8 from distutils.core import setup
9
10 # from solaar import NAME, __version__
11 __version__ = '1.1.1'
12 NAME = 'Solaar'
13
14
15 def _data_files():
16 from os.path import dirname as _dirname
17
18 yield 'share/solaar/icons', _glob('share/solaar/icons/solaar*.svg')
19 yield 'share/solaar/icons', _glob('share/solaar/icons/light_*.png')
20 yield 'share/icons/hicolor/scalable/apps', ['share/solaar/icons/solaar.svg']
21
22 for mo in _glob('share/locale/*/LC_MESSAGES/solaar.mo'):
23 yield _dirname(mo), [mo]
24
25 yield 'share/applications', ['share/applications/solaar.desktop']
26 yield 'share/solaar/udev-rules.d', ['rules.d/42-logitech-unify-permissions.rules']
27 yield 'share/metainfo', ['share/solaar/io.github.pwr_solaar.solaar.metainfo.xml']
28
29 del _dirname
30
31
32 setup(
33 name=NAME.lower(),
34 version=__version__,
35 description='Linux device manager for Logitech receivers, keyboards, mice, and tablets.',
36 long_description='''
37 Solaar is a Linux device manager for many Logitech peripherals that connect through
38 Unifying and other receivers or via USB or Bluetooth.
39 Solaar is able to pair/unpair devices with receivers and show and modify some of the
40 modifiable features of devices.
41 For instructions on installing Solaar see https://pwr-solaar.github.io/Solaar/installation'''.strip(),
42 author='Daniel Pavel',
43 license='GPLv2',
44 url='http://pwr-solaar.github.io/Solaar/',
45 classifiers=[
46 'Development Status :: 4 - Beta',
47 'Environment :: X11 Applications :: GTK',
48 'Environment :: Console',
49 'Intended Audience :: End Users/Desktop',
50 'License :: DFSG approved',
51 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
52 'Natural Language :: English',
53 'Programming Language :: Python :: 3 :: Only',
54 'Operating System :: POSIX :: Linux',
55 'Topic :: Utilities',
56 ],
57 platforms=['linux'],
58
59 # sudo apt install python-gi python3-gi \
60 # gir1.2-gtk-3.0 gir1.2-notify-0.7 gir1.2-ayatanaappindicator3-0.1
61 # os_requires=['gi.repository.GObject (>= 2.0)', 'gi.repository.Gtk (>= 3.0)'],
62 python_requires='>=3.6',
63 install_requires=[
64 'pyudev (>= 0.13)',
65 'PyYAML (>= 5.1)',
66 'python-xlib (>= 0.27)',
67 'psutil (>= 5.6.0)',
68 ],
69 package_dir={'': 'lib'},
70 packages=['hidapi', 'logitech_receiver', 'solaar', 'solaar.ui', 'solaar.cli'],
71 data_files=list(_data_files()),
72 scripts=_glob('bin/*'),
73 )
74
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -62,9 +62,9 @@
python_requires='>=3.6',
install_requires=[
'pyudev (>= 0.13)',
- 'PyYAML (>= 5.1)',
+ 'PyYAML (>= 3.12)',
'python-xlib (>= 0.27)',
- 'psutil (>= 5.6.0)',
+ 'psutil (>= 5.4.3)',
],
package_dir={'': 'lib'},
packages=['hidapi', 'logitech_receiver', 'solaar', 'solaar.ui', 'solaar.cli'],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -62,9 +62,9 @@\n python_requires='>=3.6',\n install_requires=[\n 'pyudev (>= 0.13)',\n- 'PyYAML (>= 5.1)',\n+ 'PyYAML (>= 3.12)',\n 'python-xlib (>= 0.27)',\n- 'psutil (>= 5.6.0)',\n+ 'psutil (>= 5.4.3)',\n ],\n package_dir={'': 'lib'},\n packages=['hidapi', 'logitech_receiver', 'solaar', 'solaar.ui', 'solaar.cli'],\n", "issue": "Too high PyYAML and psutil version requirements for RHEL/CentOS 8 base OS\n**Information**\r\n<!-- Please update to Solaar from this repository before asking for a new feature. -->\r\n- Solaar version (`solaar --version` and `git describe --tags`): **1.1.1**\r\n- Distribution: **RHEL 8.5**\r\n- Kernel version (ex. `uname -srmo`): `Linux 4.18.0-348.7.1.el8_5.x86_64 x86_64 GNU/Linux`\r\n- Output of `solaar show` for the target device (if applicable): **N/A**\r\n\r\n**Is your feature request related to a problem? Please describe.**\r\n[setup.py](https://github.com/pwr-Solaar/Solaar/blob/master/setup.py#L63) lists requirements for PyYAML >= 5.1 and psutil >= 5.6.0. Unfortunately, RHEL8 and derivatives carry PyYAML 3.12 and psutil 5.4.3 built for the default python 3.6 in the base OS repository. Consequently, solaar is not installable without installing `python38` or `python39` stack (called module in RHEL/Fedora) that carry newer versions of these two. I've had a request from RHEL8 user to provide a package for RHEL8 in Fedora EPEL, hence this issue.\r\n\r\n**Describe the solution you'd like**\r\nIf there's no specific reason for the higher versions, I'd like the requirements to be lowered to the versions listed above. I will provide a patch if agreed. I made a build for RHEL8 with these dependencies patched to lower versions and a user tested it and confirmed it seems to be working fine for him.\r\n\r\n**Describe alternatives you've considered**\r\nRegular packages in Fedora/EPEL may not depend on modular packages, only base OS. I considered maintaining solaar as a modular package, but this seems very complicated and I don't really have time for this. 
On the other hand, building it as a regular package adds minimal overhead to my Fedora package maintenance.\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nfrom glob import glob as _glob\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\n# from solaar import NAME, __version__\n__version__ = '1.1.1'\nNAME = 'Solaar'\n\n\ndef _data_files():\n from os.path import dirname as _dirname\n\n yield 'share/solaar/icons', _glob('share/solaar/icons/solaar*.svg')\n yield 'share/solaar/icons', _glob('share/solaar/icons/light_*.png')\n yield 'share/icons/hicolor/scalable/apps', ['share/solaar/icons/solaar.svg']\n\n for mo in _glob('share/locale/*/LC_MESSAGES/solaar.mo'):\n yield _dirname(mo), [mo]\n\n yield 'share/applications', ['share/applications/solaar.desktop']\n yield 'share/solaar/udev-rules.d', ['rules.d/42-logitech-unify-permissions.rules']\n yield 'share/metainfo', ['share/solaar/io.github.pwr_solaar.solaar.metainfo.xml']\n\n del _dirname\n\n\nsetup(\n name=NAME.lower(),\n version=__version__,\n description='Linux device manager for Logitech receivers, keyboards, mice, and tablets.',\n long_description='''\nSolaar is a Linux device manager for many Logitech peripherals that connect through\nUnifying and other receivers or via USB or Bluetooth.\nSolaar is able to pair/unpair devices with receivers and show and modify some of the\nmodifiable features of devices.\nFor instructions on installing Solaar see https://pwr-solaar.github.io/Solaar/installation'''.strip(),\n author='Daniel Pavel',\n license='GPLv2',\n url='http://pwr-solaar.github.io/Solaar/',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: X11 Applications :: GTK',\n 'Environment :: Console',\n 'Intended Audience :: End Users/Desktop',\n 'License :: DFSG approved',\n 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3 :: Only',\n 'Operating System :: POSIX :: Linux',\n 'Topic :: Utilities',\n ],\n platforms=['linux'],\n\n # sudo apt install python-gi python3-gi \\\n # gir1.2-gtk-3.0 gir1.2-notify-0.7 gir1.2-ayatanaappindicator3-0.1\n # os_requires=['gi.repository.GObject (>= 2.0)', 'gi.repository.Gtk (>= 3.0)'],\n python_requires='>=3.6',\n install_requires=[\n 'pyudev (>= 0.13)',\n 'PyYAML (>= 5.1)',\n 'python-xlib (>= 0.27)',\n 'psutil (>= 5.6.0)',\n ],\n package_dir={'': 'lib'},\n packages=['hidapi', 'logitech_receiver', 'solaar', 'solaar.ui', 'solaar.cli'],\n data_files=list(_data_files()),\n scripts=_glob('bin/*'),\n)\n", "path": "setup.py"}]} | 1,842 | 164 |
gh_patches_debug_17312 | rasdani/github-patches | git_diff | googleapis__google-auth-library-python-619 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use extra for asyncio dependencies
Hello! The latest release for this library pulls in aiohttp and its dependencies unconditionally, which adds a non-trivial burden to projects that don’t need it. Would you consider using a packaging extra so that people can opt in?
</issue>
<code>
[start of setup.py]
1 # Copyright 2014 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import io
16
17 from setuptools import find_packages
18 from setuptools import setup
19
20
21 DEPENDENCIES = (
22 "cachetools>=2.0.0,<5.0",
23 "pyasn1-modules>=0.2.1",
24 # rsa==4.5 is the last version to support 2.7
25 # https://github.com/sybrenstuvel/python-rsa/issues/152#issuecomment-643470233
26 'rsa<4.6; python_version < "3.5"',
27 'rsa>=3.1.4,<5; python_version >= "3.5"',
28 "setuptools>=40.3.0",
29 "six>=1.9.0",
30 'aiohttp >= 3.6.2, < 4.0.0dev; python_version>="3.6"',
31 )
32
33
34 with io.open("README.rst", "r") as fh:
35 long_description = fh.read()
36
37 version = "1.22.0"
38
39 setup(
40 name="google-auth",
41 version=version,
42 author="Google Cloud Platform",
43 author_email="[email protected]",
44 description="Google Authentication Library",
45 long_description=long_description,
46 url="https://github.com/googleapis/google-auth-library-python",
47 packages=find_packages(exclude=("tests*", "system_tests*")),
48 namespace_packages=("google",),
49 install_requires=DEPENDENCIES,
50 python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
51 license="Apache 2.0",
52 keywords="google auth oauth client",
53 classifiers=[
54 "Programming Language :: Python :: 2",
55 "Programming Language :: Python :: 2.7",
56 "Programming Language :: Python :: 3",
57 "Programming Language :: Python :: 3.5",
58 "Programming Language :: Python :: 3.6",
59 "Programming Language :: Python :: 3.7",
60 "Programming Language :: Python :: 3.8",
61 "Development Status :: 5 - Production/Stable",
62 "Intended Audience :: Developers",
63 "License :: OSI Approved :: Apache Software License",
64 "Operating System :: POSIX",
65 "Operating System :: Microsoft :: Windows",
66 "Operating System :: MacOS :: MacOS X",
67 "Operating System :: OS Independent",
68 "Topic :: Internet :: WWW/HTTP",
69 ],
70 )
71
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -27,9 +27,9 @@
'rsa>=3.1.4,<5; python_version >= "3.5"',
"setuptools>=40.3.0",
"six>=1.9.0",
- 'aiohttp >= 3.6.2, < 4.0.0dev; python_version>="3.6"',
)
+extras = {"aiohttp": "aiohttp >= 3.6.2, < 4.0.0dev; python_version>='3.6'"}
with io.open("README.rst", "r") as fh:
long_description = fh.read()
@@ -47,6 +47,7 @@
packages=find_packages(exclude=("tests*", "system_tests*")),
namespace_packages=("google",),
install_requires=DEPENDENCIES,
+ extras_require=extras,
python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
license="Apache 2.0",
keywords="google auth oauth client",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -27,9 +27,9 @@\n 'rsa>=3.1.4,<5; python_version >= \"3.5\"',\n \"setuptools>=40.3.0\",\n \"six>=1.9.0\",\n- 'aiohttp >= 3.6.2, < 4.0.0dev; python_version>=\"3.6\"',\n )\n \n+extras = {\"aiohttp\": \"aiohttp >= 3.6.2, < 4.0.0dev; python_version>='3.6'\"}\n \n with io.open(\"README.rst\", \"r\") as fh:\n long_description = fh.read()\n@@ -47,6 +47,7 @@\n packages=find_packages(exclude=(\"tests*\", \"system_tests*\")),\n namespace_packages=(\"google\",),\n install_requires=DEPENDENCIES,\n+ extras_require=extras,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n license=\"Apache 2.0\",\n keywords=\"google auth oauth client\",\n", "issue": "Use extra for asyncio dependencies\nHello! The latest release for this library pulls in aiohttp and its dependencies unconditionally, which adds non-trivial burden to projects that don\u2019t need it. Would you consider using a packaging extra so that people can opt-in?\n", "before_files": [{"content": "# Copyright 2014 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\nDEPENDENCIES = (\n \"cachetools>=2.0.0,<5.0\",\n \"pyasn1-modules>=0.2.1\",\n # rsa==4.5 is the last version to support 2.7\n # https://github.com/sybrenstuvel/python-rsa/issues/152#issuecomment-643470233\n 'rsa<4.6; python_version < \"3.5\"',\n 'rsa>=3.1.4,<5; python_version >= \"3.5\"',\n \"setuptools>=40.3.0\",\n \"six>=1.9.0\",\n 'aiohttp >= 3.6.2, < 4.0.0dev; python_version>=\"3.6\"',\n)\n\n\nwith io.open(\"README.rst\", \"r\") as fh:\n long_description = fh.read()\n\nversion = \"1.22.0\"\n\nsetup(\n name=\"google-auth\",\n version=version,\n author=\"Google Cloud Platform\",\n author_email=\"[email protected]\",\n description=\"Google Authentication Library\",\n long_description=long_description,\n url=\"https://github.com/googleapis/google-auth-library-python\",\n packages=find_packages(exclude=(\"tests*\", \"system_tests*\")),\n namespace_packages=(\"google\",),\n install_requires=DEPENDENCIES,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n license=\"Apache 2.0\",\n keywords=\"google auth oauth client\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "setup.py"}]} | 1,370 | 262 |
gh_patches_debug_28730 | rasdani/github-patches | git_diff | pytorch__ignite-2027 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Loss metric to use required_output_keys
## 🚀 Feature
Currently, if we have custom metrics that require data other than `y_pred` and `y`, [we suggest](https://discuss.pytorch.org/t/how-access-inputs-in-custom-ignite-metric/91221/6) doing the following:
```python
metrics = {
"Accuracy": Accuracy(),
"Loss": Loss(criterion, output_transform=lambda out_dict: (out_dict["y_pred"], out_dict["y"])),
"CustomMetric": CustomMetric()
}
evaluator = create_supervised_evaluator(
model,
metrics=metrics,
output_transform=lambda x, y, y_pred: {"x": x, "y": y, "y_pred": y_pred}
)
```
where `CustomMetric` is defined as
```python
class CustomMetric(Metric):
required_output_keys = ("y_pred", "y", "x")
```
The idea is to extend this for the `Loss` metric to support `required_output_keys`. The main issue with `Loss` now is the optional `(prediction, target, kwargs)` input, where `kwargs` is a dict of extra arguments for the criterion function.
</issue>
<code>
[start of ignite/metrics/loss.py]
1 from typing import Callable, Dict, Sequence, Tuple, Union, cast
2
3 import torch
4
5 from ignite.exceptions import NotComputableError
6 from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
7
8 __all__ = ["Loss"]
9
10
11 class Loss(Metric):
12 """
13 Calculates the average loss according to the passed loss_fn.
14
15 Args:
16 loss_fn: a callable taking a prediction tensor, a target
17 tensor, optionally other arguments, and returns the average loss
18 over all observations in the batch.
19 output_transform: a callable that is used to transform the
20 :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
21 form expected by the metric.
22 This can be useful if, for example, you have a multi-output model and
23 you want to compute the metric with respect to one of the outputs.
24 The output is expected to be a tuple `(prediction, target)` or
25 (prediction, target, kwargs) where kwargs is a dictionary of extra
26 keywords arguments. If extra keywords arguments are provided they are passed to `loss_fn`.
27 batch_size: a callable taking a target tensor that returns the
28 first dimension size (usually the batch size).
29 device: specifies which device updates are accumulated on. Setting the
30 metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
31 non-blocking. By default, CPU.
32
33 """
34
35 required_output_keys = None
36
37 def __init__(
38 self,
39 loss_fn: Callable,
40 output_transform: Callable = lambda x: x,
41 batch_size: Callable = len,
42 device: Union[str, torch.device] = torch.device("cpu"),
43 ):
44 super(Loss, self).__init__(output_transform, device=device)
45 self._loss_fn = loss_fn
46 self._batch_size = batch_size
47
48 @reinit__is_reduced
49 def reset(self) -> None:
50 self._sum = torch.tensor(0.0, device=self._device)
51 self._num_examples = 0
52
53 @reinit__is_reduced
54 def update(self, output: Sequence[Union[torch.Tensor, Dict]]) -> None:
55 if len(output) == 2:
56 y_pred, y = cast(Tuple[torch.Tensor, torch.Tensor], output)
57 kwargs = {} # type: Dict
58 else:
59 y_pred, y, kwargs = cast(Tuple[torch.Tensor, torch.Tensor, Dict], output)
60 average_loss = self._loss_fn(y_pred, y, **kwargs).detach()
61
62 if len(average_loss.shape) != 0:
63 raise ValueError("loss_fn did not return the average loss.")
64
65 n = self._batch_size(y)
66 self._sum += average_loss.to(self._device) * n
67 self._num_examples += n
68
69 @sync_all_reduce("_sum", "_num_examples")
70 def compute(self) -> float:
71 if self._num_examples == 0:
72 raise NotComputableError("Loss must have at least one example before it can be computed.")
73 return self._sum.item() / self._num_examples
74
[end of ignite/metrics/loss.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ignite/metrics/loss.py b/ignite/metrics/loss.py
--- a/ignite/metrics/loss.py
+++ b/ignite/metrics/loss.py
@@ -30,9 +30,52 @@
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
+ Attributes:
+ required_output_keys: dictionary defines required keys to be found in ``engine.state.output`` if the
+ latter is a dictionary. Default, ``("y_pred", "y", "criterion_kwargs")``. This is useful when the
+ criterion function requires additional arguments, which can be passed using ``criterion_kwargs``.
+ See notes below for an example.
+
+ Note:
+
+ Let's implement a Loss metric that requires ``x``, ``y_pred``, ``y`` and ``criterion_kwargs`` as input
+ for ``criterion`` function. In the example below we show how to setup standard metric like Accuracy
+ and the Loss metric using an ``evaluator`` created with
+ :meth:`~ignite.engine.create_supervised_evaluator` method.
+
+ .. code-block:: python
+
+ import torch
+ import torch.nn as nn
+ from torch.nn.functional import nll_loss
+
+ from ignite.metrics import Accuracy, Loss
+ from ignite.engine import create_supervised_evaluator
+
+ model = ...
+
+ criterion = nll_loss
+
+ metrics = {
+ "Accuracy": Accuracy(),
+ "Loss": Loss(criterion)
+ }
+
+ # global criterion kwargs
+ criterion_kwargs = {...}
+
+ evaluator = create_supervised_evaluator(
+ model,
+ metrics=metrics,
+ output_transform=lambda x, y, y_pred: {
+ "x": x, "y": y, "y_pred": y_pred, "criterion_kwargs": criterion_kwargs}
+ )
+
+ res = evaluator.run(data)
+
"""
- required_output_keys = None
+ required_output_keys = ("y_pred", "y", "criterion_kwargs")
def __init__(
self,
| {"golden_diff": "diff --git a/ignite/metrics/loss.py b/ignite/metrics/loss.py\n--- a/ignite/metrics/loss.py\n+++ b/ignite/metrics/loss.py\n@@ -30,9 +30,52 @@\n metric's device to be the same as your ``update`` arguments ensures the ``update`` method is\n non-blocking. By default, CPU.\n \n+ Attributes:\n+ required_output_keys: dictionary defines required keys to be found in ``engine.state.output`` if the\n+ latter is a dictionary. Default, ``(\"y_pred\", \"y\", \"criterion_kwargs\")``. This is useful when the\n+ criterion function requires additional arguments, which can be passed using ``criterion_kwargs``.\n+ See notes below for an example.\n+\n+ Note:\n+\n+ Let's implement a Loss metric that requires ``x``, ``y_pred``, ``y`` and ``criterion_kwargs`` as input\n+ for ``criterion`` function. In the example below we show how to setup standard metric like Accuracy\n+ and the Loss metric using an ``evaluator`` created with\n+ :meth:`~ignite.engine.create_supervised_evaluator` method.\n+\n+ .. code-block:: python\n+\n+ import torch\n+ import torch.nn as nn\n+ from torch.nn.functional import nll_loss\n+\n+ from ignite.metrics import Accuracy, Loss\n+ from ignite.engine import create_supervised_evaluator\n+\n+ model = ...\n+\n+ criterion = nll_loss\n+\n+ metrics = {\n+ \"Accuracy\": Accuracy(),\n+ \"Loss\": Loss(criterion)\n+ }\n+\n+ # global criterion kwargs\n+ criterion_kwargs = {...}\n+\n+ evaluator = create_supervised_evaluator(\n+ model,\n+ metrics=metrics,\n+ output_transform=lambda x, y, y_pred: {\n+ \"x\": x, \"y\": y, \"y_pred\": y_pred, \"criterion_kwargs\": criterion_kwargs}\n+ )\n+\n+ res = evaluator.run(data)\n+\n \"\"\"\n \n- required_output_keys = None\n+ required_output_keys = (\"y_pred\", \"y\", \"criterion_kwargs\")\n \n def __init__(\n self,\n", "issue": "Loss metric to use required_output_keys\n## \ud83d\ude80 Feature\r\n\r\nCurrently, if we have custom metrics that require data other then `y_pred` and `y`, [we suggest](https://discuss.pytorch.org/t/how-access-inputs-in-custom-ignite-metric/91221/6) to do the following: \r\n```python\r\nmetrics = {\r\n \"Accuracy\": Accuracy(),\r\n \"Loss\": Loss(criterion, output_transform=lambda out_dict: (out_dict[\"y_pred\"], out_dict[\"y\"])),\r\n \"CustomMetric\": CustomMetric()\r\n}\r\n\r\nevaluator = create_supervised_evaluator(\r\n model, \r\n metrics=metrics, \r\n output_transform=lambda x, y, y_pred: {\"x\": x, \"y\": y, \"y_pred\": y_pred}\r\n)\r\n```\r\n\r\nwhere `CustomMetric` is defined as \r\n```python\r\nclass CustomMetric(Metric):\r\n\r\n required_output_keys = (\"y_pred\", \"y\", \"x\")\r\n```\r\n\r\nThe idea is to extend this for `Loss` metric to support `required_output_keys`. 
The main issue with `Loss` now is with `(prediction, target, kwargs)` optional input, where `kwargs` is a dict for extra args for criterion function.\n", "before_files": [{"content": "from typing import Callable, Dict, Sequence, Tuple, Union, cast\n\nimport torch\n\nfrom ignite.exceptions import NotComputableError\nfrom ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce\n\n__all__ = [\"Loss\"]\n\n\nclass Loss(Metric):\n \"\"\"\n Calculates the average loss according to the passed loss_fn.\n\n Args:\n loss_fn: a callable taking a prediction tensor, a target\n tensor, optionally other arguments, and returns the average loss\n over all observations in the batch.\n output_transform: a callable that is used to transform the\n :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the\n form expected by the metric.\n This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n The output is expected to be a tuple `(prediction, target)` or\n (prediction, target, kwargs) where kwargs is a dictionary of extra\n keywords arguments. If extra keywords arguments are provided they are passed to `loss_fn`.\n batch_size: a callable taking a target tensor that returns the\n first dimension size (usually the batch size).\n device: specifies which device updates are accumulated on. Setting the\n metric's device to be the same as your ``update`` arguments ensures the ``update`` method is\n non-blocking. By default, CPU.\n\n \"\"\"\n\n required_output_keys = None\n\n def __init__(\n self,\n loss_fn: Callable,\n output_transform: Callable = lambda x: x,\n batch_size: Callable = len,\n device: Union[str, torch.device] = torch.device(\"cpu\"),\n ):\n super(Loss, self).__init__(output_transform, device=device)\n self._loss_fn = loss_fn\n self._batch_size = batch_size\n\n @reinit__is_reduced\n def reset(self) -> None:\n self._sum = torch.tensor(0.0, device=self._device)\n self._num_examples = 0\n\n @reinit__is_reduced\n def update(self, output: Sequence[Union[torch.Tensor, Dict]]) -> None:\n if len(output) == 2:\n y_pred, y = cast(Tuple[torch.Tensor, torch.Tensor], output)\n kwargs = {} # type: Dict\n else:\n y_pred, y, kwargs = cast(Tuple[torch.Tensor, torch.Tensor, Dict], output)\n average_loss = self._loss_fn(y_pred, y, **kwargs).detach()\n\n if len(average_loss.shape) != 0:\n raise ValueError(\"loss_fn did not return the average loss.\")\n\n n = self._batch_size(y)\n self._sum += average_loss.to(self._device) * n\n self._num_examples += n\n\n @sync_all_reduce(\"_sum\", \"_num_examples\")\n def compute(self) -> float:\n if self._num_examples == 0:\n raise NotComputableError(\"Loss must have at least one example before it can be computed.\")\n return self._sum.item() / self._num_examples\n", "path": "ignite/metrics/loss.py"}]} | 1,612 | 482 |
gh_patches_debug_23931 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-4285 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
nationwide_gb spider returning closed branches
The nationwide_gb spider is currently returning a number of branches that are described as "Permanently Closed" on their web pages. For example:
* https://www.nationwide.co.uk/branches/glasgow/1635-great-western-road
* https://www.nationwide.co.uk/branches/bournemouth/10-southbourne-grove
* https://www.nationwide.co.uk/branches/london/129-balham-high-road
They all have "- permanently closed" appended to their names, which might be robust enough to use to detect them. If not, they have their opening times for each day set to "closed".
nationwide_gb spider missing branches (regex not general enough)
According to e.g. https://www.altfi.com/article/9347_nationwide-pledges-to-keep-its-625-branches-open-until-2024 Nationwide should have 625 UK branches. The current nationwide_gb spider is only returning 549.
One issue that is probably behind most (if not all) of the missing branches is that the regex `r"https:\/\/www\.nationwide\.co\.uk\/branches\/[-()\w]+\/[-\w]+$"` used to detect branch page URLs is not sufficiently general. In addition to word characters and hyphens, the final (branch) part of the URL can also contain a forward slash (used to denote house-number ranges).
For example: https://www.nationwide.co.uk/branches/northampton/18/19-weston-favell-centre
</issue>
<code>
[start of locations/spiders/nationwide_gb.py]
1 from scrapy.linkextractors import LinkExtractor
2 from scrapy.spiders import CrawlSpider, Rule
3
4 from locations.structured_data_spider import StructuredDataSpider
5
6
7 class NationwideGB(CrawlSpider, StructuredDataSpider):
8 name = "nationwide_gb"
9 item_attributes = {"brand": "Nationwide", "brand_wikidata": "Q846735"}
10 start_urls = ["https://www.nationwide.co.uk/branches/index.html"]
11 rules = [
12 Rule(
13 LinkExtractor(
14 allow=r"https:\/\/www\.nationwide\.co\.uk\/branches\/[-()\w]+\/[-\w]+$"
15 ),
16 callback="parse_sd",
17 ),
18 Rule(
19 LinkExtractor(
20 allow=r"https:\/\/www\.nationwide\.co\.uk\/branches\/[-()\w]+$"
21 )
22 ),
23 ]
24 wanted_types = ["BankOrCreditUnion"]
25
[end of locations/spiders/nationwide_gb.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/nationwide_gb.py b/locations/spiders/nationwide_gb.py
--- a/locations/spiders/nationwide_gb.py
+++ b/locations/spiders/nationwide_gb.py
@@ -1,24 +1,20 @@
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
+from locations.categories import Categories
from locations.structured_data_spider import StructuredDataSpider
class NationwideGB(CrawlSpider, StructuredDataSpider):
name = "nationwide_gb"
- item_attributes = {"brand": "Nationwide", "brand_wikidata": "Q846735"}
+ item_attributes = {
+ "brand": "Nationwide",
+ "brand_wikidata": "Q846735",
+ "extras": Categories.BANK.value,
+ }
start_urls = ["https://www.nationwide.co.uk/branches/index.html"]
- rules = [
- Rule(
- LinkExtractor(
- allow=r"https:\/\/www\.nationwide\.co\.uk\/branches\/[-()\w]+\/[-\w]+$"
- ),
- callback="parse_sd",
- ),
- Rule(
- LinkExtractor(
- allow=r"https:\/\/www\.nationwide\.co\.uk\/branches\/[-()\w]+$"
- )
- ),
- ]
- wanted_types = ["BankOrCreditUnion"]
+ rules = [Rule(LinkExtractor(allow=r"/branches/"), callback="parse_sd", follow=True)]
+
+ def post_process_item(self, item, response, ld_data, **kwargs):
+ if "permanently closed" not in item["name"].lower():
+ yield item
| {"golden_diff": "diff --git a/locations/spiders/nationwide_gb.py b/locations/spiders/nationwide_gb.py\n--- a/locations/spiders/nationwide_gb.py\n+++ b/locations/spiders/nationwide_gb.py\n@@ -1,24 +1,20 @@\n from scrapy.linkextractors import LinkExtractor\n from scrapy.spiders import CrawlSpider, Rule\n \n+from locations.categories import Categories\n from locations.structured_data_spider import StructuredDataSpider\n \n \n class NationwideGB(CrawlSpider, StructuredDataSpider):\n name = \"nationwide_gb\"\n- item_attributes = {\"brand\": \"Nationwide\", \"brand_wikidata\": \"Q846735\"}\n+ item_attributes = {\n+ \"brand\": \"Nationwide\",\n+ \"brand_wikidata\": \"Q846735\",\n+ \"extras\": Categories.BANK.value,\n+ }\n start_urls = [\"https://www.nationwide.co.uk/branches/index.html\"]\n- rules = [\n- Rule(\n- LinkExtractor(\n- allow=r\"https:\\/\\/www\\.nationwide\\.co\\.uk\\/branches\\/[-()\\w]+\\/[-\\w]+$\"\n- ),\n- callback=\"parse_sd\",\n- ),\n- Rule(\n- LinkExtractor(\n- allow=r\"https:\\/\\/www\\.nationwide\\.co\\.uk\\/branches\\/[-()\\w]+$\"\n- )\n- ),\n- ]\n- wanted_types = [\"BankOrCreditUnion\"]\n+ rules = [Rule(LinkExtractor(allow=r\"/branches/\"), callback=\"parse_sd\", follow=True)]\n+\n+ def post_process_item(self, item, response, ld_data, **kwargs):\n+ if \"permanently closed\" not in item[\"name\"].lower():\n+ yield item\n", "issue": "nationwide_gb spider returning closed branches\nThe nationwide_gb spide is currently returning a number of branches that are described as \"Permanently Closed\" on their web pages. For example:\r\n\r\n* https://www.nationwide.co.uk/branches/glasgow/1635-great-western-road\r\n* https://www.nationwide.co.uk/branches/bournemouth/10-southbourne-grove\r\n* https://www.nationwide.co.uk/branches/london/129-balham-high-road\r\n\r\nThey all have \"- permanently closed\" appended to their names, which might be robust enough to use to detect them. If not, they have their opening times for each day set to \"closed\".\nnationwide_gb spider missing branches (regex not general enough)\nAccording to e.g. https://www.altfi.com/article/9347_nationwide-pledges-to-keep-its-625-branches-open-until-2024 Nationwide should have 625 UK branches. The current nationwide_gb spider is only returning 549.\r\n\r\nOne issue that is probably behind most (if not all) of the missing branches is that the regex `r\"https:\\/\\/www\\.nationwide\\.co\\.uk\\/branches\\/[-()\\w]+\\/[-\\w]+$\"` used to detect branch page URLs is not sufficiently general. 
In addition to word characters and hypens the final (branch) part of the URL can also contain a forward slash (used to denote house-number ranges).\r\n\r\nFor example: https://www.nationwide.co.uk/branches/northampton/18/19-weston-favell-centre\n", "before_files": [{"content": "from scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\n\nfrom locations.structured_data_spider import StructuredDataSpider\n\n\nclass NationwideGB(CrawlSpider, StructuredDataSpider):\n name = \"nationwide_gb\"\n item_attributes = {\"brand\": \"Nationwide\", \"brand_wikidata\": \"Q846735\"}\n start_urls = [\"https://www.nationwide.co.uk/branches/index.html\"]\n rules = [\n Rule(\n LinkExtractor(\n allow=r\"https:\\/\\/www\\.nationwide\\.co\\.uk\\/branches\\/[-()\\w]+\\/[-\\w]+$\"\n ),\n callback=\"parse_sd\",\n ),\n Rule(\n LinkExtractor(\n allow=r\"https:\\/\\/www\\.nationwide\\.co\\.uk\\/branches\\/[-()\\w]+$\"\n )\n ),\n ]\n wanted_types = [\"BankOrCreditUnion\"]\n", "path": "locations/spiders/nationwide_gb.py"}]} | 1,115 | 381 |
gh_patches_debug_6365 | rasdani/github-patches | git_diff | lutris__lutris-1251 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Log window: lock to the bottom gets lost
Caused by https://github.com/lutris/lutris/pull/1179. From my experience, it happens when there are a lot of lines outputted all at once:

</issue>
<code>
[start of lutris/gui/logwindow.py]
1 from gi.repository import Gtk
2 from lutris.gui.widgets.dialogs import Dialog
3
4
5 class LogTextView(Gtk.TextView):
6 def __init__(self, buffer):
7 super(LogTextView, self).__init__()
8
9 self.set_buffer(buffer)
10 self.set_editable(False)
11 self.set_monospace(True)
12 self.set_left_margin(10)
13 self.scroll_max = 0
14 self.set_wrap_mode(Gtk.WrapMode.CHAR)
15 self.get_style_context().add_class('lutris-logview')
16 self.connect("size-allocate", self.autoscroll)
17
18 def autoscroll(self, *args):
19 adj = self.get_vadjustment()
20 if adj.get_value() == self.scroll_max or self.scroll_max == 0:
21 adj.set_value(adj.get_upper() - adj.get_page_size())
22 self.scroll_max = adj.get_upper() - adj.get_page_size()
23
24
25 class LogWindow(Dialog):
26 def __init__(self, title, buffer, parent):
27 super(LogWindow, self).__init__(title, parent, 0,
28 ('_OK', Gtk.ResponseType.OK))
29 self.set_size_request(640, 480)
30 self.grid = Gtk.Grid()
31 self.buffer = buffer
32 self.logtextview = LogTextView(self.buffer)
33
34 scrolledwindow = Gtk.ScrolledWindow(hexpand=True, vexpand=True,
35 child=self.logtextview)
36 self.vbox.add(scrolledwindow)
37 self.show_all()
38
[end of lutris/gui/logwindow.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lutris/gui/logwindow.py b/lutris/gui/logwindow.py
--- a/lutris/gui/logwindow.py
+++ b/lutris/gui/logwindow.py
@@ -19,7 +19,9 @@
adj = self.get_vadjustment()
if adj.get_value() == self.scroll_max or self.scroll_max == 0:
adj.set_value(adj.get_upper() - adj.get_page_size())
- self.scroll_max = adj.get_upper() - adj.get_page_size()
+ self.scroll_max = adj.get_value()
+ else:
+ self.scroll_max = adj.get_upper() - adj.get_page_size()
class LogWindow(Dialog):
| {"golden_diff": "diff --git a/lutris/gui/logwindow.py b/lutris/gui/logwindow.py\n--- a/lutris/gui/logwindow.py\n+++ b/lutris/gui/logwindow.py\n@@ -19,7 +19,9 @@\n adj = self.get_vadjustment()\n if adj.get_value() == self.scroll_max or self.scroll_max == 0:\n adj.set_value(adj.get_upper() - adj.get_page_size())\n- self.scroll_max = adj.get_upper() - adj.get_page_size()\n+ self.scroll_max = adj.get_value()\n+ else:\n+ self.scroll_max = adj.get_upper() - adj.get_page_size()\n \n \n class LogWindow(Dialog):\n", "issue": "Log window: lock to the bottom gets lost\nCaused by https://github.com/lutris/lutris/pull/1179. From my experience, it happens when there are a lot lines outputted all at once:\r\n\r\n\n", "before_files": [{"content": "from gi.repository import Gtk\nfrom lutris.gui.widgets.dialogs import Dialog\n\n\nclass LogTextView(Gtk.TextView):\n def __init__(self, buffer):\n super(LogTextView, self).__init__()\n\n self.set_buffer(buffer)\n self.set_editable(False)\n self.set_monospace(True)\n self.set_left_margin(10)\n self.scroll_max = 0\n self.set_wrap_mode(Gtk.WrapMode.CHAR)\n self.get_style_context().add_class('lutris-logview')\n self.connect(\"size-allocate\", self.autoscroll)\n\n def autoscroll(self, *args):\n adj = self.get_vadjustment()\n if adj.get_value() == self.scroll_max or self.scroll_max == 0:\n adj.set_value(adj.get_upper() - adj.get_page_size())\n self.scroll_max = adj.get_upper() - adj.get_page_size()\n\n\nclass LogWindow(Dialog):\n def __init__(self, title, buffer, parent):\n super(LogWindow, self).__init__(title, parent, 0,\n ('_OK', Gtk.ResponseType.OK))\n self.set_size_request(640, 480)\n self.grid = Gtk.Grid()\n self.buffer = buffer\n self.logtextview = LogTextView(self.buffer)\n\n scrolledwindow = Gtk.ScrolledWindow(hexpand=True, vexpand=True,\n child=self.logtextview)\n self.vbox.add(scrolledwindow)\n self.show_all()\n", "path": "lutris/gui/logwindow.py"}]} | 1,043 | 146 |
gh_patches_debug_611 | rasdani/github-patches | git_diff | pex-tool__pex-1251 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.31
On the docket:
+ [x] When Pex is run from a Pex PEX its isolation is broken. #1232
+ [x] The `--venv` mode `pex` script does not have a `__name__ == '__main__'` guard breaking multiprocessing. #1236
+ [x] The `--seed` mode for a `--venv` PEX is unsafe. #1239
+ [x] The venv `pex` script handles entrypoint functions differently from PEX. #1241
+ [x] Interpreter identification leaks an unconstrained `$PWD` entry into `sys.path`. #1231
+ [x] Support control of venv creation mode `--copies` vs. `--symlinks` #1230
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.30"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.30"
+__version__ = "2.1.31"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.30\"\n+__version__ = \"2.1.31\"\n", "issue": "Release 2.1.31\nOn the docket:\r\n+ [x] When Pex is run from a Pex PEX its isolation is broken. #1232\r\n+ [x] The `--venv` mode `pex` script does not have a `__name__ == '__main__'` guard breaking multiprocessing. #1236\r\n+ [x] The `--seed` mode for a `--venv` PEX is unsafe. #1239\r\n+ [x] The venv `pex` script handles entrypoint functions differently from PEX. #1241\r\n+ [x] Interpreter identification leaks an unconstrained `$PWD` entry into `sys.path`. #1231\r\n+ [x] Support control of venv creation mode `--copies` vs. `--symlinks` #1230\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.30\"\n", "path": "pex/version.py"}]} | 772 | 97 |
gh_patches_debug_10697 | rasdani/github-patches | git_diff | freedomofpress__securedrop-4884 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Upgrade Ansble to 2.7 series
## Description
SecureDrop currently uses the Ansible 2.6 series which is approaching end-of-life. Ansible major versions are supported (receive security patches) for [three major releases](https://docs.ansible.com/ansible/latest/reference_appendices/release_and_maintenance.html).
We should move to Ansible 2.7 to ensure a smooth transition, which would allow us to rapidly patch should there be a need to quickly patch.
## User Stories
As a developer, I want to make sure my dependencies are being maintained for security issues.
</issue>
<code>
[start of install_files/ansible-base/callback_plugins/ansible_version_check.py]
1 # -*- encoding:utf-8 -*-
2 from __future__ import absolute_import, division, print_function, \
3 unicode_literals
4
5 import sys
6
7 import ansible
8
9 try:
10 # Version 2.0+
11 from ansible.plugins.callback import CallbackBase
12 except ImportError:
13 CallbackBase = object
14
15
16 def print_red_bold(text):
17 print('\x1b[31;1m' + text + '\x1b[0m')
18
19
20 class CallbackModule(CallbackBase):
21 def __init__(self):
22 # Can't use `on_X` because this isn't forwards compatible
23 # with Ansible 2.0+
24 required_version = '2.6.19' # Keep synchronized with requirements files
25 if not ansible.__version__.startswith(required_version):
26 print_red_bold(
27 "SecureDrop restriction: only Ansible {version}.*"
28 "is supported."
29 .format(version=required_version)
30 )
31 sys.exit(1)
32
[end of install_files/ansible-base/callback_plugins/ansible_version_check.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/install_files/ansible-base/callback_plugins/ansible_version_check.py b/install_files/ansible-base/callback_plugins/ansible_version_check.py
--- a/install_files/ansible-base/callback_plugins/ansible_version_check.py
+++ b/install_files/ansible-base/callback_plugins/ansible_version_check.py
@@ -21,7 +21,7 @@
def __init__(self):
# Can't use `on_X` because this isn't forwards compatible
# with Ansible 2.0+
- required_version = '2.6.19' # Keep synchronized with requirements files
+ required_version = '2.7.13' # Keep synchronized with requirements files
if not ansible.__version__.startswith(required_version):
print_red_bold(
"SecureDrop restriction: only Ansible {version}.*"
| {"golden_diff": "diff --git a/install_files/ansible-base/callback_plugins/ansible_version_check.py b/install_files/ansible-base/callback_plugins/ansible_version_check.py\n--- a/install_files/ansible-base/callback_plugins/ansible_version_check.py\n+++ b/install_files/ansible-base/callback_plugins/ansible_version_check.py\n@@ -21,7 +21,7 @@\n def __init__(self):\n # Can't use `on_X` because this isn't forwards compatible\n # with Ansible 2.0+\n- required_version = '2.6.19' # Keep synchronized with requirements files\n+ required_version = '2.7.13' # Keep synchronized with requirements files\n if not ansible.__version__.startswith(required_version):\n print_red_bold(\n \"SecureDrop restriction: only Ansible {version}.*\"\n", "issue": "Upgrade Ansble to 2.7 series\n## Description\r\n\r\nSecureDrop currently uses the Ansible 2.6 series which is approaching end-of-life. Ansible major versions are supported (receive security patches) for [three major releases](https://docs.ansible.com/ansible/latest/reference_appendices/release_and_maintenance.html).\r\n\r\nWe should move to Ansible 2.7 to ensure a smooth transition, which would allow us to rapidly patch should there be a need to quickly patch.\r\n\r\n## User Stories\r\n\r\nAs a developer, I want to make sure my dependencies are being maintained for security issues.\r\n\n", "before_files": [{"content": "# -*- encoding:utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, \\\n unicode_literals\n\nimport sys\n\nimport ansible\n\ntry:\n # Version 2.0+\n from ansible.plugins.callback import CallbackBase\nexcept ImportError:\n CallbackBase = object\n\n\ndef print_red_bold(text):\n print('\\x1b[31;1m' + text + '\\x1b[0m')\n\n\nclass CallbackModule(CallbackBase):\n def __init__(self):\n # Can't use `on_X` because this isn't forwards compatible\n # with Ansible 2.0+\n required_version = '2.6.19' # Keep synchronized with requirements files\n if not ansible.__version__.startswith(required_version):\n print_red_bold(\n \"SecureDrop restriction: only Ansible {version}.*\"\n \"is supported.\"\n .format(version=required_version)\n )\n sys.exit(1)\n", "path": "install_files/ansible-base/callback_plugins/ansible_version_check.py"}]} | 936 | 178 |
gh_patches_debug_7275 | rasdani/github-patches | git_diff | facebookresearch__xformers-136 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[CI] InProj countainer not properly covered
# 🐛 Bug
Not a bug per say, but many inProjCountainer options are not covered, see https://app.codecov.io/gh/facebookresearch/xformers/blob/main/xformers/components/in_proj_container.py
</issue>
<code>
[start of xformers/components/__init__.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
2 #
3 # This source code is licensed under the BSD license found in the
4 # LICENSE file in the root directory of this source tree.
5
6
7 from dataclasses import fields
8 from pathlib import Path
9 from typing import Any, Dict, Union
10
11 from xformers.utils import import_all_modules
12
13 from .activations import Activation, build_activation # noqa
14 from .attention import Attention, build_attention # noqa
15 from .multi_head_dispatch import MultiHeadDispatch, MultiHeadDispatchConfig # noqa
16 from .residual import LayerNormStyle, PostNorm, PreNorm, Residual # noqa
17
18 # automatically import any Python files in the directory
19 import_all_modules(str(Path(__file__).parent), "xformers.components")
20
21
22 def build_multi_head_attention(
23 multi_head_config: Union[MultiHeadDispatchConfig, Dict[str, Any]],
24 ):
25 """Builds a multihead attention from a config.
26
27 This assumes a 'name' key in the config which is used to determine what
28 attention class to instantiate. For instance, a config `{"name": "my_attention",
29 "foo": "bar"}` will find a class that was registered as "my_attention"
30 (see :func:`register_attention`) and call .from_config on it."""
31
32 if not isinstance(multi_head_config, MultiHeadDispatchConfig):
33 # Extract the required fields
34 field_names = list(map(lambda x: x.name, fields(MultiHeadDispatchConfig)))
35
36 # The missing fields get Noned
37 for k in field_names:
38 if k not in multi_head_config.keys():
39 multi_head_config[k] = None
40
41 # Could be that the attention needs to be instantiated
42 if not isinstance(multi_head_config["attention"], Attention):
43 # Convenience: fill in possible missing fields
44 if "num_heads" not in multi_head_config["attention"]:
45 multi_head_config["attention"]["num_heads"] = multi_head_config[
46 "num_heads"
47 ]
48
49 if (
50 "dim_features" not in multi_head_config["attention"]
51 or multi_head_config["attention"]["dim_features"] is None
52 ):
53 multi_head_config["attention"]["dim_features"] = (
54 multi_head_config["dim_model"] // multi_head_config["num_heads"]
55 )
56
57 multi_head_config["attention"] = build_attention(
58 multi_head_config["attention"]
59 )
60
61 multi_head_config = MultiHeadDispatchConfig(**multi_head_config)
62
63 return MultiHeadDispatch.from_config(multi_head_config)
64
[end of xformers/components/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/xformers/components/__init__.py b/xformers/components/__init__.py
--- a/xformers/components/__init__.py
+++ b/xformers/components/__init__.py
@@ -12,6 +12,7 @@
from .activations import Activation, build_activation # noqa
from .attention import Attention, build_attention # noqa
+from .in_proj_container import InProjContainer, InProjParams # noqa
from .multi_head_dispatch import MultiHeadDispatch, MultiHeadDispatchConfig # noqa
from .residual import LayerNormStyle, PostNorm, PreNorm, Residual # noqa
| {"golden_diff": "diff --git a/xformers/components/__init__.py b/xformers/components/__init__.py\n--- a/xformers/components/__init__.py\n+++ b/xformers/components/__init__.py\n@@ -12,6 +12,7 @@\n \n from .activations import Activation, build_activation # noqa\n from .attention import Attention, build_attention # noqa\n+from .in_proj_container import InProjContainer, InProjParams # noqa\n from .multi_head_dispatch import MultiHeadDispatch, MultiHeadDispatchConfig # noqa\n from .residual import LayerNormStyle, PostNorm, PreNorm, Residual # noqa\n", "issue": "[CI] InProj countainer not properly covered\n# \ud83d\udc1b Bug\r\nNot a bug per say, but many inProjCountainer options are not covered, see https://app.codecov.io/gh/facebookresearch/xformers/blob/main/xformers/components/in_proj_container.py\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nfrom dataclasses import fields\nfrom pathlib import Path\nfrom typing import Any, Dict, Union\n\nfrom xformers.utils import import_all_modules\n\nfrom .activations import Activation, build_activation # noqa\nfrom .attention import Attention, build_attention # noqa\nfrom .multi_head_dispatch import MultiHeadDispatch, MultiHeadDispatchConfig # noqa\nfrom .residual import LayerNormStyle, PostNorm, PreNorm, Residual # noqa\n\n# automatically import any Python files in the directory\nimport_all_modules(str(Path(__file__).parent), \"xformers.components\")\n\n\ndef build_multi_head_attention(\n multi_head_config: Union[MultiHeadDispatchConfig, Dict[str, Any]],\n):\n \"\"\"Builds a multihead attention from a config.\n\n This assumes a 'name' key in the config which is used to determine what\n attention class to instantiate. For instance, a config `{\"name\": \"my_attention\",\n \"foo\": \"bar\"}` will find a class that was registered as \"my_attention\"\n (see :func:`register_attention`) and call .from_config on it.\"\"\"\n\n if not isinstance(multi_head_config, MultiHeadDispatchConfig):\n # Extract the required fields\n field_names = list(map(lambda x: x.name, fields(MultiHeadDispatchConfig)))\n\n # The missing fields get Noned\n for k in field_names:\n if k not in multi_head_config.keys():\n multi_head_config[k] = None\n\n # Could be that the attention needs to be instantiated\n if not isinstance(multi_head_config[\"attention\"], Attention):\n # Convenience: fill in possible missing fields\n if \"num_heads\" not in multi_head_config[\"attention\"]:\n multi_head_config[\"attention\"][\"num_heads\"] = multi_head_config[\n \"num_heads\"\n ]\n\n if (\n \"dim_features\" not in multi_head_config[\"attention\"]\n or multi_head_config[\"attention\"][\"dim_features\"] is None\n ):\n multi_head_config[\"attention\"][\"dim_features\"] = (\n multi_head_config[\"dim_model\"] // multi_head_config[\"num_heads\"]\n )\n\n multi_head_config[\"attention\"] = build_attention(\n multi_head_config[\"attention\"]\n )\n\n multi_head_config = MultiHeadDispatchConfig(**multi_head_config)\n\n return MultiHeadDispatch.from_config(multi_head_config)\n", "path": "xformers/components/__init__.py"}]} | 1,264 | 140 |
gh_patches_debug_1866 | rasdani/github-patches | git_diff | modin-project__modin-1782 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ClusterError class should implement its own version of __str__ method
<!--
General questions should be asked on the mailing list [email protected].
Before submitting an issue, please fill out the following form.
-->
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**:
- **Modin installed from (source or binary)**:
- **Modin version**:
- **Python version**:
- **Exact command to reproduce**:
<!--
You can obtain the Modin version with
python -c "import modin; print(modin.__version__)"
-->
### Describe the problem
<!-- Describe the problem clearly here. -->
`ClusterError` includes the `cause` field that is not printed. This makes it difficult to understand the problems that cause exceptions.
### Source code / logs
<!-- Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached. Try to provide a reproducible test case that is the bare minimum necessary to generate the problem. -->
</issue>
<code>
[start of modin/experimental/cloud/base.py]
1 # Licensed to Modin Development Team under one or more contributor license agreements.
2 # See the NOTICE file distributed with this work for additional information regarding
3 # copyright ownership. The Modin Development Team licenses this file to you under the
4 # Apache License, Version 2.0 (the "License"); you may not use this file except in
5 # compliance with the License. You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software distributed under
10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific language
12 # governing permissions and limitations under the License.
13
14 from typing import NamedTuple
15 import os
16 import sys
17
18
19 class ClusterError(Exception):
20 """
21 Generic cluster operating exception
22 """
23
24 def __init__(self, *args, cause: BaseException = None, traceback: str = None, **kw):
25 self.cause = cause
26 self.traceback = traceback
27 super().__init__(*args, **kw)
28
29
30 class CannotSpawnCluster(ClusterError):
31 """
32 Raised when cluster cannot be spawned in the cloud
33 """
34
35
36 class CannotDestroyCluster(ClusterError):
37 """
38 Raised when cluster cannot be destroyed in the cloud
39 """
40
41
42 class ConnectionDetails(NamedTuple):
43 user_name: str = "modin"
44 key_file: str = None
45 address: str = None
46 port: int = 22
47
48
49 _EXT = (".exe", ".com", ".cmd", ".bat", "") if sys.platform == "win32" else ("",)
50
51
52 def _which(prog):
53 for entry in os.environ["PATH"].split(os.pathsep):
54 for ext in _EXT:
55 path = os.path.join(entry, prog + ext)
56 if os.access(path, os.X_OK):
57 return path
58 return None
59
60
61 def _get_ssh_proxy_command():
62 socks_proxy = os.environ.get("MODIN_SOCKS_PROXY", None)
63 if socks_proxy is None:
64 return None
65 if _which("nc"):
66 return f"nc -x {socks_proxy} %h %p"
67 elif _which("connect"):
68 return f"connect -S {socks_proxy} %h %p"
69 raise ClusterError(
70 "SSH through proxy required but no supported proxying tools found"
71 )
72
[end of modin/experimental/cloud/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/modin/experimental/cloud/base.py b/modin/experimental/cloud/base.py
--- a/modin/experimental/cloud/base.py
+++ b/modin/experimental/cloud/base.py
@@ -26,6 +26,11 @@
self.traceback = traceback
super().__init__(*args, **kw)
+ def __str__(self):
+ if self.clause:
+ return f"clause: {self.cause}\n{super()}"
+ return str(super())
+
class CannotSpawnCluster(ClusterError):
"""
| {"golden_diff": "diff --git a/modin/experimental/cloud/base.py b/modin/experimental/cloud/base.py\n--- a/modin/experimental/cloud/base.py\n+++ b/modin/experimental/cloud/base.py\n@@ -26,6 +26,11 @@\n self.traceback = traceback\n super().__init__(*args, **kw)\n \n+ def __str__(self):\n+ if self.clause:\n+ return f\"clause: {self.cause}\\n{super()}\"\n+ return str(super())\n+\n \n class CannotSpawnCluster(ClusterError):\n \"\"\"\n", "issue": "ClusterError class should implement its own version of __str__ method\n<!--\r\nGeneral questions should be asked on the mailing list [email protected].\r\n\r\nBefore submitting an issue, please fill out the following form.\r\n-->\r\n\r\n### System information\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**:\r\n- **Modin installed from (source or binary)**:\r\n- **Modin version**:\r\n- **Python version**:\r\n- **Exact command to reproduce**:\r\n\r\n<!--\r\nYou can obtain the Modin version with\r\n\r\npython -c \"import modin; print(modin.__version__)\"\r\n-->\r\n\r\n### Describe the problem\r\n<!-- Describe the problem clearly here. -->\r\n`ClusterError` includes the `cause` field that is not printed. This makes it difficult to understand the problems that cause exceptions.\r\n\r\n### Source code / logs\r\n<!-- Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached. Try to provide a reproducible test case that is the bare minimum necessary to generate the problem. -->\r\n\n", "before_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\nfrom typing import NamedTuple\nimport os\nimport sys\n\n\nclass ClusterError(Exception):\n \"\"\"\n Generic cluster operating exception\n \"\"\"\n\n def __init__(self, *args, cause: BaseException = None, traceback: str = None, **kw):\n self.cause = cause\n self.traceback = traceback\n super().__init__(*args, **kw)\n\n\nclass CannotSpawnCluster(ClusterError):\n \"\"\"\n Raised when cluster cannot be spawned in the cloud\n \"\"\"\n\n\nclass CannotDestroyCluster(ClusterError):\n \"\"\"\n Raised when cluster cannot be destroyed in the cloud\n \"\"\"\n\n\nclass ConnectionDetails(NamedTuple):\n user_name: str = \"modin\"\n key_file: str = None\n address: str = None\n port: int = 22\n\n\n_EXT = (\".exe\", \".com\", \".cmd\", \".bat\", \"\") if sys.platform == \"win32\" else (\"\",)\n\n\ndef _which(prog):\n for entry in os.environ[\"PATH\"].split(os.pathsep):\n for ext in _EXT:\n path = os.path.join(entry, prog + ext)\n if os.access(path, os.X_OK):\n return path\n return None\n\n\ndef _get_ssh_proxy_command():\n socks_proxy = os.environ.get(\"MODIN_SOCKS_PROXY\", None)\n if socks_proxy is None:\n return None\n if _which(\"nc\"):\n return f\"nc -x {socks_proxy} %h %p\"\n elif _which(\"connect\"):\n return f\"connect -S {socks_proxy} %h %p\"\n raise ClusterError(\n \"SSH through proxy required but no supported proxying tools found\"\n )\n", "path": "modin/experimental/cloud/base.py"}]} | 1,445 | 122 |
gh_patches_debug_2605 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-940 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
DB Types in column.valid_target_types are not in sync with the types returned in database types endpoint
## Description
* `valid_target_types` property of column returns "DOUBLE PRECISION"
- Endpoint: /api/v0/tables/14/columns/
* Types endpoint returns mathesar types where Number has the db type "DOUBLE_PRECISION"
- http://localhost:8000/api/v0/databases/1/types/
- Mathesar type: Number
Note that "DOUBLE PRECISION" and "DOUBLE_PRECISION" differ from each other.
## Expected behavior
Both endpoints should return values with same spelling.
</issue>
<code>
[start of db/types/base.py]
1 from enum import Enum
2
3 from sqlalchemy import create_engine
4
5 from db import constants
6
7
8 CHAR = 'char'
9 STRING = 'string'
10 VARCHAR = 'varchar'
11
12
13 class PostgresType(Enum):
14 """
15 This only includes built-in Postgres types that SQLAlchemy supports.
16 SQLAlchemy doesn't support XML. See zzzeek's comment on:
17 https://stackoverflow.com/questions/16153512/using-postgresql-xml-data-type-with-sqlalchemy
18 The values are keys returned by get_available_types.
19 """
20 _ARRAY = '_array'
21 BIGINT = 'bigint'
22 BIT_VARYING = 'bit varying'
23 BIT = 'bit'
24 BOOLEAN = 'boolean'
25 BYTEA = 'bytea'
26 CHAR = '"char"'
27 CHARACTER_VARYING = 'character varying'
28 CHARACTER = 'character'
29 CIDR = 'cidr'
30 DATE = 'date'
31 DATERANGE = 'daterange'
32 DECIMAL = 'decimal'
33 DOUBLE_PRECISION = 'double precision'
34 FLOAT = 'float'
35 HSTORE = 'hstore'
36 INET = 'inet'
37 INT4RANGE = 'int4range'
38 INT8RANGE = 'int8range'
39 INTEGER = 'integer'
40 INTERVAL = 'interval'
41 JSON = 'json'
42 JSONB = 'jsonb'
43 MACADDR = 'macaddr'
44 MONEY = 'money'
45 NAME = 'name'
46 NUMERIC = 'numeric'
47 NUMRANGE = 'numrange'
48 OID = 'oid'
49 REAL = 'real'
50 REGCLASS = 'regclass'
51 SMALLINT = 'smallint'
52 TEXT = 'text'
53 TIME = 'time'
54 TIME_WITH_TIME_ZONE = 'time with time zone'
55 TIME_WITHOUT_TIME_ZONE = 'time without time zone'
56 TIMESTAMP = 'timestamp'
57 TIMESTAMP_WITH_TIMESTAMP_ZONE = 'timestamp with time zone'
58 TIMESTAMP_WITHOUT_TIMESTAMP_ZONE = 'timestamp without time zone'
59 TSRANGE = 'tsrange'
60 TSTZRANGE = 'tstzrange'
61 TSVECTOR = 'tsvector'
62 UUID = 'uuid'
63
64
65 class MathesarCustomType(Enum):
66 """
67 This is a list of custom Mathesar DB types.
68 Keys returned by get_available_types are of the format 'mathesar_types.VALUE'
69 """
70 EMAIL = 'email'
71 URI = 'uri'
72 MONEY = 'money'
73
74
75 SCHEMA = f"{constants.MATHESAR_PREFIX}types"
76 # Since we want to have our identifiers quoted appropriately for use in
77 # PostgreSQL, we want to use the postgres dialect preparer to set this up.
78 preparer = create_engine("postgresql://").dialect.identifier_preparer
79
80
81 def get_qualified_name(name):
82 return ".".join([preparer.quote_schema(SCHEMA), name])
83
84
85 def get_available_types(engine):
86 return engine.dialect.ischema_names
87
88
89 def get_db_type_name(sa_type, engine):
90 USER_DEFINED_STR = 'user_defined'
91 db_type = sa_type.__visit_name__
92 if db_type == USER_DEFINED_STR:
93 db_type = sa_type().compile(engine.dialect)
94 return db_type
95
[end of db/types/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/db/types/base.py b/db/types/base.py
--- a/db/types/base.py
+++ b/db/types/base.py
@@ -87,8 +87,8 @@
def get_db_type_name(sa_type, engine):
- USER_DEFINED_STR = 'user_defined'
- db_type = sa_type.__visit_name__
- if db_type == USER_DEFINED_STR:
- db_type = sa_type().compile(engine.dialect)
+ try:
+ db_type = sa_type.compile(dialect=engine.dialect)
+ except TypeError:
+ db_type = sa_type().compile(dialect=engine.dialect)
return db_type
| {"golden_diff": "diff --git a/db/types/base.py b/db/types/base.py\n--- a/db/types/base.py\n+++ b/db/types/base.py\n@@ -87,8 +87,8 @@\n \n \n def get_db_type_name(sa_type, engine):\n- USER_DEFINED_STR = 'user_defined'\n- db_type = sa_type.__visit_name__\n- if db_type == USER_DEFINED_STR:\n- db_type = sa_type().compile(engine.dialect)\n+ try:\n+ db_type = sa_type.compile(dialect=engine.dialect)\n+ except TypeError:\n+ db_type = sa_type().compile(dialect=engine.dialect)\n return db_type\n", "issue": "DB Types in column.valid_target_types are not in sync with the types returned in database types endpoint\n## Description\r\n* `valid_target_types` property of column returns \"DOUBLE PRECISION\"\r\n - Endpoint: /api/v0/tables/14/columns/\r\n* Types endpoint returns mathesar types where Number has the db type \"DOUBLE_PRECISION\"\r\n - http://localhost:8000/api/v0/databases/1/types/\r\n - Mathesar type: Number\r\n\r\nNote that \"DOUBLE PRECISION\" and \"DOUBLE_PRECISION\" differ from each other.\r\n\r\n## Expected behavior\r\nBoth endpoints should return values with same spelling.\r\n\n", "before_files": [{"content": "from enum import Enum\n\nfrom sqlalchemy import create_engine\n\nfrom db import constants\n\n\nCHAR = 'char'\nSTRING = 'string'\nVARCHAR = 'varchar'\n\n\nclass PostgresType(Enum):\n \"\"\"\n This only includes built-in Postgres types that SQLAlchemy supports.\n SQLAlchemy doesn't support XML. See zzzeek's comment on:\n https://stackoverflow.com/questions/16153512/using-postgresql-xml-data-type-with-sqlalchemy\n The values are keys returned by get_available_types.\n \"\"\"\n _ARRAY = '_array'\n BIGINT = 'bigint'\n BIT_VARYING = 'bit varying'\n BIT = 'bit'\n BOOLEAN = 'boolean'\n BYTEA = 'bytea'\n CHAR = '\"char\"'\n CHARACTER_VARYING = 'character varying'\n CHARACTER = 'character'\n CIDR = 'cidr'\n DATE = 'date'\n DATERANGE = 'daterange'\n DECIMAL = 'decimal'\n DOUBLE_PRECISION = 'double precision'\n FLOAT = 'float'\n HSTORE = 'hstore'\n INET = 'inet'\n INT4RANGE = 'int4range'\n INT8RANGE = 'int8range'\n INTEGER = 'integer'\n INTERVAL = 'interval'\n JSON = 'json'\n JSONB = 'jsonb'\n MACADDR = 'macaddr'\n MONEY = 'money'\n NAME = 'name'\n NUMERIC = 'numeric'\n NUMRANGE = 'numrange'\n OID = 'oid'\n REAL = 'real'\n REGCLASS = 'regclass'\n SMALLINT = 'smallint'\n TEXT = 'text'\n TIME = 'time'\n TIME_WITH_TIME_ZONE = 'time with time zone'\n TIME_WITHOUT_TIME_ZONE = 'time without time zone'\n TIMESTAMP = 'timestamp'\n TIMESTAMP_WITH_TIMESTAMP_ZONE = 'timestamp with time zone'\n TIMESTAMP_WITHOUT_TIMESTAMP_ZONE = 'timestamp without time zone'\n TSRANGE = 'tsrange'\n TSTZRANGE = 'tstzrange'\n TSVECTOR = 'tsvector'\n UUID = 'uuid'\n\n\nclass MathesarCustomType(Enum):\n \"\"\"\n This is a list of custom Mathesar DB types.\n Keys returned by get_available_types are of the format 'mathesar_types.VALUE'\n \"\"\"\n EMAIL = 'email'\n URI = 'uri'\n MONEY = 'money'\n\n\nSCHEMA = f\"{constants.MATHESAR_PREFIX}types\"\n# Since we want to have our identifiers quoted appropriately for use in\n# PostgreSQL, we want to use the postgres dialect preparer to set this up.\npreparer = create_engine(\"postgresql://\").dialect.identifier_preparer\n\n\ndef get_qualified_name(name):\n return \".\".join([preparer.quote_schema(SCHEMA), name])\n\n\ndef get_available_types(engine):\n return engine.dialect.ischema_names\n\n\ndef get_db_type_name(sa_type, engine):\n USER_DEFINED_STR = 'user_defined'\n db_type = sa_type.__visit_name__\n if db_type == USER_DEFINED_STR:\n db_type = sa_type().compile(engine.dialect)\n return db_type\n", "path": "db/types/base.py"}]} | 1,521 | 141 |