Dataset columns (type and observed range, per the dataset viewer):

| column            | type   | observed range      |
|-------------------|--------|---------------------|
| problem_id        | string | lengths 18-22       |
| source            | string | 1 distinct value    |
| task_type         | string | 1 distinct value    |
| in_source_id      | string | lengths 13-58       |
| prompt            | string | lengths 1.71k-9.01k |
| golden_diff       | string | lengths 151-4.94k   |
| verification_info | string | lengths 465-11.3k   |
| num_tokens_prompt | int64  | 557-2.05k           |
| num_tokens_diff   | int64  | 48-1.02k            |

problem_id: gh_patches_debug_5416
source: rasdani/github-patches
task_type: git_diff
in_source_id: rasterio__rasterio-1192
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> query params in https url is not working Hi I wanted to use rasterio with a https url that has query-params, for example: https://.....?a=a&b=b First I wanted to see if gdal supports that kind of url and gdal does, so I looked for the code in rasterio that is responsible for cutting the url, It can be found here: https://github.com/mapbox/rasterio/blob/master/rasterio/vfs.py#L32 I added in my env the following code: ``` if parts.query: path += "?" + parts.query ``` and it solved the issue, Is there a reason for cutting the querystring? Thanks, Guy query params in https url is not working Hi I wanted to use rasterio with a https url that has query-params, for example: https://.....?a=a&b=b First I wanted to see if gdal supports that kind of url and gdal does, so I looked for the code in rasterio that is responsible for cutting the url, It can be found here: https://github.com/mapbox/rasterio/blob/master/rasterio/vfs.py#L32 I added in my env the following code: ``` if parts.query: path += "?" + parts.query ``` and it solved the issue, Is there a reason for cutting the querystring? Thanks, Guy </issue> <code> [start of rasterio/vfs.py] 1 """Implementation of Apache VFS schemes and URLs.""" 2 3 import os 4 5 from rasterio.compat import urlparse 6 7 8 # NB: As not to propagate fallacies of distributed computing, Rasterio 9 # does not support HTTP or FTP URLs via GDAL's vsicurl handler. Only 10 # the following local filesystem schemes are supported. 11 SCHEMES = { 12 'gzip': 'gzip', 13 'gzip+file': 'gzip', 14 'zip': 'zip', 15 'zip+file': 'zip', 16 'tar': 'tar', 17 'tar+file': 'tar', 18 'https': 'curl', 19 'http': 'curl', 20 's3': 's3'} 21 22 FILE_SCHEMES = [ 23 '', 'file', 'gzip', 'gzip+file', 'zip', 'zip+file', 'tar', 'tar+file'] 24 25 26 def parse_path(uri, vfs=None): 27 """Parse a URI or Apache VFS URL into its parts 28 29 Returns: tuple 30 (path, archive, scheme) 31 """ 32 archive = scheme = None 33 path = uri 34 if vfs: 35 parts = urlparse(vfs) 36 scheme = parts.scheme 37 archive = parts.path 38 if parts.netloc and parts.netloc != 'localhost': # pragma: no cover 39 archive = parts.netloc + archive 40 else: 41 parts = urlparse(path) 42 scheme = parts.scheme 43 path = parts.path 44 if parts.netloc and parts.netloc != 'localhost': 45 path = parts.netloc + path 46 # There are certain URI schemes we favor over GDAL's names. 47 if scheme in SCHEMES: 48 parts = path.split('!') 49 path = parts.pop() if parts else None 50 archive = parts.pop() if parts else None 51 # For filesystem paths. 52 elif scheme.lower() in FILE_SCHEMES: 53 pass 54 # We permit GDAL's idiosyncratic URI-like dataset paths such as 55 # 'netcdf':... to fall right through with no parsed archive 56 # or scheme. 57 else: 58 archive = scheme = None 59 path = uri 60 61 return path, archive, scheme 62 63 64 def vsi_path(path, archive=None, scheme=None): 65 """Convert a parsed path to a GDAL VSI path.""" 66 # If a VSF and archive file are specified, we convert the path to 67 # a GDAL VSI path (see cpl_vsi.h). 
68 if scheme and scheme.startswith('http'): 69 result = "/vsicurl/{0}://{1}".format(scheme, path) 70 elif scheme and scheme == 's3': 71 result = "/vsis3/{0}".format(path) 72 elif scheme and scheme != 'file': 73 if archive: 74 result = '/vsi{0}/{1}/{2}'.format( 75 scheme, archive, path.lstrip('/')) 76 else: 77 result = '/vsi{0}/{1}'.format(scheme, path.lstrip('/')) 78 else: 79 result = path 80 return result 81 [end of rasterio/vfs.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/rasterio/vfs.py b/rasterio/vfs.py --- a/rasterio/vfs.py +++ b/rasterio/vfs.py @@ -41,6 +41,8 @@ parts = urlparse(path) scheme = parts.scheme path = parts.path + if parts.query: + path += "?" + parts.query if parts.netloc and parts.netloc != 'localhost': path = parts.netloc + path # There are certain URI schemes we favor over GDAL's names.
{"golden_diff": "diff --git a/rasterio/vfs.py b/rasterio/vfs.py\n--- a/rasterio/vfs.py\n+++ b/rasterio/vfs.py\n@@ -41,6 +41,8 @@\n parts = urlparse(path)\n scheme = parts.scheme\n path = parts.path\n+ if parts.query:\n+ path += \"?\" + parts.query\n if parts.netloc and parts.netloc != 'localhost':\n path = parts.netloc + path\n # There are certain URI schemes we favor over GDAL's names.\n", "issue": "query params in https url is not working\nHi \r\n\r\nI wanted to use rasterio with a https url that has query-params, for example: https://.....?a=a&b=b\r\nFirst I wanted to see if gdal supports that kind of url and gdal does, so I looked for the code in rasterio that is responsible for cutting the url, \r\n\r\nIt can be found here:\r\nhttps://github.com/mapbox/rasterio/blob/master/rasterio/vfs.py#L32\r\n\r\nI added in my env the following code:\r\n```\r\n if parts.query: \r\n path += \"?\" + parts.query \r\n```\r\n\r\nand it solved the issue, \r\n\r\nIs there a reason for cutting the querystring?\r\n\r\n\r\n\r\nThanks, Guy\r\n\nquery params in https url is not working\nHi \r\n\r\nI wanted to use rasterio with a https url that has query-params, for example: https://.....?a=a&b=b\r\nFirst I wanted to see if gdal supports that kind of url and gdal does, so I looked for the code in rasterio that is responsible for cutting the url, \r\n\r\nIt can be found here:\r\nhttps://github.com/mapbox/rasterio/blob/master/rasterio/vfs.py#L32\r\n\r\nI added in my env the following code:\r\n```\r\n if parts.query: \r\n path += \"?\" + parts.query \r\n```\r\n\r\nand it solved the issue, \r\n\r\nIs there a reason for cutting the querystring?\r\n\r\n\r\n\r\nThanks, Guy\r\n\n", "before_files": [{"content": "\"\"\"Implementation of Apache VFS schemes and URLs.\"\"\"\n\nimport os\n\nfrom rasterio.compat import urlparse\n\n\n# NB: As not to propagate fallacies of distributed computing, Rasterio\n# does not support HTTP or FTP URLs via GDAL's vsicurl handler. Only\n# the following local filesystem schemes are supported.\nSCHEMES = {\n 'gzip': 'gzip',\n 'gzip+file': 'gzip',\n 'zip': 'zip',\n 'zip+file': 'zip',\n 'tar': 'tar',\n 'tar+file': 'tar',\n 'https': 'curl',\n 'http': 'curl',\n 's3': 's3'}\n\nFILE_SCHEMES = [\n '', 'file', 'gzip', 'gzip+file', 'zip', 'zip+file', 'tar', 'tar+file']\n\n\ndef parse_path(uri, vfs=None):\n \"\"\"Parse a URI or Apache VFS URL into its parts\n\n Returns: tuple\n (path, archive, scheme)\n \"\"\"\n archive = scheme = None\n path = uri\n if vfs:\n parts = urlparse(vfs)\n scheme = parts.scheme\n archive = parts.path\n if parts.netloc and parts.netloc != 'localhost': # pragma: no cover\n archive = parts.netloc + archive\n else:\n parts = urlparse(path)\n scheme = parts.scheme\n path = parts.path\n if parts.netloc and parts.netloc != 'localhost':\n path = parts.netloc + path\n # There are certain URI schemes we favor over GDAL's names.\n if scheme in SCHEMES:\n parts = path.split('!')\n path = parts.pop() if parts else None\n archive = parts.pop() if parts else None\n # For filesystem paths.\n elif scheme.lower() in FILE_SCHEMES:\n pass\n # We permit GDAL's idiosyncratic URI-like dataset paths such as\n # 'netcdf':... 
to fall right through with no parsed archive\n # or scheme.\n else:\n archive = scheme = None\n path = uri\n\n return path, archive, scheme\n\n\ndef vsi_path(path, archive=None, scheme=None):\n \"\"\"Convert a parsed path to a GDAL VSI path.\"\"\"\n # If a VSF and archive file are specified, we convert the path to\n # a GDAL VSI path (see cpl_vsi.h).\n if scheme and scheme.startswith('http'):\n result = \"/vsicurl/{0}://{1}\".format(scheme, path)\n elif scheme and scheme == 's3':\n result = \"/vsis3/{0}\".format(path)\n elif scheme and scheme != 'file':\n if archive:\n result = '/vsi{0}/{1}/{2}'.format(\n scheme, archive, path.lstrip('/'))\n else:\n result = '/vsi{0}/{1}'.format(scheme, path.lstrip('/'))\n else:\n result = path\n return result\n", "path": "rasterio/vfs.py"}]}
num_tokens_prompt: 1,646
num_tokens_diff: 119
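
The fix above re-appends the query string that `urlparse` splits off. The sketch below is a minimal standalone model of the patched `parse_path()` logic, not rasterio's actual code: it uses `urllib.parse` directly instead of rasterio's `compat` shim, and both the function name `parse_https_path` and the example URL are made up for illustration.

```python
from urllib.parse import urlparse

def parse_https_path(uri: str) -> str:
    # urlparse() separates the query string from .path, so a parser that
    # keeps only parts.path silently drops "?a=a&b=b".
    parts = urlparse(uri)
    path = parts.path
    if parts.query:  # the two lines the golden diff adds
        path += "?" + parts.query
    if parts.netloc and parts.netloc != 'localhost':
        path = parts.netloc + path
    return path

print(parse_https_path("https://example.com/raster.tif?a=a&b=b"))
# example.com/raster.tif?a=a&b=b  (query string preserved)
```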

problem_id: gh_patches_debug_2624
source: rasdani/github-patches
task_type: git_diff
in_source_id: zestedesavoir__zds-site-6488
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Possible erreur 500 à la résolution d'une alerte sur un contenu qui n'est plus public Rapporté par Sentry. J'ai eu du mal à comprendre comment le bug a pu se produire, mais j'ai réussi à le reproduire (d'une façon peut-être un peu tirée par les cheveux...). **Comment reproduire ?** 1. Se connecter en tant que `user1` 2. Signaler un billet 3. Se connecter en tant que `staff` 4. Ouvrir la page du billet signalé dans deux onglets différents 5. Sur un des onglets, dépublier le billet 6. Sur l'autre onglet, résoudre l'alerte (ne pas recharger la page juste avant, le billet n'est en fait plus publié, c'est là qu'est l'astuce) Une erreur 500 va alors apparaître. Elle provient d'ici : https://github.com/zestedesavoir/zds-site/blob/c06671c4901a95c30f31067c09d5e4526fd86575/zds/tutorialv2/views/alerts.py#L88 Le contenu n'a plus de version publique, donc plus d'URL publique, et `content.get_absolute_url_online()` renvoie alors `''`. La correction de ce bug passe sans doute par la vérification si l'alerte est déjà résolue ou si le contenu signalé a bien une version publique : si l'une de ces conditions n'est pas remplie, une erreur 404 devrait être levée. </issue> <code> [start of zds/tutorialv2/views/alerts.py] 1 from datetime import datetime 2 3 from django.contrib import messages 4 from django.core.exceptions import PermissionDenied 5 from django.contrib.auth.mixins import LoginRequiredMixin 6 from django.db import transaction 7 from django.http import Http404 8 from django.shortcuts import get_object_or_404, redirect 9 from django.template.loader import render_to_string 10 from django.utils.decorators import method_decorator 11 from django.utils.translation import gettext_lazy as _ 12 from django.views.generic import FormView 13 14 from zds.tutorialv2.models import TYPE_CHOICES_DICT 15 from zds.tutorialv2.models.database import PublishableContent 16 from zds.utils.models import Alert 17 18 19 class SendContentAlert(LoginRequiredMixin, FormView): 20 http_method_names = ["post"] 21 22 @method_decorator(transaction.atomic) 23 def dispatch(self, *args, **kwargs): 24 return super().dispatch(*args, **kwargs) 25 26 def post(self, request, *args, **kwargs): 27 try: 28 content_pk = int(self.kwargs["pk"]) 29 except (KeyError, ValueError): 30 raise Http404("Identifiant manquant ou conversion en entier impossible.") 31 content = get_object_or_404(PublishableContent, pk=content_pk) 32 33 if len(request.POST["signal_text"].strip()) == 0: 34 messages.error(request, _("La raison du signalement ne peut pas être vide.")) 35 else: 36 alert = Alert( 37 author=request.user, 38 content=content, 39 scope="CONTENT", 40 text=request.POST["signal_text"], 41 pubdate=datetime.now(), 42 ) 43 alert.save() 44 45 human_content_type = TYPE_CHOICES_DICT[content.type].lower() 46 messages.success(self.request, _("Ce {} a bien été signalé aux modérateurs.").format(human_content_type)) 47 48 return redirect(content.get_absolute_url_online()) 49 50 51 class SolveContentAlert(LoginRequiredMixin, FormView): 52 @method_decorator(transaction.atomic) 53 def dispatch(self, *args, **kwargs): 54 return super().dispatch(*args, **kwargs) 55 56 def post(self, request, *args, **kwargs): 57 if not request.user.has_perm("tutorialv2.change_contentreaction"): 58 raise PermissionDenied 59 try: 60 alert = get_object_or_404(Alert, pk=int(request.POST["alert_pk"])) 61 content = PublishableContent.objects.get(pk=alert.content.id) 62 except 
(KeyError, ValueError): 63 raise Http404("L'alerte n'existe pas.") 64 65 resolve_reason = "" 66 msg_title = "" 67 msg_content = "" 68 if "text" in request.POST and request.POST["text"]: 69 resolve_reason = request.POST["text"] 70 authors = alert.content.authors.values_list("username", flat=True) 71 authors = ", ".join(authors) 72 msg_title = _("Résolution d'alerte : {0}").format(content.title) 73 msg_content = render_to_string( 74 "tutorialv2/messages/resolve_alert.md", 75 { 76 "content": content, 77 "url": content.get_absolute_url_online(), 78 "name": alert.author.username, 79 "target_name": authors, 80 "modo_name": request.user.username, 81 "message": "\n".join(["> " + line for line in resolve_reason.split("\n")]), 82 "alert_text": "\n".join(["> " + line for line in alert.text.split("\n")]), 83 }, 84 ) 85 alert.solve(request.user, resolve_reason, msg_title, msg_content) 86 87 messages.success(self.request, _("L'alerte a bien été résolue.")) 88 return redirect(content.get_absolute_url_online()) 89 [end of zds/tutorialv2/views/alerts.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/zds/tutorialv2/views/alerts.py b/zds/tutorialv2/views/alerts.py --- a/zds/tutorialv2/views/alerts.py +++ b/zds/tutorialv2/views/alerts.py @@ -62,6 +62,9 @@ except (KeyError, ValueError): raise Http404("L'alerte n'existe pas.") + if alert.solved: + raise Http404("L'alerte a déjà été résolue.") + resolve_reason = "" msg_title = "" msg_content = ""
{"golden_diff": "diff --git a/zds/tutorialv2/views/alerts.py b/zds/tutorialv2/views/alerts.py\n--- a/zds/tutorialv2/views/alerts.py\n+++ b/zds/tutorialv2/views/alerts.py\n@@ -62,6 +62,9 @@\n except (KeyError, ValueError):\n raise Http404(\"L'alerte n'existe pas.\")\n \n+ if alert.solved:\n+ raise Http404(\"L'alerte a d\u00e9j\u00e0 \u00e9t\u00e9 r\u00e9solue.\")\n+\n resolve_reason = \"\"\n msg_title = \"\"\n msg_content = \"\"\n", "issue": "Possible erreur 500 \u00e0 la r\u00e9solution d'une alerte sur un contenu qui n'est plus public\nRapport\u00e9 par Sentry. J'ai eu du mal \u00e0 comprendre comment le bug a pu se produire, mais j'ai r\u00e9ussi \u00e0 le reproduire (d'une fa\u00e7on peut-\u00eatre un peu tir\u00e9e par les cheveux...).\r\n\r\n**Comment reproduire ?**\r\n\r\n1. Se connecter en tant que `user1`\r\n2. Signaler un billet\r\n3. Se connecter en tant que `staff`\r\n4. Ouvrir la page du billet signal\u00e9 dans deux onglets diff\u00e9rents\r\n5. Sur un des onglets, d\u00e9publier le billet\r\n6. Sur l'autre onglet, r\u00e9soudre l'alerte (ne pas recharger la page juste avant, le billet n'est en fait plus publi\u00e9, c'est l\u00e0 qu'est l'astuce)\r\n\r\nUne erreur 500 va alors appara\u00eetre. Elle provient d'ici : https://github.com/zestedesavoir/zds-site/blob/c06671c4901a95c30f31067c09d5e4526fd86575/zds/tutorialv2/views/alerts.py#L88\r\nLe contenu n'a plus de version publique, donc plus d'URL publique, et `content.get_absolute_url_online()` renvoie alors `''`.\r\n\r\nLa correction de ce bug passe sans doute par la v\u00e9rification si l'alerte est d\u00e9j\u00e0 r\u00e9solue ou si le contenu signal\u00e9 a bien une version publique : si l'une de ces conditions n'est pas remplie, une erreur 404 devrait \u00eatre lev\u00e9e.\r\n\n", "before_files": [{"content": "from datetime import datetime\n\nfrom django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.db import transaction\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.loader import render_to_string\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.generic import FormView\n\nfrom zds.tutorialv2.models import TYPE_CHOICES_DICT\nfrom zds.tutorialv2.models.database import PublishableContent\nfrom zds.utils.models import Alert\n\n\nclass SendContentAlert(LoginRequiredMixin, FormView):\n http_method_names = [\"post\"]\n\n @method_decorator(transaction.atomic)\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n try:\n content_pk = int(self.kwargs[\"pk\"])\n except (KeyError, ValueError):\n raise Http404(\"Identifiant manquant ou conversion en entier impossible.\")\n content = get_object_or_404(PublishableContent, pk=content_pk)\n\n if len(request.POST[\"signal_text\"].strip()) == 0:\n messages.error(request, _(\"La raison du signalement ne peut pas \u00eatre vide.\"))\n else:\n alert = Alert(\n author=request.user,\n content=content,\n scope=\"CONTENT\",\n text=request.POST[\"signal_text\"],\n pubdate=datetime.now(),\n )\n alert.save()\n\n human_content_type = TYPE_CHOICES_DICT[content.type].lower()\n messages.success(self.request, _(\"Ce {} a bien \u00e9t\u00e9 signal\u00e9 aux mod\u00e9rateurs.\").format(human_content_type))\n\n return redirect(content.get_absolute_url_online())\n\n\nclass 
SolveContentAlert(LoginRequiredMixin, FormView):\n @method_decorator(transaction.atomic)\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n if not request.user.has_perm(\"tutorialv2.change_contentreaction\"):\n raise PermissionDenied\n try:\n alert = get_object_or_404(Alert, pk=int(request.POST[\"alert_pk\"]))\n content = PublishableContent.objects.get(pk=alert.content.id)\n except (KeyError, ValueError):\n raise Http404(\"L'alerte n'existe pas.\")\n\n resolve_reason = \"\"\n msg_title = \"\"\n msg_content = \"\"\n if \"text\" in request.POST and request.POST[\"text\"]:\n resolve_reason = request.POST[\"text\"]\n authors = alert.content.authors.values_list(\"username\", flat=True)\n authors = \", \".join(authors)\n msg_title = _(\"R\u00e9solution d'alerte : {0}\").format(content.title)\n msg_content = render_to_string(\n \"tutorialv2/messages/resolve_alert.md\",\n {\n \"content\": content,\n \"url\": content.get_absolute_url_online(),\n \"name\": alert.author.username,\n \"target_name\": authors,\n \"modo_name\": request.user.username,\n \"message\": \"\\n\".join([\"> \" + line for line in resolve_reason.split(\"\\n\")]),\n \"alert_text\": \"\\n\".join([\"> \" + line for line in alert.text.split(\"\\n\")]),\n },\n )\n alert.solve(request.user, resolve_reason, msg_title, msg_content)\n\n messages.success(self.request, _(\"L'alerte a bien \u00e9t\u00e9 r\u00e9solue.\"))\n return redirect(content.get_absolute_url_online())\n", "path": "zds/tutorialv2/views/alerts.py"}]}
num_tokens_prompt: 1,843
num_tokens_diff: 128
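
The fix for this record is a plain guard clause. Below is a framework-free sketch of the same idea; `Http404` and `Alert` are simple stand-ins for the Django originals, not the project's real classes.

```python
from dataclasses import dataclass

class Http404(Exception):
    """Stand-in for django.http.Http404 in this sketch."""

@dataclass
class Alert:
    solved: bool = False

def solve_alert(alert: Alert) -> None:
    # the guard the golden diff adds: a second resolution attempt, e.g. from
    # a stale browser tab, now fails with a 404 instead of reaching code that
    # assumes the reported content still has a public URL
    if alert.solved:
        raise Http404("L'alerte a déjà été résolue.")
    alert.solved = True

a = Alert()
solve_alert(a)      # first tab: succeeds
try:
    solve_alert(a)  # stale second tab: clean 404 instead of a 500
except Http404 as exc:
    print(exc)
```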

problem_id: gh_patches_debug_1631
source: rasdani/github-patches
task_type: git_diff
in_source_id: vyperlang__vyper-3340
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bug: compiler dislikes `x not in [a, b]` in 0.3.8, whereas it was fine in 0.3.7 ### Version Information * vyper Version (output of `vyper --version`): 0.3.8 * OS: osx * Python Version (output of `python --version`): 3.10.4 ### What's your issue about? <img width="705" alt="image" src="https://user-images.githubusercontent.com/11488427/230437774-c3b68030-9319-4169-b344-dbb470002102.png"> </issue> <code> [start of vyper/semantics/environment.py] 1 from typing import Dict 2 3 from vyper.semantics.analysis.base import VarInfo 4 from vyper.semantics.types import AddressT, BytesT, VyperType 5 from vyper.semantics.types.shortcuts import BYTES32_T, UINT256_T 6 7 8 # common properties for environment variables 9 class _EnvType(VyperType): 10 def __eq__(self, other): 11 return self is other 12 13 def __hash__(self): 14 return hash(id(self)) 15 16 17 class _Block(_EnvType): 18 _id = "block" 19 _type_members = { 20 "coinbase": AddressT(), 21 "difficulty": UINT256_T, 22 "prevrandao": UINT256_T, 23 "number": UINT256_T, 24 "gaslimit": UINT256_T, 25 "basefee": UINT256_T, 26 "prevhash": BYTES32_T, 27 "timestamp": UINT256_T, 28 } 29 30 31 class _Chain(_EnvType): 32 _id = "chain" 33 _type_members = {"id": UINT256_T} 34 35 36 class _Msg(_EnvType): 37 _id = "msg" 38 _type_members = {"data": BytesT(), "gas": UINT256_T, "sender": AddressT(), "value": UINT256_T} 39 40 41 class _Tx(_EnvType): 42 _id = "tx" 43 _type_members = {"origin": AddressT(), "gasprice": UINT256_T} 44 45 46 CONSTANT_ENVIRONMENT_VARS = {t._id: t for t in (_Block(), _Chain(), _Tx(), _Msg())} 47 48 49 def get_constant_vars() -> Dict: 50 """ 51 Get a dictionary of constant environment variables. 52 """ 53 result = {} 54 for k, v in CONSTANT_ENVIRONMENT_VARS.items(): 55 result[k] = VarInfo(v, is_constant=True) 56 57 return result 58 59 60 # Not sure this is necessary, but add an ad-hoc type for `self` for clarity 61 class _SelfT(AddressT): 62 pass 63 64 65 MUTABLE_ENVIRONMENT_VARS: Dict[str, type] = {"self": _SelfT} 66 67 68 def get_mutable_vars() -> Dict: 69 """ 70 Get a dictionary of mutable environment variables (those that are 71 modified during the course of contract execution, such as `self`). 72 """ 73 return {name: VarInfo(type_()) for name, type_ in MUTABLE_ENVIRONMENT_VARS.items()} 74 [end of vyper/semantics/environment.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/vyper/semantics/environment.py b/vyper/semantics/environment.py --- a/vyper/semantics/environment.py +++ b/vyper/semantics/environment.py @@ -57,12 +57,7 @@ return result -# Not sure this is necessary, but add an ad-hoc type for `self` for clarity -class _SelfT(AddressT): - pass - - -MUTABLE_ENVIRONMENT_VARS: Dict[str, type] = {"self": _SelfT} +MUTABLE_ENVIRONMENT_VARS: Dict[str, type] = {"self": AddressT} def get_mutable_vars() -> Dict:
{"golden_diff": "diff --git a/vyper/semantics/environment.py b/vyper/semantics/environment.py\n--- a/vyper/semantics/environment.py\n+++ b/vyper/semantics/environment.py\n@@ -57,12 +57,7 @@\n return result\n \n \n-# Not sure this is necessary, but add an ad-hoc type for `self` for clarity\n-class _SelfT(AddressT):\n- pass\n-\n-\n-MUTABLE_ENVIRONMENT_VARS: Dict[str, type] = {\"self\": _SelfT}\n+MUTABLE_ENVIRONMENT_VARS: Dict[str, type] = {\"self\": AddressT}\n \n \n def get_mutable_vars() -> Dict:\n", "issue": "Bug: compiler dislikes `x not in [a, b]` in 0.3.8, whereas it was fine in 0.3.7\n### Version Information\r\n\r\n* vyper Version (output of `vyper --version`): 0.3.8\r\n* OS: osx\r\n* Python Version (output of `python --version`): 3.10.4\r\n\r\n### What's your issue about?\r\n\r\n<img width=\"705\" alt=\"image\" src=\"https://user-images.githubusercontent.com/11488427/230437774-c3b68030-9319-4169-b344-dbb470002102.png\">\r\n\r\n\n", "before_files": [{"content": "from typing import Dict\n\nfrom vyper.semantics.analysis.base import VarInfo\nfrom vyper.semantics.types import AddressT, BytesT, VyperType\nfrom vyper.semantics.types.shortcuts import BYTES32_T, UINT256_T\n\n\n# common properties for environment variables\nclass _EnvType(VyperType):\n def __eq__(self, other):\n return self is other\n\n def __hash__(self):\n return hash(id(self))\n\n\nclass _Block(_EnvType):\n _id = \"block\"\n _type_members = {\n \"coinbase\": AddressT(),\n \"difficulty\": UINT256_T,\n \"prevrandao\": UINT256_T,\n \"number\": UINT256_T,\n \"gaslimit\": UINT256_T,\n \"basefee\": UINT256_T,\n \"prevhash\": BYTES32_T,\n \"timestamp\": UINT256_T,\n }\n\n\nclass _Chain(_EnvType):\n _id = \"chain\"\n _type_members = {\"id\": UINT256_T}\n\n\nclass _Msg(_EnvType):\n _id = \"msg\"\n _type_members = {\"data\": BytesT(), \"gas\": UINT256_T, \"sender\": AddressT(), \"value\": UINT256_T}\n\n\nclass _Tx(_EnvType):\n _id = \"tx\"\n _type_members = {\"origin\": AddressT(), \"gasprice\": UINT256_T}\n\n\nCONSTANT_ENVIRONMENT_VARS = {t._id: t for t in (_Block(), _Chain(), _Tx(), _Msg())}\n\n\ndef get_constant_vars() -> Dict:\n \"\"\"\n Get a dictionary of constant environment variables.\n \"\"\"\n result = {}\n for k, v in CONSTANT_ENVIRONMENT_VARS.items():\n result[k] = VarInfo(v, is_constant=True)\n\n return result\n\n\n# Not sure this is necessary, but add an ad-hoc type for `self` for clarity\nclass _SelfT(AddressT):\n pass\n\n\nMUTABLE_ENVIRONMENT_VARS: Dict[str, type] = {\"self\": _SelfT}\n\n\ndef get_mutable_vars() -> Dict:\n \"\"\"\n Get a dictionary of mutable environment variables (those that are\n modified during the course of contract execution, such as `self`).\n \"\"\"\n return {name: VarInfo(type_()) for name, type_ in MUTABLE_ENVIRONMENT_VARS.items()}\n", "path": "vyper/semantics/environment.py"}]}
num_tokens_prompt: 1,378
num_tokens_diff: 144
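
The golden diff deletes the ad-hoc `_SelfT` subclass and registers `self` as a plain `AddressT`. The toy below does not reproduce vyper's actual type machinery; it only illustrates, under an assumed strict-equality rule, why a subclass with its own type identity can fall out of membership checks such as `x not in [a, b]`.

```python
class AddressT:
    def __eq__(self, other):
        # toy rule, not vyper's real logic: two type objects compare equal
        # only when they are instances of exactly the same class
        return type(self) is type(other)

    def __hash__(self):
        return hash(type(self))

class _SelfT(AddressT):  # the ad-hoc subclass the golden diff deletes
    pass

print(AddressT() == AddressT())  # True
print(_SelfT() == AddressT())    # False: under such strict comparisons,
                                 # `self` stops counting as an address and
                                 # checks like `x not in [a, b]` can reject it
```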

problem_id: gh_patches_debug_20094
source: rasdani/github-patches
task_type: git_diff
in_source_id: Flexget__Flexget-2495
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Unhandled error in plugin exists: 'PosixPath' object has no attribute 'walk' <!--- Before opening an issue, verify: - Is this a feature request? Post it on https://feathub.com/Flexget/Flexget - Did you recently upgrade? Look at the Change Log and Upgrade Actions to make sure that you don't need to make any changes to your config https://flexget.com/ChangeLog https://flexget.com/UpgradeActions - Are you running FlexGet as a daemon? Stop it completely and then start it again https://flexget.com/CLI/daemon - Did you search to see if the issue already exists? https://github.com/Flexget/Flexget/issues - Did you fill out the issue template as completely as possible? The issue template is here because it helps to ensure you submitted all the necessary information the first time, and allows us to more quickly review issues. Please fill it out correctly and do not ignore it, no matter how irrelevant you think it may be. Thanks in advance for your help with this! ---> ### Expected behaviour: After daemon startup, tasks are run without errors <!--- Please don't just say "it doesn't crash" or "it works". Explain what the expected result is. ---> ### Actual behaviour: Tasks are aborted ### Steps to reproduce: - Step 1: Configure exists plugin - Step 2: Run flexget #### Config: AFAIR, any config using exists plugin #### Log: <details> <summary>(click to expand)</summary> ``` 2019-11-13 20:32 CRITICAL task tv_rarbg BUG: Unhandled error in plugin exists: 'PosixPath' object has no attribute 'walk' Traceback (most recent call last): File "/home/carno/envs/flexget3/lib/python3.7/site-packages/flexget/task.py", line 520, in __run_plugin result = method(*args, **kwargs) File "/home/carno/envs/flexget3/lib/python3.7/site-packages/flexget/event.py", line 20, in __call__ return self.func(*args, **kwargs) File "/home/carno/envs/flexget3/lib/python3.7/site-packages/flexget/plugins/filter/exists.py", line 41, in on_task_filter for p in folder.walk(errors='ignore'): AttributeError: 'PosixPath' object has no attribute 'walk' ``` </details> ### Additional information: - FlexGet version: 3.0.1 - Python version: 3.7.2 - Installation method: pip - Using daemon (yes/no): yes - OS and version: debian unstable - Link to crash log: N/A <!--- In config and debug/crash logs, remember to redact any personal or sensitive information such as passwords, API keys, private URLs and so on. Please verify that the following data is present before submitting your issue: - Link to a paste service or paste above the relevant config (preferably full config, including templates if present). Please make sure the paste does not expire, if possible. - Link to a paste service or paste above debug-level logs of the relevant task/s (use `flexget -L debug execute --tasks <Task_name>`). - FlexGet version (use `flexget -V` to get it). - Full Python version, for example `2.7.11` (use `python -V` to get it). - Installation method (pip, git install, etc). - Whether or not you're running FlexGet as a daemon. - OS and version. - Attach crash log if one was generated, in addition to the debug-level log. It can be found in the directory with your config file. 
---> </issue> <code> [start of flexget/plugins/filter/exists.py] 1 import logging 2 import platform 3 from pathlib import Path 4 5 from flexget import plugin 6 from flexget.config_schema import one_or_more 7 from flexget.event import event 8 9 log = logging.getLogger('exists') 10 11 12 class FilterExists: 13 """ 14 Reject entries that already exist in given path. 15 16 Example:: 17 18 exists: /storage/movies/ 19 """ 20 21 schema = one_or_more({'type': 'string', 'format': 'path'}) 22 23 def prepare_config(self, config): 24 # If only a single path is passed turn it into a 1 element list 25 if isinstance(config, str): 26 config = [config] 27 return config 28 29 @plugin.priority(-1) 30 def on_task_filter(self, task, config): 31 if not task.accepted: 32 log.debug('No accepted entries, not scanning for existing.') 33 return 34 log.verbose('Scanning path(s) for existing files.') 35 config = self.prepare_config(config) 36 filenames = {} 37 for folder in config: 38 folder = Path(folder).expanduser() 39 if not folder.exists(): 40 raise plugin.PluginWarning('Path %s does not exist' % folder, log) 41 for p in folder.walk(errors='ignore'): 42 key = p.name 43 # windows file system is not case sensitive 44 if platform.system() == 'Windows': 45 key = key.lower() 46 filenames[key] = p 47 for entry in task.accepted: 48 # priority is: filename, location (filename only), title 49 name = Path(entry.get('filename', entry.get('location', entry['title']))).name 50 if platform.system() == 'Windows': 51 name = name.lower() 52 if name in filenames: 53 log.debug('Found %s in %s' % (name, filenames[name])) 54 entry.reject('exists in %s' % filenames[name]) 55 56 57 @event('plugin.register') 58 def register_plugin(): 59 plugin.register(FilterExists, 'exists', api_ver=2) 60 [end of flexget/plugins/filter/exists.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/flexget/plugins/filter/exists.py b/flexget/plugins/filter/exists.py --- a/flexget/plugins/filter/exists.py +++ b/flexget/plugins/filter/exists.py @@ -38,12 +38,13 @@ folder = Path(folder).expanduser() if not folder.exists(): raise plugin.PluginWarning('Path %s does not exist' % folder, log) - for p in folder.walk(errors='ignore'): - key = p.name - # windows file system is not case sensitive - if platform.system() == 'Windows': - key = key.lower() - filenames[key] = p + for p in folder.rglob('*'): + if p.is_file(): + key = p.name + # windows file system is not case sensitive + if platform.system() == 'Windows': + key = key.lower() + filenames[key] = p for entry in task.accepted: # priority is: filename, location (filename only), title name = Path(entry.get('filename', entry.get('location', entry['title']))).name
{"golden_diff": "diff --git a/flexget/plugins/filter/exists.py b/flexget/plugins/filter/exists.py\n--- a/flexget/plugins/filter/exists.py\n+++ b/flexget/plugins/filter/exists.py\n@@ -38,12 +38,13 @@\n folder = Path(folder).expanduser()\n if not folder.exists():\n raise plugin.PluginWarning('Path %s does not exist' % folder, log)\n- for p in folder.walk(errors='ignore'):\n- key = p.name\n- # windows file system is not case sensitive\n- if platform.system() == 'Windows':\n- key = key.lower()\n- filenames[key] = p\n+ for p in folder.rglob('*'):\n+ if p.is_file():\n+ key = p.name\n+ # windows file system is not case sensitive\n+ if platform.system() == 'Windows':\n+ key = key.lower()\n+ filenames[key] = p\n for entry in task.accepted:\n # priority is: filename, location (filename only), title\n name = Path(entry.get('filename', entry.get('location', entry['title']))).name\n", "issue": "Unhandled error in plugin exists: 'PosixPath' object has no attribute 'walk'\n<!---\r\nBefore opening an issue, verify:\r\n\r\n- Is this a feature request? Post it on https://feathub.com/Flexget/Flexget\r\n- Did you recently upgrade? Look at the Change Log and Upgrade Actions to make sure that you don't need to make any changes to your config https://flexget.com/ChangeLog https://flexget.com/UpgradeActions\r\n- Are you running FlexGet as a daemon? Stop it completely and then start it again https://flexget.com/CLI/daemon\r\n- Did you search to see if the issue already exists? https://github.com/Flexget/Flexget/issues\r\n- Did you fill out the issue template as completely as possible?\r\n\r\nThe issue template is here because it helps to ensure you submitted all the necessary information the first time, and allows us to more quickly review issues. Please fill it out correctly and do not ignore it, no matter how irrelevant you think it may be. Thanks in advance for your help with this!\r\n--->\r\n\r\n### Expected behaviour:\r\nAfter daemon startup, tasks are run without errors\r\n<!---\r\nPlease don't just say \"it doesn't crash\" or \"it works\". 
Explain what the expected result is.\r\n--->\r\n\r\n### Actual behaviour:\r\nTasks are aborted\r\n### Steps to reproduce:\r\n- Step 1: Configure exists plugin\r\n- Step 2: Run flexget\r\n\r\n#### Config:\r\nAFAIR, any config using exists plugin\r\n \r\n#### Log:\r\n<details>\r\n <summary>(click to expand)</summary>\r\n\r\n```\r\n2019-11-13 20:32 CRITICAL task tv_rarbg BUG: Unhandled error in plugin exists: 'PosixPath' object has no attribute 'walk'\r\nTraceback (most recent call last):\r\n File \"/home/carno/envs/flexget3/lib/python3.7/site-packages/flexget/task.py\", line 520, in __run_plugin\r\n result = method(*args, **kwargs)\r\n File \"/home/carno/envs/flexget3/lib/python3.7/site-packages/flexget/event.py\", line 20, in __call__\r\n return self.func(*args, **kwargs)\r\n File \"/home/carno/envs/flexget3/lib/python3.7/site-packages/flexget/plugins/filter/exists.py\", line 41, in on_task_filter\r\n for p in folder.walk(errors='ignore'):\r\nAttributeError: 'PosixPath' object has no attribute 'walk'\r\n\r\n```\r\n</details>\r\n\r\n### Additional information:\r\n\r\n- FlexGet version: 3.0.1\r\n- Python version: 3.7.2\r\n- Installation method: pip\r\n- Using daemon (yes/no): yes\r\n- OS and version: debian unstable\r\n- Link to crash log: N/A\r\n\r\n<!---\r\nIn config and debug/crash logs, remember to redact any personal or sensitive information such as passwords, API keys, private URLs and so on.\r\n\r\nPlease verify that the following data is present before submitting your issue:\r\n\r\n- Link to a paste service or paste above the relevant config (preferably full config, including templates if present). Please make sure the paste does not expire, if possible.\r\n- Link to a paste service or paste above debug-level logs of the relevant task/s (use `flexget -L debug execute --tasks <Task_name>`).\r\n- FlexGet version (use `flexget -V` to get it).\r\n- Full Python version, for example `2.7.11` (use `python -V` to get it). \r\n- Installation method (pip, git install, etc).\r\n- Whether or not you're running FlexGet as a daemon.\r\n- OS and version.\r\n- Attach crash log if one was generated, in addition to the debug-level log. 
It can be found in the directory with your config file.\r\n--->\r\n\n", "before_files": [{"content": "import logging\nimport platform\nfrom pathlib import Path\n\nfrom flexget import plugin\nfrom flexget.config_schema import one_or_more\nfrom flexget.event import event\n\nlog = logging.getLogger('exists')\n\n\nclass FilterExists:\n \"\"\"\n Reject entries that already exist in given path.\n\n Example::\n\n exists: /storage/movies/\n \"\"\"\n\n schema = one_or_more({'type': 'string', 'format': 'path'})\n\n def prepare_config(self, config):\n # If only a single path is passed turn it into a 1 element list\n if isinstance(config, str):\n config = [config]\n return config\n\n @plugin.priority(-1)\n def on_task_filter(self, task, config):\n if not task.accepted:\n log.debug('No accepted entries, not scanning for existing.')\n return\n log.verbose('Scanning path(s) for existing files.')\n config = self.prepare_config(config)\n filenames = {}\n for folder in config:\n folder = Path(folder).expanduser()\n if not folder.exists():\n raise plugin.PluginWarning('Path %s does not exist' % folder, log)\n for p in folder.walk(errors='ignore'):\n key = p.name\n # windows file system is not case sensitive\n if platform.system() == 'Windows':\n key = key.lower()\n filenames[key] = p\n for entry in task.accepted:\n # priority is: filename, location (filename only), title\n name = Path(entry.get('filename', entry.get('location', entry['title']))).name\n if platform.system() == 'Windows':\n name = name.lower()\n if name in filenames:\n log.debug('Found %s in %s' % (name, filenames[name]))\n entry.reject('exists in %s' % filenames[name])\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(FilterExists, 'exists', api_ver=2)\n", "path": "flexget/plugins/filter/exists.py"}]}
num_tokens_prompt: 1,886
num_tokens_diff: 249
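
`pathlib.Path` only grew a `.walk()` method in Python 3.12, so on the reporter's Python 3.7 the attribute genuinely does not exist; the golden diff swaps it for `rglob('*')` plus an `is_file()` filter. Below is a self-contained sketch of that replacement (the `existing_filenames` helper and the temp-directory demo are made up for illustration, not FlexGet code):

```python
import platform
import tempfile
from pathlib import Path

def existing_filenames(folder: Path) -> dict:
    filenames = {}
    # rglob('*') recurses like os.walk(); the is_file() filter drops the
    # directories that a walk would report separately
    for p in folder.rglob('*'):
        if p.is_file():
            key = p.name.lower() if platform.system() == 'Windows' else p.name
            filenames[key] = p
    return filenames

with tempfile.TemporaryDirectory() as d:
    sub = Path(d) / 'season1'
    sub.mkdir()
    (sub / 'episode1.mkv').touch()
    print(existing_filenames(Path(d)))
    # {'episode1.mkv': PosixPath('.../season1/episode1.mkv')}
```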

problem_id: gh_patches_debug_33506
source: rasdani/github-patches
task_type: git_diff
in_source_id: optuna__optuna-1285
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `experimental` decorator breaks class documentation. The `experimental` decorator used on classes break documentation. This could be one manifestation but there is an issue with how the documentation including type hints are propagated to the decorated class. This does not apply for free functions. See https://github.com/optuna/optuna/pull/1265#issuecomment-633195955 for how it may break. ## Expected behavior Class documentation should not be altered by applying the experimental decorator. ## Steps to reproduce 1. Apply the experimental decorator to a class. 1. Build the document (`cd docs && make html`) 1. Open the rendered documentation and note that the class signatures is broken. ## Additional context (optional) - An issue regarding the indentation https://github.com/optuna/optuna/issues/1213. </issue> <code> [start of optuna/_experimental.py] 1 import functools 2 import inspect 3 from typing import Any 4 from typing import Callable 5 import warnings 6 7 from optuna.exceptions import ExperimentalWarning 8 9 10 # White spaces of each line are necessary to beautifully rendered documentation. 11 # NOTE(crcrpar): When `experimental` decorator is applied to member methods, these lines require 12 # another four spaces. 13 _EXPERIMENTAL_DOCSTRING_TEMPLATE = """ 14 15 .. note:: 16 Added in v{ver} as an experimental feature. The interface may change in newer versions 17 without prior notice. See https://github.com/optuna/optuna/releases/tag/v{ver}. 18 """ 19 20 21 def _make_func_spec_str(func: Callable[..., Any]) -> str: 22 23 name = func.__name__ 24 argspec = inspect.getfullargspec(func) 25 26 n_defaults = len(argspec.defaults) if argspec.defaults is not None else 0 27 offset = int(len(argspec.args) > 0 and argspec.args[0] == "self") 28 29 if n_defaults > 0: 30 args = ", ".join(argspec.args[offset:-n_defaults]) 31 with_default_values = ", ".join( 32 [ 33 "{}={}".format(a, d) 34 for a, d in zip(argspec.args[-n_defaults:], argspec.defaults) # type: ignore 35 ] 36 ) 37 else: 38 args = ", ".join(argspec.args[offset:]) 39 with_default_values = "" 40 41 if len(args) > 0 and len(with_default_values) > 0: 42 args += ", " 43 44 # NOTE(crcrpar): The four spaces are necessary to correctly render documentation. 45 # Different classes or methods require more spaces. 46 str_args_description = "(" + args + with_default_values + ")\n\n " 47 return name + str_args_description 48 49 50 def _validate_version(version: str) -> None: 51 52 if not isinstance(version, str) or len(version.split(".")) != 3: 53 raise ValueError( 54 "Invalid version specification. Must follow `x.y.z` format but `{}` is given".format( 55 version 56 ) 57 ) 58 59 60 def experimental(version: str, name: str = None) -> Any: 61 """Decorate class or function as experimental. 62 63 Args: 64 version: The first version that supports the target feature. 65 name: The name of the feature. Defaults to the function or class name. Optional. 66 """ 67 68 _validate_version(version) 69 70 def _experimental_wrapper(f: Any) -> Any: 71 # f is either func or class. 72 73 def _experimental_func(func: Callable[[Any], Any]) -> Callable[[Any], Any]: 74 75 docstring = _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version) 76 if func.__doc__ is None: 77 func.__doc__ = "" 78 func.__doc__ += docstring 79 80 # TODO(crcrpar): Annotate this correctly. 
81 @functools.wraps(func) 82 def new_func(*args: Any, **kwargs: Any) -> Any: 83 """Wrapped function.""" 84 85 warnings.warn( 86 "{} is experimental (supported from v{}). " 87 "The interface can change in the future.".format( 88 name if name is not None else func.__name__, version 89 ), 90 ExperimentalWarning, 91 ) 92 93 return func(*args, **kwargs) # type: ignore 94 95 return new_func 96 97 def _experimental_class(cls: Any) -> Any: 98 """Decorates a class as experimental. 99 100 This decorator is supposed to be applied to the experimental class. 101 """ 102 103 _original_init = cls.__init__ 104 105 def wrapped_init(self, *args, **kwargs) -> None: # type: ignore 106 warnings.warn( 107 "{} is experimental (supported from v{}). " 108 "The interface can change in the future.".format( 109 name if name is not None else cls.__name__, version 110 ), 111 ExperimentalWarning, 112 ) 113 114 _original_init(self, *args, **kwargs) 115 116 cls.__init__ = wrapped_init 117 118 if cls.__doc__ is None: 119 cls.__doc__ = "" 120 cls.__doc__ = ( 121 _make_func_spec_str(_original_init) 122 + cls.__doc__ 123 + _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version) 124 ) 125 return cls 126 127 return _experimental_class(f) if inspect.isclass(f) else _experimental_func(f) 128 129 return _experimental_wrapper 130 [end of optuna/_experimental.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/optuna/_experimental.py b/optuna/_experimental.py --- a/optuna/_experimental.py +++ b/optuna/_experimental.py @@ -18,35 +18,6 @@ """ -def _make_func_spec_str(func: Callable[..., Any]) -> str: - - name = func.__name__ - argspec = inspect.getfullargspec(func) - - n_defaults = len(argspec.defaults) if argspec.defaults is not None else 0 - offset = int(len(argspec.args) > 0 and argspec.args[0] == "self") - - if n_defaults > 0: - args = ", ".join(argspec.args[offset:-n_defaults]) - with_default_values = ", ".join( - [ - "{}={}".format(a, d) - for a, d in zip(argspec.args[-n_defaults:], argspec.defaults) # type: ignore - ] - ) - else: - args = ", ".join(argspec.args[offset:]) - with_default_values = "" - - if len(args) > 0 and len(with_default_values) > 0: - args += ", " - - # NOTE(crcrpar): The four spaces are necessary to correctly render documentation. - # Different classes or methods require more spaces. - str_args_description = "(" + args + with_default_values + ")\n\n " - return name + str_args_description - - def _validate_version(version: str) -> None: if not isinstance(version, str) or len(version.split(".")) != 3: @@ -102,6 +73,7 @@ _original_init = cls.__init__ + @functools.wraps(_original_init) def wrapped_init(self, *args, **kwargs) -> None: # type: ignore warnings.warn( "{} is experimental (supported from v{}). " @@ -117,11 +89,7 @@ if cls.__doc__ is None: cls.__doc__ = "" - cls.__doc__ = ( - _make_func_spec_str(_original_init) - + cls.__doc__ - + _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version) - ) + cls.__doc__ += _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version) return cls return _experimental_class(f) if inspect.isclass(f) else _experimental_func(f)
{"golden_diff": "diff --git a/optuna/_experimental.py b/optuna/_experimental.py\n--- a/optuna/_experimental.py\n+++ b/optuna/_experimental.py\n@@ -18,35 +18,6 @@\n \"\"\"\n \n \n-def _make_func_spec_str(func: Callable[..., Any]) -> str:\n-\n- name = func.__name__\n- argspec = inspect.getfullargspec(func)\n-\n- n_defaults = len(argspec.defaults) if argspec.defaults is not None else 0\n- offset = int(len(argspec.args) > 0 and argspec.args[0] == \"self\")\n-\n- if n_defaults > 0:\n- args = \", \".join(argspec.args[offset:-n_defaults])\n- with_default_values = \", \".join(\n- [\n- \"{}={}\".format(a, d)\n- for a, d in zip(argspec.args[-n_defaults:], argspec.defaults) # type: ignore\n- ]\n- )\n- else:\n- args = \", \".join(argspec.args[offset:])\n- with_default_values = \"\"\n-\n- if len(args) > 0 and len(with_default_values) > 0:\n- args += \", \"\n-\n- # NOTE(crcrpar): The four spaces are necessary to correctly render documentation.\n- # Different classes or methods require more spaces.\n- str_args_description = \"(\" + args + with_default_values + \")\\n\\n \"\n- return name + str_args_description\n-\n-\n def _validate_version(version: str) -> None:\n \n if not isinstance(version, str) or len(version.split(\".\")) != 3:\n@@ -102,6 +73,7 @@\n \n _original_init = cls.__init__\n \n+ @functools.wraps(_original_init)\n def wrapped_init(self, *args, **kwargs) -> None: # type: ignore\n warnings.warn(\n \"{} is experimental (supported from v{}). \"\n@@ -117,11 +89,7 @@\n \n if cls.__doc__ is None:\n cls.__doc__ = \"\"\n- cls.__doc__ = (\n- _make_func_spec_str(_original_init)\n- + cls.__doc__\n- + _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)\n- )\n+ cls.__doc__ += _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)\n return cls\n \n return _experimental_class(f) if inspect.isclass(f) else _experimental_func(f)\n", "issue": "`experimental` decorator breaks class documentation.\nThe `experimental` decorator used on classes break documentation. This could be one manifestation but there is an issue with how the documentation including type hints are propagated to the decorated class. This does not apply for free functions.\r\n\r\nSee https://github.com/optuna/optuna/pull/1265#issuecomment-633195955 for how it may break.\r\n\r\n## Expected behavior\r\n\r\nClass documentation should not be altered by applying the experimental decorator.\r\n\r\n## Steps to reproduce\r\n\r\n1. Apply the experimental decorator to a class.\r\n1. Build the document (`cd docs && make html`) \r\n1. Open the rendered documentation and note that the class signatures is broken.\r\n\r\n## Additional context (optional)\r\n\r\n- An issue regarding the indentation https://github.com/optuna/optuna/issues/1213.\r\n\n", "before_files": [{"content": "import functools\nimport inspect\nfrom typing import Any\nfrom typing import Callable\nimport warnings\n\nfrom optuna.exceptions import ExperimentalWarning\n\n\n# White spaces of each line are necessary to beautifully rendered documentation.\n# NOTE(crcrpar): When `experimental` decorator is applied to member methods, these lines require\n# another four spaces.\n_EXPERIMENTAL_DOCSTRING_TEMPLATE = \"\"\"\n\n .. note::\n Added in v{ver} as an experimental feature. The interface may change in newer versions\n without prior notice. 
See https://github.com/optuna/optuna/releases/tag/v{ver}.\n\"\"\"\n\n\ndef _make_func_spec_str(func: Callable[..., Any]) -> str:\n\n name = func.__name__\n argspec = inspect.getfullargspec(func)\n\n n_defaults = len(argspec.defaults) if argspec.defaults is not None else 0\n offset = int(len(argspec.args) > 0 and argspec.args[0] == \"self\")\n\n if n_defaults > 0:\n args = \", \".join(argspec.args[offset:-n_defaults])\n with_default_values = \", \".join(\n [\n \"{}={}\".format(a, d)\n for a, d in zip(argspec.args[-n_defaults:], argspec.defaults) # type: ignore\n ]\n )\n else:\n args = \", \".join(argspec.args[offset:])\n with_default_values = \"\"\n\n if len(args) > 0 and len(with_default_values) > 0:\n args += \", \"\n\n # NOTE(crcrpar): The four spaces are necessary to correctly render documentation.\n # Different classes or methods require more spaces.\n str_args_description = \"(\" + args + with_default_values + \")\\n\\n \"\n return name + str_args_description\n\n\ndef _validate_version(version: str) -> None:\n\n if not isinstance(version, str) or len(version.split(\".\")) != 3:\n raise ValueError(\n \"Invalid version specification. Must follow `x.y.z` format but `{}` is given\".format(\n version\n )\n )\n\n\ndef experimental(version: str, name: str = None) -> Any:\n \"\"\"Decorate class or function as experimental.\n\n Args:\n version: The first version that supports the target feature.\n name: The name of the feature. Defaults to the function or class name. Optional.\n \"\"\"\n\n _validate_version(version)\n\n def _experimental_wrapper(f: Any) -> Any:\n # f is either func or class.\n\n def _experimental_func(func: Callable[[Any], Any]) -> Callable[[Any], Any]:\n\n docstring = _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)\n if func.__doc__ is None:\n func.__doc__ = \"\"\n func.__doc__ += docstring\n\n # TODO(crcrpar): Annotate this correctly.\n @functools.wraps(func)\n def new_func(*args: Any, **kwargs: Any) -> Any:\n \"\"\"Wrapped function.\"\"\"\n\n warnings.warn(\n \"{} is experimental (supported from v{}). \"\n \"The interface can change in the future.\".format(\n name if name is not None else func.__name__, version\n ),\n ExperimentalWarning,\n )\n\n return func(*args, **kwargs) # type: ignore\n\n return new_func\n\n def _experimental_class(cls: Any) -> Any:\n \"\"\"Decorates a class as experimental.\n\n This decorator is supposed to be applied to the experimental class.\n \"\"\"\n\n _original_init = cls.__init__\n\n def wrapped_init(self, *args, **kwargs) -> None: # type: ignore\n warnings.warn(\n \"{} is experimental (supported from v{}). \"\n \"The interface can change in the future.\".format(\n name if name is not None else cls.__name__, version\n ),\n ExperimentalWarning,\n )\n\n _original_init(self, *args, **kwargs)\n\n cls.__init__ = wrapped_init\n\n if cls.__doc__ is None:\n cls.__doc__ = \"\"\n cls.__doc__ = (\n _make_func_spec_str(_original_init)\n + cls.__doc__\n + _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)\n )\n return cls\n\n return _experimental_class(f) if inspect.isclass(f) else _experimental_func(f)\n\n return _experimental_wrapper\n", "path": "optuna/_experimental.py"}]}
num_tokens_prompt: 1,939
num_tokens_diff: 541
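
The heart of this fix is one `functools.wraps` call plus dropping the hand-rolled signature string: `wraps` copies `__name__` and `__doc__` and sets `__wrapped__`, which `inspect.signature` (and hence tools like Sphinx autodoc) follows back to the original. A toy demonstration with a made-up `Widget` class, not optuna's actual decorator:

```python
import functools
import inspect

class Widget:  # toy class standing in for a decorated optuna class
    def __init__(self, size: int = 3) -> None:
        """Original docstring."""

_original_init = Widget.__init__

@functools.wraps(_original_init)  # the line the golden diff adds
def wrapped_init(self, *args, **kwargs):
    # an ExperimentalWarning would be emitted here
    _original_init(self, *args, **kwargs)

Widget.__init__ = wrapped_init

print(inspect.signature(Widget.__init__))  # (self, size: int = 3) -> None
print(Widget.__init__.__doc__)             # Original docstring.
```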

problem_id: gh_patches_debug_25202
source: rasdani/github-patches
task_type: git_diff
in_source_id: opsdroid__opsdroid-12
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Connectors should fork When a connector is started it should fork into its own process. This is because connectors block to accept messages from their source. This requires #5 to enable persistent memory between connector processes. </issue> <code> [start of opsdroid/core.py] 1 """Core components of OpsDroid.""" 2 3 import logging 4 import sys 5 import weakref 6 from opsdroid.helper import match 7 from opsdroid.memory import Memory 8 9 10 class OpsDroid(): 11 """Root object for opsdroid.""" 12 13 instances = [] 14 15 def __init__(self): 16 """Start opsdroid.""" 17 self.bot_name = 'opsdroid' 18 self.sys_status = 0 19 self.connectors = [] 20 self.skills = [] 21 self.memory = Memory() 22 logging.info("Created main opsdroid object") 23 24 def __enter__(self): 25 """Add self to existing instances.""" 26 if len(self.__class__.instances) == 0: 27 self.__class__.instances.append(weakref.proxy(self)) 28 else: 29 self.critical("opsdroid has already been started", 1) 30 return self 31 32 def __exit__(self, exc_type, exc_value, traceback): 33 """Remove self from existing instances.""" 34 self.__class__.instances = [] 35 36 def exit(self): 37 """Exit application.""" 38 logging.info("Exiting application with return code " + 39 str(self.sys_status)) 40 sys.exit(self.sys_status) 41 42 def critical(self, error, code): 43 """Exit due to unrecoverable error.""" 44 self.sys_status = code 45 logging.critical(error) 46 print("Error: " + error) 47 self.exit() 48 49 def start_connectors(self, connectors): 50 """Start the connectors.""" 51 if len(connectors) == 0: 52 self.critical("All connectors failed to load", 1) 53 for connector_module in connectors: 54 for name, cls in connector_module["module"].__dict__.items(): 55 if isinstance(cls, type) and "Connector" in name: 56 connector_module["config"]["bot-name"] = self.bot_name 57 connector = cls(connector_module["config"]) 58 self.connectors.append(connector) 59 connector.connect(self) 60 61 def start_databases(self, databases): 62 """Start the databases.""" 63 if len(databases) == 0: 64 logging.warning("All databases failed to load") 65 for database_module in databases: 66 for name, cls in database_module["module"].__dict__.items(): 67 if isinstance(cls, type) and "Database" in name: 68 logging.debug("Adding database: " + name) 69 database = cls(database_module["config"]) 70 self.memory.databases.append(database) 71 database.connect() 72 73 def load_regex_skill(self, regex, skill): 74 """Load skills.""" 75 self.skills.append({"regex": regex, "skill": skill}) 76 77 def parse(self, message): 78 """Parse a string against all skills.""" 79 if message.text.strip() != "": 80 logging.debug("Parsing input: " + message.text) 81 for skill in self.skills: 82 if "regex" in skill: 83 regex = match(skill["regex"], message.text) 84 if regex: 85 message.regex = regex 86 skill["skill"](self, message) 87 [end of opsdroid/core.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/opsdroid/core.py b/opsdroid/core.py --- a/opsdroid/core.py +++ b/opsdroid/core.py @@ -3,6 +3,7 @@ import logging import sys import weakref +from multiprocessing import Process from opsdroid.helper import match from opsdroid.memory import Memory @@ -17,6 +18,7 @@ self.bot_name = 'opsdroid' self.sys_status = 0 self.connectors = [] + self.connector_jobs = [] self.skills = [] self.memory = Memory() logging.info("Created main opsdroid object") @@ -56,7 +58,11 @@ connector_module["config"]["bot-name"] = self.bot_name connector = cls(connector_module["config"]) self.connectors.append(connector) - connector.connect(self) + job = Process(target=connector.connect, args=(self,)) + job.start() + self.connector_jobs.append(job) + for job in self.connector_jobs: + job.join() def start_databases(self, databases): """Start the databases."""
{"golden_diff": "diff --git a/opsdroid/core.py b/opsdroid/core.py\n--- a/opsdroid/core.py\n+++ b/opsdroid/core.py\n@@ -3,6 +3,7 @@\n import logging\n import sys\n import weakref\n+from multiprocessing import Process\n from opsdroid.helper import match\n from opsdroid.memory import Memory\n \n@@ -17,6 +18,7 @@\n self.bot_name = 'opsdroid'\n self.sys_status = 0\n self.connectors = []\n+ self.connector_jobs = []\n self.skills = []\n self.memory = Memory()\n logging.info(\"Created main opsdroid object\")\n@@ -56,7 +58,11 @@\n connector_module[\"config\"][\"bot-name\"] = self.bot_name\n connector = cls(connector_module[\"config\"])\n self.connectors.append(connector)\n- connector.connect(self)\n+ job = Process(target=connector.connect, args=(self,))\n+ job.start()\n+ self.connector_jobs.append(job)\n+ for job in self.connector_jobs:\n+ job.join()\n \n def start_databases(self, databases):\n \"\"\"Start the databases.\"\"\"\n", "issue": "Connectors should fork\nWhen a connector is started it should fork into its own process. This is because connectors block to accept messages from their source.\n\nThis requires #5 to enable persistent memory between connector processes.\n\n", "before_files": [{"content": "\"\"\"Core components of OpsDroid.\"\"\"\n\nimport logging\nimport sys\nimport weakref\nfrom opsdroid.helper import match\nfrom opsdroid.memory import Memory\n\n\nclass OpsDroid():\n \"\"\"Root object for opsdroid.\"\"\"\n\n instances = []\n\n def __init__(self):\n \"\"\"Start opsdroid.\"\"\"\n self.bot_name = 'opsdroid'\n self.sys_status = 0\n self.connectors = []\n self.skills = []\n self.memory = Memory()\n logging.info(\"Created main opsdroid object\")\n\n def __enter__(self):\n \"\"\"Add self to existing instances.\"\"\"\n if len(self.__class__.instances) == 0:\n self.__class__.instances.append(weakref.proxy(self))\n else:\n self.critical(\"opsdroid has already been started\", 1)\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n \"\"\"Remove self from existing instances.\"\"\"\n self.__class__.instances = []\n\n def exit(self):\n \"\"\"Exit application.\"\"\"\n logging.info(\"Exiting application with return code \" +\n str(self.sys_status))\n sys.exit(self.sys_status)\n\n def critical(self, error, code):\n \"\"\"Exit due to unrecoverable error.\"\"\"\n self.sys_status = code\n logging.critical(error)\n print(\"Error: \" + error)\n self.exit()\n\n def start_connectors(self, connectors):\n \"\"\"Start the connectors.\"\"\"\n if len(connectors) == 0:\n self.critical(\"All connectors failed to load\", 1)\n for connector_module in connectors:\n for name, cls in connector_module[\"module\"].__dict__.items():\n if isinstance(cls, type) and \"Connector\" in name:\n connector_module[\"config\"][\"bot-name\"] = self.bot_name\n connector = cls(connector_module[\"config\"])\n self.connectors.append(connector)\n connector.connect(self)\n\n def start_databases(self, databases):\n \"\"\"Start the databases.\"\"\"\n if len(databases) == 0:\n logging.warning(\"All databases failed to load\")\n for database_module in databases:\n for name, cls in database_module[\"module\"].__dict__.items():\n if isinstance(cls, type) and \"Database\" in name:\n logging.debug(\"Adding database: \" + name)\n database = cls(database_module[\"config\"])\n self.memory.databases.append(database)\n database.connect()\n\n def load_regex_skill(self, regex, skill):\n \"\"\"Load skills.\"\"\"\n self.skills.append({\"regex\": regex, \"skill\": skill})\n\n def parse(self, message):\n \"\"\"Parse a string against 
all skills.\"\"\"\n if message.text.strip() != \"\":\n logging.debug(\"Parsing input: \" + message.text)\n for skill in self.skills:\n if \"regex\" in skill:\n regex = match(skill[\"regex\"], message.text)\n if regex:\n message.regex = regex\n skill[\"skill\"](self, message)\n", "path": "opsdroid/core.py"}]}
1,370
252
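A minimal sketch of the pattern the golden_diff above introduces: each blocking `connect()` runs in its own `multiprocessing.Process`, and the parent then joins them all. The connector function below is a stand-in, not opsdroid's real API beyond what the diff shows:

```python
from multiprocessing import Process


def connect(config):
    # stand-in for a connector's blocking accept-loop
    print("listening as", config["bot-name"])


if __name__ == "__main__":  # guard needed where the start method is spawn
    configs = [{"bot-name": "opsdroid"}, {"bot-name": "backup"}]
    jobs = []
    for cfg in configs:
        job = Process(target=connect, args=(cfg,))
        job.start()
        jobs.append(job)
    for job in jobs:
        job.join()  # as in the patch: block until every connector exits
```

The tradeoff the linked issue already flags: separate processes do not share in-memory state, which is why the change depends on persistent memory (#5) rather than the parent's `self.memory`.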
gh_patches_debug_31638
rasdani/github-patches
git_diff
bridgecrewio__checkov-5766
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Azure Function App Slots - Ensure web app redirects all HTTP traffic to HTTPS in Azure Function App Slots **Describe the issue** It seems that there are no checks that ensure that the following resource only allows HTTPS: - azurerm_function_app_slot - azurerm_linux_function_app_slot - azurerm_windows_function_app_slot **Examples** ````hcl resource "azurerm_function_app_slot" "example" { name = "test-azure-functions_slot" location = azurerm_resource_group.example.location resource_group_name = azurerm_resource_group.example.name app_service_plan_id = azurerm_app_service_plan.example.id function_app_name = azurerm_function_app.example.name storage_account_name = azurerm_storage_account.example.name storage_account_access_key = azurerm_storage_account.example.primary_access_key https_only = true } resource "azurerm_linux_function_app_slot" "example" { name = "example-linux-function-app-slot" function_app_id = azurerm_linux_function_app.example.id storage_account_name = azurerm_storage_account.example.name site_config { require_https = true } } resource "azurerm_windows_function_app" "example" { name = "example-windows-function-app" resource_group_name = azurerm_resource_group.example.name location = azurerm_resource_group.example.location storage_account_name = azurerm_storage_account.example.name service_plan_id = azurerm_service_plan.example.id site_config { require_https = true } } ```` **Version (please complete the following information):** - N/A **Additional context** N/A </issue> <code> [start of checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py] 1 from checkov.common.models.enums import CheckCategories 2 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck 3 4 5 class FunctionAppsAccessibleOverHttps(BaseResourceValueCheck): 6 def __init__(self): 7 name = "Ensure that Function apps is only accessible over HTTPS" 8 id = "CKV_AZURE_70" 9 supported_resources = ['azurerm_function_app'] 10 categories = [CheckCategories.NETWORKING] 11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) 12 13 def get_inspected_key(self): 14 return 'https_only' 15 16 17 check = FunctionAppsAccessibleOverHttps() 18 [end of checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py b/checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py --- a/checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py +++ b/checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py @@ -1,17 +1,44 @@ -from checkov.common.models.enums import CheckCategories -from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck +from __future__ import annotations +from typing import Any -class FunctionAppsAccessibleOverHttps(BaseResourceValueCheck): - def __init__(self): +from checkov.common.models.enums import CheckCategories, CheckResult +from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck + + +class FunctionAppsAccessibleOverHttps(BaseResourceCheck): + + def __init__(self) -> None: name = "Ensure that Function apps is only accessible over HTTPS" id = "CKV_AZURE_70" - supported_resources = ['azurerm_function_app'] + supported_resources = ['azurerm_function_app', 'azurerm_linux_function_app', 'azurerm_windows_function_app', + 'azurerm_function_app_slot', 'azurerm_linux_function_app_slot', + 'azurerm_windows_function_app_slot'] categories = [CheckCategories.NETWORKING] super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) - def get_inspected_key(self): - return 'https_only' + def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult: + # default=false for https_only + if 'https_only' not in conf.keys(): + return CheckResult.FAILED + + https_only = conf.get('https_only')[0] + if not https_only: + return CheckResult.FAILED + + # relevant for linux/windows resources + if 'auth_settings_v2' in conf.keys(): + auth_settings_v2 = conf['auth_settings_v2'][0] + + # default=true for require_https + if 'require_https' not in auth_settings_v2.keys(): + return CheckResult.PASSED + + require_https = auth_settings_v2.get('require_https')[0] + if not require_https: + return CheckResult.FAILED + + return CheckResult.PASSED check = FunctionAppsAccessibleOverHttps()
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py b/checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py\n--- a/checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py\n+++ b/checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py\n@@ -1,17 +1,44 @@\n-from checkov.common.models.enums import CheckCategories\n-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n+from __future__ import annotations\n \n+from typing import Any\n \n-class FunctionAppsAccessibleOverHttps(BaseResourceValueCheck):\n- def __init__(self):\n+from checkov.common.models.enums import CheckCategories, CheckResult\n+from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n+\n+\n+class FunctionAppsAccessibleOverHttps(BaseResourceCheck):\n+\n+ def __init__(self) -> None:\n name = \"Ensure that Function apps is only accessible over HTTPS\"\n id = \"CKV_AZURE_70\"\n- supported_resources = ['azurerm_function_app']\n+ supported_resources = ['azurerm_function_app', 'azurerm_linux_function_app', 'azurerm_windows_function_app',\n+ 'azurerm_function_app_slot', 'azurerm_linux_function_app_slot',\n+ 'azurerm_windows_function_app_slot']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n- def get_inspected_key(self):\n- return 'https_only'\n+ def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:\n+ # default=false for https_only\n+ if 'https_only' not in conf.keys():\n+ return CheckResult.FAILED\n+\n+ https_only = conf.get('https_only')[0]\n+ if not https_only:\n+ return CheckResult.FAILED\n+\n+ # relevant for linux/windows resources\n+ if 'auth_settings_v2' in conf.keys():\n+ auth_settings_v2 = conf['auth_settings_v2'][0]\n+\n+ # default=true for require_https\n+ if 'require_https' not in auth_settings_v2.keys():\n+ return CheckResult.PASSED\n+\n+ require_https = auth_settings_v2.get('require_https')[0]\n+ if not require_https:\n+ return CheckResult.FAILED\n+\n+ return CheckResult.PASSED\n \n \n check = FunctionAppsAccessibleOverHttps()\n", "issue": "Azure Function App Slots - Ensure web app redirects all HTTP traffic to HTTPS in Azure Function App Slots\n**Describe the issue**\r\nIt seems that there are no checks that ensure that the following resource only allows HTTPS:\r\n\r\n- azurerm_function_app_slot\r\n- azurerm_linux_function_app_slot\r\n- azurerm_windows_function_app_slot\r\n\r\n**Examples**\r\n\r\n````hcl\r\nresource \"azurerm_function_app_slot\" \"example\" {\r\n name = \"test-azure-functions_slot\"\r\n location = azurerm_resource_group.example.location\r\n resource_group_name = azurerm_resource_group.example.name\r\n app_service_plan_id = azurerm_app_service_plan.example.id\r\n function_app_name = azurerm_function_app.example.name\r\n storage_account_name = azurerm_storage_account.example.name\r\n storage_account_access_key = azurerm_storage_account.example.primary_access_key\r\n https_only = true\r\n}\r\n\r\nresource \"azurerm_linux_function_app_slot\" \"example\" {\r\n name = \"example-linux-function-app-slot\"\r\n function_app_id = azurerm_linux_function_app.example.id\r\n storage_account_name = azurerm_storage_account.example.name\r\n\r\n site_config {\r\n require_https = true \r\n }\r\n}\r\n\r\nresource \"azurerm_windows_function_app\" \"example\" {\r\n name = \"example-windows-function-app\"\r\n resource_group_name = 
azurerm_resource_group.example.name\r\n location = azurerm_resource_group.example.location\r\n storage_account_name = azurerm_storage_account.example.name\r\n service_plan_id = azurerm_service_plan.example.id\r\n\r\n site_config {\r\n require_https = true \r\n }\r\n}\r\n\r\n````\r\n\r\n**Version (please complete the following information):**\r\n - N/A\r\n \r\n**Additional context**\r\n\r\nN/A\n", "before_files": [{"content": "from checkov.common.models.enums import CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass FunctionAppsAccessibleOverHttps(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure that Function apps is only accessible over HTTPS\"\n id = \"CKV_AZURE_70\"\n supported_resources = ['azurerm_function_app']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return 'https_only'\n\n\ncheck = FunctionAppsAccessibleOverHttps()\n", "path": "checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py"}]}
1,103
556
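For the rewritten checkov check above, the parsed `conf` maps each Terraform attribute to a one-element list — that is what the diff's `[0]` subscripts assume. A rough sketch of how the decision paths line up with inputs, assuming the patched module is importable under the path shown in the record:

```python
from checkov.common.models.enums import CheckResult
from checkov.terraform.checks.resource.azure.FunctionAppsAccessibleOverHttps import check

assert check.scan_resource_conf({}) == CheckResult.FAILED                # https_only missing
assert check.scan_resource_conf({"https_only": [False]}) == CheckResult.FAILED
assert check.scan_resource_conf({"https_only": [True]}) == CheckResult.PASSED
assert check.scan_resource_conf(
    {"https_only": [True], "auth_settings_v2": [{"require_https": [False]}]}
) == CheckResult.FAILED                                                  # explicit opt-out wins
```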
gh_patches_debug_2348
rasdani/github-patches
git_diff
cornellius-gp__gpytorch-2285
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Docs] Bernoulli likelihoods # 📚 Documentation/Examples In the document for [Bernoulli likelihoods](https://docs.gpytorch.ai/en/stable/likelihoods.html), since the labels take value in {0, 1}, the likelihood should be $$p(Y=y | f) = \Phi ((2y-1)f)$$ instead of the currently displayed $$p(Y=y|f) = \Phi(yf).$$ </issue> <code> [start of gpytorch/likelihoods/bernoulli_likelihood.py] 1 #!/usr/bin/env python3 2 3 import warnings 4 5 import torch 6 7 from ..distributions import base_distributions 8 from ..functions import log_normal_cdf 9 from .likelihood import _OneDimensionalLikelihood 10 11 12 class BernoulliLikelihood(_OneDimensionalLikelihood): 13 r""" 14 Implements the Bernoulli likelihood used for GP classification, using 15 Probit regression (i.e., the latent function is warped to be in [0,1] 16 using the standard Normal CDF :math:`\Phi(x)`). Given the identity 17 :math:`\Phi(-x) = 1-\Phi(x)`, we can write the likelihood compactly as: 18 19 .. math:: 20 \begin{equation*} 21 p(Y=y|f)=\Phi(yf) 22 \end{equation*} 23 """ 24 25 def forward(self, function_samples, **kwargs): 26 output_probs = base_distributions.Normal(0, 1).cdf(function_samples) 27 return base_distributions.Bernoulli(probs=output_probs) 28 29 def log_marginal(self, observations, function_dist, *args, **kwargs): 30 marginal = self.marginal(function_dist, *args, **kwargs) 31 return marginal.log_prob(observations) 32 33 def marginal(self, function_dist, **kwargs): 34 mean = function_dist.mean 35 var = function_dist.variance 36 link = mean.div(torch.sqrt(1 + var)) 37 output_probs = base_distributions.Normal(0, 1).cdf(link) 38 return base_distributions.Bernoulli(probs=output_probs) 39 40 def expected_log_prob(self, observations, function_dist, *params, **kwargs): 41 if torch.any(observations.eq(-1)): 42 # Remove after 1.0 43 warnings.warn( 44 "BernoulliLikelihood.expected_log_prob expects observations with labels in {0, 1}. " 45 "Observations with labels in {-1, 1} are deprecated.", 46 DeprecationWarning, 47 ) 48 else: 49 observations = observations.mul(2).sub(1) 50 # Custom function here so we can use log_normal_cdf rather than Normal.cdf 51 # This is going to be less prone to overflow errors 52 log_prob_lambda = lambda function_samples: log_normal_cdf(function_samples.mul(observations)) 53 log_prob = self.quadrature(log_prob_lambda, function_dist) 54 return log_prob 55 [end of gpytorch/likelihoods/bernoulli_likelihood.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/gpytorch/likelihoods/bernoulli_likelihood.py b/gpytorch/likelihoods/bernoulli_likelihood.py --- a/gpytorch/likelihoods/bernoulli_likelihood.py +++ b/gpytorch/likelihoods/bernoulli_likelihood.py @@ -18,8 +18,11 @@ .. math:: \begin{equation*} - p(Y=y|f)=\Phi(yf) + p(Y=y|f)=\Phi((2y - 1)f) \end{equation*} + + .. note:: + The labels should take values in {0, 1}. """ def forward(self, function_samples, **kwargs):
{"golden_diff": "diff --git a/gpytorch/likelihoods/bernoulli_likelihood.py b/gpytorch/likelihoods/bernoulli_likelihood.py\n--- a/gpytorch/likelihoods/bernoulli_likelihood.py\n+++ b/gpytorch/likelihoods/bernoulli_likelihood.py\n@@ -18,8 +18,11 @@\n \n .. math::\n \\begin{equation*}\n- p(Y=y|f)=\\Phi(yf)\n+ p(Y=y|f)=\\Phi((2y - 1)f)\n \\end{equation*}\n+\n+ .. note::\n+ The labels should take values in {0, 1}.\n \"\"\"\n \n def forward(self, function_samples, **kwargs):\n", "issue": "[Docs] Bernoulli likelihoods \n# \ud83d\udcda Documentation/Examples\r\n\r\nIn the document for [Bernoulli likelihoods](https://docs.gpytorch.ai/en/stable/likelihoods.html), since the labels take value in {0, 1}, the likelihood should be \r\n\r\n$$p(Y=y | f) = \\Phi ((2y-1)f)$$\r\n\r\ninstead of the currently displayed\r\n\r\n$$p(Y=y|f) = \\Phi(yf).$$\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport warnings\n\nimport torch\n\nfrom ..distributions import base_distributions\nfrom ..functions import log_normal_cdf\nfrom .likelihood import _OneDimensionalLikelihood\n\n\nclass BernoulliLikelihood(_OneDimensionalLikelihood):\n r\"\"\"\n Implements the Bernoulli likelihood used for GP classification, using\n Probit regression (i.e., the latent function is warped to be in [0,1]\n using the standard Normal CDF :math:`\\Phi(x)`). Given the identity\n :math:`\\Phi(-x) = 1-\\Phi(x)`, we can write the likelihood compactly as:\n\n .. math::\n \\begin{equation*}\n p(Y=y|f)=\\Phi(yf)\n \\end{equation*}\n \"\"\"\n\n def forward(self, function_samples, **kwargs):\n output_probs = base_distributions.Normal(0, 1).cdf(function_samples)\n return base_distributions.Bernoulli(probs=output_probs)\n\n def log_marginal(self, observations, function_dist, *args, **kwargs):\n marginal = self.marginal(function_dist, *args, **kwargs)\n return marginal.log_prob(observations)\n\n def marginal(self, function_dist, **kwargs):\n mean = function_dist.mean\n var = function_dist.variance\n link = mean.div(torch.sqrt(1 + var))\n output_probs = base_distributions.Normal(0, 1).cdf(link)\n return base_distributions.Bernoulli(probs=output_probs)\n\n def expected_log_prob(self, observations, function_dist, *params, **kwargs):\n if torch.any(observations.eq(-1)):\n # Remove after 1.0\n warnings.warn(\n \"BernoulliLikelihood.expected_log_prob expects observations with labels in {0, 1}. \"\n \"Observations with labels in {-1, 1} are deprecated.\",\n DeprecationWarning,\n )\n else:\n observations = observations.mul(2).sub(1)\n # Custom function here so we can use log_normal_cdf rather than Normal.cdf\n # This is going to be less prone to overflow errors\n log_prob_lambda = lambda function_samples: log_normal_cdf(function_samples.mul(observations))\n log_prob = self.quadrature(log_prob_lambda, function_dist)\n return log_prob\n", "path": "gpytorch/likelihoods/bernoulli_likelihood.py"}]}
1,262
160
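The one-line docstring fix above is easy to sanity-check. With labels y ∈ {0, 1}, the factor 2y − 1 maps them onto {−1, +1} — exactly what the code's `observations.mul(2).sub(1)` does — and the standard-normal CDF identity Φ(−x) = 1 − Φ(x) then yields both class probabilities:

```latex
p(Y = y \mid f) = \Phi\bigl((2y - 1)\,f\bigr), \qquad y \in \{0, 1\}
\;\Longrightarrow\;
p(Y = 1 \mid f) = \Phi(f), \qquad
p(Y = 0 \mid f) = \Phi(-f) = 1 - \Phi(f).
```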
gh_patches_debug_1415
rasdani/github-patches
git_diff
bokeh__bokeh-1434
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Tools get lost on Grid Plots JS logic error prevents all tools from showing up in the toolbar. (cf. comment in #1342) </issue> <code> [start of examples/plotting/file/grid.py] 1 import numpy as np 2 3 from bokeh.plotting import * 4 5 N = 50 6 7 x = np.linspace(0, 4*np.pi, N) 8 y = np.sin(x) 9 10 TOOLS = "pan,wheel_zoom,box_zoom,reset,save" 11 12 l = figure(title="line", tools=TOOLS) 13 l.line(x,y, line_width=3, color="gold") 14 15 aw = figure(title="annular wedge", tools=TOOLS) 16 aw.annular_wedge(x, y, 10, 20, 0.6, 4.1, color="navy", alpha=0.5, 17 inner_radius_units="screen", outer_radius_units="screen") 18 19 bez = figure(title="bezier", tools=TOOLS) 20 bez.bezier(x, y, x+0.4, y, x+0.1, y+0.2, x-0.1, y-0.2, 21 line_width=2, color="olive") 22 23 q = figure(title="quad", tools=TOOLS) 24 q.quad(x, x-0.2, y, y-0.2, color="tomato", alpha=0.4) 25 26 p = gridplot([[l,aw],[bez,q]]) 27 28 output_file("grid.html", title="grid.py example") 29 show(p) 30 [end of examples/plotting/file/grid.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/plotting/file/grid.py b/examples/plotting/file/grid.py --- a/examples/plotting/file/grid.py +++ b/examples/plotting/file/grid.py @@ -7,7 +7,7 @@ x = np.linspace(0, 4*np.pi, N) y = np.sin(x) -TOOLS = "pan,wheel_zoom,box_zoom,reset,save" +TOOLS = "pan,wheel_zoom,box_zoom,reset,save,crosshair" l = figure(title="line", tools=TOOLS) l.line(x,y, line_width=3, color="gold")
{"golden_diff": "diff --git a/examples/plotting/file/grid.py b/examples/plotting/file/grid.py\n--- a/examples/plotting/file/grid.py\n+++ b/examples/plotting/file/grid.py\n@@ -7,7 +7,7 @@\n x = np.linspace(0, 4*np.pi, N)\n y = np.sin(x)\n \n-TOOLS = \"pan,wheel_zoom,box_zoom,reset,save\"\n+TOOLS = \"pan,wheel_zoom,box_zoom,reset,save,crosshair\"\n \n l = figure(title=\"line\", tools=TOOLS)\n l.line(x,y, line_width=3, color=\"gold\")\n", "issue": "Tools get lost on Grid Plots\nJS logic error prevents all tools from showing up in the toolbar. (cf. comment in #1342)\n\n", "before_files": [{"content": "import numpy as np\n\nfrom bokeh.plotting import *\n\nN = 50\n\nx = np.linspace(0, 4*np.pi, N)\ny = np.sin(x)\n\nTOOLS = \"pan,wheel_zoom,box_zoom,reset,save\"\n\nl = figure(title=\"line\", tools=TOOLS)\nl.line(x,y, line_width=3, color=\"gold\")\n\naw = figure(title=\"annular wedge\", tools=TOOLS)\naw.annular_wedge(x, y, 10, 20, 0.6, 4.1, color=\"navy\", alpha=0.5,\n inner_radius_units=\"screen\", outer_radius_units=\"screen\")\n\nbez = figure(title=\"bezier\", tools=TOOLS)\nbez.bezier(x, y, x+0.4, y, x+0.1, y+0.2, x-0.1, y-0.2,\n line_width=2, color=\"olive\")\n\nq = figure(title=\"quad\", tools=TOOLS)\nq.quad(x, x-0.2, y, y-0.2, color=\"tomato\", alpha=0.4)\n\np = gridplot([[l,aw],[bez,q]])\n\noutput_file(\"grid.html\", title=\"grid.py example\")\nshow(p)\n", "path": "examples/plotting/file/grid.py"}]}
907
133
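The bokeh patch above only touches the example's shared `TOOLS` string (adding `crosshair`, so the grid example exercises more of the toolbar-merging path behind the JS bug in the issue). A stripped-down sketch in that era's API — note the star import matches the example file itself, and in modern bokeh `gridplot` lives in `bokeh.layouts` rather than `bokeh.plotting`:

```python
from bokeh.plotting import *  # the example file's own import style (old bokeh)

TOOLS = "pan,wheel_zoom,box_zoom,reset,save,crosshair"

p1 = figure(title="one", tools=TOOLS)
p1.line([1, 2, 3], [1, 4, 9], line_width=2)

p2 = figure(title="two", tools=TOOLS)
p2.line([1, 2, 3], [3, 2, 1], line_width=2)

grid = gridplot([[p1, p2]])  # every tool listed above should survive into the merged toolbar
output_file("grid_tools.html")
show(grid)
```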
gh_patches_debug_2061
rasdani/github-patches
git_diff
plotly__dash-565
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> New version of dash_renderer is not automatically installed with Dash 0.36.0 Deploying apps on Dash Deployment Server results in `dash-renderer` not being updated if it is already installed (even if that version is `0.16.x` and the Dash version is specified as `0.36.0`. This causes an `Error loading dependencies`, as `dash-renderer` attempts to attach event handlers to Dash events, which don't exist any more. </issue> <code> [start of setup.py] 1 import io 2 from setuptools import setup, find_packages 3 4 main_ns = {} 5 exec(open('dash/version.py').read(), main_ns) # pylint: disable=exec-used 6 7 setup( 8 name='dash', 9 version=main_ns['__version__'], 10 author='chris p', 11 author_email='[email protected]', 12 packages=find_packages(exclude=['tests*']), 13 include_package_data=True, 14 license='MIT', 15 description=('A Python framework for building reactive web-apps. ' 16 'Developed by Plotly.'), 17 long_description=io.open('README.md', encoding='utf-8').read(), 18 long_description_content_type='text/markdown', 19 install_requires=[ 20 'Flask>=0.12', 21 'flask-compress', 22 'plotly', 23 'dash_renderer', 24 ], 25 entry_points={ 26 'console_scripts': [ 27 'dash-generate-components =' 28 ' dash.development.component_generator:cli' 29 ] 30 }, 31 url='https://plot.ly/dash', 32 classifiers=[ 33 'Development Status :: 5 - Production/Stable', 34 'Environment :: Web Environment', 35 'Framework :: Flask', 36 'Intended Audience :: Developers', 37 'Intended Audience :: Education', 38 'Intended Audience :: Financial and Insurance Industry', 39 'Intended Audience :: Healthcare Industry', 40 'Intended Audience :: Manufacturing', 41 'Intended Audience :: Science/Research', 42 'License :: OSI Approved :: MIT License', 43 'Programming Language :: Python', 44 'Programming Language :: Python :: 2', 45 'Programming Language :: Python :: 2.7', 46 'Programming Language :: Python :: 3', 47 'Programming Language :: Python :: 3.3', 48 'Programming Language :: Python :: 3.4', 49 'Programming Language :: Python :: 3.5', 50 'Programming Language :: Python :: 3.6', 51 'Programming Language :: Python :: 3.7', 52 'Topic :: Database :: Front-Ends', 53 'Topic :: Office/Business :: Financial :: Spreadsheet', 54 'Topic :: Scientific/Engineering :: Visualization', 55 'Topic :: Software Development :: Libraries :: Application Frameworks', 56 'Topic :: Software Development :: Widget Sets' 57 ] 58 ) 59 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -20,7 +20,10 @@ 'Flask>=0.12', 'flask-compress', 'plotly', - 'dash_renderer', + 'dash_renderer==0.17.0', + 'dash-core-components==0.43.0', + 'dash-html-components==0.13.5', + 'dash-table==3.3.0' ], entry_points={ 'console_scripts': [
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -20,7 +20,10 @@\n 'Flask>=0.12',\n 'flask-compress',\n 'plotly',\n- 'dash_renderer',\n+ 'dash_renderer==0.17.0',\n+ 'dash-core-components==0.43.0',\n+ 'dash-html-components==0.13.5',\n+ 'dash-table==3.3.0'\n ],\n entry_points={\n 'console_scripts': [\n", "issue": "New version of dash_renderer is not automatically installed with Dash 0.36.0\nDeploying apps on Dash Deployment Server results in `dash-renderer` not being updated if it is already installed (even if that version is `0.16.x` and the Dash version is specified as `0.36.0`. This causes an `Error loading dependencies`, as `dash-renderer` attempts to attach event handlers to Dash events, which don't exist any more. \n", "before_files": [{"content": "import io\nfrom setuptools import setup, find_packages\n\nmain_ns = {}\nexec(open('dash/version.py').read(), main_ns) # pylint: disable=exec-used\n\nsetup(\n name='dash',\n version=main_ns['__version__'],\n author='chris p',\n author_email='[email protected]',\n packages=find_packages(exclude=['tests*']),\n include_package_data=True,\n license='MIT',\n description=('A Python framework for building reactive web-apps. '\n 'Developed by Plotly.'),\n long_description=io.open('README.md', encoding='utf-8').read(),\n long_description_content_type='text/markdown',\n install_requires=[\n 'Flask>=0.12',\n 'flask-compress',\n 'plotly',\n 'dash_renderer',\n ],\n entry_points={\n 'console_scripts': [\n 'dash-generate-components ='\n ' dash.development.component_generator:cli'\n ]\n },\n url='https://plot.ly/dash',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Framework :: Flask',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Financial and Insurance Industry',\n 'Intended Audience :: Healthcare Industry',\n 'Intended Audience :: Manufacturing',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Database :: Front-Ends',\n 'Topic :: Office/Business :: Financial :: Spreadsheet',\n 'Topic :: Scientific/Engineering :: Visualization',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Widget Sets'\n ]\n)\n", "path": "setup.py"}]}
1,206
127
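The dash fix above is pure packaging: the floating `dash_renderer` requirement becomes an exact pin, with the component libraries pinned alongside it, so installing `dash==0.36.0` forces pip to move `dash-renderer` forward instead of keeping a stale 0.16.x. The essential change, isolated:

```python
# setup.py (excerpt) -- exact pins keep the JS renderer in lockstep with dash
install_requires = [
    "Flask>=0.12",
    "flask-compress",
    "plotly",
    "dash_renderer==0.17.0",
    "dash-core-components==0.43.0",
    "dash-html-components==0.13.5",
    "dash-table==3.3.0",
]
```

The `==` pins trade upgrade flexibility for a guarantee that the serialized component tree and the renderer agree — which matters here because the new renderer dropped the old Dash-events API the issue describes.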
gh_patches_debug_10715
rasdani/github-patches
git_diff
akvo__akvo-rsr-2711
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Custom stylesheets for Akvo pages missing The custom CSS that can be uploaded to an Akvo page site is currently not served. This is due to the link to it accidentally being removed when the main stylesheet for RSR was supplemented with a second sheet for the new Results. </issue> <code> [start of akvo/rsr/context_processors.py] 1 # -*- coding: utf-8 -*- 2 """ 3 Akvo RSR is covered by the GNU Affero General Public License. 4 5 See more details in the license.txt file located at the root folder of the 6 Akvo RSR module. For additional details on the GNU license please see 7 < http://www.gnu.org/licenses/agpl.html >. 8 """ 9 10 import django 11 12 from django.conf import settings 13 from django.core.exceptions import DisallowedHost 14 from django.contrib.sites.models import get_current_site 15 16 17 def extra_context(request, protocol="http"): 18 """Add information to the request context.""" 19 try: 20 current_site = get_current_site(request) 21 except DisallowedHost: 22 current_site = None 23 24 django_version = django.get_version() 25 debug = getattr(settings, 'DEBUG', False) 26 deploy_tag = getattr(settings, 'DEPLOY_TAG', 'Unknown') 27 deploy_branch = getattr(settings, 'DEPLOY_BRANCH', 'Unknown') 28 deploy_commit_id = getattr(settings, 'DEPLOY_COMMIT_ID', 'Unknown') 29 deploy_commit_full_id = getattr(settings, 'DEPLOY_COMMIT_FULL_ID', 'Unknown') 30 31 return dict( 32 current_site=current_site, 33 django_version=django_version, 34 debug=debug, 35 deploy_tag=deploy_tag, 36 deploy_branch=deploy_branch, 37 deploy_commit_id=deploy_commit_id, 38 deploy_commit_full_id=deploy_commit_full_id 39 ) 40 41 42 def get_current_path_without_lang(request): 43 """Return current path without lang.""" 44 path = request.get_full_path() 45 path_bits = path.split('/') 46 path = '/'.join(path_bits[2:]) 47 return {'current_path_without_lang': path} 48 49 50 def extra_pages_context(request): 51 """Add context information of an RSR Page.""" 52 if request.rsr_page: 53 page = request.rsr_page 54 return { 55 'rsr_page': page, 56 'favicon': page.favicon, 57 'logo': page.logo, 58 'organisation': page.organisation, 59 'return_url': page.return_url, 60 'return_url_text': page.custom_return_url_text, 61 'stylesheet': page.stylesheet, 62 'akvoapp_root_url': '//{}'.format(settings.AKVOAPP_DOMAIN), 63 'domain_url': '//{}'.format(settings.RSR_DOMAIN), 64 'no_facebook': not page.facebook_button, 65 'facebook_app_id': page.facebook_app_id, 66 'no_twitter': not page.twitter_button, 67 } 68 69 return {} 70 [end of akvo/rsr/context_processors.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/akvo/rsr/context_processors.py b/akvo/rsr/context_processors.py --- a/akvo/rsr/context_processors.py +++ b/akvo/rsr/context_processors.py @@ -58,7 +58,7 @@ 'organisation': page.organisation, 'return_url': page.return_url, 'return_url_text': page.custom_return_url_text, - 'stylesheet': page.stylesheet, + 'page_stylesheet': page.stylesheet, 'akvoapp_root_url': '//{}'.format(settings.AKVOAPP_DOMAIN), 'domain_url': '//{}'.format(settings.RSR_DOMAIN), 'no_facebook': not page.facebook_button,
{"golden_diff": "diff --git a/akvo/rsr/context_processors.py b/akvo/rsr/context_processors.py\n--- a/akvo/rsr/context_processors.py\n+++ b/akvo/rsr/context_processors.py\n@@ -58,7 +58,7 @@\n 'organisation': page.organisation,\n 'return_url': page.return_url,\n 'return_url_text': page.custom_return_url_text,\n- 'stylesheet': page.stylesheet,\n+ 'page_stylesheet': page.stylesheet,\n 'akvoapp_root_url': '//{}'.format(settings.AKVOAPP_DOMAIN),\n 'domain_url': '//{}'.format(settings.RSR_DOMAIN),\n 'no_facebook': not page.facebook_button,\n", "issue": "Custom stylesheets for Akvo pages missing\nThe custom CSS that can be uploaded to an Akvo page site is currently not served.\r\n\r\nThis is due to the link to it accidentally being removed when the main stylesheet for RSR was supplemented with a second sheet for the new Results.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nAkvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the\nAkvo RSR module. For additional details on the GNU license please see\n< http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nimport django\n\nfrom django.conf import settings\nfrom django.core.exceptions import DisallowedHost\nfrom django.contrib.sites.models import get_current_site\n\n\ndef extra_context(request, protocol=\"http\"):\n \"\"\"Add information to the request context.\"\"\"\n try:\n current_site = get_current_site(request)\n except DisallowedHost:\n current_site = None\n\n django_version = django.get_version()\n debug = getattr(settings, 'DEBUG', False)\n deploy_tag = getattr(settings, 'DEPLOY_TAG', 'Unknown')\n deploy_branch = getattr(settings, 'DEPLOY_BRANCH', 'Unknown')\n deploy_commit_id = getattr(settings, 'DEPLOY_COMMIT_ID', 'Unknown')\n deploy_commit_full_id = getattr(settings, 'DEPLOY_COMMIT_FULL_ID', 'Unknown')\n\n return dict(\n current_site=current_site,\n django_version=django_version,\n debug=debug,\n deploy_tag=deploy_tag,\n deploy_branch=deploy_branch,\n deploy_commit_id=deploy_commit_id,\n deploy_commit_full_id=deploy_commit_full_id\n )\n\n\ndef get_current_path_without_lang(request):\n \"\"\"Return current path without lang.\"\"\"\n path = request.get_full_path()\n path_bits = path.split('/')\n path = '/'.join(path_bits[2:])\n return {'current_path_without_lang': path}\n\n\ndef extra_pages_context(request):\n \"\"\"Add context information of an RSR Page.\"\"\"\n if request.rsr_page:\n page = request.rsr_page\n return {\n 'rsr_page': page,\n 'favicon': page.favicon,\n 'logo': page.logo,\n 'organisation': page.organisation,\n 'return_url': page.return_url,\n 'return_url_text': page.custom_return_url_text,\n 'stylesheet': page.stylesheet,\n 'akvoapp_root_url': '//{}'.format(settings.AKVOAPP_DOMAIN),\n 'domain_url': '//{}'.format(settings.RSR_DOMAIN),\n 'no_facebook': not page.facebook_button,\n 'facebook_app_id': page.facebook_app_id,\n 'no_twitter': not page.twitter_button,\n }\n\n return {}\n", "path": "akvo/rsr/context_processors.py"}]}
1,235
146
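The akvo patch above only renames the context key; the template side (a `<link>` tag emitted when the key is set) is not part of the record, so its existence is an assumption here. A hypothetical check of what the patched context processor returns — it needs RSR's Django settings configured, so treat it as illustrative only:

```python
from types import SimpleNamespace

from akvo.rsr.context_processors import extra_pages_context  # import path from the record

# Stand-in request whose rsr_page quacks like the real Page model; attribute
# names come from the function body, the values are dummies.
page = SimpleNamespace(
    favicon="favicon.ico", logo="logo.png", organisation=None,
    return_url="/", custom_return_url_text="back", stylesheet="custom.css",
    facebook_button=False, facebook_app_id=None, twitter_button=False,
)
request = SimpleNamespace(rsr_page=page)

context = extra_pages_context(request)
assert "page_stylesheet" in context     # new key the templates must reference
assert "stylesheet" not in context      # old key is gone after the patch
```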
gh_patches_debug_129
rasdani/github-patches
git_diff
librosa__librosa-1839
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> librosa 0.10.2 is not compatible with matplotlib <3.5 ***BEFORE POSTING A BUG REPORT*** Please look through [existing issues (both open and closed)](https://github.com/librosa/librosa/issues?q=is%3Aissue) to see if it's already been reported or fixed! **librosa 0.10.2 is not imcompatible with matplotlib.colormap** When I try to user librosa.display, it reports the following error: cannot import name 'colormaps' from 'matplotlib' ![image](https://github.com/librosa/librosa/assets/51704570/d50df74f-c345-48ba-8953-b9b1efec3ff7) **error code** <!-- Example: ``` import librosa.display import matplotlib.pyplot as plt import numpy as np --> **Expected behavior** A clear and concise description of what you expected to happen. **Screenshots** If applicable, add screenshots to help explain your problem. **Software versions*** ![image](https://github.com/librosa/librosa/assets/51704570/957530c7-9656-44f9-8b0e-c1df49c3b61f) **Additional context** I have tried to change the version of matplotlib, but it does not work. And the versions I have tried are: 2.0.0, 3.0.0 librosa 0.10.2 is not compatible with matplotlib <3.5 ***BEFORE POSTING A BUG REPORT*** Please look through [existing issues (both open and closed)](https://github.com/librosa/librosa/issues?q=is%3Aissue) to see if it's already been reported or fixed! **librosa 0.10.2 is not imcompatible with matplotlib.colormap** When I try to user librosa.display, it reports the following error: cannot import name 'colormaps' from 'matplotlib' ![image](https://github.com/librosa/librosa/assets/51704570/d50df74f-c345-48ba-8953-b9b1efec3ff7) **error code** <!-- Example: ``` import librosa.display import matplotlib.pyplot as plt import numpy as np --> **Expected behavior** A clear and concise description of what you expected to happen. **Screenshots** If applicable, add screenshots to help explain your problem. **Software versions*** ![image](https://github.com/librosa/librosa/assets/51704570/957530c7-9656-44f9-8b0e-c1df49c3b61f) **Additional context** I have tried to change the version of matplotlib, but it does not work. And the versions I have tried are: 2.0.0, 3.0.0 </issue> <code> [start of librosa/version.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 """Version info""" 4 5 import sys 6 import importlib 7 8 short_version = "0.10" 9 version = "0.10.2" 10 11 12 def __get_mod_version(modname): 13 try: 14 if modname in sys.modules: 15 mod = sys.modules[modname] 16 else: 17 mod = importlib.import_module(modname) 18 try: 19 return mod.__version__ 20 except AttributeError: 21 return "installed, no version number available" 22 23 except ImportError: 24 return None 25 26 27 def show_versions() -> None: 28 """Return the version information for all librosa dependencies.""" 29 core_deps = [ 30 "audioread", 31 "numpy", 32 "scipy", 33 "sklearn", 34 "joblib", 35 "decorator", 36 "numba", 37 "soundfile", 38 "pooch", 39 "soxr", 40 "typing_extensions", 41 "lazy_loader", 42 "msgpack", 43 ] 44 45 extra_deps = [ 46 "numpydoc", 47 "sphinx", 48 "sphinx_rtd_theme", 49 "matplotlib", 50 "sphinx_multiversion", 51 "sphinx_gallery", 52 "mir_eval", 53 "ipython", 54 "sphinxcontrib.rsvgconverter", 55 "pytest", 56 "pytest_mpl", 57 "pytest_cov", 58 "samplerate", 59 "resampy", 60 "presets", 61 "packaging", 62 ] 63 64 print("INSTALLED VERSIONS") 65 print("------------------") 66 print(f"python: {sys.version}\n") 67 print(f"librosa: {version}\n") 68 for dep in core_deps: 69 print("{}: {}".format(dep, __get_mod_version(dep))) 70 print("") 71 for dep in extra_deps: 72 print("{}: {}".format(dep, __get_mod_version(dep))) 73 [end of librosa/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/librosa/version.py b/librosa/version.py --- a/librosa/version.py +++ b/librosa/version.py @@ -6,7 +6,7 @@ import importlib short_version = "0.10" -version = "0.10.2" +version = "0.10.2.post1" def __get_mod_version(modname):
{"golden_diff": "diff --git a/librosa/version.py b/librosa/version.py\n--- a/librosa/version.py\n+++ b/librosa/version.py\n@@ -6,7 +6,7 @@\n import importlib\n \n short_version = \"0.10\"\n-version = \"0.10.2\"\n+version = \"0.10.2.post1\"\n \n \n def __get_mod_version(modname):\n", "issue": "librosa 0.10.2 is not compatible with matplotlib <3.5\n***BEFORE POSTING A BUG REPORT*** Please look through [existing issues (both open and closed)](https://github.com/librosa/librosa/issues?q=is%3Aissue) to see if it's already been reported or fixed!\r\n\r\n\r\n**librosa 0.10.2 is not imcompatible with matplotlib.colormap**\r\n When I try to user librosa.display, it reports the following error: cannot import name 'colormaps' from 'matplotlib'\r\n![image](https://github.com/librosa/librosa/assets/51704570/d50df74f-c345-48ba-8953-b9b1efec3ff7)\r\n\r\n\r\n\r\n**error code**\r\n<!--\r\nExample:\r\n```\r\nimport librosa.display\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n-->\r\n**Expected behavior**\r\nA clear and concise description of what you expected to happen.\r\n\r\n**Screenshots**\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**Software versions***\r\n\r\n![image](https://github.com/librosa/librosa/assets/51704570/957530c7-9656-44f9-8b0e-c1df49c3b61f)\r\n\r\n**Additional context**\r\nI have tried to change the version of matplotlib, but it does not work. And the versions I have tried are: 2.0.0, 3.0.0\r\n\nlibrosa 0.10.2 is not compatible with matplotlib <3.5\n***BEFORE POSTING A BUG REPORT*** Please look through [existing issues (both open and closed)](https://github.com/librosa/librosa/issues?q=is%3Aissue) to see if it's already been reported or fixed!\r\n\r\n\r\n**librosa 0.10.2 is not imcompatible with matplotlib.colormap**\r\n When I try to user librosa.display, it reports the following error: cannot import name 'colormaps' from 'matplotlib'\r\n![image](https://github.com/librosa/librosa/assets/51704570/d50df74f-c345-48ba-8953-b9b1efec3ff7)\r\n\r\n\r\n\r\n**error code**\r\n<!--\r\nExample:\r\n```\r\nimport librosa.display\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n-->\r\n**Expected behavior**\r\nA clear and concise description of what you expected to happen.\r\n\r\n**Screenshots**\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**Software versions***\r\n\r\n![image](https://github.com/librosa/librosa/assets/51704570/957530c7-9656-44f9-8b0e-c1df49c3b61f)\r\n\r\n**Additional context**\r\nI have tried to change the version of matplotlib, but it does not work. 
And the versions I have tried are: 2.0.0, 3.0.0\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Version info\"\"\"\n\nimport sys\nimport importlib\n\nshort_version = \"0.10\"\nversion = \"0.10.2\"\n\n\ndef __get_mod_version(modname):\n try:\n if modname in sys.modules:\n mod = sys.modules[modname]\n else:\n mod = importlib.import_module(modname)\n try:\n return mod.__version__\n except AttributeError:\n return \"installed, no version number available\"\n\n except ImportError:\n return None\n\n\ndef show_versions() -> None:\n \"\"\"Return the version information for all librosa dependencies.\"\"\"\n core_deps = [\n \"audioread\",\n \"numpy\",\n \"scipy\",\n \"sklearn\",\n \"joblib\",\n \"decorator\",\n \"numba\",\n \"soundfile\",\n \"pooch\",\n \"soxr\",\n \"typing_extensions\",\n \"lazy_loader\",\n \"msgpack\",\n ]\n\n extra_deps = [\n \"numpydoc\",\n \"sphinx\",\n \"sphinx_rtd_theme\",\n \"matplotlib\",\n \"sphinx_multiversion\",\n \"sphinx_gallery\",\n \"mir_eval\",\n \"ipython\",\n \"sphinxcontrib.rsvgconverter\",\n \"pytest\",\n \"pytest_mpl\",\n \"pytest_cov\",\n \"samplerate\",\n \"resampy\",\n \"presets\",\n \"packaging\",\n ]\n\n print(\"INSTALLED VERSIONS\")\n print(\"------------------\")\n print(f\"python: {sys.version}\\n\")\n print(f\"librosa: {version}\\n\")\n for dep in core_deps:\n print(\"{}: {}\".format(dep, __get_mod_version(dep)))\n print(\"\")\n for dep in extra_deps:\n print(\"{}: {}\".format(dep, __get_mod_version(dep)))\n", "path": "librosa/version.py"}]}
1,748
88
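The released librosa fix above is only a version bump to `0.10.2.post1` (a repackaging); the underlying break in the issue is that `librosa.display` imports `matplotlib.colormaps`, a registry that first appeared in matplotlib 3.5, so older matplotlib installs fail at import time. A defensive floor check of the kind a caller could add (illustrative, not librosa's own code):

```python
import matplotlib
from packaging.version import Version

if Version(matplotlib.__version__) < Version("3.5"):
    raise ImportError(
        "librosa.display needs matplotlib >= 3.5 for matplotlib.colormaps; "
        f"found {matplotlib.__version__}"
    )

import librosa.display  # safe to import once the version floor is satisfied
```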
gh_patches_debug_23491
rasdani/github-patches
git_diff
ESMCI__cime-1396
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Build pFUnit on some other machines We should build pFUnit on some other machines so we can run unit tests there: - cheyenne - hobart Instructions for building pFUnit are: 1. Download pFUnit from http://sourceforge.net/projects/pfunit/files/latest/download 2. Set the PFUNIT environment variable. For example:: ``` export PFUNIT=/glade/p/cesmdata/cseg/tools/pFUnit/pFUnit3.2.8_cheyenne_Intel17.0.1_MPI_openMP ``` 3. Build pFUnit: ``` mkdir build cd build cmake -DMPI=YES -DOPENMP=YES .. make -j 4 ``` 4. Run pFUnit's own unit tests: ``` make tests ``` Note: this didn't work on yellowstone. Instead, I had to do it by hand: ``` DAV_CORES=4 execca mpirun.lsf tests/tests.x ``` 5. Install pFUnit on your system: ``` make install INSTALL_DIR=$PFUNIT ``` </issue> <code> [start of scripts/lib/CIME/BuildTools/cmakemacroswriter.py] 1 """Classes used to write build system files. 2 3 The classes here are used to write out settings for use by Makefile and CMake 4 build systems. The two relevant classes are CMakeMacroWriter and 5 MakeMacroWriter, which encapsulate the information necessary to write CMake and 6 Makefile formatted text, respectively. See the docstrings for those classes for 7 more. 8 """ 9 10 # This is not the most useful check. 11 # pylint: disable=invalid-name 12 13 from CIME.BuildTools.macrowriterbase import MacroWriterBase 14 from CIME.XML.standard_module_setup import * 15 logger = logging.getLogger(__name__) 16 17 18 class CMakeMacroWriter(MacroWriterBase): 19 20 """Macro writer for the CMake format. 21 22 For details on the provided methods, see MacroWriterBase, which this 23 class inherits from. 24 """ 25 26 def __init__(self, output): 27 """Initialize a CMake macro writer. 28 29 Arguments: 30 output - File-like object (probably an io.TextIOWrapper), which 31 will be written to. 32 """ 33 super(CMakeMacroWriter, self).__init__(output) 34 # This counter is for avoiding name conflicts in temporary 35 # variables used for shell commands. 36 self._var_num = 0 37 38 def environment_variable_string(self, name): 39 """Return an environment variable reference. 40 41 >>> import io 42 >>> s = io.StringIO() 43 >>> CMakeMacroWriter(s).environment_variable_string("foo") 44 '$ENV{foo}' 45 """ 46 return "$ENV{" + name + "}" 47 48 def shell_command_strings(self, command): 49 # pylint: disable=line-too-long 50 """Return strings used to get the output of a shell command. 51 52 >>> import io 53 >>> s = io.StringIO() 54 >>> set_up, inline, tear_down = CMakeMacroWriter(s).shell_command_strings("echo bar") 55 >>> set_up 56 'execute_process(COMMAND echo bar OUTPUT_VARIABLE CIME_TEMP_SHELL0 OUTPUT_STRIP_TRAILING_WHITESPACE)' 57 >>> inline 58 '${CIME_TEMP_SHELL0}' 59 >>> tear_down 60 'unset(CIME_TEMP_SHELL0)' 61 """ 62 # pylint: enable=line-too-long 63 # Create a unique variable name, then increment variable number 64 # counter so that we get a different value next time. 65 var_name = "CIME_TEMP_SHELL" + str(self._var_num) 66 self._var_num += 1 67 set_up = "execute_process(COMMAND " + command + \ 68 " OUTPUT_VARIABLE " + var_name + \ 69 " OUTPUT_STRIP_TRAILING_WHITESPACE)" 70 tear_down = "unset(" + var_name + ")" 71 return (set_up, "${" + var_name + "}", tear_down) 72 73 def variable_string(self, name): 74 """Return a string to refer to a variable with the given name. 75 76 >>> import io 77 >>> s = io.StringIO() 78 >>> CMakeMacroWriter(s).variable_string("foo") 79 '${foo}' 80 """ 81 return "${" + name + "}" 82 83 def set_variable(self, name, value): 84 """Write out a statement setting a variable to some value. 85 86 >>> import io 87 >>> s = io.StringIO() 88 >>> CMakeMacroWriter(s).set_variable("foo", "bar") 89 >>> s.getvalue() 90 u'set(foo "bar")\\n' 91 """ 92 self.write_line("set(" + name + ' "' + value + '")') 93 94 def start_ifeq(self, left, right): 95 """Write out a statement to start a conditional block. 96 97 >>> import io 98 >>> s = io.StringIO() 99 >>> CMakeMacroWriter(s).start_ifeq("foo", "bar") 100 >>> s.getvalue() 101 u'if("foo" STREQUAL "bar")\\n' 102 """ 103 self.write_line('if("' + left + '" STREQUAL "' + right + '")') 104 self.indent_right() 105 106 def end_ifeq(self): 107 """Write out a statement to end a block started with start_ifeq. 108 109 >>> import io 110 >>> s = io.StringIO() 111 >>> writer = CMakeMacroWriter(s) 112 >>> writer.start_ifeq("foo", "bar") 113 >>> writer.set_variable("foo2", "bar2") 114 >>> writer.end_ifeq() 115 >>> s.getvalue() 116 u'if("foo" STREQUAL "bar")\\n set(foo2 "bar2")\\nendif()\\n' 117 """ 118 self.indent_left() 119 self.write_line("endif()") 120 [end of scripts/lib/CIME/BuildTools/cmakemacroswriter.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scripts/lib/CIME/BuildTools/cmakemacroswriter.py b/scripts/lib/CIME/BuildTools/cmakemacroswriter.py --- a/scripts/lib/CIME/BuildTools/cmakemacroswriter.py +++ b/scripts/lib/CIME/BuildTools/cmakemacroswriter.py @@ -89,7 +89,8 @@ >>> s.getvalue() u'set(foo "bar")\\n' """ - self.write_line("set(" + name + ' "' + value + '")') + value_transformed = self._transform_value(value) + self.write_line("set(" + name + ' "' + value_transformed + '")') def start_ifeq(self, left, right): """Write out a statement to start a conditional block. @@ -117,3 +118,22 @@ """ self.indent_left() self.write_line("endif()") + + def _transform_value(self, value): + """Some elements need their values transformed in some way for CMake to handle them properly. + This method does those transformations. + + Args: + - value (str): value of element + + Returns transformed value + """ + + # Not all variables need leading & trailing whitespace removed, but some + # do. In particular, compiler variables (MPICC, MPICXX, MPIFC, SCC, + # SCXX, SFC) are only handled correctly if leading & trailing whitespace + # is removed. It doesn't seem to hurt to remove whitespace from all + # variables. + value_transformed = value.strip() + + return value_transformed
{"golden_diff": "diff --git a/scripts/lib/CIME/BuildTools/cmakemacroswriter.py b/scripts/lib/CIME/BuildTools/cmakemacroswriter.py\n--- a/scripts/lib/CIME/BuildTools/cmakemacroswriter.py\n+++ b/scripts/lib/CIME/BuildTools/cmakemacroswriter.py\n@@ -89,7 +89,8 @@\n >>> s.getvalue()\n u'set(foo \"bar\")\\\\n'\n \"\"\"\n- self.write_line(\"set(\" + name + ' \"' + value + '\")')\n+ value_transformed = self._transform_value(value)\n+ self.write_line(\"set(\" + name + ' \"' + value_transformed + '\")')\n \n def start_ifeq(self, left, right):\n \"\"\"Write out a statement to start a conditional block.\n@@ -117,3 +118,22 @@\n \"\"\"\n self.indent_left()\n self.write_line(\"endif()\")\n+\n+ def _transform_value(self, value):\n+ \"\"\"Some elements need their values transformed in some way for CMake to handle them properly.\n+ This method does those transformations.\n+\n+ Args:\n+ - value (str): value of element\n+\n+ Returns transformed value\n+ \"\"\"\n+\n+ # Not all variables need leading & trailing whitespace removed, but some\n+ # do. In particular, compiler variables (MPICC, MPICXX, MPIFC, SCC,\n+ # SCXX, SFC) are only handled correctly if leading & trailing whitespace\n+ # is removed. It doesn't seem to hurt to remove whitespace from all\n+ # variables.\n+ value_transformed = value.strip()\n+\n+ return value_transformed\n", "issue": "Build pFUnit on some other machines\nWe should build pFUnit on some other machines so we can run unit tests there:\r\n\r\n- cheyenne\r\n- hobart\r\n\r\nInstructions for building pFUnit are:\r\n\r\n1. Download pFUnit from\r\n http://sourceforge.net/projects/pfunit/files/latest/download\r\n\r\n2. Set the PFUNIT environment variable. For example::\r\n\r\n ```\r\n export PFUNIT=/glade/p/cesmdata/cseg/tools/pFUnit/pFUnit3.2.8_cheyenne_Intel17.0.1_MPI_openMP\r\n ```\r\n\r\n3. Build pFUnit:\r\n\r\n```\r\n mkdir build\r\n cd build\r\n cmake -DMPI=YES -DOPENMP=YES ..\r\n make -j 4\r\n```\r\n\r\n4. Run pFUnit's own unit tests:\r\n\r\n```\r\n make tests\r\n```\r\n\r\n Note: this didn't work on yellowstone. Instead, I had to do it by hand:\r\n\r\n```\r\n DAV_CORES=4 execca\r\n mpirun.lsf tests/tests.x\r\n```\r\n\r\n5. Install pFUnit on your system:\r\n\r\n```\r\n make install INSTALL_DIR=$PFUNIT\r\n```\n", "before_files": [{"content": "\"\"\"Classes used to write build system files.\n\nThe classes here are used to write out settings for use by Makefile and CMake\nbuild systems. The two relevant classes are CMakeMacroWriter and\nMakeMacroWriter, which encapsulate the information necessary to write CMake and\nMakefile formatted text, respectively. 
See the docstrings for those classes for\nmore.\n\"\"\"\n\n# This is not the most useful check.\n# pylint: disable=invalid-name\n\nfrom CIME.BuildTools.macrowriterbase import MacroWriterBase\nfrom CIME.XML.standard_module_setup import *\nlogger = logging.getLogger(__name__)\n\n\nclass CMakeMacroWriter(MacroWriterBase):\n\n \"\"\"Macro writer for the CMake format.\n\n For details on the provided methods, see MacroWriterBase, which this\n class inherits from.\n \"\"\"\n\n def __init__(self, output):\n \"\"\"Initialize a CMake macro writer.\n\n Arguments:\n output - File-like object (probably an io.TextIOWrapper), which\n will be written to.\n \"\"\"\n super(CMakeMacroWriter, self).__init__(output)\n # This counter is for avoiding name conflicts in temporary\n # variables used for shell commands.\n self._var_num = 0\n\n def environment_variable_string(self, name):\n \"\"\"Return an environment variable reference.\n\n >>> import io\n >>> s = io.StringIO()\n >>> CMakeMacroWriter(s).environment_variable_string(\"foo\")\n '$ENV{foo}'\n \"\"\"\n return \"$ENV{\" + name + \"}\"\n\n def shell_command_strings(self, command):\n # pylint: disable=line-too-long\n \"\"\"Return strings used to get the output of a shell command.\n\n >>> import io\n >>> s = io.StringIO()\n >>> set_up, inline, tear_down = CMakeMacroWriter(s).shell_command_strings(\"echo bar\")\n >>> set_up\n 'execute_process(COMMAND echo bar OUTPUT_VARIABLE CIME_TEMP_SHELL0 OUTPUT_STRIP_TRAILING_WHITESPACE)'\n >>> inline\n '${CIME_TEMP_SHELL0}'\n >>> tear_down\n 'unset(CIME_TEMP_SHELL0)'\n \"\"\"\n # pylint: enable=line-too-long\n # Create a unique variable name, then increment variable number\n # counter so that we get a different value next time.\n var_name = \"CIME_TEMP_SHELL\" + str(self._var_num)\n self._var_num += 1\n set_up = \"execute_process(COMMAND \" + command + \\\n \" OUTPUT_VARIABLE \" + var_name + \\\n \" OUTPUT_STRIP_TRAILING_WHITESPACE)\"\n tear_down = \"unset(\" + var_name + \")\"\n return (set_up, \"${\" + var_name + \"}\", tear_down)\n\n def variable_string(self, name):\n \"\"\"Return a string to refer to a variable with the given name.\n\n >>> import io\n >>> s = io.StringIO()\n >>> CMakeMacroWriter(s).variable_string(\"foo\")\n '${foo}'\n \"\"\"\n return \"${\" + name + \"}\"\n\n def set_variable(self, name, value):\n \"\"\"Write out a statement setting a variable to some value.\n\n >>> import io\n >>> s = io.StringIO()\n >>> CMakeMacroWriter(s).set_variable(\"foo\", \"bar\")\n >>> s.getvalue()\n u'set(foo \"bar\")\\\\n'\n \"\"\"\n self.write_line(\"set(\" + name + ' \"' + value + '\")')\n\n def start_ifeq(self, left, right):\n \"\"\"Write out a statement to start a conditional block.\n\n >>> import io\n >>> s = io.StringIO()\n >>> CMakeMacroWriter(s).start_ifeq(\"foo\", \"bar\")\n >>> s.getvalue()\n u'if(\"foo\" STREQUAL \"bar\")\\\\n'\n \"\"\"\n self.write_line('if(\"' + left + '\" STREQUAL \"' + right + '\")')\n self.indent_right()\n\n def end_ifeq(self):\n \"\"\"Write out a statement to end a block started with start_ifeq.\n\n >>> import io\n >>> s = io.StringIO()\n >>> writer = CMakeMacroWriter(s)\n >>> writer.start_ifeq(\"foo\", \"bar\")\n >>> writer.set_variable(\"foo2\", \"bar2\")\n >>> writer.end_ifeq()\n >>> s.getvalue()\n u'if(\"foo\" STREQUAL \"bar\")\\\\n set(foo2 \"bar2\")\\\\nendif()\\\\n'\n \"\"\"\n self.indent_left()\n self.write_line(\"endif()\")\n", "path": "scripts/lib/CIME/BuildTools/cmakemacroswriter.py"}]}
2,034
369
gh_patches_debug_31153
rasdani/github-patches
git_diff
cupy__cupy-6121
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `copyto` does not test shape for memcpy cases ### Description There should be a shape check before these: https://github.com/cupy/cupy/blob/e7f2e797216954dd152efe0169ec6a0094d7777d/cupy/_manipulation/basic.py#L54-L55 https://github.com/cupy/cupy/blob/e7f2e797216954dd152efe0169ec6a0094d7777d/cupy/_manipulation/basic.py#L76-L80 ### To Reproduce ```py >>> dst = cupy.ones((2, 3), dtype=int) >>> cupy.copyto(dst, cupy.arange(6)) >>> dst array([[0, 1, 2], [3, 4, 5]]) >>> numpy.copyto(numpy.ones((2, 3), dtype=int), numpy.arange(6)) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "<__array_function__ internals>", line 5, in copyto ValueError: could not broadcast input array from shape (6,) into shape (2,3) ``` ```py >>> cupy.copyto(cupy.empty((3, 0, 4, 5)), cupy.empty((6, 7, 0))) >>> numpy.copyto(numpy.empty((3, 0, 4, 5)), numpy.empty((6, 7, 0))) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "<__array_function__ internals>", line 5, in copyto ValueError: could not broadcast input array from shape (6,7,0) into shape (3,0,4,5) ``` </issue> <code> [start of cupy/_manipulation/basic.py] 1 import numpy 2 3 from cupy import _core 4 from cupy._core import _fusion_interface 5 from cupy._core import fusion 6 from cupy._sorting import search 7 from cupy_backends.cuda.api import runtime 8 9 10 def copyto(dst, src, casting='same_kind', where=None): 11 """Copies values from one array to another with broadcasting. 12 13 This function can be called for arrays on different devices. In this case, 14 casting, ``where``, and broadcasting is not supported, and an exception is 15 raised if these are used. 16 17 Args: 18 dst (cupy.ndarray): Target array. 19 src (cupy.ndarray): Source array. 20 casting (str): Casting rule. See :func:`numpy.can_cast` for detail. 21 where (cupy.ndarray of bool): If specified, this array acts as a mask, 22 and an element is copied only if the corresponding element of 23 ``where`` is True. 24 25 .. seealso:: :func:`numpy.copyto` 26 27 """ 28 29 src_type = type(src) 30 src_is_python_scalar = src_type in ( 31 int, bool, float, complex, 32 fusion._FusionVarScalar, _fusion_interface._ScalarProxy) 33 if src_is_python_scalar: 34 src_dtype = numpy.dtype(type(src)) 35 can_cast = numpy.can_cast(src, dst.dtype, casting) 36 else: 37 src_dtype = src.dtype 38 can_cast = numpy.can_cast(src_dtype, dst.dtype, casting) 39 40 if not can_cast: 41 raise TypeError('Cannot cast %s to %s in %s casting mode' % 42 (src_dtype, dst.dtype, casting)) 43 44 if not src_is_python_scalar and src.ndim > dst.ndim: 45 # NumPy allows stripping leading unit dimensions. 
46 try: 47 src = src.squeeze(tuple(range(src.ndim - dst.ndim))) 48 except ValueError: 49 # "cannot select an axis to squeeze out 50 # which has size not equal to one" 51 pass # raise an error later 52 53 if fusion._is_fusing(): 54 if where is None: 55 _core.elementwise_copy(src, dst) 56 else: 57 fusion._call_ufunc(search._where_ufunc, where, src, dst, dst) 58 return 59 60 if where is not None: 61 _core.elementwise_copy(src, dst, _where=where) 62 return 63 64 if dst.size == 0: 65 return 66 67 if src_is_python_scalar: 68 dst.fill(src) 69 return 70 71 if _can_memcpy(dst, src): 72 dst.data.copy_from_async(src.data, src.nbytes) 73 return 74 75 device = dst.device 76 prev_device = runtime.getDevice() 77 try: 78 runtime.setDevice(device.id) 79 if src.device != device: 80 src = src.copy() 81 _core.elementwise_copy(src, dst) 82 finally: 83 runtime.setDevice(prev_device) 84 85 86 def _can_memcpy(dst, src): 87 c_contiguous = dst.flags.c_contiguous and src.flags.c_contiguous 88 f_contiguous = dst.flags.f_contiguous and src.flags.f_contiguous 89 return (c_contiguous or f_contiguous) and dst.dtype == src.dtype and \ 90 dst.size == src.size 91 [end of cupy/_manipulation/basic.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cupy/_manipulation/basic.py b/cupy/_manipulation/basic.py --- a/cupy/_manipulation/basic.py +++ b/cupy/_manipulation/basic.py @@ -1,3 +1,5 @@ +import itertools + import numpy from cupy import _core @@ -41,22 +43,35 @@ raise TypeError('Cannot cast %s to %s in %s casting mode' % (src_dtype, dst.dtype, casting)) - if not src_is_python_scalar and src.ndim > dst.ndim: - # NumPy allows stripping leading unit dimensions. - try: - src = src.squeeze(tuple(range(src.ndim - dst.ndim))) - except ValueError: - # "cannot select an axis to squeeze out - # which has size not equal to one" - pass # raise an error later - if fusion._is_fusing(): + # TODO(kataoka): NumPy allows stripping leading unit dimensions. + # But fusion array proxy does not currently support + # `shape` and `squeeze`. + if where is None: _core.elementwise_copy(src, dst) else: fusion._call_ufunc(search._where_ufunc, where, src, dst, dst) return + if not src_is_python_scalar: + # Check broadcast condition + # - for fast-paths and + # - for a better error message (than ufunc's). + # NumPy allows stripping leading unit dimensions. + if not all([ + s in (d, 1) + for s, d in itertools.zip_longest( + reversed(src.shape), reversed(dst.shape), fillvalue=1) + ]): + raise ValueError( + "could not broadcast input array " + f"from shape {src.shape} into shape {dst.shape}") + squeeze_ndim = src.ndim - dst.ndim + if squeeze_ndim > 0: + # always succeeds because broadcast conition is checked. + src = src.squeeze(tuple(range(squeeze_ndim))) + if where is not None: _core.elementwise_copy(src, dst, _where=where) return
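The heart of this fix is a pure-Python shape test, so it can be sanity-checked without a GPU or CuPy installed. A sketch re-implementing the check from the diff, replayed against the two failing shapes from the reproduction:

```py
import itertools

def can_broadcast_into(src_shape, dst_shape):
    # NumPy-style rule, aligned from the trailing dimensions: each source
    # dimension must equal the destination dimension or be 1; missing
    # leading dimensions are treated as size 1 via the fillvalue.
    return all(
        s in (d, 1)
        for s, d in itertools.zip_longest(
            reversed(src_shape), reversed(dst_shape), fillvalue=1))

assert not can_broadcast_into((6,), (2, 3))             # first repro case
assert not can_broadcast_into((6, 7, 0), (3, 0, 4, 5))  # second repro case
assert can_broadcast_into((1, 3), (2, 3))               # a legal broadcast
```

Because the patch runs this test before the fast paths, even the zero-size memcpy case from the second reproduction is rejected with the proper error instead of silently succeeding.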
{"golden_diff": "diff --git a/cupy/_manipulation/basic.py b/cupy/_manipulation/basic.py\n--- a/cupy/_manipulation/basic.py\n+++ b/cupy/_manipulation/basic.py\n@@ -1,3 +1,5 @@\n+import itertools\n+\n import numpy\n \n from cupy import _core\n@@ -41,22 +43,35 @@\n raise TypeError('Cannot cast %s to %s in %s casting mode' %\n (src_dtype, dst.dtype, casting))\n \n- if not src_is_python_scalar and src.ndim > dst.ndim:\n- # NumPy allows stripping leading unit dimensions.\n- try:\n- src = src.squeeze(tuple(range(src.ndim - dst.ndim)))\n- except ValueError:\n- # \"cannot select an axis to squeeze out\n- # which has size not equal to one\"\n- pass # raise an error later\n-\n if fusion._is_fusing():\n+ # TODO(kataoka): NumPy allows stripping leading unit dimensions.\n+ # But fusion array proxy does not currently support\n+ # `shape` and `squeeze`.\n+\n if where is None:\n _core.elementwise_copy(src, dst)\n else:\n fusion._call_ufunc(search._where_ufunc, where, src, dst, dst)\n return\n \n+ if not src_is_python_scalar:\n+ # Check broadcast condition\n+ # - for fast-paths and\n+ # - for a better error message (than ufunc's).\n+ # NumPy allows stripping leading unit dimensions.\n+ if not all([\n+ s in (d, 1)\n+ for s, d in itertools.zip_longest(\n+ reversed(src.shape), reversed(dst.shape), fillvalue=1)\n+ ]):\n+ raise ValueError(\n+ \"could not broadcast input array \"\n+ f\"from shape {src.shape} into shape {dst.shape}\")\n+ squeeze_ndim = src.ndim - dst.ndim\n+ if squeeze_ndim > 0:\n+ # always succeeds because broadcast conition is checked.\n+ src = src.squeeze(tuple(range(squeeze_ndim)))\n+\n if where is not None:\n _core.elementwise_copy(src, dst, _where=where)\n return\n", "issue": "`copyto` does not test shape for memcpy cases\n### Description\r\n\r\nThere should be a shape check before these:\r\nhttps://github.com/cupy/cupy/blob/e7f2e797216954dd152efe0169ec6a0094d7777d/cupy/_manipulation/basic.py#L54-L55\r\nhttps://github.com/cupy/cupy/blob/e7f2e797216954dd152efe0169ec6a0094d7777d/cupy/_manipulation/basic.py#L76-L80\r\n\r\n### To Reproduce\r\n\r\n```py\r\n>>> dst = cupy.ones((2, 3), dtype=int)\r\n>>> cupy.copyto(dst, cupy.arange(6))\r\n>>> dst\r\narray([[0, 1, 2],\r\n [3, 4, 5]])\r\n>>> numpy.copyto(numpy.ones((2, 3), dtype=int), numpy.arange(6))\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"<__array_function__ internals>\", line 5, in copyto\r\nValueError: could not broadcast input array from shape (6,) into shape (2,3)\r\n```\r\n\r\n```py\r\n>>> cupy.copyto(cupy.empty((3, 0, 4, 5)), cupy.empty((6, 7, 0)))\r\n>>> numpy.copyto(numpy.empty((3, 0, 4, 5)), numpy.empty((6, 7, 0)))\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"<__array_function__ internals>\", line 5, in copyto\r\nValueError: could not broadcast input array from shape (6,7,0) into shape (3,0,4,5)\r\n```\r\n\n", "before_files": [{"content": "import numpy\n\nfrom cupy import _core\nfrom cupy._core import _fusion_interface\nfrom cupy._core import fusion\nfrom cupy._sorting import search\nfrom cupy_backends.cuda.api import runtime\n\n\ndef copyto(dst, src, casting='same_kind', where=None):\n \"\"\"Copies values from one array to another with broadcasting.\n\n This function can be called for arrays on different devices. In this case,\n casting, ``where``, and broadcasting is not supported, and an exception is\n raised if these are used.\n\n Args:\n dst (cupy.ndarray): Target array.\n src (cupy.ndarray): Source array.\n casting (str): Casting rule. 
See :func:`numpy.can_cast` for detail.\n where (cupy.ndarray of bool): If specified, this array acts as a mask,\n and an element is copied only if the corresponding element of\n ``where`` is True.\n\n .. seealso:: :func:`numpy.copyto`\n\n \"\"\"\n\n src_type = type(src)\n src_is_python_scalar = src_type in (\n int, bool, float, complex,\n fusion._FusionVarScalar, _fusion_interface._ScalarProxy)\n if src_is_python_scalar:\n src_dtype = numpy.dtype(type(src))\n can_cast = numpy.can_cast(src, dst.dtype, casting)\n else:\n src_dtype = src.dtype\n can_cast = numpy.can_cast(src_dtype, dst.dtype, casting)\n\n if not can_cast:\n raise TypeError('Cannot cast %s to %s in %s casting mode' %\n (src_dtype, dst.dtype, casting))\n\n if not src_is_python_scalar and src.ndim > dst.ndim:\n # NumPy allows stripping leading unit dimensions.\n try:\n src = src.squeeze(tuple(range(src.ndim - dst.ndim)))\n except ValueError:\n # \"cannot select an axis to squeeze out\n # which has size not equal to one\"\n pass # raise an error later\n\n if fusion._is_fusing():\n if where is None:\n _core.elementwise_copy(src, dst)\n else:\n fusion._call_ufunc(search._where_ufunc, where, src, dst, dst)\n return\n\n if where is not None:\n _core.elementwise_copy(src, dst, _where=where)\n return\n\n if dst.size == 0:\n return\n\n if src_is_python_scalar:\n dst.fill(src)\n return\n\n if _can_memcpy(dst, src):\n dst.data.copy_from_async(src.data, src.nbytes)\n return\n\n device = dst.device\n prev_device = runtime.getDevice()\n try:\n runtime.setDevice(device.id)\n if src.device != device:\n src = src.copy()\n _core.elementwise_copy(src, dst)\n finally:\n runtime.setDevice(prev_device)\n\n\ndef _can_memcpy(dst, src):\n c_contiguous = dst.flags.c_contiguous and src.flags.c_contiguous\n f_contiguous = dst.flags.f_contiguous and src.flags.f_contiguous\n return (c_contiguous or f_contiguous) and dst.dtype == src.dtype and \\\n dst.size == src.size\n", "path": "cupy/_manipulation/basic.py"}]}
1,818
489
gh_patches_debug_13913
rasdani/github-patches
git_diff
cookiecutter__cookiecutter-655
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Fix the repo regex to handle + in URLs The documentation said that you could do git+https:// or git+ssh:// but that was not, in fact, true. </issue> <code> [start of cookiecutter/main.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 4 """ 5 cookiecutter.main 6 ----------------- 7 8 Main entry point for the `cookiecutter` command. 9 10 The code in this module is also a good example of how to use Cookiecutter as a 11 library rather than a script. 12 """ 13 14 from __future__ import unicode_literals 15 import logging 16 import os 17 import re 18 19 from .config import get_user_config, USER_CONFIG_PATH 20 from .exceptions import InvalidModeException, RepositoryNotFound 21 from .prompt import prompt_for_config 22 from .generate import generate_context, generate_files 23 from .vcs import clone 24 from .replay import dump, load 25 26 logger = logging.getLogger(__name__) 27 28 builtin_abbreviations = { 29 'gh': 'https://github.com/{0}.git', 30 'bb': 'https://bitbucket.org/{0}', 31 } 32 33 REPO_REGEX = """ 34 ( 35 ((git|ssh|https|http):(//)?) # something like git:// ssh:// etc. 36 | # or 37 (\w+@[\w\.]+) # something like user@... 38 ) 39 .* 40 """ 41 42 43 def is_repo_url(value): 44 """Return True if value is a repository URL.""" 45 return bool(re.match(REPO_REGEX, value, re.VERBOSE)) 46 47 48 def expand_abbreviations(template, config_dict): 49 """ 50 Expand abbreviations in a template name. 51 52 :param template: The project template name. 53 :param config_dict: The user config, which will contain abbreviation 54 definitions. 55 """ 56 57 abbreviations = builtin_abbreviations.copy() 58 abbreviations.update(config_dict.get('abbreviations', {})) 59 60 if template in abbreviations: 61 return abbreviations[template] 62 63 # Split on colon. If there is no colon, rest will be empty 64 # and prefix will be the whole template 65 prefix, sep, rest = template.partition(':') 66 if prefix in abbreviations: 67 return abbreviations[prefix].format(rest) 68 69 return template 70 71 72 def cookiecutter( 73 template, checkout=None, no_input=False, extra_context=None, 74 replay=False, overwrite_if_exists=False, output_dir='.', 75 config_file=USER_CONFIG_PATH): 76 """ 77 API equivalent to using Cookiecutter at the command line. 78 79 :param template: A directory containing a project template directory, 80 or a URL to a git repository. 81 :param checkout: The branch, tag or commit ID to checkout after clone. 82 :param no_input: Prompt the user at command line for manual configuration? 83 :param extra_context: A dictionary of context that overrides default 84 and user configuration. 85 :param: overwrite_if_exists: Overwrite the contents of output directory 86 if it exists 87 :param output_dir: Where to output the generated project dir into. 88 :param config_file: User configuration file path. 89 """ 90 if replay and ((no_input is not False) or (extra_context is not None)): 91 err_msg = ( 92 "You can not use both replay and no_input or extra_context " 93 "at the same time." 
94 ) 95 raise InvalidModeException(err_msg) 96 97 # Get user config from ~/.cookiecutterrc or equivalent 98 # If no config file, sensible defaults from config.DEFAULT_CONFIG are used 99 config_dict = get_user_config(config_file=config_file) 100 101 template = expand_abbreviations(template, config_dict) 102 103 if is_repo_url(template): 104 repo_dir = clone( 105 repo_url=template, 106 checkout=checkout, 107 clone_to_dir=config_dict['cookiecutters_dir'], 108 no_input=no_input 109 ) 110 else: 111 # If it's a local repo, no need to clone or copy to your 112 # cookiecutters_dir 113 repo_dir = template 114 115 if not os.path.isdir(repo_dir): 116 raise RepositoryNotFound( 117 'The repository {0} could not be located.'.format(template) 118 ) 119 120 template_name = os.path.basename(template) 121 122 if replay: 123 context = load(config_dict['replay_dir'], template_name) 124 else: 125 context_file = os.path.join(repo_dir, 'cookiecutter.json') 126 logging.debug('context_file is {0}'.format(context_file)) 127 128 context = generate_context( 129 context_file=context_file, 130 default_context=config_dict['default_context'], 131 extra_context=extra_context, 132 ) 133 134 # prompt the user to manually configure at the command line. 135 # except when 'no-input' flag is set 136 context['cookiecutter'] = prompt_for_config(context, no_input) 137 138 dump(config_dict['replay_dir'], template_name, context) 139 140 # Create project from local context and project template. 141 return generate_files( 142 repo_dir=repo_dir, 143 context=context, 144 overwrite_if_exists=overwrite_if_exists, 145 output_dir=output_dir 146 ) 147 [end of cookiecutter/main.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cookiecutter/main.py b/cookiecutter/main.py --- a/cookiecutter/main.py +++ b/cookiecutter/main.py @@ -30,19 +30,18 @@ 'bb': 'https://bitbucket.org/{0}', } -REPO_REGEX = """ -( -((git|ssh|https|http):(//)?) # something like git:// ssh:// etc. - | # or - (\w+@[\w\.]+) # something like user@... +REPO_REGEX = re.compile(r""" +(?x) +((((git|hg)\+)?(git|ssh|https?):(//)?) # something like git:// ssh:// etc. + | # or + (\w+@[\w\.]+) # something like user@... ) -.* -""" +""") def is_repo_url(value): """Return True if value is a repository URL.""" - return bool(re.match(REPO_REGEX, value, re.VERBOSE)) + return bool(REPO_REGEX.match(value)) def expand_abbreviations(template, config_dict):
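A standalone exercise of the amended pattern against the URL forms from the issue. One caveat: this sketch passes `re.VERBOSE` as a flag argument instead of the inline `(?x)` used in the patch, because recent Python versions reject global inline flags that are not at the very start of the pattern; the sample URLs are illustrative, not from cookiecutter's test suite:

```py
import re

REPO_REGEX = re.compile(r"""
((((git|hg)\+)?(git|ssh|https?):(//)?)  # something like git:// ssh:// etc.
 |                                      # or
 (\w+@[\w\.]+)                          # something like user@...
)
""", re.VERBOSE)

def is_repo_url(value):
    return bool(REPO_REGEX.match(value))

assert is_repo_url('git+https://github.com/audreyr/cookiecutter.git')
assert is_repo_url('git+ssh://git@example.com/repo.git')
assert is_repo_url('https://github.com/audreyr/cookiecutter.git')
assert not is_repo_url('cookiecutter-pypackage/')  # plain local path
```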
{"golden_diff": "diff --git a/cookiecutter/main.py b/cookiecutter/main.py\n--- a/cookiecutter/main.py\n+++ b/cookiecutter/main.py\n@@ -30,19 +30,18 @@\n 'bb': 'https://bitbucket.org/{0}',\n }\n \n-REPO_REGEX = \"\"\"\n-(\n-((git|ssh|https|http):(//)?) # something like git:// ssh:// etc.\n- | # or\n- (\\w+@[\\w\\.]+) # something like user@...\n+REPO_REGEX = re.compile(r\"\"\"\n+(?x)\n+((((git|hg)\\+)?(git|ssh|https?):(//)?) # something like git:// ssh:// etc.\n+ | # or\n+ (\\w+@[\\w\\.]+) # something like user@...\n )\n-.*\n-\"\"\"\n+\"\"\")\n \n \n def is_repo_url(value):\n \"\"\"Return True if value is a repository URL.\"\"\"\n- return bool(re.match(REPO_REGEX, value, re.VERBOSE))\n+ return bool(REPO_REGEX.match(value))\n \n \n def expand_abbreviations(template, config_dict):\n", "issue": "Fix the repo regex to handle + in URLs\nThe documentation said that you could do git+https:// or git+ssh:// but that was not, in fact, true.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.main\n-----------------\n\nMain entry point for the `cookiecutter` command.\n\nThe code in this module is also a good example of how to use Cookiecutter as a\nlibrary rather than a script.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport logging\nimport os\nimport re\n\nfrom .config import get_user_config, USER_CONFIG_PATH\nfrom .exceptions import InvalidModeException, RepositoryNotFound\nfrom .prompt import prompt_for_config\nfrom .generate import generate_context, generate_files\nfrom .vcs import clone\nfrom .replay import dump, load\n\nlogger = logging.getLogger(__name__)\n\nbuiltin_abbreviations = {\n 'gh': 'https://github.com/{0}.git',\n 'bb': 'https://bitbucket.org/{0}',\n}\n\nREPO_REGEX = \"\"\"\n(\n((git|ssh|https|http):(//)?) # something like git:// ssh:// etc.\n | # or\n (\\w+@[\\w\\.]+) # something like user@...\n)\n.*\n\"\"\"\n\n\ndef is_repo_url(value):\n \"\"\"Return True if value is a repository URL.\"\"\"\n return bool(re.match(REPO_REGEX, value, re.VERBOSE))\n\n\ndef expand_abbreviations(template, config_dict):\n \"\"\"\n Expand abbreviations in a template name.\n\n :param template: The project template name.\n :param config_dict: The user config, which will contain abbreviation\n definitions.\n \"\"\"\n\n abbreviations = builtin_abbreviations.copy()\n abbreviations.update(config_dict.get('abbreviations', {}))\n\n if template in abbreviations:\n return abbreviations[template]\n\n # Split on colon. 
If there is no colon, rest will be empty\n # and prefix will be the whole template\n prefix, sep, rest = template.partition(':')\n if prefix in abbreviations:\n return abbreviations[prefix].format(rest)\n\n return template\n\n\ndef cookiecutter(\n template, checkout=None, no_input=False, extra_context=None,\n replay=False, overwrite_if_exists=False, output_dir='.',\n config_file=USER_CONFIG_PATH):\n \"\"\"\n API equivalent to using Cookiecutter at the command line.\n\n :param template: A directory containing a project template directory,\n or a URL to a git repository.\n :param checkout: The branch, tag or commit ID to checkout after clone.\n :param no_input: Prompt the user at command line for manual configuration?\n :param extra_context: A dictionary of context that overrides default\n and user configuration.\n :param: overwrite_if_exists: Overwrite the contents of output directory\n if it exists\n :param output_dir: Where to output the generated project dir into.\n :param config_file: User configuration file path.\n \"\"\"\n if replay and ((no_input is not False) or (extra_context is not None)):\n err_msg = (\n \"You can not use both replay and no_input or extra_context \"\n \"at the same time.\"\n )\n raise InvalidModeException(err_msg)\n\n # Get user config from ~/.cookiecutterrc or equivalent\n # If no config file, sensible defaults from config.DEFAULT_CONFIG are used\n config_dict = get_user_config(config_file=config_file)\n\n template = expand_abbreviations(template, config_dict)\n\n if is_repo_url(template):\n repo_dir = clone(\n repo_url=template,\n checkout=checkout,\n clone_to_dir=config_dict['cookiecutters_dir'],\n no_input=no_input\n )\n else:\n # If it's a local repo, no need to clone or copy to your\n # cookiecutters_dir\n repo_dir = template\n\n if not os.path.isdir(repo_dir):\n raise RepositoryNotFound(\n 'The repository {0} could not be located.'.format(template)\n )\n\n template_name = os.path.basename(template)\n\n if replay:\n context = load(config_dict['replay_dir'], template_name)\n else:\n context_file = os.path.join(repo_dir, 'cookiecutter.json')\n logging.debug('context_file is {0}'.format(context_file))\n\n context = generate_context(\n context_file=context_file,\n default_context=config_dict['default_context'],\n extra_context=extra_context,\n )\n\n # prompt the user to manually configure at the command line.\n # except when 'no-input' flag is set\n context['cookiecutter'] = prompt_for_config(context, no_input)\n\n dump(config_dict['replay_dir'], template_name, context)\n\n # Create project from local context and project template.\n return generate_files(\n repo_dir=repo_dir,\n context=context,\n overwrite_if_exists=overwrite_if_exists,\n output_dir=output_dir\n )\n", "path": "cookiecutter/main.py"}]}
1,932
247
gh_patches_debug_20096
rasdani/github-patches
git_diff
liqd__adhocracy4-1005
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> comments: make sure blocked comments are not searched or filtered for their category **URL:** **user:** **expected behaviour:** when I search for sth. in the comments, I do not expect to find blocked comments, of which I cannot see the comment text **behaviour:** because the search happens in the backend, blocked comments are also searched, while the comment list from the serializer does not show the comment text of searched comments **important screensize:** **device & browser:** **Comment/Question:** Screenshot? </issue> <code> [start of adhocracy4/comments_async/filters.py] 1 from django.db import models 2 from rest_framework.filters import BaseFilterBackend 3 from rest_framework.filters import SearchFilter 4 5 6 class CommentCategoryFilterBackend(BaseFilterBackend): 7 """Filter the comments for the categories.""" 8 9 def filter_queryset(self, request, queryset, view): 10 11 if 'comment_category' in request.GET: 12 category = request.GET['comment_category'] 13 return queryset.filter(comment_categories__contains=category) 14 15 return queryset 16 17 18 class CommentOrderingFilterBackend(BaseFilterBackend): 19 """Order the comments.""" 20 21 def filter_queryset(self, request, queryset, view): 22 23 if 'ordering' in request.GET: 24 ordering = request.GET['ordering'] 25 26 if ordering == 'new': 27 return queryset.order_by('-created') 28 elif ordering == 'ans': 29 queryset = queryset\ 30 .annotate(comment_count=models.Count( 31 'child_comments', distinct=True)) 32 return queryset.order_by('-comment_count', '-created') 33 elif ordering == 'pos': 34 queryset = queryset\ 35 .annotate(positive_rating_count=models.Count( 36 models.Case( 37 models.When( 38 ratings__value=1, 39 then=models.F('ratings__id') 40 ), 41 output_field=models.IntegerField() 42 ), 43 distinct=True)) 44 return queryset.order_by('-positive_rating_count', '-created') 45 elif ordering == 'neg': 46 queryset = queryset\ 47 .annotate(negative_rating_count=models.Count( 48 models.Case( 49 models.When( 50 ratings__value=-1, 51 then=models.F('ratings__id') 52 ), 53 output_field=models.IntegerField() 54 ), 55 distinct=True)) 56 return queryset.order_by('-negative_rating_count', '-created') 57 elif ordering == 'dis': 58 return queryset.order_by( 59 models.F('last_discussed').desc(nulls_last=True), 60 '-created' 61 ) 62 elif ordering == 'mom': 63 return queryset.order_by('-is_moderator_marked', '-created') 64 65 return queryset 66 67 68 class CustomSearchFilter(SearchFilter): 69 70 def filter_queryset(self, request, queryset, view): 71 qs = super().filter_queryset(request, queryset, view) 72 if self.get_search_terms(request): 73 return qs.filter(is_removed=False, is_censored=False) 74 return qs 75 [end of adhocracy4/comments_async/filters.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/adhocracy4/comments_async/filters.py b/adhocracy4/comments_async/filters.py --- a/adhocracy4/comments_async/filters.py +++ b/adhocracy4/comments_async/filters.py @@ -8,9 +8,12 @@ def filter_queryset(self, request, queryset, view): - if 'comment_category' in request.GET: + if ('comment_category' in request.GET + and request.GET['comment_category'] != ''): category = request.GET['comment_category'] - return queryset.filter(comment_categories__contains=category) + return queryset.filter( + comment_categories__contains=category, + is_blocked=False) return queryset @@ -70,5 +73,6 @@ def filter_queryset(self, request, queryset, view): qs = super().filter_queryset(request, queryset, view) if self.get_search_terms(request): - return qs.filter(is_removed=False, is_censored=False) + return qs.filter(is_removed=False, is_censored=False, + is_blocked=False) return qs
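The contract the patch establishes is simpler than the Django plumbing around it suggests. A toy model with dataclass stand-ins (the comment objects and category code are invented for the example) showing what the category filter now guarantees:

```py
from dataclasses import dataclass

@dataclass
class Comment:
    text: str
    comment_categories: str
    is_blocked: bool

comments = [
    Comment('visible remark', 'QUE', False),
    Comment('hidden remark', 'QUE', True),   # blocked: text is never shown
]

def filter_by_category(items, category):
    # Mirrors the patched backend: ignore empty filters, and exclude
    # blocked comments so their hidden text cannot be matched.
    if not category:
        return list(items)
    return [c for c in items
            if category in c.comment_categories and not c.is_blocked]

assert [c.text for c in filter_by_category(comments, 'QUE')] == ['visible remark']
assert len(filter_by_category(comments, '')) == 2  # empty filter: no-op
```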
{"golden_diff": "diff --git a/adhocracy4/comments_async/filters.py b/adhocracy4/comments_async/filters.py\n--- a/adhocracy4/comments_async/filters.py\n+++ b/adhocracy4/comments_async/filters.py\n@@ -8,9 +8,12 @@\n \n def filter_queryset(self, request, queryset, view):\n \n- if 'comment_category' in request.GET:\n+ if ('comment_category' in request.GET\n+ and request.GET['comment_category'] != ''):\n category = request.GET['comment_category']\n- return queryset.filter(comment_categories__contains=category)\n+ return queryset.filter(\n+ comment_categories__contains=category,\n+ is_blocked=False)\n \n return queryset\n \n@@ -70,5 +73,6 @@\n def filter_queryset(self, request, queryset, view):\n qs = super().filter_queryset(request, queryset, view)\n if self.get_search_terms(request):\n- return qs.filter(is_removed=False, is_censored=False)\n+ return qs.filter(is_removed=False, is_censored=False,\n+ is_blocked=False)\n return qs\n", "issue": "comments: make sure blocked comments are not searched or filtered for their category\n**URL:** \r\n**user:** \r\n**expected behaviour:** when I search for sth. in the comments, I do not expect to find blocked comments, of which I cannot see the comment text\r\n**behaviour:** because the search happens in the backend, blocked comments are also searched, while the comment list from the serializer does not show the comment text of searched comments\r\n**important screensize:**\r\n**device & browser:** \r\n**Comment/Question:** \r\n\r\nScreenshot?\r\n\n", "before_files": [{"content": "from django.db import models\nfrom rest_framework.filters import BaseFilterBackend\nfrom rest_framework.filters import SearchFilter\n\n\nclass CommentCategoryFilterBackend(BaseFilterBackend):\n \"\"\"Filter the comments for the categories.\"\"\"\n\n def filter_queryset(self, request, queryset, view):\n\n if 'comment_category' in request.GET:\n category = request.GET['comment_category']\n return queryset.filter(comment_categories__contains=category)\n\n return queryset\n\n\nclass CommentOrderingFilterBackend(BaseFilterBackend):\n \"\"\"Order the comments.\"\"\"\n\n def filter_queryset(self, request, queryset, view):\n\n if 'ordering' in request.GET:\n ordering = request.GET['ordering']\n\n if ordering == 'new':\n return queryset.order_by('-created')\n elif ordering == 'ans':\n queryset = queryset\\\n .annotate(comment_count=models.Count(\n 'child_comments', distinct=True))\n return queryset.order_by('-comment_count', '-created')\n elif ordering == 'pos':\n queryset = queryset\\\n .annotate(positive_rating_count=models.Count(\n models.Case(\n models.When(\n ratings__value=1,\n then=models.F('ratings__id')\n ),\n output_field=models.IntegerField()\n ),\n distinct=True))\n return queryset.order_by('-positive_rating_count', '-created')\n elif ordering == 'neg':\n queryset = queryset\\\n .annotate(negative_rating_count=models.Count(\n models.Case(\n models.When(\n ratings__value=-1,\n then=models.F('ratings__id')\n ),\n output_field=models.IntegerField()\n ),\n distinct=True))\n return queryset.order_by('-negative_rating_count', '-created')\n elif ordering == 'dis':\n return queryset.order_by(\n models.F('last_discussed').desc(nulls_last=True),\n '-created'\n )\n elif ordering == 'mom':\n return queryset.order_by('-is_moderator_marked', '-created')\n\n return queryset\n\n\nclass CustomSearchFilter(SearchFilter):\n\n def filter_queryset(self, request, queryset, view):\n qs = super().filter_queryset(request, queryset, view)\n if self.get_search_terms(request):\n return 
qs.filter(is_removed=False, is_censored=False)\n return qs\n", "path": "adhocracy4/comments_async/filters.py"}]}
1,264
238
gh_patches_debug_10648
rasdani/github-patches
git_diff
zestedesavoir__zds-site-5449
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue>
On-demand export does not work
More precisely, the Markdown export does not work when the "Exporter le contenu" (export content) button is clicked, whereas it does work at publication time.
</issue>
<code>
[start of zds/tutorialv2/api/views.py]
1 import contextlib
2 from pathlib import Path
3 
4 from django.http import Http404
5 from django.utils import translation
6 from django.utils.translation import gettext as _
7 from rest_framework import status
8 from rest_framework.fields import empty
9 from rest_framework.generics import UpdateAPIView, ListCreateAPIView, get_object_or_404
10 from rest_framework.response import Response
11 from rest_framework.serializers import Serializer, CharField, BooleanField
12 from rest_framework.permissions import IsAuthenticatedOrReadOnly
13 
14 from zds.member.api.permissions import CanReadAndWriteNowOrReadOnly, IsNotOwnerOrReadOnly, IsAuthorOrStaff
15 from zds.tutorialv2.publication_utils import PublicatorRegistry
16 from zds.tutorialv2.utils import search_container_or_404
17 from zds.utils.api.views import KarmaView
18 from zds.tutorialv2.models.database import ContentReaction, PublishableContent, PublicationEvent
19 
20 
21 class ContainerReadinessSerializer(Serializer):
22     parent_container_slug = CharField(allow_blank=True, allow_null=True, required=False)
23     container_slug = CharField(required=True)
24     ready_to_publish = BooleanField(required=True)
25 
26     def run_validation(self, data=empty):
27         init = super().run_validation(data)
28         if not init:
29             return init
30         if not data.get('parent_container_slug', ''):
31             init.pop('parent_container_slug', '')
32         return init
33 
34     def save(self, **kwargs):
35         if not self.validated_data:
36             self.is_valid(True)
37         versioned = self.instance.load_version()
38         container = search_container_or_404(versioned, self.validated_data)
39         container.ready_to_publish = self.validated_data['ready_to_publish']
40         sha = versioned.repo_update(versioned.title, versioned.get_introduction(), versioned.get_conclusion(),
41                                     commit_message=_('{} est {} à la publication.').format(
42                                         container.get_path(True),
43                                         _('prêt') if container.ready_to_publish else _('ignoré')))
44         PublishableContent.objects.filter(pk=self.instance.pk).update(sha_draft=sha)
45 
46     def to_representation(self, instance):
47         return {}
48 
49 
50 class ContentReactionKarmaView(KarmaView):
51     queryset = ContentReaction.objects.all()
52     permission_classes = (IsAuthenticatedOrReadOnly, CanReadAndWriteNowOrReadOnly, IsNotOwnerOrReadOnly)
53 
54 
55 class ContainerPublicationReadinessView(UpdateAPIView):
56     permission_classes = (IsAuthorOrStaff, )
57     serializer_class = ContainerReadinessSerializer
58 
59     def get_object(self):
60         content = PublishableContent.objects.prefetch_related('authors')\
61             .filter(pk=int(self.kwargs.get('pk', 0)))\
62             .first()
63         if not content:
64             raise Http404()
65         self.check_object_permissions(self.request, object)
66         return content
67 
68 
69 class ExportView(ListCreateAPIView):
70     permission_classes = (IsAuthorOrStaff,)
71     serializer_class = Serializer
72 
73     def get_queryset(self):
74         return PublicationEvent.objects.filter(published_object__content__pk=self.kwargs.get('pk', 0))
75 
76     def ensure_directories(self, content: PublishableContent):
77         final_directory = Path(content.public_version.get_extra_contents_directory())
78         building_directory = Path(str(final_directory.parent) + '__building', final_directory.name)
79         with 
contextlib.suppress(FileExistsError): 80 final_directory.mkdir(parents=True) 81 with contextlib.suppress(FileExistsError): 82 building_directory.mkdir(parents=True) 83 return building_directory, final_directory 84 85 def create(self, request, *args, **kwargs): 86 try: 87 publishable_content = get_object_or_404(PublishableContent.objects, pk=int(kwargs.get('pk'))) 88 if not publishable_content.public_version: 89 raise Http404('Not public content') 90 tmp_dir, _ = self.ensure_directories(publishable_content) 91 versioned = publishable_content.public_version.load_public_version() 92 base_name = str(Path(tmp_dir, versioned.slug)) 93 md_file_path = str(Path(tmp_dir, versioned.slug + '.md')) 94 95 PublicatorRegistry.get('md').publish(md_file_path, base_name, 96 versioned=versioned, 97 cur_language=translation.get_language()) 98 PublicatorRegistry.get('watchdog').publish_from_published_content(publishable_content.public_version) 99 except ValueError: 100 return Response({}, status=status.HTTP_400_BAD_REQUEST, headers={}) 101 else: 102 return Response({}, status=status.HTTP_201_CREATED, headers={}) 103 [end of zds/tutorialv2/api/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/zds/tutorialv2/api/views.py b/zds/tutorialv2/api/views.py --- a/zds/tutorialv2/api/views.py +++ b/zds/tutorialv2/api/views.py @@ -88,7 +88,7 @@ if not publishable_content.public_version: raise Http404('Not public content') tmp_dir, _ = self.ensure_directories(publishable_content) - versioned = publishable_content.public_version.load_public_version() + versioned = publishable_content.load_version(public=True) base_name = str(Path(tmp_dir, versioned.slug)) md_file_path = str(Path(tmp_dir, versioned.slug + '.md'))
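The corrected `load_version(public=True)` call needs the full zds stack to exercise, but the staging-directory step that surrounds it is plain `pathlib` and can be tried on its own. A sketch lifting `ensure_directories` out of the view, with a temporary directory standing in for the real extra-contents path:

```py
import contextlib
import tempfile
from pathlib import Path

def ensure_directories(final_directory: Path):
    # Same layout as ExportView.ensure_directories: a sibling '__building'
    # staging tree next to the final extra-contents directory.
    building_directory = Path(str(final_directory.parent) + '__building',
                              final_directory.name)
    with contextlib.suppress(FileExistsError):
        final_directory.mkdir(parents=True)
    with contextlib.suppress(FileExistsError):
        building_directory.mkdir(parents=True)
    return building_directory, final_directory

with tempfile.TemporaryDirectory() as tmp:
    building, final = ensure_directories(Path(tmp, 'extra', 'my-tutorial'))
    assert building.exists() and final.exists()
    assert building.parent.name.endswith('__building')
```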
{"golden_diff": "diff --git a/zds/tutorialv2/api/views.py b/zds/tutorialv2/api/views.py\n--- a/zds/tutorialv2/api/views.py\n+++ b/zds/tutorialv2/api/views.py\n@@ -88,7 +88,7 @@\n if not publishable_content.public_version:\n raise Http404('Not public content')\n tmp_dir, _ = self.ensure_directories(publishable_content)\n- versioned = publishable_content.public_version.load_public_version()\n+ versioned = publishable_content.load_version(public=True)\n base_name = str(Path(tmp_dir, versioned.slug))\n md_file_path = str(Path(tmp_dir, versioned.slug + '.md'))\n", "issue": "L'export \u00e0 la demande ne fonctionne pas\nPlus pr\u00e9cisement, l'export Markdown ne fonctionne pas quand on clique sur le bouton \"Exporter le contenu\" alors qu'il fonctionne lors de la publication.\n", "before_files": [{"content": "import contextlib\nfrom pathlib import Path\n\nfrom django.http import Http404\nfrom django.utils import translation\nfrom django.utils.translation import gettext as _\nfrom rest_framework import status\nfrom rest_framework.fields import empty\nfrom rest_framework.generics import UpdateAPIView, ListCreateAPIView, get_object_or_404\nfrom rest_framework.response import Response\nfrom rest_framework.serializers import Serializer, CharField, BooleanField\nfrom rest_framework.permissions import IsAuthenticatedOrReadOnly\n\nfrom zds.member.api.permissions import CanReadAndWriteNowOrReadOnly, IsNotOwnerOrReadOnly, IsAuthorOrStaff\nfrom zds.tutorialv2.publication_utils import PublicatorRegistry\nfrom zds.tutorialv2.utils import search_container_or_404\nfrom zds.utils.api.views import KarmaView\nfrom zds.tutorialv2.models.database import ContentReaction, PublishableContent, PublicationEvent\n\n\nclass ContainerReadinessSerializer(Serializer):\n parent_container_slug = CharField(allow_blank=True, allow_null=True, required=False)\n container_slug = CharField(required=True)\n ready_to_publish = BooleanField(required=True)\n\n def run_validation(self, data=empty):\n init = super().run_validation(data)\n if not init:\n return init\n if not data.get('parent_container_slug', ''):\n init.pop('parent_container_slug', '')\n return init\n\n def save(self, **kwargs):\n if not self.validated_data:\n self.is_valid(True)\n versioned = self.instance.load_version()\n container = search_container_or_404(versioned, self.validated_data)\n container.ready_to_publish = self.validated_data['ready_to_publish']\n sha = versioned.repo_update(versioned.title, versioned.get_introduction(), versioned.get_conclusion(),\n commit_message=_('{} est {} \u00e0 la publication.').format(\n container.get_path(True),\n _('pr\u00eat') if container.ready_to_publish else _('ignor\u00e9')))\n PublishableContent.objects.filter(pk=self.instance.pk).update(sha_draft=sha)\n\n def to_representation(self, instance):\n return {}\n\n\nclass ContentReactionKarmaView(KarmaView):\n queryset = ContentReaction.objects.all()\n permission_classes = (IsAuthenticatedOrReadOnly, CanReadAndWriteNowOrReadOnly, IsNotOwnerOrReadOnly)\n\n\nclass ContainerPublicationReadinessView(UpdateAPIView):\n permission_classes = (IsAuthorOrStaff, )\n serializer_class = ContainerReadinessSerializer\n\n def get_object(self):\n content = PublishableContent.objects.prefetch_related('authors')\\\n .filter(pk=int(self.kwargs.get('pk', 0)))\\\n .first()\n if not content:\n raise Http404()\n self.check_object_permissions(self.request, object)\n return content\n\n\nclass ExportView(ListCreateAPIView):\n permission_classes = (IsAuthorOrStaff,)\n serializer_class = Serializer\n\n def 
get_queryset(self):\n return PublicationEvent.objects.filter(published_object__content__pk=self.kwargs.get('pk', 0))\n\n def ensure_directories(self, content: PublishableContent):\n final_directory = Path(content.public_version.get_extra_contents_directory())\n building_directory = Path(str(final_directory.parent) + '__building', final_directory.name)\n with contextlib.suppress(FileExistsError):\n final_directory.mkdir(parents=True)\n with contextlib.suppress(FileExistsError):\n building_directory.mkdir(parents=True)\n return building_directory, final_directory\n\n def create(self, request, *args, **kwargs):\n try:\n publishable_content = get_object_or_404(PublishableContent.objects, pk=int(kwargs.get('pk')))\n if not publishable_content.public_version:\n raise Http404('Not public content')\n tmp_dir, _ = self.ensure_directories(publishable_content)\n versioned = publishable_content.public_version.load_public_version()\n base_name = str(Path(tmp_dir, versioned.slug))\n md_file_path = str(Path(tmp_dir, versioned.slug + '.md'))\n\n PublicatorRegistry.get('md').publish(md_file_path, base_name,\n versioned=versioned,\n cur_language=translation.get_language())\n PublicatorRegistry.get('watchdog').publish_from_published_content(publishable_content.public_version)\n except ValueError:\n return Response({}, status=status.HTTP_400_BAD_REQUEST, headers={})\n else:\n return Response({}, status=status.HTTP_201_CREATED, headers={})\n", "path": "zds/tutorialv2/api/views.py"}]}
1,726
149
gh_patches_debug_10271
rasdani/github-patches
git_diff
PaddlePaddle__Paddle2ONNX-15
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue>
Need to check whether protoc exists.
</issue>
<code>
[start of variables.py]
1 # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 #     http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 
15 from onnx import helper, onnx_pb2, TensorProto
16 import paddle.fluid.core as core
17 
18 
19 def paddle_variable_to_onnx_tensor(paddle_var_name, block):
20     # TODO(varunarora): Need to do this only in the case of VarType.LOD_TENSOR.
21     paddle_var = block.var(paddle_var_name)
22     return helper.make_tensor_value_info(
23         paddle_var_name, PADDLE_TO_ONNX_DTYPE[paddle_var.dtype],
24         paddle_var.shape)
25 
26 
27 PADDLE_TO_ONNX_DTYPE = {
28     core.VarDesc.VarType.FP32: onnx_pb2.TensorProto.FLOAT,
29     core.VarDesc.VarType.FP64: onnx_pb2.TensorProto.FLOAT16,
30     # '': onnx_pb2.TensorProto.DOUBLE,
31     core.VarDesc.VarType.INT32: onnx_pb2.TensorProto.INT32,
32     core.VarDesc.VarType.INT16: onnx_pb2.TensorProto.INT16,
33     # '': onnx_pb2.TensorProto.INT8,
34     # '': onnx_pb2.TensorProto.UINT8,
35     core.VarDesc.VarType.INT16: onnx_pb2.TensorProto.UINT16,
36     core.VarDesc.VarType.INT64: onnx_pb2.TensorProto.INT64,
37     # '': onnx_pb2.TensorProto.STRING,
38     # '': onnx_pb2.TensorProto.COMPLEX64,
39     # '': onnx_pb2.TensorProto.COMPLEX128,
40     core.VarDesc.VarType.BOOL: onnx_pb2.TensorProto.BOOL
41 }
[end of variables.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
 def euclidean(a, b):
-    while b:
-        a, b = b, a % b
-    return a
+    if b == 0:
+        return a
+    return euclidean(b, a % b)
 
 
 def bresenham(x0, y0, x1, y1):
     points = []
     dx = abs(x1 - x0)
     dy = abs(y1 - y0)
-    sx = 1 if x0 < x1 else -1
-    sy = 1 if y0 < y1 else -1
-    err = dx - dy
+    x, y = x0, y0
+    sx = -1 if x0 > x1 else 1
+    sy = -1 if y0 > y1 else 1
 
-    while True:
-        points.append((x0, y0))
-        if x0 == x1 and y0 == y1:
-            break
-        e2 = 2 * err
-        if e2 > -dy:
-            err -= dy
-            x0 += sx
-        if e2 < dx:
-            err += dx
-            y0 += sy
+    if dx > dy:
+        err = dx / 2.0
+        while x != x1:
+            points.append((x, y))
+            err -= dy
+            if err < 0:
+                y += sy
+                err += dx
+            x += sx
+    else:
+        err = dy / 2.0
+        while y != y1:
+            points.append((x, y))
+            err -= dx
+            if err < 0:
+                x += sx
+                err += dy
+            y += sy
+
+    points.append((x, y))
     return points
</patch>
diff --git a/variables.py b/variables.py --- a/variables.py +++ b/variables.py @@ -19,9 +19,9 @@ def paddle_variable_to_onnx_tensor(paddle_var_name, block): # TODO(varunarora): Need to do this only in the case of VarType.LOD_TENSOR. paddle_var = block.var(paddle_var_name) - return helper.make_tensor_value_info( - paddle_var_name, PADDLE_TO_ONNX_DTYPE[paddle_var.dtype], - paddle_var.shape) + return helper.make_tensor_value_info(paddle_var_name, + PADDLE_TO_ONNX_DTYPE[paddle_var.dtype], + paddle_var.shape) PADDLE_TO_ONNX_DTYPE = {
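Note the golden diff only reflows the `make_tensor_value_info` call; the guard the issue actually asks for is not part of it. A sketch of that missing check using only the standard library — the function name and error message are illustrative, not Paddle2ONNX API:

```py
import shutil
import subprocess

def require_protoc():
    # shutil.which performs the same PATH lookup a shell would.
    if shutil.which('protoc') is None:
        raise RuntimeError(
            'protoc was not found on PATH; install protobuf before building.')
    # Surface the version so build logs record what was used.
    return subprocess.check_output(['protoc', '--version']).decode().strip()
```

Calling such a guard early in the build script turns a confusing protobuf failure later on into an actionable message up front.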
{"golden_diff": "diff --git a/variables.py b/variables.py\n--- a/variables.py\n+++ b/variables.py\n@@ -19,9 +19,9 @@\n def paddle_variable_to_onnx_tensor(paddle_var_name, block):\n # TODO(varunarora): Need to do this only in the case of VarType.LOD_TENSOR.\n paddle_var = block.var(paddle_var_name)\n- return helper.make_tensor_value_info(\n- paddle_var_name, PADDLE_TO_ONNX_DTYPE[paddle_var.dtype],\n- paddle_var.shape)\n+ return helper.make_tensor_value_info(paddle_var_name,\n+ PADDLE_TO_ONNX_DTYPE[paddle_var.dtype],\n+ paddle_var.shape)\n \n \n PADDLE_TO_ONNX_DTYPE = {\n", "issue": "Need check whether protoc existed.\n\n", "before_files": [{"content": "# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom onnx import helper, onnx_pb2, TensorProto\nimport paddle.fluid.core as core\n\n\ndef paddle_variable_to_onnx_tensor(paddle_var_name, block):\n # TODO(varunarora): Need to do this only in the case of VarType.LOD_TENSOR.\n paddle_var = block.var(paddle_var_name)\n return helper.make_tensor_value_info(\n paddle_var_name, PADDLE_TO_ONNX_DTYPE[paddle_var.dtype],\n paddle_var.shape)\n\n\nPADDLE_TO_ONNX_DTYPE = {\n core.VarDesc.VarType.FP32: onnx_pb2.TensorProto.FLOAT,\n core.VarDesc.VarType.FP64: onnx_pb2.TensorProto.FLOAT16,\n # '': onnx_pb2.TensorProto.DOUBLE,\n core.VarDesc.VarType.INT32: onnx_pb2.TensorProto.INT32,\n core.VarDesc.VarType.INT16: onnx_pb2.TensorProto.INT16,\n # '': onnx_pb2.TensorProto.INT8,\n # '': onnx_pb2.TensorProto.UINT8,\n core.VarDesc.VarType.INT16: onnx_pb2.TensorProto.UINT16,\n core.VarDesc.VarType.INT64: onnx_pb2.TensorProto.INT64,\n # '': onnx_pb2.TensorProto.STRING,\n # '': onnx_pb2.TensorProto.COMPLEX64,\n # '': onnx_pb2.TensorProto.COMPLEX128,\n core.VarDesc.VarType.BOOL: onnx_pb2.TensorProto.BOOL\n}\n", "path": "variables.py"}]}
1,091
164
gh_patches_debug_9971
rasdani/github-patches
git_diff
pretix__pretix-882
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> rich_text: tel schema When providing email links using the mailto schema in richtext description like `[Email us!](mailto:[email protected])`, resulting in [Email us!](mailto:[email protected]), Pretix creates the correct `<a>` tag. However, users also use their mobile phone. It would be awesome to also be able to use the `tel` schema like `[Call us!](tel:+1-202-555-0102)`. At the moment, pretix just creates an `<a>` tag without an `href`, so the Telephone app is not opened. </issue> <code> [start of src/pretix/base/templatetags/rich_text.py] 1 import urllib.parse 2 3 import bleach 4 import markdown 5 from bleach import DEFAULT_CALLBACKS 6 from django import template 7 from django.conf import settings 8 from django.core import signing 9 from django.urls import reverse 10 from django.utils.http import is_safe_url 11 from django.utils.safestring import mark_safe 12 13 register = template.Library() 14 15 ALLOWED_TAGS = [ 16 'a', 17 'abbr', 18 'acronym', 19 'b', 20 'blockquote', 21 'br', 22 'code', 23 'em', 24 'i', 25 'li', 26 'ol', 27 'strong', 28 'ul', 29 'p', 30 'table', 31 'tbody', 32 'thead', 33 'tr', 34 'td', 35 'th', 36 'div', 37 'span', 38 'hr', 39 'h1', 40 'h2', 41 'h3', 42 'h4', 43 'h5', 44 'h6', 45 'pre', 46 # Update doc/user/markdown.rst if you change this! 47 ] 48 49 ALLOWED_ATTRIBUTES = { 50 'a': ['href', 'title'], 51 'abbr': ['title'], 52 'acronym': ['title'], 53 'table': ['width'], 54 'td': ['width', 'align'], 55 'div': ['class'], 56 'p': ['class'], 57 'span': ['class'], 58 # Update doc/user/markdown.rst if you change this! 59 } 60 61 62 def safelink_callback(attrs, new=False): 63 url = attrs.get((None, 'href'), '/') 64 if not is_safe_url(url) and not url.startswith('mailto:'): 65 signer = signing.Signer(salt='safe-redirect') 66 attrs[None, 'href'] = reverse('redirect') + '?url=' + urllib.parse.quote(signer.sign(url)) 67 attrs[None, 'target'] = '_blank' 68 attrs[None, 'rel'] = 'noopener' 69 return attrs 70 71 72 def abslink_callback(attrs, new=False): 73 attrs[None, 'href'] = urllib.parse.urljoin(settings.SITE_URL, attrs.get((None, 'href'), '/')) 74 attrs[None, 'target'] = '_blank' 75 attrs[None, 'rel'] = 'noopener' 76 return attrs 77 78 79 def markdown_compile(source): 80 return bleach.clean( 81 markdown.markdown( 82 source, 83 extensions=[ 84 'markdown.extensions.sane_lists', 85 # 'markdown.extensions.nl2br', # TODO: Enable, but check backwards-compatibility issues e.g. with mails 86 ] 87 ), 88 tags=ALLOWED_TAGS, 89 attributes=ALLOWED_ATTRIBUTES 90 ) 91 92 93 @register.filter 94 def rich_text(text: str, **kwargs): 95 """ 96 Processes markdown and cleans HTML in a text input. 97 """ 98 text = str(text) 99 body_md = bleach.linkify( 100 markdown_compile(text), 101 callbacks=DEFAULT_CALLBACKS + ([safelink_callback] if kwargs.get('safelinks', True) else [abslink_callback]) 102 ) 103 return mark_safe(body_md) 104 [end of src/pretix/base/templatetags/rich_text.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/pretix/base/templatetags/rich_text.py b/src/pretix/base/templatetags/rich_text.py --- a/src/pretix/base/templatetags/rich_text.py +++ b/src/pretix/base/templatetags/rich_text.py @@ -58,6 +58,8 @@ # Update doc/user/markdown.rst if you change this! } +ALLOWED_PROTOCOLS = ['http', 'https', 'mailto', 'tel'] + def safelink_callback(attrs, new=False): url = attrs.get((None, 'href'), '/') @@ -86,7 +88,8 @@ ] ), tags=ALLOWED_TAGS, - attributes=ALLOWED_ATTRIBUTES + attributes=ALLOWED_ATTRIBUTES, + protocols=ALLOWED_PROTOCOLS, )
{"golden_diff": "diff --git a/src/pretix/base/templatetags/rich_text.py b/src/pretix/base/templatetags/rich_text.py\n--- a/src/pretix/base/templatetags/rich_text.py\n+++ b/src/pretix/base/templatetags/rich_text.py\n@@ -58,6 +58,8 @@\n # Update doc/user/markdown.rst if you change this!\n }\n \n+ALLOWED_PROTOCOLS = ['http', 'https', 'mailto', 'tel']\n+\n \n def safelink_callback(attrs, new=False):\n url = attrs.get((None, 'href'), '/')\n@@ -86,7 +88,8 @@\n ]\n ),\n tags=ALLOWED_TAGS,\n- attributes=ALLOWED_ATTRIBUTES\n+ attributes=ALLOWED_ATTRIBUTES,\n+ protocols=ALLOWED_PROTOCOLS,\n )\n", "issue": "rich_text: tel schema\nWhen providing email links using the mailto schema in richtext description like `[Email us!](mailto:[email protected])`, resulting in [Email us!](mailto:[email protected]), Pretix creates the correct `<a>` tag. However, users also use their mobile phone. It would be awesome to also be able to use the `tel` schema like `[Call us!](tel:+1-202-555-0102)`. At the moment, pretix just creates an `<a>` tag without an `href`, so the Telephone app is not opened.\n", "before_files": [{"content": "import urllib.parse\n\nimport bleach\nimport markdown\nfrom bleach import DEFAULT_CALLBACKS\nfrom django import template\nfrom django.conf import settings\nfrom django.core import signing\nfrom django.urls import reverse\nfrom django.utils.http import is_safe_url\nfrom django.utils.safestring import mark_safe\n\nregister = template.Library()\n\nALLOWED_TAGS = [\n 'a',\n 'abbr',\n 'acronym',\n 'b',\n 'blockquote',\n 'br',\n 'code',\n 'em',\n 'i',\n 'li',\n 'ol',\n 'strong',\n 'ul',\n 'p',\n 'table',\n 'tbody',\n 'thead',\n 'tr',\n 'td',\n 'th',\n 'div',\n 'span',\n 'hr',\n 'h1',\n 'h2',\n 'h3',\n 'h4',\n 'h5',\n 'h6',\n 'pre',\n # Update doc/user/markdown.rst if you change this!\n]\n\nALLOWED_ATTRIBUTES = {\n 'a': ['href', 'title'],\n 'abbr': ['title'],\n 'acronym': ['title'],\n 'table': ['width'],\n 'td': ['width', 'align'],\n 'div': ['class'],\n 'p': ['class'],\n 'span': ['class'],\n # Update doc/user/markdown.rst if you change this!\n}\n\n\ndef safelink_callback(attrs, new=False):\n url = attrs.get((None, 'href'), '/')\n if not is_safe_url(url) and not url.startswith('mailto:'):\n signer = signing.Signer(salt='safe-redirect')\n attrs[None, 'href'] = reverse('redirect') + '?url=' + urllib.parse.quote(signer.sign(url))\n attrs[None, 'target'] = '_blank'\n attrs[None, 'rel'] = 'noopener'\n return attrs\n\n\ndef abslink_callback(attrs, new=False):\n attrs[None, 'href'] = urllib.parse.urljoin(settings.SITE_URL, attrs.get((None, 'href'), '/'))\n attrs[None, 'target'] = '_blank'\n attrs[None, 'rel'] = 'noopener'\n return attrs\n\n\ndef markdown_compile(source):\n return bleach.clean(\n markdown.markdown(\n source,\n extensions=[\n 'markdown.extensions.sane_lists',\n # 'markdown.extensions.nl2br', # TODO: Enable, but check backwards-compatibility issues e.g. with mails\n ]\n ),\n tags=ALLOWED_TAGS,\n attributes=ALLOWED_ATTRIBUTES\n )\n\n\[email protected]\ndef rich_text(text: str, **kwargs):\n \"\"\"\n Processes markdown and cleans HTML in a text input.\n \"\"\"\n text = str(text)\n body_md = bleach.linkify(\n markdown_compile(text),\n callbacks=DEFAULT_CALLBACKS + ([safelink_callback] if kwargs.get('safelinks', True) else [abslink_callback])\n )\n return mark_safe(body_md)\n", "path": "src/pretix/base/templatetags/rich_text.py"}]}
1,533
189
gh_patches_debug_9859
rasdani/github-patches
git_diff
aimhubio__aim-3112
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [fix] Serve files linked into the static files directory In certain conda environments, the files in the static files root directory can be symlinked from a different place. The fixed implementation only resolves relative .. segments in the request path without resolving any symlinks. This way, it still prevents reading arbitrary files through the web server while allowing the reading of symlinked files. </issue> <code> [start of aim/web/api/views.py] 1 import os 2 from pathlib import Path 3 4 from fastapi import HTTPException, Request 5 from fastapi.responses import FileResponse, HTMLResponse 6 7 from aim.web.api.utils import APIRouter # wrapper for fastapi.APIRouter 8 from aim.web.configs import AIM_UI_BASE_PATH 9 10 statics_router = APIRouter() 11 12 13 @statics_router.get('/static-files/{path:path}/') 14 async def serve_static_files(path): 15 import aim_ui 16 17 static_files_root = Path(aim_ui.__file__).parent / 'build' 18 # Normalize to resolve any .. segments 19 static_file_name = os.path.normpath(static_files_root / path) 20 21 # Ensure that no paths outside the root directory are accessed by checking that the 22 # root directory is a prefix of the file path 23 common_prefix = Path(os.path.commonpath([static_files_root, static_file_name])) 24 if common_prefix == static_files_root: 25 raise HTTPException(status_code=404) 26 27 compressed_file_name = Path(f'{static_file_name}.gz') 28 if compressed_file_name.exists(): 29 return FileResponse(compressed_file_name, headers={'Content-Encoding': 'gzip'}) 30 return FileResponse(static_file_name) 31 32 33 # do not change the placement of this method 34 # as it also serves as a fallback for wrong url routes 35 @statics_router.get('/{path:path}/', response_class=HTMLResponse) 36 async def serve_index_html(request: Request): 37 import aim_ui 38 from jinja2 import Environment, FileSystemLoader 39 40 template_files_dir = os.path.join(os.path.dirname(aim_ui.__file__), 'build') 41 env = Environment( 42 loader=FileSystemLoader(template_files_dir), 43 autoescape=True 44 ) 45 template = env.get_template('index-template.html') 46 base_path = os.environ.get(AIM_UI_BASE_PATH, '') 47 return template.render(base_path=base_path) 48 [end of aim/web/api/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/aim/web/api/views.py b/aim/web/api/views.py --- a/aim/web/api/views.py +++ b/aim/web/api/views.py @@ -21,7 +21,7 @@ # Ensure that no paths outside the root directory are accessed by checking that the # root directory is a prefix of the file path common_prefix = Path(os.path.commonpath([static_files_root, static_file_name])) - if common_prefix == static_files_root: + if common_prefix != static_files_root: raise HTTPException(status_code=404) compressed_file_name = Path(f'{static_file_name}.gz')
{"golden_diff": "diff --git a/aim/web/api/views.py b/aim/web/api/views.py\n--- a/aim/web/api/views.py\n+++ b/aim/web/api/views.py\n@@ -21,7 +21,7 @@\n # Ensure that no paths outside the root directory are accessed by checking that the\n # root directory is a prefix of the file path\n common_prefix = Path(os.path.commonpath([static_files_root, static_file_name]))\n- if common_prefix == static_files_root:\n+ if common_prefix != static_files_root:\n raise HTTPException(status_code=404)\n \n compressed_file_name = Path(f'{static_file_name}.gz')\n", "issue": "[fix] Serve files linked into the static files directory\nIn certain conda environments, the files in the static files root directory can be symlinked from a different place. The fixed implementation only resolves relative .. segments in the request path without resolving any symlinks. This way, it still prevents reading arbitrary files through the web server while allowing the reading of symlinked files.\n", "before_files": [{"content": "import os\nfrom pathlib import Path\n\nfrom fastapi import HTTPException, Request\nfrom fastapi.responses import FileResponse, HTMLResponse\n\nfrom aim.web.api.utils import APIRouter # wrapper for fastapi.APIRouter\nfrom aim.web.configs import AIM_UI_BASE_PATH\n\nstatics_router = APIRouter()\n\n\n@statics_router.get('/static-files/{path:path}/')\nasync def serve_static_files(path):\n import aim_ui\n\n static_files_root = Path(aim_ui.__file__).parent / 'build'\n # Normalize to resolve any .. segments\n static_file_name = os.path.normpath(static_files_root / path)\n\n # Ensure that no paths outside the root directory are accessed by checking that the\n # root directory is a prefix of the file path\n common_prefix = Path(os.path.commonpath([static_files_root, static_file_name]))\n if common_prefix == static_files_root:\n raise HTTPException(status_code=404)\n\n compressed_file_name = Path(f'{static_file_name}.gz')\n if compressed_file_name.exists():\n return FileResponse(compressed_file_name, headers={'Content-Encoding': 'gzip'})\n return FileResponse(static_file_name)\n\n\n# do not change the placement of this method\n# as it also serves as a fallback for wrong url routes\n@statics_router.get('/{path:path}/', response_class=HTMLResponse)\nasync def serve_index_html(request: Request):\n import aim_ui\n from jinja2 import Environment, FileSystemLoader\n\n template_files_dir = os.path.join(os.path.dirname(aim_ui.__file__), 'build')\n env = Environment(\n loader=FileSystemLoader(template_files_dir),\n autoescape=True\n )\n template = env.get_template('index-template.html')\n base_path = os.environ.get(AIM_UI_BASE_PATH, '')\n return template.render(base_path=base_path)\n", "path": "aim/web/api/views.py"}]}
1,093
142
gh_patches_debug_2972
rasdani/github-patches
git_diff
pyodide__pyodide-325
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ValueError: invalid __array_struct__ when using js arrays of arrays and numpy When using a matrix (array of array of numbers) in javascript and trying to convert that to a numpy array, it fails with the error `ValueError: invalid __array_struct__` To reproduce: JavaScript: ``` window.A = [[1,2,3],[4,5,6]]; ``` Python: ``` import numpy from js import A m = numpy.array(A) ``` </issue> <code> [start of src/pyodide.py] 1 """ 2 A library of helper utilities for connecting Python to the browser environment. 3 """ 4 5 import ast 6 import io 7 from textwrap import dedent 8 9 __version__ = '0.8.2' 10 11 12 def open_url(url): 13 """ 14 Fetches a given *url* and returns a io.StringIO to access its contents. 15 """ 16 from js import XMLHttpRequest 17 18 req = XMLHttpRequest.new() 19 req.open('GET', url, False) 20 req.send(None) 21 return io.StringIO(req.response) 22 23 24 def eval_code(code, ns): 25 """ 26 Runs a string of code, the last part of which may be an expression. 27 """ 28 # handle mis-indented input from multi-line strings 29 code = dedent(code) 30 31 mod = ast.parse(code) 32 if len(mod.body) == 0: 33 return None 34 35 if isinstance(mod.body[-1], ast.Expr): 36 expr = ast.Expression(mod.body[-1].value) 37 del mod.body[-1] 38 else: 39 expr = None 40 41 if len(mod.body): 42 exec(compile(mod, '<exec>', mode='exec'), ns, ns) 43 if expr is not None: 44 return eval(compile(expr, '<eval>', mode='eval'), ns, ns) 45 else: 46 return None 47 48 49 def find_imports(code): 50 """ 51 Finds the imports in a string of code and returns a list of their package 52 names. 53 """ 54 # handle mis-indented input from multi-line strings 55 code = dedent(code) 56 57 mod = ast.parse(code) 58 imports = set() 59 for node in ast.walk(mod): 60 if isinstance(node, ast.Import): 61 for name in node.names: 62 name = name.name 63 imports.add(name.split('.')[0]) 64 elif isinstance(node, ast.ImportFrom): 65 name = node.module 66 imports.add(name.split('.')[0]) 67 return list(imports) 68 69 70 __all__ = ['open_url', 'eval_code', 'find_imports'] 71 [end of src/pyodide.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/pyodide.py b/src/pyodide.py --- a/src/pyodide.py +++ b/src/pyodide.py @@ -67,4 +67,16 @@ return list(imports) -__all__ = ['open_url', 'eval_code', 'find_imports'] +def as_nested_list(obj): + """ + Assumes a Javascript object is made of (possibly nested) arrays and + converts them to nested Python lists. + """ + try: + it = iter(obj) + return [as_nested_list(x) for x in it] + except TypeError: + return obj + + +__all__ = ['open_url', 'eval_code', 'find_imports', 'as_nested_list']
{"golden_diff": "diff --git a/src/pyodide.py b/src/pyodide.py\n--- a/src/pyodide.py\n+++ b/src/pyodide.py\n@@ -67,4 +67,16 @@\n return list(imports)\n \n \n-__all__ = ['open_url', 'eval_code', 'find_imports']\n+def as_nested_list(obj):\n+ \"\"\"\n+ Assumes a Javascript object is made of (possibly nested) arrays and\n+ converts them to nested Python lists.\n+ \"\"\"\n+ try:\n+ it = iter(obj)\n+ return [as_nested_list(x) for x in it]\n+ except TypeError:\n+ return obj\n+\n+\n+__all__ = ['open_url', 'eval_code', 'find_imports', 'as_nested_list']\n", "issue": "ValueError: invalid __array_struct__ when using js arrays of arrays and numpy\nWhen using a matrix (array of array of numbers) in javascript and trying to convert that to a numpy array, it fails with the error `ValueError: invalid __array_struct__`\r\n\r\nTo reproduce:\r\nJavaScript:\r\n```\r\nwindow.A = [[1,2,3],[4,5,6]];\r\n```\r\nPython:\r\n```\r\nimport numpy\r\nfrom js import A\r\nm = numpy.array(A)\r\n```\n", "before_files": [{"content": "\"\"\"\nA library of helper utilities for connecting Python to the browser environment.\n\"\"\"\n\nimport ast\nimport io\nfrom textwrap import dedent\n\n__version__ = '0.8.2'\n\n\ndef open_url(url):\n \"\"\"\n Fetches a given *url* and returns a io.StringIO to access its contents.\n \"\"\"\n from js import XMLHttpRequest\n\n req = XMLHttpRequest.new()\n req.open('GET', url, False)\n req.send(None)\n return io.StringIO(req.response)\n\n\ndef eval_code(code, ns):\n \"\"\"\n Runs a string of code, the last part of which may be an expression.\n \"\"\"\n # handle mis-indented input from multi-line strings\n code = dedent(code)\n\n mod = ast.parse(code)\n if len(mod.body) == 0:\n return None\n\n if isinstance(mod.body[-1], ast.Expr):\n expr = ast.Expression(mod.body[-1].value)\n del mod.body[-1]\n else:\n expr = None\n\n if len(mod.body):\n exec(compile(mod, '<exec>', mode='exec'), ns, ns)\n if expr is not None:\n return eval(compile(expr, '<eval>', mode='eval'), ns, ns)\n else:\n return None\n\n\ndef find_imports(code):\n \"\"\"\n Finds the imports in a string of code and returns a list of their package\n names.\n \"\"\"\n # handle mis-indented input from multi-line strings\n code = dedent(code)\n\n mod = ast.parse(code)\n imports = set()\n for node in ast.walk(mod):\n if isinstance(node, ast.Import):\n for name in node.names:\n name = name.name\n imports.add(name.split('.')[0])\n elif isinstance(node, ast.ImportFrom):\n name = node.module\n imports.add(name.split('.')[0])\n return list(imports)\n\n\n__all__ = ['open_url', 'eval_code', 'find_imports']\n", "path": "src/pyodide.py"}]}
1,193
169
gh_patches_debug_13249
rasdani/github-patches
git_diff
streamlit__streamlit-7256
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> pydeck_chart: Error (not a valid JSON) when data contains NaN ### Checklist - [X] I have searched the [existing issues](https://github.com/streamlit/streamlit/issues) for similar issues. - [X] I added a very descriptive title to this issue. - [X] I have provided sufficient information below to help reproduce this issue. ### Summary If the data used in pydeck contains NaN (even if it is not used), the application fails with ![image](https://user-images.githubusercontent.com/25638902/205078988-992fdce1-a6b8-4183-9237-5cd4e358e2ff.png) ### Reproducible Code Example ```Python import math, pandas, pydeck, streamlit streamlit.set_page_config(layout="wide") data = pandas.DataFrame({"lng": [-109.037673], "lat": [36.994672], "weight": [math.nan]}) layer = pydeck.Layer("ScatterplotLayer", data=data, get_position=["lng", "lat"], radius_min_pixels=4) deck = pydeck.Deck(layers=[layer], map_style=pydeck.map_styles.CARTO_LIGHT, tooltip={"text": "weight: {weight}"}) deck.to_html("test.html") streamlit.pydeck_chart(deck, use_container_width=True) ``` ### Steps To Reproduce Run the code as usual. ### Expected Behavior No error, as in the file ```test.html``` generated ### Current Behavior SyntaxError: Unexpected token 'N', ...""weight": NaN "... is not valid JSON ### Is this a regression? - [ ] Yes, this used to work in a previous version. ### Debug info - Streamlit version: Streamlit v1.15.1 - Python version: 3.10.4 (tags/v3.10.4:9d38120, Mar 23 2022, 23:13:41) [MSC v.1929 64 bit (AMD64)] - Operating System: Windows 10.0.19045.2251 - Browser: Chome, Opera, Edge, Firefox - Virtual environment: poetry ### Additional Information _No response_ ### Are you willing to submit a PR? - [ ] Yes, I am willing to submit a PR! </issue> <code> [start of e2e/scripts/st_pydeck_chart.py] 1 # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022) 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from typing import Any, cast 16 17 import numpy as np 18 import pandas as pd 19 import pydeck as pdk 20 21 import streamlit as st 22 23 # Empty chart. 24 25 st.pydeck_chart() 26 27 # Basic chart. 
28 29 np.random.seed(12345) 30 31 df = pd.DataFrame( 32 cast(Any, np.random.randn(1000, 2) / [50, 50]) + [37.76, -122.4], 33 columns=["lat", "lon"], 34 ) 35 36 st.pydeck_chart( 37 pdk.Deck( 38 map_style="mapbox://styles/mapbox/light-v9", 39 initial_view_state=pdk.ViewState( 40 latitude=37.76, 41 longitude=-122.4, 42 zoom=11, 43 pitch=50, 44 ), 45 layers=[ 46 pdk.Layer( 47 "HexagonLayer", 48 data=df, 49 get_position="[lon, lat]", 50 radius=200, 51 elevation_scale=4, 52 elevation_range=[0, 1000], 53 pickable=True, 54 extruded=True, 55 ), 56 pdk.Layer( 57 "ScatterplotLayer", 58 data=df, 59 get_position="[lon, lat]", 60 get_color="[200, 30, 0, 160]", 61 get_radius=200, 62 ), 63 ], 64 ) 65 ) 66 [end of e2e/scripts/st_pydeck_chart.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/e2e/scripts/st_pydeck_chart.py b/e2e/scripts/st_pydeck_chart.py --- a/e2e/scripts/st_pydeck_chart.py +++ b/e2e/scripts/st_pydeck_chart.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import math from typing import Any, cast import numpy as np @@ -63,3 +64,15 @@ ], ) ) + +# Chart w/ invalid JSON - issue #5799. +data = pd.DataFrame({"lng": [-109.037673], "lat": [36.994672], "weight": [math.nan]}) +layer = pdk.Layer( + "ScatterplotLayer", data=data, get_position=["lng", "lat"], radius_min_pixels=4 +) +deck = pdk.Deck( + layers=[layer], + map_style=pdk.map_styles.CARTO_LIGHT, + tooltip={"text": "weight: {weight}"}, +) +st.pydeck_chart(deck, use_container_width=True)
{"golden_diff": "diff --git a/e2e/scripts/st_pydeck_chart.py b/e2e/scripts/st_pydeck_chart.py\n--- a/e2e/scripts/st_pydeck_chart.py\n+++ b/e2e/scripts/st_pydeck_chart.py\n@@ -12,6 +12,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import math\n from typing import Any, cast\n \n import numpy as np\n@@ -63,3 +64,15 @@\n ],\n )\n )\n+\n+# Chart w/ invalid JSON - issue #5799.\n+data = pd.DataFrame({\"lng\": [-109.037673], \"lat\": [36.994672], \"weight\": [math.nan]})\n+layer = pdk.Layer(\n+ \"ScatterplotLayer\", data=data, get_position=[\"lng\", \"lat\"], radius_min_pixels=4\n+)\n+deck = pdk.Deck(\n+ layers=[layer],\n+ map_style=pdk.map_styles.CARTO_LIGHT,\n+ tooltip={\"text\": \"weight: {weight}\"},\n+)\n+st.pydeck_chart(deck, use_container_width=True)\n", "issue": "pydeck_chart: Error (not a valid JSON) when data contains NaN\n### Checklist\r\n\r\n- [X] I have searched the [existing issues](https://github.com/streamlit/streamlit/issues) for similar issues.\r\n- [X] I added a very descriptive title to this issue.\r\n- [X] I have provided sufficient information below to help reproduce this issue.\r\n\r\n### Summary\r\n\r\nIf the data used in pydeck contains NaN (even if it is not used), the application fails with \r\n![image](https://user-images.githubusercontent.com/25638902/205078988-992fdce1-a6b8-4183-9237-5cd4e358e2ff.png)\r\n\r\n\r\n### Reproducible Code Example\r\n\r\n```Python\r\nimport math, pandas, pydeck, streamlit\r\n\r\nstreamlit.set_page_config(layout=\"wide\")\r\ndata = pandas.DataFrame({\"lng\": [-109.037673], \"lat\": [36.994672], \"weight\": [math.nan]})\r\nlayer = pydeck.Layer(\"ScatterplotLayer\", data=data, get_position=[\"lng\", \"lat\"], radius_min_pixels=4)\r\ndeck = pydeck.Deck(layers=[layer], map_style=pydeck.map_styles.CARTO_LIGHT, tooltip={\"text\": \"weight: {weight}\"})\r\ndeck.to_html(\"test.html\")\r\nstreamlit.pydeck_chart(deck, use_container_width=True)\r\n```\r\n\r\n\r\n### Steps To Reproduce\r\n\r\nRun the code as usual.\r\n\r\n### Expected Behavior\r\n\r\nNo error, as in the file ```test.html``` generated\r\n\r\n### Current Behavior\r\n\r\nSyntaxError:\r\nUnexpected token 'N', ...\"\"weight\": NaN \"... is not valid JSON\r\n\r\n\r\n### Is this a regression?\r\n\r\n- [ ] Yes, this used to work in a previous version.\r\n\r\n### Debug info\r\n\r\n- Streamlit version: Streamlit v1.15.1\r\n- Python version: 3.10.4 (tags/v3.10.4:9d38120, Mar 23 2022, 23:13:41) [MSC v.1929 64 bit (AMD64)]\r\n- Operating System: Windows 10.0.19045.2251\r\n- Browser: Chome, Opera, Edge, Firefox\r\n- Virtual environment: poetry\r\n\r\n\r\n### Additional Information\r\n\r\n_No response_\r\n\r\n### Are you willing to submit a PR?\r\n\r\n- [ ] Yes, I am willing to submit a PR!\n", "before_files": [{"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. 
(2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Any, cast\n\nimport numpy as np\nimport pandas as pd\nimport pydeck as pdk\n\nimport streamlit as st\n\n# Empty chart.\n\nst.pydeck_chart()\n\n# Basic chart.\n\nnp.random.seed(12345)\n\ndf = pd.DataFrame(\n cast(Any, np.random.randn(1000, 2) / [50, 50]) + [37.76, -122.4],\n columns=[\"lat\", \"lon\"],\n)\n\nst.pydeck_chart(\n pdk.Deck(\n map_style=\"mapbox://styles/mapbox/light-v9\",\n initial_view_state=pdk.ViewState(\n latitude=37.76,\n longitude=-122.4,\n zoom=11,\n pitch=50,\n ),\n layers=[\n pdk.Layer(\n \"HexagonLayer\",\n data=df,\n get_position=\"[lon, lat]\",\n radius=200,\n elevation_scale=4,\n elevation_range=[0, 1000],\n pickable=True,\n extruded=True,\n ),\n pdk.Layer(\n \"ScatterplotLayer\",\n data=df,\n get_position=\"[lon, lat]\",\n get_color=\"[200, 30, 0, 160]\",\n get_radius=200,\n ),\n ],\n )\n)\n", "path": "e2e/scripts/st_pydeck_chart.py"}]}
1,667
258
gh_patches_debug_24568
rasdani/github-patches
git_diff
mdn__kuma-6829
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> T - Fix post-sign-ins redirect so that user lands on correct page **Summary** You're not taken back to where you were when you sign in. At least via the modal and GitHub. ![Screen-Recording-2020-04-03-at-2 31 33-PM](https://user-images.githubusercontent.com/26739/78393569-167dd900-75b8-11ea-9d78-3bf0ca358bb2.gif) **Steps To Reproduce (STR)** 1. Go to some page other than the home page. Be not-signed in. 2. Click sign in. 3. Click GitHub and complete your GitHub auth stuff **Actual behavior** I ended up back on the home page `/en-US/`. :( **Expected behavior** To be taken to the page I was originally on. **Additional context** Pretty sure this is regression but don't know if it started when we switched to a modal. </issue> <code> [start of kuma/core/context_processors.py] 1 from urllib.parse import urlparse 2 3 from constance import config 4 from django.conf import settings 5 from django.utils import translation 6 7 from .i18n import get_language_mapping 8 9 10 def global_settings(request): 11 """Adds settings to the context.""" 12 13 def clean_safe_url(url): 14 if "://" not in url: 15 # E.g. 'elasticsearch:9200' 16 url = "http://" + url 17 parsed = urlparse(url) 18 if "@" in parsed.netloc: 19 parsed = parsed._replace( 20 netloc="username:secret@" + parsed.netloc.split("@")[-1] 21 ) 22 return parsed.geturl() 23 24 # TODO: Ideally, GOOGLE_ANALYTICS_ACCOUNT is only set in settings (from 25 # an environment variable) but for safe transition, we rely on 26 # constance if it hasn't been put into settings yet. 27 # Once we know with confidence, that GOOGLE_ANALYTICS_ACCOUNT is set 28 # and a valid value in the environment (for production!) then we 29 # can delete these lines of code. 30 # See https://bugzilla.mozilla.org/show_bug.cgi?id=1570076 31 google_analytics_account = getattr(settings, "GOOGLE_ANALYTICS_ACCOUNT", None) 32 if google_analytics_account is None: 33 if config.GOOGLE_ANALYTICS_ACCOUNT != "0": 34 settings.GOOGLE_ANALYTICS_ACCOUNT = config.GOOGLE_ANALYTICS_ACCOUNT 35 36 return { 37 "settings": settings, 38 # Because the 'settings.ES_URLS' might contain the username:password 39 # it's never appropriate to display in templates. So clean them up. 40 # But return it as a lambda so it only executes if really needed. 41 "safe_es_urls": lambda: [clean_safe_url(x) for x in settings.ES_URLS], 42 } 43 44 45 def i18n(request): 46 return { 47 "LANGUAGES": get_language_mapping(), 48 "LANG": ( 49 settings.LANGUAGE_URL_MAP.get(translation.get_language()) 50 or translation.get_language() 51 ), 52 "DIR": "rtl" if translation.get_language_bidi() else "ltr", 53 } 54 55 56 def next_url(request): 57 if ( 58 hasattr(request, "path") 59 and "login" not in request.path 60 and "register" not in request.path 61 ): 62 return {"next_url": request.get_full_path()} 63 return {} 64 [end of kuma/core/context_processors.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kuma/core/context_processors.py b/kuma/core/context_processors.py --- a/kuma/core/context_processors.py +++ b/kuma/core/context_processors.py @@ -4,6 +4,8 @@ from django.conf import settings from django.utils import translation +from kuma.core.urlresolvers import reverse + from .i18n import get_language_mapping @@ -54,10 +56,30 @@ def next_url(request): - if ( - hasattr(request, "path") - and "login" not in request.path - and "register" not in request.path - ): - return {"next_url": request.get_full_path()} - return {} + """Return a function by the same name as the context processor. + That means, in the jinja templates, instead of doing + + {% set url = next_url %} + + you just have to do: + + {% set url = next_url() %} + + which means that the actual context processor function isn't executed + every single time any jinja template is rendered. Now, only if the + context processor is actually needed, it gets executed. + + See https://www.peterbe.com/plog/closure-django-context-processors + """ + + def inner(): + if hasattr(request, "path"): + if request.GET.get("next"): + if "://" not in request.GET["next"]: + return request.GET["next"] + elif reverse(settings.LOGIN_URL) != request.get_full_path(): + # The only exception is the sign-in landing page which you get to + # if you can't use the auth modal. + return request.get_full_path() + + return {"next_url": inner}
{"golden_diff": "diff --git a/kuma/core/context_processors.py b/kuma/core/context_processors.py\n--- a/kuma/core/context_processors.py\n+++ b/kuma/core/context_processors.py\n@@ -4,6 +4,8 @@\n from django.conf import settings\n from django.utils import translation\n \n+from kuma.core.urlresolvers import reverse\n+\n from .i18n import get_language_mapping\n \n \n@@ -54,10 +56,30 @@\n \n \n def next_url(request):\n- if (\n- hasattr(request, \"path\")\n- and \"login\" not in request.path\n- and \"register\" not in request.path\n- ):\n- return {\"next_url\": request.get_full_path()}\n- return {}\n+ \"\"\"Return a function by the same name as the context processor.\n+ That means, in the jinja templates, instead of doing\n+\n+ {% set url = next_url %}\n+\n+ you just have to do:\n+\n+ {% set url = next_url() %}\n+\n+ which means that the actual context processor function isn't executed\n+ every single time any jinja template is rendered. Now, only if the\n+ context processor is actually needed, it gets executed.\n+\n+ See https://www.peterbe.com/plog/closure-django-context-processors\n+ \"\"\"\n+\n+ def inner():\n+ if hasattr(request, \"path\"):\n+ if request.GET.get(\"next\"):\n+ if \"://\" not in request.GET[\"next\"]:\n+ return request.GET[\"next\"]\n+ elif reverse(settings.LOGIN_URL) != request.get_full_path():\n+ # The only exception is the sign-in landing page which you get to\n+ # if you can't use the auth modal.\n+ return request.get_full_path()\n+\n+ return {\"next_url\": inner}\n", "issue": "T - Fix post-sign-ins redirect so that user lands on correct page\n**Summary**\r\nYou're not taken back to where you were when you sign in. At least via the modal and GitHub.\r\n\r\n![Screen-Recording-2020-04-03-at-2 31 33-PM](https://user-images.githubusercontent.com/26739/78393569-167dd900-75b8-11ea-9d78-3bf0ca358bb2.gif)\r\n\r\n\r\n**Steps To Reproduce (STR)**\r\n\r\n\r\n1. Go to some page other than the home page. Be not-signed in.\r\n2. Click sign in. \r\n3. Click GitHub and complete your GitHub auth stuff\r\n\r\n\r\n**Actual behavior**\r\nI ended up back on the home page `/en-US/`. :(\r\n\r\n\r\n**Expected behavior**\r\nTo be taken to the page I was originally on. \r\n\r\n\r\n**Additional context**\r\nPretty sure this is regression but don't know if it started when we switched to a modal. \r\n\n", "before_files": [{"content": "from urllib.parse import urlparse\n\nfrom constance import config\nfrom django.conf import settings\nfrom django.utils import translation\n\nfrom .i18n import get_language_mapping\n\n\ndef global_settings(request):\n \"\"\"Adds settings to the context.\"\"\"\n\n def clean_safe_url(url):\n if \"://\" not in url:\n # E.g. 'elasticsearch:9200'\n url = \"http://\" + url\n parsed = urlparse(url)\n if \"@\" in parsed.netloc:\n parsed = parsed._replace(\n netloc=\"username:secret@\" + parsed.netloc.split(\"@\")[-1]\n )\n return parsed.geturl()\n\n # TODO: Ideally, GOOGLE_ANALYTICS_ACCOUNT is only set in settings (from\n # an environment variable) but for safe transition, we rely on\n # constance if it hasn't been put into settings yet.\n # Once we know with confidence, that GOOGLE_ANALYTICS_ACCOUNT is set\n # and a valid value in the environment (for production!) 
then we\n # can delete these lines of code.\n # See https://bugzilla.mozilla.org/show_bug.cgi?id=1570076\n google_analytics_account = getattr(settings, \"GOOGLE_ANALYTICS_ACCOUNT\", None)\n if google_analytics_account is None:\n if config.GOOGLE_ANALYTICS_ACCOUNT != \"0\":\n settings.GOOGLE_ANALYTICS_ACCOUNT = config.GOOGLE_ANALYTICS_ACCOUNT\n\n return {\n \"settings\": settings,\n # Because the 'settings.ES_URLS' might contain the username:password\n # it's never appropriate to display in templates. So clean them up.\n # But return it as a lambda so it only executes if really needed.\n \"safe_es_urls\": lambda: [clean_safe_url(x) for x in settings.ES_URLS],\n }\n\n\ndef i18n(request):\n return {\n \"LANGUAGES\": get_language_mapping(),\n \"LANG\": (\n settings.LANGUAGE_URL_MAP.get(translation.get_language())\n or translation.get_language()\n ),\n \"DIR\": \"rtl\" if translation.get_language_bidi() else \"ltr\",\n }\n\n\ndef next_url(request):\n if (\n hasattr(request, \"path\")\n and \"login\" not in request.path\n and \"register\" not in request.path\n ):\n return {\"next_url\": request.get_full_path()}\n return {}\n", "path": "kuma/core/context_processors.py"}]}
1,399
390
gh_patches_debug_9354
rasdani/github-patches
git_diff
huggingface__text-generation-inference-579
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Tied weight optimization for checkpoints doesn't work with text-generation-inference. ### System Info Ubuntu 20.04 4 A10 NVIDIA GPU's I think checkpoints saved after this feature was merged don't work with text-generation-inference. https://github.com/huggingface/transformers/issues/23868 With falcon models getting "`lm_head` not found" I'll add more details once I find minimal steps to reproduce. ### Information - [X] Docker - [ ] The CLI directly ### Tasks - [X] An officially supported command - [ ] My own modifications ### Reproduction Save tiiuae/falcon-40b checkpoint using transformers==4.30.2 launch text-generation-inference server (using transformers==4.27.4 works without issue) ### Expected behavior Expect the text-generation-inference weight loader to be able to find the `lm_head` weight in the checkpoint. Note this may be a safetensor issue. </issue> <code> [start of server/text_generation_server/models/flash_rw.py] 1 import torch 2 import torch.distributed 3 4 from opentelemetry import trace 5 from transformers import AutoTokenizer 6 from typing import Optional 7 8 from text_generation_server.models import FlashCausalLM 9 from text_generation_server.models.custom_modeling.flash_rw_modeling import ( 10 RWConfig, 11 FlashRWForCausalLM, 12 ) 13 from text_generation_server.utils import ( 14 initialize_torch_distributed, 15 weight_files, 16 Weights, 17 ) 18 19 tracer = trace.get_tracer(__name__) 20 21 22 class FlashRWSharded(FlashCausalLM): 23 def __init__( 24 self, 25 model_id: str, 26 revision: Optional[str] = None, 27 quantize: Optional[str] = None, 28 dtype: Optional[torch.dtype] = None, 29 trust_remote_code: bool = False, 30 ): 31 self.process_group, rank, world_size = initialize_torch_distributed() 32 if torch.cuda.is_available(): 33 device = torch.device(f"cuda:{rank}") 34 dtype = torch.float16 if dtype is None else dtype 35 else: 36 raise NotImplementedError("FlashRW is only available on GPU") 37 38 tokenizer = AutoTokenizer.from_pretrained( 39 model_id, 40 revision=revision, 41 padding_side="left", 42 truncation_side="left", 43 trust_remote_code=trust_remote_code, 44 ) 45 46 config = RWConfig.from_pretrained( 47 model_id, revision=revision, trust_remote_code=trust_remote_code 48 ) 49 50 torch.distributed.barrier(group=self.process_group) 51 filenames = weight_files(model_id, revision=revision, extension=".safetensors") 52 weights = Weights(filenames, device, dtype, process_group=self.process_group) 53 54 config.quantize = quantize 55 56 model = FlashRWForCausalLM(config, weights) 57 58 torch.distributed.barrier(group=self.process_group) 59 super(FlashRWSharded, self).__init__( 60 model=model.to(device), 61 tokenizer=tokenizer, 62 num_layers=len(model.transformer.h), 63 num_kv_heads=model.transformer.cache_size, 64 head_size=model.transformer.head_size, 65 dtype=dtype, 66 device=device, 67 rank=rank, 68 world_size=world_size, 69 ) 70 [end of server/text_generation_server/models/flash_rw.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/server/text_generation_server/models/flash_rw.py b/server/text_generation_server/models/flash_rw.py --- a/server/text_generation_server/models/flash_rw.py +++ b/server/text_generation_server/models/flash_rw.py @@ -49,7 +49,13 @@ torch.distributed.barrier(group=self.process_group) filenames = weight_files(model_id, revision=revision, extension=".safetensors") - weights = Weights(filenames, device, dtype, process_group=self.process_group) + weights = Weights( + filenames, + device, + dtype, + process_group=self.process_group, + aliases={"transformer.word_embeddings.weight": ["lm_head.weight"]}, + ) config.quantize = quantize
{"golden_diff": "diff --git a/server/text_generation_server/models/flash_rw.py b/server/text_generation_server/models/flash_rw.py\n--- a/server/text_generation_server/models/flash_rw.py\n+++ b/server/text_generation_server/models/flash_rw.py\n@@ -49,7 +49,13 @@\n \n torch.distributed.barrier(group=self.process_group)\n filenames = weight_files(model_id, revision=revision, extension=\".safetensors\")\n- weights = Weights(filenames, device, dtype, process_group=self.process_group)\n+ weights = Weights(\n+ filenames,\n+ device,\n+ dtype,\n+ process_group=self.process_group,\n+ aliases={\"transformer.word_embeddings.weight\": [\"lm_head.weight\"]},\n+ )\n \n config.quantize = quantize\n", "issue": "Tied weight optimization for checkpoints doesn't work with text-generation-inference.\n### System Info\r\nUbuntu 20.04\r\n4 A10 NVIDIA GPU's\r\n\r\nI think checkpoints saved after this feature was merged don't work with text-generation-inference.\r\nhttps://github.com/huggingface/transformers/issues/23868\r\n\r\nWith falcon models getting \"`lm_head` not found\"\r\nI'll add more details once I find minimal steps to reproduce.\r\n\r\n### Information\r\n\r\n- [X] Docker\r\n- [ ] The CLI directly\r\n\r\n### Tasks\r\n\r\n- [X] An officially supported command\r\n- [ ] My own modifications\r\n\r\n### Reproduction\r\n\r\nSave tiiuae/falcon-40b checkpoint using transformers==4.30.2\r\nlaunch text-generation-inference server\r\n\r\n(using transformers==4.27.4 works without issue)\r\n\r\n### Expected behavior\r\n\r\nExpect the text-generation-inference weight loader to be able to find the `lm_head` weight in the checkpoint. Note this may be a safetensor issue.\n", "before_files": [{"content": "import torch\nimport torch.distributed\n\nfrom opentelemetry import trace\nfrom transformers import AutoTokenizer\nfrom typing import Optional\n\nfrom text_generation_server.models import FlashCausalLM\nfrom text_generation_server.models.custom_modeling.flash_rw_modeling import (\n RWConfig,\n FlashRWForCausalLM,\n)\nfrom text_generation_server.utils import (\n initialize_torch_distributed,\n weight_files,\n Weights,\n)\n\ntracer = trace.get_tracer(__name__)\n\n\nclass FlashRWSharded(FlashCausalLM):\n def __init__(\n self,\n model_id: str,\n revision: Optional[str] = None,\n quantize: Optional[str] = None,\n dtype: Optional[torch.dtype] = None,\n trust_remote_code: bool = False,\n ):\n self.process_group, rank, world_size = initialize_torch_distributed()\n if torch.cuda.is_available():\n device = torch.device(f\"cuda:{rank}\")\n dtype = torch.float16 if dtype is None else dtype\n else:\n raise NotImplementedError(\"FlashRW is only available on GPU\")\n\n tokenizer = AutoTokenizer.from_pretrained(\n model_id,\n revision=revision,\n padding_side=\"left\",\n truncation_side=\"left\",\n trust_remote_code=trust_remote_code,\n )\n\n config = RWConfig.from_pretrained(\n model_id, revision=revision, trust_remote_code=trust_remote_code\n )\n\n torch.distributed.barrier(group=self.process_group)\n filenames = weight_files(model_id, revision=revision, extension=\".safetensors\")\n weights = Weights(filenames, device, dtype, process_group=self.process_group)\n\n config.quantize = quantize\n\n model = FlashRWForCausalLM(config, weights)\n\n torch.distributed.barrier(group=self.process_group)\n super(FlashRWSharded, self).__init__(\n model=model.to(device),\n tokenizer=tokenizer,\n num_layers=len(model.transformer.h),\n num_kv_heads=model.transformer.cache_size,\n head_size=model.transformer.head_size,\n dtype=dtype,\n 
device=device,\n rank=rank,\n world_size=world_size,\n )\n", "path": "server/text_generation_server/models/flash_rw.py"}]}
1,347
167
gh_patches_debug_38598
rasdani/github-patches
git_diff
apache__airflow-32382
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add DagProcessor status to health endpoint. ### Description Add DagProcessor status including latest heartbeat to health endpoint similar to Triggerer status added recently. Related PRs. https://github.com/apache/airflow/pull/31529 https://github.com/apache/airflow/pull/27755 ### Use case/motivation It helps in dag processor monitoring ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md) </issue> <code> [start of airflow/api_connexion/schemas/health_schema.py] 1 # Licensed to the Apache Software Foundation (ASF) under one 2 # or more contributor license agreements. See the NOTICE file 3 # distributed with this work for additional information 4 # regarding copyright ownership. The ASF licenses this file 5 # to you under the Apache License, Version 2.0 (the 6 # "License"); you may not use this file except in compliance 7 # with the License. You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, 12 # software distributed under the License is distributed on an 13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 # KIND, either express or implied. See the License for the 15 # specific language governing permissions and limitations 16 # under the License. 17 from __future__ import annotations 18 19 from marshmallow import Schema, fields 20 21 22 class BaseInfoSchema(Schema): 23 """Base status field for metadatabase and scheduler.""" 24 25 status = fields.String(dump_only=True) 26 27 28 class MetaDatabaseInfoSchema(BaseInfoSchema): 29 """Schema for Metadatabase info.""" 30 31 32 class SchedulerInfoSchema(BaseInfoSchema): 33 """Schema for Scheduler info.""" 34 35 latest_scheduler_heartbeat = fields.String(dump_only=True) 36 37 38 class TriggererInfoSchema(BaseInfoSchema): 39 """Schema for Triggerer info.""" 40 41 latest_triggerer_heartbeat = fields.String(dump_only=True) 42 43 44 class HealthInfoSchema(Schema): 45 """Schema for the Health endpoint.""" 46 47 metadatabase = fields.Nested(MetaDatabaseInfoSchema) 48 scheduler = fields.Nested(SchedulerInfoSchema) 49 triggerer = fields.Nested(TriggererInfoSchema) 50 51 52 health_schema = HealthInfoSchema() 53 [end of airflow/api_connexion/schemas/health_schema.py] [start of airflow/api/common/airflow_health.py] 1 # Licensed to the Apache Software Foundation (ASF) under one 2 # or more contributor license agreements. See the NOTICE file 3 # distributed with this work for additional information 4 # regarding copyright ownership. The ASF licenses this file 5 # to you under the Apache License, Version 2.0 (the 6 # "License"); you may not use this file except in compliance 7 # with the License. You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, 12 # software distributed under the License is distributed on an 13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 # KIND, either express or implied. See the License for the 15 # specific language governing permissions and limitations 16 # under the License. 
17 from __future__ import annotations 18 19 from typing import Any 20 21 from airflow.jobs.scheduler_job_runner import SchedulerJobRunner 22 from airflow.jobs.triggerer_job_runner import TriggererJobRunner 23 24 HEALTHY = "healthy" 25 UNHEALTHY = "unhealthy" 26 27 28 def get_airflow_health() -> dict[str, Any]: 29 """Get the health for Airflow metadatabase, scheduler and triggerer.""" 30 metadatabase_status = HEALTHY 31 latest_scheduler_heartbeat = None 32 latest_triggerer_heartbeat = None 33 scheduler_status = UNHEALTHY 34 triggerer_status: str | None = UNHEALTHY 35 36 try: 37 latest_scheduler_job = SchedulerJobRunner.most_recent_job() 38 39 if latest_scheduler_job: 40 latest_scheduler_heartbeat = latest_scheduler_job.latest_heartbeat.isoformat() 41 if latest_scheduler_job.is_alive(): 42 scheduler_status = HEALTHY 43 except Exception: 44 metadatabase_status = UNHEALTHY 45 46 try: 47 latest_triggerer_job = TriggererJobRunner.most_recent_job() 48 49 if latest_triggerer_job: 50 latest_triggerer_heartbeat = latest_triggerer_job.latest_heartbeat.isoformat() 51 if latest_triggerer_job.is_alive(): 52 triggerer_status = HEALTHY 53 else: 54 triggerer_status = None 55 except Exception: 56 metadatabase_status = UNHEALTHY 57 58 airflow_health_status = { 59 "metadatabase": {"status": metadatabase_status}, 60 "scheduler": { 61 "status": scheduler_status, 62 "latest_scheduler_heartbeat": latest_scheduler_heartbeat, 63 }, 64 "triggerer": { 65 "status": triggerer_status, 66 "latest_triggerer_heartbeat": latest_triggerer_heartbeat, 67 }, 68 } 69 70 return airflow_health_status 71 [end of airflow/api/common/airflow_health.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/airflow/api/common/airflow_health.py b/airflow/api/common/airflow_health.py --- a/airflow/api/common/airflow_health.py +++ b/airflow/api/common/airflow_health.py @@ -18,6 +18,7 @@ from typing import Any +from airflow.jobs.dag_processor_job_runner import DagProcessorJobRunner from airflow.jobs.scheduler_job_runner import SchedulerJobRunner from airflow.jobs.triggerer_job_runner import TriggererJobRunner @@ -30,8 +31,10 @@ metadatabase_status = HEALTHY latest_scheduler_heartbeat = None latest_triggerer_heartbeat = None + latest_dag_processor_heartbeat = None scheduler_status = UNHEALTHY triggerer_status: str | None = UNHEALTHY + dag_processor_status: str | None = UNHEALTHY try: latest_scheduler_job = SchedulerJobRunner.most_recent_job() @@ -55,6 +58,18 @@ except Exception: metadatabase_status = UNHEALTHY + try: + latest_dag_processor_job = DagProcessorJobRunner.most_recent_job() + + if latest_dag_processor_job: + latest_dag_processor_heartbeat = latest_dag_processor_job.latest_heartbeat.isoformat() + if latest_dag_processor_job.is_alive(): + dag_processor_status = HEALTHY + else: + dag_processor_status = None + except Exception: + metadatabase_status = UNHEALTHY + airflow_health_status = { "metadatabase": {"status": metadatabase_status}, "scheduler": { @@ -65,6 +80,10 @@ "status": triggerer_status, "latest_triggerer_heartbeat": latest_triggerer_heartbeat, }, + "dag_processor": { + "status": dag_processor_status, + "latest_dag_processor_heartbeat": latest_dag_processor_heartbeat, + }, } return airflow_health_status diff --git a/airflow/api_connexion/schemas/health_schema.py b/airflow/api_connexion/schemas/health_schema.py --- a/airflow/api_connexion/schemas/health_schema.py +++ b/airflow/api_connexion/schemas/health_schema.py @@ -41,12 +41,19 @@ latest_triggerer_heartbeat = fields.String(dump_only=True) +class DagProcessorInfoSchema(BaseInfoSchema): + """Schema for DagProcessor info.""" + + latest_dag_processor_heartbeat = fields.String(dump_only=True) + + class HealthInfoSchema(Schema): """Schema for the Health endpoint.""" metadatabase = fields.Nested(MetaDatabaseInfoSchema) scheduler = fields.Nested(SchedulerInfoSchema) triggerer = fields.Nested(TriggererInfoSchema) + dag_processor = fields.Nested(DagProcessorInfoSchema) health_schema = HealthInfoSchema()
{"golden_diff": "diff --git a/airflow/api/common/airflow_health.py b/airflow/api/common/airflow_health.py\n--- a/airflow/api/common/airflow_health.py\n+++ b/airflow/api/common/airflow_health.py\n@@ -18,6 +18,7 @@\n \n from typing import Any\n \n+from airflow.jobs.dag_processor_job_runner import DagProcessorJobRunner\n from airflow.jobs.scheduler_job_runner import SchedulerJobRunner\n from airflow.jobs.triggerer_job_runner import TriggererJobRunner\n \n@@ -30,8 +31,10 @@\n metadatabase_status = HEALTHY\n latest_scheduler_heartbeat = None\n latest_triggerer_heartbeat = None\n+ latest_dag_processor_heartbeat = None\n scheduler_status = UNHEALTHY\n triggerer_status: str | None = UNHEALTHY\n+ dag_processor_status: str | None = UNHEALTHY\n \n try:\n latest_scheduler_job = SchedulerJobRunner.most_recent_job()\n@@ -55,6 +58,18 @@\n except Exception:\n metadatabase_status = UNHEALTHY\n \n+ try:\n+ latest_dag_processor_job = DagProcessorJobRunner.most_recent_job()\n+\n+ if latest_dag_processor_job:\n+ latest_dag_processor_heartbeat = latest_dag_processor_job.latest_heartbeat.isoformat()\n+ if latest_dag_processor_job.is_alive():\n+ dag_processor_status = HEALTHY\n+ else:\n+ dag_processor_status = None\n+ except Exception:\n+ metadatabase_status = UNHEALTHY\n+\n airflow_health_status = {\n \"metadatabase\": {\"status\": metadatabase_status},\n \"scheduler\": {\n@@ -65,6 +80,10 @@\n \"status\": triggerer_status,\n \"latest_triggerer_heartbeat\": latest_triggerer_heartbeat,\n },\n+ \"dag_processor\": {\n+ \"status\": dag_processor_status,\n+ \"latest_dag_processor_heartbeat\": latest_dag_processor_heartbeat,\n+ },\n }\n \n return airflow_health_status\ndiff --git a/airflow/api_connexion/schemas/health_schema.py b/airflow/api_connexion/schemas/health_schema.py\n--- a/airflow/api_connexion/schemas/health_schema.py\n+++ b/airflow/api_connexion/schemas/health_schema.py\n@@ -41,12 +41,19 @@\n latest_triggerer_heartbeat = fields.String(dump_only=True)\n \n \n+class DagProcessorInfoSchema(BaseInfoSchema):\n+ \"\"\"Schema for DagProcessor info.\"\"\"\n+\n+ latest_dag_processor_heartbeat = fields.String(dump_only=True)\n+\n+\n class HealthInfoSchema(Schema):\n \"\"\"Schema for the Health endpoint.\"\"\"\n \n metadatabase = fields.Nested(MetaDatabaseInfoSchema)\n scheduler = fields.Nested(SchedulerInfoSchema)\n triggerer = fields.Nested(TriggererInfoSchema)\n+ dag_processor = fields.Nested(DagProcessorInfoSchema)\n \n \n health_schema = HealthInfoSchema()\n", "issue": "Add DagProcessor status to health endpoint.\n### Description\n\nAdd DagProcessor status including latest heartbeat to health endpoint similar to Triggerer status added recently. Related PRs.\r\n\r\nhttps://github.com/apache/airflow/pull/31529\r\nhttps://github.com/apache/airflow/pull/27755\n\n### Use case/motivation\n\nIt helps in dag processor monitoring \n\n### Related issues\n\n_No response_\n\n### Are you willing to submit a PR?\n\n- [X] Yes I am willing to submit a PR!\n\n### Code of Conduct\n\n- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)\n\n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom __future__ import annotations\n\nfrom marshmallow import Schema, fields\n\n\nclass BaseInfoSchema(Schema):\n \"\"\"Base status field for metadatabase and scheduler.\"\"\"\n\n status = fields.String(dump_only=True)\n\n\nclass MetaDatabaseInfoSchema(BaseInfoSchema):\n \"\"\"Schema for Metadatabase info.\"\"\"\n\n\nclass SchedulerInfoSchema(BaseInfoSchema):\n \"\"\"Schema for Scheduler info.\"\"\"\n\n latest_scheduler_heartbeat = fields.String(dump_only=True)\n\n\nclass TriggererInfoSchema(BaseInfoSchema):\n \"\"\"Schema for Triggerer info.\"\"\"\n\n latest_triggerer_heartbeat = fields.String(dump_only=True)\n\n\nclass HealthInfoSchema(Schema):\n \"\"\"Schema for the Health endpoint.\"\"\"\n\n metadatabase = fields.Nested(MetaDatabaseInfoSchema)\n scheduler = fields.Nested(SchedulerInfoSchema)\n triggerer = fields.Nested(TriggererInfoSchema)\n\n\nhealth_schema = HealthInfoSchema()\n", "path": "airflow/api_connexion/schemas/health_schema.py"}, {"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom __future__ import annotations\n\nfrom typing import Any\n\nfrom airflow.jobs.scheduler_job_runner import SchedulerJobRunner\nfrom airflow.jobs.triggerer_job_runner import TriggererJobRunner\n\nHEALTHY = \"healthy\"\nUNHEALTHY = \"unhealthy\"\n\n\ndef get_airflow_health() -> dict[str, Any]:\n \"\"\"Get the health for Airflow metadatabase, scheduler and triggerer.\"\"\"\n metadatabase_status = HEALTHY\n latest_scheduler_heartbeat = None\n latest_triggerer_heartbeat = None\n scheduler_status = UNHEALTHY\n triggerer_status: str | None = UNHEALTHY\n\n try:\n latest_scheduler_job = SchedulerJobRunner.most_recent_job()\n\n if latest_scheduler_job:\n latest_scheduler_heartbeat = latest_scheduler_job.latest_heartbeat.isoformat()\n if latest_scheduler_job.is_alive():\n scheduler_status = HEALTHY\n except Exception:\n metadatabase_status = UNHEALTHY\n\n try:\n latest_triggerer_job = TriggererJobRunner.most_recent_job()\n\n if latest_triggerer_job:\n latest_triggerer_heartbeat = latest_triggerer_job.latest_heartbeat.isoformat()\n if latest_triggerer_job.is_alive():\n triggerer_status = HEALTHY\n else:\n triggerer_status = None\n except Exception:\n metadatabase_status = UNHEALTHY\n\n airflow_health_status = {\n \"metadatabase\": {\"status\": metadatabase_status},\n \"scheduler\": {\n \"status\": scheduler_status,\n \"latest_scheduler_heartbeat\": latest_scheduler_heartbeat,\n },\n \"triggerer\": {\n \"status\": triggerer_status,\n \"latest_triggerer_heartbeat\": latest_triggerer_heartbeat,\n },\n }\n\n return airflow_health_status\n", "path": "airflow/api/common/airflow_health.py"}]}
1,868
653
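A quick annotation on the Airflow health record above: the golden diff adds a `dag_processor` entry by repeating the same per-component probe already used for the scheduler and triggerer. Below is a minimal standalone sketch of that pattern; `runner_cls` stands in for the real `SchedulerJobRunner` / `TriggererJobRunner` / `DagProcessorJobRunner` classes, which is a simplification of the actual Airflow code, not a copy of it.

```python
from __future__ import annotations

from typing import Any

HEALTHY = "healthy"
UNHEALTHY = "unhealthy"


def component_health(runner_cls: Any) -> dict[str, Any]:
    """Generic form of the per-component check the diff repeats three times."""
    status: str | None = UNHEALTHY
    latest_heartbeat = None
    try:
        job = runner_cls.most_recent_job()  # hypothetical runner API, mirroring the diff
        if job:
            latest_heartbeat = job.latest_heartbeat.isoformat()
            # The diff reports HEALTHY when alive, and None (not UNHEALTHY)
            # when a job exists but its heartbeat has gone stale.
            status = HEALTHY if job.is_alive() else None
    except Exception:
        # In the real code a failure here flips metadatabase_status instead.
        status = UNHEALTHY
    return {"status": status, "latest_heartbeat": latest_heartbeat}
```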
gh_patches_debug_31339
rasdani/github-patches
git_diff
pwndbg__pwndbg-1853
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Better exception handling for unmet dependencies Some of our users update Pwndbg only by `git pull`ing the newest version and not re-running `setup.sh`. If that happens and when we updated our dependencies recently, the user may end up with broken Pwndbg such as (likely) happened in https://github.com/pwndbg/pwndbg/issues/1790. We should do the two following things: 1) When we detect/handle an exception in the [`pwndbg/exception.py:handle` function]( https://github.com/pwndbg/pwndbg/blob/de4acb2f40da3b0c97353a8b680ffa6440346f7b/pwndbg/exception.py#L61-L96), we should check the installed dependencies versions against the required ones and if they do not match, we should inform the user to update them. 2) We may want to display installed dependencies versions within the `bugreport` command output? </issue> <code> [start of pwndbg/exception.py] 1 from __future__ import annotations 2 3 import functools 4 import sys 5 import traceback 6 7 import gdb 8 9 import pwndbg.lib.cache 10 import pwndbg.lib.stdio 11 from pwndbg.color import message 12 from pwndbg.gdblib import config 13 14 with pwndbg.lib.stdio.stdio: 15 try: 16 import ipdb as pdb 17 except ImportError: 18 import pdb 19 try: 20 from rich.console import Console 21 22 _rich_console = Console() 23 except ImportError: 24 _rich_console = None 25 26 verbose = config.add_param( 27 "exception-verbose", 28 False, 29 "whether to print a full stacktrace for exceptions raised in Pwndbg commands", 30 ) 31 debug = config.add_param( 32 "exception-debugger", False, "whether to debug exceptions raised in Pwndbg commands" 33 ) 34 35 36 @pwndbg.lib.cache.cache_until("forever") 37 def inform_report_issue(exception_msg) -> None: 38 """ 39 Informs user that he can report an issue. 40 The use of caching makes it reporting only once for a given exception message. 41 """ 42 print( 43 message.notice( 44 "If that is an issue, you can report it on https://github.com/pwndbg/pwndbg/issues\n" 45 "(Please don't forget to search if it hasn't been reported before)\n" 46 "To generate the report and open a browser, you may run " 47 ) 48 + message.hint("`bugreport --run-browser`") 49 + message.notice("\nPS: Pull requests are welcome") 50 ) 51 52 53 def inform_verbose_and_debug() -> None: 54 print( 55 message.notice("For more info invoke `") 56 + message.hint("set exception-verbose on") 57 + message.notice("` and rerun the command\nor debug it by yourself with `") 58 + message.hint("set exception-debugger on") 59 + message.notice("`") 60 ) 61 62 63 def handle(name="Error"): 64 """Displays an exception to the user, optionally displaying a full traceback 65 and spawning an interactive post-moretem debugger. 66 67 Notes: 68 - ``set exception-verbose on`` enables stack traces. 69 - ``set exception-debugger on`` enables the post-mortem debugger. 70 """ 71 72 # This is for unit tests so they fail on exceptions instead of displaying them. 
73 if getattr(sys, "_pwndbg_unittest_run", False) is True: 74 E, V, T = sys.exc_info() 75 e = E(V) 76 e.__traceback__ = T 77 raise e 78 79 # Display the error 80 if debug or verbose: 81 exception_msg = traceback.format_exc() 82 if _rich_console: 83 _rich_console.print_exception() 84 else: 85 print(exception_msg) 86 inform_report_issue(exception_msg) 87 88 else: 89 exc_type, exc_value, exc_traceback = sys.exc_info() 90 91 print(message.error(f"Exception occurred: {name}: {exc_value} ({exc_type})")) 92 93 inform_verbose_and_debug() 94 95 # Break into the interactive debugger 96 if debug: 97 with pwndbg.lib.stdio.stdio: 98 pdb.post_mortem() 99 100 101 @functools.wraps(pdb.set_trace) 102 def set_trace() -> None: 103 """Enable sane debugging in Pwndbg by switching to the "real" stdio.""" 104 debugger = pdb.Pdb( 105 stdin=sys.__stdin__, stdout=sys.__stdout__, skip=["pwndbg.lib.stdio", "pwndbg.exception"] 106 ) 107 debugger.set_trace() 108 109 110 pdb.set_trace = set_trace 111 112 113 @config.trigger(verbose, debug) 114 def update() -> None: 115 if verbose or debug: 116 command = "set python print-stack full" 117 else: 118 command = "set python print-stack message" 119 120 gdb.execute(command, from_tty=True, to_string=True) 121 [end of pwndbg/exception.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pwndbg/exception.py b/pwndbg/exception.py --- a/pwndbg/exception.py +++ b/pwndbg/exception.py @@ -1,10 +1,12 @@ from __future__ import annotations import functools +import os import sys import traceback import gdb +import pkg_resources import pwndbg.lib.cache import pwndbg.lib.stdio @@ -33,6 +35,41 @@ ) +def inform_unmet_dependencies(errors) -> None: + """ + Informs user about unmet dependencies + """ + msg = message.error("You appear to have unmet Pwndbg dependencies.\n") + for e in errors: + if isinstance(e, pkg_resources.DistributionNotFound): + msg += message.notice(f"- required {e.args[0]}, but not installed\n") + else: + msg += message.notice(f"- required {e.args[1]}, installed: {e.args[0]}\n") + msg += message.notice("Consider running: ") + msg += message.hint("`setup.sh` ") + msg += message.notice("from Pwndbg project directory.\n") + print(msg) + + [email protected]_until("forever") +def check_dependencies(): + """ + Checks if there are any unmet dependencies in requirements.txt + """ + project_path = os.path.dirname(os.path.abspath(__file__)) + requirements_path = os.path.join(project_path, os.pardir, "requirements.txt") + with open(requirements_path, "r") as f: + errors = [] + for line in f.readlines(): + try: + pkg_resources.require(line) + except (pkg_resources.VersionConflict, pkg_resources.DistributionNotFound) as e: + errors.append(e) + + if errors: + inform_unmet_dependencies(errors) + + @pwndbg.lib.cache.cache_until("forever") def inform_report_issue(exception_msg) -> None: """ @@ -76,6 +113,9 @@ e.__traceback__ = T raise e + # Check dependencies against requirements.txt and warn user + check_dependencies() + # Display the error if debug or verbose: exception_msg = traceback.format_exc()
{"golden_diff": "diff --git a/pwndbg/exception.py b/pwndbg/exception.py\n--- a/pwndbg/exception.py\n+++ b/pwndbg/exception.py\n@@ -1,10 +1,12 @@\n from __future__ import annotations\n \n import functools\n+import os\n import sys\n import traceback\n \n import gdb\n+import pkg_resources\n \n import pwndbg.lib.cache\n import pwndbg.lib.stdio\n@@ -33,6 +35,41 @@\n )\n \n \n+def inform_unmet_dependencies(errors) -> None:\n+ \"\"\"\n+ Informs user about unmet dependencies\n+ \"\"\"\n+ msg = message.error(\"You appear to have unmet Pwndbg dependencies.\\n\")\n+ for e in errors:\n+ if isinstance(e, pkg_resources.DistributionNotFound):\n+ msg += message.notice(f\"- required {e.args[0]}, but not installed\\n\")\n+ else:\n+ msg += message.notice(f\"- required {e.args[1]}, installed: {e.args[0]}\\n\")\n+ msg += message.notice(\"Consider running: \")\n+ msg += message.hint(\"`setup.sh` \")\n+ msg += message.notice(\"from Pwndbg project directory.\\n\")\n+ print(msg)\n+\n+\[email protected]_until(\"forever\")\n+def check_dependencies():\n+ \"\"\"\n+ Checks if there are any unmet dependencies in requirements.txt\n+ \"\"\"\n+ project_path = os.path.dirname(os.path.abspath(__file__))\n+ requirements_path = os.path.join(project_path, os.pardir, \"requirements.txt\")\n+ with open(requirements_path, \"r\") as f:\n+ errors = []\n+ for line in f.readlines():\n+ try:\n+ pkg_resources.require(line)\n+ except (pkg_resources.VersionConflict, pkg_resources.DistributionNotFound) as e:\n+ errors.append(e)\n+\n+ if errors:\n+ inform_unmet_dependencies(errors)\n+\n+\n @pwndbg.lib.cache.cache_until(\"forever\")\n def inform_report_issue(exception_msg) -> None:\n \"\"\"\n@@ -76,6 +113,9 @@\n e.__traceback__ = T\n raise e\n \n+ # Check dependencies against requirements.txt and warn user\n+ check_dependencies()\n+\n # Display the error\n if debug or verbose:\n exception_msg = traceback.format_exc()\n", "issue": "Better exception handling for unmet dependencies\nSome of our users update Pwndbg only by `git pull`ing the newest version and not re-running `setup.sh`. If that happens and when we updated our dependencies recently, the user may end up with broken Pwndbg such as (likely) happened in https://github.com/pwndbg/pwndbg/issues/1790.\r\n\r\nWe should do the two following things:\r\n1) When we detect/handle an exception in the [`pwndbg/exception.py:handle` function]( https://github.com/pwndbg/pwndbg/blob/de4acb2f40da3b0c97353a8b680ffa6440346f7b/pwndbg/exception.py#L61-L96), we should check the installed dependencies versions against the required ones and if they do not match, we should inform the user to update them.\r\n2) We may want to display installed dependencies versions within the `bugreport` command output? 
\n", "before_files": [{"content": "from __future__ import annotations\n\nimport functools\nimport sys\nimport traceback\n\nimport gdb\n\nimport pwndbg.lib.cache\nimport pwndbg.lib.stdio\nfrom pwndbg.color import message\nfrom pwndbg.gdblib import config\n\nwith pwndbg.lib.stdio.stdio:\n try:\n import ipdb as pdb\n except ImportError:\n import pdb\n try:\n from rich.console import Console\n\n _rich_console = Console()\n except ImportError:\n _rich_console = None\n\nverbose = config.add_param(\n \"exception-verbose\",\n False,\n \"whether to print a full stacktrace for exceptions raised in Pwndbg commands\",\n)\ndebug = config.add_param(\n \"exception-debugger\", False, \"whether to debug exceptions raised in Pwndbg commands\"\n)\n\n\[email protected]_until(\"forever\")\ndef inform_report_issue(exception_msg) -> None:\n \"\"\"\n Informs user that he can report an issue.\n The use of caching makes it reporting only once for a given exception message.\n \"\"\"\n print(\n message.notice(\n \"If that is an issue, you can report it on https://github.com/pwndbg/pwndbg/issues\\n\"\n \"(Please don't forget to search if it hasn't been reported before)\\n\"\n \"To generate the report and open a browser, you may run \"\n )\n + message.hint(\"`bugreport --run-browser`\")\n + message.notice(\"\\nPS: Pull requests are welcome\")\n )\n\n\ndef inform_verbose_and_debug() -> None:\n print(\n message.notice(\"For more info invoke `\")\n + message.hint(\"set exception-verbose on\")\n + message.notice(\"` and rerun the command\\nor debug it by yourself with `\")\n + message.hint(\"set exception-debugger on\")\n + message.notice(\"`\")\n )\n\n\ndef handle(name=\"Error\"):\n \"\"\"Displays an exception to the user, optionally displaying a full traceback\n and spawning an interactive post-moretem debugger.\n\n Notes:\n - ``set exception-verbose on`` enables stack traces.\n - ``set exception-debugger on`` enables the post-mortem debugger.\n \"\"\"\n\n # This is for unit tests so they fail on exceptions instead of displaying them.\n if getattr(sys, \"_pwndbg_unittest_run\", False) is True:\n E, V, T = sys.exc_info()\n e = E(V)\n e.__traceback__ = T\n raise e\n\n # Display the error\n if debug or verbose:\n exception_msg = traceback.format_exc()\n if _rich_console:\n _rich_console.print_exception()\n else:\n print(exception_msg)\n inform_report_issue(exception_msg)\n\n else:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n\n print(message.error(f\"Exception occurred: {name}: {exc_value} ({exc_type})\"))\n\n inform_verbose_and_debug()\n\n # Break into the interactive debugger\n if debug:\n with pwndbg.lib.stdio.stdio:\n pdb.post_mortem()\n\n\[email protected](pdb.set_trace)\ndef set_trace() -> None:\n \"\"\"Enable sane debugging in Pwndbg by switching to the \"real\" stdio.\"\"\"\n debugger = pdb.Pdb(\n stdin=sys.__stdin__, stdout=sys.__stdout__, skip=[\"pwndbg.lib.stdio\", \"pwndbg.exception\"]\n )\n debugger.set_trace()\n\n\npdb.set_trace = set_trace\n\n\[email protected](verbose, debug)\ndef update() -> None:\n if verbose or debug:\n command = \"set python print-stack full\"\n else:\n command = \"set python print-stack message\"\n\n gdb.execute(command, from_tty=True, to_string=True)\n", "path": "pwndbg/exception.py"}]}
1,819
507
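A note on the pwndbg record above: its golden diff validates `requirements.txt` with `pkg_resources` the moment an exception is handled. The sketch below isolates that check so it can be run on its own; the default requirements path is an assumption (the real code resolves it relative to the pwndbg package), and the blank-line/comment skipping is an addition for robustness that the diff itself does not need.

```python
from __future__ import annotations

import pkg_resources


def unmet_requirements(requirements_path: str = "requirements.txt") -> list[str]:
    """Return one message per requirement that is missing or at a conflicting version."""
    problems: list[str] = []
    with open(requirements_path) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("#"):
                continue
            try:
                pkg_resources.require(line)
            except pkg_resources.DistributionNotFound as e:
                # e.args[0] is the unmet Requirement, as the golden diff uses it
                problems.append(f"required {e.args[0]}, but not installed")
            except pkg_resources.VersionConflict as e:
                # e.args is (installed_dist, requirement), same indexing as the diff
                problems.append(f"required {e.args[1]}, installed: {e.args[0]}")
    return problems


if __name__ == "__main__":
    for problem in unmet_requirements():
        print(problem)
```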
gh_patches_debug_4953
rasdani/github-patches
git_diff
getnikola__nikola-2998
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Theme meta file ignored_assets key should ignore whitespace ### Environment (if reporting a bug) **Python Version:** 3.5.4 **Nikola Version:** 8.0.0dev HEAD **Operating System:** gentoo GNU/Linux ### Description: Current behaviour: The ignored_assets key in theme meta files takes a comma-separated list of filenames, that must not contain leading or trailing whitespace because it's considered to be part of the filename. Expected behaviour: The comma-separated list may contain the usual amount of whitespace: foo, bar, baz, etc </issue> <code> [start of nikola/plugins/task/copy_assets.py] 1 # -*- coding: utf-8 -*- 2 3 # Copyright © 2012-2018 Roberto Alsina and others. 4 5 # Permission is hereby granted, free of charge, to any 6 # person obtaining a copy of this software and associated 7 # documentation files (the "Software"), to deal in the 8 # Software without restriction, including without limitation 9 # the rights to use, copy, modify, merge, publish, 10 # distribute, sublicense, and/or sell copies of the 11 # Software, and to permit persons to whom the Software is 12 # furnished to do so, subject to the following conditions: 13 # 14 # The above copyright notice and this permission notice 15 # shall be included in all copies or substantial portions of 16 # the Software. 17 # 18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY 19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE 20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR 21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS 22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 27 """Copy theme assets into output.""" 28 29 30 import io 31 import os 32 33 from nikola.plugin_categories import Task 34 from nikola import utils 35 36 37 class CopyAssets(Task): 38 """Copy theme assets into output.""" 39 40 name = "copy_assets" 41 42 def gen_tasks(self): 43 """Create tasks to copy the assets of the whole theme chain. 44 45 If a file is present on two themes, use the version 46 from the "youngest" theme. 
47 """ 48 kw = { 49 "themes": self.site.THEMES, 50 "translations": self.site.translations, 51 "files_folders": self.site.config['FILES_FOLDERS'], 52 "output_folder": self.site.config['OUTPUT_FOLDER'], 53 "filters": self.site.config['FILTERS'], 54 "code_color_scheme": self.site.config['CODE_COLOR_SCHEME'], 55 "code.css_selectors": ['pre.code', '.highlight pre'], 56 "code.css_head": '/* code.css file generated by Nikola */\n', 57 "code.css_close": "\ntable.codetable { width: 100%;} td.linenos {text-align: right; width: 4em;}\n", 58 } 59 tasks = {} 60 code_css_path = os.path.join(kw['output_folder'], 'assets', 'css', 'code.css') 61 code_css_input = utils.get_asset_path('assets/css/code.css', 62 themes=kw['themes'], 63 files_folders=kw['files_folders'], output_dir=None) 64 yield self.group_task() 65 66 main_theme = utils.get_theme_path(kw['themes'][0]) 67 theme_ini = utils.parse_theme_meta(main_theme) 68 if theme_ini: 69 ignored_assets = theme_ini.get("Nikola", "ignored_assets", fallback='').split(',') 70 else: 71 ignored_assets = [] 72 73 for theme_name in kw['themes']: 74 src = os.path.join(utils.get_theme_path(theme_name), 'assets') 75 dst = os.path.join(kw['output_folder'], 'assets') 76 for task in utils.copy_tree(src, dst): 77 asset_name = os.path.relpath(task['name'], dst) 78 if task['name'] in tasks or asset_name in ignored_assets: 79 continue 80 tasks[task['name']] = task 81 task['uptodate'] = [utils.config_changed(kw, 'nikola.plugins.task.copy_assets')] 82 task['basename'] = self.name 83 if code_css_input: 84 if 'file_dep' not in task: 85 task['file_dep'] = [] 86 task['file_dep'].append(code_css_input) 87 yield utils.apply_filters(task, kw['filters']) 88 89 # Check whether or not there is a code.css file around. 90 if not code_css_input and kw['code_color_scheme']: 91 def create_code_css(): 92 from pygments.formatters import get_formatter_by_name 93 formatter = get_formatter_by_name('html', style=kw["code_color_scheme"]) 94 utils.makedirs(os.path.dirname(code_css_path)) 95 with io.open(code_css_path, 'w+', encoding='utf8') as outf: 96 outf.write(kw["code.css_head"]) 97 outf.write(formatter.get_style_defs(kw["code.css_selectors"])) 98 outf.write(kw["code.css_close"]) 99 100 if os.path.exists(code_css_path): 101 with io.open(code_css_path, 'r', encoding='utf-8') as fh: 102 testcontents = fh.read(len(kw["code.css_head"])) == kw["code.css_head"] 103 else: 104 testcontents = False 105 106 task = { 107 'basename': self.name, 108 'name': code_css_path, 109 'targets': [code_css_path], 110 'uptodate': [utils.config_changed(kw, 'nikola.plugins.task.copy_assets'), testcontents], 111 'actions': [(create_code_css, [])], 112 'clean': True, 113 } 114 yield utils.apply_filters(task, kw['filters']) 115 [end of nikola/plugins/task/copy_assets.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nikola/plugins/task/copy_assets.py b/nikola/plugins/task/copy_assets.py --- a/nikola/plugins/task/copy_assets.py +++ b/nikola/plugins/task/copy_assets.py @@ -67,6 +67,7 @@ theme_ini = utils.parse_theme_meta(main_theme) if theme_ini: ignored_assets = theme_ini.get("Nikola", "ignored_assets", fallback='').split(',') + ignored_assets = [asset_name.strip() for asset_name in ignored_assets] else: ignored_assets = []
{"golden_diff": "diff --git a/nikola/plugins/task/copy_assets.py b/nikola/plugins/task/copy_assets.py\n--- a/nikola/plugins/task/copy_assets.py\n+++ b/nikola/plugins/task/copy_assets.py\n@@ -67,6 +67,7 @@\n theme_ini = utils.parse_theme_meta(main_theme)\n if theme_ini:\n ignored_assets = theme_ini.get(\"Nikola\", \"ignored_assets\", fallback='').split(',')\n+ ignored_assets = [asset_name.strip() for asset_name in ignored_assets]\n else:\n ignored_assets = []\n", "issue": "Theme meta file ignored_assets key should ignore whitespace\n### Environment (if reporting a bug)\r\n\r\n**Python Version:**\r\n3.5.4\r\n**Nikola Version:**\r\n8.0.0dev HEAD\r\n**Operating System:**\r\ngentoo GNU/Linux\r\n### Description:\r\nCurrent behaviour: The ignored_assets key in theme meta files takes a comma-separated list of filenames, that must not contain leading or trailing whitespace because it's considered to be part of the filename.\r\n\r\nExpected behaviour: The comma-separated list may contain the usual amount of whitespace: foo, bar, baz, etc\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2018 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Copy theme assets into output.\"\"\"\n\n\nimport io\nimport os\n\nfrom nikola.plugin_categories import Task\nfrom nikola import utils\n\n\nclass CopyAssets(Task):\n \"\"\"Copy theme assets into output.\"\"\"\n\n name = \"copy_assets\"\n\n def gen_tasks(self):\n \"\"\"Create tasks to copy the assets of the whole theme chain.\n\n If a file is present on two themes, use the version\n from the \"youngest\" theme.\n \"\"\"\n kw = {\n \"themes\": self.site.THEMES,\n \"translations\": self.site.translations,\n \"files_folders\": self.site.config['FILES_FOLDERS'],\n \"output_folder\": self.site.config['OUTPUT_FOLDER'],\n \"filters\": self.site.config['FILTERS'],\n \"code_color_scheme\": self.site.config['CODE_COLOR_SCHEME'],\n \"code.css_selectors\": ['pre.code', '.highlight pre'],\n \"code.css_head\": '/* code.css file generated by Nikola */\\n',\n \"code.css_close\": \"\\ntable.codetable { width: 100%;} td.linenos {text-align: right; width: 4em;}\\n\",\n }\n tasks = {}\n code_css_path = os.path.join(kw['output_folder'], 'assets', 'css', 'code.css')\n code_css_input = utils.get_asset_path('assets/css/code.css',\n themes=kw['themes'],\n files_folders=kw['files_folders'], output_dir=None)\n yield self.group_task()\n\n main_theme = utils.get_theme_path(kw['themes'][0])\n theme_ini = utils.parse_theme_meta(main_theme)\n if theme_ini:\n ignored_assets = theme_ini.get(\"Nikola\", \"ignored_assets\", fallback='').split(',')\n else:\n ignored_assets = []\n\n for theme_name in kw['themes']:\n src = os.path.join(utils.get_theme_path(theme_name), 'assets')\n dst = os.path.join(kw['output_folder'], 'assets')\n for task in utils.copy_tree(src, dst):\n asset_name = os.path.relpath(task['name'], dst)\n if task['name'] in tasks or asset_name in ignored_assets:\n continue\n tasks[task['name']] = task\n task['uptodate'] = [utils.config_changed(kw, 'nikola.plugins.task.copy_assets')]\n task['basename'] = self.name\n if code_css_input:\n if 'file_dep' not in task:\n task['file_dep'] = []\n task['file_dep'].append(code_css_input)\n yield utils.apply_filters(task, kw['filters'])\n\n # Check whether or not there is a code.css file around.\n if not code_css_input and kw['code_color_scheme']:\n def create_code_css():\n from pygments.formatters import get_formatter_by_name\n formatter = get_formatter_by_name('html', style=kw[\"code_color_scheme\"])\n utils.makedirs(os.path.dirname(code_css_path))\n with io.open(code_css_path, 'w+', encoding='utf8') as outf:\n outf.write(kw[\"code.css_head\"])\n outf.write(formatter.get_style_defs(kw[\"code.css_selectors\"]))\n outf.write(kw[\"code.css_close\"])\n\n if os.path.exists(code_css_path):\n with io.open(code_css_path, 'r', encoding='utf-8') as fh:\n testcontents = fh.read(len(kw[\"code.css_head\"])) == kw[\"code.css_head\"]\n else:\n testcontents = False\n\n task = {\n 'basename': self.name,\n 'name': code_css_path,\n 'targets': [code_css_path],\n 'uptodate': [utils.config_changed(kw, 'nikola.plugins.task.copy_assets'), testcontents],\n 'actions': [(create_code_css, [])],\n 'clean': True,\n }\n yield utils.apply_filters(task, kw['filters'])\n", "path": "nikola/plugins/task/copy_assets.py"}]}
1,977
123
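The Nikola record above is a one-line fix: strip whitespace after splitting the comma-separated `ignored_assets` value. The difference is easy to see in plain Python, with no Nikola imports needed:

```python
raw = "foo.css, bar.js,  baz.png"

before = raw.split(",")
after = [name.strip() for name in raw.split(",")]

print(before)  # ['foo.css', ' bar.js', '  baz.png']  (' bar.js' never matches an asset path)
print(after)   # ['foo.css', 'bar.js', 'baz.png']
```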
gh_patches_debug_7666
rasdani/github-patches
git_diff
tensorflow__addons-2204
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Drop Python3.5 Support Per SIG Build, python3.5 will not be supported in TF2.4 since it has reached end of life. We should align. </issue> <code> [start of setup.py] 1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 # ============================================================================== 15 """TensorFlow Addons. 16 17 TensorFlow Addons is a repository of contributions that conform to well- 18 established API patterns, but implement new functionality not available 19 in core TensorFlow. TensorFlow natively supports a large number of 20 operators, layers, metrics, losses, and optimizers. However, in a fast 21 moving field like ML, there are many interesting new developments that 22 cannot be integrated into core TensorFlow (because their broad 23 applicability is not yet clear, or it is mostly used by a smaller subset 24 of the community). 25 """ 26 27 import os 28 from pathlib import Path 29 import sys 30 31 from datetime import datetime 32 from setuptools import find_packages 33 from setuptools import setup 34 from setuptools.dist import Distribution 35 from setuptools import Extension 36 37 DOCLINES = __doc__.split("\n") 38 39 40 def get_last_commit_time() -> str: 41 string_time = os.getenv("NIGHTLY_TIME").replace('"', "") 42 return datetime.strptime(string_time, "%Y-%m-%dT%H:%M:%SZ").strftime("%Y%m%d%H%M%S") 43 44 45 def get_project_name_version(): 46 # Version 47 version = {} 48 base_dir = os.path.dirname(os.path.abspath(__file__)) 49 with open(os.path.join(base_dir, "tensorflow_addons", "version.py")) as fp: 50 exec(fp.read(), version) 51 52 project_name = "tensorflow-addons" 53 if "--nightly" in sys.argv: 54 project_name = "tfa-nightly" 55 version["__version__"] += get_last_commit_time() 56 sys.argv.remove("--nightly") 57 58 return project_name, version 59 60 61 def get_ext_modules(): 62 ext_modules = [] 63 if "--platlib-patch" in sys.argv: 64 if sys.platform.startswith("linux"): 65 # Manylinux2010 requires a patch for platlib 66 ext_modules = [Extension("_foo", ["stub.cc"])] 67 sys.argv.remove("--platlib-patch") 68 return ext_modules 69 70 71 class BinaryDistribution(Distribution): 72 """This class is needed in order to create OS specific wheels.""" 73 74 def has_ext_modules(self): 75 return True 76 77 78 project_name, version = get_project_name_version() 79 min_tf_version = version["MIN_TF_VERSION"] 80 max_tf_version = version["MAX_TF_VERSION"] 81 setup( 82 name=project_name, 83 version=version["__version__"], 84 description=DOCLINES[0], 85 long_description="\n".join(DOCLINES[2:]), 86 author="Google Inc.", 87 author_email="[email protected]", 88 packages=find_packages(), 89 ext_modules=get_ext_modules(), 90 install_requires=Path("requirements.txt").read_text().splitlines(), 91 extras_require={ 92 "tensorflow": ["tensorflow>={},<{}".format(min_tf_version, max_tf_version)], 93 
"tensorflow-gpu": [ 94 "tensorflow-gpu>={},<{}".format(min_tf_version, max_tf_version) 95 ], 96 "tensorflow-cpu": [ 97 "tensorflow-cpu>={},<{}".format(min_tf_version, max_tf_version) 98 ], 99 }, 100 include_package_data=True, 101 zip_safe=False, 102 distclass=BinaryDistribution, 103 classifiers=[ 104 "Development Status :: 4 - Beta", 105 "Intended Audience :: Developers", 106 "Intended Audience :: Education", 107 "Intended Audience :: Science/Research", 108 "License :: OSI Approved :: Apache Software License", 109 "Programming Language :: Python :: 3", 110 "Programming Language :: Python :: 3.5", 111 "Programming Language :: Python :: 3.6", 112 "Programming Language :: Python :: 3.7", 113 "Programming Language :: Python :: 3.8", 114 "Topic :: Scientific/Engineering :: Mathematics", 115 "Topic :: Software Development :: Libraries :: Python Modules", 116 "Topic :: Software Development :: Libraries", 117 ], 118 license="Apache 2.0", 119 keywords="tensorflow addons machine learning", 120 ) 121 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -107,7 +107,6 @@ "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -107,7 +107,6 @@\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3\",\n- \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n", "issue": "Drop Python3.5 Support\nPer SIG Build, python3.5 will not be supported in TF2.4 since it has reached end of life. We should align.\n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"TensorFlow Addons.\n\nTensorFlow Addons is a repository of contributions that conform to well-\nestablished API patterns, but implement new functionality not available\nin core TensorFlow. TensorFlow natively supports a large number of\noperators, layers, metrics, losses, and optimizers. However, in a fast\nmoving field like ML, there are many interesting new developments that\ncannot be integrated into core TensorFlow (because their broad\napplicability is not yet clear, or it is mostly used by a smaller subset\nof the community).\n\"\"\"\n\nimport os\nfrom pathlib import Path\nimport sys\n\nfrom datetime import datetime\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.dist import Distribution\nfrom setuptools import Extension\n\nDOCLINES = __doc__.split(\"\\n\")\n\n\ndef get_last_commit_time() -> str:\n string_time = os.getenv(\"NIGHTLY_TIME\").replace('\"', \"\")\n return datetime.strptime(string_time, \"%Y-%m-%dT%H:%M:%SZ\").strftime(\"%Y%m%d%H%M%S\")\n\n\ndef get_project_name_version():\n # Version\n version = {}\n base_dir = os.path.dirname(os.path.abspath(__file__))\n with open(os.path.join(base_dir, \"tensorflow_addons\", \"version.py\")) as fp:\n exec(fp.read(), version)\n\n project_name = \"tensorflow-addons\"\n if \"--nightly\" in sys.argv:\n project_name = \"tfa-nightly\"\n version[\"__version__\"] += get_last_commit_time()\n sys.argv.remove(\"--nightly\")\n\n return project_name, version\n\n\ndef get_ext_modules():\n ext_modules = []\n if \"--platlib-patch\" in sys.argv:\n if sys.platform.startswith(\"linux\"):\n # Manylinux2010 requires a patch for platlib\n ext_modules = [Extension(\"_foo\", [\"stub.cc\"])]\n sys.argv.remove(\"--platlib-patch\")\n return ext_modules\n\n\nclass BinaryDistribution(Distribution):\n \"\"\"This class is needed in order to create OS specific wheels.\"\"\"\n\n def has_ext_modules(self):\n return True\n\n\nproject_name, version = get_project_name_version()\nmin_tf_version = version[\"MIN_TF_VERSION\"]\nmax_tf_version = version[\"MAX_TF_VERSION\"]\nsetup(\n name=project_name,\n version=version[\"__version__\"],\n description=DOCLINES[0],\n long_description=\"\\n\".join(DOCLINES[2:]),\n author=\"Google Inc.\",\n 
author_email=\"[email protected]\",\n packages=find_packages(),\n ext_modules=get_ext_modules(),\n install_requires=Path(\"requirements.txt\").read_text().splitlines(),\n extras_require={\n \"tensorflow\": [\"tensorflow>={},<{}\".format(min_tf_version, max_tf_version)],\n \"tensorflow-gpu\": [\n \"tensorflow-gpu>={},<{}\".format(min_tf_version, max_tf_version)\n ],\n \"tensorflow-cpu\": [\n \"tensorflow-cpu>={},<{}\".format(min_tf_version, max_tf_version)\n ],\n },\n include_package_data=True,\n zip_safe=False,\n distclass=BinaryDistribution,\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Topic :: Software Development :: Libraries\",\n ],\n license=\"Apache 2.0\",\n keywords=\"tensorflow addons machine learning\",\n)\n", "path": "setup.py"}]}
1,772
115
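Context for the TensorFlow Addons record above: removing the 3.5 trove classifier is metadata-only. The field that actually blocks installs on an old interpreter is `python_requires`; classifiers are informational and only affect PyPI browsing and filtering. A minimal sketch of that distinction follows; the package name and versions are placeholders, not the project's real follow-up change.

```python
from setuptools import setup

setup(
    name="example-pkg",       # placeholder name
    version="0.0.1",
    python_requires=">=3.6",  # enforced by pip at install time
    classifiers=[
        # Informational only: dropping an entry here does not stop installs.
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ],
)
```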
gh_patches_debug_54533
rasdani/github-patches
git_diff
dbt-labs__dbt-core-7566
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [CT-2552] Pin protobuf to greater than 4.0 Some users were seeing an error: > ImportError: cannot import name 'builder' from 'google.protobuf.internal'. The generated types_pb2.py file was built with protobuf 4 and requires protobuf 4 in order to import "builder". </issue> <code> [start of core/setup.py] 1 #!/usr/bin/env python 2 import os 3 import sys 4 5 if sys.version_info < (3, 7, 2): 6 print("Error: dbt does not support this version of Python.") 7 print("Please upgrade to Python 3.7.2 or higher.") 8 sys.exit(1) 9 10 11 from setuptools import setup 12 13 try: 14 from setuptools import find_namespace_packages 15 except ImportError: 16 # the user has a downlevel version of setuptools. 17 print("Error: dbt requires setuptools v40.1.0 or higher.") 18 print('Please upgrade setuptools with "pip install --upgrade setuptools" ' "and try again") 19 sys.exit(1) 20 21 22 this_directory = os.path.abspath(os.path.dirname(__file__)) 23 with open(os.path.join(this_directory, "README.md")) as f: 24 long_description = f.read() 25 26 27 package_name = "dbt-core" 28 package_version = "1.6.0a1" 29 description = """With dbt, data analysts and engineers can build analytics \ 30 the way engineers build applications.""" 31 32 33 setup( 34 name=package_name, 35 version=package_version, 36 description=description, 37 long_description=long_description, 38 long_description_content_type="text/markdown", 39 author="dbt Labs", 40 author_email="[email protected]", 41 url="https://github.com/dbt-labs/dbt-core", 42 packages=find_namespace_packages(include=["dbt", "dbt.*"]), 43 include_package_data=True, 44 test_suite="test", 45 entry_points={ 46 "console_scripts": ["dbt = dbt.cli.main:cli"], 47 }, 48 install_requires=[ 49 "Jinja2==3.1.2", 50 "agate>=1.6,<1.7.1", 51 "click>=7.0,<9", 52 "colorama>=0.3.9,<0.4.7", 53 "hologram>=0.0.14,<=0.0.16", 54 "isodate>=0.6,<0.7", 55 "logbook>=1.5,<1.6", 56 "mashumaro[msgpack]==3.6", 57 "minimal-snowplow-tracker==0.0.2", 58 "networkx>=2.3,<2.8.1;python_version<'3.8'", 59 "networkx>=2.3,<3;python_version>='3.8'", 60 "packaging>20.9", 61 "sqlparse>=0.2.3,<0.4.4", 62 "dbt-extractor~=0.4.1", 63 "typing-extensions>=3.7.4", 64 "werkzeug>=1,<3", 65 "pathspec>=0.9,<0.12", 66 "protobuf>=3.18.3", 67 "pytz>=2015.7", 68 # the following are all to match snowflake-connector-python 69 "requests<3.0.0", 70 "idna>=2.5,<4", 71 "cffi>=1.9,<2.0.0", 72 "pyyaml>=6.0", 73 ], 74 zip_safe=False, 75 classifiers=[ 76 "Development Status :: 5 - Production/Stable", 77 "License :: OSI Approved :: Apache Software License", 78 "Operating System :: Microsoft :: Windows", 79 "Operating System :: MacOS :: MacOS X", 80 "Operating System :: POSIX :: Linux", 81 "Programming Language :: Python :: 3.7", 82 "Programming Language :: Python :: 3.8", 83 "Programming Language :: Python :: 3.9", 84 "Programming Language :: Python :: 3.10", 85 "Programming Language :: Python :: 3.11", 86 ], 87 python_requires=">=3.7.2", 88 ) 89 [end of core/setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/core/setup.py b/core/setup.py --- a/core/setup.py +++ b/core/setup.py @@ -63,7 +63,7 @@ "typing-extensions>=3.7.4", "werkzeug>=1,<3", "pathspec>=0.9,<0.12", - "protobuf>=3.18.3", + "protobuf>=4.0.0", "pytz>=2015.7", # the following are all to match snowflake-connector-python "requests<3.0.0",
{"golden_diff": "diff --git a/core/setup.py b/core/setup.py\n--- a/core/setup.py\n+++ b/core/setup.py\n@@ -63,7 +63,7 @@\n \"typing-extensions>=3.7.4\",\n \"werkzeug>=1,<3\",\n \"pathspec>=0.9,<0.12\",\n- \"protobuf>=3.18.3\",\n+ \"protobuf>=4.0.0\",\n \"pytz>=2015.7\",\n # the following are all to match snowflake-connector-python\n \"requests<3.0.0\",\n", "issue": "[CT-2552] Pin protobuf to greater than 4.0\nSome users were seeing an error:\r\n\r\n> ImportError: cannot import name 'builder' from 'google.protobuf.internal'.\r\nThe generated types_pb2.py file was built with protobuf 4 and requires protobuf 4 in order to import \"builder\".\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nif sys.version_info < (3, 7, 2):\n print(\"Error: dbt does not support this version of Python.\")\n print(\"Please upgrade to Python 3.7.2 or higher.\")\n sys.exit(1)\n\n\nfrom setuptools import setup\n\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print(\"Error: dbt requires setuptools v40.1.0 or higher.\")\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" ' \"and try again\")\n sys.exit(1)\n\n\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, \"README.md\")) as f:\n long_description = f.read()\n\n\npackage_name = \"dbt-core\"\npackage_version = \"1.6.0a1\"\ndescription = \"\"\"With dbt, data analysts and engineers can build analytics \\\nthe way engineers build applications.\"\"\"\n\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"dbt Labs\",\n author_email=\"[email protected]\",\n url=\"https://github.com/dbt-labs/dbt-core\",\n packages=find_namespace_packages(include=[\"dbt\", \"dbt.*\"]),\n include_package_data=True,\n test_suite=\"test\",\n entry_points={\n \"console_scripts\": [\"dbt = dbt.cli.main:cli\"],\n },\n install_requires=[\n \"Jinja2==3.1.2\",\n \"agate>=1.6,<1.7.1\",\n \"click>=7.0,<9\",\n \"colorama>=0.3.9,<0.4.7\",\n \"hologram>=0.0.14,<=0.0.16\",\n \"isodate>=0.6,<0.7\",\n \"logbook>=1.5,<1.6\",\n \"mashumaro[msgpack]==3.6\",\n \"minimal-snowplow-tracker==0.0.2\",\n \"networkx>=2.3,<2.8.1;python_version<'3.8'\",\n \"networkx>=2.3,<3;python_version>='3.8'\",\n \"packaging>20.9\",\n \"sqlparse>=0.2.3,<0.4.4\",\n \"dbt-extractor~=0.4.1\",\n \"typing-extensions>=3.7.4\",\n \"werkzeug>=1,<3\",\n \"pathspec>=0.9,<0.12\",\n \"protobuf>=3.18.3\",\n \"pytz>=2015.7\",\n # the following are all to match snowflake-connector-python\n \"requests<3.0.0\",\n \"idna>=2.5,<4\",\n \"cffi>=1.9,<2.0.0\",\n \"pyyaml>=6.0\",\n ],\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n ],\n python_requires=\">=3.7.2\",\n)\n", "path": "core/setup.py"}]}
1,577
131
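On the dbt record above: the quoted error arises because protoc-generated `*_pb2.py` files import `google.protobuf.internal.builder`, a module that only exists in newer protobuf releases (it appeared around protobuf 3.20 and is present throughout the 4.x line). A small guard that reproduces or fails fast on the mismatch, with the module path taken from the error message itself:

```python
try:
    from google.protobuf.internal import builder  # noqa: F401  (present in protobuf >= 3.20 / 4.x)
except ImportError as exc:
    raise ImportError(
        "protobuf is too old for the generated types_pb2.py; "
        "install protobuf>=4.0.0 as the pinned requirement now demands"
    ) from exc
```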
gh_patches_debug_4049
rasdani/github-patches
git_diff
strawberry-graphql__strawberry-1811
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Decimal scalars aren't serialized as strings when passed as numbers When using the `Decimal` scalar, if the value passed in is a floating point number, the value isn't serialized as a string, but as if it were still the floating point number which causes precision issues as can be seen in the working example below. [Working example](https://la4de.github.io/strawberry-playground/#UEsDBAoAAAAAAFaAjFQgaUU4EgAAABIAAAAQAAAAcmVxdWlyZW1lbnRzLnR4dHN0cmF3YmVycnktZ3JhcGhxbFBLAwQKAAAAAABWgIxUj3TlatIAAADSAAAACQAAAHNjaGVtYS5weWltcG9ydCBzdHJhd2JlcnJ5CmZyb20gZGVjaW1hbCBpbXBvcnQgRGVjaW1hbAoKQHN0cmF3YmVycnkudHlwZQpjbGFzcyBRdWVyeToKICAgIEBzdHJhd2JlcnJ5LmZpZWxkCiAgICBkZWYgZGVjaW1hbChzZWxmLCB2YWx1ZTogRGVjaW1hbCkgLT4gRGVjaW1hbDoKICAgICAgICByZXR1cm4gdmFsdWUKCnNjaGVtYSA9IHN0cmF3YmVycnkuU2NoZW1hKHF1ZXJ5PVF1ZXJ5KVBLAwQKAAAAAABWgIxUERrh0UMAAABDAAAACQAAAHF1ZXJ5LmdxbHsKICBkZWNpbWFsKHZhbHVlOiAzLjE0KQogIGFub3RoZXJEZWNpbWFsOiBkZWNpbWFsKHZhbHVlOiAiMy4xNCIpCn1QSwMECgAAAAAAVoCMVKi7vnMDAAAAAwAAAA4AAAB2YXJpYWJsZXMuanNvbnsKfVBLAQIUAAoAAAAAAFaAjFQgaUU4EgAAABIAAAAQAAAAAAAAAAAAAAAAAAAAAAByZXF1aXJlbWVudHMudHh0UEsBAhQACgAAAAAAVoCMVI905WrSAAAA0gAAAAkAAAAAAAAAAAAAAAAAQAAAAHNjaGVtYS5weVBLAQIUAAoAAAAAAFaAjFQRGuHRQwAAAEMAAAAJAAAAAAAAAAAAAAAAADkBAABxdWVyeS5ncWxQSwECFAAKAAAAAABWgIxUqLu+cwMAAAADAAAADgAAAAAAAAAAAAAAAACjAQAAdmFyaWFibGVzLmpzb25QSwUGAAAAAAQABADoAAAA0gEAAAAA) Actual output - `"3.140000000000000124344978758017532527446746826171875"` Expected output - `"3.14"` As text: ``` @strawberry.type class DecimalResponse: value: Decimal @strawberry.field def decimals( dec_value: Decimal, ) -> DecimalResponse: return DecimalResponse(value=dec_value) ``` ``` mutation decimals($dec_value: Decimal!) { decimals( decValue: $dec_value ) { ... on DecimalResponse { value } } } { "dec_value": 3.14 } ``` </issue> <code> [start of strawberry/schema/types/base_scalars.py] 1 import datetime 2 import decimal 3 import uuid 4 from operator import methodcaller 5 from typing import Callable 6 7 import dateutil.parser 8 9 from graphql import GraphQLError 10 11 from strawberry.custom_scalar import scalar 12 13 14 def wrap_parser(parser: Callable, type_: str) -> Callable: 15 def inner(value: str): 16 try: 17 return parser(value) 18 except ValueError as e: 19 raise GraphQLError(f'Value cannot represent a {type_}: "{value}". 
{e}') 20 21 return inner 22 23 24 def parse_decimal(value: str) -> decimal.Decimal: 25 try: 26 return decimal.Decimal(value) 27 except decimal.DecimalException: 28 raise GraphQLError(f'Value cannot represent a Decimal: "{value}".') 29 30 31 isoformat = methodcaller("isoformat") 32 33 34 Date = scalar( 35 datetime.date, 36 name="Date", 37 description="Date (isoformat)", 38 serialize=isoformat, 39 parse_value=wrap_parser(datetime.date.fromisoformat, "Date"), 40 ) 41 DateTime = scalar( 42 datetime.datetime, 43 name="DateTime", 44 description="Date with time (isoformat)", 45 serialize=isoformat, 46 parse_value=wrap_parser(dateutil.parser.isoparse, "DateTime"), 47 ) 48 Time = scalar( 49 datetime.time, 50 name="Time", 51 description="Time (isoformat)", 52 serialize=isoformat, 53 parse_value=wrap_parser(datetime.time.fromisoformat, "Time"), 54 ) 55 56 Decimal = scalar( 57 decimal.Decimal, 58 name="Decimal", 59 description="Decimal (fixed-point)", 60 serialize=str, 61 parse_value=parse_decimal, 62 ) 63 64 UUID = scalar( 65 uuid.UUID, 66 name="UUID", 67 serialize=str, 68 parse_value=wrap_parser(uuid.UUID, "UUID"), 69 ) 70 71 72 def _verify_void(x) -> None: 73 if x is not None: 74 raise ValueError(f"Expected 'None', got '{x}'") 75 76 77 Void = scalar( 78 type(None), 79 name="Void", 80 serialize=_verify_void, 81 parse_value=_verify_void, 82 description="Represents NULL values", 83 ) 84 [end of strawberry/schema/types/base_scalars.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/strawberry/schema/types/base_scalars.py b/strawberry/schema/types/base_scalars.py --- a/strawberry/schema/types/base_scalars.py +++ b/strawberry/schema/types/base_scalars.py @@ -21,9 +21,9 @@ return inner -def parse_decimal(value: str) -> decimal.Decimal: +def parse_decimal(value: object) -> decimal.Decimal: try: - return decimal.Decimal(value) + return decimal.Decimal(str(value)) except decimal.DecimalException: raise GraphQLError(f'Value cannot represent a Decimal: "{value}".')
{"golden_diff": "diff --git a/strawberry/schema/types/base_scalars.py b/strawberry/schema/types/base_scalars.py\n--- a/strawberry/schema/types/base_scalars.py\n+++ b/strawberry/schema/types/base_scalars.py\n@@ -21,9 +21,9 @@\n return inner\n \n \n-def parse_decimal(value: str) -> decimal.Decimal:\n+def parse_decimal(value: object) -> decimal.Decimal:\n try:\n- return decimal.Decimal(value)\n+ return decimal.Decimal(str(value))\n except decimal.DecimalException:\n raise GraphQLError(f'Value cannot represent a Decimal: \"{value}\".')\n", "issue": "Decimal scalars aren't serialized as strings when passed as numbers\nWhen using the `Decimal` scalar, if the value passed in is a floating point number, the value isn't serialized as a string, but as if it were still the floating point number which causes precision issues as can be seen in the working example below.\r\n\r\n\r\n[Working example](https://la4de.github.io/strawberry-playground/#UEsDBAoAAAAAAFaAjFQgaUU4EgAAABIAAAAQAAAAcmVxdWlyZW1lbnRzLnR4dHN0cmF3YmVycnktZ3JhcGhxbFBLAwQKAAAAAABWgIxUj3TlatIAAADSAAAACQAAAHNjaGVtYS5weWltcG9ydCBzdHJhd2JlcnJ5CmZyb20gZGVjaW1hbCBpbXBvcnQgRGVjaW1hbAoKQHN0cmF3YmVycnkudHlwZQpjbGFzcyBRdWVyeToKICAgIEBzdHJhd2JlcnJ5LmZpZWxkCiAgICBkZWYgZGVjaW1hbChzZWxmLCB2YWx1ZTogRGVjaW1hbCkgLT4gRGVjaW1hbDoKICAgICAgICByZXR1cm4gdmFsdWUKCnNjaGVtYSA9IHN0cmF3YmVycnkuU2NoZW1hKHF1ZXJ5PVF1ZXJ5KVBLAwQKAAAAAABWgIxUERrh0UMAAABDAAAACQAAAHF1ZXJ5LmdxbHsKICBkZWNpbWFsKHZhbHVlOiAzLjE0KQogIGFub3RoZXJEZWNpbWFsOiBkZWNpbWFsKHZhbHVlOiAiMy4xNCIpCn1QSwMECgAAAAAAVoCMVKi7vnMDAAAAAwAAAA4AAAB2YXJpYWJsZXMuanNvbnsKfVBLAQIUAAoAAAAAAFaAjFQgaUU4EgAAABIAAAAQAAAAAAAAAAAAAAAAAAAAAAByZXF1aXJlbWVudHMudHh0UEsBAhQACgAAAAAAVoCMVI905WrSAAAA0gAAAAkAAAAAAAAAAAAAAAAAQAAAAHNjaGVtYS5weVBLAQIUAAoAAAAAAFaAjFQRGuHRQwAAAEMAAAAJAAAAAAAAAAAAAAAAADkBAABxdWVyeS5ncWxQSwECFAAKAAAAAABWgIxUqLu+cwMAAAADAAAADgAAAAAAAAAAAAAAAACjAQAAdmFyaWFibGVzLmpzb25QSwUGAAAAAAQABADoAAAA0gEAAAAA)\r\n\r\nActual output - `\"3.140000000000000124344978758017532527446746826171875\"`\r\nExpected output - `\"3.14\"`\r\n\r\nAs text:\r\n\r\n```\r\[email protected]\r\nclass DecimalResponse:\r\n value: Decimal\r\n\r\n\r\[email protected]\r\ndef decimals(\r\n dec_value: Decimal,\r\n) -> DecimalResponse:\r\n return DecimalResponse(value=dec_value)\r\n ```\r\n```\r\nmutation decimals($dec_value: Decimal!) {\r\n decimals(\r\n decValue: $dec_value\r\n ) {\r\n ... on DecimalResponse {\r\n value\r\n }\r\n }\r\n}\r\n{\r\n \"dec_value\": 3.14\r\n}\r\n```\r\n\n", "before_files": [{"content": "import datetime\nimport decimal\nimport uuid\nfrom operator import methodcaller\nfrom typing import Callable\n\nimport dateutil.parser\n\nfrom graphql import GraphQLError\n\nfrom strawberry.custom_scalar import scalar\n\n\ndef wrap_parser(parser: Callable, type_: str) -> Callable:\n def inner(value: str):\n try:\n return parser(value)\n except ValueError as e:\n raise GraphQLError(f'Value cannot represent a {type_}: \"{value}\". 
{e}')\n\n return inner\n\n\ndef parse_decimal(value: str) -> decimal.Decimal:\n try:\n return decimal.Decimal(value)\n except decimal.DecimalException:\n raise GraphQLError(f'Value cannot represent a Decimal: \"{value}\".')\n\n\nisoformat = methodcaller(\"isoformat\")\n\n\nDate = scalar(\n datetime.date,\n name=\"Date\",\n description=\"Date (isoformat)\",\n serialize=isoformat,\n parse_value=wrap_parser(datetime.date.fromisoformat, \"Date\"),\n)\nDateTime = scalar(\n datetime.datetime,\n name=\"DateTime\",\n description=\"Date with time (isoformat)\",\n serialize=isoformat,\n parse_value=wrap_parser(dateutil.parser.isoparse, \"DateTime\"),\n)\nTime = scalar(\n datetime.time,\n name=\"Time\",\n description=\"Time (isoformat)\",\n serialize=isoformat,\n parse_value=wrap_parser(datetime.time.fromisoformat, \"Time\"),\n)\n\nDecimal = scalar(\n decimal.Decimal,\n name=\"Decimal\",\n description=\"Decimal (fixed-point)\",\n serialize=str,\n parse_value=parse_decimal,\n)\n\nUUID = scalar(\n uuid.UUID,\n name=\"UUID\",\n serialize=str,\n parse_value=wrap_parser(uuid.UUID, \"UUID\"),\n)\n\n\ndef _verify_void(x) -> None:\n if x is not None:\n raise ValueError(f\"Expected 'None', got '{x}'\")\n\n\nVoid = scalar(\n type(None),\n name=\"Void\",\n serialize=_verify_void,\n parse_value=_verify_void,\n description=\"Represents NULL values\",\n)\n", "path": "strawberry/schema/types/base_scalars.py"}]}
1,963
134
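The precision loss described in the record above can be reproduced with the standard library alone; the following is a minimal sketch (no GraphQL or strawberry involved) of why the golden diff routes the value through `str()` before constructing the `Decimal`:

```python
import decimal

# A float literal such as 3.14 is stored in binary, so passing it directly
# to Decimal exposes its full binary expansion rather than the short form.
print(decimal.Decimal(3.14))
# -> 3.140000000000000124344978758017532527446746826171875
print(decimal.Decimal(str(3.14)))
# -> 3.14  (round-tripping through str() keeps the intended representation)
```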
gh_patches_debug_6817
rasdani/github-patches
git_diff
SeldonIO__MLServer-866
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add more debugging information I am running a custom Python model in MLServer, and it fails with an error. The error itself is not interesting, what's important is that MLServer gives very little helpful information to debug it. After sending a request with grpcurl here is the entire response: ```` ERROR: Code: Unknown Message: Unexpected <class 'TypeError'>: list indices must be integers or slices, not str ```` No stack trace, no line numbers or class names. The only thing that appears in the logs is > scv2-mlserver-1 | 2022-10-19 09:10:44,218 [mlserver.grpc] INFO - /inference.GRPCInferenceService/ModelInfer Would be great if MLServer propagated more debugging info to the response message and to the log. </issue> <code> [start of mlserver/grpc/utils.py] 1 import grpc 2 3 from typing import Callable, Dict, Tuple 4 from fastapi import status 5 6 from grpc import ServicerContext 7 8 from .logging import logger 9 from ..errors import MLServerError 10 11 12 STATUS_CODE_MAPPING = { 13 status.HTTP_400_BAD_REQUEST: grpc.StatusCode.INVALID_ARGUMENT, 14 status.HTTP_404_NOT_FOUND: grpc.StatusCode.NOT_FOUND, 15 status.HTTP_422_UNPROCESSABLE_ENTITY: grpc.StatusCode.FAILED_PRECONDITION, 16 status.HTTP_500_INTERNAL_SERVER_ERROR: grpc.StatusCode.INTERNAL, 17 } 18 19 20 def to_headers(context: ServicerContext) -> Dict[str, str]: 21 metadata = context.invocation_metadata() 22 if hasattr(context, "trailing_metadata"): 23 # NOTE: Older versions of `grpcio` (e.g. `grpcio==1.34.0`) don't expose 24 # access to the trailing metadata on the service side 25 metadata += context.trailing_metadata() 26 headers = {} 27 for metadatum in metadata: 28 headers[metadatum.key] = metadatum.value 29 30 return headers 31 32 33 def to_metadata(headers: Dict[str, str]) -> Tuple[Tuple[str, str], ...]: 34 return tuple((key.lower(), value) for key, value in headers.items()) 35 36 37 def _grpc_status_code(err: MLServerError): 38 return STATUS_CODE_MAPPING.get(err.status_code, grpc.StatusCode.UNKNOWN) 39 40 41 def handle_mlserver_error(f: Callable): 42 async def _inner(self, request, context): 43 try: 44 return await f(self, request, context) 45 except MLServerError as err: 46 logger.error(err) 47 await context.abort(code=_grpc_status_code(err), details=str(err)) 48 49 return _inner 50 [end of mlserver/grpc/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mlserver/grpc/utils.py b/mlserver/grpc/utils.py --- a/mlserver/grpc/utils.py +++ b/mlserver/grpc/utils.py @@ -43,7 +43,10 @@ try: return await f(self, request, context) except MLServerError as err: - logger.error(err) + logger.exception(err) await context.abort(code=_grpc_status_code(err), details=str(err)) + except Exception as err: + logger.exception(err) + await context.abort(code=grpc.StatusCode.INTERNAL, details=str(err)) return _inner
{"golden_diff": "diff --git a/mlserver/grpc/utils.py b/mlserver/grpc/utils.py\n--- a/mlserver/grpc/utils.py\n+++ b/mlserver/grpc/utils.py\n@@ -43,7 +43,10 @@\n try:\n return await f(self, request, context)\n except MLServerError as err:\n- logger.error(err)\n+ logger.exception(err)\n await context.abort(code=_grpc_status_code(err), details=str(err))\n+ except Exception as err:\n+ logger.exception(err)\n+ await context.abort(code=grpc.StatusCode.INTERNAL, details=str(err))\n \n return _inner\n", "issue": "Add more debugging information\nI am running a custom Python model in MLServer, and it fails with an error. The error itself is not interesting, what's important is that MLServer gives very little helpful information to debug it.\r\n\r\nAfter sending a request with grpcurl here is the entire response:\r\n````\r\nERROR:\r\n Code: Unknown\r\n Message: Unexpected <class 'TypeError'>: list indices must be integers or slices, not str\r\n````\r\nNo stack trace, no line numbers or class names. The only thing that appears in the logs is\r\n> scv2-mlserver-1 | 2022-10-19 09:10:44,218 [mlserver.grpc] INFO - /inference.GRPCInferenceService/ModelInfer\r\n\r\nWould be great if MLServer propagated more debugging info to the response message and to the log.\n", "before_files": [{"content": "import grpc\n\nfrom typing import Callable, Dict, Tuple\nfrom fastapi import status\n\nfrom grpc import ServicerContext\n\nfrom .logging import logger\nfrom ..errors import MLServerError\n\n\nSTATUS_CODE_MAPPING = {\n status.HTTP_400_BAD_REQUEST: grpc.StatusCode.INVALID_ARGUMENT,\n status.HTTP_404_NOT_FOUND: grpc.StatusCode.NOT_FOUND,\n status.HTTP_422_UNPROCESSABLE_ENTITY: grpc.StatusCode.FAILED_PRECONDITION,\n status.HTTP_500_INTERNAL_SERVER_ERROR: grpc.StatusCode.INTERNAL,\n}\n\n\ndef to_headers(context: ServicerContext) -> Dict[str, str]:\n metadata = context.invocation_metadata()\n if hasattr(context, \"trailing_metadata\"):\n # NOTE: Older versions of `grpcio` (e.g. `grpcio==1.34.0`) don't expose\n # access to the trailing metadata on the service side\n metadata += context.trailing_metadata()\n headers = {}\n for metadatum in metadata:\n headers[metadatum.key] = metadatum.value\n\n return headers\n\n\ndef to_metadata(headers: Dict[str, str]) -> Tuple[Tuple[str, str], ...]:\n return tuple((key.lower(), value) for key, value in headers.items())\n\n\ndef _grpc_status_code(err: MLServerError):\n return STATUS_CODE_MAPPING.get(err.status_code, grpc.StatusCode.UNKNOWN)\n\n\ndef handle_mlserver_error(f: Callable):\n async def _inner(self, request, context):\n try:\n return await f(self, request, context)\n except MLServerError as err:\n logger.error(err)\n await context.abort(code=_grpc_status_code(err), details=str(err))\n\n return _inner\n", "path": "mlserver/grpc/utils.py"}]}
1,177
127
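The patch above swaps `logger.error` for `logger.exception` and adds a catch-all handler; the difference is observable with plain `logging`. A minimal sketch reproducing the issue's `TypeError` and showing that only `exception()` records the traceback the reporter was missing:

```python
import logging

logging.basicConfig()
logger = logging.getLogger("demo")

try:
    [1, 2, 3]["a"]  # raises: list indices must be integers or slices, not str
except TypeError as err:
    logger.error(err)      # logs the message only -- what the reporter saw
    logger.exception(err)  # logs the message plus the full stack trace
```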
gh_patches_debug_38523
rasdani/github-patches
git_diff
ietf-tools__datatracker-5167
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> /sync/discrepancies is missing `.distinct()` At the moment in the second table: ![image](https://user-images.githubusercontent.com/10996692/218182843-40d6daeb-ea79-4177-ac46-52d0ede1f032.png) Likely this will not show the same when viewed shortly after now, as I expect the state to be reconciled quickly. </issue> <code> [start of ietf/sync/discrepancies.py] 1 from django.db import models 2 from ietf.doc.models import Document, State 3 4 def find_discrepancies(): 5 res = [] 6 7 title = "Internet-Drafts that have been sent to the RFC Editor but do not have an RFC Editor state" 8 9 docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type="draft-iesg", slug__in=("ann", "rfcqueue")))).exclude(states__in=list(State.objects.filter(used=True, type="draft-rfceditor"))) 10 11 res.append((title, docs)) 12 13 title = "Internet-Drafts that have the IANA Action state \"In Progress\" but do not have a \"IANA\" RFC-Editor state/tag" 14 15 docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type="draft-iana-action", slug__in=("inprog",)))).exclude(tags="iana").exclude(states__in=list(State.objects.filter(used=True, type="draft-rfceditor", slug="iana"))) 16 17 res.append((title, docs)) 18 19 title = "Internet-Drafts that have the IANA Action state \"Waiting on RFC Editor\" or \"RFC-Ed-Ack\" but are in the RFC Editor state \"IANA\"/tagged with \"IANA\"" 20 21 docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type="draft-iana-action", slug__in=("waitrfc", "rfcedack")))).filter(models.Q(tags="iana") | models.Q(states__in=list(State.objects.filter(used=True, type="draft-rfceditor", slug="iana")))) 22 23 res.append((title, docs)) 24 25 title = "Internet-Drafts that have a state other than \"RFC Ed Queue\", \"RFC Published\" or \"Sent to the RFC Editor\" and have an RFC Editor or IANA Action state" 26 27 docs = Document.objects.exclude(states__in=list(State.objects.filter(used=True, type="draft-iesg", slug__in=("rfcqueue", "pub"))) + list(State.objects.filter(used=True, type__in=("draft-stream-iab", "draft-stream-ise", "draft-stream-irtf"), slug="rfc-edit"))).filter(states__in=list(State.objects.filter(used=True, type__in=("draft-iana-action", "draft-rfceditor")))) 28 29 res.append((title, docs)) 30 31 for _, docs in res: 32 for d in docs: 33 d.iesg_state = d.get_state("draft-iesg") 34 d.rfc_state = d.get_state("draft-rfceditor") 35 d.iana_action_state = d.get_state("draft-iana-action") 36 37 return res 38 39 [end of ietf/sync/discrepancies.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ietf/sync/discrepancies.py b/ietf/sync/discrepancies.py --- a/ietf/sync/discrepancies.py +++ b/ietf/sync/discrepancies.py @@ -6,25 +6,25 @@ title = "Internet-Drafts that have been sent to the RFC Editor but do not have an RFC Editor state" - docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type="draft-iesg", slug__in=("ann", "rfcqueue")))).exclude(states__in=list(State.objects.filter(used=True, type="draft-rfceditor"))) + docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type="draft-iesg", slug__in=("ann", "rfcqueue")))).exclude(states__in=list(State.objects.filter(used=True, type="draft-rfceditor"))).distinct() res.append((title, docs)) title = "Internet-Drafts that have the IANA Action state \"In Progress\" but do not have a \"IANA\" RFC-Editor state/tag" - docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type="draft-iana-action", slug__in=("inprog",)))).exclude(tags="iana").exclude(states__in=list(State.objects.filter(used=True, type="draft-rfceditor", slug="iana"))) + docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type="draft-iana-action", slug__in=("inprog",)))).exclude(tags="iana").exclude(states__in=list(State.objects.filter(used=True, type="draft-rfceditor", slug="iana"))).distinct() res.append((title, docs)) title = "Internet-Drafts that have the IANA Action state \"Waiting on RFC Editor\" or \"RFC-Ed-Ack\" but are in the RFC Editor state \"IANA\"/tagged with \"IANA\"" - docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type="draft-iana-action", slug__in=("waitrfc", "rfcedack")))).filter(models.Q(tags="iana") | models.Q(states__in=list(State.objects.filter(used=True, type="draft-rfceditor", slug="iana")))) + docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type="draft-iana-action", slug__in=("waitrfc", "rfcedack")))).filter(models.Q(tags="iana") | models.Q(states__in=list(State.objects.filter(used=True, type="draft-rfceditor", slug="iana")))).distinct() res.append((title, docs)) title = "Internet-Drafts that have a state other than \"RFC Ed Queue\", \"RFC Published\" or \"Sent to the RFC Editor\" and have an RFC Editor or IANA Action state" - docs = Document.objects.exclude(states__in=list(State.objects.filter(used=True, type="draft-iesg", slug__in=("rfcqueue", "pub"))) + list(State.objects.filter(used=True, type__in=("draft-stream-iab", "draft-stream-ise", "draft-stream-irtf"), slug="rfc-edit"))).filter(states__in=list(State.objects.filter(used=True, type__in=("draft-iana-action", "draft-rfceditor")))) + docs = Document.objects.exclude(states__in=list(State.objects.filter(used=True, type="draft-iesg", slug__in=("rfcqueue", "pub"))) + list(State.objects.filter(used=True, type__in=("draft-stream-iab", "draft-stream-ise", "draft-stream-irtf"), slug="rfc-edit"))).filter(states__in=list(State.objects.filter(used=True, type__in=("draft-iana-action", "draft-rfceditor")))).distinct() res.append((title, docs))
{"golden_diff": "diff --git a/ietf/sync/discrepancies.py b/ietf/sync/discrepancies.py\n--- a/ietf/sync/discrepancies.py\n+++ b/ietf/sync/discrepancies.py\n@@ -6,25 +6,25 @@\n \n title = \"Internet-Drafts that have been sent to the RFC Editor but do not have an RFC Editor state\"\n \n- docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type=\"draft-iesg\", slug__in=(\"ann\", \"rfcqueue\")))).exclude(states__in=list(State.objects.filter(used=True, type=\"draft-rfceditor\")))\n+ docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type=\"draft-iesg\", slug__in=(\"ann\", \"rfcqueue\")))).exclude(states__in=list(State.objects.filter(used=True, type=\"draft-rfceditor\"))).distinct()\n \n res.append((title, docs))\n \n title = \"Internet-Drafts that have the IANA Action state \\\"In Progress\\\" but do not have a \\\"IANA\\\" RFC-Editor state/tag\"\n \n- docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type=\"draft-iana-action\", slug__in=(\"inprog\",)))).exclude(tags=\"iana\").exclude(states__in=list(State.objects.filter(used=True, type=\"draft-rfceditor\", slug=\"iana\")))\n+ docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type=\"draft-iana-action\", slug__in=(\"inprog\",)))).exclude(tags=\"iana\").exclude(states__in=list(State.objects.filter(used=True, type=\"draft-rfceditor\", slug=\"iana\"))).distinct()\n \n res.append((title, docs))\n \n title = \"Internet-Drafts that have the IANA Action state \\\"Waiting on RFC Editor\\\" or \\\"RFC-Ed-Ack\\\" but are in the RFC Editor state \\\"IANA\\\"/tagged with \\\"IANA\\\"\"\n \n- docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type=\"draft-iana-action\", slug__in=(\"waitrfc\", \"rfcedack\")))).filter(models.Q(tags=\"iana\") | models.Q(states__in=list(State.objects.filter(used=True, type=\"draft-rfceditor\", slug=\"iana\"))))\n+ docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type=\"draft-iana-action\", slug__in=(\"waitrfc\", \"rfcedack\")))).filter(models.Q(tags=\"iana\") | models.Q(states__in=list(State.objects.filter(used=True, type=\"draft-rfceditor\", slug=\"iana\")))).distinct()\n \n res.append((title, docs))\n \n title = \"Internet-Drafts that have a state other than \\\"RFC Ed Queue\\\", \\\"RFC Published\\\" or \\\"Sent to the RFC Editor\\\" and have an RFC Editor or IANA Action state\"\n \n- docs = Document.objects.exclude(states__in=list(State.objects.filter(used=True, type=\"draft-iesg\", slug__in=(\"rfcqueue\", \"pub\"))) + list(State.objects.filter(used=True, type__in=(\"draft-stream-iab\", \"draft-stream-ise\", \"draft-stream-irtf\"), slug=\"rfc-edit\"))).filter(states__in=list(State.objects.filter(used=True, type__in=(\"draft-iana-action\", \"draft-rfceditor\"))))\n+ docs = Document.objects.exclude(states__in=list(State.objects.filter(used=True, type=\"draft-iesg\", slug__in=(\"rfcqueue\", \"pub\"))) + list(State.objects.filter(used=True, type__in=(\"draft-stream-iab\", \"draft-stream-ise\", \"draft-stream-irtf\"), slug=\"rfc-edit\"))).filter(states__in=list(State.objects.filter(used=True, type__in=(\"draft-iana-action\", \"draft-rfceditor\")))).distinct()\n \n res.append((title, docs))\n", "issue": "/sync/discrepancies is missing `.distinct()`\nAt the moment in the second table:\r\n![image](https://user-images.githubusercontent.com/10996692/218182843-40d6daeb-ea79-4177-ac46-52d0ede1f032.png)\r\nLikely this will not show the same when viewed shortly after now, 
as I expect the state to be reconciled quickly.\n", "before_files": [{"content": "from django.db import models\nfrom ietf.doc.models import Document, State\n\ndef find_discrepancies():\n res = []\n\n title = \"Internet-Drafts that have been sent to the RFC Editor but do not have an RFC Editor state\"\n\n docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type=\"draft-iesg\", slug__in=(\"ann\", \"rfcqueue\")))).exclude(states__in=list(State.objects.filter(used=True, type=\"draft-rfceditor\")))\n\n res.append((title, docs))\n\n title = \"Internet-Drafts that have the IANA Action state \\\"In Progress\\\" but do not have a \\\"IANA\\\" RFC-Editor state/tag\"\n\n docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type=\"draft-iana-action\", slug__in=(\"inprog\",)))).exclude(tags=\"iana\").exclude(states__in=list(State.objects.filter(used=True, type=\"draft-rfceditor\", slug=\"iana\")))\n\n res.append((title, docs))\n\n title = \"Internet-Drafts that have the IANA Action state \\\"Waiting on RFC Editor\\\" or \\\"RFC-Ed-Ack\\\" but are in the RFC Editor state \\\"IANA\\\"/tagged with \\\"IANA\\\"\"\n\n docs = Document.objects.filter(states__in=list(State.objects.filter(used=True, type=\"draft-iana-action\", slug__in=(\"waitrfc\", \"rfcedack\")))).filter(models.Q(tags=\"iana\") | models.Q(states__in=list(State.objects.filter(used=True, type=\"draft-rfceditor\", slug=\"iana\"))))\n\n res.append((title, docs))\n\n title = \"Internet-Drafts that have a state other than \\\"RFC Ed Queue\\\", \\\"RFC Published\\\" or \\\"Sent to the RFC Editor\\\" and have an RFC Editor or IANA Action state\"\n\n docs = Document.objects.exclude(states__in=list(State.objects.filter(used=True, type=\"draft-iesg\", slug__in=(\"rfcqueue\", \"pub\"))) + list(State.objects.filter(used=True, type__in=(\"draft-stream-iab\", \"draft-stream-ise\", \"draft-stream-irtf\"), slug=\"rfc-edit\"))).filter(states__in=list(State.objects.filter(used=True, type__in=(\"draft-iana-action\", \"draft-rfceditor\"))))\n\n res.append((title, docs))\n\n for _, docs in res:\n for d in docs:\n d.iesg_state = d.get_state(\"draft-iesg\")\n d.rfc_state = d.get_state(\"draft-rfceditor\")\n d.iana_action_state = d.get_state(\"draft-iana-action\")\n\n return res\n\n", "path": "ietf/sync/discrepancies.py"}]}
1,301
855
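For context on the fix above: filtering on `states__in` joins once per matching state, so a document that is in two matching states comes back twice until `.distinct()` collapses the rows. A database-free Python analogue of the duplication:

```python
matching = {"ann", "rfcqueue"}
doc_states = {"draft-x": ["ann", "rfcqueue"], "draft-y": ["pub"]}

# One row per (document, matching state) pair, as in the underlying SQL join.
rows = [doc for doc, states in doc_states.items() for s in states if s in matching]
print(rows)               # ['draft-x', 'draft-x'] -- the duplicate in the screenshot
print(sorted(set(rows)))  # ['draft-x'] -- the effect of adding .distinct()
```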
gh_patches_debug_9564
rasdani/github-patches
git_diff
watchdogpolska__small_eod-494
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Expose the id in the API for tags and documentType
The id for tags and documentType is necessary for building a list of these elements in React.
</issue> <code> [start of backend-project/small_eod/letters/serializers.py] 1 from uuid import uuid4 
2 from django.conf import settings 
3 from rest_framework import serializers 
4 from .models import Letter, DocumentType 
5 from ..generic.serializers import UserLogModelSerializer 
6 from ..cases.models import Case 
7 from ..institutions.models import Institution 
8 from ..channels.models import Channel 
9 from ..files.apps import minio_app 
10 from ..files.serializers import FileSerializer 
11 
12 
13 class DocumentTypeSerializer(serializers.ModelSerializer): 
14     class Meta: 
15         model = DocumentType 
16         fields = ["name"] 
17 
18 
19 class LetterSerializer(UserLogModelSerializer): 
20     document_type = serializers.PrimaryKeyRelatedField( 
21         many=False, default=None, queryset=DocumentType.objects.all() 
22     ) 
23     case = serializers.PrimaryKeyRelatedField( 
24         many=False, default=None, queryset=Case.objects.all() 
25     ) 
26     institution = serializers.PrimaryKeyRelatedField( 
27         many=False, default=None, queryset=Institution.objects.all() 
28     ) 
29     channel = serializers.PrimaryKeyRelatedField( 
30         many=False, default=None, queryset=Channel.objects.all() 
31     ) 
32     attachments = FileSerializer(many=True, read_only=True) 
33 
34     class Meta: 
35         model = Letter 
36         fields = [ 
37             "id", 
38             "direction", 
39             "channel", 
40             "final", 
41             "date", 
42             "reference_number", 
43             "institution", 
44             "case", 
45             "attachments", 
46             "ordering", 
47             "comment", 
48             "excerpt", 
49             "document_type", 
50             "created_on", 
51             "created_by", 
52             "modified_on", 
53             "modified_by", 
54         ] 
55 
56     def create(self, validated_data): 
57         channel = validated_data.pop("channel") 
58         document_type = validated_data.pop("document_type") 
59         institution = validated_data.pop("institution") 
60         case = validated_data.pop("case") 
61 
62         letter = super().create(validated_data) 
63         letter.channel = channel 
64         letter.document_type = document_type 
65         letter.institution = institution 
66         letter.case = case 
67         letter.save() 
68         return letter 
69 
70     def update(self, instance, validated_data): 
71         """ 
72         nested - variable storing representations of the nested objects 
73         of LetterSerializer (Channel, Address and DocumentType). 
74         Iterating over those 3 and updating fields of the related objects, 
75         using key-value pairs from PATCH request. 
76 """ 77 nested = [] 78 for nested_object in nested: 79 for attr, value in nested_object["data"].items(): 80 setattr(nested_object["instance"], attr, value) 81 nested_object["instance"].save() 82 return super().update(instance, validated_data) 83 84 85 class SignRequestSerializer(serializers.Serializer): 86 name = serializers.CharField(max_length=200) 87 method = serializers.CharField(read_only=True) 88 url = serializers.CharField(read_only=True) 89 formData = serializers.DictField(read_only=True, child=serializers.CharField()) 90 path = serializers.CharField(read_only=True) 91 92 def create(self, validated_data): 93 path = f'{uuid4()}/{validated_data["name"]}' 94 url, form_data = minio_app.presigned_post_form_data(settings.MINIO_BUCKET, path) 95 return { 96 "name": validated_data["name"], 97 "method": "POST", 98 "url": url, 99 "formData": form_data, 100 "path": path, 101 } 102 [end of backend-project/small_eod/letters/serializers.py] [start of backend-project/small_eod/tags/serializers.py] 1 from rest_framework import serializers 2 from .models import Tag 3 4 5 class TagSerializer(serializers.ModelSerializer): 6 class Meta: 7 model = Tag 8 fields = [ 9 "name", 10 ] 11 [end of backend-project/small_eod/tags/serializers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/backend-project/small_eod/letters/serializers.py b/backend-project/small_eod/letters/serializers.py --- a/backend-project/small_eod/letters/serializers.py +++ b/backend-project/small_eod/letters/serializers.py @@ -13,7 +13,7 @@ class DocumentTypeSerializer(serializers.ModelSerializer): class Meta: model = DocumentType - fields = ["name"] + fields = ["id", "name"] class LetterSerializer(UserLogModelSerializer): diff --git a/backend-project/small_eod/tags/serializers.py b/backend-project/small_eod/tags/serializers.py --- a/backend-project/small_eod/tags/serializers.py +++ b/backend-project/small_eod/tags/serializers.py @@ -6,5 +6,6 @@ class Meta: model = Tag fields = [ + "id", "name", ]
{"golden_diff": "diff --git a/backend-project/small_eod/letters/serializers.py b/backend-project/small_eod/letters/serializers.py\n--- a/backend-project/small_eod/letters/serializers.py\n+++ b/backend-project/small_eod/letters/serializers.py\n@@ -13,7 +13,7 @@\n class DocumentTypeSerializer(serializers.ModelSerializer):\n class Meta:\n model = DocumentType\n- fields = [\"name\"]\n+ fields = [\"id\", \"name\"]\n \n \n class LetterSerializer(UserLogModelSerializer):\ndiff --git a/backend-project/small_eod/tags/serializers.py b/backend-project/small_eod/tags/serializers.py\n--- a/backend-project/small_eod/tags/serializers.py\n+++ b/backend-project/small_eod/tags/serializers.py\n@@ -6,5 +6,6 @@\n class Meta:\n model = Tag\n fields = [\n+ \"id\",\n \"name\",\n ]\n", "issue": "udost\u0119pnienie id w API dla tags i documentType \nId dla tags i documetType, jest niezb\u0119dne dla zbudowania listy tych element\u00f3w w reakcie. \n", "before_files": [{"content": "from uuid import uuid4\nfrom django.conf import settings\nfrom rest_framework import serializers\nfrom .models import Letter, DocumentType\nfrom ..generic.serializers import UserLogModelSerializer\nfrom ..cases.models import Case\nfrom ..institutions.models import Institution\nfrom ..channels.models import Channel\nfrom ..files.apps import minio_app\nfrom ..files.serializers import FileSerializer\n\n\nclass DocumentTypeSerializer(serializers.ModelSerializer):\n class Meta:\n model = DocumentType\n fields = [\"name\"]\n\n\nclass LetterSerializer(UserLogModelSerializer):\n document_type = serializers.PrimaryKeyRelatedField(\n many=False, default=None, queryset=DocumentType.objects.all()\n )\n case = serializers.PrimaryKeyRelatedField(\n many=False, default=None, queryset=Case.objects.all()\n )\n institution = serializers.PrimaryKeyRelatedField(\n many=False, default=None, queryset=Institution.objects.all()\n )\n channel = serializers.PrimaryKeyRelatedField(\n many=False, default=None, queryset=Channel.objects.all()\n )\n attachments = FileSerializer(many=True, read_only=True)\n\n class Meta:\n model = Letter\n fields = [\n \"id\",\n \"direction\",\n \"channel\",\n \"final\",\n \"date\",\n \"reference_number\",\n \"institution\",\n \"case\",\n \"attachments\",\n \"ordering\",\n \"comment\",\n \"excerpt\",\n \"document_type\",\n \"created_on\",\n \"created_by\",\n \"modified_on\",\n \"modified_by\",\n ]\n\n def create(self, validated_data):\n channel = validated_data.pop(\"channel\")\n document_type = validated_data.pop(\"document_type\")\n institution = validated_data.pop(\"institution\")\n case = validated_data.pop(\"case\")\n\n letter = super().create(validated_data)\n letter.channel = channel\n letter.document_type = document_type\n letter.institution = institution\n letter.case = case\n letter.save()\n return letter\n\n def update(self, instance, validated_data):\n \"\"\"\n nested - variable storing representations of the nested objects\n of LetterSerializer (Channel, Address and DocumentType).\n Iterating over those 3 and updating fields of the related objects,\n using key-value pairs from PATCH request.\n \"\"\"\n nested = []\n for nested_object in nested:\n for attr, value in nested_object[\"data\"].items():\n setattr(nested_object[\"instance\"], attr, value)\n nested_object[\"instance\"].save()\n return super().update(instance, validated_data)\n\n\nclass SignRequestSerializer(serializers.Serializer):\n name = serializers.CharField(max_length=200)\n method = serializers.CharField(read_only=True)\n url = 
serializers.CharField(read_only=True)\n formData = serializers.DictField(read_only=True, child=serializers.CharField())\n path = serializers.CharField(read_only=True)\n\n def create(self, validated_data):\n path = f'{uuid4()}/{validated_data[\"name\"]}'\n url, form_data = minio_app.presigned_post_form_data(settings.MINIO_BUCKET, path)\n return {\n \"name\": validated_data[\"name\"],\n \"method\": \"POST\",\n \"url\": url,\n \"formData\": form_data,\n \"path\": path,\n }\n", "path": "backend-project/small_eod/letters/serializers.py"}, {"content": "from rest_framework import serializers\nfrom .models import Tag\n\n\nclass TagSerializer(serializers.ModelSerializer):\n class Meta:\n model = Tag\n fields = [\n \"name\",\n ]\n", "path": "backend-project/small_eod/tags/serializers.py"}]}
1,537
198
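The issue's motivation (translated above) is that a React list needs a stable key per element, and without the primary key in the serialized payload there is nothing to key on. An illustrative sketch with hypothetical payload dicts standing in for the serializer output before and after the fix:

```python
# Hypothetical payload shapes before and after adding "id" to fields.
before = [{"name": "urgent"}, {"name": "archived"}]
after = [{"id": 1, "name": "urgent"}, {"id": 2, "name": "archived"}]

print([tag.get("id") for tag in before])  # [None, None] -- no usable list key
print([tag["id"] for tag in after])       # [1, 2] -- stable keys for React
```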
gh_patches_debug_19197
rasdani/github-patches
git_diff
enthought__chaco-717
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Remove "PopupablePlot" `PopupablePlot` was introduced in the commit https://github.com/enthought/chaco/commit/4af154cec6f678862ba7e695ae62d681312b50e8#diff-c93657842da48caff25acdb9de9f919f9c04b5301d0fafc28598c9fdeee829f8 but it is not clear why it exists or who uses it. </issue> <code> [start of chaco/ui/popupable_plot.py] 1 # Enthought library imports 2 from traits.api import List 3 from chaco.plot import Plot 4 from chaco.plot_containers import VPlotContainer 5 from chaco.tools.pan_tool import PanTool 6 from chaco.tools.zoom_tool import ZoomTool 7 from chaco.ui.plot_window import PlotWindow 8 9 from traitsui.wx.constants import WindowColor 10 11 12 class PopupablePlot(Plot): 13 """A Plot class that pops up in a new window on double click""" 14 15 # FIXME: It would be nice to queue up other types of commands and settings 16 command_queue = List() 17 18 def normal_left_dclick(self, event): 19 plot = Plot(self.data) 20 for data, kw in self.command_queue: 21 plot.plot(data, **kw) 22 plot.title = self.title 23 24 plot.title = self.title 25 container = VPlotContainer(bgcolor=WindowColor) 26 container.add(plot) 27 plot.tools.append(PanTool(plot)) 28 plot.overlays.append(ZoomTool(plot)) 29 window = PlotWindow(plot=container) 30 window.edit_traits(kind="live", parent=event.window.control) 31 32 def plot(self, data, **kw): 33 """Queue up the plot commands""" 34 self.command_queue.append((data, kw)) 35 super().plot(data, **kw) 36 [end of chaco/ui/popupable_plot.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/chaco/ui/popupable_plot.py b/chaco/ui/popupable_plot.py deleted file mode 100644 --- a/chaco/ui/popupable_plot.py +++ /dev/null @@ -1,35 +0,0 @@ -# Enthought library imports -from traits.api import List -from chaco.plot import Plot -from chaco.plot_containers import VPlotContainer -from chaco.tools.pan_tool import PanTool -from chaco.tools.zoom_tool import ZoomTool -from chaco.ui.plot_window import PlotWindow - -from traitsui.wx.constants import WindowColor - - -class PopupablePlot(Plot): - """A Plot class that pops up in a new window on double click""" - - # FIXME: It would be nice to queue up other types of commands and settings - command_queue = List() - - def normal_left_dclick(self, event): - plot = Plot(self.data) - for data, kw in self.command_queue: - plot.plot(data, **kw) - plot.title = self.title - - plot.title = self.title - container = VPlotContainer(bgcolor=WindowColor) - container.add(plot) - plot.tools.append(PanTool(plot)) - plot.overlays.append(ZoomTool(plot)) - window = PlotWindow(plot=container) - window.edit_traits(kind="live", parent=event.window.control) - - def plot(self, data, **kw): - """Queue up the plot commands""" - self.command_queue.append((data, kw)) - super().plot(data, **kw)
{"golden_diff": "diff --git a/chaco/ui/popupable_plot.py b/chaco/ui/popupable_plot.py\ndeleted file mode 100644\n--- a/chaco/ui/popupable_plot.py\n+++ /dev/null\n@@ -1,35 +0,0 @@\n-# Enthought library imports\n-from traits.api import List\n-from chaco.plot import Plot\n-from chaco.plot_containers import VPlotContainer\n-from chaco.tools.pan_tool import PanTool\n-from chaco.tools.zoom_tool import ZoomTool\n-from chaco.ui.plot_window import PlotWindow\n-\n-from traitsui.wx.constants import WindowColor\n-\n-\n-class PopupablePlot(Plot):\n- \"\"\"A Plot class that pops up in a new window on double click\"\"\"\n-\n- # FIXME: It would be nice to queue up other types of commands and settings\n- command_queue = List()\n-\n- def normal_left_dclick(self, event):\n- plot = Plot(self.data)\n- for data, kw in self.command_queue:\n- plot.plot(data, **kw)\n- plot.title = self.title\n-\n- plot.title = self.title\n- container = VPlotContainer(bgcolor=WindowColor)\n- container.add(plot)\n- plot.tools.append(PanTool(plot))\n- plot.overlays.append(ZoomTool(plot))\n- window = PlotWindow(plot=container)\n- window.edit_traits(kind=\"live\", parent=event.window.control)\n-\n- def plot(self, data, **kw):\n- \"\"\"Queue up the plot commands\"\"\"\n- self.command_queue.append((data, kw))\n- super().plot(data, **kw)\n", "issue": "Remove \"PopupablePlot\"\n`PopupablePlot` was introduced in the commit https://github.com/enthought/chaco/commit/4af154cec6f678862ba7e695ae62d681312b50e8#diff-c93657842da48caff25acdb9de9f919f9c04b5301d0fafc28598c9fdeee829f8 but it is not clear why it exists or who uses it.\n", "before_files": [{"content": "# Enthought library imports\nfrom traits.api import List\nfrom chaco.plot import Plot\nfrom chaco.plot_containers import VPlotContainer\nfrom chaco.tools.pan_tool import PanTool\nfrom chaco.tools.zoom_tool import ZoomTool\nfrom chaco.ui.plot_window import PlotWindow\n\nfrom traitsui.wx.constants import WindowColor\n\n\nclass PopupablePlot(Plot):\n \"\"\"A Plot class that pops up in a new window on double click\"\"\"\n\n # FIXME: It would be nice to queue up other types of commands and settings\n command_queue = List()\n\n def normal_left_dclick(self, event):\n plot = Plot(self.data)\n for data, kw in self.command_queue:\n plot.plot(data, **kw)\n plot.title = self.title\n\n plot.title = self.title\n container = VPlotContainer(bgcolor=WindowColor)\n container.add(plot)\n plot.tools.append(PanTool(plot))\n plot.overlays.append(ZoomTool(plot))\n window = PlotWindow(plot=container)\n window.edit_traits(kind=\"live\", parent=event.window.control)\n\n def plot(self, data, **kw):\n \"\"\"Queue up the plot commands\"\"\"\n self.command_queue.append((data, kw))\n super().plot(data, **kw)\n", "path": "chaco/ui/popupable_plot.py"}]}
1,003
348
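Because the resolution here is deleting an apparently unused class, a quick standalone scan (not part of the chaco repository) can build confidence that no other module still references it before the file is removed:

```python
import pathlib

# Search the working copy for remaining references to the deleted class.
hits = [
    str(p)
    for p in pathlib.Path(".").rglob("*.py")
    if "PopupablePlot" in p.read_text(errors="ignore")
    and p.name != "popupable_plot.py"
]
print(hits or "no remaining references to PopupablePlot")
```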
gh_patches_debug_12556
rasdani/github-patches
git_diff
DDMAL__CantusDB-726
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Ensure that dummy users (i.e. old indexers) can't log in. Currently, on NewCantus we have created dummy users for people who had an Indexer object but no User account on OldCantus. These dummy users have randomly generated email addresses and no passwords, the rationale being that a user with no password will not be able to log in. A comment on [this stackoverflow topic](https://stackoverflow.com/questions/35690022/django-create-new-user-without-password) suggests that this is not strictly the case. It would probably be safer to set these dummy users to have [has_useable_password = False](https://docs.djangoproject.com/en/4.2/ref/contrib/auth/#django.contrib.auth.models.User.has_usable_password) if it is not already set up this way. </issue> <code> [start of django/cantusdb_project/main_app/management/commands/sync_indexers.py] 1 from django.core.management.base import BaseCommand 2 from django.contrib.auth import get_user_model 3 import requests, json 4 from faker import Faker 5 6 INDEXER_ID_FILE = "indexer_list.txt" 7 8 9 def get_id_list(file_path): 10 indexer_list = [] 11 file = open(file_path, "r") 12 for line in file: 13 line = line.strip("\n") 14 indexer_list.append(line) 15 file.close() 16 return indexer_list 17 18 19 def get_new_indexer(indexer_id): 20 # use json-export to get indexer information 21 url = f"http://cantus.uwaterloo.ca/json-node/{indexer_id}" 22 response = requests.get(url) 23 json_response = json.loads(response.content) 24 if json_response["field_first_name"]: 25 first_name = json_response["field_first_name"]["und"][0]["value"] 26 else: 27 first_name = None 28 if json_response["field_family_name"]: 29 family_name = json_response["field_family_name"]["und"][0]["value"] 30 else: 31 family_name = None 32 if json_response["field_indexer_institution"]: 33 institution = json_response["field_indexer_institution"]["und"][0]["value"] 34 else: 35 institution = None 36 if json_response["field_indexer_city"]: 37 city = json_response["field_indexer_city"]["und"][0]["value"] 38 else: 39 city = None 40 if json_response["field_indexer_country"]: 41 country = json_response["field_indexer_country"]["und"][0]["value"] 42 else: 43 country = None 44 45 # check whether the current indexer has a user entry of the same name 46 indexer_full_name = f"{first_name} {family_name}" 47 print(f"{indexer_id} {indexer_full_name}") 48 homonymous_users = get_user_model().objects.filter( 49 full_name__iexact=indexer_full_name 50 ) 51 # if the indexer also exists as a user 52 if homonymous_users: 53 assert homonymous_users.count() == 1 54 homonymous_user = homonymous_users.get() 55 print(f"homonymous: {homonymous_user.full_name}") 56 # keep the user as it is (merge the indexer into existing user) 57 # and store the ID of its indexer object 58 homonymous_user.old_indexer_id = indexer_id 59 homonymous_user.is_indexer = True 60 homonymous_user.save() 61 # if the indexer doesn't exist as a user 62 else: 63 faker = Faker() 64 # create a new user with the indexer information 65 get_user_model().objects.create( 66 institution=institution, 67 city=city, 68 country=country, 69 full_name=indexer_full_name, 70 # assign random email to dummy users 71 email=f"{faker.lexify('????????')}@fakeemail.com", 72 # leave the password empty for dummy users 73 # the password can't be empty in login form, so they can't log in 74 password="", 75 old_indexer_id=indexer_id, 76 is_indexer=True, 77 ) 78 79 80 class 
Command(BaseCommand): 81 def add_arguments(self, parser): 82 pass 83 84 def handle(self, *args, **options): 85 indexer_list = get_id_list(INDEXER_ID_FILE) 86 for id in indexer_list: 87 get_new_indexer(id) 88 [end of django/cantusdb_project/main_app/management/commands/sync_indexers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/django/cantusdb_project/main_app/management/commands/sync_indexers.py b/django/cantusdb_project/main_app/management/commands/sync_indexers.py --- a/django/cantusdb_project/main_app/management/commands/sync_indexers.py +++ b/django/cantusdb_project/main_app/management/commands/sync_indexers.py @@ -69,12 +69,9 @@ full_name=indexer_full_name, # assign random email to dummy users email=f"{faker.lexify('????????')}@fakeemail.com", - # leave the password empty for dummy users - # the password can't be empty in login form, so they can't log in - password="", old_indexer_id=indexer_id, is_indexer=True, - ) + ).set_unusable_password() # Set unusable password so the user can't log in or access reset password page class Command(BaseCommand):
{"golden_diff": "diff --git a/django/cantusdb_project/main_app/management/commands/sync_indexers.py b/django/cantusdb_project/main_app/management/commands/sync_indexers.py\n--- a/django/cantusdb_project/main_app/management/commands/sync_indexers.py\n+++ b/django/cantusdb_project/main_app/management/commands/sync_indexers.py\n@@ -69,12 +69,9 @@\n full_name=indexer_full_name,\n # assign random email to dummy users\n email=f\"{faker.lexify('????????')}@fakeemail.com\",\n- # leave the password empty for dummy users\n- # the password can't be empty in login form, so they can't log in\n- password=\"\",\n old_indexer_id=indexer_id,\n is_indexer=True,\n- )\n+ ).set_unusable_password() # Set unusable password so the user can't log in or access reset password page\n \n \n class Command(BaseCommand):\n", "issue": "Ensure that dummy users (i.e. old indexers) can't log in.\nCurrently, on NewCantus we have created dummy users for people who had an Indexer object but no User account on OldCantus. These dummy users have randomly generated email addresses and no passwords, the rationale being that a user with no password will not be able to log in.\r\n\r\nA comment on [this stackoverflow topic](https://stackoverflow.com/questions/35690022/django-create-new-user-without-password) suggests that this is not strictly the case. It would probably be safer to set these dummy users to have [has_useable_password = False](https://docs.djangoproject.com/en/4.2/ref/contrib/auth/#django.contrib.auth.models.User.has_usable_password) if it is not already set up this way.\n", "before_files": [{"content": "from django.core.management.base import BaseCommand\nfrom django.contrib.auth import get_user_model\nimport requests, json\nfrom faker import Faker\n\nINDEXER_ID_FILE = \"indexer_list.txt\"\n\n\ndef get_id_list(file_path):\n indexer_list = []\n file = open(file_path, \"r\")\n for line in file:\n line = line.strip(\"\\n\")\n indexer_list.append(line)\n file.close()\n return indexer_list\n\n\ndef get_new_indexer(indexer_id):\n # use json-export to get indexer information\n url = f\"http://cantus.uwaterloo.ca/json-node/{indexer_id}\"\n response = requests.get(url)\n json_response = json.loads(response.content)\n if json_response[\"field_first_name\"]:\n first_name = json_response[\"field_first_name\"][\"und\"][0][\"value\"]\n else:\n first_name = None\n if json_response[\"field_family_name\"]:\n family_name = json_response[\"field_family_name\"][\"und\"][0][\"value\"]\n else:\n family_name = None\n if json_response[\"field_indexer_institution\"]:\n institution = json_response[\"field_indexer_institution\"][\"und\"][0][\"value\"]\n else:\n institution = None\n if json_response[\"field_indexer_city\"]:\n city = json_response[\"field_indexer_city\"][\"und\"][0][\"value\"]\n else:\n city = None\n if json_response[\"field_indexer_country\"]:\n country = json_response[\"field_indexer_country\"][\"und\"][0][\"value\"]\n else:\n country = None\n\n # check whether the current indexer has a user entry of the same name\n indexer_full_name = f\"{first_name} {family_name}\"\n print(f\"{indexer_id} {indexer_full_name}\")\n homonymous_users = get_user_model().objects.filter(\n full_name__iexact=indexer_full_name\n )\n # if the indexer also exists as a user\n if homonymous_users:\n assert homonymous_users.count() == 1\n homonymous_user = homonymous_users.get()\n print(f\"homonymous: {homonymous_user.full_name}\")\n # keep the user as it is (merge the indexer into existing user)\n # and store the ID of its indexer object\n 
homonymous_user.old_indexer_id = indexer_id\n homonymous_user.is_indexer = True\n homonymous_user.save()\n # if the indexer doesn't exist as a user\n else:\n faker = Faker()\n # create a new user with the indexer information\n get_user_model().objects.create(\n institution=institution,\n city=city,\n country=country,\n full_name=indexer_full_name,\n # assign random email to dummy users\n email=f\"{faker.lexify('????????')}@fakeemail.com\",\n # leave the password empty for dummy users\n # the password can't be empty in login form, so they can't log in\n password=\"\",\n old_indexer_id=indexer_id,\n is_indexer=True,\n )\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n pass\n\n def handle(self, *args, **options):\n indexer_list = get_id_list(INDEXER_ID_FILE)\n for id in indexer_list:\n get_new_indexer(id)\n", "path": "django/cantusdb_project/main_app/management/commands/sync_indexers.py"}]}
1,596
218
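The fix above relies on Django's unusable-password mechanism. A minimal sketch, assuming Django is installed, showing that an empty password string still counts as usable (the issue's concern) while the value stored by `set_unusable_password()` does not:

```python
from django.contrib.auth.hashers import is_password_usable, make_password

print(is_password_usable(""))                   # True  -- empty string is not flagged
print(is_password_usable(make_password(None)))  # False -- what set_unusable_password() stores
```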
gh_patches_debug_5627
rasdani/github-patches
git_diff
nf-core__tools-2075
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Lint linter config for Python < 3.6 and issue warning if Actions are using Python 3.6 and older ### Description of the bug ``` Run nf-core -l lint_log.txt lint --dir ${GITHUB_WORKSPACE} --markdown lint_results.md ``` ```python Traceback (most recent call last): File "/opt/hostedtoolcache/Python/3.6.15/x64/bin/nf-core", line 5, in <module> from nf_core.__main__ import run_nf_core File "/opt/hostedtoolcache/Python/3.6.15/x64/lib/python3.6/site-packages/nf_core/__main__.py", line 19, in <module> import nf_core.lint File "/opt/hostedtoolcache/Python/3.6.15/x64/lib/python3.6/site-packages/nf_core/lint/__init__.py", line 23, in <module> import nf_core.modules.lint File "/opt/hostedtoolcache/Python/3.6.15/x64/lib/python3.6/site-packages/nf_core/modules/__init__.py", line 1, in <module> from .bump_versions import ModuleVersionBumper File "/opt/hostedtoolcache/Python/3.6.15/x64/lib/python3.6/site-packages/nf_core/modules/bump_versions.py", line 24, in <module> from .modules_command import ModuleCommand File "/opt/hostedtoolcache/Python/3.6.15/x64/lib/python3.6/site-packages/nf_core/modules/modules_command.py", line [11](https://github.com/nf-core/scrnaseq/actions/runs/3188310073/jobs/5200822519#step:6:12), in <module> from .modules_json import ModulesJson File "/opt/hostedtoolcache/Python/3.6.[15](https://github.com/nf-core/scrnaseq/actions/runs/3188310073/jobs/5200822519#step:6:16)/x64/lib/python3.6/site-packages/nf_core/modules/modules_json.py", line 15, in <module> import nf_core.modules.modules_repo File "/opt/hostedtoolcache/Python/3.6.15/x64/lib/python3.6/site-packages/nf_core/modules/modules_repo.py", line 5, in <module> from importlib.resources import path ModuleNotFoundError: No module named 'importlib.resources' ``` ``` Error: Process completed with exit code 1. ``` ### Command used and terminal output ```console https://github.com/nf-core/scrnaseq/actions/runs/3188310073/jobs/5200822519 ``` ### System information GHA </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 3 from setuptools import find_packages, setup 4 5 version = "2.7dev" 6 7 with open("README.md") as f: 8 readme = f.read() 9 10 with open("requirements.txt") as f: 11 required = f.read().splitlines() 12 13 setup( 14 name="nf-core", 15 version=version, 16 description="Helper tools for use with nf-core Nextflow pipelines.", 17 long_description=readme, 18 long_description_content_type="text/markdown", 19 keywords=[ 20 "nf-core", 21 "nextflow", 22 "bioinformatics", 23 "workflow", 24 "pipeline", 25 "biology", 26 "sequencing", 27 "NGS", 28 "next generation sequencing", 29 ], 30 author="Phil Ewels", 31 author_email="[email protected]", 32 url="https://github.com/nf-core/tools", 33 license="MIT", 34 entry_points={ 35 "console_scripts": ["nf-core=nf_core.__main__:run_nf_core"], 36 "refgenie.hooks.post_update": ["nf-core-refgenie=nf_core.refgenie:update_config"], 37 }, 38 install_requires=required, 39 packages=find_packages(exclude=("docs")), 40 include_package_data=True, 41 zip_safe=False, 42 ) 43 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -35,6 +35,7 @@ "console_scripts": ["nf-core=nf_core.__main__:run_nf_core"], "refgenie.hooks.post_update": ["nf-core-refgenie=nf_core.refgenie:update_config"], }, + python_requires=">=3.7, <4", install_requires=required, packages=find_packages(exclude=("docs")), include_package_data=True,
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -35,6 +35,7 @@\n \"console_scripts\": [\"nf-core=nf_core.__main__:run_nf_core\"],\n \"refgenie.hooks.post_update\": [\"nf-core-refgenie=nf_core.refgenie:update_config\"],\n },\n+ python_requires=\">=3.7, <4\",\n install_requires=required,\n packages=find_packages(exclude=(\"docs\")),\n include_package_data=True,\n", "issue": "Lint linter config for Python < 3.6 and issue warning if Actions are using Python 3.6 and older\n### Description of the bug\r\n\r\n```\r\nRun nf-core -l lint_log.txt lint --dir ${GITHUB_WORKSPACE} --markdown lint_results.md\r\n```\r\n```python\r\nTraceback (most recent call last):\r\n File \"/opt/hostedtoolcache/Python/3.6.15/x64/bin/nf-core\", line 5, in <module>\r\n from nf_core.__main__ import run_nf_core\r\n File \"/opt/hostedtoolcache/Python/3.6.15/x64/lib/python3.6/site-packages/nf_core/__main__.py\", line 19, in <module>\r\n import nf_core.lint\r\n File \"/opt/hostedtoolcache/Python/3.6.15/x64/lib/python3.6/site-packages/nf_core/lint/__init__.py\", line 23, in <module>\r\n import nf_core.modules.lint\r\n File \"/opt/hostedtoolcache/Python/3.6.15/x64/lib/python3.6/site-packages/nf_core/modules/__init__.py\", line 1, in <module>\r\n from .bump_versions import ModuleVersionBumper\r\n File \"/opt/hostedtoolcache/Python/3.6.15/x64/lib/python3.6/site-packages/nf_core/modules/bump_versions.py\", line 24, in <module>\r\n from .modules_command import ModuleCommand\r\n File \"/opt/hostedtoolcache/Python/3.6.15/x64/lib/python3.6/site-packages/nf_core/modules/modules_command.py\", line [11](https://github.com/nf-core/scrnaseq/actions/runs/3188310073/jobs/5200822519#step:6:12), in <module>\r\n from .modules_json import ModulesJson\r\n File \"/opt/hostedtoolcache/Python/3.6.[15](https://github.com/nf-core/scrnaseq/actions/runs/3188310073/jobs/5200822519#step:6:16)/x64/lib/python3.6/site-packages/nf_core/modules/modules_json.py\", line 15, in <module>\r\n import nf_core.modules.modules_repo\r\n File \"/opt/hostedtoolcache/Python/3.6.15/x64/lib/python3.6/site-packages/nf_core/modules/modules_repo.py\", line 5, in <module>\r\n from importlib.resources import path\r\nModuleNotFoundError: No module named 'importlib.resources'\r\n```\r\n```\r\nError: Process completed with exit code 1.\r\n```\r\n\r\n### Command used and terminal output\r\n\r\n```console\r\nhttps://github.com/nf-core/scrnaseq/actions/runs/3188310073/jobs/5200822519\r\n```\r\n\r\n\r\n### System information\r\n\r\nGHA\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import find_packages, setup\n\nversion = \"2.7dev\"\n\nwith open(\"README.md\") as f:\n readme = f.read()\n\nwith open(\"requirements.txt\") as f:\n required = f.read().splitlines()\n\nsetup(\n name=\"nf-core\",\n version=version,\n description=\"Helper tools for use with nf-core Nextflow pipelines.\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n keywords=[\n \"nf-core\",\n \"nextflow\",\n \"bioinformatics\",\n \"workflow\",\n \"pipeline\",\n \"biology\",\n \"sequencing\",\n \"NGS\",\n \"next generation sequencing\",\n ],\n author=\"Phil Ewels\",\n author_email=\"[email protected]\",\n url=\"https://github.com/nf-core/tools\",\n license=\"MIT\",\n entry_points={\n \"console_scripts\": [\"nf-core=nf_core.__main__:run_nf_core\"],\n \"refgenie.hooks.post_update\": [\"nf-core-refgenie=nf_core.refgenie:update_config\"],\n },\n install_requires=required,\n packages=find_packages(exclude=(\"docs\")),\n 
include_package_data=True,\n zip_safe=False,\n)\n", "path": "setup.py"}]}
1,543
112
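The added `python_requires=">=3.7, <4"` makes pip refuse to install the package on Python 3.6, where `importlib.resources` (the module missing in the traceback) does not yet exist. A sketch of an equivalent runtime guard:

```python
import sys

# Mirrors what python_requires=">=3.7, <4" enforces at install time.
if sys.version_info < (3, 7):
    raise SystemExit("nf-core requires Python >= 3.7")
print("Python", ".".join(map(str, sys.version_info[:3])), "is supported")
```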
gh_patches_debug_9252
rasdani/github-patches
git_diff
HypothesisWorks__hypothesis-2889
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Improve handling of self-referential strategies As mentioned in #2783, the following strategy is not explicitly forbidden but fails with an `AssertionError`: ```python from hypothesis import strategies as st SELF_REF = st.recursive( st.deferred(lambda: SELF_REF | st.booleans()), lambda s: st.lists(s, min_size=1) ) ``` There is an alternative strategy that produces the same data, but doesn't fail the same way: ```python SELF_REF = st.recursive( st.booleans(), lambda s: st.lists(s, min_size=1) ) ``` I am not sure if all self-referential strategies can be rewritten like this, but probably we can either explicitly forbid such strategies or revisit our drawing approach for them. Here is my reasoning from #2783 > As far as I see, the cap is needed to prevent the drawing from this strategy & generating a certain maximum amount of leaves. However, assuming a single thread (more on the multi-threaded behavior in the next section) and such a self-referential strategy, I am not sure if capping is needed as it is - we can just apply it once on the first capped usage and make all subsequent calls no-op (e.g., just yield without modifying marked). Then we still have the marker set only once on the very first RecursiveStrategy.do_draw call, and it will be monotonically decreasing. Therefore, we'll have the max size properly maintained, and there will be no oversized subtrees because, at some point, LimitReached will occur. </issue> <code> [start of hypothesis-python/src/hypothesis/strategies/_internal/recursive.py] 1 # This file is part of Hypothesis, which may be found at 2 # https://github.com/HypothesisWorks/hypothesis/ 3 # 4 # Most of this work is copyright (C) 2013-2021 David R. MacIver 5 # ([email protected]), but it contains contributions by others. See 6 # CONTRIBUTING.rst for a full list of people who may hold copyright, and 7 # consult the git log if you need to determine who owns an individual 8 # contribution. 9 # 10 # This Source Code Form is subject to the terms of the Mozilla Public License, 11 # v. 2.0. If a copy of the MPL was not distributed with this file, You can 12 # obtain one at https://mozilla.org/MPL/2.0/. 
13 # 14 # END HEADER 15 16 import threading 17 from contextlib import contextmanager 18 19 from hypothesis.internal.lazyformat import lazyformat 20 from hypothesis.internal.reflection import get_pretty_function_description 21 from hypothesis.strategies._internal.strategies import ( 22 OneOfStrategy, 23 SearchStrategy, 24 check_strategy, 25 ) 26 27 28 class LimitReached(BaseException): 29 pass 30 31 32 class LimitedStrategy(SearchStrategy): 33 def __init__(self, strategy): 34 super().__init__() 35 self.base_strategy = strategy 36 self._threadlocal = threading.local() 37 38 @property 39 def marker(self): 40 return getattr(self._threadlocal, "marker", 0) 41 42 @marker.setter 43 def marker(self, value): 44 self._threadlocal.marker = value 45 46 @property 47 def currently_capped(self): 48 return getattr(self._threadlocal, "currently_capped", False) 49 50 @currently_capped.setter 51 def currently_capped(self, value): 52 self._threadlocal.currently_capped = value 53 54 def __repr__(self): 55 return f"LimitedStrategy({self.base_strategy!r})" 56 57 def do_validate(self): 58 self.base_strategy.validate() 59 60 def do_draw(self, data): 61 assert self.currently_capped 62 if self.marker <= 0: 63 raise LimitReached() 64 self.marker -= 1 65 return data.draw(self.base_strategy) 66 67 @contextmanager 68 def capped(self, max_templates): 69 assert not self.currently_capped 70 try: 71 self.currently_capped = True 72 self.marker = max_templates 73 yield 74 finally: 75 self.currently_capped = False 76 77 78 class RecursiveStrategy(SearchStrategy): 79 def __init__(self, base, extend, max_leaves): 80 self.max_leaves = max_leaves 81 self.base = base 82 self.limited_base = LimitedStrategy(base) 83 self.extend = extend 84 85 strategies = [self.limited_base, self.extend(self.limited_base)] 86 while 2 ** (len(strategies) - 1) <= max_leaves: 87 strategies.append(extend(OneOfStrategy(tuple(strategies)))) 88 self.strategy = OneOfStrategy(strategies) 89 90 def __repr__(self): 91 if not hasattr(self, "_cached_repr"): 92 self._cached_repr = "recursive(%r, %s, max_leaves=%d)" % ( 93 self.base, 94 get_pretty_function_description(self.extend), 95 self.max_leaves, 96 ) 97 return self._cached_repr 98 99 def do_validate(self): 100 check_strategy(self.base, "base") 101 extended = self.extend(self.limited_base) 102 check_strategy(extended, f"extend({self.limited_base!r})") 103 self.limited_base.validate() 104 extended.validate() 105 106 def do_draw(self, data): 107 count = 0 108 while True: 109 try: 110 with self.limited_base.capped(self.max_leaves): 111 return data.draw(self.strategy) 112 except LimitReached: 113 # Workaround for possible coverage bug - this branch is definitely 114 # covered but for some reason is showing up as not covered. 115 if count == 0: # pragma: no branch 116 data.note_event( 117 lazyformat( 118 "Draw for %r exceeded max_leaves and had to be retried", 119 self, 120 ) 121 ) 122 count += 1 123 [end of hypothesis-python/src/hypothesis/strategies/_internal/recursive.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/hypothesis-python/src/hypothesis/strategies/_internal/recursive.py b/hypothesis-python/src/hypothesis/strategies/_internal/recursive.py --- a/hypothesis-python/src/hypothesis/strategies/_internal/recursive.py +++ b/hypothesis-python/src/hypothesis/strategies/_internal/recursive.py @@ -66,13 +66,13 @@ @contextmanager def capped(self, max_templates): - assert not self.currently_capped try: + was_capped = self.currently_capped self.currently_capped = True self.marker = max_templates yield finally: - self.currently_capped = False + self.currently_capped = was_capped class RecursiveStrategy(SearchStrategy):
{"golden_diff": "diff --git a/hypothesis-python/src/hypothesis/strategies/_internal/recursive.py b/hypothesis-python/src/hypothesis/strategies/_internal/recursive.py\n--- a/hypothesis-python/src/hypothesis/strategies/_internal/recursive.py\n+++ b/hypothesis-python/src/hypothesis/strategies/_internal/recursive.py\n@@ -66,13 +66,13 @@\n \n @contextmanager\n def capped(self, max_templates):\n- assert not self.currently_capped\n try:\n+ was_capped = self.currently_capped\n self.currently_capped = True\n self.marker = max_templates\n yield\n finally:\n- self.currently_capped = False\n+ self.currently_capped = was_capped\n \n \n class RecursiveStrategy(SearchStrategy):\n", "issue": "Improve handling of self-referential strategies\nAs mentioned in #2783, the following strategy is not explicitly forbidden but fails with an `AssertionError`:\r\n\r\n```python\r\nfrom hypothesis import strategies as st\r\n\r\nSELF_REF = st.recursive(\r\n st.deferred(lambda: SELF_REF | st.booleans()),\r\n lambda s: st.lists(s, min_size=1)\r\n)\r\n```\r\n\r\nThere is an alternative strategy that produces the same data, but doesn't fail the same way:\r\n\r\n```python\r\nSELF_REF = st.recursive(\r\n st.booleans(),\r\n lambda s: st.lists(s, min_size=1)\r\n)\r\n```\r\n\r\nI am not sure if all self-referential strategies can be rewritten like this, but probably we can either explicitly forbid such strategies or revisit our drawing approach for them.\r\n\r\nHere is my reasoning from #2783 \r\n\r\n> As far as I see, the cap is needed to prevent the drawing from this strategy & generating a certain maximum amount of leaves. However, assuming a single thread (more on the multi-threaded behavior in the next section) and such a self-referential strategy, I am not sure if capping is needed as it is - we can just apply it once on the first capped usage and make all subsequent calls no-op (e.g., just yield without modifying marked). Then we still have the marker set only once on the very first RecursiveStrategy.do_draw call, and it will be monotonically decreasing. Therefore, we'll have the max size properly maintained, and there will be no oversized subtrees because, at some point, LimitReached will occur.\r\n\r\n\n", "before_files": [{"content": "# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis/\n#\n# Most of this work is copyright (C) 2013-2021 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. 
If a copy of the MPL was not distributed with this file, You can\n# obtain one at https://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nimport threading\nfrom contextlib import contextmanager\n\nfrom hypothesis.internal.lazyformat import lazyformat\nfrom hypothesis.internal.reflection import get_pretty_function_description\nfrom hypothesis.strategies._internal.strategies import (\n OneOfStrategy,\n SearchStrategy,\n check_strategy,\n)\n\n\nclass LimitReached(BaseException):\n pass\n\n\nclass LimitedStrategy(SearchStrategy):\n def __init__(self, strategy):\n super().__init__()\n self.base_strategy = strategy\n self._threadlocal = threading.local()\n\n @property\n def marker(self):\n return getattr(self._threadlocal, \"marker\", 0)\n\n @marker.setter\n def marker(self, value):\n self._threadlocal.marker = value\n\n @property\n def currently_capped(self):\n return getattr(self._threadlocal, \"currently_capped\", False)\n\n @currently_capped.setter\n def currently_capped(self, value):\n self._threadlocal.currently_capped = value\n\n def __repr__(self):\n return f\"LimitedStrategy({self.base_strategy!r})\"\n\n def do_validate(self):\n self.base_strategy.validate()\n\n def do_draw(self, data):\n assert self.currently_capped\n if self.marker <= 0:\n raise LimitReached()\n self.marker -= 1\n return data.draw(self.base_strategy)\n\n @contextmanager\n def capped(self, max_templates):\n assert not self.currently_capped\n try:\n self.currently_capped = True\n self.marker = max_templates\n yield\n finally:\n self.currently_capped = False\n\n\nclass RecursiveStrategy(SearchStrategy):\n def __init__(self, base, extend, max_leaves):\n self.max_leaves = max_leaves\n self.base = base\n self.limited_base = LimitedStrategy(base)\n self.extend = extend\n\n strategies = [self.limited_base, self.extend(self.limited_base)]\n while 2 ** (len(strategies) - 1) <= max_leaves:\n strategies.append(extend(OneOfStrategy(tuple(strategies))))\n self.strategy = OneOfStrategy(strategies)\n\n def __repr__(self):\n if not hasattr(self, \"_cached_repr\"):\n self._cached_repr = \"recursive(%r, %s, max_leaves=%d)\" % (\n self.base,\n get_pretty_function_description(self.extend),\n self.max_leaves,\n )\n return self._cached_repr\n\n def do_validate(self):\n check_strategy(self.base, \"base\")\n extended = self.extend(self.limited_base)\n check_strategy(extended, f\"extend({self.limited_base!r})\")\n self.limited_base.validate()\n extended.validate()\n\n def do_draw(self, data):\n count = 0\n while True:\n try:\n with self.limited_base.capped(self.max_leaves):\n return data.draw(self.strategy)\n except LimitReached:\n # Workaround for possible coverage bug - this branch is definitely\n # covered but for some reason is showing up as not covered.\n if count == 0: # pragma: no branch\n data.note_event(\n lazyformat(\n \"Draw for %r exceeded max_leaves and had to be retried\",\n self,\n )\n )\n count += 1\n", "path": "hypothesis-python/src/hypothesis/strategies/_internal/recursive.py"}]}
2,029
181
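To see the behavioral change from the Hypothesis record above in isolation, here is a self-contained sketch of the save-and-restore pattern the golden diff applies to `LimitedStrategy.capped`. The `LeafBudget` class and its attribute names are illustrative stand-ins, not Hypothesis internals; the point is only that restoring the previous flag makes the context manager re-entrant, which is what a self-referential strategy needs.

```python
import threading
from contextlib import contextmanager


class LeafBudget:
    """Illustrative stand-in for Hypothesis's LimitedStrategy (hypothetical names)."""

    def __init__(self):
        self._local = threading.local()

    @property
    def currently_capped(self):
        return getattr(self._local, "capped", False)

    @contextmanager
    def capped(self, max_templates):
        # Save and restore the previous flag instead of asserting it is unset,
        # so a nested (self-referential) draw can re-enter safely.
        was_capped = self.currently_capped
        self._local.capped = True
        self._local.marker = max_templates
        try:
            yield
        finally:
            self._local.capped = was_capped


budget = LeafBudget()
with budget.capped(5):
    with budget.capped(5):  # the old assert-based version raised AssertionError here
        pass
assert budget.currently_capped is False
```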
gh_patches_debug_7921
rasdani/github-patches
git_diff
OpenMined__PySyft-5330
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Check and Fix notebook / jupyter client warning on Colab ## Description This needs to be checked on Colab, since it seems colab has its own outdated versions of notebook and jupyter-client as well. https://github.com/OpenMined/PySyft/issues/4915 ## Definition of Done Fix for Colab if possible. </issue> <code> [start of src/syft/core/common/environment.py] 1 # stdlib 2 import sys 3 4 # checks if we are in a python repl or python -i script.py 5 is_interactive = bool(getattr(sys, "ps1", sys.flags.interactive)) 6 7 # checks if we are in jupyter 8 is_jupyter = True 9 10 try: 11 get_ipython() # type: ignore 12 # third party 13 import notebook 14 from packaging import version 15 16 NOTEBOOK_VERSION = version.parse(notebook.__version__.split("+")[0]) 17 if NOTEBOOK_VERSION < version.parse("6.0.0"): 18 raise Exception( 19 "Your Jupyter Notebook is too old. Please upgrade to version 6 or higher." 20 ) 21 except NameError: 22 is_jupyter = False 23 24 __all__ = ["is_interactive", "is_jupyter"] 25 [end of src/syft/core/common/environment.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/syft/core/common/environment.py b/src/syft/core/common/environment.py --- a/src/syft/core/common/environment.py +++ b/src/syft/core/common/environment.py @@ -14,7 +14,8 @@ from packaging import version NOTEBOOK_VERSION = version.parse(notebook.__version__.split("+")[0]) - if NOTEBOOK_VERSION < version.parse("6.0.0"): + if NOTEBOOK_VERSION < version.parse("6.0.0") and "google.colab" not in sys.modules: + # google.colab check to fix issue #5315 raise Exception( "Your Jupyter Notebook is too old. Please upgrade to version 6 or higher." )
{"golden_diff": "diff --git a/src/syft/core/common/environment.py b/src/syft/core/common/environment.py\n--- a/src/syft/core/common/environment.py\n+++ b/src/syft/core/common/environment.py\n@@ -14,7 +14,8 @@\n from packaging import version\n \n NOTEBOOK_VERSION = version.parse(notebook.__version__.split(\"+\")[0])\n- if NOTEBOOK_VERSION < version.parse(\"6.0.0\"):\n+ if NOTEBOOK_VERSION < version.parse(\"6.0.0\") and \"google.colab\" not in sys.modules:\n+ # google.colab check to fix issue #5315\n raise Exception(\n \"Your Jupyter Notebook is too old. Please upgrade to version 6 or higher.\"\n )\n", "issue": "Check and Fix notebook / jupyter client warning on Colab\n## Description\r\nThis needs to be checked on Colab, since it seems colab has its own outdated versions of notebook and jupyter-client as well.\r\nhttps://github.com/OpenMined/PySyft/issues/4915\r\n\r\n## Definition of Done\r\nFix for Colab if possible.\n", "before_files": [{"content": "# stdlib\nimport sys\n\n# checks if we are in a python repl or python -i script.py\nis_interactive = bool(getattr(sys, \"ps1\", sys.flags.interactive))\n\n# checks if we are in jupyter\nis_jupyter = True\n\ntry:\n get_ipython() # type: ignore\n # third party\n import notebook\n from packaging import version\n\n NOTEBOOK_VERSION = version.parse(notebook.__version__.split(\"+\")[0])\n if NOTEBOOK_VERSION < version.parse(\"6.0.0\"):\n raise Exception(\n \"Your Jupyter Notebook is too old. Please upgrade to version 6 or higher.\"\n )\nexcept NameError:\n is_jupyter = False\n\n__all__ = [\"is_interactive\", \"is_jupyter\"]\n", "path": "src/syft/core/common/environment.py"}]}
828
164
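As a quick illustration of the guard added in the PySyft record above, the Colab detection reduces to a one-line membership test. The helper name below is made up for the example; only the `"google.colab" in sys.modules` check comes from the diff.

```python
import sys


def skip_notebook_version_check() -> bool:
    # Colab preloads its own (older) notebook stack, so the patch bypasses
    # the minimum-version check whenever google.colab is already imported.
    return "google.colab" in sys.modules


print(skip_notebook_version_check())  # False outside Colab, True inside it
```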
gh_patches_debug_2135
rasdani/github-patches
git_diff
chainer__chainer-258
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Shape of output value of `concat` is list in GPU `cuda.empty([1])` makes a `GPUArray` with `list` shape. It causes a type invalid error. https://github.com/pfnet/chainer/blob/master/chainer/functions/concat.py#L69 </issue> <code> [start of chainer/functions/concat.py] 1 import numpy 2 3 from chainer import cuda 4 from chainer import function 5 from chainer.utils import type_check 6 7 _args = 'const float* x, float* y, int cdimx, int cdimy, int rdim, int coffset' 8 _preamble = ''' 9 #define COPY(statement) \ 10 int l = i / (rdim * cdimx); \ 11 int c = i / rdim % cdimx + coffset; \ 12 int r = i % rdim; \ 13 int idx = r + rdim * (c + cdimy * l); \ 14 statement; 15 ''' 16 17 18 class Concat(function.Function): 19 20 """Concatenate multiple tensors towards specified axis.""" 21 22 # concat along the channel dimension by default 23 def __init__(self, axis=1): 24 self.axis = axis 25 26 def check_type_forward(self, in_types): 27 type_check.expect(in_types.size() > 0) 28 type_check.expect(in_types[0].ndim > 29 type_check.Variable(self.axis, 'axis')) 30 31 ndim = in_types[0].ndim.eval() 32 for i in range(1, in_types.size().eval()): 33 type_check.expect( 34 in_types[0].dtype == in_types[i].dtype, 35 in_types[0].ndim == in_types[i].ndim, 36 ) 37 for d in range(0, ndim): 38 if d == self.axis: 39 continue 40 type_check.expect(in_types[0].shape[d] == in_types[i].shape[d]) 41 42 def check_type_backward(self, in_types, out_types): 43 type_check.expect( 44 in_types.size() > 0, 45 out_types.size() == 1, 46 ) 47 y_type, = out_types 48 49 type_check.expect(y_type.dtype == in_types[0].dtype) 50 ndim = in_types[0].ndim.eval() 51 concat_size = sum(typ.shape[self.axis] for typ in in_types) 52 type_check.expect(concat_size == y_type.shape[self.axis]) 53 54 for d in range(0, ndim): 55 if d == self.axis: 56 continue 57 type_check.expect(y_type.shape[d] == in_types[0].shape[d]) 58 59 def forward_cpu(self, xs): 60 return numpy.concatenate(xs, axis=self.axis), 61 62 def forward_gpu(self, xs): 63 # TODO(beam2d): Unify the process into a single kernel. 64 shape = list(xs[0].shape) 65 for x in xs[1:]: 66 shape[self.axis] += x.shape[self.axis] 67 self.shape = shape 68 69 y = cuda.empty(shape, dtype=xs[0].dtype) 70 self.cdimy = y.shape[self.axis] 71 self.rdim = numpy.prod(shape[self.axis + 1:], dtype=int) 72 73 coffset = 0 74 kernel = cuda.elementwise( 75 _args, 'COPY(y[idx] = x[i])', 'concat_fwd', preamble=_preamble) 76 for x in xs: 77 cdimx = x.shape[self.axis] 78 kernel(x, y, cdimx, self.cdimy, self.rdim, coffset) 79 coffset += cdimx 80 81 return y, 82 83 def backward_cpu(self, xs, gy): 84 sizes = numpy.array([x.shape[self.axis] for x in xs[:-1]]).cumsum() 85 return numpy.split(gy[0], sizes, axis=self.axis) 86 87 def backward_gpu(self, xs, gy): 88 gxs = tuple(cuda.empty_like(x) for x in xs) 89 90 coffset = 0 91 kernel = cuda.elementwise( 92 _args, 'COPY(x[i] = y[idx])', 'concat_bwd', preamble=_preamble) 93 for gx in gxs: 94 cdimx = gx.shape[self.axis] 95 kernel(gx, gy[0], cdimx, self.cdimy, self.rdim, coffset) 96 coffset += cdimx 97 98 return gxs 99 100 101 def concat(xs, axis=1): 102 """Concatenates given variables along an axis. 103 104 Args: 105 xs (tuple of Variables): Variables to be concatenated. 106 axis (int): Axis that the input arrays are concatenated along. 107 108 Returns: 109 ~chainer.Variable: Output variable. 
110 111 """ 112 return Concat(axis=axis)(*xs) 113 [end of chainer/functions/concat.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/chainer/functions/concat.py b/chainer/functions/concat.py --- a/chainer/functions/concat.py +++ b/chainer/functions/concat.py @@ -64,6 +64,7 @@ shape = list(xs[0].shape) for x in xs[1:]: shape[self.axis] += x.shape[self.axis] + shape = tuple(shape) self.shape = shape y = cuda.empty(shape, dtype=xs[0].dtype)
{"golden_diff": "diff --git a/chainer/functions/concat.py b/chainer/functions/concat.py\n--- a/chainer/functions/concat.py\n+++ b/chainer/functions/concat.py\n@@ -64,6 +64,7 @@\n shape = list(xs[0].shape)\n for x in xs[1:]:\n shape[self.axis] += x.shape[self.axis]\n+ shape = tuple(shape)\n self.shape = shape\n \n y = cuda.empty(shape, dtype=xs[0].dtype)\n", "issue": "Shape of output value of `concat` is list in GPU\n`cuda.empty([1])` makes a `GPUArray` with `list` shape. It causes a type invalid error.\n\nhttps://github.com/pfnet/chainer/blob/master/chainer/functions/concat.py#L69\n\n", "before_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n_args = 'const float* x, float* y, int cdimx, int cdimy, int rdim, int coffset'\n_preamble = '''\n#define COPY(statement) \\\n int l = i / (rdim * cdimx); \\\n int c = i / rdim % cdimx + coffset; \\\n int r = i % rdim; \\\n int idx = r + rdim * (c + cdimy * l); \\\n statement;\n'''\n\n\nclass Concat(function.Function):\n\n \"\"\"Concatenate multiple tensors towards specified axis.\"\"\"\n\n # concat along the channel dimension by default\n def __init__(self, axis=1):\n self.axis = axis\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() > 0)\n type_check.expect(in_types[0].ndim >\n type_check.Variable(self.axis, 'axis'))\n\n ndim = in_types[0].ndim.eval()\n for i in range(1, in_types.size().eval()):\n type_check.expect(\n in_types[0].dtype == in_types[i].dtype,\n in_types[0].ndim == in_types[i].ndim,\n )\n for d in range(0, ndim):\n if d == self.axis:\n continue\n type_check.expect(in_types[0].shape[d] == in_types[i].shape[d])\n\n def check_type_backward(self, in_types, out_types):\n type_check.expect(\n in_types.size() > 0,\n out_types.size() == 1,\n )\n y_type, = out_types\n\n type_check.expect(y_type.dtype == in_types[0].dtype)\n ndim = in_types[0].ndim.eval()\n concat_size = sum(typ.shape[self.axis] for typ in in_types)\n type_check.expect(concat_size == y_type.shape[self.axis])\n\n for d in range(0, ndim):\n if d == self.axis:\n continue\n type_check.expect(y_type.shape[d] == in_types[0].shape[d])\n\n def forward_cpu(self, xs):\n return numpy.concatenate(xs, axis=self.axis),\n\n def forward_gpu(self, xs):\n # TODO(beam2d): Unify the process into a single kernel.\n shape = list(xs[0].shape)\n for x in xs[1:]:\n shape[self.axis] += x.shape[self.axis]\n self.shape = shape\n\n y = cuda.empty(shape, dtype=xs[0].dtype)\n self.cdimy = y.shape[self.axis]\n self.rdim = numpy.prod(shape[self.axis + 1:], dtype=int)\n\n coffset = 0\n kernel = cuda.elementwise(\n _args, 'COPY(y[idx] = x[i])', 'concat_fwd', preamble=_preamble)\n for x in xs:\n cdimx = x.shape[self.axis]\n kernel(x, y, cdimx, self.cdimy, self.rdim, coffset)\n coffset += cdimx\n\n return y,\n\n def backward_cpu(self, xs, gy):\n sizes = numpy.array([x.shape[self.axis] for x in xs[:-1]]).cumsum()\n return numpy.split(gy[0], sizes, axis=self.axis)\n\n def backward_gpu(self, xs, gy):\n gxs = tuple(cuda.empty_like(x) for x in xs)\n\n coffset = 0\n kernel = cuda.elementwise(\n _args, 'COPY(x[i] = y[idx])', 'concat_bwd', preamble=_preamble)\n for gx in gxs:\n cdimx = gx.shape[self.axis]\n kernel(gx, gy[0], cdimx, self.cdimy, self.rdim, coffset)\n coffset += cdimx\n\n return gxs\n\n\ndef concat(xs, axis=1):\n \"\"\"Concatenates given variables along an axis.\n\n Args:\n xs (tuple of Variables): Variables to be concatenated.\n axis (int): Axis that the input arrays are concatenated along.\n\n 
Returns:\n ~chainer.Variable: Output variable.\n\n \"\"\"\n return Concat(axis=axis)(*xs)\n", "path": "chainer/functions/concat.py"}]}
1,769
106
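A minimal sketch of the failure mode behind the chainer record above, using numpy for portability: numpy normalizes a list shape to a tuple, whereas the GPU array constructor in the issue reportedly kept the list, so the diff converts before allocating. The shapes and dtype here are arbitrary examples.

```python
import numpy as np

shape = list((4, 3))   # start from the first input's shape
shape[1] += 2          # grow the concatenation axis for each extra input
shape = tuple(shape)   # the one-line fix: normalize before allocating
y = np.empty(shape, dtype=np.float32)
assert y.shape == (4, 5) and isinstance(y.shape, tuple)
```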
gh_patches_debug_38697
rasdani/github-patches
git_diff
facebookresearch__hydra-605
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Feature Request] Allow Initialize to take a module # 🚀 Feature Request Allow hydra.experimental.initialize to take a calling module </issue> <code> [start of hydra/experimental/compose.py] 1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 from typing import List, Optional 3 4 from omegaconf import DictConfig, open_dict 5 6 from hydra._internal.hydra import Hydra 7 from hydra._internal.utils import detect_calling_file_or_module_from_stack_frame 8 from hydra.core.global_hydra import GlobalHydra 9 10 11 def initialize( 12 config_dir: Optional[str] = None, 13 strict: Optional[bool] = None, 14 caller_stack_depth: int = 1, 15 ) -> None: 16 """ 17 :param config_dir: config directory relative to the calling script 18 :param strict: 19 :param caller_stack_depth: 20 :return: 21 """ 22 calling_file, calling_module = detect_calling_file_or_module_from_stack_frame( 23 caller_stack_depth + 1 24 ) 25 Hydra.create_main_hydra_file_or_module( 26 calling_file, calling_module, config_dir, strict 27 ) 28 29 30 def compose( 31 config_name: Optional[str] = None, 32 overrides: List[str] = [], 33 strict: Optional[bool] = None, 34 ) -> DictConfig: 35 """ 36 :param config_name: optional config name to load 37 :param overrides: list of overrides for config file 38 :param strict: optionally override the default strict mode 39 :return: the composed config 40 """ 41 assert ( 42 GlobalHydra().is_initialized() 43 ), "GlobalHydra is not initialized, use @hydra.main() or call hydra.experimental.initialize() first" 44 45 gh = GlobalHydra.instance() 46 assert gh.hydra is not None 47 cfg = gh.hydra.compose_config( 48 config_name=config_name, overrides=overrides, strict=strict 49 ) 50 assert isinstance(cfg, DictConfig) 51 52 if "hydra" in cfg: 53 with open_dict(cfg): 54 del cfg["hydra"] 55 return cfg 56 [end of hydra/experimental/compose.py] [start of examples/advanced/ad_hoc_composition/hydra_compose_example.py] 1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 from hydra.experimental import compose, initialize 3 4 if __name__ == "__main__": 5 # initialize the Hydra subsystem. 6 # This is needed for apps that cannot have a standard @hydra.main() entry point 7 initialize(config_dir="conf", strict=True) 8 9 cfg = compose("config.yaml", overrides=["db=mysql", "db.user=${env:USER}"]) 10 print(cfg.pretty(resolve=True)) 11 [end of examples/advanced/ad_hoc_composition/hydra_compose_example.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/advanced/ad_hoc_composition/hydra_compose_example.py b/examples/advanced/ad_hoc_composition/hydra_compose_example.py --- a/examples/advanced/ad_hoc_composition/hydra_compose_example.py +++ b/examples/advanced/ad_hoc_composition/hydra_compose_example.py @@ -4,7 +4,7 @@ if __name__ == "__main__": # initialize the Hydra subsystem. # This is needed for apps that cannot have a standard @hydra.main() entry point - initialize(config_dir="conf", strict=True) + initialize(config_path="conf", strict=True) cfg = compose("config.yaml", overrides=["db=mysql", "db.user=${env:USER}"]) print(cfg.pretty(resolve=True)) diff --git a/hydra/experimental/compose.py b/hydra/experimental/compose.py --- a/hydra/experimental/compose.py +++ b/hydra/experimental/compose.py @@ -9,31 +9,58 @@ def initialize( - config_dir: Optional[str] = None, + config_path: Optional[str] = None, strict: Optional[bool] = None, caller_stack_depth: int = 1, ) -> None: """ - :param config_dir: config directory relative to the calling script - :param strict: - :param caller_stack_depth: - :return: + Initialize automatically detect the calling file or module. + config_path is relative to the detected calling for or module. + + :param config_path: A directory relative to the declaring python file or module + :param strict: (Deprecated), will be removed in the next major version + :param caller_stack_depth: stack depth of module the config_path is relative to """ calling_file, calling_module = detect_calling_file_or_module_from_stack_frame( caller_stack_depth + 1 ) Hydra.create_main_hydra_file_or_module( - calling_file, calling_module, config_dir, strict + calling_file, calling_module, config_path, strict ) +def initialize_with_file( + calling_file: Optional[str], config_path: Optional[str] = None +) -> None: + """ + Initialize Hydra and add the config_path to the search path. + The config path is relative to the calling_file. + :param calling_file : The file to make the config_path relative to + :param config_path : The config path + """ + Hydra.create_main_hydra_file_or_module(calling_file, None, config_path, None) + + +def initialize_with_module( + calling_module: Optional[str], config_path: Optional[str] = None +) -> None: + """ + Initialize Hydra and add the config_path to the search path. + The config path is relative to the calling_module. + :param calling_module : The module to make the config_path relative to + :param config_path : The config path + """ + + Hydra.create_main_hydra_file_or_module(None, calling_module, config_path, None) + + def compose( config_name: Optional[str] = None, overrides: List[str] = [], strict: Optional[bool] = None, ) -> DictConfig: """ - :param config_name: optional config name to load + :param config_name: the name of the config (usually the file name without the .yaml extension) :param overrides: list of overrides for config file :param strict: optionally override the default strict mode :return: the composed config
{"golden_diff": "diff --git a/examples/advanced/ad_hoc_composition/hydra_compose_example.py b/examples/advanced/ad_hoc_composition/hydra_compose_example.py\n--- a/examples/advanced/ad_hoc_composition/hydra_compose_example.py\n+++ b/examples/advanced/ad_hoc_composition/hydra_compose_example.py\n@@ -4,7 +4,7 @@\n if __name__ == \"__main__\":\n # initialize the Hydra subsystem.\n # This is needed for apps that cannot have a standard @hydra.main() entry point\n- initialize(config_dir=\"conf\", strict=True)\n+ initialize(config_path=\"conf\", strict=True)\n \n cfg = compose(\"config.yaml\", overrides=[\"db=mysql\", \"db.user=${env:USER}\"])\n print(cfg.pretty(resolve=True))\ndiff --git a/hydra/experimental/compose.py b/hydra/experimental/compose.py\n--- a/hydra/experimental/compose.py\n+++ b/hydra/experimental/compose.py\n@@ -9,31 +9,58 @@\n \n \n def initialize(\n- config_dir: Optional[str] = None,\n+ config_path: Optional[str] = None,\n strict: Optional[bool] = None,\n caller_stack_depth: int = 1,\n ) -> None:\n \"\"\"\n- :param config_dir: config directory relative to the calling script\n- :param strict:\n- :param caller_stack_depth:\n- :return:\n+ Initialize automatically detect the calling file or module.\n+ config_path is relative to the detected calling for or module.\n+\n+ :param config_path: A directory relative to the declaring python file or module\n+ :param strict: (Deprecated), will be removed in the next major version\n+ :param caller_stack_depth: stack depth of module the config_path is relative to\n \"\"\"\n calling_file, calling_module = detect_calling_file_or_module_from_stack_frame(\n caller_stack_depth + 1\n )\n Hydra.create_main_hydra_file_or_module(\n- calling_file, calling_module, config_dir, strict\n+ calling_file, calling_module, config_path, strict\n )\n \n \n+def initialize_with_file(\n+ calling_file: Optional[str], config_path: Optional[str] = None\n+) -> None:\n+ \"\"\"\n+ Initialize Hydra and add the config_path to the search path.\n+ The config path is relative to the calling_file.\n+ :param calling_file : The file to make the config_path relative to\n+ :param config_path : The config path\n+ \"\"\"\n+ Hydra.create_main_hydra_file_or_module(calling_file, None, config_path, None)\n+\n+\n+def initialize_with_module(\n+ calling_module: Optional[str], config_path: Optional[str] = None\n+) -> None:\n+ \"\"\"\n+ Initialize Hydra and add the config_path to the search path.\n+ The config path is relative to the calling_module.\n+ :param calling_module : The module to make the config_path relative to\n+ :param config_path : The config path\n+ \"\"\"\n+\n+ Hydra.create_main_hydra_file_or_module(None, calling_module, config_path, None)\n+\n+\n def compose(\n config_name: Optional[str] = None,\n overrides: List[str] = [],\n strict: Optional[bool] = None,\n ) -> DictConfig:\n \"\"\"\n- :param config_name: optional config name to load\n+ :param config_name: the name of the config (usually the file name without the .yaml extension)\n :param overrides: list of overrides for config file\n :param strict: optionally override the default strict mode\n :return: the composed config\n", "issue": "[Feature Request] Allow Initialize to take a module\n# \ud83d\ude80 Feature Request\r\n\r\nAllow hydra.experimental.initialize to take a calling module\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nfrom typing import List, Optional\n\nfrom omegaconf import DictConfig, open_dict\n\nfrom hydra._internal.hydra import Hydra\nfrom hydra._internal.utils import detect_calling_file_or_module_from_stack_frame\nfrom hydra.core.global_hydra import GlobalHydra\n\n\ndef initialize(\n config_dir: Optional[str] = None,\n strict: Optional[bool] = None,\n caller_stack_depth: int = 1,\n) -> None:\n \"\"\"\n :param config_dir: config directory relative to the calling script\n :param strict:\n :param caller_stack_depth:\n :return:\n \"\"\"\n calling_file, calling_module = detect_calling_file_or_module_from_stack_frame(\n caller_stack_depth + 1\n )\n Hydra.create_main_hydra_file_or_module(\n calling_file, calling_module, config_dir, strict\n )\n\n\ndef compose(\n config_name: Optional[str] = None,\n overrides: List[str] = [],\n strict: Optional[bool] = None,\n) -> DictConfig:\n \"\"\"\n :param config_name: optional config name to load\n :param overrides: list of overrides for config file\n :param strict: optionally override the default strict mode\n :return: the composed config\n \"\"\"\n assert (\n GlobalHydra().is_initialized()\n ), \"GlobalHydra is not initialized, use @hydra.main() or call hydra.experimental.initialize() first\"\n\n gh = GlobalHydra.instance()\n assert gh.hydra is not None\n cfg = gh.hydra.compose_config(\n config_name=config_name, overrides=overrides, strict=strict\n )\n assert isinstance(cfg, DictConfig)\n\n if \"hydra\" in cfg:\n with open_dict(cfg):\n del cfg[\"hydra\"]\n return cfg\n", "path": "hydra/experimental/compose.py"}, {"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom hydra.experimental import compose, initialize\n\nif __name__ == \"__main__\":\n # initialize the Hydra subsystem.\n # This is needed for apps that cannot have a standard @hydra.main() entry point\n initialize(config_dir=\"conf\", strict=True)\n\n cfg = compose(\"config.yaml\", overrides=[\"db=mysql\", \"db.user=${env:USER}\"])\n print(cfg.pretty(resolve=True))\n", "path": "examples/advanced/ad_hoc_composition/hydra_compose_example.py"}]}
1,225
790
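The Hydra diff above adds two explicit entry points alongside the renamed `config_path` argument; a hypothetical usage sketch follows. The `my_app.entry` module and its adjacent `conf/` directory are assumptions for the example, not part of the patch; the function signatures are taken from the diff.

```python
from hydra.experimental import compose, initialize_with_module

# Make config lookups relative to an importable module instead of relying
# on stack-frame detection of the caller.
initialize_with_module("my_app.entry", config_path="conf")
cfg = compose(config_name="config", overrides=["db=mysql"])
print(cfg.pretty(resolve=True))
```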
gh_patches_debug_25543
rasdani/github-patches
git_diff
bridgecrewio__checkov-5756
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> CKV_AWS_265 false positive **Describe the issue** CKV_AWS_265 fails checks on KeySpaces not using a Customer Managed KMS key even though it is configured to do so This is because the code is looking for ["CUSTOMER_MANAGED_KEY"](https://github.com/bridgecrewio/checkov/blob/main/checkov/terraform/checks/resource/aws/KeyspacesTableUsesCMK.py#L17) When the specification is listed as actually ["CUSTOMER_MANAGED_KMS_KEY"](https://docs.aws.amazon.com/keyspaces/latest/APIReference/API_EncryptionSpecification.html) **Examples** ``` resource "aws_keyspaces_table" "keyspace_table" { keyspace_name = "foo" table_name = "bar" encryption_specification { ## This should not fail the check CKV_AWS_265 but it does kms_key_identifier = var.kms_key_arn type = "CUSTOMER_MANAGED_KMS_KEY" } } ``` **Version (please complete the following information):** - 3.0.32 **Additional context** ``` ~/Downloads> checkov -f ./keyspaces.tf --support 2023-11-10 09:21:38,953 [MainThread ] [WARNI] --bc-api-key argument is required when using --support [ terraform framework ]: 100%|████████████████████|[1/1], Current File Scanned=keyspaces.tf [ secrets framework ]: 100%|████████████████████|[1/1], Current File Scanned=./keyspaces.tf _ _ ___| |__ ___ ___| | _______ __ / __| '_ \ / _ \/ __| |/ / _ \ \ / / | (__| | | | __/ (__| < (_) \ V / \___|_| |_|\___|\___|_|\_\___/ \_/ By Prisma Cloud | version: 3.0.32 terraform scan results: Passed checks: 0, Failed checks: 1, Skipped checks: 0 Check: CKV_AWS_265: "Ensure Keyspaces Table uses CMK" FAILED for resource: aws_keyspaces_table.keyspace_table File: /keyspaces.tf:1-9 Guide: https://docs.prismacloud.io/en/enterprise-edition/policy-reference/aws-policies/aws-general-policies/ensure-aws-keyspace-table-uses-customer-managed-keys-cmks 1 | resource "aws_keyspaces_table" "keyspace_table" { 2 | keyspace_name = "foo" 3 | table_name = "bar" 4 | encryption_specification { 5 | ## This should not fail the check CKV_AWS_265 but it does 6 | kms_key_identifier = var.kms_key_arn 7 | type = "CUSTOMER_MANAGED_KMS_KEY" 8 | } 9 | } ``` </issue> <code> [start of checkov/terraform/checks/resource/aws/KeyspacesTableUsesCMK.py] 1 from checkov.common.models.enums import CheckCategories, CheckResult 2 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck 3 4 5 class KeyspacesTableUsesCMK(BaseResourceCheck): 6 def __init__(self): 7 name = "Ensure Keyspaces Table uses CMK" 8 id = "CKV_AWS_265" 9 supported_resources = ['aws_keyspaces_table'] 10 categories = [CheckCategories.ENCRYPTION] 11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) 12 13 def scan_resource_conf(self, conf) -> CheckResult: 14 if conf.get("encryption_specification") and isinstance(conf.get("encryption_specification"), list): 15 encrypt = conf.get("encryption_specification")[0] 16 if encrypt.get("kms_key_identifier") and isinstance(encrypt.get("kms_key_identifier"), list): 17 if encrypt.get("type") == ["CUSTOMER_MANAGED_KEY"]: 18 return CheckResult.PASSED 19 self.evaluated_keys = ["encryption_specification/[0]/type"] 20 self.evaluated_keys = ["encryption_specification/[0]/kms_key_identifier"] 21 self.evaluated_keys = ["encryption_specification"] 22 return CheckResult.FAILED 23 24 25 check = KeyspacesTableUsesCMK() 26 [end of checkov/terraform/checks/resource/aws/KeyspacesTableUsesCMK.py] </code> I need you to solve this issue by generating a single patch file 
that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/checkov/terraform/checks/resource/aws/KeyspacesTableUsesCMK.py b/checkov/terraform/checks/resource/aws/KeyspacesTableUsesCMK.py --- a/checkov/terraform/checks/resource/aws/KeyspacesTableUsesCMK.py +++ b/checkov/terraform/checks/resource/aws/KeyspacesTableUsesCMK.py @@ -3,6 +3,10 @@ class KeyspacesTableUsesCMK(BaseResourceCheck): + """ + Valid values for encryption_specification type: + AWS_OWNED_KMS_KEY (default), CUSTOMER_MANAGED_KMS_KEY (requires kms_key_identifier:ARN) + """ def __init__(self): name = "Ensure Keyspaces Table uses CMK" id = "CKV_AWS_265" @@ -14,7 +18,7 @@ if conf.get("encryption_specification") and isinstance(conf.get("encryption_specification"), list): encrypt = conf.get("encryption_specification")[0] if encrypt.get("kms_key_identifier") and isinstance(encrypt.get("kms_key_identifier"), list): - if encrypt.get("type") == ["CUSTOMER_MANAGED_KEY"]: + if encrypt.get("type") == ["CUSTOMER_MANAGED_KMS_KEY"]: return CheckResult.PASSED self.evaluated_keys = ["encryption_specification/[0]/type"] self.evaluated_keys = ["encryption_specification/[0]/kms_key_identifier"]
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/KeyspacesTableUsesCMK.py b/checkov/terraform/checks/resource/aws/KeyspacesTableUsesCMK.py\n--- a/checkov/terraform/checks/resource/aws/KeyspacesTableUsesCMK.py\n+++ b/checkov/terraform/checks/resource/aws/KeyspacesTableUsesCMK.py\n@@ -3,6 +3,10 @@\n \n \n class KeyspacesTableUsesCMK(BaseResourceCheck):\n+ \"\"\"\n+ Valid values for encryption_specification type:\n+ AWS_OWNED_KMS_KEY (default), CUSTOMER_MANAGED_KMS_KEY (requires kms_key_identifier:ARN)\n+ \"\"\"\n def __init__(self):\n name = \"Ensure Keyspaces Table uses CMK\"\n id = \"CKV_AWS_265\"\n@@ -14,7 +18,7 @@\n if conf.get(\"encryption_specification\") and isinstance(conf.get(\"encryption_specification\"), list):\n encrypt = conf.get(\"encryption_specification\")[0]\n if encrypt.get(\"kms_key_identifier\") and isinstance(encrypt.get(\"kms_key_identifier\"), list):\n- if encrypt.get(\"type\") == [\"CUSTOMER_MANAGED_KEY\"]:\n+ if encrypt.get(\"type\") == [\"CUSTOMER_MANAGED_KMS_KEY\"]:\n return CheckResult.PASSED\n self.evaluated_keys = [\"encryption_specification/[0]/type\"]\n self.evaluated_keys = [\"encryption_specification/[0]/kms_key_identifier\"]\n", "issue": "CKV_AWS_265 false positive\n**Describe the issue**\r\nCKV_AWS_265 fails checks on KeySpaces not using a Customer Managed KMS key even though it is configured to do so\r\n\r\nThis is because the code is looking for [\"CUSTOMER_MANAGED_KEY\"](https://github.com/bridgecrewio/checkov/blob/main/checkov/terraform/checks/resource/aws/KeyspacesTableUsesCMK.py#L17)\r\n\r\nWhen the specification is listed as actually [\"CUSTOMER_MANAGED_KMS_KEY\"](https://docs.aws.amazon.com/keyspaces/latest/APIReference/API_EncryptionSpecification.html)\r\n\r\n**Examples**\r\n```\r\nresource \"aws_keyspaces_table\" \"keyspace_table\" {\r\n keyspace_name = \"foo\"\r\n table_name = \"bar\"\r\n encryption_specification {\r\n ## This should not fail the check CKV_AWS_265 but it does\r\n kms_key_identifier = var.kms_key_arn\r\n type = \"CUSTOMER_MANAGED_KMS_KEY\"\r\n }\r\n}\r\n```\r\n\r\n**Version (please complete the following information):**\r\n - 3.0.32\r\n\r\n**Additional context**\r\n\r\n```\r\n~/Downloads> checkov -f ./keyspaces.tf --support\r\n2023-11-10 09:21:38,953 [MainThread ] [WARNI] --bc-api-key argument is required when using --support\r\n[ terraform framework ]: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588|[1/1], Current File Scanned=keyspaces.tf\r\n[ secrets framework ]: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588|[1/1], Current File Scanned=./keyspaces.tf\r\n\r\n _ _\r\n ___| |__ ___ ___| | _______ __\r\n / __| '_ \\ / _ \\/ __| |/ / _ \\ \\ / /\r\n | (__| | | | __/ (__| < (_) \\ V /\r\n \\___|_| |_|\\___|\\___|_|\\_\\___/ \\_/\r\n\r\nBy Prisma Cloud | version: 3.0.32\r\n\r\nterraform scan results:\r\n\r\nPassed checks: 0, Failed checks: 1, Skipped checks: 0\r\n\r\nCheck: CKV_AWS_265: \"Ensure Keyspaces Table uses CMK\"\r\n\tFAILED for resource: aws_keyspaces_table.keyspace_table\r\n\tFile: /keyspaces.tf:1-9\r\n\tGuide: https://docs.prismacloud.io/en/enterprise-edition/policy-reference/aws-policies/aws-general-policies/ensure-aws-keyspace-table-uses-customer-managed-keys-cmks\r\n\r\n\t\t1 | resource \"aws_keyspaces_table\" \"keyspace_table\" {\r\n\t\t2 | keyspace_name = \"foo\"\r\n\t\t3 | table_name = \"bar\"\r\n\t\t4 | encryption_specification {\r\n\t\t5 | 
## This should not fail the check CKV_AWS_265 but it does\r\n\t\t6 | kms_key_identifier = var.kms_key_arn\r\n\t\t7 | type = \"CUSTOMER_MANAGED_KMS_KEY\"\r\n\t\t8 | }\r\n\t\t9 | }\r\n```\r\n\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass KeyspacesTableUsesCMK(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure Keyspaces Table uses CMK\"\n id = \"CKV_AWS_265\"\n supported_resources = ['aws_keyspaces_table']\n categories = [CheckCategories.ENCRYPTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf) -> CheckResult:\n if conf.get(\"encryption_specification\") and isinstance(conf.get(\"encryption_specification\"), list):\n encrypt = conf.get(\"encryption_specification\")[0]\n if encrypt.get(\"kms_key_identifier\") and isinstance(encrypt.get(\"kms_key_identifier\"), list):\n if encrypt.get(\"type\") == [\"CUSTOMER_MANAGED_KEY\"]:\n return CheckResult.PASSED\n self.evaluated_keys = [\"encryption_specification/[0]/type\"]\n self.evaluated_keys = [\"encryption_specification/[0]/kms_key_identifier\"]\n self.evaluated_keys = [\"encryption_specification\"]\n return CheckResult.FAILED\n\n\ncheck = KeyspacesTableUsesCMK()\n", "path": "checkov/terraform/checks/resource/aws/KeyspacesTableUsesCMK.py"}]}
1,555
317
gh_patches_debug_253
rasdani/github-patches
git_diff
kserve__kserve-2726
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Knative installation keeps failing in e2e tests /kind bug **What steps did you take and what happened:** [A clear and concise description of what the bug is.] The e2e tests are failing every now and then while running the knative installation step, more specifically while patching the configmap. A solution has to be provided so that the installation completes successfully using some kind of retry mechanism. **What did you expect to happen:** All e2e tests to run without any issues. **Environment:** e2e environment </issue> <code> [start of python/kserve/setup.py] 1 # Copyright 2021 The KServe Authors. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 import pathlib 15 16 import setuptools 17 18 TESTS_REQUIRES = [ 19 'pytest', 20 'pytest-xdist', 21 'pytest-cov', 22 'pytest-asyncio', 23 'mypy', 24 'portforward', 25 ] 26 27 with open('requirements.txt') as f: 28 REQUIRES = f.readlines() 29 30 with open(pathlib.Path(__file__).parent.parent / 'VERSION') as version_file: 31 version = version_file.read().strip() 32 33 setuptools.setup( 34 name='kserve', 35 version=version, 36 author="The KServe Authors", 37 author_email='[email protected], [email protected], [email protected]', 38 license="Apache License Version 2.0", 39 url="https://github.com/kserve/kserve/tree/master/python/kserve", 40 description="KServe Python SDK", 41 long_description="Python SDK for KServe Server and Client.", 42 python_requires='>=3.7', 43 packages=[ 44 'kserve', 45 'kserve.api', 46 'kserve.constants', 47 'kserve.models', 48 'kserve.utils', 49 'kserve.protocol', 50 'kserve.protocol.rest', 51 'kserve.protocol.grpc', 52 ], 53 package_data={'': ['requirements.txt']}, 54 include_package_data=True, 55 zip_safe=False, 56 classifiers=[ 57 'Intended Audience :: Developers', 58 'Intended Audience :: Education', 59 'Intended Audience :: Science/Research', 60 'Programming Language :: Python :: 3', 61 'Programming Language :: Python :: 3.7', 62 'Programming Language :: Python :: 3.8', 63 'Programming Language :: Python :: 3.9', 64 "License :: OSI Approved :: Apache Software License", 65 "Operating System :: OS Independent", 66 'Topic :: Scientific/Engineering', 67 'Topic :: Scientific/Engineering :: Artificial Intelligence', 68 'Topic :: Software Development', 69 'Topic :: Software Development :: Libraries', 70 'Topic :: Software Development :: Libraries :: Python Modules', 71 ], 72 install_requires=REQUIRES, 73 tests_require=TESTS_REQUIRES, 74 extras_require={'test': TESTS_REQUIRES} 75 ) 76 [end of python/kserve/setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/python/kserve/setup.py b/python/kserve/setup.py --- a/python/kserve/setup.py +++ b/python/kserve/setup.py @@ -21,7 +21,7 @@ 'pytest-cov', 'pytest-asyncio', 'mypy', - 'portforward', + 'portforward==0.4.0', ] with open('requirements.txt') as f:
{"golden_diff": "diff --git a/python/kserve/setup.py b/python/kserve/setup.py\n--- a/python/kserve/setup.py\n+++ b/python/kserve/setup.py\n@@ -21,7 +21,7 @@\n 'pytest-cov',\n 'pytest-asyncio',\n 'mypy',\n- 'portforward',\n+ 'portforward==0.4.0',\n ]\n \n with open('requirements.txt') as f:\n", "issue": "Knative installation keeps failing in e2e tests\n/kind bug\r\n\r\n**What steps did you take and what happened:**\r\n[A clear and concise description of what the bug is.]\r\nThe e2e tests are failing every now and then while running the knative installation step, more specifically while patching the configmap. A solution has to be provided so that the installation completes successfully using some kind of retry mechanism.\r\n\r\n**What did you expect to happen:**\r\nAll e2e tests to run without any issues.\r\n\r\n\r\n**Environment:**\r\n\r\ne2e environment\r\n\n", "before_files": [{"content": "# Copyright 2021 The KServe Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport pathlib\n\nimport setuptools\n\nTESTS_REQUIRES = [\n 'pytest',\n 'pytest-xdist',\n 'pytest-cov',\n 'pytest-asyncio',\n 'mypy',\n 'portforward',\n]\n\nwith open('requirements.txt') as f:\n REQUIRES = f.readlines()\n\nwith open(pathlib.Path(__file__).parent.parent / 'VERSION') as version_file:\n version = version_file.read().strip()\n\nsetuptools.setup(\n name='kserve',\n version=version,\n author=\"The KServe Authors\",\n author_email='[email protected], [email protected], [email protected]',\n license=\"Apache License Version 2.0\",\n url=\"https://github.com/kserve/kserve/tree/master/python/kserve\",\n description=\"KServe Python SDK\",\n long_description=\"Python SDK for KServe Server and Client.\",\n python_requires='>=3.7',\n packages=[\n 'kserve',\n 'kserve.api',\n 'kserve.constants',\n 'kserve.models',\n 'kserve.utils',\n 'kserve.protocol',\n 'kserve.protocol.rest',\n 'kserve.protocol.grpc',\n ],\n package_data={'': ['requirements.txt']},\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n install_requires=REQUIRES,\n tests_require=TESTS_REQUIRES,\n extras_require={'test': TESTS_REQUIRES}\n)\n", "path": "python/kserve/setup.py"}]}
1,375
92
gh_patches_debug_1688
rasdani/github-patches
git_diff
conda__conda-build-570
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> AppVeyor: AttributeError: 'module' object has no attribute 'get_pid_list https://ci.appveyor.com/project/mpi4py/mpi4py/build/2.0.0a0-17/job/965h1pw9k7476768#L1187 conda info: https://ci.appveyor.com/project/mpi4py/mpi4py/build/2.0.0a0-17/job/965h1pw9k7476768#L1076 Please note a few lines above I ran: `C:\Anaconda\Scripts\conda.exe install --yes --quiet anaconda-client conda-build jinja2` </issue> <code> [start of conda_build/windows.py] 1 from __future__ import absolute_import, division, print_function 2 3 import os 4 import sys 5 import shutil 6 from os.path import dirname, isdir, isfile, join, exists 7 8 import conda.config as cc 9 from conda.compat import iteritems 10 11 from conda_build.config import config 12 from conda_build import environ 13 from conda_build import source 14 from conda_build.utils import _check_call 15 16 try: 17 import psutil 18 except ImportError: 19 psutil = None 20 21 assert sys.platform == 'win32' 22 23 24 def fix_staged_scripts(): 25 """ 26 Fixes scripts which have been installed unix-style to have a .bat 27 helper 28 """ 29 scripts_dir = join(config.build_prefix, 'Scripts') 30 if not isdir(scripts_dir): 31 return 32 for fn in os.listdir(scripts_dir): 33 # process all the extensionless files 34 if not isfile(join(scripts_dir, fn)) or '.' in fn: 35 continue 36 37 with open(join(scripts_dir, fn)) as f: 38 line = f.readline().lower() 39 # If it's a #!python script 40 if not (line.startswith('#!') and 'python' in line.lower()): 41 continue 42 print('Adjusting unix-style #! script %s, ' 43 'and adding a .bat file for it' % fn) 44 # copy it with a .py extension (skipping that first #! line) 45 with open(join(scripts_dir, fn + '-script.py'), 'w') as fo: 46 fo.write(f.read()) 47 # now create the .exe file 48 shutil.copyfile(join(dirname(__file__), 49 'cli-%d.exe' % (8 * tuple.__itemsize__)), 50 join(scripts_dir, fn + '.exe')) 51 52 # remove the original script 53 os.remove(join(scripts_dir, fn)) 54 55 56 def msvc_env_cmd(): 57 if 'ProgramFiles(x86)' in os.environ: 58 program_files = os.environ['ProgramFiles(x86)'] 59 else: 60 program_files = os.environ['ProgramFiles'] 61 62 localappdata = os.environ.get("localappdata") 63 64 if config.PY3K: 65 vcvarsall = os.path.join(program_files, 66 r'Microsoft Visual Studio 10.0' 67 r'\VC\vcvarsall.bat') 68 else: 69 vcvarsall = os.path.join(program_files, 70 r'Microsoft Visual Studio 9.0' 71 r'\VC\vcvarsall.bat') 72 73 # Try the Microsoft Visual C++ Compiler for Python 2.7 74 if not isfile(vcvarsall) and localappdata and not config.PY3K: 75 vcvarsall = os.path.join(localappdata, "Programs", "Common", 76 "Microsoft", "Visual C++ for Python", "9.0", "vcvarsall.bat") 77 if not isfile(vcvarsall) and program_files and not config.PY3K: 78 vcvarsall = os.path.join(program_files, 'Common Files', 79 'Microsoft', 'Visual C++ for Python', "9.0", "vcvarsall.bat") 80 if not isfile(vcvarsall): 81 print("Warning: Couldn't find Visual Studio: %r" % vcvarsall) 82 return '' 83 84 return '''\ 85 call "%s" %s 86 ''' % (vcvarsall, {32: 'x86', 64: 'amd64'}[cc.bits]) 87 88 89 def kill_processes(): 90 if psutil is None: 91 return 92 for n in psutil.get_pid_list(): 93 try: 94 p = psutil.Process(n) 95 if p.name.lower() == 'msbuild.exe': 96 print('Terminating:', p.name) 97 p.terminate() 98 except: 99 continue 100 101 102 def build(m): 103 env = dict(os.environ) 104 env.update(environ.get_dict(m)) 105 106 for name in 'BIN', 'INC', 'LIB': 107 path = 
env['LIBRARY_' + name] 108 if not isdir(path): 109 os.makedirs(path) 110 111 src_dir = source.get_dir() 112 bld_bat = join(m.path, 'bld.bat') 113 if exists(bld_bat): 114 with open(bld_bat) as fi: 115 data = fi.read() 116 with open(join(src_dir, 'bld.bat'), 'w') as fo: 117 fo.write(msvc_env_cmd()) 118 for kv in iteritems(env): 119 fo.write('set "%s=%s"\n' % kv) 120 # more debuggable with echo on 121 fo.write('@echo on\n') 122 fo.write("REM ===== end generated header =====\n") 123 fo.write(data) 124 125 cmd = [os.environ['COMSPEC'], '/c', 'call', 'bld.bat'] 126 _check_call(cmd, cwd=src_dir) 127 kill_processes() 128 fix_staged_scripts() 129 [end of conda_build/windows.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/conda_build/windows.py b/conda_build/windows.py --- a/conda_build/windows.py +++ b/conda_build/windows.py @@ -89,7 +89,7 @@ def kill_processes(): if psutil is None: return - for n in psutil.get_pid_list(): + for n in psutil.pids(): try: p = psutil.Process(n) if p.name.lower() == 'msbuild.exe':
{"golden_diff": "diff --git a/conda_build/windows.py b/conda_build/windows.py\n--- a/conda_build/windows.py\n+++ b/conda_build/windows.py\n@@ -89,7 +89,7 @@\n def kill_processes():\n if psutil is None:\n return\n- for n in psutil.get_pid_list():\n+ for n in psutil.pids():\n try:\n p = psutil.Process(n)\n if p.name.lower() == 'msbuild.exe':\n", "issue": "AppVeyor: AttributeError: 'module' object has no attribute 'get_pid_list\nhttps://ci.appveyor.com/project/mpi4py/mpi4py/build/2.0.0a0-17/job/965h1pw9k7476768#L1187\n\nconda info:\nhttps://ci.appveyor.com/project/mpi4py/mpi4py/build/2.0.0a0-17/job/965h1pw9k7476768#L1076\n\nPlease note a few lines above I ran:\n`C:\\Anaconda\\Scripts\\conda.exe install --yes --quiet anaconda-client conda-build jinja2`\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\nimport shutil\nfrom os.path import dirname, isdir, isfile, join, exists\n\nimport conda.config as cc\nfrom conda.compat import iteritems\n\nfrom conda_build.config import config\nfrom conda_build import environ\nfrom conda_build import source\nfrom conda_build.utils import _check_call\n\ntry:\n import psutil\nexcept ImportError:\n psutil = None\n\nassert sys.platform == 'win32'\n\n\ndef fix_staged_scripts():\n \"\"\"\n Fixes scripts which have been installed unix-style to have a .bat\n helper\n \"\"\"\n scripts_dir = join(config.build_prefix, 'Scripts')\n if not isdir(scripts_dir):\n return\n for fn in os.listdir(scripts_dir):\n # process all the extensionless files\n if not isfile(join(scripts_dir, fn)) or '.' in fn:\n continue\n\n with open(join(scripts_dir, fn)) as f:\n line = f.readline().lower()\n # If it's a #!python script\n if not (line.startswith('#!') and 'python' in line.lower()):\n continue\n print('Adjusting unix-style #! script %s, '\n 'and adding a .bat file for it' % fn)\n # copy it with a .py extension (skipping that first #! 
line)\n with open(join(scripts_dir, fn + '-script.py'), 'w') as fo:\n fo.write(f.read())\n # now create the .exe file\n shutil.copyfile(join(dirname(__file__),\n 'cli-%d.exe' % (8 * tuple.__itemsize__)),\n join(scripts_dir, fn + '.exe'))\n\n # remove the original script\n os.remove(join(scripts_dir, fn))\n\n\ndef msvc_env_cmd():\n if 'ProgramFiles(x86)' in os.environ:\n program_files = os.environ['ProgramFiles(x86)']\n else:\n program_files = os.environ['ProgramFiles']\n\n localappdata = os.environ.get(\"localappdata\")\n\n if config.PY3K:\n vcvarsall = os.path.join(program_files,\n r'Microsoft Visual Studio 10.0'\n r'\\VC\\vcvarsall.bat')\n else:\n vcvarsall = os.path.join(program_files,\n r'Microsoft Visual Studio 9.0'\n r'\\VC\\vcvarsall.bat')\n\n # Try the Microsoft Visual C++ Compiler for Python 2.7\n if not isfile(vcvarsall) and localappdata and not config.PY3K:\n vcvarsall = os.path.join(localappdata, \"Programs\", \"Common\",\n \"Microsoft\", \"Visual C++ for Python\", \"9.0\", \"vcvarsall.bat\")\n if not isfile(vcvarsall) and program_files and not config.PY3K:\n vcvarsall = os.path.join(program_files, 'Common Files',\n 'Microsoft', 'Visual C++ for Python', \"9.0\", \"vcvarsall.bat\")\n if not isfile(vcvarsall):\n print(\"Warning: Couldn't find Visual Studio: %r\" % vcvarsall)\n return ''\n\n return '''\\\ncall \"%s\" %s\n''' % (vcvarsall, {32: 'x86', 64: 'amd64'}[cc.bits])\n\n\ndef kill_processes():\n if psutil is None:\n return\n for n in psutil.get_pid_list():\n try:\n p = psutil.Process(n)\n if p.name.lower() == 'msbuild.exe':\n print('Terminating:', p.name)\n p.terminate()\n except:\n continue\n\n\ndef build(m):\n env = dict(os.environ)\n env.update(environ.get_dict(m))\n\n for name in 'BIN', 'INC', 'LIB':\n path = env['LIBRARY_' + name]\n if not isdir(path):\n os.makedirs(path)\n\n src_dir = source.get_dir()\n bld_bat = join(m.path, 'bld.bat')\n if exists(bld_bat):\n with open(bld_bat) as fi:\n data = fi.read()\n with open(join(src_dir, 'bld.bat'), 'w') as fo:\n fo.write(msvc_env_cmd())\n for kv in iteritems(env):\n fo.write('set \"%s=%s\"\\n' % kv)\n # more debuggable with echo on\n fo.write('@echo on\\n')\n fo.write(\"REM ===== end generated header =====\\n\")\n fo.write(data)\n\n cmd = [os.environ['COMSPEC'], '/c', 'call', 'bld.bat']\n _check_call(cmd, cwd=src_dir)\n kill_processes()\n fix_staged_scripts()\n", "path": "conda_build/windows.py"}]}
2,022
103
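Editor's sketch for the record above (an annotation, not a dataset field): the golden diff swaps psutil.get_pid_list() for psutil.pids(), the name that replaced it when psutil 2.0 removed the old API. A minimal illustration, assuming psutil >= 2.0, where Process.name also became a method (a follow-up the record's diff does not make):

[start of sketch: modern psutil process scan]
import psutil

def kill_msbuild_processes():
    for pid in psutil.pids():  # replacement for the removed get_pid_list()
        try:
            proc = psutil.Process(pid)
            if proc.name().lower() == 'msbuild.exe':  # name() is a method on psutil >= 2.0
                proc.terminate()
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            continue
[end of sketch]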
gh_patches_debug_40563
rasdani/github-patches
git_diff
cloud-custodian__cloud-custodian-4169
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> aws - new generic resource action "invoke-stepfunction" enhancement Enhancement which behaves just like action invoke-lambda but invokes (aka starts execution of) step function state machines for use cases which need sequencing and conditional logic which is more easily implemented in step functions. Doc hint: See https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/stepfunctions.html#SFN.Client.start_execution </issue> <code> [start of c7n/resources/sfn.py] 1 # Copyright 2015-2017 Capital One Services, LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 from __future__ import absolute_import, division, print_function, unicode_literals 15 16 from c7n.manager import resources 17 from c7n.query import QueryResourceManager 18 from c7n.tags import Tag, RemoveTag 19 20 21 @resources.register('step-machine') 22 class StepFunction(QueryResourceManager): 23 """AWS Step Functions State Machine""" 24 25 class resource_type(object): 26 service = 'stepfunctions' 27 enum_spec = ('list_state_machines', 'stateMachines', None) 28 arn = id = 'stateMachineArn' 29 name = 'name' 30 date = 'creationDate' 31 dimension = None 32 detail_spec = ( 33 "describe_state_machine", "stateMachineArn", 34 'stateMachineArn', None) 35 filter_name = None 36 37 38 @StepFunction.action_registry.register('tag') 39 class TagStepFunction(Tag): 40 """Action to create tag(s) on a step function 41 42 :example: 43 44 .. code-block:: yaml 45 46 policies: 47 - name: tag-step-function 48 resource: step-machine 49 actions: 50 - type: tag 51 key: target-tag 52 value: target-tag-value 53 """ 54 55 permissions = ('stepfunctions:TagResource',) 56 57 def process_resource_set(self, client, resources, tags): 58 59 tags_lower = [] 60 61 for tag in tags: 62 tags_lower.append({k.lower(): v for k, v in tag.items()}) 63 64 for r in resources: 65 client.tag_resource(resourceArn=r['stateMachineArn'], tags=tags_lower) 66 67 68 @StepFunction.action_registry.register('remove-tag') 69 class UnTagStepFunction(RemoveTag): 70 """Action to create tag(s) on a step function 71 72 :example: 73 74 .. code-block:: yaml 75 76 policies: 77 - name: step-function-remove-tag 78 resource: step-machine 79 actions: 80 - type: remove-tag 81 tags: ["test"] 82 """ 83 84 permissions = ('stepfunctions:UntagResource',) 85 86 def process_resource_set(self, client, resources, tag_keys): 87 88 for r in resources: 89 client.untag_resource(resourceArn=r['stateMachineArn'], tagKeys=tag_keys) 90 [end of c7n/resources/sfn.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/c7n/resources/sfn.py b/c7n/resources/sfn.py --- a/c7n/resources/sfn.py +++ b/c7n/resources/sfn.py @@ -13,9 +13,12 @@ # limitations under the License. from __future__ import absolute_import, division, print_function, unicode_literals + +from c7n.actions import Action from c7n.manager import resources from c7n.query import QueryResourceManager from c7n.tags import Tag, RemoveTag +from c7n.utils import type_schema, local_session, dumps, chunks @resources.register('step-machine') @@ -35,6 +38,95 @@ filter_name = None +class InvokeStepFunction(Action): + """Invoke step function on resources. + + By default this will invoke a step function for each resource + providing both the `policy` and `resource` as input. + + That behavior can be configured setting policy and bulk + boolean flags on the action. + + If bulk action parameter is set to true, then the step + function will be invoked in bulk, with a set of resource arns + under the `resources` key. + + The size of the batch can be configured via the batch-size + parameter. Note step function state (input, execution, etc)must + fit within 32k, we default to batch size 250. + + :example: + + .. code-block:: yaml + + policies: + - name: invoke-step-function + resource: s3 + filters: + - is-log-target + - "tag:IngestSetup": absent + actions: + - type: invoke-sfn + # This will cause the workflow to be invoked + # with many resources arns in a single execution. + # Note this is *not* the default. + bulk: true + batch-size: 10 + state-machine: LogIngestSetup + """ + + schema = type_schema( + 'invoke-sfn', + required=['state-machine'], + **{'state-machine': {'type': 'string'}, + 'batch-size': {'type': 'integer'}, + 'bulk': {'type': 'boolean'}, + 'policy': {'type': 'boolean'}}) + schema_alias = True + permissions = ('stepfunctions:StartExecution',) + + def process(self, resources): + client = local_session( + self.manager.session_factory).client('stepfunctions') + arn = self.data['state-machine'] + if not arn.startswith('arn'): + arn = 'arn:aws:states:{}:{}:stateMachine:{}'.format( + self.manager.config.region, self.manager.config.account_id, arn) + + params = {'stateMachineArn': arn} + pinput = {} + + if self.data.get('policy', True): + pinput['policy'] = dict(self.manager.data) + + resource_set = list(zip(self.manager.get_arns(resources), resources)) + if self.data.get('bulk', False) is True: + return self.invoke_batch(client, params, pinput, resource_set) + + for arn, r in resource_set: + pinput['resource'] = r + params['input'] = dumps(pinput) + r['c7n:execution-arn'] = self.manager.retry( + client.start_execution, **params).get('executionArn') + + def invoke_batch(self, client, params, pinput, resource_set): + for batch_rset in chunks(resource_set, self.data.get('batch-size', 250)): + pinput['resources'] = [rarn for rarn, _ in batch_rset] + params['input'] = dumps(pinput) + exec_arn = self.manager.retry( + client.start_execution, **params).get('executionArn') + for _, r in resource_set: + r['c7n:execution-arn'] = exec_arn + + @classmethod + def register(cls, registry, key): + for _, r in registry.items(): + r.action_registry.register('invoke-sfn', cls) + + +resources.subscribe(resources.EVENT_FINAL, InvokeStepFunction.register) + + @StepFunction.action_registry.register('tag') class TagStepFunction(Tag): """Action to create tag(s) on a step function
{"golden_diff": "diff --git a/c7n/resources/sfn.py b/c7n/resources/sfn.py\n--- a/c7n/resources/sfn.py\n+++ b/c7n/resources/sfn.py\n@@ -13,9 +13,12 @@\n # limitations under the License.\n from __future__ import absolute_import, division, print_function, unicode_literals\n \n+\n+from c7n.actions import Action\n from c7n.manager import resources\n from c7n.query import QueryResourceManager\n from c7n.tags import Tag, RemoveTag\n+from c7n.utils import type_schema, local_session, dumps, chunks\n \n \n @resources.register('step-machine')\n@@ -35,6 +38,95 @@\n filter_name = None\n \n \n+class InvokeStepFunction(Action):\n+ \"\"\"Invoke step function on resources.\n+\n+ By default this will invoke a step function for each resource\n+ providing both the `policy` and `resource` as input.\n+\n+ That behavior can be configured setting policy and bulk\n+ boolean flags on the action.\n+\n+ If bulk action parameter is set to true, then the step\n+ function will be invoked in bulk, with a set of resource arns\n+ under the `resources` key.\n+\n+ The size of the batch can be configured via the batch-size\n+ parameter. Note step function state (input, execution, etc)must\n+ fit within 32k, we default to batch size 250.\n+\n+ :example:\n+\n+ .. code-block:: yaml\n+\n+ policies:\n+ - name: invoke-step-function\n+ resource: s3\n+ filters:\n+ - is-log-target\n+ - \"tag:IngestSetup\": absent\n+ actions:\n+ - type: invoke-sfn\n+ # This will cause the workflow to be invoked\n+ # with many resources arns in a single execution.\n+ # Note this is *not* the default.\n+ bulk: true\n+ batch-size: 10\n+ state-machine: LogIngestSetup\n+ \"\"\"\n+\n+ schema = type_schema(\n+ 'invoke-sfn',\n+ required=['state-machine'],\n+ **{'state-machine': {'type': 'string'},\n+ 'batch-size': {'type': 'integer'},\n+ 'bulk': {'type': 'boolean'},\n+ 'policy': {'type': 'boolean'}})\n+ schema_alias = True\n+ permissions = ('stepfunctions:StartExecution',)\n+\n+ def process(self, resources):\n+ client = local_session(\n+ self.manager.session_factory).client('stepfunctions')\n+ arn = self.data['state-machine']\n+ if not arn.startswith('arn'):\n+ arn = 'arn:aws:states:{}:{}:stateMachine:{}'.format(\n+ self.manager.config.region, self.manager.config.account_id, arn)\n+\n+ params = {'stateMachineArn': arn}\n+ pinput = {}\n+\n+ if self.data.get('policy', True):\n+ pinput['policy'] = dict(self.manager.data)\n+\n+ resource_set = list(zip(self.manager.get_arns(resources), resources))\n+ if self.data.get('bulk', False) is True:\n+ return self.invoke_batch(client, params, pinput, resource_set)\n+\n+ for arn, r in resource_set:\n+ pinput['resource'] = r\n+ params['input'] = dumps(pinput)\n+ r['c7n:execution-arn'] = self.manager.retry(\n+ client.start_execution, **params).get('executionArn')\n+\n+ def invoke_batch(self, client, params, pinput, resource_set):\n+ for batch_rset in chunks(resource_set, self.data.get('batch-size', 250)):\n+ pinput['resources'] = [rarn for rarn, _ in batch_rset]\n+ params['input'] = dumps(pinput)\n+ exec_arn = self.manager.retry(\n+ client.start_execution, **params).get('executionArn')\n+ for _, r in resource_set:\n+ r['c7n:execution-arn'] = exec_arn\n+\n+ @classmethod\n+ def register(cls, registry, key):\n+ for _, r in registry.items():\n+ r.action_registry.register('invoke-sfn', cls)\n+\n+\n+resources.subscribe(resources.EVENT_FINAL, InvokeStepFunction.register)\n+\n+\n @StepFunction.action_registry.register('tag')\n class TagStepFunction(Tag):\n \"\"\"Action to create tag(s) on a step function\n", "issue": "aws - new 
generic resource action \"invoke-stepfunction\" enhancement\nEnhancement which behaves just like action invoke-lambda but invokes (aka starts execution of) step function state machines for use cases which need sequencing and conditional logic which is more easily implemented in step functions. \r\n\r\nDoc hint: See https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/stepfunctions.html#SFN.Client.start_execution\n", "before_files": [{"content": "# Copyright 2015-2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom c7n.manager import resources\nfrom c7n.query import QueryResourceManager\nfrom c7n.tags import Tag, RemoveTag\n\n\[email protected]('step-machine')\nclass StepFunction(QueryResourceManager):\n \"\"\"AWS Step Functions State Machine\"\"\"\n\n class resource_type(object):\n service = 'stepfunctions'\n enum_spec = ('list_state_machines', 'stateMachines', None)\n arn = id = 'stateMachineArn'\n name = 'name'\n date = 'creationDate'\n dimension = None\n detail_spec = (\n \"describe_state_machine\", \"stateMachineArn\",\n 'stateMachineArn', None)\n filter_name = None\n\n\[email protected]_registry.register('tag')\nclass TagStepFunction(Tag):\n \"\"\"Action to create tag(s) on a step function\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: tag-step-function\n resource: step-machine\n actions:\n - type: tag\n key: target-tag\n value: target-tag-value\n \"\"\"\n\n permissions = ('stepfunctions:TagResource',)\n\n def process_resource_set(self, client, resources, tags):\n\n tags_lower = []\n\n for tag in tags:\n tags_lower.append({k.lower(): v for k, v in tag.items()})\n\n for r in resources:\n client.tag_resource(resourceArn=r['stateMachineArn'], tags=tags_lower)\n\n\[email protected]_registry.register('remove-tag')\nclass UnTagStepFunction(RemoveTag):\n \"\"\"Action to create tag(s) on a step function\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: step-function-remove-tag\n resource: step-machine\n actions:\n - type: remove-tag\n tags: [\"test\"]\n \"\"\"\n\n permissions = ('stepfunctions:UntagResource',)\n\n def process_resource_set(self, client, resources, tag_keys):\n\n for r in resources:\n client.untag_resource(resourceArn=r['stateMachineArn'], tagKeys=tag_keys)\n", "path": "c7n/resources/sfn.py"}]}
1,392
991
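Editor's sketch for the record above (an annotation, not a dataset field): the new invoke-sfn action ultimately wraps SFN.Client.start_execution, per the issue's doc hint. The ARN and payload below are illustrative values, not taken from the record; the payload shape follows the action's documented policy and resources keys:

[start of sketch: boto3 start_execution call]
import json
import boto3

sfn = boto3.client('stepfunctions')
response = sfn.start_execution(
    stateMachineArn='arn:aws:states:us-east-1:123456789012:stateMachine:LogIngestSetup',
    input=json.dumps({
        'policy': {'name': 'invoke-step-function'},        # passed by default unless policy: false
        'resources': ['arn:aws:s3:::example-log-bucket'],  # bulk mode batches resource ARNs here
    }),
)
print(response['executionArn'])
[end of sketch]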
gh_patches_debug_1598
rasdani/github-patches
git_diff
CTFd__CTFd-1908
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bump pybluemonday to latest Just tracking this so we don't forget. Latest should be 0.0.6 with bluemonday at 1.0.10. It adds support for comments in the HTML output. </issue> <code> [start of CTFd/utils/security/sanitize.py] 1 from pybluemonday import UGCPolicy 2 3 # Copied from lxml: 4 # https://github.com/lxml/lxml/blob/e986a9cb5d54827c59aefa8803bc90954d67221e/src/lxml/html/defs.py#L38 5 # fmt: off 6 SAFE_ATTRS = ( 7 'abbr', 'accept', 'accept-charset', 'accesskey', 'action', 'align', 8 'alt', 'axis', 'border', 'cellpadding', 'cellspacing', 'char', 'charoff', 9 'charset', 'checked', 'cite', 'class', 'clear', 'cols', 'colspan', 10 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled', 'enctype', 11 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace', 'id', 12 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method', 13 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly', 14 'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 15 'size', 'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title', 16 'type', 'usemap', 'valign', 'value', 'vspace', 'width' 17 ) 18 # fmt: on 19 20 PAGE_STRUCTURE_TAGS = { 21 "title": [], 22 } 23 24 META_TAGS = { 25 "meta": ["name", "content", "property"], 26 } 27 28 FORM_TAGS = { 29 "form": ["method", "action"], 30 "button": ["name", "type", "value", "disabled"], 31 "input": ["name", "type", "value", "placeholder"], 32 "select": ["name", "value", "placeholder"], 33 "option": ["value"], 34 "textarea": ["name", "value", "placeholder"], 35 "label": ["for"], 36 } 37 38 ANNOYING_TAGS = { 39 "blink": [], 40 "marquee": [], 41 } 42 43 44 MEDIA_TAGS = { 45 "audio": ["autoplay", "controls", "crossorigin", "loop", "muted", "preload", "src"], 46 "video": [ 47 "autoplay", 48 "buffered", 49 "controls", 50 "crossorigin", 51 "loop", 52 "muted", 53 "playsinline", 54 "poster", 55 "preload", 56 "src", 57 ], 58 "source": ["src", "type"], 59 "iframe": ["width", "height", "src", "frameborder", "allow", "allowfullscreen"], 60 } 61 62 SANITIZER = UGCPolicy() 63 64 for TAGS in (PAGE_STRUCTURE_TAGS, META_TAGS, FORM_TAGS, ANNOYING_TAGS, MEDIA_TAGS): 65 for element in TAGS: 66 SANITIZER.AllowElements(element) 67 SANITIZER.AllowAttrs(*TAGS[element]).OnElements(element) 68 69 # Allow safe attrs copied from lxml 70 SANITIZER.AllowAttrs(*SAFE_ATTRS).Globally() 71 72 # Allow styling globally 73 SANITIZER.AllowAttrs("class", "style").Globally() 74 75 # Allow styling via bluemonday 76 SANITIZER.AllowStyling() 77 78 # Allow safe convenience functions from bluemonday 79 SANITIZER.AllowStandardAttributes() 80 SANITIZER.AllowStandardURLs() 81 82 # Allow data atributes 83 SANITIZER.AllowDataAttributes() 84 85 # Allow data URI images 86 SANITIZER.AllowDataURIImages() 87 88 # Link security 89 SANITIZER.AllowRelativeURLs(True) 90 SANITIZER.RequireNoFollowOnFullyQualifiedLinks(True) 91 SANITIZER.RequireNoFollowOnLinks(True) 92 SANITIZER.RequireNoReferrerOnFullyQualifiedLinks(True) 93 SANITIZER.RequireNoReferrerOnLinks(True) 94 95 96 def sanitize_html(html): 97 return SANITIZER.sanitize(html) 98 [end of CTFd/utils/security/sanitize.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/CTFd/utils/security/sanitize.py b/CTFd/utils/security/sanitize.py --- a/CTFd/utils/security/sanitize.py +++ b/CTFd/utils/security/sanitize.py @@ -92,6 +92,9 @@ SANITIZER.RequireNoReferrerOnFullyQualifiedLinks(True) SANITIZER.RequireNoReferrerOnLinks(True) +# Allow Comments +SANITIZER.AllowComments() + def sanitize_html(html): return SANITIZER.sanitize(html)
{"golden_diff": "diff --git a/CTFd/utils/security/sanitize.py b/CTFd/utils/security/sanitize.py\n--- a/CTFd/utils/security/sanitize.py\n+++ b/CTFd/utils/security/sanitize.py\n@@ -92,6 +92,9 @@\n SANITIZER.RequireNoReferrerOnFullyQualifiedLinks(True)\n SANITIZER.RequireNoReferrerOnLinks(True)\n \n+# Allow Comments\n+SANITIZER.AllowComments()\n+\n \n def sanitize_html(html):\n return SANITIZER.sanitize(html)\n", "issue": "Bump pybluemonday to latest\nJust tracking this so we don't forget. Latest should be 0.0.6 with bluemonday at 1.0.10. It adds support for comments in the HTML output. \n", "before_files": [{"content": "from pybluemonday import UGCPolicy\n\n# Copied from lxml:\n# https://github.com/lxml/lxml/blob/e986a9cb5d54827c59aefa8803bc90954d67221e/src/lxml/html/defs.py#L38\n# fmt: off\nSAFE_ATTRS = (\n 'abbr', 'accept', 'accept-charset', 'accesskey', 'action', 'align',\n 'alt', 'axis', 'border', 'cellpadding', 'cellspacing', 'char', 'charoff',\n 'charset', 'checked', 'cite', 'class', 'clear', 'cols', 'colspan',\n 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled', 'enctype',\n 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace', 'id',\n 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method',\n 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly',\n 'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape',\n 'size', 'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title',\n 'type', 'usemap', 'valign', 'value', 'vspace', 'width'\n)\n# fmt: on\n\nPAGE_STRUCTURE_TAGS = {\n \"title\": [],\n}\n\nMETA_TAGS = {\n \"meta\": [\"name\", \"content\", \"property\"],\n}\n\nFORM_TAGS = {\n \"form\": [\"method\", \"action\"],\n \"button\": [\"name\", \"type\", \"value\", \"disabled\"],\n \"input\": [\"name\", \"type\", \"value\", \"placeholder\"],\n \"select\": [\"name\", \"value\", \"placeholder\"],\n \"option\": [\"value\"],\n \"textarea\": [\"name\", \"value\", \"placeholder\"],\n \"label\": [\"for\"],\n}\n\nANNOYING_TAGS = {\n \"blink\": [],\n \"marquee\": [],\n}\n\n\nMEDIA_TAGS = {\n \"audio\": [\"autoplay\", \"controls\", \"crossorigin\", \"loop\", \"muted\", \"preload\", \"src\"],\n \"video\": [\n \"autoplay\",\n \"buffered\",\n \"controls\",\n \"crossorigin\",\n \"loop\",\n \"muted\",\n \"playsinline\",\n \"poster\",\n \"preload\",\n \"src\",\n ],\n \"source\": [\"src\", \"type\"],\n \"iframe\": [\"width\", \"height\", \"src\", \"frameborder\", \"allow\", \"allowfullscreen\"],\n}\n\nSANITIZER = UGCPolicy()\n\nfor TAGS in (PAGE_STRUCTURE_TAGS, META_TAGS, FORM_TAGS, ANNOYING_TAGS, MEDIA_TAGS):\n for element in TAGS:\n SANITIZER.AllowElements(element)\n SANITIZER.AllowAttrs(*TAGS[element]).OnElements(element)\n\n# Allow safe attrs copied from lxml\nSANITIZER.AllowAttrs(*SAFE_ATTRS).Globally()\n\n# Allow styling globally\nSANITIZER.AllowAttrs(\"class\", \"style\").Globally()\n\n# Allow styling via bluemonday\nSANITIZER.AllowStyling()\n\n# Allow safe convenience functions from bluemonday\nSANITIZER.AllowStandardAttributes()\nSANITIZER.AllowStandardURLs()\n\n# Allow data atributes\nSANITIZER.AllowDataAttributes()\n\n# Allow data URI images\nSANITIZER.AllowDataURIImages()\n\n# Link security\nSANITIZER.AllowRelativeURLs(True)\nSANITIZER.RequireNoFollowOnFullyQualifiedLinks(True)\nSANITIZER.RequireNoFollowOnLinks(True)\nSANITIZER.RequireNoReferrerOnFullyQualifiedLinks(True)\nSANITIZER.RequireNoReferrerOnLinks(True)\n\n\ndef sanitize_html(html):\n return SANITIZER.sanitize(html)\n", "path": "CTFd/utils/security/sanitize.py"}]}
1,634
109
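Editor's sketch for the record above (an annotation, not a dataset field): the one-line fix calls AllowComments() on the shared policy, the hook added in pybluemonday 0.0.6 with bluemonday 1.0.10. The input string below is illustrative:

[start of sketch: comment-preserving sanitization]
from pybluemonday import UGCPolicy

policy = UGCPolicy()
policy.AllowComments()  # without this call, HTML comments are stripped
print(policy.sanitize('<p>hello</p><!-- build: 1.0.10 -->'))  # the comment now survives
[end of sketch]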
gh_patches_debug_7905
rasdani/github-patches
git_diff
coala__coala-591
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> run_tests --html doesn't work in ubuntu In ubuntu and debian systems, coverage isn't installed in `/usr/bin/coverage3` ... rather, it is installed as `/usr/bin/python3-coverage`. Hence, `--html` flag doesn't work. The corresponding launchpad bug is [here](https://bugs.launchpad.net/horizon/+bug/1241332) It would be better to use `python3 -m coverage` as this is platform independent. </issue> <code> [start of coalib/misc/StringConstants.py] 1 import inspect 2 import os 3 4 from coalib.misc.i18n import _ 5 6 7 class StringConstants: 8 THIS_IS_A_BUG = _("This is a bug. We are sorry for the inconvenience. " 9 "Please contact the developers for assistance.") 10 11 OBJ_NOT_ACCESSIBLE = _("{} is not accessible and will be ignored!") 12 13 TRUE_STRINGS = ['1', 14 "on", 15 'y', 16 'yes', 17 "yeah", 18 "sure", 19 'true', 20 'definitely', 21 'yup', 22 "right"] 23 24 FALSE_STRINGS = ['0', 25 'off', 26 'n', 27 'no', 28 'nope', 29 'nah', 30 'false', 31 "wrong"] 32 33 # This string contains many unicode characters to challenge tests. 34 COMPLEX_TEST_STRING = ("4 r34l ch4ll3n63: 123 ÄÖü ABc @€¥ §&% {[( ←↓→↑ " 35 "ĦŊħ ß°^ \\\n\u2192") 36 37 # Results from coverage for unittests are stored here. 38 COVERAGE_DIR = "./.coverageresults" 39 40 # Path to the coalib directory 41 coalib_root = os.path.join(os.path.dirname(inspect.getfile(_)), 42 os.path.pardir) 43 44 # Path to the directory containing the default bears 45 coalib_bears_root = os.path.join(coalib_root, os.path.pardir, "bears") 46 47 # Path to the language definition files 48 language_definitions = os.path.join(coalib_root, 49 "bearlib", 50 "languages", 51 "definitions") 52 53 system_coafile = os.path.join(coalib_root, "default_coafile") 54 55 user_coafile = os.path.join(os.path.expanduser("~"), ".coarc") 56 [end of coalib/misc/StringConstants.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/coalib/misc/StringConstants.py b/coalib/misc/StringConstants.py --- a/coalib/misc/StringConstants.py +++ b/coalib/misc/StringConstants.py @@ -1,5 +1,6 @@ import inspect import os +import sys from coalib.misc.i18n import _ @@ -53,3 +54,8 @@ system_coafile = os.path.join(coalib_root, "default_coafile") user_coafile = os.path.join(os.path.expanduser("~"), ".coarc") + + if sys.platform.startswith("win"): # pragma: no cover + python_executable = "python" + else: + python_executable = "python3"
{"golden_diff": "diff --git a/coalib/misc/StringConstants.py b/coalib/misc/StringConstants.py\n--- a/coalib/misc/StringConstants.py\n+++ b/coalib/misc/StringConstants.py\n@@ -1,5 +1,6 @@\n import inspect\n import os\n+import sys\n \n from coalib.misc.i18n import _\n \n@@ -53,3 +54,8 @@\n system_coafile = os.path.join(coalib_root, \"default_coafile\")\n \n user_coafile = os.path.join(os.path.expanduser(\"~\"), \".coarc\")\n+\n+ if sys.platform.startswith(\"win\"): # pragma: no cover\n+ python_executable = \"python\"\n+ else:\n+ python_executable = \"python3\"\n", "issue": "run_tests --html doesn't work in ubuntu\nIn ubuntu and debian systems, coverage isn't installed in `/usr/bin/coverage3` ... rather, it is installed as `/usr/bin/python3-coverage`. Hence, `--html` flag doesn't work. The corresponding launchpad bug is [here](https://bugs.launchpad.net/horizon/+bug/1241332)\n\nIt would be better to use `python3 -m coverage` as this is platform independent.\n\n", "before_files": [{"content": "import inspect\nimport os\n\nfrom coalib.misc.i18n import _\n\n\nclass StringConstants:\n THIS_IS_A_BUG = _(\"This is a bug. We are sorry for the inconvenience. \"\n \"Please contact the developers for assistance.\")\n\n OBJ_NOT_ACCESSIBLE = _(\"{} is not accessible and will be ignored!\")\n\n TRUE_STRINGS = ['1',\n \"on\",\n 'y',\n 'yes',\n \"yeah\",\n \"sure\",\n 'true',\n 'definitely',\n 'yup',\n \"right\"]\n\n FALSE_STRINGS = ['0',\n 'off',\n 'n',\n 'no',\n 'nope',\n 'nah',\n 'false',\n \"wrong\"]\n\n # This string contains many unicode characters to challenge tests.\n COMPLEX_TEST_STRING = (\"4 r34l ch4ll3n63: 123 \u00c4\u00d6\u00fc ABc @\u20ac\u00a5 \u00a7&% {[( \u2190\u2193\u2192\u2191 \"\n \"\u0126\u014a\u0127 \u00df\u00b0^ \\\\\\n\\u2192\")\n\n # Results from coverage for unittests are stored here.\n COVERAGE_DIR = \"./.coverageresults\"\n\n # Path to the coalib directory\n coalib_root = os.path.join(os.path.dirname(inspect.getfile(_)),\n os.path.pardir)\n\n # Path to the directory containing the default bears\n coalib_bears_root = os.path.join(coalib_root, os.path.pardir, \"bears\")\n\n # Path to the language definition files\n language_definitions = os.path.join(coalib_root,\n \"bearlib\",\n \"languages\",\n \"definitions\")\n\n system_coafile = os.path.join(coalib_root, \"default_coafile\")\n\n user_coafile = os.path.join(os.path.expanduser(\"~\"), \".coarc\")\n", "path": "coalib/misc/StringConstants.py"}]}
1,144
164
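Editor's sketch for the record above (an annotation, not a dataset field): the golden diff stores a platform-appropriate interpreter name; the issue's own suggestion is to invoke coverage through it as a module, avoiding the /usr/bin/coverage3 versus /usr/bin/python3-coverage split. A sketch of that pattern, assuming the chosen interpreter can import coverage:

[start of sketch: portable coverage invocation]
import subprocess
import sys

python_executable = 'python' if sys.platform.startswith('win') else 'python3'
# Running coverage as a module sidesteps distro-specific script names.
subprocess.check_call([python_executable, '-m', 'coverage', '--version'])
[end of sketch]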
gh_patches_debug_33170
rasdani/github-patches
git_diff
kedro-org__kedro-1838
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> tracking.JSONDataSet documentation and docstring are not accurate Here is the source code for `tracking.JSONDataSet`: https://kedro.readthedocs.io/en/latest/_modules/kedro/extras/datasets/tracking/json_dataset.html#JSONDataSet The `_load` method has been overriden to raise a `DataSetError` when called. The documentation does not reflect that well. What has to be fixed: - The `tracking.JSONDataSet` documentation and API ref should mention that this dataset cannot be loaded (see https://kedro.readthedocs.io/en/latest/kedro.extras.datasets.tracking.JSONDataSet.html) - The `tracking.JSONDataSet` class docstring should not call `data_set.load()` (this code snippet that was probably copied from the `json.JSONDataSet`) </issue> <code> [start of kedro/extras/datasets/tracking/json_dataset.py] 1 """``JSONDataSet`` saves data to a JSON file using an underlying 2 filesystem (e.g.: local, S3, GCS). It uses native json to handle the JSON file. 3 The ``JSONDataSet`` is part of Kedro Experiment Tracking. The dataset is versioned by default. 4 """ 5 from typing import NoReturn 6 7 from kedro.extras.datasets.json import JSONDataSet as JDS 8 from kedro.io.core import DataSetError 9 10 11 class JSONDataSet(JDS): 12 """``JSONDataSet`` saves data to a JSON file using an underlying 13 filesystem (e.g.: local, S3, GCS). It uses native json to handle the JSON file. 14 The ``JSONDataSet`` is part of Kedro Experiment Tracking. 15 The dataset is versioned by default. 16 17 Example: 18 :: 19 20 >>> from kedro.extras.datasets.tracking import JSONDataSet 21 >>> 22 >>> data = {'col1': 1, 'col2': 0.23, 'col3': 0.002} 23 >>> 24 >>> # data_set = JSONDataSet(filepath="gcs://bucket/test.json") 25 >>> data_set = JSONDataSet(filepath="test.json") 26 >>> data_set.save(data) 27 >>> reloaded = data_set.load() 28 >>> assert data == reloaded 29 30 """ 31 32 versioned = True 33 34 def _load(self) -> NoReturn: 35 raise DataSetError(f"Loading not supported for '{self.__class__.__name__}'") 36 [end of kedro/extras/datasets/tracking/json_dataset.py] [start of kedro/extras/datasets/tracking/metrics_dataset.py] 1 """``MetricsDataSet`` saves data to a JSON file using an underlying 2 filesystem (e.g.: local, S3, GCS). It uses native json to handle the JSON file. 3 The ``MetricsDataSet`` is part of Kedro Experiment Tracking. The dataset is versioned by default 4 and only takes metrics of numeric values. 5 """ 6 import json 7 from typing import Dict, NoReturn 8 9 from kedro.extras.datasets.json import JSONDataSet 10 from kedro.io.core import DataSetError, get_filepath_str 11 12 13 class MetricsDataSet(JSONDataSet): 14 """``MetricsDataSet`` saves data to a JSON file using an underlying 15 filesystem (e.g.: local, S3, GCS). It uses native json to handle the JSON file. 16 The ``MetricsDataSet`` is part of Kedro Experiment Tracking. The dataset is versioned by default 17 and only takes metrics of numeric values. 
18 19 Example: 20 :: 21 22 >>> from kedro.extras.datasets.tracking import MetricsDataSet 23 >>> 24 >>> data = {'col1': 1, 'col2': 0.23, 'col3': 0.002} 25 >>> 26 >>> # data_set = MetricsDataSet(filepath="gcs://bucket/test.json") 27 >>> data_set = MetricsDataSet(filepath="test.json") 28 >>> data_set.save(data) 29 >>> reloaded = data_set.load() 30 >>> assert data == reloaded 31 32 """ 33 34 versioned = True 35 36 def _load(self) -> NoReturn: 37 raise DataSetError(f"Loading not supported for '{self.__class__.__name__}'") 38 39 def _save(self, data: Dict[str, float]) -> None: 40 """Converts all values in the data from a ``MetricsDataSet`` to float to make sure 41 they are numeric values which can be displayed in Kedro Viz and then saves the dataset. 42 """ 43 try: 44 for key, value in data.items(): 45 data[key] = float(value) 46 except ValueError as exc: 47 raise DataSetError( 48 f"The MetricsDataSet expects only numeric values. {exc}" 49 ) from exc 50 51 save_path = get_filepath_str(self._get_save_path(), self._protocol) 52 53 with self._fs.open(save_path, **self._fs_open_args_save) as fs_file: 54 json.dump(data, fs_file, **self._save_args) 55 56 self._invalidate_cache() 57 [end of kedro/extras/datasets/tracking/metrics_dataset.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kedro/extras/datasets/tracking/json_dataset.py b/kedro/extras/datasets/tracking/json_dataset.py --- a/kedro/extras/datasets/tracking/json_dataset.py +++ b/kedro/extras/datasets/tracking/json_dataset.py @@ -12,7 +12,7 @@ """``JSONDataSet`` saves data to a JSON file using an underlying filesystem (e.g.: local, S3, GCS). It uses native json to handle the JSON file. The ``JSONDataSet`` is part of Kedro Experiment Tracking. - The dataset is versioned by default. + The dataset is write-only and it is versioned by default. Example: :: @@ -24,8 +24,6 @@ >>> # data_set = JSONDataSet(filepath="gcs://bucket/test.json") >>> data_set = JSONDataSet(filepath="test.json") >>> data_set.save(data) - >>> reloaded = data_set.load() - >>> assert data == reloaded """ diff --git a/kedro/extras/datasets/tracking/metrics_dataset.py b/kedro/extras/datasets/tracking/metrics_dataset.py --- a/kedro/extras/datasets/tracking/metrics_dataset.py +++ b/kedro/extras/datasets/tracking/metrics_dataset.py @@ -12,9 +12,9 @@ class MetricsDataSet(JSONDataSet): """``MetricsDataSet`` saves data to a JSON file using an underlying - filesystem (e.g.: local, S3, GCS). It uses native json to handle the JSON file. - The ``MetricsDataSet`` is part of Kedro Experiment Tracking. The dataset is versioned by default - and only takes metrics of numeric values. + filesystem (e.g.: local, S3, GCS). It uses native json to handle the JSON file. The + ``MetricsDataSet`` is part of Kedro Experiment Tracking. The dataset is write-only, + it is versioned by default and only takes metrics of numeric values. Example: :: @@ -26,8 +26,6 @@ >>> # data_set = MetricsDataSet(filepath="gcs://bucket/test.json") >>> data_set = MetricsDataSet(filepath="test.json") >>> data_set.save(data) - >>> reloaded = data_set.load() - >>> assert data == reloaded """
{"golden_diff": "diff --git a/kedro/extras/datasets/tracking/json_dataset.py b/kedro/extras/datasets/tracking/json_dataset.py\n--- a/kedro/extras/datasets/tracking/json_dataset.py\n+++ b/kedro/extras/datasets/tracking/json_dataset.py\n@@ -12,7 +12,7 @@\n \"\"\"``JSONDataSet`` saves data to a JSON file using an underlying\n filesystem (e.g.: local, S3, GCS). It uses native json to handle the JSON file.\n The ``JSONDataSet`` is part of Kedro Experiment Tracking.\n- The dataset is versioned by default.\n+ The dataset is write-only and it is versioned by default.\n \n Example:\n ::\n@@ -24,8 +24,6 @@\n >>> # data_set = JSONDataSet(filepath=\"gcs://bucket/test.json\")\n >>> data_set = JSONDataSet(filepath=\"test.json\")\n >>> data_set.save(data)\n- >>> reloaded = data_set.load()\n- >>> assert data == reloaded\n \n \"\"\"\n \ndiff --git a/kedro/extras/datasets/tracking/metrics_dataset.py b/kedro/extras/datasets/tracking/metrics_dataset.py\n--- a/kedro/extras/datasets/tracking/metrics_dataset.py\n+++ b/kedro/extras/datasets/tracking/metrics_dataset.py\n@@ -12,9 +12,9 @@\n \n class MetricsDataSet(JSONDataSet):\n \"\"\"``MetricsDataSet`` saves data to a JSON file using an underlying\n- filesystem (e.g.: local, S3, GCS). It uses native json to handle the JSON file.\n- The ``MetricsDataSet`` is part of Kedro Experiment Tracking. The dataset is versioned by default\n- and only takes metrics of numeric values.\n+ filesystem (e.g.: local, S3, GCS). It uses native json to handle the JSON file. The\n+ ``MetricsDataSet`` is part of Kedro Experiment Tracking. The dataset is write-only,\n+ it is versioned by default and only takes metrics of numeric values.\n \n Example:\n ::\n@@ -26,8 +26,6 @@\n >>> # data_set = MetricsDataSet(filepath=\"gcs://bucket/test.json\")\n >>> data_set = MetricsDataSet(filepath=\"test.json\")\n >>> data_set.save(data)\n- >>> reloaded = data_set.load()\n- >>> assert data == reloaded\n \n \"\"\"\n", "issue": "tracking.JSONDataSet documentation and docstring are not accurate\nHere is the source code for `tracking.JSONDataSet`: https://kedro.readthedocs.io/en/latest/_modules/kedro/extras/datasets/tracking/json_dataset.html#JSONDataSet\r\n\r\nThe `_load` method has been overriden to raise a `DataSetError` when called.\r\n\r\nThe documentation does not reflect that well. What has to be fixed:\r\n\r\n- The `tracking.JSONDataSet` documentation and API ref should mention that this dataset cannot be loaded (see https://kedro.readthedocs.io/en/latest/kedro.extras.datasets.tracking.JSONDataSet.html)\r\n- The `tracking.JSONDataSet` class docstring should not call `data_set.load()` (this code snippet that was probably copied from the `json.JSONDataSet`)\n", "before_files": [{"content": "\"\"\"``JSONDataSet`` saves data to a JSON file using an underlying\nfilesystem (e.g.: local, S3, GCS). It uses native json to handle the JSON file.\nThe ``JSONDataSet`` is part of Kedro Experiment Tracking. The dataset is versioned by default.\n\"\"\"\nfrom typing import NoReturn\n\nfrom kedro.extras.datasets.json import JSONDataSet as JDS\nfrom kedro.io.core import DataSetError\n\n\nclass JSONDataSet(JDS):\n \"\"\"``JSONDataSet`` saves data to a JSON file using an underlying\n filesystem (e.g.: local, S3, GCS). 
It uses native json to handle the JSON file.\n The ``JSONDataSet`` is part of Kedro Experiment Tracking.\n The dataset is versioned by default.\n\n Example:\n ::\n\n >>> from kedro.extras.datasets.tracking import JSONDataSet\n >>>\n >>> data = {'col1': 1, 'col2': 0.23, 'col3': 0.002}\n >>>\n >>> # data_set = JSONDataSet(filepath=\"gcs://bucket/test.json\")\n >>> data_set = JSONDataSet(filepath=\"test.json\")\n >>> data_set.save(data)\n >>> reloaded = data_set.load()\n >>> assert data == reloaded\n\n \"\"\"\n\n versioned = True\n\n def _load(self) -> NoReturn:\n raise DataSetError(f\"Loading not supported for '{self.__class__.__name__}'\")\n", "path": "kedro/extras/datasets/tracking/json_dataset.py"}, {"content": "\"\"\"``MetricsDataSet`` saves data to a JSON file using an underlying\nfilesystem (e.g.: local, S3, GCS). It uses native json to handle the JSON file.\nThe ``MetricsDataSet`` is part of Kedro Experiment Tracking. The dataset is versioned by default\nand only takes metrics of numeric values.\n\"\"\"\nimport json\nfrom typing import Dict, NoReturn\n\nfrom kedro.extras.datasets.json import JSONDataSet\nfrom kedro.io.core import DataSetError, get_filepath_str\n\n\nclass MetricsDataSet(JSONDataSet):\n \"\"\"``MetricsDataSet`` saves data to a JSON file using an underlying\n filesystem (e.g.: local, S3, GCS). It uses native json to handle the JSON file.\n The ``MetricsDataSet`` is part of Kedro Experiment Tracking. The dataset is versioned by default\n and only takes metrics of numeric values.\n\n Example:\n ::\n\n >>> from kedro.extras.datasets.tracking import MetricsDataSet\n >>>\n >>> data = {'col1': 1, 'col2': 0.23, 'col3': 0.002}\n >>>\n >>> # data_set = MetricsDataSet(filepath=\"gcs://bucket/test.json\")\n >>> data_set = MetricsDataSet(filepath=\"test.json\")\n >>> data_set.save(data)\n >>> reloaded = data_set.load()\n >>> assert data == reloaded\n\n \"\"\"\n\n versioned = True\n\n def _load(self) -> NoReturn:\n raise DataSetError(f\"Loading not supported for '{self.__class__.__name__}'\")\n\n def _save(self, data: Dict[str, float]) -> None:\n \"\"\"Converts all values in the data from a ``MetricsDataSet`` to float to make sure\n they are numeric values which can be displayed in Kedro Viz and then saves the dataset.\n \"\"\"\n try:\n for key, value in data.items():\n data[key] = float(value)\n except ValueError as exc:\n raise DataSetError(\n f\"The MetricsDataSet expects only numeric values. {exc}\"\n ) from exc\n\n save_path = get_filepath_str(self._get_save_path(), self._protocol)\n\n with self._fs.open(save_path, **self._fs_open_args_save) as fs_file:\n json.dump(data, fs_file, **self._save_args)\n\n self._invalidate_cache()\n", "path": "kedro/extras/datasets/tracking/metrics_dataset.py"}]}
1,731
526
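Editor's sketch for the record above (an annotation, not a dataset field): both tracking datasets override _load to raise, so the docstrings' data_set.load() lines could never run, which is exactly what the golden diff removes. Corrected usage, with an illustrative local filepath:

[start of sketch: write-only tracking dataset usage]
from kedro.extras.datasets.tracking import JSONDataSet
from kedro.io.core import DataSetError

data_set = JSONDataSet(filepath='test.json')
data_set.save({'col1': 1, 'col2': 0.23, 'col3': 0.002})
try:
    data_set.load()
except DataSetError as err:
    print(err)  # "Loading not supported for 'JSONDataSet'"
[end of sketch]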
gh_patches_debug_29307
rasdani/github-patches
git_diff
shuup__shuup-1557
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Admin: Search doesn't work well with categories Admin > Search > Search for a category Why does it say "Products" when it's clearly showing categories? Deleted categories are also visible in search. ![screenshot from 2018-09-28 22-12-03](https://user-images.githubusercontent.com/40273438/46231259-8aa84280-c36b-11e8-9211-f8e3b0e55697.png) </issue> <code> [start of shuup/admin/modules/categories/__init__.py] 1 # -*- coding: utf-8 -*- 2 # This file is part of Shuup. 3 # 4 # Copyright (c) 2012-2018, Shuup Inc. All rights reserved. 5 # 6 # This source code is licensed under the OSL-3.0 license found in the 7 # LICENSE file in the root directory of this source tree. 8 import six 9 from django.db.models import Q 10 from django.utils.translation import ugettext_lazy as _ 11 from filer.models import File 12 13 from shuup.admin.base import AdminModule, MenuEntry, SearchResult 14 from shuup.admin.menu import PRODUCTS_MENU_CATEGORY 15 from shuup.admin.utils.permissions import get_default_model_permissions 16 from shuup.admin.utils.urls import ( 17 admin_url, derive_model_url, get_edit_and_list_urls, get_model_url 18 ) 19 from shuup.admin.views.home import HelpBlockCategory, SimpleHelpBlock 20 from shuup.core.models import Category 21 22 23 class CategoryModule(AdminModule): 24 name = _("Categories") 25 category = _("Products") 26 breadcrumbs_menu_entry = MenuEntry(text=name, url="shuup_admin:category.list", category=PRODUCTS_MENU_CATEGORY) 27 28 def get_urls(self): 29 return [ 30 admin_url( 31 "^categories/(?P<pk>\d+)/copy-visibility/$", 32 "shuup.admin.modules.categories.views.CategoryCopyVisibilityView", 33 name="category.copy_visibility", 34 permissions=get_default_model_permissions(Category) 35 ), 36 admin_url( 37 "^categories/(?P<pk>\d+)/delete/$", 38 "shuup.admin.modules.categories.views.CategoryDeleteView", 39 name="category.delete", 40 permissions=get_default_model_permissions(Category) 41 ), 42 admin_url( 43 "^categories/organize/$", 44 "shuup.admin.modules.categories.views.CategoryOrganizeView", 45 name="category.organize", 46 permissions=get_default_model_permissions(Category) 47 ) 48 ] + get_edit_and_list_urls( 49 url_prefix="^categories", 50 view_template="shuup.admin.modules.categories.views.Category%sView", 51 name_template="category.%s", 52 permissions=get_default_model_permissions(Category), 53 ) 54 55 def get_menu_entries(self, request): 56 return [ 57 MenuEntry( 58 text=_("Categories"), icon="fa fa-sitemap", 59 url="shuup_admin:category.list", category=PRODUCTS_MENU_CATEGORY, ordering=2 60 ) 61 ] 62 63 def get_search_results(self, request, query): 64 minimum_query_length = 3 65 if len(query) >= minimum_query_length: 66 categories = Category.objects.filter( 67 Q(translations__name__icontains=query) | 68 Q(identifier__icontains=query) 69 ).distinct().order_by("tree_id", "lft") 70 for i, category in enumerate(categories[:10]): 71 relevance = 100 - i 72 yield SearchResult( 73 text=six.text_type(category), 74 url=get_model_url(category), 75 category=self.category, 76 relevance=relevance 77 ) 78 79 def get_help_blocks(self, request, kind): 80 yield SimpleHelpBlock( 81 text=_("Add a product category to organize your products"), 82 actions=[{ 83 "text": _("New category"), 84 "url": get_model_url(Category, "new") 85 }], 86 icon_url="shuup_admin/img/category.png", 87 category=HelpBlockCategory.PRODUCTS, 88 priority=1, 89 done=Category.objects.filter(shops=request.shop).exists() if 
kind == "setup" else False 90 ) 91 92 def get_required_permissions(self): 93 return get_default_model_permissions(Category) | get_default_model_permissions(File) 94 95 def get_model_url(self, object, kind, shop=None): 96 return derive_model_url(Category, "shuup_admin:category", object, kind) 97 [end of shuup/admin/modules/categories/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/shuup/admin/modules/categories/__init__.py b/shuup/admin/modules/categories/__init__.py --- a/shuup/admin/modules/categories/__init__.py +++ b/shuup/admin/modules/categories/__init__.py @@ -12,6 +12,7 @@ from shuup.admin.base import AdminModule, MenuEntry, SearchResult from shuup.admin.menu import PRODUCTS_MENU_CATEGORY +from shuup.admin.shop_provider import get_shop from shuup.admin.utils.permissions import get_default_model_permissions from shuup.admin.utils.urls import ( admin_url, derive_model_url, get_edit_and_list_urls, get_model_url @@ -22,7 +23,7 @@ class CategoryModule(AdminModule): name = _("Categories") - category = _("Products") + category = _("Categories") breadcrumbs_menu_entry = MenuEntry(text=name, url="shuup_admin:category.list", category=PRODUCTS_MENU_CATEGORY) def get_urls(self): @@ -56,8 +57,10 @@ def get_search_results(self, request, query): minimum_query_length = 3 + if len(query) >= minimum_query_length: - categories = Category.objects.filter( + shop = get_shop(request) + categories = Category.objects.all_except_deleted(shop=shop).filter( Q(translations__name__icontains=query) | Q(identifier__icontains=query) ).distinct().order_by("tree_id", "lft")
{"golden_diff": "diff --git a/shuup/admin/modules/categories/__init__.py b/shuup/admin/modules/categories/__init__.py\n--- a/shuup/admin/modules/categories/__init__.py\n+++ b/shuup/admin/modules/categories/__init__.py\n@@ -12,6 +12,7 @@\n \n from shuup.admin.base import AdminModule, MenuEntry, SearchResult\n from shuup.admin.menu import PRODUCTS_MENU_CATEGORY\n+from shuup.admin.shop_provider import get_shop\n from shuup.admin.utils.permissions import get_default_model_permissions\n from shuup.admin.utils.urls import (\n admin_url, derive_model_url, get_edit_and_list_urls, get_model_url\n@@ -22,7 +23,7 @@\n \n class CategoryModule(AdminModule):\n name = _(\"Categories\")\n- category = _(\"Products\")\n+ category = _(\"Categories\")\n breadcrumbs_menu_entry = MenuEntry(text=name, url=\"shuup_admin:category.list\", category=PRODUCTS_MENU_CATEGORY)\n \n def get_urls(self):\n@@ -56,8 +57,10 @@\n \n def get_search_results(self, request, query):\n minimum_query_length = 3\n+\n if len(query) >= minimum_query_length:\n- categories = Category.objects.filter(\n+ shop = get_shop(request)\n+ categories = Category.objects.all_except_deleted(shop=shop).filter(\n Q(translations__name__icontains=query) |\n Q(identifier__icontains=query)\n ).distinct().order_by(\"tree_id\", \"lft\")\n", "issue": "Admin: Search doesn't work well with categories\nAdmin > Search > Search for a category\r\nWhy does it say \"Products\" when it's clearly showing categories?\r\nDeleted categories are also visible in search. \r\n\r\n![screenshot from 2018-09-28 22-12-03](https://user-images.githubusercontent.com/40273438/46231259-8aa84280-c36b-11e8-9211-f8e3b0e55697.png)\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of Shuup.\n#\n# Copyright (c) 2012-2018, Shuup Inc. 
All rights reserved.\n#\n# This source code is licensed under the OSL-3.0 license found in the\n# LICENSE file in the root directory of this source tree.\nimport six\nfrom django.db.models import Q\nfrom django.utils.translation import ugettext_lazy as _\nfrom filer.models import File\n\nfrom shuup.admin.base import AdminModule, MenuEntry, SearchResult\nfrom shuup.admin.menu import PRODUCTS_MENU_CATEGORY\nfrom shuup.admin.utils.permissions import get_default_model_permissions\nfrom shuup.admin.utils.urls import (\n admin_url, derive_model_url, get_edit_and_list_urls, get_model_url\n)\nfrom shuup.admin.views.home import HelpBlockCategory, SimpleHelpBlock\nfrom shuup.core.models import Category\n\n\nclass CategoryModule(AdminModule):\n name = _(\"Categories\")\n category = _(\"Products\")\n breadcrumbs_menu_entry = MenuEntry(text=name, url=\"shuup_admin:category.list\", category=PRODUCTS_MENU_CATEGORY)\n\n def get_urls(self):\n return [\n admin_url(\n \"^categories/(?P<pk>\\d+)/copy-visibility/$\",\n \"shuup.admin.modules.categories.views.CategoryCopyVisibilityView\",\n name=\"category.copy_visibility\",\n permissions=get_default_model_permissions(Category)\n ),\n admin_url(\n \"^categories/(?P<pk>\\d+)/delete/$\",\n \"shuup.admin.modules.categories.views.CategoryDeleteView\",\n name=\"category.delete\",\n permissions=get_default_model_permissions(Category)\n ),\n admin_url(\n \"^categories/organize/$\",\n \"shuup.admin.modules.categories.views.CategoryOrganizeView\",\n name=\"category.organize\",\n permissions=get_default_model_permissions(Category)\n )\n ] + get_edit_and_list_urls(\n url_prefix=\"^categories\",\n view_template=\"shuup.admin.modules.categories.views.Category%sView\",\n name_template=\"category.%s\",\n permissions=get_default_model_permissions(Category),\n )\n\n def get_menu_entries(self, request):\n return [\n MenuEntry(\n text=_(\"Categories\"), icon=\"fa fa-sitemap\",\n url=\"shuup_admin:category.list\", category=PRODUCTS_MENU_CATEGORY, ordering=2\n )\n ]\n\n def get_search_results(self, request, query):\n minimum_query_length = 3\n if len(query) >= minimum_query_length:\n categories = Category.objects.filter(\n Q(translations__name__icontains=query) |\n Q(identifier__icontains=query)\n ).distinct().order_by(\"tree_id\", \"lft\")\n for i, category in enumerate(categories[:10]):\n relevance = 100 - i\n yield SearchResult(\n text=six.text_type(category),\n url=get_model_url(category),\n category=self.category,\n relevance=relevance\n )\n\n def get_help_blocks(self, request, kind):\n yield SimpleHelpBlock(\n text=_(\"Add a product category to organize your products\"),\n actions=[{\n \"text\": _(\"New category\"),\n \"url\": get_model_url(Category, \"new\")\n }],\n icon_url=\"shuup_admin/img/category.png\",\n category=HelpBlockCategory.PRODUCTS,\n priority=1,\n done=Category.objects.filter(shops=request.shop).exists() if kind == \"setup\" else False\n )\n\n def get_required_permissions(self):\n return get_default_model_permissions(Category) | get_default_model_permissions(File)\n\n def get_model_url(self, object, kind, shop=None):\n return derive_model_url(Category, \"shuup_admin:category\", object, kind)\n", "path": "shuup/admin/modules/categories/__init__.py"}]}
1,643
326
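For reference, the Shuup fix in the record above scopes admin category search to the request's shop and drops soft-deleted categories. A minimal sketch of the patched queryset, using only names that appear in that diff (`get_shop`, `all_except_deleted`); the standalone function here is illustrative, not Shuup's actual module layout:

```python
from django.db.models import Q

from shuup.admin.shop_provider import get_shop
from shuup.core.models import Category


def search_categories(request, query):
    # Scope to the current shop and exclude deleted categories, so admin
    # search no longer surfaces categories with no usable data behind them.
    shop = get_shop(request)
    return (
        Category.objects.all_except_deleted(shop=shop)
        .filter(Q(translations__name__icontains=query) | Q(identifier__icontains=query))
        .distinct()
        .order_by("tree_id", "lft")
    )
```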
gh_patches_debug_34114
rasdani/github-patches
git_diff
learningequality__kolibri-1396
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Coach Reports - change when progress starts being measured in coach reports ## Summary Implementations team members were confused as to why some exercises were showing up as having progress in Recent but there ended up being no data for it when they clicked into it. **Suggestion:** On coach reports, log that an exercise has been engaged with only when the learner clicked "check answer". That way, we avoid having exercises display on the coach's end because of learners who happen to click into an exercise and leave the page without interacting with the exercise. A potential problem area in the future is overloading the Recent tab with content that has 0 data because of learners who like to click around & explore. ## System information - Version: Kolibri 0.4.0alpha6 http://192.237.248.135:8088/ - Operating system: Mac - Browser: Chrome ## Screenshots ![apr-27-2017 16-46-51](https://cloud.githubusercontent.com/assets/6668144/25508810/7782926e-2b69-11e7-90b5-5ebc6dc3543b.gif) Exercises should log as "in progress" as soon as there's an interaction with it ## Summary Marking incorrect answers and getting hints does not log it as "in progress" The first correct answer on an exercise logs it as "in progress" ## System information - Version: Kolibri 0.4.0alpha1.dev915 - Operating system: MacOS Sierra - Browser: Chrome ![apr-27-2017 18-03-18](https://cloud.githubusercontent.com/assets/6668144/25510591/234e5e28-2b76-11e7-966a-3ea42938845b.gif) </issue> <code> [start of kolibri/plugins/coach/api.py] 1 import datetime 2 from dateutil.parser import parse 3 4 from django.utils import timezone 5 6 from kolibri.auth.constants import role_kinds 7 from kolibri.auth.models import Collection, FacilityUser 8 from kolibri.content.models import ContentNode 9 from kolibri.logger.models import ContentSummaryLog 10 from rest_framework import pagination, permissions, viewsets 11 12 from .serializers import ContentReportSerializer, ContentSummarySerializer, UserReportSerializer 13 from .utils.return_users import get_members_or_user 14 15 16 class OptionalPageNumberPagination(pagination.PageNumberPagination): 17 """ 18 Pagination class that allows for page number-style pagination, when requested. 19 To activate, the `page_size` argument must be set. 
For example, to request the first 20 records: 20 `?page_size=20&page=1` 21 """ 22 page_size = None 23 page_size_query_param = "page_size" 24 25 26 class KolibriReportPermissions(permissions.BasePermission): 27 28 # check if requesting user has permission for collection or user 29 def has_permission(self, request, view): 30 collection_kind = view.kwargs.get('collection_kind', 'user') 31 collection_or_user_pk = view.kwargs.get('collection_id', view.kwargs.get('pk')) 32 33 allowed_roles = [role_kinds.ADMIN, role_kinds.COACH] 34 if 'user' == collection_kind: 35 return request.user.has_role_for(allowed_roles, FacilityUser.objects.get(pk=collection_or_user_pk)) 36 else: 37 return request.user.has_role_for(allowed_roles, Collection.objects.get(pk=collection_or_user_pk)) 38 39 40 class UserReportViewSet(viewsets.ModelViewSet): 41 42 permission_classes = (KolibriReportPermissions,) 43 pagination_class = OptionalPageNumberPagination 44 serializer_class = UserReportSerializer 45 46 def get_queryset(self): 47 assert 'user' != self.kwargs['collection_kind'], 'only a `collection` should be passed to this endpoint' 48 return get_members_or_user(self.kwargs['collection_kind'], self.kwargs['collection_id']) 49 50 51 class ContentReportViewSet(viewsets.ModelViewSet): 52 53 permission_classes = (KolibriReportPermissions,) 54 pagination_class = OptionalPageNumberPagination 55 serializer_class = ContentReportSerializer 56 57 def get_queryset(self): 58 content_node_id = self.kwargs['content_node_id'] 59 return ContentNode.objects.filter(parent=content_node_id) 60 61 62 class ContentSummaryViewSet(viewsets.ModelViewSet): 63 64 permission_classes = (KolibriReportPermissions,) 65 serializer_class = ContentSummarySerializer 66 67 def get_queryset(self): 68 return ContentNode.objects.all() 69 70 71 class UserSummaryViewSet(viewsets.ModelViewSet): 72 73 permission_classes = (KolibriReportPermissions,) 74 serializer_class = UserReportSerializer 75 76 def get_queryset(self): 77 return FacilityUser.objects.all() 78 79 80 class RecentReportViewSet(viewsets.ModelViewSet): 81 82 permission_classes = (KolibriReportPermissions,) 83 pagination_class = OptionalPageNumberPagination 84 serializer_class = ContentReportSerializer 85 86 def get_queryset(self): 87 query_node = ContentNode.objects.get(pk=self.kwargs['content_node_id']) 88 if self.request.query_params.get('last_active_time'): 89 # Last active time specified 90 datetime_cutoff = parse(self.request.query_params.get('last_active_time')) 91 else: 92 datetime_cutoff = timezone.now() - datetime.timedelta(7) 93 # Set on the kwargs to pass into the serializer 94 self.kwargs['last_active_time'] = datetime_cutoff.isoformat() 95 recent_content_items = ContentSummaryLog.objects.filter_by_topic(query_node).filter( 96 user__in=get_members_or_user(self.kwargs['collection_kind'], self.kwargs['collection_id']), 97 end_timestamp__gte=datetime_cutoff).values_list('content_id') 98 return ContentNode.objects.filter(content_id__in=recent_content_items) 99 [end of kolibri/plugins/coach/api.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kolibri/plugins/coach/api.py b/kolibri/plugins/coach/api.py --- a/kolibri/plugins/coach/api.py +++ b/kolibri/plugins/coach/api.py @@ -1,12 +1,13 @@ import datetime from dateutil.parser import parse +from django.db.models import Q from django.utils import timezone from kolibri.auth.constants import role_kinds from kolibri.auth.models import Collection, FacilityUser from kolibri.content.models import ContentNode -from kolibri.logger.models import ContentSummaryLog +from kolibri.logger.models import ContentSummaryLog, MasteryLog from rest_framework import pagination, permissions, viewsets from .serializers import ContentReportSerializer, ContentSummarySerializer, UserReportSerializer @@ -84,6 +85,7 @@ serializer_class = ContentReportSerializer def get_queryset(self): + attempted_mastery_logs = MasteryLog.objects.filter(attemptlogs__isnull=False) query_node = ContentNode.objects.get(pk=self.kwargs['content_node_id']) if self.request.query_params.get('last_active_time'): # Last active time specified @@ -93,6 +95,7 @@ # Set on the kwargs to pass into the serializer self.kwargs['last_active_time'] = datetime_cutoff.isoformat() recent_content_items = ContentSummaryLog.objects.filter_by_topic(query_node).filter( + Q(progress__gt=0) | Q(masterylogs__in=attempted_mastery_logs), user__in=get_members_or_user(self.kwargs['collection_kind'], self.kwargs['collection_id']), end_timestamp__gte=datetime_cutoff).values_list('content_id') return ContentNode.objects.filter(content_id__in=recent_content_items)
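Condensed, the change above means a summary log only counts as "recent" if it carries real progress or at least one answered attempt. A sketch of that filter, with the reverse-relation names (`masterylogs`, `attemptlogs`) taken from the diff; the `filter_by_topic` scoping is omitted here for brevity:

```python
from django.db.models import Q

from kolibri.logger.models import ContentSummaryLog, MasteryLog


def recently_engaged_content_ids(users, cutoff):
    # Mastery logs with at least one attempt, i.e. the learner actually
    # clicked "check answer" rather than just opening the exercise.
    attempted = MasteryLog.objects.filter(attemptlogs__isnull=False)
    return ContentSummaryLog.objects.filter(
        Q(progress__gt=0) | Q(masterylogs__in=attempted),
        user__in=users,
        end_timestamp__gte=cutoff,
    ).values_list("content_id")
```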
{"golden_diff": "diff --git a/kolibri/plugins/coach/api.py b/kolibri/plugins/coach/api.py\n--- a/kolibri/plugins/coach/api.py\n+++ b/kolibri/plugins/coach/api.py\n@@ -1,12 +1,13 @@\n import datetime\n from dateutil.parser import parse\n \n+from django.db.models import Q\n from django.utils import timezone\n \n from kolibri.auth.constants import role_kinds\n from kolibri.auth.models import Collection, FacilityUser\n from kolibri.content.models import ContentNode\n-from kolibri.logger.models import ContentSummaryLog\n+from kolibri.logger.models import ContentSummaryLog, MasteryLog\n from rest_framework import pagination, permissions, viewsets\n \n from .serializers import ContentReportSerializer, ContentSummarySerializer, UserReportSerializer\n@@ -84,6 +85,7 @@\n serializer_class = ContentReportSerializer\n \n def get_queryset(self):\n+ attempted_mastery_logs = MasteryLog.objects.filter(attemptlogs__isnull=False)\n query_node = ContentNode.objects.get(pk=self.kwargs['content_node_id'])\n if self.request.query_params.get('last_active_time'):\n # Last active time specified\n@@ -93,6 +95,7 @@\n # Set on the kwargs to pass into the serializer\n self.kwargs['last_active_time'] = datetime_cutoff.isoformat()\n recent_content_items = ContentSummaryLog.objects.filter_by_topic(query_node).filter(\n+ Q(progress__gt=0) | Q(masterylogs__in=attempted_mastery_logs),\n user__in=get_members_or_user(self.kwargs['collection_kind'], self.kwargs['collection_id']),\n end_timestamp__gte=datetime_cutoff).values_list('content_id')\n return ContentNode.objects.filter(content_id__in=recent_content_items)\n", "issue": "Coach Reports - change when progress starts being measured in coach reports\n## Summary\r\n\r\nImplementations team members were confused as to why some exercises were showing up as having progress in Recent but there ended up being no data for it when they clicked into it.\r\n\r\n**Suggestion:** \r\nOn coach reports, log that an exercise has been engaged with only when the learner clicked \"check answer\". \r\n\r\nThat way, we avoid having exercises display on the coach's end because of learners who happen to click into an exercise and leave the page without interacting with the exercise.\r\n\r\nA potential problem area in the future is overloading the Recent tab with content that has 0 data because of learners who like to click around & explore. 
\r\n\r\n## System information\r\n\r\n - Version: Kolibri 0.4.0alpha6 http://192.237.248.135:8088/\r\n - Operating system: Mac\r\n - Browser: Chrome\r\n\r\n## Screenshots\r\n\r\n![apr-27-2017 16-46-51](https://cloud.githubusercontent.com/assets/6668144/25508810/7782926e-2b69-11e7-90b5-5ebc6dc3543b.gif)\r\n\nExercises should log as \"in progress\" as soon as there's an interaction with it\n## Summary\r\n\r\nMarking incorrect answers and getting hints does not log it as \"in progress\"\r\nThe first correct answer on an exercise logs it as \"in progress\"\r\n\r\n## System information\r\n\r\n - Version: Kolibri 0.4.0alpha1.dev915\r\n - Operating system: MacOS Sierra\r\n - Browser: Chrome\r\n\r\n![apr-27-2017 18-03-18](https://cloud.githubusercontent.com/assets/6668144/25510591/234e5e28-2b76-11e7-966a-3ea42938845b.gif)\r\n\n", "before_files": [{"content": "import datetime\nfrom dateutil.parser import parse\n\nfrom django.utils import timezone\n\nfrom kolibri.auth.constants import role_kinds\nfrom kolibri.auth.models import Collection, FacilityUser\nfrom kolibri.content.models import ContentNode\nfrom kolibri.logger.models import ContentSummaryLog\nfrom rest_framework import pagination, permissions, viewsets\n\nfrom .serializers import ContentReportSerializer, ContentSummarySerializer, UserReportSerializer\nfrom .utils.return_users import get_members_or_user\n\n\nclass OptionalPageNumberPagination(pagination.PageNumberPagination):\n \"\"\"\n Pagination class that allows for page number-style pagination, when requested.\n To activate, the `page_size` argument must be set. For example, to request the first 20 records:\n `?page_size=20&page=1`\n \"\"\"\n page_size = None\n page_size_query_param = \"page_size\"\n\n\nclass KolibriReportPermissions(permissions.BasePermission):\n\n # check if requesting user has permission for collection or user\n def has_permission(self, request, view):\n collection_kind = view.kwargs.get('collection_kind', 'user')\n collection_or_user_pk = view.kwargs.get('collection_id', view.kwargs.get('pk'))\n\n allowed_roles = [role_kinds.ADMIN, role_kinds.COACH]\n if 'user' == collection_kind:\n return request.user.has_role_for(allowed_roles, FacilityUser.objects.get(pk=collection_or_user_pk))\n else:\n return request.user.has_role_for(allowed_roles, Collection.objects.get(pk=collection_or_user_pk))\n\n\nclass UserReportViewSet(viewsets.ModelViewSet):\n\n permission_classes = (KolibriReportPermissions,)\n pagination_class = OptionalPageNumberPagination\n serializer_class = UserReportSerializer\n\n def get_queryset(self):\n assert 'user' != self.kwargs['collection_kind'], 'only a `collection` should be passed to this endpoint'\n return get_members_or_user(self.kwargs['collection_kind'], self.kwargs['collection_id'])\n\n\nclass ContentReportViewSet(viewsets.ModelViewSet):\n\n permission_classes = (KolibriReportPermissions,)\n pagination_class = OptionalPageNumberPagination\n serializer_class = ContentReportSerializer\n\n def get_queryset(self):\n content_node_id = self.kwargs['content_node_id']\n return ContentNode.objects.filter(parent=content_node_id)\n\n\nclass ContentSummaryViewSet(viewsets.ModelViewSet):\n\n permission_classes = (KolibriReportPermissions,)\n serializer_class = ContentSummarySerializer\n\n def get_queryset(self):\n return ContentNode.objects.all()\n\n\nclass UserSummaryViewSet(viewsets.ModelViewSet):\n\n permission_classes = (KolibriReportPermissions,)\n serializer_class = UserReportSerializer\n\n def get_queryset(self):\n return 
FacilityUser.objects.all()\n\n\nclass RecentReportViewSet(viewsets.ModelViewSet):\n\n permission_classes = (KolibriReportPermissions,)\n pagination_class = OptionalPageNumberPagination\n serializer_class = ContentReportSerializer\n\n def get_queryset(self):\n query_node = ContentNode.objects.get(pk=self.kwargs['content_node_id'])\n if self.request.query_params.get('last_active_time'):\n # Last active time specified\n datetime_cutoff = parse(self.request.query_params.get('last_active_time'))\n else:\n datetime_cutoff = timezone.now() - datetime.timedelta(7)\n # Set on the kwargs to pass into the serializer\n self.kwargs['last_active_time'] = datetime_cutoff.isoformat()\n recent_content_items = ContentSummaryLog.objects.filter_by_topic(query_node).filter(\n user__in=get_members_or_user(self.kwargs['collection_kind'], self.kwargs['collection_id']),\n end_timestamp__gte=datetime_cutoff).values_list('content_id')\n return ContentNode.objects.filter(content_id__in=recent_content_items)\n", "path": "kolibri/plugins/coach/api.py"}]}
1,956
370
gh_patches_debug_54055
rasdani/github-patches
git_diff
docker__docker-py-1669
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Issue with port option in 2.4.0 version Hi, I update to the 2.4 today and i got issue with docker-compose when i try to add the following line to my configuration file (docker-compose.yml) : `ports: - "127.0.0.1:9292:9090"` I got the following error: ` ERROR: for ContainerName expected string or buffer Traceback (most recent call last): File "/usr/local/bin/docker-compose", line 11, in <module> sys.exit(main()) File "/usr/local/lib/python2.7/dist-packages/compose/cli/main.py", line 68, in main command() File "/usr/local/lib/python2.7/dist-packages/compose/cli/main.py", line 118, in perform_command handler(command, command_options) File "/usr/local/lib/python2.7/dist-packages/compose/cli/main.py", line 926, in up scale_override=parse_scale_args(options['--scale']), File "/usr/local/lib/python2.7/dist-packages/compose/project.py", line 424, in up get_deps File "/usr/local/lib/python2.7/dist-packages/compose/parallel.py", line 69, in parallel_execute raise error_to_reraise TypeError: expected string or buffer ` I have no issue when i downgrade again to the 2.3 version of the package To reproduce the issue, i use the following configuration ( it doesn't seem to depend on the image): ``` version: '2' services : ContainerName: image: bae2d441e03a ports: - "127.0.0.1:9292:9090" ``` I run on Ubuntu 14.04.5 LTS with the following package: ``` docker==2.4.0 docker-compose==1.14.0 docker-pycreds==0.2.1 dockerpty==0.4.1 Python 2.7.6 Client: Version: 17.05.0-ce API version: 1.29 Go version: go1.7.5 Git commit: 89658be Built: Thu May 4 22:06:06 2017 OS/Arch: linux/amd64 Server: Version: 17.05.0-ce API version: 1.29 (minimum version 1.12) Go version: go1.7.5 Git commit: 89658be Built: Thu May 4 22:06:06 2017 OS/Arch: linux/amd64 Experimental: false ``` </issue> <code> [start of docker/utils/ports.py] 1 import re 2 3 PORT_SPEC = re.compile( 4 "^" # Match full string 5 "(" # External part 6 "((?P<host>[a-fA-F\d.:]+):)?" # Address 7 "(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:" # External range 8 ")?" 9 "(?P<int>[\d]+)(-(?P<int_end>[\d]+))?" # Internal range 10 "(?P<proto>/(udp|tcp))?" 
# Protocol 11 "$" # Match full string 12 ) 13 14 15 def add_port_mapping(port_bindings, internal_port, external): 16 if internal_port in port_bindings: 17 port_bindings[internal_port].append(external) 18 else: 19 port_bindings[internal_port] = [external] 20 21 22 def add_port(port_bindings, internal_port_range, external_range): 23 if external_range is None: 24 for internal_port in internal_port_range: 25 add_port_mapping(port_bindings, internal_port, None) 26 else: 27 ports = zip(internal_port_range, external_range) 28 for internal_port, external_port in ports: 29 add_port_mapping(port_bindings, internal_port, external_port) 30 31 32 def build_port_bindings(ports): 33 port_bindings = {} 34 for port in ports: 35 internal_port_range, external_range = split_port(port) 36 add_port(port_bindings, internal_port_range, external_range) 37 return port_bindings 38 39 40 def _raise_invalid_port(port): 41 raise ValueError('Invalid port "%s", should be ' 42 '[[remote_ip:]remote_port[-remote_port]:]' 43 'port[/protocol]' % port) 44 45 46 def port_range(start, end, proto, randomly_available_port=False): 47 if not start: 48 return start 49 if not end: 50 return [start + proto] 51 if randomly_available_port: 52 return ['{}-{}'.format(start, end) + proto] 53 return [str(port) + proto for port in range(int(start), int(end) + 1)] 54 55 56 def split_port(port): 57 match = PORT_SPEC.match(port) 58 if match is None: 59 _raise_invalid_port(port) 60 parts = match.groupdict() 61 62 host = parts['host'] 63 proto = parts['proto'] or '' 64 internal = port_range(parts['int'], parts['int_end'], proto) 65 external = port_range( 66 parts['ext'], parts['ext_end'], '', len(internal) == 1) 67 68 if host is None: 69 if external is not None and len(internal) != len(external): 70 raise ValueError('Port ranges don\'t match in length') 71 return internal, external 72 else: 73 if not external: 74 external = [None] * len(internal) 75 elif len(internal) != len(external): 76 raise ValueError('Port ranges don\'t match in length') 77 return internal, [(host, ext_port) for ext_port in external] 78 [end of docker/utils/ports.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docker/utils/ports.py b/docker/utils/ports.py --- a/docker/utils/ports.py +++ b/docker/utils/ports.py @@ -54,6 +54,7 @@ def split_port(port): + port = str(port) match = PORT_SPEC.match(port) if match is None: _raise_invalid_port(port)
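The one-line fix works because `re.match` requires a string: when docker-compose hands `split_port` an integer port, the match call itself raises the `TypeError: expected string or buffer` seen in the traceback (Python 2 wording). A self-contained sketch with a deliberately simplified port pattern standing in for the real `PORT_SPEC`:

```python
import re

PORT_SPEC = re.compile(r"^(?P<int>\d+)$")  # simplified stand-in for the real spec


def split_port(port):
    port = str(port)  # the fix: accept ints as well as strings
    match = PORT_SPEC.match(port)
    if match is None:
        raise ValueError('Invalid port "%s"' % port)
    return match.group("int")


print(split_port(9090))    # "9090" -- raised TypeError before the fix
print(split_port("9090"))  # "9090"
```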
{"golden_diff": "diff --git a/docker/utils/ports.py b/docker/utils/ports.py\n--- a/docker/utils/ports.py\n+++ b/docker/utils/ports.py\n@@ -54,6 +54,7 @@\n \n \n def split_port(port):\n+ port = str(port)\n match = PORT_SPEC.match(port)\n if match is None:\n _raise_invalid_port(port)\n", "issue": "Issue with port option in 2.4.0 version\nHi,\r\nI update to the 2.4 today and i got issue with docker-compose when i try to add the following line to my configuration file (docker-compose.yml) : \r\n`ports:\r\n - \"127.0.0.1:9292:9090\"`\r\n\r\nI got the following error:\r\n\r\n`\r\nERROR: for ContainerName expected string or buffer\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/docker-compose\", line 11, in <module>\r\n sys.exit(main())\r\n File \"/usr/local/lib/python2.7/dist-packages/compose/cli/main.py\", line 68, in main\r\n command()\r\n File \"/usr/local/lib/python2.7/dist-packages/compose/cli/main.py\", line 118, in perform_command\r\n handler(command, command_options)\r\n File \"/usr/local/lib/python2.7/dist-packages/compose/cli/main.py\", line 926, in up\r\n scale_override=parse_scale_args(options['--scale']),\r\n File \"/usr/local/lib/python2.7/dist-packages/compose/project.py\", line 424, in up\r\n get_deps\r\n File \"/usr/local/lib/python2.7/dist-packages/compose/parallel.py\", line 69, in parallel_execute\r\n raise error_to_reraise\r\nTypeError: expected string or buffer\r\n`\r\n\r\nI have no issue when i downgrade again to the 2.3 version of the package\r\n\r\nTo reproduce the issue, i use the following configuration ( it doesn't seem to depend on the image):\r\n```\r\nversion: '2'\r\n\r\nservices :\r\n ContainerName:\r\n image: bae2d441e03a\r\n ports:\r\n - \"127.0.0.1:9292:9090\"\r\n```\r\n\r\nI run on Ubuntu 14.04.5 LTS with the following package:\r\n```\r\ndocker==2.4.0\r\ndocker-compose==1.14.0\r\ndocker-pycreds==0.2.1\r\ndockerpty==0.4.1\r\nPython 2.7.6\r\nClient:\r\n Version: 17.05.0-ce\r\n API version: 1.29\r\n Go version: go1.7.5\r\n Git commit: 89658be\r\n Built: Thu May 4 22:06:06 2017\r\n OS/Arch: linux/amd64\r\n\r\nServer:\r\n Version: 17.05.0-ce\r\n API version: 1.29 (minimum version 1.12)\r\n Go version: go1.7.5\r\n Git commit: 89658be\r\n Built: Thu May 4 22:06:06 2017\r\n OS/Arch: linux/amd64\r\n Experimental: false\r\n```\n", "before_files": [{"content": "import re\n\nPORT_SPEC = re.compile(\n \"^\" # Match full string\n \"(\" # External part\n \"((?P<host>[a-fA-F\\d.:]+):)?\" # Address\n \"(?P<ext>[\\d]*)(-(?P<ext_end>[\\d]+))?:\" # External range\n \")?\"\n \"(?P<int>[\\d]+)(-(?P<int_end>[\\d]+))?\" # Internal range\n \"(?P<proto>/(udp|tcp))?\" # Protocol\n \"$\" # Match full string\n)\n\n\ndef add_port_mapping(port_bindings, internal_port, external):\n if internal_port in port_bindings:\n port_bindings[internal_port].append(external)\n else:\n port_bindings[internal_port] = [external]\n\n\ndef add_port(port_bindings, internal_port_range, external_range):\n if external_range is None:\n for internal_port in internal_port_range:\n add_port_mapping(port_bindings, internal_port, None)\n else:\n ports = zip(internal_port_range, external_range)\n for internal_port, external_port in ports:\n add_port_mapping(port_bindings, internal_port, external_port)\n\n\ndef build_port_bindings(ports):\n port_bindings = {}\n for port in ports:\n internal_port_range, external_range = split_port(port)\n add_port(port_bindings, internal_port_range, external_range)\n return port_bindings\n\n\ndef _raise_invalid_port(port):\n raise ValueError('Invalid port \"%s\", should 
be '\n '[[remote_ip:]remote_port[-remote_port]:]'\n 'port[/protocol]' % port)\n\n\ndef port_range(start, end, proto, randomly_available_port=False):\n if not start:\n return start\n if not end:\n return [start + proto]\n if randomly_available_port:\n return ['{}-{}'.format(start, end) + proto]\n return [str(port) + proto for port in range(int(start), int(end) + 1)]\n\n\ndef split_port(port):\n match = PORT_SPEC.match(port)\n if match is None:\n _raise_invalid_port(port)\n parts = match.groupdict()\n\n host = parts['host']\n proto = parts['proto'] or ''\n internal = port_range(parts['int'], parts['int_end'], proto)\n external = port_range(\n parts['ext'], parts['ext_end'], '', len(internal) == 1)\n\n if host is None:\n if external is not None and len(internal) != len(external):\n raise ValueError('Port ranges don\\'t match in length')\n return internal, external\n else:\n if not external:\n external = [None] * len(internal)\n elif len(internal) != len(external):\n raise ValueError('Port ranges don\\'t match in length')\n return internal, [(host, ext_port) for ext_port in external]\n", "path": "docker/utils/ports.py"}]}
1,949
79
gh_patches_debug_32765
rasdani/github-patches
git_diff
openai__evals-1180
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Registry path CLI option for oaievalset ### Describe the feature or improvement you're requesting It should take an `--registry_path` like `oaieval` does ### Additional context Useful when using as a library, to run eval sets stored in my own project outside this repo. </issue> <code> [start of evals/cli/oaievalset.py] 1 """ 2 This file defines the `oaievalset` CLI for running eval sets. 3 """ 4 import argparse 5 import json 6 import logging 7 import subprocess 8 from pathlib import Path 9 from typing import Optional, cast 10 11 from evals.registry import Registry 12 13 Task = list[str] 14 logger = logging.getLogger(__name__) 15 16 17 class Progress: 18 def __init__(self, file: str) -> None: 19 self.file = Path(file) 20 self.completed: list[Task] = [] 21 22 def load(self) -> bool: 23 if not self.file.exists(): 24 return False 25 26 with self.file.open() as f: 27 for line in f: 28 self.completed.append(json.loads(line)) 29 return len(self.completed) > 0 30 31 def add(self, item: Task) -> None: 32 self.completed.append(item) 33 self.save() 34 35 def save(self) -> None: 36 self.file.parent.mkdir(parents=True, exist_ok=True) 37 with self.file.open("w") as f: 38 for item in self.completed: 39 f.write(json.dumps(item) + "\n") 40 print(highlight(f"Saved progress to {self.file}")) 41 42 43 def highlight(str: str) -> str: 44 return f"\033[1;32m>>> {str}\033[0m" 45 46 47 def get_parser() -> argparse.ArgumentParser: 48 parser = argparse.ArgumentParser(description="Run eval sets through the API") 49 parser.add_argument("model", type=str, help="Name of a completion model.") 50 parser.add_argument("eval_set", type=str, help="Name of eval set. See registry.") 51 parser.add_argument( 52 "--resume", 53 action=argparse.BooleanOptionalAction, 54 default=True, 55 help="Resume from last checkpoint.", 56 ) 57 parser.add_argument( 58 "--exit-on-error", 59 action=argparse.BooleanOptionalAction, 60 default=True, 61 help="Exit if any oaieval command fails.", 62 ) 63 return parser 64 65 66 class OaiEvalSetArguments(argparse.Namespace): 67 model: str 68 eval_set: str 69 resume: bool 70 exit_on_error: bool 71 72 73 def run( 74 args: OaiEvalSetArguments, 75 unknown_args: list[str], 76 registry: Optional[Registry] = None, 77 run_command: str = "oaieval", 78 ) -> None: 79 registry = registry or Registry() 80 commands: list[Task] = [] 81 eval_set = registry.get_eval_set(args.eval_set) if args.eval_set else None 82 if eval_set: 83 for index, eval in enumerate(registry.get_evals(eval_set.evals)): 84 if not eval or not eval.key: 85 logger.debug("The eval #%d in eval_set is not valid", index) 86 87 command = [run_command, args.model, eval.key] + unknown_args 88 if command in commands: 89 continue 90 commands.append(command) 91 else: 92 logger.warning("No eval set found for %s", args.eval_set) 93 94 num_evals = len(commands) 95 96 progress = Progress(f"/tmp/oaievalset/{args.model}.{args.eval_set}.progress.txt") 97 if args.resume and progress.load(): 98 print(f"Loaded progress from {progress.file}") 99 print(f"{len(progress.completed)}/{len(commands)} evals already completed:") 100 for item in progress.completed: 101 print(" " + " ".join(item)) 102 103 commands = [c for c in commands if c not in progress.completed] 104 command_strs = [" ".join(cmd) for cmd in commands] 105 print("Going to run the following commands:") 106 for command_str in command_strs: 107 print(" " + command_str) 108 109 num_already_completed = 
num_evals - len(commands) 110 for idx, command in enumerate(commands): 111 real_idx = idx + num_already_completed 112 print(highlight("Running command: " + " ".join(command) + f" ({real_idx+1}/{num_evals})")) 113 subprocess.run(command, stdout=subprocess.PIPE, check=args.exit_on_error) 114 progress.add(command) 115 116 print(highlight("All done!")) 117 118 119 def main() -> None: 120 parser = get_parser() 121 args, unknown_args = parser.parse_known_args() 122 run(cast(OaiEvalSetArguments, args), unknown_args) 123 124 125 if __name__ == "__main__": 126 main() 127 [end of evals/cli/oaievalset.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/evals/cli/oaievalset.py b/evals/cli/oaievalset.py --- a/evals/cli/oaievalset.py +++ b/evals/cli/oaievalset.py @@ -48,6 +48,13 @@ parser = argparse.ArgumentParser(description="Run eval sets through the API") parser.add_argument("model", type=str, help="Name of a completion model.") parser.add_argument("eval_set", type=str, help="Name of eval set. See registry.") + parser.add_argument( + "--registry_path", + type=str, + default=None, + action="append", + help="Path to the registry", + ) parser.add_argument( "--resume", action=argparse.BooleanOptionalAction, @@ -66,6 +73,7 @@ class OaiEvalSetArguments(argparse.Namespace): model: str eval_set: str + registry_path: Optional[str] resume: bool exit_on_error: bool @@ -77,6 +85,9 @@ run_command: str = "oaieval", ) -> None: registry = registry or Registry() + if args.registry_path: + registry.add_registry_paths(args.registry_path) + commands: list[Task] = [] eval_set = registry.get_eval_set(args.eval_set) if args.eval_set else None if eval_set: @@ -85,6 +96,9 @@ logger.debug("The eval #%d in eval_set is not valid", index) command = [run_command, args.model, eval.key] + unknown_args + if args.registry_path: + command.append("--registry_path") + command = command + args.registry_path if command in commands: continue commands.append(command)
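The new flag uses `action="append"`, so it may be passed more than once and argparse collects the values into a list, which the patched code then forwards to every child `oaieval` command. A small sketch of that flow (the model and eval names are placeholders):

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--registry_path", type=str, default=None, action="append")

args = parser.parse_args(
    ["--registry_path", "/my/project/registry", "--registry_path", "/shared/registry"]
)
assert args.registry_path == ["/my/project/registry", "/shared/registry"]

# Mirroring the patched command construction:
command = ["oaieval", "some-model", "some-eval"]
if args.registry_path:
    command.append("--registry_path")
    command = command + args.registry_path
assert command[3:] == ["--registry_path", "/my/project/registry", "/shared/registry"]
```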
{"golden_diff": "diff --git a/evals/cli/oaievalset.py b/evals/cli/oaievalset.py\n--- a/evals/cli/oaievalset.py\n+++ b/evals/cli/oaievalset.py\n@@ -48,6 +48,13 @@\n parser = argparse.ArgumentParser(description=\"Run eval sets through the API\")\n parser.add_argument(\"model\", type=str, help=\"Name of a completion model.\")\n parser.add_argument(\"eval_set\", type=str, help=\"Name of eval set. See registry.\")\n+ parser.add_argument(\n+ \"--registry_path\",\n+ type=str,\n+ default=None,\n+ action=\"append\",\n+ help=\"Path to the registry\",\n+ )\n parser.add_argument(\n \"--resume\",\n action=argparse.BooleanOptionalAction,\n@@ -66,6 +73,7 @@\n class OaiEvalSetArguments(argparse.Namespace):\n model: str\n eval_set: str\n+ registry_path: Optional[str]\n resume: bool\n exit_on_error: bool\n \n@@ -77,6 +85,9 @@\n run_command: str = \"oaieval\",\n ) -> None:\n registry = registry or Registry()\n+ if args.registry_path:\n+ registry.add_registry_paths(args.registry_path)\n+\n commands: list[Task] = []\n eval_set = registry.get_eval_set(args.eval_set) if args.eval_set else None\n if eval_set:\n@@ -85,6 +96,9 @@\n logger.debug(\"The eval #%d in eval_set is not valid\", index)\n \n command = [run_command, args.model, eval.key] + unknown_args\n+ if args.registry_path:\n+ command.append(\"--registry_path\")\n+ command = command + args.registry_path\n if command in commands:\n continue\n commands.append(command)\n", "issue": "Registry path CLI option for oaievalset\n### Describe the feature or improvement you're requesting\n\nIt should take an `--registry_path` like `oaieval` does\n\n### Additional context\n\nUseful when using as a library, to run eval sets stored in my own project outside this repo.\n", "before_files": [{"content": "\"\"\"\nThis file defines the `oaievalset` CLI for running eval sets.\n\"\"\"\nimport argparse\nimport json\nimport logging\nimport subprocess\nfrom pathlib import Path\nfrom typing import Optional, cast\n\nfrom evals.registry import Registry\n\nTask = list[str]\nlogger = logging.getLogger(__name__)\n\n\nclass Progress:\n def __init__(self, file: str) -> None:\n self.file = Path(file)\n self.completed: list[Task] = []\n\n def load(self) -> bool:\n if not self.file.exists():\n return False\n\n with self.file.open() as f:\n for line in f:\n self.completed.append(json.loads(line))\n return len(self.completed) > 0\n\n def add(self, item: Task) -> None:\n self.completed.append(item)\n self.save()\n\n def save(self) -> None:\n self.file.parent.mkdir(parents=True, exist_ok=True)\n with self.file.open(\"w\") as f:\n for item in self.completed:\n f.write(json.dumps(item) + \"\\n\")\n print(highlight(f\"Saved progress to {self.file}\"))\n\n\ndef highlight(str: str) -> str:\n return f\"\\033[1;32m>>> {str}\\033[0m\"\n\n\ndef get_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(description=\"Run eval sets through the API\")\n parser.add_argument(\"model\", type=str, help=\"Name of a completion model.\")\n parser.add_argument(\"eval_set\", type=str, help=\"Name of eval set. 
See registry.\")\n parser.add_argument(\n \"--resume\",\n action=argparse.BooleanOptionalAction,\n default=True,\n help=\"Resume from last checkpoint.\",\n )\n parser.add_argument(\n \"--exit-on-error\",\n action=argparse.BooleanOptionalAction,\n default=True,\n help=\"Exit if any oaieval command fails.\",\n )\n return parser\n\n\nclass OaiEvalSetArguments(argparse.Namespace):\n model: str\n eval_set: str\n resume: bool\n exit_on_error: bool\n\n\ndef run(\n args: OaiEvalSetArguments,\n unknown_args: list[str],\n registry: Optional[Registry] = None,\n run_command: str = \"oaieval\",\n) -> None:\n registry = registry or Registry()\n commands: list[Task] = []\n eval_set = registry.get_eval_set(args.eval_set) if args.eval_set else None\n if eval_set:\n for index, eval in enumerate(registry.get_evals(eval_set.evals)):\n if not eval or not eval.key:\n logger.debug(\"The eval #%d in eval_set is not valid\", index)\n\n command = [run_command, args.model, eval.key] + unknown_args\n if command in commands:\n continue\n commands.append(command)\n else:\n logger.warning(\"No eval set found for %s\", args.eval_set)\n\n num_evals = len(commands)\n\n progress = Progress(f\"/tmp/oaievalset/{args.model}.{args.eval_set}.progress.txt\")\n if args.resume and progress.load():\n print(f\"Loaded progress from {progress.file}\")\n print(f\"{len(progress.completed)}/{len(commands)} evals already completed:\")\n for item in progress.completed:\n print(\" \" + \" \".join(item))\n\n commands = [c for c in commands if c not in progress.completed]\n command_strs = [\" \".join(cmd) for cmd in commands]\n print(\"Going to run the following commands:\")\n for command_str in command_strs:\n print(\" \" + command_str)\n\n num_already_completed = num_evals - len(commands)\n for idx, command in enumerate(commands):\n real_idx = idx + num_already_completed\n print(highlight(\"Running command: \" + \" \".join(command) + f\" ({real_idx+1}/{num_evals})\"))\n subprocess.run(command, stdout=subprocess.PIPE, check=args.exit_on_error)\n progress.add(command)\n\n print(highlight(\"All done!\"))\n\n\ndef main() -> None:\n parser = get_parser()\n args, unknown_args = parser.parse_known_args()\n run(cast(OaiEvalSetArguments, args), unknown_args)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "evals/cli/oaievalset.py"}]}
1,790
396
gh_patches_debug_22706
rasdani/github-patches
git_diff
arviz-devs__arviz-426
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Incompatibility with emcee3 pre-release Trying to use the `from_emcee()` function raises the following error for `emcee3`: > AttributeError: 'EnsembleSampler' object has no attribute 'args' This is due to line 29 in `io_emcee.py`: num_args = len(sampler.args) In version 2, the EnsembleSampler class stored this information, but only to pass this to `_function_wrapper` class, which also stored the same information. This wrapper was for the posterior probability, so, `sampler.args` is equivalent to `sampler.lnprobfn.args`. In version 3, this has been slightly modified, and now only the `_FunctionWrapper` class stores this information, however, its name has also been modified, so in this case it should be retrieved with `sampler.log_prob_fn.args`. </issue> <code> [start of arviz/data/io_emcee.py] 1 """emcee-specific conversion code.""" 2 from .inference_data import InferenceData 3 from .base import dict_to_dataset 4 5 6 def _verify_names(sampler, var_names, arg_names): 7 """Make sure var_names and arg_names are assigned reasonably. 8 9 This is meant to run before loading emcee objects into InferenceData. 10 In case var_names or arg_names is None, will provide defaults. If they are 11 not None, it verifies there are the right number of them. 12 13 Throws a ValueError in case validation fails. 14 15 Parameters 16 ---------- 17 sampler : emcee.EnsembleSampler 18 Fitted emcee sampler 19 var_names : list[str] or None 20 Names for the emcee parameters 21 arg_names : list[str] or None 22 Names for the args/observations provided to emcee 23 24 Returns 25 ------- 26 list[str], list[str] 27 Defaults for var_names and arg_names 28 """ 29 num_vars = sampler.chain.shape[-1] 30 num_args = len(sampler.args) 31 32 if var_names is None: 33 var_names = ["var_{}".format(idx) for idx in range(num_vars)] 34 if arg_names is None: 35 arg_names = ["arg_{}".format(idx) for idx in range(num_args)] 36 37 if len(var_names) != num_vars: 38 raise ValueError( 39 "The sampler has {} variables, but only {} var_names were provided!".format( 40 num_vars, len(var_names) 41 ) 42 ) 43 44 if len(arg_names) != num_args: 45 raise ValueError( 46 "The sampler has {} args, but only {} arg_names were provided!".format( 47 num_args, len(arg_names) 48 ) 49 ) 50 return var_names, arg_names 51 52 53 class EmceeConverter: 54 """Encapsulate emcee specific logic.""" 55 56 def __init__(self, sampler, *_, var_names=None, arg_names=None, coords=None, dims=None): 57 var_names, arg_names = _verify_names(sampler, var_names, arg_names) 58 self.sampler = sampler 59 self.var_names = var_names 60 self.arg_names = arg_names 61 self.coords = coords 62 self.dims = dims 63 import emcee 64 65 self.emcee = emcee 66 67 def posterior_to_xarray(self): 68 """Convert the posterior to an xarray dataset.""" 69 data = {} 70 for idx, var_name in enumerate(self.var_names): 71 data[var_name] = self.sampler.chain[(..., idx)] 72 return dict_to_dataset(data, library=self.emcee, coords=self.coords, dims=self.dims) 73 74 def observed_data_to_xarray(self): 75 """Convert observed data to xarray.""" 76 data = {} 77 for idx, var_name in enumerate(self.arg_names): 78 data[var_name] = self.sampler.args[idx] 79 return dict_to_dataset(data, library=self.emcee, coords=self.coords, dims=self.dims) 80 81 def to_inference_data(self): 82 """Convert all available data to an InferenceData object.""" 83 return InferenceData( 84 **{ 85 "posterior": self.posterior_to_xarray(), 86 
"observed_data": self.observed_data_to_xarray(), 87 } 88 ) 89 90 91 def from_emcee(sampler, *, var_names=None, arg_names=None, coords=None, dims=None): 92 """Convert emcee data into an InferenceData object. 93 94 Parameters 95 ---------- 96 sampler : emcee.EnsembleSampler 97 Fitted sampler from emcee. 98 var_names : list[str] (Optional) 99 A list of names for variables in the sampler 100 arg_names : list[str] (Optional) 101 A list of names for args in the sampler 102 coords : dict[str] -> list[str] 103 Map of dimensions to coordinates 104 dims : dict[str] -> list[str] 105 Map variable names to their coordinates 106 """ 107 return EmceeConverter( 108 sampler, var_names=var_names, arg_names=arg_names, coords=coords, dims=dims 109 ).to_inference_data() 110 [end of arviz/data/io_emcee.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/arviz/data/io_emcee.py b/arviz/data/io_emcee.py --- a/arviz/data/io_emcee.py +++ b/arviz/data/io_emcee.py @@ -27,7 +27,8 @@ Defaults for var_names and arg_names """ num_vars = sampler.chain.shape[-1] - num_args = len(sampler.args) + # Get emcee version 2 sampler args, else get emcee version 3 + num_args = len(sampler.args) if hasattr(sampler, "args") else len(sampler.log_prob_fn.args) if var_names is None: var_names = ["var_{}".format(idx) for idx in range(num_vars)] @@ -75,7 +76,12 @@ """Convert observed data to xarray.""" data = {} for idx, var_name in enumerate(self.arg_names): - data[var_name] = self.sampler.args[idx] + # Get emcee version 2 sampler args, else get emcee version 3 + data[var_name] = ( + self.sampler.args[idx] + if hasattr(self.sampler, "args") + else self.sampler.log_prob_fn.args[idx] + ) return dict_to_dataset(data, library=self.emcee, coords=self.coords, dims=self.dims) def to_inference_data(self):
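The essence of the patch is a version check: emcee 2 stores the posterior's extra arguments on the sampler itself, while emcee 3 keeps them only on the function wrapper exposed as `log_prob_fn`. A minimal helper capturing that dispatch, using only attribute names confirmed by the issue and the diff:

```python
def sampler_args(sampler):
    """Return the args passed to the posterior, for emcee 2 and emcee 3."""
    if hasattr(sampler, "args"):       # emcee 2: EnsembleSampler.args
        return sampler.args
    return sampler.log_prob_fn.args    # emcee 3: _FunctionWrapper.args
```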
{"golden_diff": "diff --git a/arviz/data/io_emcee.py b/arviz/data/io_emcee.py\n--- a/arviz/data/io_emcee.py\n+++ b/arviz/data/io_emcee.py\n@@ -27,7 +27,8 @@\n Defaults for var_names and arg_names\n \"\"\"\n num_vars = sampler.chain.shape[-1]\n- num_args = len(sampler.args)\n+ # Get emcee version 2 sampler args, else get emcee version 3\n+ num_args = len(sampler.args) if hasattr(sampler, \"args\") else len(sampler.log_prob_fn.args)\n \n if var_names is None:\n var_names = [\"var_{}\".format(idx) for idx in range(num_vars)]\n@@ -75,7 +76,12 @@\n \"\"\"Convert observed data to xarray.\"\"\"\n data = {}\n for idx, var_name in enumerate(self.arg_names):\n- data[var_name] = self.sampler.args[idx]\n+ # Get emcee version 2 sampler args, else get emcee version 3\n+ data[var_name] = (\n+ self.sampler.args[idx]\n+ if hasattr(self.sampler, \"args\")\n+ else self.sampler.log_prob_fn.args[idx]\n+ )\n return dict_to_dataset(data, library=self.emcee, coords=self.coords, dims=self.dims)\n \n def to_inference_data(self):\n", "issue": "Incompatibility with emcee3 pre-release\nTrying to use the `from_emcee()` function raises the following error for `emcee3`:\r\n\r\n> AttributeError: 'EnsembleSampler' object has no attribute 'args'\r\n\r\nThis is due to line 29 in `io_emcee.py`:\r\n\r\n num_args = len(sampler.args)\r\n\r\nIn version 2, the EnsembleSampler class stored this information, but only to pass this to `_function_wrapper` class, which also stored the same information. This wrapper was for the posterior probability, so, `sampler.args` is equivalent to `sampler.lnprobfn.args`.\r\n\r\nIn version 3, this has been slightly modified, and now only the `_FunctionWrapper` class stores this information, however, its name has also been modified, so in this case it should be retrieved with `sampler.log_prob_fn.args`. \n", "before_files": [{"content": "\"\"\"emcee-specific conversion code.\"\"\"\nfrom .inference_data import InferenceData\nfrom .base import dict_to_dataset\n\n\ndef _verify_names(sampler, var_names, arg_names):\n \"\"\"Make sure var_names and arg_names are assigned reasonably.\n\n This is meant to run before loading emcee objects into InferenceData.\n In case var_names or arg_names is None, will provide defaults. 
If they are\n not None, it verifies there are the right number of them.\n\n Throws a ValueError in case validation fails.\n\n Parameters\n ----------\n sampler : emcee.EnsembleSampler\n Fitted emcee sampler\n var_names : list[str] or None\n Names for the emcee parameters\n arg_names : list[str] or None\n Names for the args/observations provided to emcee\n\n Returns\n -------\n list[str], list[str]\n Defaults for var_names and arg_names\n \"\"\"\n num_vars = sampler.chain.shape[-1]\n num_args = len(sampler.args)\n\n if var_names is None:\n var_names = [\"var_{}\".format(idx) for idx in range(num_vars)]\n if arg_names is None:\n arg_names = [\"arg_{}\".format(idx) for idx in range(num_args)]\n\n if len(var_names) != num_vars:\n raise ValueError(\n \"The sampler has {} variables, but only {} var_names were provided!\".format(\n num_vars, len(var_names)\n )\n )\n\n if len(arg_names) != num_args:\n raise ValueError(\n \"The sampler has {} args, but only {} arg_names were provided!\".format(\n num_args, len(arg_names)\n )\n )\n return var_names, arg_names\n\n\nclass EmceeConverter:\n \"\"\"Encapsulate emcee specific logic.\"\"\"\n\n def __init__(self, sampler, *_, var_names=None, arg_names=None, coords=None, dims=None):\n var_names, arg_names = _verify_names(sampler, var_names, arg_names)\n self.sampler = sampler\n self.var_names = var_names\n self.arg_names = arg_names\n self.coords = coords\n self.dims = dims\n import emcee\n\n self.emcee = emcee\n\n def posterior_to_xarray(self):\n \"\"\"Convert the posterior to an xarray dataset.\"\"\"\n data = {}\n for idx, var_name in enumerate(self.var_names):\n data[var_name] = self.sampler.chain[(..., idx)]\n return dict_to_dataset(data, library=self.emcee, coords=self.coords, dims=self.dims)\n\n def observed_data_to_xarray(self):\n \"\"\"Convert observed data to xarray.\"\"\"\n data = {}\n for idx, var_name in enumerate(self.arg_names):\n data[var_name] = self.sampler.args[idx]\n return dict_to_dataset(data, library=self.emcee, coords=self.coords, dims=self.dims)\n\n def to_inference_data(self):\n \"\"\"Convert all available data to an InferenceData object.\"\"\"\n return InferenceData(\n **{\n \"posterior\": self.posterior_to_xarray(),\n \"observed_data\": self.observed_data_to_xarray(),\n }\n )\n\n\ndef from_emcee(sampler, *, var_names=None, arg_names=None, coords=None, dims=None):\n \"\"\"Convert emcee data into an InferenceData object.\n\n Parameters\n ----------\n sampler : emcee.EnsembleSampler\n Fitted sampler from emcee.\n var_names : list[str] (Optional)\n A list of names for variables in the sampler\n arg_names : list[str] (Optional)\n A list of names for args in the sampler\n coords : dict[str] -> list[str]\n Map of dimensions to coordinates\n dims : dict[str] -> list[str]\n Map variable names to their coordinates\n \"\"\"\n return EmceeConverter(\n sampler, var_names=var_names, arg_names=arg_names, coords=coords, dims=dims\n ).to_inference_data()\n", "path": "arviz/data/io_emcee.py"}]}
1,798
300
gh_patches_debug_3463
rasdani/github-patches
git_diff
ipython__ipython-10046
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Autocompletion with Init-Signatures of C extension classes I don't quite get the autocompletion to work with C extension classes. These classes have a Signature (at least since python 3.4 by utilizing `__text_signature__`) but it seems IPython cannot autocomplete the parameters in `__text_signature__`. For example: ``` >>> import pickle >>> import inspect >>> inspect.Signature.from_callable(pickle.Pickler) <Signature (file, protocol=None, fix_imports=True)> ``` Note that the signature is saved as `__text_signature__`-attribute on ``pickle.Pickle``. I would expect that after printing`pickle.Pickler(` and then hitting TAB it would show: ``` file= protocol= fix_imports= # other stuff ``` but it doesn't. Classes seem to be special in this regard - IPython is definitly able to get the parameters for functions and methods! It just doesn't work for classes. </issue> <code> [start of IPython/utils/dir2.py] 1 # encoding: utf-8 2 """A fancy version of Python's builtin :func:`dir` function. 3 """ 4 5 # Copyright (c) IPython Development Team. 6 # Distributed under the terms of the Modified BSD License. 7 8 import inspect 9 from .py3compat import string_types 10 11 12 def safe_hasattr(obj, attr): 13 """In recent versions of Python, hasattr() only catches AttributeError. 14 This catches all errors. 15 """ 16 try: 17 getattr(obj, attr) 18 return True 19 except: 20 return False 21 22 23 def dir2(obj): 24 """dir2(obj) -> list of strings 25 26 Extended version of the Python builtin dir(), which does a few extra 27 checks. 28 29 This version is guaranteed to return only a list of true strings, whereas 30 dir() returns anything that objects inject into themselves, even if they 31 are later not really valid for attribute access (many extension libraries 32 have such bugs). 33 """ 34 35 # Start building the attribute list via dir(), and then complete it 36 # with a few extra special-purpose calls. 37 38 try: 39 words = set(dir(obj)) 40 except Exception: 41 # TypeError: dir(obj) does not return a list 42 words = set() 43 44 # filter out non-string attributes which may be stuffed by dir() calls 45 # and poor coding in third-party modules 46 47 words = [w for w in words if isinstance(w, string_types)] 48 return sorted(words) 49 50 51 def get_real_method(obj, name): 52 """Like getattr, but with a few extra sanity checks: 53 54 - If obj is a class, ignore its methods 55 - Check if obj is a proxy that claims to have all attributes 56 - Catch attribute access failing with any exception 57 - Check that the attribute is a callable object 58 59 Returns the method or None. 60 """ 61 if inspect.isclass(obj): 62 return None 63 64 try: 65 canary = getattr(obj, '_ipython_canary_method_should_not_exist_', None) 66 except Exception: 67 return None 68 69 if canary is not None: 70 # It claimed to have an attribute it should never have 71 return None 72 73 try: 74 m = getattr(obj, name, None) 75 except Exception: 76 return None 77 78 if callable(m): 79 return m 80 81 return None 82 [end of IPython/utils/dir2.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/IPython/utils/dir2.py b/IPython/utils/dir2.py --- a/IPython/utils/dir2.py +++ b/IPython/utils/dir2.py @@ -41,6 +41,9 @@ # TypeError: dir(obj) does not return a list words = set() + if safe_hasattr(obj, '__class__'): + words |= set(dir(obj.__class__)) + # filter out non-string attributes which may be stuffed by dir() calls # and poor coding in third-party modules
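Condensed, the patched `dir2` now unions in the attributes of `obj.__class__`, which is what exposes class-level metadata that `dir(obj)` alone can miss on proxies and some extension types. A self-contained sketch of the function after the patch (`str` here stands in for the py2/3 `string_types` check used in the listing):

```python
def safe_hasattr(obj, attr):  # same helper as in the listing above
    try:
        getattr(obj, attr)
        return True
    except Exception:
        return False


def dir2(obj):
    try:
        words = set(dir(obj))
    except Exception:  # dir(obj) may not return a list at all
        words = set()
    if safe_hasattr(obj, "__class__"):
        words |= set(dir(obj.__class__))  # the added lines
    return sorted(w for w in words if isinstance(w, str))
```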
{"golden_diff": "diff --git a/IPython/utils/dir2.py b/IPython/utils/dir2.py\n--- a/IPython/utils/dir2.py\n+++ b/IPython/utils/dir2.py\n@@ -41,6 +41,9 @@\n # TypeError: dir(obj) does not return a list\n words = set()\n \n+ if safe_hasattr(obj, '__class__'):\n+ words |= set(dir(obj.__class__))\n+\n # filter out non-string attributes which may be stuffed by dir() calls\n # and poor coding in third-party modules\n", "issue": "Autocompletion with Init-Signatures of C extension classes\nI don't quite get the autocompletion to work with C extension classes. These classes have a Signature (at least since python 3.4 by utilizing `__text_signature__`) but it seems IPython cannot autocomplete the parameters in `__text_signature__`.\r\n\r\nFor example:\r\n\r\n```\r\n>>> import pickle\r\n>>> import inspect\r\n>>> inspect.Signature.from_callable(pickle.Pickler)\r\n<Signature (file, protocol=None, fix_imports=True)>\r\n```\r\n\r\nNote that the signature is saved as `__text_signature__`-attribute on ``pickle.Pickle``.\r\n\r\nI would expect that after printing`pickle.Pickler(` and then hitting TAB it would show:\r\n```\r\nfile=\r\nprotocol=\r\nfix_imports=\r\n# other stuff\r\n```\r\nbut it doesn't. \r\n\r\nClasses seem to be special in this regard - IPython is definitly able to get the parameters for functions and methods! It just doesn't work for classes.\n", "before_files": [{"content": "# encoding: utf-8\n\"\"\"A fancy version of Python's builtin :func:`dir` function.\n\"\"\"\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport inspect\nfrom .py3compat import string_types\n\n\ndef safe_hasattr(obj, attr):\n \"\"\"In recent versions of Python, hasattr() only catches AttributeError.\n This catches all errors.\n \"\"\"\n try:\n getattr(obj, attr)\n return True\n except:\n return False\n\n\ndef dir2(obj):\n \"\"\"dir2(obj) -> list of strings\n\n Extended version of the Python builtin dir(), which does a few extra\n checks.\n\n This version is guaranteed to return only a list of true strings, whereas\n dir() returns anything that objects inject into themselves, even if they\n are later not really valid for attribute access (many extension libraries\n have such bugs).\n \"\"\"\n\n # Start building the attribute list via dir(), and then complete it\n # with a few extra special-purpose calls.\n\n try:\n words = set(dir(obj))\n except Exception:\n # TypeError: dir(obj) does not return a list\n words = set()\n\n # filter out non-string attributes which may be stuffed by dir() calls\n # and poor coding in third-party modules\n\n words = [w for w in words if isinstance(w, string_types)]\n return sorted(words)\n\n\ndef get_real_method(obj, name):\n \"\"\"Like getattr, but with a few extra sanity checks:\n\n - If obj is a class, ignore its methods\n - Check if obj is a proxy that claims to have all attributes\n - Catch attribute access failing with any exception\n - Check that the attribute is a callable object\n\n Returns the method or None.\n \"\"\"\n if inspect.isclass(obj):\n return None\n\n try:\n canary = getattr(obj, '_ipython_canary_method_should_not_exist_', None)\n except Exception:\n return None\n\n if canary is not None:\n # It claimed to have an attribute it should never have\n return None\n\n try:\n m = getattr(obj, name, None)\n except Exception:\n return None\n\n if callable(m):\n return m\n\n return None\n", "path": "IPython/utils/dir2.py"}]}
1,396
117
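A minimal self-contained sketch of the patched `dir2()` from the record above, with `safe_hasattr` inlined and `string_types` reduced to `str` (Python 3 assumed). The toy `Proxy` class is illustrative, not from IPython; it shows why merging in `dir(obj.__class__)` recovers attributes that a pathological instance-level `__dir__` hides:

```python
# Sketch of the patched dir2(); safe_hasattr inlined, Python 3 assumed.
def safe_hasattr(obj, attr):
    try:
        getattr(obj, attr)
        return True
    except Exception:
        return False

def dir2(obj):
    try:
        words = set(dir(obj))
    except Exception:  # dir(obj) may raise on misbehaving objects
        words = set()
    if safe_hasattr(obj, '__class__'):
        words |= set(dir(obj.__class__))  # the fix: pull in class attributes
    return sorted(w for w in words if isinstance(w, str))

class Proxy:
    def __dir__(self):       # a pathological __dir__, as some proxies define
        return []
    def ping(self):
        return 'pong'

p = Proxy()
print('ping' in dir(p))      # False: the instance hides its attributes
print('ping' in dir2(p))     # True: recovered via the class fallback
```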
gh_patches_debug_9205
rasdani/github-patches
git_diff
sanic-org__sanic-2622
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> restart workers online (graceful restart) to hot reload, in production environment. ### Is there an existing issue for this? - [X] I have searched the existing issues ### Is your feature request related to a problem? Please describe. Condition: keep service available any time. # ---- client code---- ```py async def run(): while 1: await post('http://127.0.0.1:8000/') ``` when called app.m.restart("__ALL_PROCESSES__") in a worker, sanic crashed. # ---- server code ---- ```py @app.post("/") async def handler(request): app.m.restart('__ALL_PROCESSES__') return response.text('ok') if __name__ == "__main__": app.run(debug=True, workers=2) ``` ### Describe the solution you'd like graceful restarting and reduce the effect when restarting. my messy describe: 1. graceful restart workers; restart all workers will not crash, if only 1 worker, block a little while (if worker not started yet) is ok. 2. a way to graceful restart worker one by one, code eg: woker_names = tuple(app.m.workers.keys()) for woker_name in worker_names: ret_val = app.m.restart(worker_name) # here, the worker has been graceful restarted, ret_val is meaningful 3. may combine the above 2, when restarting all workers, 50% workers restarting, 50% old workers keep serving ### Additional context simplify the api, ```py app.m.restart('__ALL_PROCESSES__') => app.m.restart_all() ``` thanks. </issue> <code> [start of sanic/worker/multiplexer.py] 1 from multiprocessing.connection import Connection 2 from os import environ, getpid 3 from typing import Any, Dict 4 5 from sanic.worker.process import ProcessState 6 from sanic.worker.state import WorkerState 7 8 9 class WorkerMultiplexer: 10 def __init__( 11 self, 12 monitor_publisher: Connection, 13 worker_state: Dict[str, Any], 14 ): 15 self._monitor_publisher = monitor_publisher 16 self._state = WorkerState(worker_state, self.name) 17 18 def ack(self): 19 self._state._state[self.name] = { 20 **self._state._state[self.name], 21 "state": ProcessState.ACKED.name, 22 } 23 24 def restart(self, name: str = ""): 25 if not name: 26 name = self.name 27 self._monitor_publisher.send(name) 28 29 reload = restart # no cov 30 31 def terminate(self, early: bool = False): 32 message = "__TERMINATE_EARLY__" if early else "__TERMINATE__" 33 self._monitor_publisher.send(message) 34 35 @property 36 def pid(self) -> int: 37 return getpid() 38 39 @property 40 def name(self) -> str: 41 return environ.get("SANIC_WORKER_NAME", "") 42 43 @property 44 def state(self): 45 return self._state 46 47 @property 48 def workers(self) -> Dict[str, Any]: 49 return self.state.full() 50 [end of sanic/worker/multiplexer.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sanic/worker/multiplexer.py b/sanic/worker/multiplexer.py --- a/sanic/worker/multiplexer.py +++ b/sanic/worker/multiplexer.py @@ -21,9 +21,14 @@ "state": ProcessState.ACKED.name, } - def restart(self, name: str = ""): + def restart(self, name: str = "", all_workers: bool = False): + if name and all_workers: + raise ValueError( + "Ambiguous restart with both a named process and" + " all_workers=True" + ) if not name: - name = self.name + name = "__ALL_PROCESSES__:" if all_workers else self.name self._monitor_publisher.send(name) reload = restart # no cov
{"golden_diff": "diff --git a/sanic/worker/multiplexer.py b/sanic/worker/multiplexer.py\n--- a/sanic/worker/multiplexer.py\n+++ b/sanic/worker/multiplexer.py\n@@ -21,9 +21,14 @@\n \"state\": ProcessState.ACKED.name,\n }\n \n- def restart(self, name: str = \"\"):\n+ def restart(self, name: str = \"\", all_workers: bool = False):\n+ if name and all_workers:\n+ raise ValueError(\n+ \"Ambiguous restart with both a named process and\"\n+ \" all_workers=True\"\n+ )\n if not name:\n- name = self.name\n+ name = \"__ALL_PROCESSES__:\" if all_workers else self.name\n self._monitor_publisher.send(name)\n \n reload = restart # no cov\n", "issue": "restart workers online (graceful restart) to hot reload, in production environment.\n### Is there an existing issue for this?\r\n\r\n- [X] I have searched the existing issues\r\n\r\n### Is your feature request related to a problem? Please describe.\r\n\r\nCondition: keep service available any time. \r\n\r\n# ---- client code----\r\n\r\n```py\r\nasync def run(): \r\n while 1:\r\n await post('http://127.0.0.1:8000/') \r\n```\r\n\r\nwhen called app.m.restart(\"__ALL_PROCESSES__\") in a worker, sanic crashed.\r\n\r\n# ---- server code ----\r\n```py\r\[email protected](\"/\")\r\nasync def handler(request):\r\n app.m.restart('__ALL_PROCESSES__')\r\n return response.text('ok')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True, workers=2)\r\n```\r\n\r\n### Describe the solution you'd like\r\n\r\ngraceful restarting and reduce the effect when restarting.\r\nmy messy describe:\r\n1. graceful restart workers; restart all workers will not crash, if only 1 worker, block a little while (if worker not started yet) is ok.\r\n2. a way to graceful restart worker one by one, code eg:\r\n woker_names = tuple(app.m.workers.keys())\r\n for woker_name in worker_names:\r\n ret_val = app.m.restart(worker_name)\r\n # here, the worker has been graceful restarted, ret_val is meaningful\r\n3. may combine the above 2, when restarting all workers, 50% workers restarting, 50% old workers keep serving\r\n\r\n### Additional context\r\n\r\nsimplify the api,\r\n\r\n```py\r\napp.m.restart('__ALL_PROCESSES__') => app.m.restart_all()\r\n```\r\n\r\nthanks.\n", "before_files": [{"content": "from multiprocessing.connection import Connection\nfrom os import environ, getpid\nfrom typing import Any, Dict\n\nfrom sanic.worker.process import ProcessState\nfrom sanic.worker.state import WorkerState\n\n\nclass WorkerMultiplexer:\n def __init__(\n self,\n monitor_publisher: Connection,\n worker_state: Dict[str, Any],\n ):\n self._monitor_publisher = monitor_publisher\n self._state = WorkerState(worker_state, self.name)\n\n def ack(self):\n self._state._state[self.name] = {\n **self._state._state[self.name],\n \"state\": ProcessState.ACKED.name,\n }\n\n def restart(self, name: str = \"\"):\n if not name:\n name = self.name\n self._monitor_publisher.send(name)\n\n reload = restart # no cov\n\n def terminate(self, early: bool = False):\n message = \"__TERMINATE_EARLY__\" if early else \"__TERMINATE__\"\n self._monitor_publisher.send(message)\n\n @property\n def pid(self) -> int:\n return getpid()\n\n @property\n def name(self) -> str:\n return environ.get(\"SANIC_WORKER_NAME\", \"\")\n\n @property\n def state(self):\n return self._state\n\n @property\n def workers(self) -> Dict[str, Any]:\n return self.state.full()\n", "path": "sanic/worker/multiplexer.py"}]}
1,294
186
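A standalone sketch of the dispatch rule the patch above introduces; `resolve_restart_target` and the `current` worker name are stand-ins for the real `WorkerMultiplexer` internals, not Sanic API:

```python
# Standalone sketch of the patched restart() dispatch; `current` stands in
# for the real multiplexer's SANIC_WORKER_NAME lookup.
def resolve_restart_target(name: str = "", all_workers: bool = False,
                           current: str = "Sanic-Server-0-0") -> str:
    if name and all_workers:
        raise ValueError(
            "Ambiguous restart with both a named process and all_workers=True"
        )
    if not name:
        name = "__ALL_PROCESSES__:" if all_workers else current
    return name  # the real method sends this over the monitor pipe

assert resolve_restart_target() == "Sanic-Server-0-0"
assert resolve_restart_target(all_workers=True) == "__ALL_PROCESSES__:"
assert resolve_restart_target("Sanic-Server-1-0") == "Sanic-Server-1-0"
```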
gh_patches_debug_15392
rasdani/github-patches
git_diff
DataBiosphere__toil-1385
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Improve toil cluster utils CLI Instead of requiring the -p flag make it default to 'aws'. It would also be great to make the cluster name optional -- if the user only has 1 cluster running it should use that by default, else raise an error asking for an explicit argument </issue> <code> [start of src/toil/utils/__init__.py] 1 from __future__ import absolute_import 2 3 from toil import version 4 import logging 5 6 from toil.provisioners.aws import getCurrentAWSZone 7 8 logger = logging.getLogger( __name__ ) 9 10 11 def addBasicProvisionerOptions(parser): 12 parser.add_argument("--version", action='version', version=version) 13 parser.add_argument('-p', "--provisioner", dest='provisioner', choices=['aws'], required=True, 14 help="The provisioner for cluster auto-scaling. Only aws is currently " 15 "supported") 16 currentZone = getCurrentAWSZone() 17 zoneString = currentZone if currentZone else 'No zone could be determined' 18 parser.add_argument('-z', '--zone', dest='zone', required=False, default=currentZone, 19 help="The AWS availability zone of the master. This parameter can also be " 20 "set via the TOIL_AWS_ZONE environment variable, or by the ec2_region_name " 21 "parameter in your .boto file, or derived from the instance metadata if " 22 "using this utility on an existing EC2 instance. " 23 "Currently: %s" % zoneString) 24 parser.add_argument("clusterName", help="The name that the cluster will be identifiable by. " 25 "Must be lowercase and may not contain the '_' " 26 "character.") 27 return parser 28 [end of src/toil/utils/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/toil/utils/__init__.py b/src/toil/utils/__init__.py --- a/src/toil/utils/__init__.py +++ b/src/toil/utils/__init__.py @@ -5,12 +5,12 @@ from toil.provisioners.aws import getCurrentAWSZone -logger = logging.getLogger( __name__ ) +logger = logging.getLogger(__name__) def addBasicProvisionerOptions(parser): parser.add_argument("--version", action='version', version=version) - parser.add_argument('-p', "--provisioner", dest='provisioner', choices=['aws'], required=True, + parser.add_argument('-p', "--provisioner", dest='provisioner', choices=['aws'], required=False, default="aws", help="The provisioner for cluster auto-scaling. Only aws is currently " "supported") currentZone = getCurrentAWSZone()
{"golden_diff": "diff --git a/src/toil/utils/__init__.py b/src/toil/utils/__init__.py\n--- a/src/toil/utils/__init__.py\n+++ b/src/toil/utils/__init__.py\n@@ -5,12 +5,12 @@\n \n from toil.provisioners.aws import getCurrentAWSZone\n \n-logger = logging.getLogger( __name__ )\n+logger = logging.getLogger(__name__)\n \n \n def addBasicProvisionerOptions(parser):\n parser.add_argument(\"--version\", action='version', version=version)\n- parser.add_argument('-p', \"--provisioner\", dest='provisioner', choices=['aws'], required=True,\n+ parser.add_argument('-p', \"--provisioner\", dest='provisioner', choices=['aws'], required=False, default=\"aws\",\n help=\"The provisioner for cluster auto-scaling. Only aws is currently \"\n \"supported\")\n currentZone = getCurrentAWSZone()\n", "issue": "Improve toil cluster utils CLI\nInstead of requiring the -p flag make it default to 'aws'. It would also be great to make the cluster name optional -- if the user only has 1 cluster running it should use that by default, else raise an error asking for an explicit argument\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom toil import version\nimport logging\n\nfrom toil.provisioners.aws import getCurrentAWSZone\n\nlogger = logging.getLogger( __name__ )\n\n\ndef addBasicProvisionerOptions(parser):\n parser.add_argument(\"--version\", action='version', version=version)\n parser.add_argument('-p', \"--provisioner\", dest='provisioner', choices=['aws'], required=True,\n help=\"The provisioner for cluster auto-scaling. Only aws is currently \"\n \"supported\")\n currentZone = getCurrentAWSZone()\n zoneString = currentZone if currentZone else 'No zone could be determined'\n parser.add_argument('-z', '--zone', dest='zone', required=False, default=currentZone,\n help=\"The AWS availability zone of the master. This parameter can also be \"\n \"set via the TOIL_AWS_ZONE environment variable, or by the ec2_region_name \"\n \"parameter in your .boto file, or derived from the instance metadata if \"\n \"using this utility on an existing EC2 instance. \"\n \"Currently: %s\" % zoneString)\n parser.add_argument(\"clusterName\", help=\"The name that the cluster will be identifiable by. \"\n \"Must be lowercase and may not contain the '_' \"\n \"character.\")\n return parser\n", "path": "src/toil/utils/__init__.py"}]}
929
198
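The behavioural change in the patch above reduces to an argparse default; a runnable illustration (the cluster-name-autodetection part of the issue is not covered by the golden diff and is not shown here):

```python
# Runnable illustration: -p/--provisioner is now optional, defaulting to "aws".
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-p', '--provisioner', choices=['aws'],
                    required=False, default='aws')
parser.add_argument('clusterName')

args = parser.parse_args(['my-cluster'])  # no -p flag given
assert args.provisioner == 'aws'
```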
gh_patches_debug_3211
rasdani/github-patches
git_diff
xonsh__xonsh-428
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Can't type literal tab with prompt_toolkit In bash or zsh I would typically type `Ctl-V TAB` to insert a literal tab, however, when using prompt_tookitas my shell type this simply tries to do tab completion and there doesn't seem to be a way to create a literal tab. </issue> <code> [start of xonsh/prompt_toolkit_key_bindings.py] 1 """Key bindings for prompt_toolkit xonsh shell.""" 2 import builtins 3 4 from prompt_toolkit.filters import Filter 5 from prompt_toolkit.keys import Keys 6 7 8 class TabShouldInsertIndentFilter(Filter): 9 """ 10 Filter that is intended to check if <Tab> should insert indent instead of 11 starting autocompletion. 12 It basically just checks if there are only whitespaces before the cursor - 13 if so indent should be inserted, otherwise autocompletion. 14 """ 15 def __call__(self, cli): 16 before_cursor = cli.current_buffer.document.current_line_before_cursor 17 18 return bool(before_cursor.isspace()) 19 20 21 def load_xonsh_bindings(key_bindings_manager): 22 """ 23 Load custom key bindings. 24 """ 25 handle = key_bindings_manager.registry.add_binding 26 env = builtins.__xonsh_env__ 27 28 @handle(Keys.Tab, filter=TabShouldInsertIndentFilter()) 29 def _(event): 30 """ 31 If there are only whitespaces before current cursor position insert 32 indent instead of autocompleting. 33 """ 34 event.cli.current_buffer.insert_text(env.get('INDENT')) 35 [end of xonsh/prompt_toolkit_key_bindings.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/xonsh/prompt_toolkit_key_bindings.py b/xonsh/prompt_toolkit_key_bindings.py --- a/xonsh/prompt_toolkit_key_bindings.py +++ b/xonsh/prompt_toolkit_key_bindings.py @@ -32,3 +32,11 @@ indent instead of autocompleting. """ event.cli.current_buffer.insert_text(env.get('INDENT')) + + @handle(Keys.BackTab) + def insert_literal_tab(event): + """ + Insert literal tab on Shift+Tab instead of autocompleting + """ + event.cli.current_buffer.insert_text(env.get('INDENT')) +
{"golden_diff": "diff --git a/xonsh/prompt_toolkit_key_bindings.py b/xonsh/prompt_toolkit_key_bindings.py\n--- a/xonsh/prompt_toolkit_key_bindings.py\n+++ b/xonsh/prompt_toolkit_key_bindings.py\n@@ -32,3 +32,11 @@\n indent instead of autocompleting.\n \"\"\"\n event.cli.current_buffer.insert_text(env.get('INDENT'))\n+\n+ @handle(Keys.BackTab)\n+ def insert_literal_tab(event):\n+ \"\"\"\n+ Insert literal tab on Shift+Tab instead of autocompleting\n+ \"\"\"\n+ event.cli.current_buffer.insert_text(env.get('INDENT'))\n+\n", "issue": "Can't type literal tab with prompt_toolkit\nIn bash or zsh I would typically type `Ctl-V TAB` to insert a literal tab, however, when using prompt_tookitas my shell type this simply tries to do tab completion and there doesn't seem to be a way to create a literal tab.\n\n", "before_files": [{"content": "\"\"\"Key bindings for prompt_toolkit xonsh shell.\"\"\"\nimport builtins\n\nfrom prompt_toolkit.filters import Filter\nfrom prompt_toolkit.keys import Keys\n\n\nclass TabShouldInsertIndentFilter(Filter):\n \"\"\"\n Filter that is intended to check if <Tab> should insert indent instead of\n starting autocompletion.\n It basically just checks if there are only whitespaces before the cursor -\n if so indent should be inserted, otherwise autocompletion.\n \"\"\"\n def __call__(self, cli):\n before_cursor = cli.current_buffer.document.current_line_before_cursor\n\n return bool(before_cursor.isspace())\n\n\ndef load_xonsh_bindings(key_bindings_manager):\n \"\"\"\n Load custom key bindings.\n \"\"\"\n handle = key_bindings_manager.registry.add_binding\n env = builtins.__xonsh_env__\n\n @handle(Keys.Tab, filter=TabShouldInsertIndentFilter())\n def _(event):\n \"\"\"\n If there are only whitespaces before current cursor position insert\n indent instead of autocompleting.\n \"\"\"\n event.cli.current_buffer.insert_text(env.get('INDENT'))\n", "path": "xonsh/prompt_toolkit_key_bindings.py"}]}
900
144
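A sketch of the binding the patch adds, assuming the registry-based key bindings manager of the prompt_toolkit versions xonsh used at the time; Shift+Tab reaches the application as `Keys.BackTab`, and `env` is assumed to be a mapping with an `'INDENT'` entry:

```python
# Sketch only: assumes an old prompt_toolkit with a registry-based bindings
# manager, and an env mapping carrying an 'INDENT' string, as in xonsh.
from prompt_toolkit.keys import Keys

def load_literal_tab_binding(key_bindings_manager, env):
    handle = key_bindings_manager.registry.add_binding

    @handle(Keys.BackTab)          # Shift+Tab arrives as BackTab
    def insert_literal_tab(event):
        # Insert the configured indent text instead of triggering completion.
        event.cli.current_buffer.insert_text(env.get('INDENT'))
```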
gh_patches_debug_689
rasdani/github-patches
git_diff
great-expectations__great_expectations-1500
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Use cleaner solution for non-truncating division in python 2 Prefer `from __future__ import division` to `1.*x/y` </issue> <code> [start of great_expectations/cli/cli_messages.py] 1 # -*- coding: utf-8 -*- 2 from great_expectations import DataContext 3 4 GREETING = """<cyan>\ 5 ___ _ ___ _ _ _ 6 / __|_ _ ___ __ _| |_ | __|_ ___ __ ___ __| |_ __ _| |_(_)___ _ _ ___ 7 | (_ | '_/ -_) _` | _| | _|\ \ / '_ \/ -_) _| _/ _` | _| / _ \ ' \(_-< 8 \___|_| \___\__,_|\__| |___/_\_\ .__/\___\__|\__\__,_|\__|_\___/_||_/__/ 9 |_| 10 ~ Always know what to expect from your data ~ 11 </cyan>""" 12 13 LETS_BEGIN_PROMPT = """Let's configure a new Data Context. 14 15 First, Great Expectations will create a new directory: 16 17 great_expectations 18 |-- great_expectations.yml 19 |-- expectations 20 |-- notebooks 21 |-- plugins 22 |-- .gitignore 23 |-- uncommitted 24 |-- config_variables.yml 25 |-- documentation 26 |-- validations 27 28 OK to proceed?""" 29 30 PROJECT_IS_COMPLETE = "This looks like an existing project that <green>appears complete!</green> You are <green>ready to roll.</green>\n" 31 32 RUN_INIT_AGAIN = ( 33 "OK. You must run <green>great_expectations init</green> to fix the missing files!" 34 ) 35 36 COMPLETE_ONBOARDING_PROMPT = """To run locally, we need some files that are not in source control. 37 - Anything existing will not be modified. 38 - Would you like to fix this automatically?""" 39 40 SLACK_SETUP_INTRO = """ 41 <cyan>========== Slack Notifications ==========</cyan> 42 """ 43 44 SLACK_SETUP_PROMPT = "Would you like to set up Slack data quality notifications?" 45 46 SLACK_DOC_LINK = """http://docs.greatexpectations.io/en/latest/getting_started/cli_init.html#configuring-slack-notifications 47 """ 48 49 SLACK_WEBHOOK_PROMPT = """Please add your Slack webhook below. Getting one is easy! 50 """ 51 52 SLACK_LATER = "\nTo setup Slack later please see the the slack section in the CLI init getting started guide." 53 54 SLACK_SETUP_COMPLETE = """ 55 OK. <green>Slack is set up.</green> To modify this in the future please see the slack section in the CLI init getting started guide.""" 56 57 ONBOARDING_COMPLETE = """ 58 Great Expectations added some missing files required to run. 59 - You may see new files in `<yellow>great_expectations/uncommitted</yellow>`. 60 - You may need to add secrets to `<yellow>great_expectations/uncommitted/config_variables.yml</yellow>` to finish onboarding. 61 """ 62 63 BUILD_DOCS_PROMPT = "Would you like to build & view this project's Data Docs!?" 64 65 NO_DATASOURCES_FOUND = """<red>Error: No datasources were found.</red> Please add one by: 66 - running `<green>great_expectations datasource new</green>` or 67 - by editing the {} file 68 """.format( 69 DataContext.GE_YML 70 ) 71 72 SETUP_SUCCESS = "\n<cyan>Congratulations! Great Expectations is now set up.</cyan>" 73 74 SECTION_SEPARATOR = "\n================================================================================\n" 75 76 DONE = "Done" 77 [end of great_expectations/cli/cli_messages.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/great_expectations/cli/cli_messages.py b/great_expectations/cli/cli_messages.py --- a/great_expectations/cli/cli_messages.py +++ b/great_expectations/cli/cli_messages.py @@ -17,6 +17,7 @@ great_expectations |-- great_expectations.yml |-- expectations + |-- checkpoints |-- notebooks |-- plugins |-- .gitignore
{"golden_diff": "diff --git a/great_expectations/cli/cli_messages.py b/great_expectations/cli/cli_messages.py\n--- a/great_expectations/cli/cli_messages.py\n+++ b/great_expectations/cli/cli_messages.py\n@@ -17,6 +17,7 @@\n great_expectations\n |-- great_expectations.yml\n |-- expectations\n+ |-- checkpoints \n |-- notebooks\n |-- plugins\n |-- .gitignore\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom great_expectations import DataContext\n\nGREETING = \"\"\"<cyan>\\\n ___ _ ___ _ _ _\n / __|_ _ ___ __ _| |_ | __|_ ___ __ ___ __| |_ __ _| |_(_)___ _ _ ___\n| (_ | '_/ -_) _` | _| | _|\\ \\ / '_ \\/ -_) _| _/ _` | _| / _ \\ ' \\(_-<\n \\___|_| \\___\\__,_|\\__| |___/_\\_\\ .__/\\___\\__|\\__\\__,_|\\__|_\\___/_||_/__/\n |_|\n ~ Always know what to expect from your data ~\n</cyan>\"\"\"\n\nLETS_BEGIN_PROMPT = \"\"\"Let's configure a new Data Context.\n\nFirst, Great Expectations will create a new directory:\n\n great_expectations\n |-- great_expectations.yml\n |-- expectations\n |-- notebooks\n |-- plugins\n |-- .gitignore\n |-- uncommitted\n |-- config_variables.yml\n |-- documentation\n |-- validations\n\nOK to proceed?\"\"\"\n\nPROJECT_IS_COMPLETE = \"This looks like an existing project that <green>appears complete!</green> You are <green>ready to roll.</green>\\n\"\n\nRUN_INIT_AGAIN = (\n \"OK. You must run <green>great_expectations init</green> to fix the missing files!\"\n)\n\nCOMPLETE_ONBOARDING_PROMPT = \"\"\"To run locally, we need some files that are not in source control.\n - Anything existing will not be modified.\n - Would you like to fix this automatically?\"\"\"\n\nSLACK_SETUP_INTRO = \"\"\"\n<cyan>========== Slack Notifications ==========</cyan>\n\"\"\"\n\nSLACK_SETUP_PROMPT = \"Would you like to set up Slack data quality notifications?\"\n\nSLACK_DOC_LINK = \"\"\"http://docs.greatexpectations.io/en/latest/getting_started/cli_init.html#configuring-slack-notifications\n\"\"\"\n\nSLACK_WEBHOOK_PROMPT = \"\"\"Please add your Slack webhook below. Getting one is easy!\n\"\"\"\n\nSLACK_LATER = \"\\nTo setup Slack later please see the the slack section in the CLI init getting started guide.\"\n\nSLACK_SETUP_COMPLETE = \"\"\"\nOK. <green>Slack is set up.</green> To modify this in the future please see the slack section in the CLI init getting started guide.\"\"\"\n\nONBOARDING_COMPLETE = \"\"\"\nGreat Expectations added some missing files required to run.\n - You may see new files in `<yellow>great_expectations/uncommitted</yellow>`.\n - You may need to add secrets to `<yellow>great_expectations/uncommitted/config_variables.yml</yellow>` to finish onboarding.\n\"\"\"\n\nBUILD_DOCS_PROMPT = \"Would you like to build & view this project's Data Docs!?\"\n\nNO_DATASOURCES_FOUND = \"\"\"<red>Error: No datasources were found.</red> Please add one by:\n - running `<green>great_expectations datasource new</green>` or\n - by editing the {} file\n\"\"\".format(\n DataContext.GE_YML\n)\n\nSETUP_SUCCESS = \"\\n<cyan>Congratulations! Great Expectations is now set up.</cyan>\"\n\nSECTION_SEPARATOR = \"\\n================================================================================\\n\"\n\nDONE = \"Done\"\n", "path": "great_expectations/cli/cli_messages.py"}]}
1,418
92
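The issue text above recommends `from __future__ import division` over the `1.*x/y` coercion trick; the preference in runnable form (the import is a no-op on Python 3 and must be the first statement in a Python 2 module):

```python
# Python 2/3-compatible true division, as the issue recommends.
from __future__ import division

x, y = 1, 4
print(x / y)    # 0.25 on Python 2 as well, no 1.*x/y coercion needed
print(x // y)   # 0 -- explicit floor division where truncation is intended
```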
gh_patches_debug_25258
rasdani/github-patches
git_diff
mindsdb__lightwood-1091
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Apple M1 support ## Your Environment * Operating system: macOS in an M1 apple computer ## Describe your issue As reported by @alejandrovillegas, installation currently fails in MacBooks with M1 processors. See attached log for more details. [m1_issue.txt](https://github.com/mindsdb/lightwood/files/7042557/m1_issue.txt) </issue> <code> [start of lightwood/mixer/__init__.py] 1 from lightwood.mixer.base import BaseMixer 2 from lightwood.mixer.unit import Unit 3 from lightwood.mixer.neural import Neural 4 from lightwood.mixer.neural_ts import NeuralTs 5 from lightwood.mixer.xgboost import XGBoostMixer 6 from lightwood.mixer.random_forest import RandomForest 7 from lightwood.mixer.sktime import SkTime 8 from lightwood.mixer.arima import ARIMAMixer 9 from lightwood.mixer.ets import ETSMixer 10 from lightwood.mixer.gluonts import GluonTSMixer 11 from lightwood.mixer.regression import Regression 12 13 try: 14 from lightwood.mixer.qclassic import QClassic 15 except Exception: 16 QClassic = None 17 18 try: 19 from lightwood.mixer.nhits import NHitsMixer 20 except Exception: 21 NHitsMixer = None 22 23 try: 24 from lightwood.mixer.prophet import ProphetMixer 25 except Exception: 26 ProphetMixer = None 27 28 try: 29 from lightwood.mixer.lightgbm import LightGBM 30 from lightwood.mixer.lightgbm_array import LightGBMArray 31 except Exception: 32 LightGBM = None 33 LightGBMArray = None 34 35 __all__ = ['BaseMixer', 'Neural', 'NeuralTs', 'LightGBM', 'RandomForest', 'LightGBMArray', 'Unit', 'Regression', 36 'SkTime', 'QClassic', 'ProphetMixer', 'ETSMixer', 'ARIMAMixer', 'NHitsMixer', 'GluonTSMixer', 'XGBoostMixer'] 37 [end of lightwood/mixer/__init__.py] [start of lightwood/helpers/seed.py] 1 import random 2 import torch 3 import numpy as np 4 import mxnet as mx 5 6 7 def seed(seed_nr: int) -> None: 8 torch.manual_seed(seed_nr) 9 torch.backends.cudnn.deterministic = True 10 torch.backends.cudnn.benchmark = False 11 np.random.seed(seed_nr) 12 random.seed(seed_nr) 13 mx.random.seed(seed_nr) 14 [end of lightwood/helpers/seed.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lightwood/helpers/seed.py b/lightwood/helpers/seed.py --- a/lightwood/helpers/seed.py +++ b/lightwood/helpers/seed.py @@ -1,7 +1,11 @@ import random import torch import numpy as np -import mxnet as mx + +try: + import mxnet as mx +except Exception: + mx = None def seed(seed_nr: int) -> None: @@ -10,4 +14,6 @@ torch.backends.cudnn.benchmark = False np.random.seed(seed_nr) random.seed(seed_nr) - mx.random.seed(seed_nr) + + if mx is not None: + mx.random.seed(seed_nr) diff --git a/lightwood/mixer/__init__.py b/lightwood/mixer/__init__.py --- a/lightwood/mixer/__init__.py +++ b/lightwood/mixer/__init__.py @@ -7,7 +7,6 @@ from lightwood.mixer.sktime import SkTime from lightwood.mixer.arima import ARIMAMixer from lightwood.mixer.ets import ETSMixer -from lightwood.mixer.gluonts import GluonTSMixer from lightwood.mixer.regression import Regression try: @@ -25,6 +24,11 @@ except Exception: ProphetMixer = None +try: + from lightwood.mixer.gluonts import GluonTSMixer +except Exception: + GluonTSMixer = None + try: from lightwood.mixer.lightgbm import LightGBM from lightwood.mixer.lightgbm_array import LightGBMArray
{"golden_diff": "diff --git a/lightwood/helpers/seed.py b/lightwood/helpers/seed.py\n--- a/lightwood/helpers/seed.py\n+++ b/lightwood/helpers/seed.py\n@@ -1,7 +1,11 @@\n import random\n import torch\n import numpy as np\n-import mxnet as mx\n+\n+try:\n+ import mxnet as mx\n+except Exception:\n+ mx = None\n \n \n def seed(seed_nr: int) -> None:\n@@ -10,4 +14,6 @@\n torch.backends.cudnn.benchmark = False\n np.random.seed(seed_nr)\n random.seed(seed_nr)\n- mx.random.seed(seed_nr)\n+\n+ if mx is not None:\n+ mx.random.seed(seed_nr)\ndiff --git a/lightwood/mixer/__init__.py b/lightwood/mixer/__init__.py\n--- a/lightwood/mixer/__init__.py\n+++ b/lightwood/mixer/__init__.py\n@@ -7,7 +7,6 @@\n from lightwood.mixer.sktime import SkTime\n from lightwood.mixer.arima import ARIMAMixer\n from lightwood.mixer.ets import ETSMixer\n-from lightwood.mixer.gluonts import GluonTSMixer\n from lightwood.mixer.regression import Regression\n \n try:\n@@ -25,6 +24,11 @@\n except Exception:\n ProphetMixer = None\n \n+try:\n+ from lightwood.mixer.gluonts import GluonTSMixer\n+except Exception:\n+ GluonTSMixer = None\n+\n try:\n from lightwood.mixer.lightgbm import LightGBM\n from lightwood.mixer.lightgbm_array import LightGBMArray\n", "issue": "Apple M1 support\n## Your Environment\r\n* Operating system: macOS in an M1 apple computer\r\n\r\n## Describe your issue\r\nAs reported by @alejandrovillegas, installation currently fails in MacBooks with M1 processors. See attached log for more details.\r\n[m1_issue.txt](https://github.com/mindsdb/lightwood/files/7042557/m1_issue.txt)\r\n\r\n\n", "before_files": [{"content": "from lightwood.mixer.base import BaseMixer\nfrom lightwood.mixer.unit import Unit\nfrom lightwood.mixer.neural import Neural\nfrom lightwood.mixer.neural_ts import NeuralTs\nfrom lightwood.mixer.xgboost import XGBoostMixer\nfrom lightwood.mixer.random_forest import RandomForest\nfrom lightwood.mixer.sktime import SkTime\nfrom lightwood.mixer.arima import ARIMAMixer\nfrom lightwood.mixer.ets import ETSMixer\nfrom lightwood.mixer.gluonts import GluonTSMixer\nfrom lightwood.mixer.regression import Regression\n\ntry:\n from lightwood.mixer.qclassic import QClassic\nexcept Exception:\n QClassic = None\n\ntry:\n from lightwood.mixer.nhits import NHitsMixer\nexcept Exception:\n NHitsMixer = None\n\ntry:\n from lightwood.mixer.prophet import ProphetMixer\nexcept Exception:\n ProphetMixer = None\n\ntry:\n from lightwood.mixer.lightgbm import LightGBM\n from lightwood.mixer.lightgbm_array import LightGBMArray\nexcept Exception:\n LightGBM = None\n LightGBMArray = None\n\n__all__ = ['BaseMixer', 'Neural', 'NeuralTs', 'LightGBM', 'RandomForest', 'LightGBMArray', 'Unit', 'Regression',\n 'SkTime', 'QClassic', 'ProphetMixer', 'ETSMixer', 'ARIMAMixer', 'NHitsMixer', 'GluonTSMixer', 'XGBoostMixer']\n", "path": "lightwood/mixer/__init__.py"}, {"content": "import random\nimport torch\nimport numpy as np\nimport mxnet as mx\n\n\ndef seed(seed_nr: int) -> None:\n torch.manual_seed(seed_nr)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n np.random.seed(seed_nr)\n random.seed(seed_nr)\n mx.random.seed(seed_nr)\n", "path": "lightwood/helpers/seed.py"}]}
1,141
363
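The core of the fix above is the optional-import pattern; a trimmed, runnable sketch that omits the torch seeding of the real helper and assumes numpy is installed, while mxnet stays strictly optional:

```python
# Trimmed sketch of the patched seeding helper; torch is omitted and numpy
# is assumed installed, while mxnet remains strictly optional.
import random
import numpy as np

try:
    import mxnet as mx
except Exception:          # any failure, e.g. no mxnet wheel for Apple M1
    mx = None

def seed(seed_nr: int) -> None:
    np.random.seed(seed_nr)
    random.seed(seed_nr)
    if mx is not None:     # only touch mxnet when it imported cleanly
        mx.random.seed(seed_nr)

seed(42)
```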
gh_patches_debug_5753
rasdani/github-patches
git_diff
OpenEnergyPlatform__oeplatform-787
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Meta-schemas are not created properly It seems that a fresh installation of the most recent version does not create the meta-schemas anymore. </issue> <code> [start of oedb_datamodels/versions/5c098aa81e2c_create_schemas.py] 1 """create schemas 2 3 Revision ID: 5c098aa81e2c 4 Revises: 46fb02acc3b1 5 Create Date: 2017-11-23 15:53:57.716306 6 7 """ 8 import sqlalchemy as sa 9 from alembic import op 10 11 # revision identifiers, used by Alembic. 12 revision = "5c098aa81e2c" 13 down_revision = "048215319c74" 14 branch_labels = None 15 depends_on = None 16 17 schemas = [ 18 "demand", 19 "economy", 20 "emission", 21 "environment", 22 "grid", 23 "boundaries", 24 "society", 25 "supply", 26 "scenario", 27 "climate", 28 "model_draft", 29 "openstreetmap", 30 "reference", 31 ] 32 33 34 def upgrade(): 35 for s in schemas: 36 op.execute("CREATE SCHEMA " + s) 37 38 39 def downgrade(): 40 for s in schemas: 41 op.execute("DROP SCHEMA " + s) 42 [end of oedb_datamodels/versions/5c098aa81e2c_create_schemas.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/oedb_datamodels/versions/5c098aa81e2c_create_schemas.py b/oedb_datamodels/versions/5c098aa81e2c_create_schemas.py --- a/oedb_datamodels/versions/5c098aa81e2c_create_schemas.py +++ b/oedb_datamodels/versions/5c098aa81e2c_create_schemas.py @@ -30,12 +30,15 @@ "reference", ] - def upgrade(): for s in schemas: op.execute("CREATE SCHEMA " + s) + for s in schemas: + op.execute("CREATE SCHEMA _" + s) def downgrade(): for s in schemas: - op.execute("DROP SCHEMA " + s) + op.execute("DROP SCHEMA _" + s + " CASCADE") + for s in schemas: + op.execute("DROP SCHEMA " + s + " CASCADE")
{"golden_diff": "diff --git a/oedb_datamodels/versions/5c098aa81e2c_create_schemas.py b/oedb_datamodels/versions/5c098aa81e2c_create_schemas.py\n--- a/oedb_datamodels/versions/5c098aa81e2c_create_schemas.py\n+++ b/oedb_datamodels/versions/5c098aa81e2c_create_schemas.py\n@@ -30,12 +30,15 @@\n \"reference\",\n ]\n \n-\n def upgrade():\n for s in schemas:\n op.execute(\"CREATE SCHEMA \" + s)\n+ for s in schemas:\n+ op.execute(\"CREATE SCHEMA _\" + s)\n \n \n def downgrade():\n for s in schemas:\n- op.execute(\"DROP SCHEMA \" + s)\n+ op.execute(\"DROP SCHEMA _\" + s + \" CASCADE\")\n+ for s in schemas:\n+ op.execute(\"DROP SCHEMA \" + s + \" CASCADE\")\n", "issue": "Meta-schemas are not created properly\nIt seems that a fresh installation of the most recent version does not create the meta-schemas anymore.\n", "before_files": [{"content": "\"\"\"create schemas\n\nRevision ID: 5c098aa81e2c\nRevises: 46fb02acc3b1\nCreate Date: 2017-11-23 15:53:57.716306\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"5c098aa81e2c\"\ndown_revision = \"048215319c74\"\nbranch_labels = None\ndepends_on = None\n\nschemas = [\n \"demand\",\n \"economy\",\n \"emission\",\n \"environment\",\n \"grid\",\n \"boundaries\",\n \"society\",\n \"supply\",\n \"scenario\",\n \"climate\",\n \"model_draft\",\n \"openstreetmap\",\n \"reference\",\n]\n\n\ndef upgrade():\n for s in schemas:\n op.execute(\"CREATE SCHEMA \" + s)\n\n\ndef downgrade():\n for s in schemas:\n op.execute(\"DROP SCHEMA \" + s)\n", "path": "oedb_datamodels/versions/5c098aa81e2c_create_schemas.py"}]}
917
226
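A sketch of the patched migration pair; `op` is injected here so the snippet runs on its own (in the real Alembic script it is `from alembic import op`), and the schema list is truncated:

```python
# Sketch: `op` is injected for self-containedness; schema list truncated.
schemas = ["demand", "economy", "model_draft"]

def upgrade(op):
    for s in schemas:
        op.execute("CREATE SCHEMA " + s)
    for s in schemas:                       # the fix: matching meta-schemas
        op.execute("CREATE SCHEMA _" + s)

def downgrade(op):
    for s in schemas:                       # meta-schemas dropped first
        op.execute("DROP SCHEMA _" + s + " CASCADE")
    for s in schemas:
        op.execute("DROP SCHEMA " + s + " CASCADE")

class EchoOp:                               # stand-in that just prints SQL
    def execute(self, sql):
        print(sql)

upgrade(EchoOp())
```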
gh_patches_debug_15539
rasdani/github-patches
git_diff
scikit-hep__pyhf-1355
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Validate lower bounds of dependencies in CI # Description To ensure that the lower bound of all dependencies are still sufficient for the API used, add a test that installs from a `lower-bound-requirements.txt` that might look something like ``` # core scipy==1.4.0 click==7.0.0 tqdm==4.56.0 jsonschema==3.2.0 jsonpatch==1.23.0 pyyaml==5.1.0 # xmlio uproot3==3.14.0 uproot==4.0.0 # minuit iminuit==2.1.0 # ... ``` and then run the test suite. </issue> <code> [start of setup.py] 1 from setuptools import setup 2 3 extras_require = { 4 'shellcomplete': ['click_completion'], 5 'tensorflow': [ 6 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major 7 'tensorflow-probability~=0.10.0', 8 ], 9 'torch': ['torch~=1.8'], 10 'jax': ['jax~=0.2.4', 'jaxlib~=0.1.56'], 11 'xmlio': [ 12 'uproot3~=3.14', 13 'uproot~=4.0', 14 ], # uproot3 required until writing to ROOT supported in uproot4 15 'minuit': ['iminuit~=2.1,<2.4'], # iminuit v2.4.0 behavior needs to be understood 16 } 17 extras_require['backends'] = sorted( 18 set( 19 extras_require['tensorflow'] 20 + extras_require['torch'] 21 + extras_require['jax'] 22 + extras_require['minuit'] 23 ) 24 ) 25 extras_require['contrib'] = sorted({'matplotlib', 'requests'}) 26 extras_require['lint'] = sorted({'flake8', 'black'}) 27 28 extras_require['test'] = sorted( 29 set( 30 extras_require['backends'] 31 + extras_require['xmlio'] 32 + extras_require['contrib'] 33 + extras_require['shellcomplete'] 34 + [ 35 'pytest~=6.0', 36 'pytest-cov>=2.5.1', 37 'pytest-mock', 38 'pytest-benchmark[histogram]', 39 'pytest-console-scripts', 40 'pytest-mpl', 41 'pydocstyle', 42 'papermill~=2.0', 43 'nteract-scrapbook~=0.2', 44 'jupyter', 45 'graphviz', 46 ] 47 ) 48 ) 49 extras_require['docs'] = sorted( 50 set( 51 extras_require['xmlio'] 52 + [ 53 'sphinx>=3.1.2', 54 'sphinxcontrib-bibtex~=2.1', 55 'sphinx-click', 56 'sphinx_rtd_theme', 57 'nbsphinx', 58 'ipywidgets', 59 'sphinx-issues', 60 'sphinx-copybutton>0.2.9', 61 ] 62 ) 63 ) 64 extras_require['develop'] = sorted( 65 set( 66 extras_require['docs'] 67 + extras_require['lint'] 68 + extras_require['test'] 69 + [ 70 'nbdime', 71 'bump2version', 72 'ipython', 73 'pre-commit', 74 'check-manifest', 75 'codemetapy>=0.3.4', 76 'twine', 77 ] 78 ) 79 ) 80 extras_require['complete'] = sorted(set(sum(extras_require.values(), []))) 81 82 83 setup( 84 extras_require=extras_require, 85 use_scm_version=lambda: {'local_scheme': lambda version: ''}, 86 ) 87 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -3,13 +3,13 @@ extras_require = { 'shellcomplete': ['click_completion'], 'tensorflow': [ - 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major - 'tensorflow-probability~=0.10.0', + 'tensorflow~=2.2.1', # TensorFlow minor releases are as volatile as major + 'tensorflow-probability~=0.10.1', ], 'torch': ['torch~=1.8'], - 'jax': ['jax~=0.2.4', 'jaxlib~=0.1.56'], + 'jax': ['jax~=0.2.8', 'jaxlib~=0.1.58'], 'xmlio': [ - 'uproot3~=3.14', + 'uproot3>=3.14.1', 'uproot~=4.0', ], # uproot3 required until writing to ROOT supported in uproot4 'minuit': ['iminuit~=2.1,<2.4'], # iminuit v2.4.0 behavior needs to be understood
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -3,13 +3,13 @@\n extras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n- 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major\n- 'tensorflow-probability~=0.10.0',\n+ 'tensorflow~=2.2.1', # TensorFlow minor releases are as volatile as major\n+ 'tensorflow-probability~=0.10.1',\n ],\n 'torch': ['torch~=1.8'],\n- 'jax': ['jax~=0.2.4', 'jaxlib~=0.1.56'],\n+ 'jax': ['jax~=0.2.8', 'jaxlib~=0.1.58'],\n 'xmlio': [\n- 'uproot3~=3.14',\n+ 'uproot3>=3.14.1',\n 'uproot~=4.0',\n ], # uproot3 required until writing to ROOT supported in uproot4\n 'minuit': ['iminuit~=2.1,<2.4'], # iminuit v2.4.0 behavior needs to be understood\n", "issue": "Validate lower bounds of dependencies in CI\n# Description\r\n\r\nTo ensure that the lower bound of all dependencies are still sufficient for the API used, add a test that installs from a `lower-bound-requirements.txt` that might look something like\r\n\r\n```\r\n# core\r\nscipy==1.4.0\r\nclick==7.0.0\r\ntqdm==4.56.0\r\njsonschema==3.2.0\r\njsonpatch==1.23.0\r\npyyaml==5.1.0\r\n# xmlio\r\nuproot3==3.14.0\r\nuproot==4.0.0\r\n# minuit\r\niminuit==2.1.0\r\n# ...\r\n```\r\n\r\nand then run the test suite.\n", "before_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major\n 'tensorflow-probability~=0.10.0',\n ],\n 'torch': ['torch~=1.8'],\n 'jax': ['jax~=0.2.4', 'jaxlib~=0.1.56'],\n 'xmlio': [\n 'uproot3~=3.14',\n 'uproot~=4.0',\n ], # uproot3 required until writing to ROOT supported in uproot4\n 'minuit': ['iminuit~=2.1,<2.4'], # iminuit v2.4.0 behavior needs to be understood\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted({'matplotlib', 'requests'})\nextras_require['lint'] = sorted({'flake8', 'black'})\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'pytest~=6.0',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'graphviz',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n extras_require['xmlio']\n + [\n 'sphinx>=3.1.2',\n 'sphinxcontrib-bibtex~=2.1',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + [\n 'nbdime',\n 'bump2version',\n 'ipython',\n 'pre-commit',\n 'check-manifest',\n 'codemetapy>=0.3.4',\n 'twine',\n ]\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]}
1,456
281
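One way to act on the issue's lower-bound idea: mechanically rewrite the compatible-release and minimum specifiers from setup.py as exact pins for a `lower-bound-requirements.txt`. This helper is hypothetical, not part of pyhf, and deliberately naive about compound specifiers:

```python
# Hypothetical helper: turn '~='/'>=' specifiers into exact '==' pins so CI
# can install and test the declared lower bounds. count=1 leaves any upper
# bound (e.g. 'iminuit~=2.1,<2.4') untouched; compound specs need review.
import re

def lower_bound_pin(spec: str) -> str:
    return re.sub(r'(~=|>=)', '==', spec, count=1)

assert lower_bound_pin('tensorflow~=2.2.1') == 'tensorflow==2.2.1'
assert lower_bound_pin('uproot3>=3.14.1') == 'uproot3==3.14.1'
```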
gh_patches_debug_723
rasdani/github-patches
git_diff
pypi__warehouse-1177
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Permanent URL (Heroku "No such app" error) I noticed that https://warehouse.python.org/ produces a `Heroku | No such app` error at the moment. Is this intentional? Are we permanently at https://pypi.io/ now? If so, we should probably update the URL in a few places: https://github.com/pypa/warehouse/search?utf8=%E2%9C%93&q=%22warehouse.python.org%22 </issue> <code> [start of warehouse/__about__.py] 1 # Licensed under the Apache License, Version 2.0 (the "License"); 2 # you may not use this file except in compliance with the License. 3 # You may obtain a copy of the License at 4 # 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # 7 # Unless required by applicable law or agreed to in writing, software 8 # distributed under the License is distributed on an "AS IS" BASIS, 9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 # See the License for the specific language governing permissions and 11 # limitations under the License. 12 13 import os.path 14 15 __all__ = [ 16 "__title__", "__summary__", "__uri__", "__version__", "__commit__", 17 "__author__", "__email__", "__license__", "__copyright__", 18 ] 19 20 21 try: 22 base_dir = os.path.dirname(os.path.abspath(__file__)) 23 except NameError: 24 base_dir = None 25 26 27 __title__ = "warehouse" 28 __summary__ = "Next Generation Python Package Repository" 29 __uri__ = "https://warehouse.python.org/" 30 31 __version__ = "15.0.dev0" 32 33 if base_dir is not None and os.path.exists(os.path.join(base_dir, ".commit")): 34 with open(os.path.join(base_dir, ".commit")) as fp: 35 __commit__ = fp.read().strip() 36 else: 37 __commit__ = None 38 39 __author__ = "The Python Packaging Authority" 40 __email__ = "[email protected]" 41 42 __license__ = "Apache License, Version 2.0" 43 __copyright__ = "2015 %s" % __author__ 44 [end of warehouse/__about__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/warehouse/__about__.py b/warehouse/__about__.py --- a/warehouse/__about__.py +++ b/warehouse/__about__.py @@ -26,7 +26,7 @@ __title__ = "warehouse" __summary__ = "Next Generation Python Package Repository" -__uri__ = "https://warehouse.python.org/" +__uri__ = "https://pypi.io/" __version__ = "15.0.dev0"
{"golden_diff": "diff --git a/warehouse/__about__.py b/warehouse/__about__.py\n--- a/warehouse/__about__.py\n+++ b/warehouse/__about__.py\n@@ -26,7 +26,7 @@\n \n __title__ = \"warehouse\"\n __summary__ = \"Next Generation Python Package Repository\"\n-__uri__ = \"https://warehouse.python.org/\"\n+__uri__ = \"https://pypi.io/\"\n \n __version__ = \"15.0.dev0\"\n", "issue": "Permanent URL (Heroku \"No such app\" error)\nI noticed that https://warehouse.python.org/ produces a `Heroku | No such app` error at the moment. Is this intentional? Are we permanently at https://pypi.io/ now?\n\nIf so, we should probably update the URL in a few places: https://github.com/pypa/warehouse/search?utf8=%E2%9C%93&q=%22warehouse.python.org%22\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os.path\n\n__all__ = [\n \"__title__\", \"__summary__\", \"__uri__\", \"__version__\", \"__commit__\",\n \"__author__\", \"__email__\", \"__license__\", \"__copyright__\",\n]\n\n\ntry:\n base_dir = os.path.dirname(os.path.abspath(__file__))\nexcept NameError:\n base_dir = None\n\n\n__title__ = \"warehouse\"\n__summary__ = \"Next Generation Python Package Repository\"\n__uri__ = \"https://warehouse.python.org/\"\n\n__version__ = \"15.0.dev0\"\n\nif base_dir is not None and os.path.exists(os.path.join(base_dir, \".commit\")):\n with open(os.path.join(base_dir, \".commit\")) as fp:\n __commit__ = fp.read().strip()\nelse:\n __commit__ = None\n\n__author__ = \"The Python Packaging Authority\"\n__email__ = \"[email protected]\"\n\n__license__ = \"Apache License, Version 2.0\"\n__copyright__ = \"2015 %s\" % __author__\n", "path": "warehouse/__about__.py"}]}
1,062
105
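The issue above asks for a sweep of remaining references to the old host; a small hedged scan script (the `*.py` glob is illustrative, a real sweep would also cover templates and docs):

```python
# Illustrative sweep for the stale host in a checkout of the repository.
import pathlib

STALE = "warehouse.python.org"
for path in pathlib.Path(".").rglob("*.py"):
    try:
        text = path.read_text(errors="ignore")
    except OSError:
        continue
    if STALE in text:
        print(path)
```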
gh_patches_debug_27547
rasdani/github-patches
git_diff
conan-io__conan-300
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [requires} in conanfile.txt gives no error The typo: ``` [requires} ... ``` in `conanfile.txt` does not give any error indication at all when running `conan install ...`. The way to see that something is wrong is that it doesn't do anything, but finding the cause was not as easy as it could've been. This is on 0.9.2 on Ubuntu 15.10. </issue> <code> [start of conans/util/config_parser.py] 1 import re 2 from conans.errors import ConanException 3 4 5 class ConfigParser(object): 6 """ util class to load a file with sections as [section1] 7 checking the values of those sections, and returns each section 8 as parser.section 9 Currently used in ConanInfo and ConanFileTextLoader 10 """ 11 def __init__(self, text, allowed_fields=None, parse_lines=False): 12 self._sections = {} 13 self._allowed_fields = allowed_fields or [] 14 pattern = re.compile("^\[([a-z_]{2,50})\]") 15 current_lines = [] 16 for line in text.splitlines(): 17 line = line.strip() 18 if not line or line[0] == '#': 19 continue 20 m = pattern.match(line) 21 if m: 22 group = m.group(1) 23 if self._allowed_fields and group not in self._allowed_fields: 24 raise ConanException("ConfigParser: Unrecognized field '%s'" % group) 25 current_lines = [] 26 self._sections[group] = current_lines 27 else: 28 if parse_lines: 29 line = line.split('#')[0] 30 line = line.strip() 31 current_lines.append(line) 32 33 def __getattr__(self, name): 34 if name in self._sections: 35 return "\n".join(self._sections[name]) 36 else: 37 if self._allowed_fields and name in self._allowed_fields: 38 return "" 39 else: 40 raise ConanException("ConfigParser: Unrecognized field '%s'" % name) 41 [end of conans/util/config_parser.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/conans/util/config_parser.py b/conans/util/config_parser.py --- a/conans/util/config_parser.py +++ b/conans/util/config_parser.py @@ -12,19 +12,26 @@ self._sections = {} self._allowed_fields = allowed_fields or [] pattern = re.compile("^\[([a-z_]{2,50})\]") - current_lines = [] + current_lines = None for line in text.splitlines(): line = line.strip() if not line or line[0] == '#': continue - m = pattern.match(line) - if m: - group = m.group(1) - if self._allowed_fields and group not in self._allowed_fields: - raise ConanException("ConfigParser: Unrecognized field '%s'" % group) + field = None + if line[0] == '[': + m = pattern.match(line) + if m: + field = m.group(1) + else: + raise ConanException("ConfigParser: Bad syntax '%s'" % line) + if field: + if self._allowed_fields and field not in self._allowed_fields: + raise ConanException("ConfigParser: Unrecognized field '%s'" % field) current_lines = [] - self._sections[group] = current_lines + self._sections[field] = current_lines else: + if current_lines is None: + raise ConanException("ConfigParser: Unexpected line '%s'" % line) if parse_lines: line = line.split('#')[0] line = line.strip()
{"golden_diff": "diff --git a/conans/util/config_parser.py b/conans/util/config_parser.py\n--- a/conans/util/config_parser.py\n+++ b/conans/util/config_parser.py\n@@ -12,19 +12,26 @@\n self._sections = {}\n self._allowed_fields = allowed_fields or []\n pattern = re.compile(\"^\\[([a-z_]{2,50})\\]\")\n- current_lines = []\n+ current_lines = None\n for line in text.splitlines():\n line = line.strip()\n if not line or line[0] == '#':\n continue\n- m = pattern.match(line)\n- if m:\n- group = m.group(1)\n- if self._allowed_fields and group not in self._allowed_fields:\n- raise ConanException(\"ConfigParser: Unrecognized field '%s'\" % group)\n+ field = None\n+ if line[0] == '[':\n+ m = pattern.match(line)\n+ if m:\n+ field = m.group(1)\n+ else:\n+ raise ConanException(\"ConfigParser: Bad syntax '%s'\" % line)\n+ if field:\n+ if self._allowed_fields and field not in self._allowed_fields:\n+ raise ConanException(\"ConfigParser: Unrecognized field '%s'\" % field)\n current_lines = []\n- self._sections[group] = current_lines\n+ self._sections[field] = current_lines\n else:\n+ if current_lines is None:\n+ raise ConanException(\"ConfigParser: Unexpected line '%s'\" % line)\n if parse_lines:\n line = line.split('#')[0]\n line = line.strip()\n", "issue": "[requires} in conanfile.txt gives no error\nThe typo:\n\n```\n[requires}\n...\n```\n\nin `conanfile.txt` does not give any error indication at all when running `conan install ...`. The way to see that something is wrong is that it doesn't do anything, but finding the cause was not as easy as it could've been.\n\nThis is on 0.9.2 on Ubuntu 15.10.\n\n", "before_files": [{"content": "import re\nfrom conans.errors import ConanException\n\n\nclass ConfigParser(object):\n \"\"\" util class to load a file with sections as [section1]\n checking the values of those sections, and returns each section\n as parser.section\n Currently used in ConanInfo and ConanFileTextLoader\n \"\"\"\n def __init__(self, text, allowed_fields=None, parse_lines=False):\n self._sections = {}\n self._allowed_fields = allowed_fields or []\n pattern = re.compile(\"^\\[([a-z_]{2,50})\\]\")\n current_lines = []\n for line in text.splitlines():\n line = line.strip()\n if not line or line[0] == '#':\n continue\n m = pattern.match(line)\n if m:\n group = m.group(1)\n if self._allowed_fields and group not in self._allowed_fields:\n raise ConanException(\"ConfigParser: Unrecognized field '%s'\" % group)\n current_lines = []\n self._sections[group] = current_lines\n else:\n if parse_lines:\n line = line.split('#')[0]\n line = line.strip()\n current_lines.append(line)\n\n def __getattr__(self, name):\n if name in self._sections:\n return \"\\n\".join(self._sections[name])\n else:\n if self._allowed_fields and name in self._allowed_fields:\n return \"\"\n else:\n raise ConanException(\"ConfigParser: Unrecognized field '%s'\" % name)\n", "path": "conans/util/config_parser.py"}]}
1021
358
gh_patches_debug_30732
rasdani/github-patches
git_diff
ManageIQ__integration_tests-194
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Unexpected popup in server_roles fixture The server_roles fixture attempts to explicitly navigate to the current server's settings page before clicking any checkboxen, but the call to do that (`settings_pg.click_on_current_server_tree_node()`) occasionally fails because a popup appears. We'll need to make sure the popup can be safely ignored, and conditionally ignore it if it appears. Until this is fixed, a workaround is to assume that navigation with the `cnf_configuration_pg` fixture will land on the current server settings page: ``` diff diff --git a/fixtures/server_roles.py b/fixtures/server_roles.py index dfcb574..2b99836 100644 --- a/fixtures/server_roles.py +++ b/fixtures/server_roles.py @@ -94,7 +94,9 @@ def server_roles(fixtureconf, cfme_data, cnf_configuration_pg): # Nav to the settings tab settings_pg = cnf_configuration_pg.click_on_settings() - server_settings_pg = settings_pg.click_on_current_server_tree_node() + #server_settings_pg = settings_pg.click_on_current_server_tree_node() + from pages.configuration_subpages.settings_subpages.server_settings import ServerSettings + server_settings_pg = ServerSettings(settings_pg.testsetup) # sst is a configuration_subpages.settings_subpages.server_settings_subpages. # server_settings_tab.ServerSettingsTab sst = server_settings_pg.click_on_server_tab() ``` </issue> <code> [start of fixtures/server_roles.py] 1 import logging 2 3 import pytest 4 from unittestzero import Assert 5 6 logger = logging.getLogger(__name__) 7 8 default_roles = ( 9 'database_operations', 10 'event', 11 'ems_inventory', 12 'ems_operations', 13 'reporting', 14 'scheduler', 15 'smartstate', 16 'user_interface', 17 'web_services', 18 ) 19 20 @pytest.fixture 21 def server_roles(fixtureconf, cfme_data, cnf_configuration_pg): 22 """Set the server roles based on a list of roles attached to the test using this fixture 23 24 Usage examples: 25 26 Pass the desired roles in to the "server_roles_set" decorator: 27 28 _roles = ('database_operations', 'event', 'user_interface', 'web_services') 29 30 @pytest.mark.fixtureconf(server_roles=_roles) 31 def test_appliance_roles(server_roles): 32 assert len(server_roles) == 4 33 34 Roles can be pulled from the cfme_data fixture using yaml selectors, 35 which will do a 'set' with the list of roles found at the target path: 36 37 @pytest.mark.fixtureconf(server_roles_cfmedata=('level1', 'sublevel2')) 38 def test_appliance_roles(server_roles): 39 assert len(server_roles) == 3 40 41 Which corresponds to this yaml layout: 42 43 level1: 44 sublevel2: 45 - database_operations 46 - user_interface 47 - web_services 48 49 To ensure the appliance has the default roles: 50 51 from fixtures.server_roles import default_roles 52 53 @pytest.mark.fixtureconf(server_roles=default_roles) 54 def test_appliance_roles(server_roles): 55 assert len(server_roles) == len(default_roles) 56 57 List of server role names currently exposed in the CFME interface: 58 59 - automate 60 - ems_metrics_coordinator 61 - ems_metrics_collector 62 - ems_metrics_processor 63 - database_operations 64 - database_synchronization 65 - event 66 - ems_inventory 67 - ems_operations 68 - notifier 69 - reporting 70 - scheduler 71 - smartproxy 72 - smartstate 73 - user_interface 74 - web_services 75 76 """ 77 78 if 'server_roles' in fixtureconf: 79 roles_list = list(fixtureconf['server_roles']) 80 elif 'server_roles_cfmedata' in fixtureconf: 81 roles_list = cfme_data.data 82 # Drills down into cfme_data YAML by selector, expecting a list 83 # of roles at the end. A KeyError here probably means the YAMe 84 # selector is wrong 85 for selector in fixtureconf['server_roles_cfmedata']: 86 roles_list = roles_list[selector] 87 else: 88 raise Exception('server_roles config not found on test callable') 89 90 # Deselecting the user interface role is really un-fun, and is 91 # counterproductive in the middle of user interface testing. 92 if 'user_interface' not in roles_list: 93 raise Exception('Refusing to remove the user_interface role') 94 95 # Nav to the settings tab 96 settings_pg = cnf_configuration_pg.click_on_settings() 97 server_settings_pg = settings_pg.click_on_current_server_tree_node() 98 # sst is a configuration_subpages.settings_subpages.server_settings_subpages. 99 # server_settings_tab.ServerSettingsTab 100 sst = server_settings_pg.click_on_server_tab() 101 102 # Set the roles! 103 if sorted(sst.selected_server_role_names) != sorted(roles_list): 104 sst.set_server_roles(roles_list) 105 sst.save() 106 sst._wait_for_results_refresh() 107 else: 108 logger.info('Server roles already match configured fixture roles, not changing server roles') 109 110 # If this assert fails, check roles names for typos or other minor differences 111 Assert.equal(sorted(sst.selected_server_role_names), sorted(roles_list)) 112 113 return sst.selected_server_role_names 114 115 [end of fixtures/server_roles.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/fixtures/server_roles.py b/fixtures/server_roles.py --- a/fixtures/server_roles.py +++ b/fixtures/server_roles.py @@ -17,6 +17,7 @@ 'web_services', ) + @pytest.fixture def server_roles(fixtureconf, cfme_data, cnf_configuration_pg): """Set the server roles based on a list of roles attached to the test using this fixture @@ -94,7 +95,11 @@ # Nav to the settings tab settings_pg = cnf_configuration_pg.click_on_settings() - server_settings_pg = settings_pg.click_on_current_server_tree_node() + # Workaround to rudely bypass a popup that sometimes appears for + # unknown reasons. + # See also: https://github.com/RedHatQE/cfme_tests/issues/168 + from pages.configuration_subpages.settings_subpages.server_settings import ServerSettings + server_settings_pg = ServerSettings(settings_pg.testsetup) # sst is a configuration_subpages.settings_subpages.server_settings_subpages. # server_settings_tab.ServerSettingsTab sst = server_settings_pg.click_on_server_tab() @@ -105,10 +110,9 @@ sst.save() sst._wait_for_results_refresh() else: - logger.info('Server roles already match configured fixture roles, not changing server roles') + logger.info('Server roles match configured fixture roles, not changing server roles') # If this assert fails, check roles names for typos or other minor differences Assert.equal(sorted(sst.selected_server_role_names), sorted(roles_list)) return sst.selected_server_role_names -
{"golden_diff": "diff --git a/fixtures/server_roles.py b/fixtures/server_roles.py\n--- a/fixtures/server_roles.py\n+++ b/fixtures/server_roles.py\n@@ -17,6 +17,7 @@\n 'web_services',\n )\n \n+\n @pytest.fixture\n def server_roles(fixtureconf, cfme_data, cnf_configuration_pg):\n \"\"\"Set the server roles based on a list of roles attached to the test using this fixture\n@@ -94,7 +95,11 @@\n \n # Nav to the settings tab\n settings_pg = cnf_configuration_pg.click_on_settings()\n- server_settings_pg = settings_pg.click_on_current_server_tree_node()\n+ # Workaround to rudely bypass a popup that sometimes appears for\n+ # unknown reasons.\n+ # See also: https://github.com/RedHatQE/cfme_tests/issues/168\n+ from pages.configuration_subpages.settings_subpages.server_settings import ServerSettings\n+ server_settings_pg = ServerSettings(settings_pg.testsetup)\n # sst is a configuration_subpages.settings_subpages.server_settings_subpages.\n # server_settings_tab.ServerSettingsTab\n sst = server_settings_pg.click_on_server_tab()\n@@ -105,10 +110,9 @@\n sst.save()\n sst._wait_for_results_refresh()\n else:\n- logger.info('Server roles already match configured fixture roles, not changing server roles')\n+ logger.info('Server roles match configured fixture roles, not changing server roles')\n \n # If this assert fails, check roles names for typos or other minor differences\n Assert.equal(sorted(sst.selected_server_role_names), sorted(roles_list))\n \n return sst.selected_server_role_names\n-\n", "issue": "Unexpected popup in server_roles fixture\nThe server_roles fixture attempts to explicitly navigate to the current server's settings page before clicking any checkboxen, but the call to do that (`settings_pg.click_on_current_server_tree_node()`) occasionally fails because a popup appears. 
We'll need to make sure the popup can be safely ignored, and conditionally ignore it if it appears.\n\nUntil this is fixed, a workaround is to assume that navigation with the `cnf_configuration_pg` fixture will land on the current server settings page:\n\n``` diff\ndiff --git a/fixtures/server_roles.py b/fixtures/server_roles.py\nindex dfcb574..2b99836 100644\n--- a/fixtures/server_roles.py\n+++ b/fixtures/server_roles.py\n@@ -94,7 +94,9 @@ def server_roles(fixtureconf, cfme_data, cnf_configuration_pg):\n\n # Nav to the settings tab\n settings_pg = cnf_configuration_pg.click_on_settings()\n- server_settings_pg = settings_pg.click_on_current_server_tree_node()\n+ #server_settings_pg = settings_pg.click_on_current_server_tree_node()\n+ from pages.configuration_subpages.settings_subpages.server_settings import ServerSettings\n+ server_settings_pg = ServerSettings(settings_pg.testsetup)\n # sst is a configuration_subpages.settings_subpages.server_settings_subpages.\n # server_settings_tab.ServerSettingsTab\n sst = server_settings_pg.click_on_server_tab()\n```\n\n", "before_files": [{"content": "import logging\n\nimport pytest\nfrom unittestzero import Assert\n\nlogger = logging.getLogger(__name__)\n\ndefault_roles = (\n 'database_operations',\n 'event',\n 'ems_inventory',\n 'ems_operations',\n 'reporting',\n 'scheduler',\n 'smartstate',\n 'user_interface',\n 'web_services',\n)\n\[email protected]\ndef server_roles(fixtureconf, cfme_data, cnf_configuration_pg):\n \"\"\"Set the server roles based on a list of roles attached to the test using this fixture\n\n Usage examples:\n\n Pass the desired roles in to the \"server_roles_set\" decorator:\n\n _roles = ('database_operations', 'event', 'user_interface', 'web_services')\n\n @pytest.mark.fixtureconf(server_roles=_roles)\n def test_appliance_roles(server_roles):\n assert len(server_roles) == 4\n\n Roles can be pulled from the cfme_data fixture using yaml selectors,\n which will do a 'set' with the list of roles found at the target path:\n\n @pytest.mark.fixtureconf(server_roles_cfmedata=('level1', 'sublevel2'))\n def test_appliance_roles(server_roles):\n assert len(server_roles) == 3\n\n Which corresponds to this yaml layout:\n\n level1:\n sublevel2:\n - database_operations\n - user_interface\n - web_services\n\n To ensure the appliance has the default roles:\n\n from fixtures.server_roles import default_roles\n\n @pytest.mark.fixtureconf(server_roles=default_roles)\n def test_appliance_roles(server_roles):\n assert len(server_roles) == len(default_roles)\n\n List of server role names currently exposed in the CFME interface:\n\n - automate\n - ems_metrics_coordinator\n - ems_metrics_collector\n - ems_metrics_processor\n - database_operations\n - database_synchronization\n - event\n - ems_inventory\n - ems_operations\n - notifier\n - reporting\n - scheduler\n - smartproxy\n - smartstate\n - user_interface\n - web_services\n\n \"\"\"\n\n if 'server_roles' in fixtureconf:\n roles_list = list(fixtureconf['server_roles'])\n elif 'server_roles_cfmedata' in fixtureconf:\n roles_list = cfme_data.data\n # Drills down into cfme_data YAML by selector, expecting a list\n # of roles at the end. 
A KeyError here probably means the YAMe\n # selector is wrong\n for selector in fixtureconf['server_roles_cfmedata']:\n roles_list = roles_list[selector]\n else:\n raise Exception('server_roles config not found on test callable')\n\n # Deselecting the user interface role is really un-fun, and is\n # counterproductive in the middle of user interface testing.\n if 'user_interface' not in roles_list:\n raise Exception('Refusing to remove the user_interface role')\n\n # Nav to the settings tab\n settings_pg = cnf_configuration_pg.click_on_settings()\n server_settings_pg = settings_pg.click_on_current_server_tree_node()\n # sst is a configuration_subpages.settings_subpages.server_settings_subpages.\n # server_settings_tab.ServerSettingsTab\n sst = server_settings_pg.click_on_server_tab()\n\n # Set the roles!\n if sorted(sst.selected_server_role_names) != sorted(roles_list):\n sst.set_server_roles(roles_list)\n sst.save()\n sst._wait_for_results_refresh()\n else:\n logger.info('Server roles already match configured fixture roles, not changing server roles')\n\n # If this assert fails, check roles names for typos or other minor differences\n Assert.equal(sorted(sst.selected_server_role_names), sorted(roles_list))\n\n return sst.selected_server_role_names\n\n", "path": "fixtures/server_roles.py"}]}
1890
364
gh_patches_debug_18842
rasdani/github-patches
git_diff
scikit-image__scikit-image-6169
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> broken match histograms gallery example ## Description This bug was reported here: https://forum.image.sc/t/exposure-match-histograms-broken/61682 Most likely this is due to some unintended change in scaling that was introduced when adding the float32 support, so hopefully it is an easy fix. I can take a look soon. ## Way to reproduce Run the following demo with v0.19.0 or v0.19.1: https://scikit-image.org/docs/stable/auto_examples/color_exposure/plot_histogram_matching.html#sphx-glr-auto-examples-color-exposure-plot-histogram-matching-py </issue> <code> [start of skimage/exposure/histogram_matching.py] 1 import numpy as np 2 3 from .._shared import utils 4 5 6 def _match_cumulative_cdf(source, template): 7 """ 8 Return modified source array so that the cumulative density function of 9 its values matches the cumulative density function of the template. 10 """ 11 src_values, src_unique_indices, src_counts = np.unique(source.ravel(), 12 return_inverse=True, 13 return_counts=True) 14 tmpl_values, tmpl_counts = np.unique(template.ravel(), return_counts=True) 15 16 # calculate normalized quantiles for each array 17 src_quantiles = np.cumsum(src_counts) / source.size 18 tmpl_quantiles = np.cumsum(tmpl_counts) / template.size 19 20 interp_a_values = np.interp(src_quantiles, tmpl_quantiles, tmpl_values) 21 return interp_a_values[src_unique_indices].reshape(source.shape) 22 23 24 @utils.channel_as_last_axis(channel_arg_positions=(0, 1)) 25 @utils.deprecate_multichannel_kwarg() 26 def match_histograms(image, reference, *, channel_axis=None, 27 multichannel=False): 28 """Adjust an image so that its cumulative histogram matches that of another. 29 30 The adjustment is applied separately for each channel. 31 32 Parameters 33 ---------- 34 image : ndarray 35 Input image. Can be gray-scale or in color. 36 reference : ndarray 37 Image to match histogram of. Must have the same number of channels as 38 image. 39 channel_axis : int or None, optional 40 If None, the image is assumed to be a grayscale (single channel) image. 41 Otherwise, this parameter indicates which axis of the array corresponds 42 to channels. 43 multichannel : bool, optional 44 Apply the matching separately for each channel. This argument is 45 deprecated: specify `channel_axis` instead. 46 47 Returns 48 ------- 49 matched : ndarray 50 Transformed input image. 51 52 Raises 53 ------ 54 ValueError 55 Thrown when the number of channels in the input image and the reference 56 differ. 57 58 References 59 ---------- 60 .. [1] http://paulbourke.net/miscellaneous/equalisation/ 61 62 """ 63 if image.ndim != reference.ndim: 64 raise ValueError('Image and reference must have the same number ' 65 'of channels.') 66 out_dtype = utils._supported_float_type(image.dtype) 67 68 if channel_axis is not None: 69 if image.shape[-1] != reference.shape[-1]: 70 raise ValueError('Number of channels in the input image and ' 71 'reference image must match!') 72 73 matched = np.empty(image.shape, dtype=image.dtype) 74 for channel in range(image.shape[-1]): 75 matched_channel = _match_cumulative_cdf(image[..., channel], 76 reference[..., channel]) 77 matched[..., channel] = matched_channel 78 else: 79 # _match_cumulative_cdf will always return float64 due to np.interp 80 matched = _match_cumulative_cdf(image, reference) 81 82 return matched.astype(out_dtype, copy=False) 83 [end of skimage/exposure/histogram_matching.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/skimage/exposure/histogram_matching.py b/skimage/exposure/histogram_matching.py --- a/skimage/exposure/histogram_matching.py +++ b/skimage/exposure/histogram_matching.py @@ -63,7 +63,6 @@ if image.ndim != reference.ndim: raise ValueError('Image and reference must have the same number ' 'of channels.') - out_dtype = utils._supported_float_type(image.dtype) if channel_axis is not None: if image.shape[-1] != reference.shape[-1]: @@ -79,4 +78,8 @@ # _match_cumulative_cdf will always return float64 due to np.interp matched = _match_cumulative_cdf(image, reference) - return matched.astype(out_dtype, copy=False) + if matched.dtype.kind == 'f': + # output a float32 result when the input is float16 or float32 + out_dtype = utils._supported_float_type(image.dtype) + matched = matched.astype(out_dtype, copy=False) + return matched
{"golden_diff": "diff --git a/skimage/exposure/histogram_matching.py b/skimage/exposure/histogram_matching.py\n--- a/skimage/exposure/histogram_matching.py\n+++ b/skimage/exposure/histogram_matching.py\n@@ -63,7 +63,6 @@\n if image.ndim != reference.ndim:\n raise ValueError('Image and reference must have the same number '\n 'of channels.')\n- out_dtype = utils._supported_float_type(image.dtype)\n \n if channel_axis is not None:\n if image.shape[-1] != reference.shape[-1]:\n@@ -79,4 +78,8 @@\n # _match_cumulative_cdf will always return float64 due to np.interp\n matched = _match_cumulative_cdf(image, reference)\n \n- return matched.astype(out_dtype, copy=False)\n+ if matched.dtype.kind == 'f':\n+ # output a float32 result when the input is float16 or float32\n+ out_dtype = utils._supported_float_type(image.dtype)\n+ matched = matched.astype(out_dtype, copy=False)\n+ return matched\n", "issue": "broken match histograms gallery example\n## Description\r\n\r\nThis bug was reported here: https://forum.image.sc/t/exposure-match-histograms-broken/61682\r\n\r\nMost likely this is due to some unintended change in scaling that was introduced when adding the float32 support, so hopefully it is an easy fix. I can take a look soon.\r\n\r\n## Way to reproduce\r\n\r\nRun the following demo with v0.19.0 or v0.19.1: https://scikit-image.org/docs/stable/auto_examples/color_exposure/plot_histogram_matching.html#sphx-glr-auto-examples-color-exposure-plot-histogram-matching-py\r\n\n", "before_files": [{"content": "import numpy as np\n\nfrom .._shared import utils\n\n\ndef _match_cumulative_cdf(source, template):\n \"\"\"\n Return modified source array so that the cumulative density function of\n its values matches the cumulative density function of the template.\n \"\"\"\n src_values, src_unique_indices, src_counts = np.unique(source.ravel(),\n return_inverse=True,\n return_counts=True)\n tmpl_values, tmpl_counts = np.unique(template.ravel(), return_counts=True)\n\n # calculate normalized quantiles for each array\n src_quantiles = np.cumsum(src_counts) / source.size\n tmpl_quantiles = np.cumsum(tmpl_counts) / template.size\n\n interp_a_values = np.interp(src_quantiles, tmpl_quantiles, tmpl_values)\n return interp_a_values[src_unique_indices].reshape(source.shape)\n\n\[email protected]_as_last_axis(channel_arg_positions=(0, 1))\[email protected]_multichannel_kwarg()\ndef match_histograms(image, reference, *, channel_axis=None,\n multichannel=False):\n \"\"\"Adjust an image so that its cumulative histogram matches that of another.\n\n The adjustment is applied separately for each channel.\n\n Parameters\n ----------\n image : ndarray\n Input image. Can be gray-scale or in color.\n reference : ndarray\n Image to match histogram of. Must have the same number of channels as\n image.\n channel_axis : int or None, optional\n If None, the image is assumed to be a grayscale (single channel) image.\n Otherwise, this parameter indicates which axis of the array corresponds\n to channels.\n multichannel : bool, optional\n Apply the matching separately for each channel. This argument is\n deprecated: specify `channel_axis` instead.\n\n Returns\n -------\n matched : ndarray\n Transformed input image.\n\n Raises\n ------\n ValueError\n Thrown when the number of channels in the input image and the reference\n differ.\n\n References\n ----------\n .. 
[1] http://paulbourke.net/miscellaneous/equalisation/\n\n \"\"\"\n if image.ndim != reference.ndim:\n raise ValueError('Image and reference must have the same number '\n 'of channels.')\n out_dtype = utils._supported_float_type(image.dtype)\n\n if channel_axis is not None:\n if image.shape[-1] != reference.shape[-1]:\n raise ValueError('Number of channels in the input image and '\n 'reference image must match!')\n\n matched = np.empty(image.shape, dtype=image.dtype)\n for channel in range(image.shape[-1]):\n matched_channel = _match_cumulative_cdf(image[..., channel],\n reference[..., channel])\n matched[..., channel] = matched_channel\n else:\n # _match_cumulative_cdf will always return float64 due to np.interp\n matched = _match_cumulative_cdf(image, reference)\n\n return matched.astype(out_dtype, copy=False)\n", "path": "skimage/exposure/histogram_matching.py"}]}
1457
240
gh_patches_debug_49612
rasdani/github-patches
git_diff
StackStorm__st2-5306
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add a CI lint task to check st2client's README.md We need to make sure that the st2client `README.rst` file is acceptable to PyPI, since any syntax errors in it will cause the `push_st2client` task of the `st2cd.st2_finalize_release` workflow to fail. We can check the syntax using the same renderer that PyPI itself uses: ```bash # Use the same README renderer that PyPI uses to catch syntax issues in the # README.rst file # st2client uses README.rst # https://pypi.org/help/#description-content-type # https://pypi.org/project/readme-renderer # https://packaging.python.org/tutorials/packaging-projects/#description echo "Checking README.rst syntax" virtualenv venv-st2client-readme-checker . venv-st2client-readme-checker/bin/activate pip install --upgrade readme_renderer python -m readme_renderer README.rst deactivate ``` It would be nice if we could catch these errors before release, which means that we should create a step in our CI tooling to check it before any bad changes get merged. </issue> <code> [start of st2client/setup.py] 1 #!/usr/bin/env python3 2 # Copyright 2020 The StackStorm Authors. 3 # Copyright 2019 Extreme Networks, Inc. 4 # 5 # Licensed under the Apache License, Version 2.0 (the "License"); 6 # you may not use this file except in compliance with the License. 7 # You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, software 12 # distributed under the License is distributed on an "AS IS" BASIS, 13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 # See the License for the specific language governing permissions and 15 # limitations under the License. 16 17 import os.path 18 19 from setuptools import setup, find_packages 20 21 from dist_utils import check_pip_version 22 from dist_utils import fetch_requirements 23 from dist_utils import apply_vagrant_workaround 24 25 from st2client import __version__ 26 27 check_pip_version() 28 29 ST2_COMPONENT = "st2client" 30 BASE_DIR = os.path.dirname(os.path.abspath(__file__)) 31 REQUIREMENTS_FILE = os.path.join(BASE_DIR, "requirements.txt") 32 README_FILE = os.path.join(BASE_DIR, "README.rst") 33 34 install_reqs, dep_links = fetch_requirements(REQUIREMENTS_FILE) 35 apply_vagrant_workaround() 36 37 with open(README_FILE) as f: 38 readme = f.read() 39 40 setup( 41 name=ST2_COMPONENT, 42 version=__version__, 43 description=( 44 "Python client library and CLI for the StackStorm (st2) event-driven " 45 "automation platform." 46 ), 47 long_description=readme, 48 author="StackStorm", 49 author_email="[email protected]", 50 url="https://stackstorm.com/", 51 classifiers=[ 52 "Development Status :: 5 - Production/Stable", 53 "Intended Audience :: Information Technology", 54 "Intended Audience :: Developers", 55 "Intended Audience :: System Administrators", 56 "License :: OSI Approved :: Apache Software License", 57 "Operating System :: POSIX :: Linux", 58 "Programming Language :: Python", 59 "Programming Language :: Python :: 3", 60 "Programming Language :: Python :: 3.6", 61 ], 62 install_requires=install_reqs, 63 dependency_links=dep_links, 64 test_suite=ST2_COMPONENT, 65 zip_safe=False, 66 include_package_data=True, 67 packages=find_packages(exclude=["setuptools", "tests"]), 68 entry_points={"console_scripts": ["st2 = st2client.shell:main"]}, 69 project_urls={ 70 "Pack Exchange": "https://exchange.stackstorm.org", 71 "Repository": "https://github.com/StackStorm/st2", 72 "Documentation": "https://docs.stackstorm.com", 73 "Community": "https://stackstorm.com/community-signup", 74 "Questions": "https://forum.stackstorm.com/", 75 "Donate": "https://funding.communitybridge.org/projects/stackstorm", 76 "News/Blog": "https://stackstorm.com/blog", 77 "Security": "https://docs.stackstorm.com/latest/security.html", 78 "Bug Reports": "https://github.com/StackStorm/st2/issues", 79 }, 80 ) 81 [end of st2client/setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/st2client/setup.py b/st2client/setup.py --- a/st2client/setup.py +++ b/st2client/setup.py @@ -45,6 +45,7 @@ "automation platform." ), long_description=readme, + long_description_content_type="text/x-rst", author="StackStorm", author_email="[email protected]", url="https://stackstorm.com/",
{"golden_diff": "diff --git a/st2client/setup.py b/st2client/setup.py\n--- a/st2client/setup.py\n+++ b/st2client/setup.py\n@@ -45,6 +45,7 @@\n \"automation platform.\"\n ),\n long_description=readme,\n+ long_description_content_type=\"text/x-rst\",\n author=\"StackStorm\",\n author_email=\"[email protected]\",\n url=\"https://stackstorm.com/\",\n", "issue": "Add a CI lint task to check st2client's README.md\nWe need to make sure that the st2client `README.rst` file is acceptable to PyPI, since any syntax errors in it will cause the `push_st2client` task of the `st2cd.st2_finalize_release` workflow to fail.\r\n\r\nWe can check the syntax using the same renderer that PyPI itself uses:\r\n\r\n```bash\r\n# Use the same README renderer that PyPI uses to catch syntax issues in the\r\n# README.rst file # st2client uses README.rst\r\n# https://pypi.org/help/#description-content-type\r\n# https://pypi.org/project/readme-renderer\r\n# https://packaging.python.org/tutorials/packaging-projects/#description\r\necho \"Checking README.rst syntax\"\r\nvirtualenv venv-st2client-readme-checker\r\n. venv-st2client-readme-checker/bin/activate\r\npip install --upgrade readme_renderer\r\npython -m readme_renderer README.rst\r\ndeactivate\r\n```\r\n\r\nIt would be nice if we could catch these errors before release, which means that we should create a step in our CI tooling to check it before any bad changes get merged.\n", "before_files": [{"content": "#!/usr/bin/env python3\n# Copyright 2020 The StackStorm Authors.\n# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os.path\n\nfrom setuptools import setup, find_packages\n\nfrom dist_utils import check_pip_version\nfrom dist_utils import fetch_requirements\nfrom dist_utils import apply_vagrant_workaround\n\nfrom st2client import __version__\n\ncheck_pip_version()\n\nST2_COMPONENT = \"st2client\"\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nREQUIREMENTS_FILE = os.path.join(BASE_DIR, \"requirements.txt\")\nREADME_FILE = os.path.join(BASE_DIR, \"README.rst\")\n\ninstall_reqs, dep_links = fetch_requirements(REQUIREMENTS_FILE)\napply_vagrant_workaround()\n\nwith open(README_FILE) as f:\n readme = f.read()\n\nsetup(\n name=ST2_COMPONENT,\n version=__version__,\n description=(\n \"Python client library and CLI for the StackStorm (st2) event-driven \"\n \"automation platform.\"\n ),\n long_description=readme,\n author=\"StackStorm\",\n author_email=\"[email protected]\",\n url=\"https://stackstorm.com/\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Information Technology\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n ],\n install_requires=install_reqs,\n dependency_links=dep_links,\n 
test_suite=ST2_COMPONENT,\n zip_safe=False,\n include_package_data=True,\n packages=find_packages(exclude=[\"setuptools\", \"tests\"]),\n entry_points={\"console_scripts\": [\"st2 = st2client.shell:main\"]},\n project_urls={\n \"Pack Exchange\": \"https://exchange.stackstorm.org\",\n \"Repository\": \"https://github.com/StackStorm/st2\",\n \"Documentation\": \"https://docs.stackstorm.com\",\n \"Community\": \"https://stackstorm.com/community-signup\",\n \"Questions\": \"https://forum.stackstorm.com/\",\n \"Donate\": \"https://funding.communitybridge.org/projects/stackstorm\",\n \"News/Blog\": \"https://stackstorm.com/blog\",\n \"Security\": \"https://docs.stackstorm.com/latest/security.html\",\n \"Bug Reports\": \"https://github.com/StackStorm/st2/issues\",\n },\n)\n", "path": "st2client/setup.py"}]}
1602
95
gh_patches_debug_1313
rasdani/github-patches
git_diff
readthedocs__readthedocs.org-4853
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Confusing error message to end user In https://github.com/rtfd/readthedocs.org/issues/4071#issuecomment-405939492 I realized that we are saying that we have a problem parsing the YAML file but the problem is in fact in one of the options set from the web admin dashboard. Example: ![captura de pantalla_2018-07-18_10-57-36](https://user-images.githubusercontent.com/244656/42886232-647bedbc-8a79-11e8-8aca-fe2430139fe0.png) There is no `requirements_file` entry in the YAML file (https://github.com/geopandas/geopandas/blob/master/readthedocs.yml) but it exists under the `Admin -> Advanced Settings` field form. We need to improve this error to something more user-friendly that expresses the real error. It's not an error on parsing the YAML file. The file was parsed properly, but the problem is with one of the values from one of the fields. </issue> <code> [start of readthedocs/doc_builder/exceptions.py] 1 # -*- coding: utf-8 -*- 2 """Exceptions raised when building documentation.""" 3 4 from __future__ import division, print_function, unicode_literals 5 6 from django.utils.translation import ugettext_noop 7 8 9 class BuildEnvironmentException(Exception): 10 message = None 11 status_code = None 12 13 def __init__(self, message=None, **kwargs): 14 self.status_code = kwargs.pop('status_code', None) or self.status_code or 1 15 message = message or self.get_default_message() 16 super(BuildEnvironmentException, self).__init__(message, **kwargs) 17 18 def get_default_message(self): 19 return self.message 20 21 22 class BuildEnvironmentError(BuildEnvironmentException): 23 GENERIC_WITH_BUILD_ID = ugettext_noop( 24 'There was a problem with Read the Docs while building your documentation. ' 25 'Please try again later. ' 26 'However, if this problem persists, ' 27 'please report this to us with your build id ({build_id}).', 28 ) 29 30 31 class BuildEnvironmentCreationFailed(BuildEnvironmentError): 32 message = ugettext_noop('Build environment creation failed') 33 34 35 class VersionLockedError(BuildEnvironmentError): 36 message = ugettext_noop('Version locked, retrying in 5 minutes.') 37 status_code = 423 38 39 40 class ProjectBuildsSkippedError(BuildEnvironmentError): 41 message = ugettext_noop('Builds for this project are temporarily disabled') 42 43 44 class YAMLParseError(BuildEnvironmentError): 45 GENERIC_WITH_PARSE_EXCEPTION = ugettext_noop( 46 'Problem parsing YAML configuration. {exception}', 47 ) 48 49 50 class BuildTimeoutError(BuildEnvironmentError): 51 message = ugettext_noop('Build exited due to time out') 52 53 54 class BuildEnvironmentWarning(BuildEnvironmentException): 55 pass 56 57 58 class MkDocsYAMLParseError(BuildEnvironmentError): 59 GENERIC_WITH_PARSE_EXCEPTION = ugettext_noop( 60 'Problem parsing MkDocs YAML configuration. {exception}', 61 ) 62 [end of readthedocs/doc_builder/exceptions.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/readthedocs/doc_builder/exceptions.py b/readthedocs/doc_builder/exceptions.py --- a/readthedocs/doc_builder/exceptions.py +++ b/readthedocs/doc_builder/exceptions.py @@ -43,7 +43,7 @@ class YAMLParseError(BuildEnvironmentError): GENERIC_WITH_PARSE_EXCEPTION = ugettext_noop( - 'Problem parsing YAML configuration. {exception}', + 'Problem in your project\'s configuration. {exception}', )
{"golden_diff": "diff --git a/readthedocs/doc_builder/exceptions.py b/readthedocs/doc_builder/exceptions.py\n--- a/readthedocs/doc_builder/exceptions.py\n+++ b/readthedocs/doc_builder/exceptions.py\n@@ -43,7 +43,7 @@\n \n class YAMLParseError(BuildEnvironmentError):\n GENERIC_WITH_PARSE_EXCEPTION = ugettext_noop(\n- 'Problem parsing YAML configuration. {exception}',\n+ 'Problem in your project\\'s configuration. {exception}',\n )\n", "issue": "Confusing error message to end user\nIn https://github.com/rtfd/readthedocs.org/issues/4071#issuecomment-405939492 I realized that we are saying that we have a problem parsing the YAML file but the problem is in fact in one of the options set from the web admin dashboard.\r\n\r\nExample:\r\n\r\n![captura de pantalla_2018-07-18_10-57-36](https://user-images.githubusercontent.com/244656/42886232-647bedbc-8a79-11e8-8aca-fe2430139fe0.png)\r\n\r\nThere is no `requirements_file` entry in the YAML file (https://github.com/geopandas/geopandas/blob/master/readthedocs.yml) but it exists under the `Admin -> Advanced Settings` field form.\r\n\r\nWe need to improve this error to something more user-friendly that expresses the real error. It's not an error on parsing the YAML file. The file was parsed properly, but the problem is with one of the values from one of the fields.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Exceptions raised when building documentation.\"\"\"\n\nfrom __future__ import division, print_function, unicode_literals\n\nfrom django.utils.translation import ugettext_noop\n\n\nclass BuildEnvironmentException(Exception):\n message = None\n status_code = None\n\n def __init__(self, message=None, **kwargs):\n self.status_code = kwargs.pop('status_code', None) or self.status_code or 1\n message = message or self.get_default_message()\n super(BuildEnvironmentException, self).__init__(message, **kwargs)\n\n def get_default_message(self):\n return self.message\n\n\nclass BuildEnvironmentError(BuildEnvironmentException):\n GENERIC_WITH_BUILD_ID = ugettext_noop(\n 'There was a problem with Read the Docs while building your documentation. '\n 'Please try again later. '\n 'However, if this problem persists, '\n 'please report this to us with your build id ({build_id}).',\n )\n\n\nclass BuildEnvironmentCreationFailed(BuildEnvironmentError):\n message = ugettext_noop('Build environment creation failed')\n\n\nclass VersionLockedError(BuildEnvironmentError):\n message = ugettext_noop('Version locked, retrying in 5 minutes.')\n status_code = 423\n\n\nclass ProjectBuildsSkippedError(BuildEnvironmentError):\n message = ugettext_noop('Builds for this project are temporarily disabled')\n\n\nclass YAMLParseError(BuildEnvironmentError):\n GENERIC_WITH_PARSE_EXCEPTION = ugettext_noop(\n 'Problem parsing YAML configuration. {exception}',\n )\n\n\nclass BuildTimeoutError(BuildEnvironmentError):\n message = ugettext_noop('Build exited due to time out')\n\n\nclass BuildEnvironmentWarning(BuildEnvironmentException):\n pass\n\n\nclass MkDocsYAMLParseError(BuildEnvironmentError):\n GENERIC_WITH_PARSE_EXCEPTION = ugettext_noop(\n 'Problem parsing MkDocs YAML configuration. {exception}',\n )\n", "path": "readthedocs/doc_builder/exceptions.py"}]}
1311
106
gh_patches_debug_50216
rasdani/github-patches
git_diff
pex-tool__pex-991
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Release 2.1.12 On the docket: - [x] A PEX_EXTRA_SYS_PATH runtime variable #989 </issue> <code> [start of pex/version.py] 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = '2.1.11' 5 [end of pex/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = '2.1.11' +__version__ = '2.1.12'
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = '2.1.11'\n+__version__ = '2.1.12'\n", "issue": "Release 2.1.12\nOn the docket:\r\n- [x] A PEX_EXTRA_SYS_PATH runtime variable #989 \n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '2.1.11'\n", "path": "pex/version.py"}]}
614
97
gh_patches_debug_14012
rasdani/github-patches
git_diff
streamlink__streamlink-4355
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> plugins.ard_live: Unable to parse MEDIAINFO ### Checklist - [X] This is a plugin issue and not a different kind of issue - [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink) - [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22) - [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master) ### Streamlink version Latest stable release ### Description On streamlink 3.1.1 Linux: ``` [cli][info] Found matching plugin ard_live for URL https://live.daserste.de/ error: Unable to validate response text: Unable to parse MEDIAINFO: Expecting value: line 1 column 1 (char 0) ('<!DOCTYPE HTML>\n<html lang="de" i ...) ``` Streamlink 2.0.0 Windows works fine. Can't find a working 2.0.0 Linux build to verify. 3.1.1 seems to expect a player url at `https://live.daserste.de/live-de-102~playerJson.json` and 2.0.0 at `https://www.daserste.de/live/live-de-102~playerJson.json`. Is there a commandline arg to override it? ### Debug log ```text [cli][debug] OS: Linux-5.15.2-arch1-1-x86_64-with-glibc2.35 [cli][debug] Python: 3.10.2 [cli][debug] Streamlink: 3.1.1 [cli][debug] Requests(2.27.0), Socks(1.7.1), Websocket(1.2.3) [cli][debug] Arguments: [cli][debug] url=https://live.daserste.de/ [cli][debug] --loglevel=debug [cli][info] Found matching plugin ard_live for URL https://live.daserste.de/ [plugins.ard_live][debug] Player URL: 'https://live.daserste.de/live-de-102~playerJson.json' error: Unable to validate response text: Unable to parse MEDIAINFO: Expecting value: line 1 column 1 (char 0) ('<!DOCTYPE HTML>\n<html lang="de" i ...) ``` </issue> <code> [start of src/streamlink/plugins/ard_live.py] 1 import logging 2 import re 3 from urllib.parse import urljoin 4 5 from streamlink.plugin import Plugin, PluginError, pluginmatcher 6 from streamlink.plugin.api import validate 7 from streamlink.stream.hls import HLSStream 8 from streamlink.stream.http import HTTPStream 9 10 log = logging.getLogger(__name__) 11 12 13 @pluginmatcher(re.compile( 14 r"https?://((www|live)\.)?daserste\.de/" 15 )) 16 class ARDLive(Plugin): 17 _QUALITY_MAP = { 18 4: "1080p", 19 3: "720p", 20 2: "540p", 21 1: "270p", 22 0: "180p" 23 } 24 25 def _get_streams(self): 26 try: 27 data_url = self.session.http.get(self.url, schema=validate.Schema( 28 validate.parse_html(), 29 validate.xml_find(".//*[@data-ctrl-player]"), 30 validate.get("data-ctrl-player"), 31 validate.transform(lambda s: s.replace("'", "\"")), 32 validate.parse_json(), 33 {"url": str}, 34 validate.get("url") 35 )) 36 except PluginError: 37 return 38 39 data_url = urljoin(self.url, data_url) 40 log.debug(f"Player URL: '{data_url}'") 41 42 self.title, media = self.session.http.get(data_url, schema=validate.Schema( 43 validate.parse_json(name="MEDIAINFO"), 44 {"mc": { 45 validate.optional("_title"): str, 46 "_mediaArray": [validate.all( 47 { 48 "_mediaStreamArray": [validate.all( 49 { 50 "_quality": validate.any(str, int), 51 "_stream": [validate.url()], 52 }, 53 validate.union_get("_quality", ("_stream", 0)) 54 )] 55 }, 56 validate.get("_mediaStreamArray"), 57 validate.transform(dict) 58 )] 59 }}, 60 validate.get("mc"), 61 validate.union_get("_title", ("_mediaArray", 0)) 62 )) 63 64 if media.get("auto"): 65 yield from HLSStream.parse_variant_playlist(self.session, media.get("auto")).items() 66 else: 67 for quality, stream in media.items(): 68 yield self._QUALITY_MAP.get(quality, quality), HTTPStream(self.session, stream) 69 70 71 __plugin__ = ARDLive 72 [end of src/streamlink/plugins/ard_live.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/streamlink/plugins/ard_live.py b/src/streamlink/plugins/ard_live.py --- a/src/streamlink/plugins/ard_live.py +++ b/src/streamlink/plugins/ard_live.py @@ -14,6 +14,7 @@ r"https?://((www|live)\.)?daserste\.de/" )) class ARDLive(Plugin): + _URL_DATA_BASE = "https://www.daserste.de/" _QUALITY_MAP = { 4: "1080p", 3: "720p", @@ -36,7 +37,7 @@ except PluginError: return - data_url = urljoin(self.url, data_url) + data_url = urljoin(self._URL_DATA_BASE, data_url) log.debug(f"Player URL: '{data_url}'") self.title, media = self.session.http.get(data_url, schema=validate.Schema(
{"golden_diff": "diff --git a/src/streamlink/plugins/ard_live.py b/src/streamlink/plugins/ard_live.py\n--- a/src/streamlink/plugins/ard_live.py\n+++ b/src/streamlink/plugins/ard_live.py\n@@ -14,6 +14,7 @@\n r\"https?://((www|live)\\.)?daserste\\.de/\"\n ))\n class ARDLive(Plugin):\n+ _URL_DATA_BASE = \"https://www.daserste.de/\"\n _QUALITY_MAP = {\n 4: \"1080p\",\n 3: \"720p\",\n@@ -36,7 +37,7 @@\n except PluginError:\n return\n \n- data_url = urljoin(self.url, data_url)\n+ data_url = urljoin(self._URL_DATA_BASE, data_url)\n log.debug(f\"Player URL: '{data_url}'\")\n \n self.title, media = self.session.http.get(data_url, schema=validate.Schema(\n", "issue": "plugins.ard_live: Unable to parse MEDIAINFO\n### Checklist\n\n- [X] This is a plugin issue and not a different kind of issue\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nLatest stable release\n\n### Description\n\nOn streamlink 3.1.1 Linux:\r\n\r\n```\r\n[cli][info] Found matching plugin ard_live for URL https://live.daserste.de/\r\nerror: Unable to validate response text: Unable to parse MEDIAINFO: Expecting value: line 1 column 1 (char 0) ('<!DOCTYPE HTML>\\n<html lang=\"de\" i ...)\r\n```\r\n\r\nStreamlink 2.0.0 Windows works fine. Can't find a working 2.0.0 Linux build to verify.\r\n\r\n3.1.1 seems to expect a player url at `https://live.daserste.de/live-de-102~playerJson.json` and 2.0.0 at `https://www.daserste.de/live/live-de-102~playerJson.json`.\r\n\r\nIs there a commandline arg to override it?\n\n### Debug log\n\n```text\n[cli][debug] OS: Linux-5.15.2-arch1-1-x86_64-with-glibc2.35\r\n[cli][debug] Python: 3.10.2\r\n[cli][debug] Streamlink: 3.1.1\r\n[cli][debug] Requests(2.27.0), Socks(1.7.1), Websocket(1.2.3)\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://live.daserste.de/\r\n[cli][debug] --loglevel=debug\r\n[cli][info] Found matching plugin ard_live for URL https://live.daserste.de/\r\n[plugins.ard_live][debug] Player URL: 'https://live.daserste.de/live-de-102~playerJson.json'\r\nerror: Unable to validate response text: Unable to parse MEDIAINFO: Expecting value: line 1 column 1 (char 0) ('<!DOCTYPE HTML>\\n<html lang=\"de\" i ...)\n```\n\n", "before_files": [{"content": "import logging\nimport re\nfrom urllib.parse import urljoin\n\nfrom streamlink.plugin import Plugin, PluginError, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\nfrom streamlink.stream.http import HTTPStream\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://((www|live)\\.)?daserste\\.de/\"\n))\nclass ARDLive(Plugin):\n _QUALITY_MAP = {\n 4: \"1080p\",\n 3: \"720p\",\n 2: \"540p\",\n 1: \"270p\",\n 0: \"180p\"\n }\n\n def _get_streams(self):\n try:\n data_url = self.session.http.get(self.url, schema=validate.Schema(\n validate.parse_html(),\n validate.xml_find(\".//*[@data-ctrl-player]\"),\n validate.get(\"data-ctrl-player\"),\n validate.transform(lambda s: s.replace(\"'\", \"\\\"\")),\n validate.parse_json(),\n {\"url\": str},\n validate.get(\"url\")\n ))\n except PluginError:\n return\n\n data_url = urljoin(self.url, data_url)\n log.debug(f\"Player URL: 
'{data_url}'\")\n\n self.title, media = self.session.http.get(data_url, schema=validate.Schema(\n validate.parse_json(name=\"MEDIAINFO\"),\n {\"mc\": {\n validate.optional(\"_title\"): str,\n \"_mediaArray\": [validate.all(\n {\n \"_mediaStreamArray\": [validate.all(\n {\n \"_quality\": validate.any(str, int),\n \"_stream\": [validate.url()],\n },\n validate.union_get(\"_quality\", (\"_stream\", 0))\n )]\n },\n validate.get(\"_mediaStreamArray\"),\n validate.transform(dict)\n )]\n }},\n validate.get(\"mc\"),\n validate.union_get(\"_title\", (\"_mediaArray\", 0))\n ))\n\n if media.get(\"auto\"):\n yield from HLSStream.parse_variant_playlist(self.session, media.get(\"auto\")).items()\n else:\n for quality, stream in media.items():\n yield self._QUALITY_MAP.get(quality, quality), HTTPStream(self.session, stream)\n\n\n__plugin__ = ARDLive\n", "path": "src/streamlink/plugins/ard_live.py"}]}
1724
208
gh_patches_debug_38602
rasdani/github-patches
git_diff
streamlink__streamlink-2969
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Plugin App17 cannot fetch any stream ## Plugin Issue <!-- Replace [ ] with [x] in order to check the box --> - [x] This is a plugin issue and I have read the contribution guidelines. ### Description `Stream currently unavailable.` The output of the Streamlink is always the same. Even if the streamer is streaming online. ### Reproduction steps / Explicit stream URLs to test <!-- How can we reproduce this? Please note the exact steps below using the list format supplied. If you need more steps please add them. --> Paste any URL (e.g., `https://17.live/live/{any stream ID}`) and execute the command. ### Log output <!-- TEXT LOG OUTPUT IS REQUIRED for a plugin issue! Use the `--loglevel debug` parameter and avoid using parameters which suppress log output. https://streamlink.github.io/cli.html#cmdoption-l Make sure to **remove usernames and passwords** You can copy the output to https://gist.github.com/ or paste it below. --> ``` C:\>streamlink https://17.live/live/7000362 -l trace [22:44:25,408][cli][debug] OS: Windows 10 [22:44:25,409][cli][debug] Python: 3.6.6 [22:44:25,410][cli][debug] Streamlink: 1.4.1 [22:44:25,410][cli][debug] Requests(2.23.0), Socks(1.7.1), Websocket(0.57.0) [22:44:25,410][cli][info] Found matching plugin app17 for URL https://17.live/live/7000362 [22:44:25,735][plugin.app17][info] Stream currently unavailable. error: No playable streams found on this URL: https://17.live/live/7000362 ``` ### Additional comments, screenshots, etc. [screenshot](https://imgur.com/MFn7K0y) </issue> <code> [start of src/streamlink/plugins/app17.py] 1 import re 2 3 from streamlink.plugin import Plugin 4 from streamlink.plugin.api import useragents 5 from streamlink.stream import HLSStream, RTMPStream, HTTPStream 6 7 API_URL = "https://api-dsa.17app.co/api/v1/liveStreams/getLiveStreamInfo" 8 9 _url_re = re.compile(r"https://17.live/live/(?P<channel>[^/&?]+)") 10 _status_re = re.compile(r'\\"closeBy\\":\\"\\"') 11 _rtmp_re = re.compile(r'\\"url\\"\s*:\s*\\"(.+?)\\"') 12 13 14 class App17(Plugin): 15 @classmethod 16 def can_handle_url(cls, url): 17 return _url_re.match(url) 18 19 def _get_streams(self): 20 match = _url_re.match(self.url) 21 channel = match.group("channel") 22 23 self.session.http.headers.update({'User-Agent': useragents.CHROME, 'Referer': self.url}) 24 25 payload = '{"liveStreamID": "%s"}' % (channel) 26 res = self.session.http.post(API_URL, data=payload) 27 status = _status_re.search(res.text) 28 if not status: 29 self.logger.info("Stream currently unavailable.") 30 return 31 32 http_url = _rtmp_re.search(res.text).group(1) 33 https_url = http_url.replace("http:", "https:") 34 yield "live", HTTPStream(self.session, https_url) 35 36 if 'pull-rtmp' in http_url: 37 rtmp_url = http_url.replace("http:", "rtmp:").replace(".flv", "") 38 stream = RTMPStream(self.session, { 39 "rtmp": rtmp_url, 40 "live": True, 41 "pageUrl": self.url, 42 }) 43 yield "live", stream 44 45 if 'wansu-' in http_url: 46 hls_url = http_url.replace(".flv", "/playlist.m3u8") 47 else: 48 hls_url = http_url.replace("live-hdl", "live-hls").replace(".flv", ".m3u8") 49 50 s = [] 51 for s in HLSStream.parse_variant_playlist(self.session, hls_url).items(): 52 yield s 53 if not s: 54 yield "live", HLSStream(self.session, hls_url) 55 56 57 __plugin__ = App17 58 [end of src/streamlink/plugins/app17.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/streamlink/plugins/app17.py b/src/streamlink/plugins/app17.py --- a/src/streamlink/plugins/app17.py +++ b/src/streamlink/plugins/app17.py @@ -1,35 +1,49 @@ +import logging import re from streamlink.plugin import Plugin -from streamlink.plugin.api import useragents +from streamlink.plugin.api import useragents, validate from streamlink.stream import HLSStream, RTMPStream, HTTPStream -API_URL = "https://api-dsa.17app.co/api/v1/liveStreams/getLiveStreamInfo" - -_url_re = re.compile(r"https://17.live/live/(?P<channel>[^/&?]+)") -_status_re = re.compile(r'\\"closeBy\\":\\"\\"') -_rtmp_re = re.compile(r'\\"url\\"\s*:\s*\\"(.+?)\\"') +log = logging.getLogger(__name__) class App17(Plugin): + _url_re = re.compile(r"https://17.live/live/(?P<channel>[^/&?]+)") + API_URL = "https://api-dsa.17app.co/api/v1/lives/{0}/viewers/alive" + + _api_schema = validate.Schema( + { + "rtmpUrls": [{ + validate.optional("provider"): validate.any(int, None), + "url": validate.url(), + }], + }, + validate.get("rtmpUrls"), + ) + @classmethod def can_handle_url(cls, url): - return _url_re.match(url) + return cls._url_re.match(url) is not None def _get_streams(self): - match = _url_re.match(self.url) + match = self._url_re.match(self.url) channel = match.group("channel") self.session.http.headers.update({'User-Agent': useragents.CHROME, 'Referer': self.url}) - payload = '{"liveStreamID": "%s"}' % (channel) - res = self.session.http.post(API_URL, data=payload) - status = _status_re.search(res.text) - if not status: - self.logger.info("Stream currently unavailable.") + data = '{"liveStreamID":"%s"}' % (channel) + + try: + res = self.session.http.post(self.API_URL.format(channel), data=data) + res_json = self.session.http.json(res, schema=self._api_schema) + log.trace("{0!r}".format(res_json)) + http_url = res_json[0]["url"] + except Exception as e: + log.info("Stream currently unavailable.") + log.debug(str(e)) return - http_url = _rtmp_re.search(res.text).group(1) https_url = http_url.replace("http:", "https:") yield "live", HTTPStream(self.session, https_url) @@ -47,11 +61,16 @@ else: hls_url = http_url.replace("live-hdl", "live-hls").replace(".flv", ".m3u8") - s = [] - for s in HLSStream.parse_variant_playlist(self.session, hls_url).items(): - yield s + s = HLSStream.parse_variant_playlist(self.session, hls_url) if not s: yield "live", HLSStream(self.session, hls_url) + else: + if len(s) == 1: + for _n, _s in s.items(): + yield "live", _s + else: + for _s in s.items(): + yield _s __plugin__ = App17
{"golden_diff": "diff --git a/src/streamlink/plugins/app17.py b/src/streamlink/plugins/app17.py\n--- a/src/streamlink/plugins/app17.py\n+++ b/src/streamlink/plugins/app17.py\n@@ -1,35 +1,49 @@\n+import logging\n import re\n \n from streamlink.plugin import Plugin\n-from streamlink.plugin.api import useragents\n+from streamlink.plugin.api import useragents, validate\n from streamlink.stream import HLSStream, RTMPStream, HTTPStream\n \n-API_URL = \"https://api-dsa.17app.co/api/v1/liveStreams/getLiveStreamInfo\"\n-\n-_url_re = re.compile(r\"https://17.live/live/(?P<channel>[^/&?]+)\")\n-_status_re = re.compile(r'\\\\\"closeBy\\\\\":\\\\\"\\\\\"')\n-_rtmp_re = re.compile(r'\\\\\"url\\\\\"\\s*:\\s*\\\\\"(.+?)\\\\\"')\n+log = logging.getLogger(__name__)\n \n \n class App17(Plugin):\n+ _url_re = re.compile(r\"https://17.live/live/(?P<channel>[^/&?]+)\")\n+ API_URL = \"https://api-dsa.17app.co/api/v1/lives/{0}/viewers/alive\"\n+\n+ _api_schema = validate.Schema(\n+ {\n+ \"rtmpUrls\": [{\n+ validate.optional(\"provider\"): validate.any(int, None),\n+ \"url\": validate.url(),\n+ }],\n+ },\n+ validate.get(\"rtmpUrls\"),\n+ )\n+\n @classmethod\n def can_handle_url(cls, url):\n- return _url_re.match(url)\n+ return cls._url_re.match(url) is not None\n \n def _get_streams(self):\n- match = _url_re.match(self.url)\n+ match = self._url_re.match(self.url)\n channel = match.group(\"channel\")\n \n self.session.http.headers.update({'User-Agent': useragents.CHROME, 'Referer': self.url})\n \n- payload = '{\"liveStreamID\": \"%s\"}' % (channel)\n- res = self.session.http.post(API_URL, data=payload)\n- status = _status_re.search(res.text)\n- if not status:\n- self.logger.info(\"Stream currently unavailable.\")\n+ data = '{\"liveStreamID\":\"%s\"}' % (channel)\n+\n+ try:\n+ res = self.session.http.post(self.API_URL.format(channel), data=data)\n+ res_json = self.session.http.json(res, schema=self._api_schema)\n+ log.trace(\"{0!r}\".format(res_json))\n+ http_url = res_json[0][\"url\"]\n+ except Exception as e:\n+ log.info(\"Stream currently unavailable.\")\n+ log.debug(str(e))\n return\n \n- http_url = _rtmp_re.search(res.text).group(1)\n https_url = http_url.replace(\"http:\", \"https:\")\n yield \"live\", HTTPStream(self.session, https_url)\n \n@@ -47,11 +61,16 @@\n else:\n hls_url = http_url.replace(\"live-hdl\", \"live-hls\").replace(\".flv\", \".m3u8\")\n \n- s = []\n- for s in HLSStream.parse_variant_playlist(self.session, hls_url).items():\n- yield s\n+ s = HLSStream.parse_variant_playlist(self.session, hls_url)\n if not s:\n yield \"live\", HLSStream(self.session, hls_url)\n+ else:\n+ if len(s) == 1:\n+ for _n, _s in s.items():\n+ yield \"live\", _s\n+ else:\n+ for _s in s.items():\n+ yield _s\n \n \n __plugin__ = App17\n", "issue": "Plugin App17 cannot fetch any stream\n## Plugin Issue\r\n\r\n<!-- Replace [ ] with [x] in order to check the box -->\r\n- [x] This is a plugin issue and I have read the contribution guidelines.\r\n\r\n\r\n### Description\r\n\r\n`Stream currently unavailable.`\r\nThe output of the Streamlink is always the same. Even if the streamer is streaming online.\r\n\r\n### Reproduction steps / Explicit stream URLs to test\r\n\r\n<!-- How can we reproduce this? Please note the exact steps below using the list format supplied. If you need more steps please add them. 
-->\r\n\r\nPaste any URL (e.g., `https://17.live/live/{any stream ID}`) and execute the command.\r\n\r\n\r\n### Log output\r\n\r\n<!--\r\nTEXT LOG OUTPUT IS REQUIRED for a plugin issue!\r\nUse the `--loglevel debug` parameter and avoid using parameters which suppress log output.\r\nhttps://streamlink.github.io/cli.html#cmdoption-l\r\n\r\nMake sure to **remove usernames and passwords**\r\nYou can copy the output to https://gist.github.com/ or paste it below.\r\n-->\r\n\r\n```\r\nC:\\>streamlink https://17.live/live/7000362 -l trace\r\n[22:44:25,408][cli][debug] OS: Windows 10\r\n[22:44:25,409][cli][debug] Python: 3.6.6\r\n[22:44:25,410][cli][debug] Streamlink: 1.4.1\r\n[22:44:25,410][cli][debug] Requests(2.23.0), Socks(1.7.1), Websocket(0.57.0)\r\n[22:44:25,410][cli][info] Found matching plugin app17 for URL https://17.live/live/7000362\r\n[22:44:25,735][plugin.app17][info] Stream currently unavailable.\r\nerror: No playable streams found on this URL: https://17.live/live/7000362\r\n```\r\n\r\n\r\n### Additional comments, screenshots, etc.\r\n\r\n[screenshot](https://imgur.com/MFn7K0y)\r\n\n", "before_files": [{"content": "import re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import useragents\nfrom streamlink.stream import HLSStream, RTMPStream, HTTPStream\n\nAPI_URL = \"https://api-dsa.17app.co/api/v1/liveStreams/getLiveStreamInfo\"\n\n_url_re = re.compile(r\"https://17.live/live/(?P<channel>[^/&?]+)\")\n_status_re = re.compile(r'\\\\\"closeBy\\\\\":\\\\\"\\\\\"')\n_rtmp_re = re.compile(r'\\\\\"url\\\\\"\\s*:\\s*\\\\\"(.+?)\\\\\"')\n\n\nclass App17(Plugin):\n @classmethod\n def can_handle_url(cls, url):\n return _url_re.match(url)\n\n def _get_streams(self):\n match = _url_re.match(self.url)\n channel = match.group(\"channel\")\n\n self.session.http.headers.update({'User-Agent': useragents.CHROME, 'Referer': self.url})\n\n payload = '{\"liveStreamID\": \"%s\"}' % (channel)\n res = self.session.http.post(API_URL, data=payload)\n status = _status_re.search(res.text)\n if not status:\n self.logger.info(\"Stream currently unavailable.\")\n return\n\n http_url = _rtmp_re.search(res.text).group(1)\n https_url = http_url.replace(\"http:\", \"https:\")\n yield \"live\", HTTPStream(self.session, https_url)\n\n if 'pull-rtmp' in http_url:\n rtmp_url = http_url.replace(\"http:\", \"rtmp:\").replace(\".flv\", \"\")\n stream = RTMPStream(self.session, {\n \"rtmp\": rtmp_url,\n \"live\": True,\n \"pageUrl\": self.url,\n })\n yield \"live\", stream\n\n if 'wansu-' in http_url:\n hls_url = http_url.replace(\".flv\", \"/playlist.m3u8\")\n else:\n hls_url = http_url.replace(\"live-hdl\", \"live-hls\").replace(\".flv\", \".m3u8\")\n\n s = []\n for s in HLSStream.parse_variant_playlist(self.session, hls_url).items():\n yield s\n if not s:\n yield \"live\", HLSStream(self.session, hls_url)\n\n\n__plugin__ = App17\n", "path": "src/streamlink/plugins/app17.py"}]}
1,637
809
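The patch above abandons regex scraping of the old `getLiveStreamInfo` response in favor of a POST to the per-channel `viewers/alive` endpoint, with the JSON checked against a `validate.Schema` before any URL is trusted. A dependency-free sketch of that request-then-validate pattern; the endpoint and the `rtmpUrls`/`url` field names come from the diff, and whether the service still answers in this shape is an assumption:

```python
import json
import urllib.request

# Endpoint taken from the patched plugin; treat it as illustrative.
API_URL = "https://api-dsa.17app.co/api/v1/lives/{0}/viewers/alive"


def fetch_stream_url(channel):
    # POST the channel id, mirroring the plugin's request body.
    body = json.dumps({"liveStreamID": channel}).encode()
    req = urllib.request.Request(
        API_URL.format(channel),
        data=body,
        headers={"Content-Type": "application/json"},
    )
    try:
        with urllib.request.urlopen(req, timeout=10) as resp:
            payload = json.load(resp)
        # Check the response shape before trusting it, like the schema does.
        urls = payload["rtmpUrls"]
        if not urls or "url" not in urls[0]:
            raise ValueError("unexpected response shape")
        return urls[0]["url"]
    except Exception as exc:
        # Any failure collapses into the plugin's "Stream currently unavailable."
        print("Stream currently unavailable:", exc)
        return None
```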
gh_patches_debug_66700
rasdani/github-patches
git_diff
googleapis__google-cloud-python-10076
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Synthesis failed for recommender Hello! Autosynth couldn't regenerate recommender. :broken_heart: Here's the output from running `synth.py`: ``` Cloning into 'working_repo'... Switched to branch 'autosynth-recommender' Running synthtool ['/tmpfs/src/git/autosynth/env/bin/python3', '-m', 'synthtool', 'synth.py', '--'] synthtool > Executing /tmpfs/src/git/autosynth/working_repo/recommender/synth.py. synthtool > Ensuring dependencies. synthtool > Pulling artman image. latest: Pulling from googleapis/artman Digest: sha256:c773192618c608a7a0415dd95282f841f8e6bcdef7dd760a988c93b77a64bd57 Status: Image is up to date for googleapis/artman:latest synthtool > Cloning googleapis. Traceback (most recent call last): File "/home/kbuilder/.pyenv/versions/3.6.1/lib/python3.6/runpy.py", line 193, in _run_module_as_main "__main__", mod_spec) File "/home/kbuilder/.pyenv/versions/3.6.1/lib/python3.6/runpy.py", line 85, in _run_code exec(code, run_globals) File "/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/synthtool/__main__.py", line 87, in <module> main() File "/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/click/core.py", line 764, in __call__ return self.main(*args, **kwargs) File "/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/click/core.py", line 717, in main rv = self.invoke(ctx) File "/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/click/core.py", line 956, in invoke return ctx.invoke(self.callback, **ctx.params) File "/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/click/core.py", line 555, in invoke return callback(*args, **kwargs) File "/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/synthtool/__main__.py", line 79, in main spec.loader.exec_module(synth_module) # type: ignore File "<frozen importlib._bootstrap_external>", line 678, in exec_module File "<frozen importlib._bootstrap>", line 205, in _call_with_frames_removed File "/tmpfs/src/git/autosynth/working_repo/recommender/synth.py", line 32, in <module> include_protos=True File "/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/synthtool/gcp/gapic_generator.py", line 50, in py_library return self._generate_code(service, version, "python", **kwargs) File "/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/synthtool/gcp/gapic_generator.py", line 121, in _generate_code f"Unable to find configuration yaml file: {(googleapis / config_path)}." FileNotFoundError: Unable to find configuration yaml file: /home/kbuilder/.cache/synthtool/googleapis/google/cloud/recommender/artman_recommender_v1beta1.yaml. synthtool > Cleaned up 1 temporary directories. synthtool > Wrote metadata to synth.metadata. Synthesis failed ``` Google internal developers can see the full log [here](https://sponge/74dee1a3-0367-43bf-9f40-1001ae7ea243). </issue> <code> [start of recommender/synth.py] 1 # Copyright 2019 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 15 """This script is used to synthesize generated parts of this library.""" 16 import re 17 18 import synthtool as s 19 from synthtool import gcp 20 21 gapic = gcp.GAPICGenerator() 22 versions = ["v1beta1"] 23 common = gcp.CommonTemplates() 24 25 26 # ---------------------------------------------------------------------------- 27 # Generate Cloud Recommender 28 # ---------------------------------------------------------------------------- 29 for version in versions: 30 library = gapic.py_library( 31 "recommender", version, 32 include_protos=True 33 ) 34 s.move(library, excludes=['nox.py', 'docs/index.rst', 'README.rst', 'setup.py']) 35 36 # ---------------------------------------------------------------------------- 37 # Add templated files 38 # ---------------------------------------------------------------------------- 39 templated_files = common.py_library(unit_cov_level=97, cov_level=100) 40 s.move(templated_files, excludes=['noxfile.py']) 41 42 s.shell.run(["nox", "-s", "blacken"], hide_output=False) [end of recommender/synth.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/recommender/synth.py b/recommender/synth.py --- a/recommender/synth.py +++ b/recommender/synth.py @@ -29,7 +29,8 @@ for version in versions: library = gapic.py_library( "recommender", version, - include_protos=True + include_protos=True, + config_path="/google/cloud/recommender/v1beta1/artman_recommender_v1beta1.yaml" ) s.move(library, excludes=['nox.py', 'docs/index.rst', 'README.rst', 'setup.py'])
{"golden_diff": "diff --git a/recommender/synth.py b/recommender/synth.py\n--- a/recommender/synth.py\n+++ b/recommender/synth.py\n@@ -29,7 +29,8 @@\n for version in versions:\n library = gapic.py_library(\n \"recommender\", version,\n- include_protos=True\n+ include_protos=True,\n+ config_path=\"/google/cloud/recommender/v1beta1/artman_recommender_v1beta1.yaml\"\n )\n s.move(library, excludes=['nox.py', 'docs/index.rst', 'README.rst', 'setup.py'])\n", "issue": "Synthesis failed for recommender\nHello! Autosynth couldn't regenerate recommender. :broken_heart:\n\nHere's the output from running `synth.py`:\n\n```\nCloning into 'working_repo'...\nSwitched to branch 'autosynth-recommender'\nRunning synthtool\n['/tmpfs/src/git/autosynth/env/bin/python3', '-m', 'synthtool', 'synth.py', '--']\nsynthtool > Executing /tmpfs/src/git/autosynth/working_repo/recommender/synth.py.\nsynthtool > Ensuring dependencies.\nsynthtool > Pulling artman image.\nlatest: Pulling from googleapis/artman\nDigest: sha256:c773192618c608a7a0415dd95282f841f8e6bcdef7dd760a988c93b77a64bd57\nStatus: Image is up to date for googleapis/artman:latest\nsynthtool > Cloning googleapis.\nTraceback (most recent call last):\n File \"/home/kbuilder/.pyenv/versions/3.6.1/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\n \"__main__\", mod_spec)\n File \"/home/kbuilder/.pyenv/versions/3.6.1/lib/python3.6/runpy.py\", line 85, in _run_code\n exec(code, run_globals)\n File \"/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/synthtool/__main__.py\", line 87, in <module>\n main()\n File \"/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/click/core.py\", line 764, in __call__\n return self.main(*args, **kwargs)\n File \"/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/click/core.py\", line 717, in main\n rv = self.invoke(ctx)\n File \"/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/click/core.py\", line 956, in invoke\n return ctx.invoke(self.callback, **ctx.params)\n File \"/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/click/core.py\", line 555, in invoke\n return callback(*args, **kwargs)\n File \"/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/synthtool/__main__.py\", line 79, in main\n spec.loader.exec_module(synth_module) # type: ignore\n File \"<frozen importlib._bootstrap_external>\", line 678, in exec_module\n File \"<frozen importlib._bootstrap>\", line 205, in _call_with_frames_removed\n File \"/tmpfs/src/git/autosynth/working_repo/recommender/synth.py\", line 32, in <module>\n include_protos=True\n File \"/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/synthtool/gcp/gapic_generator.py\", line 50, in py_library\n return self._generate_code(service, version, \"python\", **kwargs)\n File \"/tmpfs/src/git/autosynth/env/lib/python3.6/site-packages/synthtool/gcp/gapic_generator.py\", line 121, in _generate_code\n f\"Unable to find configuration yaml file: {(googleapis / config_path)}.\"\nFileNotFoundError: Unable to find configuration yaml file: /home/kbuilder/.cache/synthtool/googleapis/google/cloud/recommender/artman_recommender_v1beta1.yaml.\nsynthtool > Cleaned up 1 temporary directories.\nsynthtool > Wrote metadata to synth.metadata.\n\nSynthesis failed\n\n```\n\nGoogle internal developers can see the full log [here](https://sponge/74dee1a3-0367-43bf-9f40-1001ae7ea243).\n\n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the 
License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This script is used to synthesize generated parts of this library.\"\"\"\nimport re\n\nimport synthtool as s\nfrom synthtool import gcp\n\ngapic = gcp.GAPICGenerator()\nversions = [\"v1beta1\"]\ncommon = gcp.CommonTemplates()\n\n\n# ----------------------------------------------------------------------------\n# Generate Cloud Recommender\n# ----------------------------------------------------------------------------\nfor version in versions:\n library = gapic.py_library(\n \"recommender\", version,\n include_protos=True\n )\n s.move(library, excludes=['nox.py', 'docs/index.rst', 'README.rst', 'setup.py'])\n\n# ----------------------------------------------------------------------------\n# Add templated files\n# ----------------------------------------------------------------------------\ntemplated_files = common.py_library(unit_cov_level=97, cov_level=100)\ns.move(templated_files, excludes=['noxfile.py'])\n\ns.shell.run([\"nox\", \"-s\", \"blacken\"], hide_output=False) ", "path": "recommender/synth.py"}]}
1,812
136
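The traceback shows the generator guessing an artman config directly under `google/cloud/recommender/`, while the yaml actually lives in the versioned subdirectory, so the patch hands `config_path` to `gapic.py_library` instead of letting it guess. A small sketch contrasting the two lookups; the path-building logic is reconstructed from the `FileNotFoundError` and the diff, not lifted from synthtool's source:

```python
from pathlib import Path


def default_config_path(googleapis: Path, service: str, version: str) -> Path:
    # The generator's effective guess, per the FileNotFoundError above.
    return googleapis / f"google/cloud/{service}/artman_{service}_{version}.yaml"


def explicit_config_path(googleapis: Path, service: str, version: str) -> Path:
    # The location the patch supplies via config_path=...
    return googleapis / f"google/cloud/{service}/{version}/artman_{service}_{version}.yaml"


cache = Path.home() / ".cache/synthtool/googleapis"
print(default_config_path(cache, "recommender", "v1beta1"))   # missing, hence the failure
print(explicit_config_path(cache, "recommender", "v1beta1"))  # where the yaml really sits
```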
gh_patches_debug_16252
rasdani/github-patches
git_diff
fedora-infra__bodhi-2097
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> bodhi-check-policies only operates on unpushed updates I noticed today when working on #1514 that the update query that ```bodhi-check-policies``` performs only selects [unpushed updates](https://github.com/fedora-infra/bodhi/blob/3.1.0/bodhi/server/scripts/check_policies.py#L40-L42): ``` updates = models.Update.query.filter(models.Update.pushed == false())\ .filter(models.Update.status.in_( [models.UpdateStatus.pending, models.UpdateStatus.testing])) ``` I think this is probably not what we want, as [testing updates are marked pushed](https://github.com/fedora-infra/bodhi/blob/3b655f2/bodhi/server/consumers/masher.py#L845-L857). This means we will only consult Greenwave about pending updates. </issue> <code> [start of bodhi/server/scripts/check_policies.py] 1 # -*- coding: utf-8 -*- 2 # Copyright © 2017 Red Hat, Inc. 3 # 4 # This file is part of Bodhi. 5 # 6 # This program is free software; you can redistribute it and/or 7 # modify it under the terms of the GNU General Public License 8 # as published by the Free Software Foundation; either version 2 9 # of the License, or (at your option) any later version. 10 # 11 # This program is distributed in the hope that it will be useful, 12 # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 # GNU General Public License for more details. 15 # 16 # You should have received a copy of the GNU General Public License 17 # along with this program; if not, write to the Free Software 18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 19 20 """ 21 Check the enforced policies by Greenwave for each open update. 22 23 Ideally, this should be done in a fedmsg consumer but we currently do not have any 24 messages in the message bus yet. 25 """ 26 import click 27 from sqlalchemy.sql.expression import false 28 29 from bodhi.server import config, initialize_db, models, Session 30 31 32 @click.command() 33 @click.version_option(message='%(version)s') 34 def check(): 35 """Check the enforced policies by Greenwave for each open update.""" 36 initialize_db(config.config) 37 session = Session() 38 39 updates = models.Update.query.filter(models.Update.pushed == false())\ 40 .filter(models.Update.status.in_( 41 [models.UpdateStatus.pending, models.UpdateStatus.testing])) 42 for update in updates: 43 try: 44 update.update_test_gating_status() 45 session.commit() 46 except Exception as e: 47 # If there is a problem talking to Greenwave server, print the error. 48 click.echo(str(e)) 49 session.rollback() 50 51 52 if __name__ == '__main__': 53 check() 54 [end of bodhi/server/scripts/check_policies.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bodhi/server/scripts/check_policies.py b/bodhi/server/scripts/check_policies.py --- a/bodhi/server/scripts/check_policies.py +++ b/bodhi/server/scripts/check_policies.py @@ -24,7 +24,6 @@ messages in the message bus yet. """ import click -from sqlalchemy.sql.expression import false from bodhi.server import config, initialize_db, models, Session @@ -36,9 +35,8 @@ initialize_db(config.config) session = Session() - updates = models.Update.query.filter(models.Update.pushed == false())\ - .filter(models.Update.status.in_( - [models.UpdateStatus.pending, models.UpdateStatus.testing])) + updates = models.Update.query.filter(models.Update.status.in_( + [models.UpdateStatus.pending, models.UpdateStatus.testing])) for update in updates: try: update.update_test_gating_status()
{"golden_diff": "diff --git a/bodhi/server/scripts/check_policies.py b/bodhi/server/scripts/check_policies.py\n--- a/bodhi/server/scripts/check_policies.py\n+++ b/bodhi/server/scripts/check_policies.py\n@@ -24,7 +24,6 @@\n messages in the message bus yet.\n \"\"\"\n import click\n-from sqlalchemy.sql.expression import false\n \n from bodhi.server import config, initialize_db, models, Session\n \n@@ -36,9 +35,8 @@\n initialize_db(config.config)\n session = Session()\n \n- updates = models.Update.query.filter(models.Update.pushed == false())\\\n- .filter(models.Update.status.in_(\n- [models.UpdateStatus.pending, models.UpdateStatus.testing]))\n+ updates = models.Update.query.filter(models.Update.status.in_(\n+ [models.UpdateStatus.pending, models.UpdateStatus.testing]))\n for update in updates:\n try:\n update.update_test_gating_status()\n", "issue": "bodhi-check-policies only operates on unpushed updates\nI noticed today when working on #1514 that the update query that ```bodhi-check-policies``` performs only selects [unpushed updates](https://github.com/fedora-infra/bodhi/blob/3.1.0/bodhi/server/scripts/check_policies.py#L40-L42):\r\n\r\n```\r\nupdates = models.Update.query.filter(models.Update.pushed == false())\\\r\n .filter(models.Update.status.in_(\r\n [models.UpdateStatus.pending, models.UpdateStatus.testing]))\r\n```\r\n\r\nI think this is probably not what we want, as [testing updates are marked pushed](https://github.com/fedora-infra/bodhi/blob/3b655f2/bodhi/server/consumers/masher.py#L845-L857). This means we will only consult Greenwave about pending updates.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright \u00a9 2017 Red Hat, Inc.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\n\"\"\"\nCheck the enforced policies by Greenwave for each open update.\n\nIdeally, this should be done in a fedmsg consumer but we currently do not have any\nmessages in the message bus yet.\n\"\"\"\nimport click\nfrom sqlalchemy.sql.expression import false\n\nfrom bodhi.server import config, initialize_db, models, Session\n\n\[email protected]()\[email protected]_option(message='%(version)s')\ndef check():\n \"\"\"Check the enforced policies by Greenwave for each open update.\"\"\"\n initialize_db(config.config)\n session = Session()\n\n updates = models.Update.query.filter(models.Update.pushed == false())\\\n .filter(models.Update.status.in_(\n [models.UpdateStatus.pending, models.UpdateStatus.testing]))\n for update in updates:\n try:\n update.update_test_gating_status()\n session.commit()\n except Exception as e:\n # If there is a problem talking to Greenwave server, print the error.\n click.echo(str(e))\n session.rollback()\n\n\nif __name__ == '__main__':\n check()\n", "path": "bodhi/server/scripts/check_policies.py"}]}
1,249
200
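The one-line query change matters because updates are flagged `pushed` once the masher moves them to testing, so the extra `pushed == false()` filter quietly restricted the Greenwave poll to pending updates. A self-contained SQLAlchemy demonstration of the difference, using a toy table whose schema is invented for illustration rather than Bodhi's real model:

```python
from sqlalchemy import Boolean, Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base  # SQLAlchemy 1.4+

Base = declarative_base()


class Update(Base):
    __tablename__ = "updates"
    id = Column(Integer, primary_key=True)
    status = Column(String)
    pushed = Column(Boolean)


engine = create_engine("sqlite://")  # throwaway in-memory database
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add_all([
        Update(status="pending", pushed=False),
        Update(status="testing", pushed=True),  # marked pushed on push to testing
    ])
    session.commit()

    in_play = Update.status.in_(["pending", "testing"])
    old_count = session.query(Update).filter(Update.pushed.is_(False), in_play).count()
    new_count = session.query(Update).filter(in_play).count()
    print(old_count, new_count)  # 1 2: the old filter skipped the testing update
```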
gh_patches_debug_26464
rasdani/github-patches
git_diff
geopandas__geopandas-2418
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> wrong country code ISO_3 for France in naturalearth_lowres dataset Not sure this is the right place to mention it, but code iso_3 for France should be 'FRA' not -99. ``` import geopandas path = geopandas.datasets.get_path('naturalearth_lowres') world = geopandas.read_file(path) world[world['name']=='France'].iso_a3 ``` </issue> <code> [start of geopandas/datasets/naturalearth_creation.py] 1 """ 2 Script that generates the included dataset 'naturalearth_lowres.shp'. 3 4 Raw data: https://www.naturalearthdata.com/downloads/10m-cultural-vectors/10m-admin-0-countries/ 5 Current version used: version 4.1.0 6 """ # noqa (E501 link is longer than max line length) 7 8 import geopandas as gpd 9 10 # assumes zipfile from naturalearthdata was downloaded to current directory 11 world_raw = gpd.read_file("zip://./ne_110m_admin_0_countries.zip") 12 # subsets columns of interest for geopandas examples 13 world_df = world_raw[ 14 ["POP_EST", "CONTINENT", "NAME", "ISO_A3", "GDP_MD_EST", "geometry"] 15 ] 16 world_df.columns = world_df.columns.str.lower() 17 world_df.to_file( 18 driver="ESRI Shapefile", filename="./naturalearth_lowres/naturalearth_lowres.shp" 19 ) 20 [end of geopandas/datasets/naturalearth_creation.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/geopandas/datasets/naturalearth_creation.py b/geopandas/datasets/naturalearth_creation.py --- a/geopandas/datasets/naturalearth_creation.py +++ b/geopandas/datasets/naturalearth_creation.py @@ -1,19 +1,29 @@ """ Script that generates the included dataset 'naturalearth_lowres.shp'. -Raw data: https://www.naturalearthdata.com/downloads/10m-cultural-vectors/10m-admin-0-countries/ -Current version used: version 4.1.0 +Raw data: https://www.naturalearthdata.com/http//www.naturalearthdata.com/download/110m/cultural/ne_110m_admin_0_countries.zip +Current version used: version 5.0.1 """ # noqa (E501 link is longer than max line length) import geopandas as gpd # assumes zipfile from naturalearthdata was downloaded to current directory world_raw = gpd.read_file("zip://./ne_110m_admin_0_countries.zip") + +# not ideal - fix some country codes +mask = world_raw["ISO_A3"].eq("-99") & world_raw["TYPE"].isin( + ["Sovereign country", "Country"] +) +world_raw.loc[mask, "ISO_A3"] = world_raw.loc[mask, "ADM0_A3"] + # subsets columns of interest for geopandas examples world_df = world_raw[ - ["POP_EST", "CONTINENT", "NAME", "ISO_A3", "GDP_MD_EST", "geometry"] -] + ["POP_EST", "CONTINENT", "NAME", "ISO_A3", "GDP_MD", "geometry"] +].rename( + columns={"GDP_MD": "GDP_MD_EST"} +) # column has changed name... world_df.columns = world_df.columns.str.lower() + world_df.to_file( driver="ESRI Shapefile", filename="./naturalearth_lowres/naturalearth_lowres.shp" )
{"golden_diff": "diff --git a/geopandas/datasets/naturalearth_creation.py b/geopandas/datasets/naturalearth_creation.py\n--- a/geopandas/datasets/naturalearth_creation.py\n+++ b/geopandas/datasets/naturalearth_creation.py\n@@ -1,19 +1,29 @@\n \"\"\"\n Script that generates the included dataset 'naturalearth_lowres.shp'.\n \n-Raw data: https://www.naturalearthdata.com/downloads/10m-cultural-vectors/10m-admin-0-countries/\n-Current version used: version 4.1.0\n+Raw data: https://www.naturalearthdata.com/http//www.naturalearthdata.com/download/110m/cultural/ne_110m_admin_0_countries.zip\n+Current version used: version 5.0.1\n \"\"\" # noqa (E501 link is longer than max line length)\n \n import geopandas as gpd\n \n # assumes zipfile from naturalearthdata was downloaded to current directory\n world_raw = gpd.read_file(\"zip://./ne_110m_admin_0_countries.zip\")\n+\n+# not ideal - fix some country codes\n+mask = world_raw[\"ISO_A3\"].eq(\"-99\") & world_raw[\"TYPE\"].isin(\n+ [\"Sovereign country\", \"Country\"]\n+)\n+world_raw.loc[mask, \"ISO_A3\"] = world_raw.loc[mask, \"ADM0_A3\"]\n+\n # subsets columns of interest for geopandas examples\n world_df = world_raw[\n- [\"POP_EST\", \"CONTINENT\", \"NAME\", \"ISO_A3\", \"GDP_MD_EST\", \"geometry\"]\n-]\n+ [\"POP_EST\", \"CONTINENT\", \"NAME\", \"ISO_A3\", \"GDP_MD\", \"geometry\"]\n+].rename(\n+ columns={\"GDP_MD\": \"GDP_MD_EST\"}\n+) # column has changed name...\n world_df.columns = world_df.columns.str.lower()\n+\n world_df.to_file(\n driver=\"ESRI Shapefile\", filename=\"./naturalearth_lowres/naturalearth_lowres.shp\"\n )\n", "issue": "wrong country code ISO_3 for France in naturalearth_lowres dataset\nNot sure this is the right place to mention it, but code iso_3 for France should be 'FRA' not -99.\r\n\r\n```\r\n import geopandas\r\n path = geopandas.datasets.get_path('naturalearth_lowres')\r\n world = geopandas.read_file(path)\r\n world[world['name']=='France'].iso_a3\r\n```\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nScript that generates the included dataset 'naturalearth_lowres.shp'.\n\nRaw data: https://www.naturalearthdata.com/downloads/10m-cultural-vectors/10m-admin-0-countries/\nCurrent version used: version 4.1.0\n\"\"\" # noqa (E501 link is longer than max line length)\n\nimport geopandas as gpd\n\n# assumes zipfile from naturalearthdata was downloaded to current directory\nworld_raw = gpd.read_file(\"zip://./ne_110m_admin_0_countries.zip\")\n# subsets columns of interest for geopandas examples\nworld_df = world_raw[\n [\"POP_EST\", \"CONTINENT\", \"NAME\", \"ISO_A3\", \"GDP_MD_EST\", \"geometry\"]\n]\nworld_df.columns = world_df.columns.str.lower()\nworld_df.to_file(\n driver=\"ESRI Shapefile\", filename=\"./naturalearth_lowres/naturalearth_lowres.shp\"\n)\n", "path": "geopandas/datasets/naturalearth_creation.py"}]}
888
462
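Natural Earth uses the string `-99` as a "no usable ISO code" sentinel in `ISO_A3`, which is why France came back without `FRA`; the regenerated script backfills those rows from the always-populated `ADM0_A3` column for sovereign countries. The core of that backfill reduced to plain pandas, with a fabricated two-row frame standing in for the shapefile:

```python
import pandas as pd

world = pd.DataFrame({
    "NAME": ["France", "Canada"],
    "TYPE": ["Country", "Sovereign country"],
    "ISO_A3": ["-99", "CAN"],   # Natural Earth's "no code" sentinel
    "ADM0_A3": ["FRA", "CAN"],  # fallback column used by the fix
})

mask = world["ISO_A3"].eq("-99") & world["TYPE"].isin(["Sovereign country", "Country"])
world.loc[mask, "ISO_A3"] = world.loc[mask, "ADM0_A3"]

print(world[["NAME", "ISO_A3"]])  # France now carries FRA
```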
gh_patches_debug_18801
rasdani/github-patches
git_diff
TheAlgorithms__Python-338
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bug report for radix sort **Description** if the test case for [`radix_sort.py`](https://github.com/TheAlgorithms/Python/blob/master/sorts/radix_sort.py) is `[104, 203, 308, 401]`, the result would be `[401, 203, 104, 308]` It's wrong! The reason is that if the `tmp` is always `0` in one loop, it will exit the loop. In other words, If the same digit of all numbers is 0, then the result may be wrong. The similar example like: *Input*: `[2018, 33017, 24016]` *Output*: `[24016, 33017, 2018]` Wrong again!! **Suggestion** Do not use `maxLength` as a loop variable because the value of `maxLength` is related to `tmp`. I think that by finding the maximum value of the array and assigning it to `max_digit`, using another variable `digit` with an initial value of 1 as the loop variable, each loop `digit` is multiplied by 10, and exit the loops when the `digit` greater than `max_digit`, which can guarantee the correct number of loops. And the complexity will be O(nk + n) . n is the size of input list and k is the digit length of the number. </issue> <code> [start of sorts/radix_sort.py] 1 def radixsort(lst): 2 RADIX = 10 3 maxLength = False 4 tmp , placement = -1, 1 5 6 while not maxLength: 7 maxLength = True 8 # declare and initialize buckets 9 buckets = [list() for _ in range( RADIX )] 10 11 # split lst between lists 12 for i in lst: 13 tmp = int((i / placement) % RADIX) 14 buckets[tmp].append(i) 15 16 if maxLength and tmp > 0: 17 maxLength = False 18 19 # empty lists into lst array 20 a = 0 21 for b in range( RADIX ): 22 buck = buckets[b] 23 for i in buck: 24 lst[a] = i 25 a += 1 26 27 # move to next 28 placement *= RADIX 29 [end of sorts/radix_sort.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sorts/radix_sort.py b/sorts/radix_sort.py --- a/sorts/radix_sort.py +++ b/sorts/radix_sort.py @@ -1,28 +1,26 @@ def radixsort(lst): - RADIX = 10 - maxLength = False - tmp , placement = -1, 1 + RADIX = 10 + placement = 1 - while not maxLength: - maxLength = True - # declare and initialize buckets - buckets = [list() for _ in range( RADIX )] + # get the maximum number + max_digit = max(lst) - # split lst between lists - for i in lst: - tmp = int((i / placement) % RADIX) - buckets[tmp].append(i) + while placement < max_digit: + # declare and initialize buckets + buckets = [list() for _ in range( RADIX )] - if maxLength and tmp > 0: - maxLength = False + # split lst between lists + for i in lst: + tmp = int((i / placement) % RADIX) + buckets[tmp].append(i) - # empty lists into lst array - a = 0 - for b in range( RADIX ): - buck = buckets[b] - for i in buck: - lst[a] = i - a += 1 + # empty lists into lst array + a = 0 + for b in range( RADIX ): + buck = buckets[b] + for i in buck: + lst[a] = i + a += 1 - # move to next - placement *= RADIX + # move to next + placement *= RADIX
{"golden_diff": "diff --git a/sorts/radix_sort.py b/sorts/radix_sort.py\n--- a/sorts/radix_sort.py\n+++ b/sorts/radix_sort.py\n@@ -1,28 +1,26 @@\n def radixsort(lst):\n- RADIX = 10\n- maxLength = False\n- tmp , placement = -1, 1\n+ RADIX = 10\n+ placement = 1\n \n- while not maxLength:\n- maxLength = True\n- # declare and initialize buckets\n- buckets = [list() for _ in range( RADIX )]\n+ # get the maximum number\n+ max_digit = max(lst)\n \n- # split lst between lists\n- for i in lst:\n- tmp = int((i / placement) % RADIX)\n- buckets[tmp].append(i)\n+ while placement < max_digit:\n+ # declare and initialize buckets\n+ buckets = [list() for _ in range( RADIX )]\n \n- if maxLength and tmp > 0:\n- maxLength = False\n+ # split lst between lists\n+ for i in lst:\n+ tmp = int((i / placement) % RADIX)\n+ buckets[tmp].append(i)\n \n- # empty lists into lst array\n- a = 0\n- for b in range( RADIX ):\n- buck = buckets[b]\n- for i in buck:\n- lst[a] = i\n- a += 1\n+ # empty lists into lst array\n+ a = 0\n+ for b in range( RADIX ):\n+ buck = buckets[b]\n+ for i in buck:\n+ lst[a] = i\n+ a += 1\n \n- # move to next\n- placement *= RADIX\n+ # move to next\n+ placement *= RADIX\n", "issue": "Bug report for radix sort\n**Description**\r\n\r\nif the test case for [`radix_sort.py`](https://github.com/TheAlgorithms/Python/blob/master/sorts/radix_sort.py) is `[104, 203, 308, 401]`, the result would be `[401, 203, 104, 308]`\r\n\r\nIt's wrong!\r\n\r\nThe reason is that if the `tmp` is always `0` in one loop, it will exit the loop. In other words, If the same digit of all numbers is 0, then the result may be wrong. The similar example like:\r\n*Input*: `[2018, 33017, 24016]`\r\n*Output*: `[24016, 33017, 2018]`\r\nWrong again!!\r\n\r\n**Suggestion**\r\n\r\nDo not use `maxLength` as a loop variable because the value of `maxLength` is related to `tmp`.\r\n\r\nI think that by finding the maximum value of the array and assigning it to `max_digit`, using another variable `digit` with an initial value of 1 as the loop variable, each loop `digit` is multiplied by 10, and exit the loops when the `digit` greater than `max_digit`, which can guarantee the correct number of loops.\r\n\r\nAnd the complexity will be O(nk + n) . n is the size of input list and k is the digit length of the number.\n", "before_files": [{"content": "def radixsort(lst):\n RADIX = 10\n maxLength = False\n tmp , placement = -1, 1\n\n while not maxLength:\n maxLength = True\n # declare and initialize buckets\n buckets = [list() for _ in range( RADIX )]\n\n # split lst between lists\n for i in lst:\n tmp = int((i / placement) % RADIX)\n buckets[tmp].append(i)\n\n if maxLength and tmp > 0:\n maxLength = False\n\n # empty lists into lst array\n a = 0\n for b in range( RADIX ):\n buck = buckets[b]\n for i in buck:\n lst[a] = i\n a += 1\n\n # move to next\n placement *= RADIX\n", "path": "sorts/radix_sort.py"}]}
1,087
414
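The patched loop runs once per digit of `max(lst)` instead of terminating on the data-dependent `tmp` flag, which is what let an all-zero digit column cut the sort short. A standalone corrected version seeded with the issue's failing inputs; note the `<=` bound here, since a strict `<` as written in the diff would still skip the final pass when the maximum is an exact power of ten such as 100:

```python
def radixsort(lst):
    RADIX = 10
    if not lst:
        return lst
    placement = 1
    max_digit = max(lst)  # bounds the passes: one per digit of the maximum
    while placement <= max_digit:
        buckets = [[] for _ in range(RADIX)]
        for value in lst:
            buckets[(value // placement) % RADIX].append(value)
        i = 0
        for bucket in buckets:  # stable rebuild, bucket 0 first
            for value in bucket:
                lst[i] = value
                i += 1
        placement *= RADIX
    return lst


print(radixsort([104, 203, 308, 401]))  # [104, 203, 308, 401]
print(radixsort([2018, 33017, 24016]))  # [2018, 24016, 33017]
```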
gh_patches_debug_26262
rasdani/github-patches
git_diff
conan-io__conan-2927
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Version("6") < Version("6.0") == True Version model has a bug. Version("6") is considered lower than "6.0". It introduced a bug (fixed in #2885) with the default`std`. </issue> <code> [start of conans/model/version.py] 1 import re 2 3 4 class Version(str): 5 """ This is NOT an implementation of semver, as users may use any pattern in their versions. 6 It is just a helper to parse .-, and compare taking into account integers when possible 7 """ 8 version_pattern = re.compile('[.-]') 9 10 def __new__(cls, content): 11 return str.__new__(cls, content.strip()) 12 13 @property 14 def as_list(self): 15 if not hasattr(self, "_cached_list"): 16 tokens = self.rsplit('+', 1) 17 self._base = tokens[0] 18 if len(tokens) == 2: 19 self._build = tokens[1] 20 self._cached_list = [] 21 tokens = Version.version_pattern.split(tokens[0]) 22 for item in tokens: 23 self._cached_list.append(int(item) if item.isdigit() else item) 24 return self._cached_list 25 26 def major(self, fill=True): 27 self_list = self.as_list 28 if not isinstance(self_list[0], int): 29 return self._base 30 v = str(self_list[0]) if self_list else "0" 31 if fill: 32 return Version(".".join([v, 'Y', 'Z'])) 33 return Version(v) 34 35 def stable(self): 36 """ same as major, but as semver, 0.Y.Z is not considered 37 stable, so return it as is 38 """ 39 if self.as_list[0] == 0: 40 return self 41 return self.major() 42 43 def minor(self, fill=True): 44 self_list = self.as_list 45 if not isinstance(self_list[0], int): 46 return self._base 47 v0 = str(self_list[0]) if len(self_list) > 0 else "0" 48 v1 = str(self_list[1]) if len(self_list) > 1 else "0" 49 if fill: 50 return Version(".".join([v0, v1, 'Z'])) 51 return Version(".".join([v0, v1])) 52 53 def patch(self): 54 self_list = self.as_list 55 if not isinstance(self_list[0], int): 56 return self._base 57 v0 = str(self_list[0]) if len(self_list) > 0 else "0" 58 v1 = str(self_list[1]) if len(self_list) > 1 else "0" 59 v2 = str(self_list[2]) if len(self_list) > 2 else "0" 60 return Version(".".join([v0, v1, v2])) 61 62 def pre(self): 63 self_list = self.as_list 64 if not isinstance(self_list[0], int): 65 return self._base 66 v0 = str(self_list[0]) if len(self_list) > 0 else "0" 67 v1 = str(self_list[1]) if len(self_list) > 1 else "0" 68 v2 = str(self_list[2]) if len(self_list) > 2 else "0" 69 v = ".".join([v0, v1, v2]) 70 if len(self_list) > 3: 71 v += "-%s" % self_list[3] 72 return Version(v) 73 74 @property 75 def build(self): 76 if hasattr(self, "_build"): 77 return self._build 78 return "" 79 80 @property 81 def base(self): 82 self.as_list 83 return Version(self._base) 84 85 def compatible(self, other): 86 if not isinstance(other, Version): 87 other = Version(other) 88 for v1, v2 in zip(self.as_list, other.as_list): 89 if v1 in ["X", "Y", "Z"] or v2 in ["X", "Y", "Z"]: 90 return True 91 if v1 != v2: 92 return False 93 return True 94 95 def __cmp__(self, other): 96 if other is None: 97 return 1 98 if not isinstance(other, Version): 99 other = Version(other) 100 101 other_list = other.as_list 102 for ind, el in enumerate(self.as_list): 103 if ind + 1 > len(other_list): 104 if isinstance(el, int): 105 return 1 106 return -1 107 if not isinstance(el, int) and isinstance(other_list[ind], int): 108 # Version compare with 1.4.rc2 109 return -1 110 elif not isinstance(other_list[ind], int) and isinstance(el, int): 111 return 1 112 elif el == other_list[ind]: 113 continue 114 elif el > 
other_list[ind]: 115 return 1 116 else: 117 return -1 118 if len(other_list) > len(self.as_list): 119 return -1 120 else: 121 return 0 122 123 def __gt__(self, other): 124 return self.__cmp__(other) == 1 125 126 def __lt__(self, other): 127 return self.__cmp__(other) == -1 128 129 def __le__(self, other): 130 return self.__cmp__(other) in [0, -1] 131 132 def __ge__(self, other): 133 return self.__cmp__(other) in [0, 1] 134 [end of conans/model/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/conans/model/version.py b/conans/model/version.py --- a/conans/model/version.py +++ b/conans/model/version.py @@ -98,6 +98,18 @@ if not isinstance(other, Version): other = Version(other) + # Check equals + def get_el(a_list, index): + if len(a_list) - 1 < index: + return 0 # out of range, 4 == 4.0 == 4.0.0 + return a_list[index] + + equals = all(get_el(other.as_list, ind) == get_el(self.as_list, ind) + for ind in range(0, max(len(other.as_list), len(self.as_list)))) + if equals: + return 0 + + # Check greater than or less than other_list = other.as_list for ind, el in enumerate(self.as_list): if ind + 1 > len(other_list): @@ -117,8 +129,6 @@ return -1 if len(other_list) > len(self.as_list): return -1 - else: - return 0 def __gt__(self, other): return self.__cmp__(other) == 1 @@ -131,3 +141,12 @@ def __ge__(self, other): return self.__cmp__(other) in [0, 1] + + def __eq__(self, other): + return self.__cmp__(other) == 0 + + def __ne__(self, other): + return not self.__eq__(other) + + def __hash__(self): + return str.__hash__(self)
{"golden_diff": "diff --git a/conans/model/version.py b/conans/model/version.py\n--- a/conans/model/version.py\n+++ b/conans/model/version.py\n@@ -98,6 +98,18 @@\n if not isinstance(other, Version):\n other = Version(other)\n \n+ # Check equals\n+ def get_el(a_list, index):\n+ if len(a_list) - 1 < index:\n+ return 0 # out of range, 4 == 4.0 == 4.0.0\n+ return a_list[index]\n+\n+ equals = all(get_el(other.as_list, ind) == get_el(self.as_list, ind)\n+ for ind in range(0, max(len(other.as_list), len(self.as_list))))\n+ if equals:\n+ return 0\n+\n+ # Check greater than or less than\n other_list = other.as_list\n for ind, el in enumerate(self.as_list):\n if ind + 1 > len(other_list):\n@@ -117,8 +129,6 @@\n return -1\n if len(other_list) > len(self.as_list):\n return -1\n- else:\n- return 0\n \n def __gt__(self, other):\n return self.__cmp__(other) == 1\n@@ -131,3 +141,12 @@\n \n def __ge__(self, other):\n return self.__cmp__(other) in [0, 1]\n+\n+ def __eq__(self, other):\n+ return self.__cmp__(other) == 0\n+\n+ def __ne__(self, other):\n+ return not self.__eq__(other)\n+\n+ def __hash__(self):\n+ return str.__hash__(self)\n", "issue": "Version(\"6\") < Version(\"6.0\") == True \nVersion model has a bug.\r\nVersion(\"6\") is considered lower than \"6.0\". It introduced a bug (fixed in #2885) with the default`std`.\r\n\n", "before_files": [{"content": "import re\n\n\nclass Version(str):\n \"\"\" This is NOT an implementation of semver, as users may use any pattern in their versions.\n It is just a helper to parse .-, and compare taking into account integers when possible\n \"\"\"\n version_pattern = re.compile('[.-]')\n\n def __new__(cls, content):\n return str.__new__(cls, content.strip())\n\n @property\n def as_list(self):\n if not hasattr(self, \"_cached_list\"):\n tokens = self.rsplit('+', 1)\n self._base = tokens[0]\n if len(tokens) == 2:\n self._build = tokens[1]\n self._cached_list = []\n tokens = Version.version_pattern.split(tokens[0])\n for item in tokens:\n self._cached_list.append(int(item) if item.isdigit() else item)\n return self._cached_list\n\n def major(self, fill=True):\n self_list = self.as_list\n if not isinstance(self_list[0], int):\n return self._base\n v = str(self_list[0]) if self_list else \"0\"\n if fill:\n return Version(\".\".join([v, 'Y', 'Z']))\n return Version(v)\n\n def stable(self):\n \"\"\" same as major, but as semver, 0.Y.Z is not considered\n stable, so return it as is\n \"\"\"\n if self.as_list[0] == 0:\n return self\n return self.major()\n\n def minor(self, fill=True):\n self_list = self.as_list\n if not isinstance(self_list[0], int):\n return self._base\n v0 = str(self_list[0]) if len(self_list) > 0 else \"0\"\n v1 = str(self_list[1]) if len(self_list) > 1 else \"0\"\n if fill:\n return Version(\".\".join([v0, v1, 'Z']))\n return Version(\".\".join([v0, v1]))\n\n def patch(self):\n self_list = self.as_list\n if not isinstance(self_list[0], int):\n return self._base\n v0 = str(self_list[0]) if len(self_list) > 0 else \"0\"\n v1 = str(self_list[1]) if len(self_list) > 1 else \"0\"\n v2 = str(self_list[2]) if len(self_list) > 2 else \"0\"\n return Version(\".\".join([v0, v1, v2]))\n\n def pre(self):\n self_list = self.as_list\n if not isinstance(self_list[0], int):\n return self._base\n v0 = str(self_list[0]) if len(self_list) > 0 else \"0\"\n v1 = str(self_list[1]) if len(self_list) > 1 else \"0\"\n v2 = str(self_list[2]) if len(self_list) > 2 else \"0\"\n v = \".\".join([v0, v1, v2])\n if len(self_list) > 3:\n v += \"-%s\" % self_list[3]\n return 
Version(v)\n\n @property\n def build(self):\n if hasattr(self, \"_build\"):\n return self._build\n return \"\"\n\n @property\n def base(self):\n self.as_list\n return Version(self._base)\n\n def compatible(self, other):\n if not isinstance(other, Version):\n other = Version(other)\n for v1, v2 in zip(self.as_list, other.as_list):\n if v1 in [\"X\", \"Y\", \"Z\"] or v2 in [\"X\", \"Y\", \"Z\"]:\n return True\n if v1 != v2:\n return False\n return True\n\n def __cmp__(self, other):\n if other is None:\n return 1\n if not isinstance(other, Version):\n other = Version(other)\n\n other_list = other.as_list\n for ind, el in enumerate(self.as_list):\n if ind + 1 > len(other_list):\n if isinstance(el, int):\n return 1\n return -1\n if not isinstance(el, int) and isinstance(other_list[ind], int):\n # Version compare with 1.4.rc2\n return -1\n elif not isinstance(other_list[ind], int) and isinstance(el, int):\n return 1\n elif el == other_list[ind]:\n continue\n elif el > other_list[ind]:\n return 1\n else:\n return -1\n if len(other_list) > len(self.as_list):\n return -1\n else:\n return 0\n\n def __gt__(self, other):\n return self.__cmp__(other) == 1\n\n def __lt__(self, other):\n return self.__cmp__(other) == -1\n\n def __le__(self, other):\n return self.__cmp__(other) in [0, -1]\n\n def __ge__(self, other):\n return self.__cmp__(other) in [0, 1]\n", "path": "conans/model/version.py"}]}
1,990
388
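The root cause is that `__cmp__` ranked a shorter token list as smaller even when every missing component was a zero; the patch adds an explicit equality pass in which out-of-range components compare as `0`, then defines `__eq__`/`__ne__` and restores `str.__hash__` (declaring `__eq__` on the subclass would otherwise disable the inherited hash). The padding rule in isolation, as a reduced sketch:

```python
import re


def tokens(version):
    # Same tokenization idea as Version.as_list: split on '.' and '-'.
    return [int(t) if t.isdigit() else t for t in re.split(r"[.-]", version)]


def get_el(parts, index):
    return parts[index] if index < len(parts) else 0  # out of range -> 0


def versions_equal(a, b):
    ta, tb = tokens(a), tokens(b)
    return all(get_el(ta, i) == get_el(tb, i) for i in range(max(len(ta), len(tb))))


assert versions_equal("6", "6.0")
assert versions_equal("6", "6.0.0")
assert not versions_equal("6", "6.1")
print("6 == 6.0:", versions_equal("6", "6.0"))
```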
gh_patches_debug_25246
rasdani/github-patches
git_diff
DataDog__dd-trace-py-2734
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> IndexError in _wrap_get_create ### Which version of dd-trace-py are you using? 0.50.3 ### Which version of pip are you using? 20.3.3 ### Which version of the libraries are you using? `dogpile.cache==0.7.1` ### How can we reproduce your problem? Install redis if needed (on macOS): ``` brew install redis brew services start redis ``` Create a dogpile region cache: ``` from dogpile.cache import make_region region = make_region("foo", key_mangler=lambda key: '{}:{}:{}'.format("foo", "env", key)) \ .configure('dogpile.cache.redis', arguments=dict(url="redis://localhost:6379") ``` Call `get_or_create` using only kwargs: ``` return token_cache.get_or_create( key="cache_key", creator=lambda: make_value(foo, bar, buzz)) ``` Invoke your script via ddtrace-run: ``` ddtrace-run python3 test_ddtrace_and_dogpile.py ``` ### What is the result that you get? ``` [ERROR] IndexError: tuple index out of range Traceback (most recent call last): File "/var/lang/lib/python3.8/site-packages/datadog_lambda/wrapper.py", line 54, in __call__ return self.func(*args, **kwargs) File "/var/lang/lib/python3.8/site-packages/datadog_lambda/wrapper.py", line 124, in __call__ self.response = self.func(event, context, **kwargs) File "/var/task/handler.py", line 105, in execute_task executor = executor_getter(event) File "/var/task/handler.py", line 43, in get_step_executor executor = ETLStepExecutor(Worker.lambda_instance.value, **get_parameters_for_inference_pipeline(event)) File "/var/lang/lib/python3.8/site-packages/sondes/containers/executors/base.py", line 24, in __init__ client = get_third_party_client(kwargs["third_party_module_name"])( File "/var/task/caiso/client.py", line 17, in __init__ self.api = get_caiso_api_client( File "/var/task/caiso/utility/caiso_api_client.py", line 40, in get_caiso_api_client token = get_login_token(service_url, username, password, worker, environment, cache_host) File "/var/task/caiso/utility/caiso_api_client.py", line 32, in get_login_token return token_cache.get_or_create( File "/var/lang/lib/python3.8/site-packages/ddtrace/contrib/dogpile_cache/region.py", line 14, in _wrap_get_create key = args[0] ``` ### What is the result that you expected? 
No error </issue> <code> [start of ddtrace/contrib/dogpile_cache/region.py] 1 import dogpile 2 3 from ddtrace.ext import SpanTypes 4 5 from ...constants import SPAN_MEASURED_KEY 6 from ...pin import Pin 7 8 9 def _wrap_get_create(func, instance, args, kwargs): 10 pin = Pin.get_from(dogpile.cache) 11 if not pin or not pin.enabled(): 12 return func(*args, **kwargs) 13 14 key = args[0] 15 with pin.tracer.trace("dogpile.cache", resource="get_or_create", span_type=SpanTypes.CACHE) as span: 16 span.set_tag(SPAN_MEASURED_KEY) 17 span.set_tag("key", key) 18 span.set_tag("region", instance.name) 19 span.set_tag("backend", instance.actual_backend.__class__.__name__) 20 return func(*args, **kwargs) 21 22 23 def _wrap_get_create_multi(func, instance, args, kwargs): 24 pin = Pin.get_from(dogpile.cache) 25 if not pin or not pin.enabled(): 26 return func(*args, **kwargs) 27 28 keys = args[0] 29 with pin.tracer.trace("dogpile.cache", resource="get_or_create_multi", span_type="cache") as span: 30 span.set_tag(SPAN_MEASURED_KEY) 31 span.set_tag("keys", keys) 32 span.set_tag("region", instance.name) 33 span.set_tag("backend", instance.actual_backend.__class__.__name__) 34 return func(*args, **kwargs) 35 [end of ddtrace/contrib/dogpile_cache/region.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ddtrace/contrib/dogpile_cache/region.py b/ddtrace/contrib/dogpile_cache/region.py --- a/ddtrace/contrib/dogpile_cache/region.py +++ b/ddtrace/contrib/dogpile_cache/region.py @@ -4,6 +4,7 @@ from ...constants import SPAN_MEASURED_KEY from ...pin import Pin +from ...utils import get_argument_value def _wrap_get_create(func, instance, args, kwargs): @@ -11,7 +12,7 @@ if not pin or not pin.enabled(): return func(*args, **kwargs) - key = args[0] + key = get_argument_value(args, kwargs, 0, "key") with pin.tracer.trace("dogpile.cache", resource="get_or_create", span_type=SpanTypes.CACHE) as span: span.set_tag(SPAN_MEASURED_KEY) span.set_tag("key", key) @@ -25,7 +26,7 @@ if not pin or not pin.enabled(): return func(*args, **kwargs) - keys = args[0] + keys = get_argument_value(args, kwargs, 0, "keys") with pin.tracer.trace("dogpile.cache", resource="get_or_create_multi", span_type="cache") as span: span.set_tag(SPAN_MEASURED_KEY) span.set_tag("keys", keys)
{"golden_diff": "diff --git a/ddtrace/contrib/dogpile_cache/region.py b/ddtrace/contrib/dogpile_cache/region.py\n--- a/ddtrace/contrib/dogpile_cache/region.py\n+++ b/ddtrace/contrib/dogpile_cache/region.py\n@@ -4,6 +4,7 @@\n \n from ...constants import SPAN_MEASURED_KEY\n from ...pin import Pin\n+from ...utils import get_argument_value\n \n \n def _wrap_get_create(func, instance, args, kwargs):\n@@ -11,7 +12,7 @@\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n \n- key = args[0]\n+ key = get_argument_value(args, kwargs, 0, \"key\")\n with pin.tracer.trace(\"dogpile.cache\", resource=\"get_or_create\", span_type=SpanTypes.CACHE) as span:\n span.set_tag(SPAN_MEASURED_KEY)\n span.set_tag(\"key\", key)\n@@ -25,7 +26,7 @@\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n \n- keys = args[0]\n+ keys = get_argument_value(args, kwargs, 0, \"keys\")\n with pin.tracer.trace(\"dogpile.cache\", resource=\"get_or_create_multi\", span_type=\"cache\") as span:\n span.set_tag(SPAN_MEASURED_KEY)\n span.set_tag(\"keys\", keys)\n", "issue": "IndexError in _wrap_get_create\n### Which version of dd-trace-py are you using? 0.50.3\r\n\r\n### Which version of pip are you using? 20.3.3\r\n\r\n### Which version of the libraries are you using? `dogpile.cache==0.7.1`\r\n\r\n### How can we reproduce your problem?\r\n\r\nInstall redis if needed (on macOS):\r\n```\r\nbrew install redis\r\nbrew services start redis\r\n```\r\n\r\nCreate a dogpile region cache:\r\n```\r\nfrom dogpile.cache import make_region\r\nregion = make_region(\"foo\",\r\n key_mangler=lambda key: '{}:{}:{}'.format(\"foo\", \"env\", key)) \\\r\n .configure('dogpile.cache.redis', arguments=dict(url=\"redis://localhost:6379\")\r\n```\r\n\r\nCall `get_or_create` using only kwargs:\r\n\r\n```\r\n return token_cache.get_or_create(\r\n key=\"cache_key\", creator=lambda: make_value(foo, bar, buzz))\r\n\r\n```\r\n\r\nInvoke your script via ddtrace-run:\r\n\r\n```\r\nddtrace-run python3 test_ddtrace_and_dogpile.py\r\n```\r\n\r\n### What is the result that you get?\r\n\r\n```\r\n[ERROR] IndexError: tuple index out of range\r\nTraceback (most recent call last):\r\n File \"/var/lang/lib/python3.8/site-packages/datadog_lambda/wrapper.py\", line 54, in __call__\r\n return self.func(*args, **kwargs)\r\n File \"/var/lang/lib/python3.8/site-packages/datadog_lambda/wrapper.py\", line 124, in __call__\r\n self.response = self.func(event, context, **kwargs)\r\n File \"/var/task/handler.py\", line 105, in execute_task\r\n executor = executor_getter(event)\r\n File \"/var/task/handler.py\", line 43, in get_step_executor\r\n executor = ETLStepExecutor(Worker.lambda_instance.value, **get_parameters_for_inference_pipeline(event))\r\n File \"/var/lang/lib/python3.8/site-packages/sondes/containers/executors/base.py\", line 24, in __init__\r\n client = get_third_party_client(kwargs[\"third_party_module_name\"])(\r\n File \"/var/task/caiso/client.py\", line 17, in __init__\r\n self.api = get_caiso_api_client(\r\n File \"/var/task/caiso/utility/caiso_api_client.py\", line 40, in get_caiso_api_client\r\n token = get_login_token(service_url, username, password, worker, environment, cache_host)\r\n File \"/var/task/caiso/utility/caiso_api_client.py\", line 32, in get_login_token\r\n return token_cache.get_or_create(\r\n File \"/var/lang/lib/python3.8/site-packages/ddtrace/contrib/dogpile_cache/region.py\", line 14, in _wrap_get_create\r\n key = args[0]\r\n```\r\n\r\n### What is the result that you expected?\r\nNo error \n", "before_files": 
[{"content": "import dogpile\n\nfrom ddtrace.ext import SpanTypes\n\nfrom ...constants import SPAN_MEASURED_KEY\nfrom ...pin import Pin\n\n\ndef _wrap_get_create(func, instance, args, kwargs):\n pin = Pin.get_from(dogpile.cache)\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n\n key = args[0]\n with pin.tracer.trace(\"dogpile.cache\", resource=\"get_or_create\", span_type=SpanTypes.CACHE) as span:\n span.set_tag(SPAN_MEASURED_KEY)\n span.set_tag(\"key\", key)\n span.set_tag(\"region\", instance.name)\n span.set_tag(\"backend\", instance.actual_backend.__class__.__name__)\n return func(*args, **kwargs)\n\n\ndef _wrap_get_create_multi(func, instance, args, kwargs):\n pin = Pin.get_from(dogpile.cache)\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n\n keys = args[0]\n with pin.tracer.trace(\"dogpile.cache\", resource=\"get_or_create_multi\", span_type=\"cache\") as span:\n span.set_tag(SPAN_MEASURED_KEY)\n span.set_tag(\"keys\", keys)\n span.set_tag(\"region\", instance.name)\n span.set_tag(\"backend\", instance.actual_backend.__class__.__name__)\n return func(*args, **kwargs)\n", "path": "ddtrace/contrib/dogpile_cache/region.py"}]}
1,533
314
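
The dogpile record above reduces to one reusable pattern: fetching a call argument that may arrive either positionally or by keyword. Below is a minimal sketch of that pattern; the helper body is a guess at the behavior implied by the golden diff's `get_argument_value(args, kwargs, 0, "key")` call, not ddtrace's actual implementation.

```python
def get_argument_value(args, kwargs, position, name):
    """Hypothetical reimplementation: resolve an argument whether it was
    passed positionally at `position` or by keyword as `name`."""
    if name in kwargs:
        return kwargs[name]
    if position < len(args):
        return args[position]
    raise TypeError("missing argument: %s" % name)


def demo(*args, **kwargs):
    # The buggy wrapper did `args[0]`, which raises IndexError when the
    # caller uses keywords only and `args` is therefore empty.
    return get_argument_value(args, kwargs, 0, "key")


assert demo("cache_key") == "cache_key"      # positional call
assert demo(key="cache_key") == "cache_key"  # keyword-only call, the crash case
```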
gh_patches_debug_20132
rasdani/github-patches
git_diff
ipython__ipython-6827
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> /files/ only serving attachments https://github.com/ipython/ipython/pull/6624 changes the behavior of serving html files from '/files/'. Instead of displaying a html page, I get the option to download it as file. Before I got as a response: ``` Python import requests r = requests.get('http://localhost:8888/files/Untitled10.html') print(r.headers) {'Last-Modified': 'Sat, 23 Aug 2014 16:03:02 GMT', 'Set-Cookie': 'username-localhost-8888=; expires=Thu, 31 Oct 2013 22:14:47 GMT; Path=/', 'Content-Type': 'text/html', 'X-Frame-Options': 'SAMEORIGIN', 'Server': 'TornadoServer/4.0.2', 'Date': 'Fri, 31 Oct 2014 22:14:47 GMT', 'Content-Length': '237903', 'Accept-Ranges': 'bytes'} ``` Now I get ``` Python r = requests.get('http://localhost:8888/files/Untitled10.html') print(r.headers) {'Date': 'Fri, 31 Oct 2014 22:17:27 GMT', 'Transfer-Encoding': 'chunked', 'X-Frame-Options': 'SAMEORIGIN', 'Set-Cookie': 'username-localhost-8888=; expires=Thu, 31 Oct 2013 22:17:27 GMT; Path=/', 'Server': 'TornadoServer/4.0.2', 'Content-Type': 'text/html', 'Content-Disposition': 'attachment; filename="Untitled10.html"'} ``` Is this an intended new behavior ? </issue> <code> [start of IPython/html/files/handlers.py] 1 """Serve files directly from the ContentsManager.""" 2 3 # Copyright (c) IPython Development Team. 4 # Distributed under the terms of the Modified BSD License. 5 6 import os 7 import mimetypes 8 import json 9 import base64 10 11 from tornado import web 12 13 from IPython.html.base.handlers import IPythonHandler 14 15 class FilesHandler(IPythonHandler): 16 """serve files via ContentsManager""" 17 18 @web.authenticated 19 def get(self, path): 20 cm = self.settings['contents_manager'] 21 if cm.is_hidden(path): 22 self.log.info("Refusing to serve hidden file, via 404 Error") 23 raise web.HTTPError(404) 24 25 path, name = os.path.split(path) 26 model = cm.get_model(name, path) 27 28 if model['type'] == 'notebook': 29 self.set_header('Content-Type', 'application/json') 30 else: 31 cur_mime = mimetypes.guess_type(name)[0] 32 if cur_mime is not None: 33 self.set_header('Content-Type', cur_mime) 34 35 self.set_header('Content-Disposition','attachment; filename="%s"' % name) 36 37 if model['format'] == 'base64': 38 b64_bytes = model['content'].encode('ascii') 39 self.write(base64.decodestring(b64_bytes)) 40 elif model['format'] == 'json': 41 self.write(json.dumps(model['content'])) 42 else: 43 self.write(model['content']) 44 self.flush() 45 46 default_handlers = [ 47 (r"/files/(.*)", FilesHandler), 48 ] [end of IPython/html/files/handlers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/IPython/html/files/handlers.py b/IPython/html/files/handlers.py --- a/IPython/html/files/handlers.py +++ b/IPython/html/files/handlers.py @@ -24,7 +24,10 @@ path, name = os.path.split(path) model = cm.get_model(name, path) - + + if self.get_argument("download", False): + self.set_header('Content-Disposition','attachment; filename="%s"' % name) + if model['type'] == 'notebook': self.set_header('Content-Type', 'application/json') else: @@ -32,8 +35,6 @@ if cur_mime is not None: self.set_header('Content-Type', cur_mime) - self.set_header('Content-Disposition','attachment; filename="%s"' % name) - if model['format'] == 'base64': b64_bytes = model['content'].encode('ascii') self.write(base64.decodestring(b64_bytes))
{"golden_diff": "diff --git a/IPython/html/files/handlers.py b/IPython/html/files/handlers.py\n--- a/IPython/html/files/handlers.py\n+++ b/IPython/html/files/handlers.py\n@@ -24,7 +24,10 @@\n \n path, name = os.path.split(path)\n model = cm.get_model(name, path)\n-\n+ \n+ if self.get_argument(\"download\", False):\n+ self.set_header('Content-Disposition','attachment; filename=\"%s\"' % name)\n+ \n if model['type'] == 'notebook':\n self.set_header('Content-Type', 'application/json')\n else:\n@@ -32,8 +35,6 @@\n if cur_mime is not None:\n self.set_header('Content-Type', cur_mime)\n \n- self.set_header('Content-Disposition','attachment; filename=\"%s\"' % name)\n-\n if model['format'] == 'base64':\n b64_bytes = model['content'].encode('ascii')\n self.write(base64.decodestring(b64_bytes))\n", "issue": "/files/ only serving attachments\nhttps://github.com/ipython/ipython/pull/6624 changes the behavior of serving html files from '/files/'.\nInstead of displaying a html page, I get the option to download it as file.\n\nBefore I got as a response:\n\n``` Python\nimport requests\nr = requests.get('http://localhost:8888/files/Untitled10.html')\nprint(r.headers)\n\n{'Last-Modified': 'Sat, 23 Aug 2014 16:03:02 GMT', \n'Set-Cookie': 'username-localhost-8888=; expires=Thu, 31 Oct 2013 22:14:47 GMT; Path=/',\n'Content-Type': 'text/html', 'X-Frame-Options': 'SAMEORIGIN', \n'Server': 'TornadoServer/4.0.2', 'Date': 'Fri, 31 Oct 2014 22:14:47 GMT', \n'Content-Length': '237903', 'Accept-Ranges': 'bytes'}\n```\n\nNow I get\n\n``` Python\nr = requests.get('http://localhost:8888/files/Untitled10.html')\nprint(r.headers)\n\n{'Date': 'Fri, 31 Oct 2014 22:17:27 GMT', \n'Transfer-Encoding': 'chunked', 'X-Frame-Options': 'SAMEORIGIN', \n'Set-Cookie': 'username-localhost-8888=; expires=Thu, 31 Oct 2013 22:17:27 GMT; Path=/',\n 'Server': 'TornadoServer/4.0.2', 'Content-Type': 'text/html', \n'Content-Disposition': 'attachment; filename=\"Untitled10.html\"'}\n```\n\nIs this an intended new behavior ?\n\n", "before_files": [{"content": "\"\"\"Serve files directly from the ContentsManager.\"\"\"\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport os\nimport mimetypes\nimport json\nimport base64\n\nfrom tornado import web\n\nfrom IPython.html.base.handlers import IPythonHandler\n\nclass FilesHandler(IPythonHandler):\n \"\"\"serve files via ContentsManager\"\"\"\n\n @web.authenticated\n def get(self, path):\n cm = self.settings['contents_manager']\n if cm.is_hidden(path):\n self.log.info(\"Refusing to serve hidden file, via 404 Error\")\n raise web.HTTPError(404)\n\n path, name = os.path.split(path)\n model = cm.get_model(name, path)\n\n if model['type'] == 'notebook':\n self.set_header('Content-Type', 'application/json')\n else:\n cur_mime = mimetypes.guess_type(name)[0]\n if cur_mime is not None:\n self.set_header('Content-Type', cur_mime)\n \n self.set_header('Content-Disposition','attachment; filename=\"%s\"' % name)\n\n if model['format'] == 'base64':\n b64_bytes = model['content'].encode('ascii')\n self.write(base64.decodestring(b64_bytes))\n elif model['format'] == 'json':\n self.write(json.dumps(model['content']))\n else:\n self.write(model['content'])\n self.flush()\n\ndefault_handlers = [\n (r\"/files/(.*)\", FilesHandler),\n]", "path": "IPython/html/files/handlers.py"}]}
1,373
230
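
The IPython diff above hinges on setting `Content-Disposition` only when the client asks for it. A condensed sketch of that behavior in a bare Tornado handler follows; the `download` argument name comes from the diff, while the handler name and body text are placeholders.

```python
import mimetypes

from tornado import web


class FilesSketchHandler(web.RequestHandler):
    """Serve a file inline by default; force a download only on request."""

    def get(self, name):
        if self.get_argument("download", False):
            # Only an explicit ?download=... query triggers the save dialog.
            self.set_header("Content-Disposition",
                            'attachment; filename="%s"' % name)
        mime = mimetypes.guess_type(name)[0]
        if mime is not None:
            self.set_header("Content-Type", mime)
        self.write("...file body would be written here...")
```

With this shape, `GET /files/page.html` renders inline and `GET /files/page.html?download=1` prompts a save, matching the before/after headers quoted in the issue.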
gh_patches_debug_1323
rasdani/github-patches
git_diff
ivy-llc__ivy-22920
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> eigvals </issue> <code> [start of ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py] 1 # local 2 import ivy 3 from ivy.functional.frontends.numpy.func_wrapper import ( 4 to_ivy_arrays_and_back, 5 from_zero_dim_arrays_to_scalar, 6 ) 7 8 9 @to_ivy_arrays_and_back 10 def eig(a): 11 return ivy.eig(a) 12 13 14 @to_ivy_arrays_and_back 15 @from_zero_dim_arrays_to_scalar 16 def eigh(a, /, UPLO="L"): 17 return ivy.eigh(a, UPLO=UPLO) 18 19 20 @to_ivy_arrays_and_back 21 @from_zero_dim_arrays_to_scalar 22 def eigvalsh(a, /, UPLO="L"): 23 return ivy.eigvalsh(a, UPLO=UPLO) 24 [end of ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py b/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py --- a/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py +++ b/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py @@ -17,6 +17,11 @@ return ivy.eigh(a, UPLO=UPLO) +@to_ivy_arrays_and_back +def eigvals(a): + return ivy.eig(a)[0] + + @to_ivy_arrays_and_back @from_zero_dim_arrays_to_scalar def eigvalsh(a, /, UPLO="L"):
{"golden_diff": "diff --git a/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py b/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py\n--- a/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py\n+++ b/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py\n@@ -17,6 +17,11 @@\n return ivy.eigh(a, UPLO=UPLO)\n \n \n+@to_ivy_arrays_and_back\n+def eigvals(a):\n+ return ivy.eig(a)[0]\n+\n+\n @to_ivy_arrays_and_back\n @from_zero_dim_arrays_to_scalar\n def eigvalsh(a, /, UPLO=\"L\"):\n", "issue": "eigvals\n\n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.functional.frontends.numpy.func_wrapper import (\n to_ivy_arrays_and_back,\n from_zero_dim_arrays_to_scalar,\n)\n\n\n@to_ivy_arrays_and_back\ndef eig(a):\n return ivy.eig(a)\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef eigh(a, /, UPLO=\"L\"):\n return ivy.eigh(a, UPLO=UPLO)\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef eigvalsh(a, /, UPLO=\"L\"):\n return ivy.eigvalsh(a, UPLO=UPLO)\n", "path": "ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py"}]}
749
162
gh_patches_debug_37040
rasdani/github-patches
git_diff
opsdroid__opsdroid-522
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Switch CLI to use click I recently discovered [`click`](http://click.pocoo.org/5/) which makes adding command line options and arguments to your python application super simple and much more robust. We should look at replacing the [current argparse code](https://github.com/opsdroid/opsdroid/blob/master/opsdroid/__main__.py#L88) with `click`! </issue> <code> [start of opsdroid/__main__.py] 1 """Starts opsdroid.""" 2 3 import os 4 import sys 5 import logging 6 import argparse 7 import gettext 8 9 from opsdroid.core import OpsDroid 10 from opsdroid.const import DEFAULT_LOG_FILENAME, EXAMPLE_CONFIG_FILE,\ 11 DEFAULT_LANGUAGE, LOCALE_DIR 12 from opsdroid.web import Web 13 14 15 gettext.install('opsdroid') 16 _LOGGER = logging.getLogger("opsdroid") 17 18 19 def configure_lang(config): 20 """Configure app language based on user config.""" 21 lang_code = config.get("lang", DEFAULT_LANGUAGE) 22 if lang_code != DEFAULT_LANGUAGE: 23 lang = gettext.translation( 24 'opsdroid', LOCALE_DIR, (lang_code,), fallback=True) 25 lang.install() 26 27 28 def configure_logging(config): 29 """Configure the root logger based on user config.""" 30 rootlogger = logging.getLogger() 31 while rootlogger.handlers: 32 rootlogger.handlers.pop() 33 34 try: 35 if config["logging"]["path"]: 36 logfile_path = os.path.expanduser(config["logging"]["path"]) 37 else: 38 logfile_path = config["logging"]["path"] 39 except KeyError: 40 logfile_path = DEFAULT_LOG_FILENAME 41 42 try: 43 log_level = get_logging_level( 44 config["logging"]["level"]) 45 except KeyError: 46 log_level = logging.INFO 47 48 rootlogger.setLevel(log_level) 49 formatter = logging.Formatter('%(levelname)s %(name)s: %(message)s') 50 51 console_handler = logging.StreamHandler() 52 console_handler.setLevel(log_level) 53 console_handler.setFormatter(formatter) 54 rootlogger.addHandler(console_handler) 55 56 try: 57 if not config["logging"]["console"]: 58 console_handler.setLevel(logging.CRITICAL) 59 except KeyError: 60 pass 61 62 if logfile_path: 63 logdir = os.path.dirname(os.path.realpath(logfile_path)) 64 if not os.path.isdir(logdir): 65 os.makedirs(logdir) 66 file_handler = logging.FileHandler(logfile_path) 67 file_handler.setLevel(log_level) 68 file_handler.setFormatter(formatter) 69 rootlogger.addHandler(file_handler) 70 _LOGGER.info("="*40) 71 _LOGGER.info(_("Started application")) 72 73 74 def get_logging_level(logging_level): 75 """Get the logger level based on the user configuration.""" 76 if logging_level == 'critical': 77 return logging.CRITICAL 78 elif logging_level == 'error': 79 return logging.ERROR 80 elif logging_level == 'warning': 81 return logging.WARNING 82 elif logging_level == 'debug': 83 return logging.DEBUG 84 85 return logging.INFO 86 87 88 def parse_args(args): 89 """Parse command line arguments.""" 90 parser = argparse.ArgumentParser(description='Run opsdroid.') 91 parser.add_argument('--gen-config', action="store_true", 92 help='prints out an example configuration file') 93 return parser.parse_args(args) 94 95 96 def check_dependencies(): 97 """Check for system dependencies required by opsdroid.""" 98 if sys.version_info.major < 3 or sys.version_info.minor < 5: 99 logging.critical(_("Whoops! 
opsdroid requires python 3.5 or above.")) 100 sys.exit(1) 101 102 103 def welcome_message(config): 104 """Add welcome message if set to true in configuration.""" 105 try: 106 if config['welcome-message']: 107 _LOGGER.info("=" * 40) 108 _LOGGER.info(_("You can customise your opsdroid by modifying " 109 "your configuration.yaml")) 110 _LOGGER.info(_("Read more at: " 111 "http://opsdroid.readthedocs.io/#configuration")) 112 _LOGGER.info(_("Watch the Get Started Videos at: " 113 "http://bit.ly/2fnC0Fh")) 114 _LOGGER.info(_("Install Opsdroid Desktop at: \n" 115 "https://github.com/opsdroid/opsdroid-desktop/" 116 "releases")) 117 _LOGGER.info("=" * 40) 118 except KeyError: 119 _LOGGER.warning(_("'welcome-message: true/false' is missing in " 120 "configuration.yaml")) 121 122 123 def main(): 124 """Parse the args and then start the application.""" 125 args = parse_args(sys.argv[1:]) 126 127 if args.gen_config: 128 with open(EXAMPLE_CONFIG_FILE, 'r') as conf: 129 print(conf.read()) 130 sys.exit(0) 131 132 check_dependencies() 133 134 with OpsDroid() as opsdroid: 135 opsdroid.load() 136 configure_lang(opsdroid.config) 137 configure_logging(opsdroid.config) 138 welcome_message(opsdroid.config) 139 opsdroid.web_server = Web(opsdroid) 140 opsdroid.start_loop() 141 142 143 def init(): 144 """Enter the application.""" 145 if __name__ == "__main__": 146 main() 147 148 149 init() 150 [end of opsdroid/__main__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/opsdroid/__main__.py b/opsdroid/__main__.py --- a/opsdroid/__main__.py +++ b/opsdroid/__main__.py @@ -3,12 +3,13 @@ import os import sys import logging -import argparse import gettext +import click + from opsdroid.core import OpsDroid -from opsdroid.const import DEFAULT_LOG_FILENAME, EXAMPLE_CONFIG_FILE,\ - DEFAULT_LANGUAGE, LOCALE_DIR +from opsdroid.const import __version__, DEFAULT_LOG_FILENAME, \ + EXAMPLE_CONFIG_FILE, DEFAULT_LANGUAGE, LOCALE_DIR from opsdroid.web import Web @@ -85,14 +86,6 @@ return logging.INFO -def parse_args(args): - """Parse command line arguments.""" - parser = argparse.ArgumentParser(description='Run opsdroid.') - parser.add_argument('--gen-config', action="store_true", - help='prints out an example configuration file') - return parser.parse_args(args) - - def check_dependencies(): """Check for system dependencies required by opsdroid.""" if sys.version_info.major < 3 or sys.version_info.minor < 5: @@ -100,6 +93,23 @@ sys.exit(1) +def print_version(ctx, param, value): + """Print out the version of opsdroid that is installed.""" + if not value or ctx.resilient_parsing: + return + click.echo('opsdroid v{version}'.format(version=__version__)) + ctx.exit(0) + + +def print_example_config(ctx, param, value): + """Print out the example config.""" + if not value or ctx.resilient_parsing: + return + with open(EXAMPLE_CONFIG_FILE, 'r') as conf: + click.echo(conf.read()) + ctx.exit(0) + + def welcome_message(config): """Add welcome message if set to true in configuration.""" try: @@ -120,15 +130,19 @@ "configuration.yaml")) [email protected]() [email protected]('--gen-config', is_flag=True, callback=print_example_config, + expose_value=False, default=False, + help='Print an example config and exit.') [email protected]('--version', '-v', is_flag=True, callback=print_version, + expose_value=False, default=False, is_eager=True, + help='Print the version and exit.') def main(): - """Parse the args and then start the application.""" - args = parse_args(sys.argv[1:]) - - if args.gen_config: - with open(EXAMPLE_CONFIG_FILE, 'r') as conf: - print(conf.read()) - sys.exit(0) + """Opsdroid is a chat bot framework written in Python. + It is designed to be extendable, scalable and simple. + See https://opsdroid.github.io/ for more information. + """ check_dependencies() with OpsDroid() as opsdroid:
{"golden_diff": "diff --git a/opsdroid/__main__.py b/opsdroid/__main__.py\n--- a/opsdroid/__main__.py\n+++ b/opsdroid/__main__.py\n@@ -3,12 +3,13 @@\n import os\n import sys\n import logging\n-import argparse\n import gettext\n \n+import click\n+\n from opsdroid.core import OpsDroid\n-from opsdroid.const import DEFAULT_LOG_FILENAME, EXAMPLE_CONFIG_FILE,\\\n- DEFAULT_LANGUAGE, LOCALE_DIR\n+from opsdroid.const import __version__, DEFAULT_LOG_FILENAME, \\\n+ EXAMPLE_CONFIG_FILE, DEFAULT_LANGUAGE, LOCALE_DIR\n from opsdroid.web import Web\n \n \n@@ -85,14 +86,6 @@\n return logging.INFO\n \n \n-def parse_args(args):\n- \"\"\"Parse command line arguments.\"\"\"\n- parser = argparse.ArgumentParser(description='Run opsdroid.')\n- parser.add_argument('--gen-config', action=\"store_true\",\n- help='prints out an example configuration file')\n- return parser.parse_args(args)\n-\n-\n def check_dependencies():\n \"\"\"Check for system dependencies required by opsdroid.\"\"\"\n if sys.version_info.major < 3 or sys.version_info.minor < 5:\n@@ -100,6 +93,23 @@\n sys.exit(1)\n \n \n+def print_version(ctx, param, value):\n+ \"\"\"Print out the version of opsdroid that is installed.\"\"\"\n+ if not value or ctx.resilient_parsing:\n+ return\n+ click.echo('opsdroid v{version}'.format(version=__version__))\n+ ctx.exit(0)\n+\n+\n+def print_example_config(ctx, param, value):\n+ \"\"\"Print out the example config.\"\"\"\n+ if not value or ctx.resilient_parsing:\n+ return\n+ with open(EXAMPLE_CONFIG_FILE, 'r') as conf:\n+ click.echo(conf.read())\n+ ctx.exit(0)\n+\n+\n def welcome_message(config):\n \"\"\"Add welcome message if set to true in configuration.\"\"\"\n try:\n@@ -120,15 +130,19 @@\n \"configuration.yaml\"))\n \n \[email protected]()\[email protected]('--gen-config', is_flag=True, callback=print_example_config,\n+ expose_value=False, default=False,\n+ help='Print an example config and exit.')\[email protected]('--version', '-v', is_flag=True, callback=print_version,\n+ expose_value=False, default=False, is_eager=True,\n+ help='Print the version and exit.')\n def main():\n- \"\"\"Parse the args and then start the application.\"\"\"\n- args = parse_args(sys.argv[1:])\n-\n- if args.gen_config:\n- with open(EXAMPLE_CONFIG_FILE, 'r') as conf:\n- print(conf.read())\n- sys.exit(0)\n+ \"\"\"Opsdroid is a chat bot framework written in Python.\n \n+ It is designed to be extendable, scalable and simple.\n+ See https://opsdroid.github.io/ for more information.\n+ \"\"\"\n check_dependencies()\n \n with OpsDroid() as opsdroid:\n", "issue": "Switch CLI to use click\nI recently discovered [`click`](http://click.pocoo.org/5/) which makes adding command line options and arguments to your python application super simple and much more robust.\r\n\r\nWe should look at replacing the [current argparse code](https://github.com/opsdroid/opsdroid/blob/master/opsdroid/__main__.py#L88) with `click`!\n", "before_files": [{"content": "\"\"\"Starts opsdroid.\"\"\"\n\nimport os\nimport sys\nimport logging\nimport argparse\nimport gettext\n\nfrom opsdroid.core import OpsDroid\nfrom opsdroid.const import DEFAULT_LOG_FILENAME, EXAMPLE_CONFIG_FILE,\\\n DEFAULT_LANGUAGE, LOCALE_DIR\nfrom opsdroid.web import Web\n\n\ngettext.install('opsdroid')\n_LOGGER = logging.getLogger(\"opsdroid\")\n\n\ndef configure_lang(config):\n \"\"\"Configure app language based on user config.\"\"\"\n lang_code = config.get(\"lang\", DEFAULT_LANGUAGE)\n if lang_code != DEFAULT_LANGUAGE:\n lang = gettext.translation(\n 'opsdroid', LOCALE_DIR, (lang_code,), 
fallback=True)\n lang.install()\n\n\ndef configure_logging(config):\n \"\"\"Configure the root logger based on user config.\"\"\"\n rootlogger = logging.getLogger()\n while rootlogger.handlers:\n rootlogger.handlers.pop()\n\n try:\n if config[\"logging\"][\"path\"]:\n logfile_path = os.path.expanduser(config[\"logging\"][\"path\"])\n else:\n logfile_path = config[\"logging\"][\"path\"]\n except KeyError:\n logfile_path = DEFAULT_LOG_FILENAME\n\n try:\n log_level = get_logging_level(\n config[\"logging\"][\"level\"])\n except KeyError:\n log_level = logging.INFO\n\n rootlogger.setLevel(log_level)\n formatter = logging.Formatter('%(levelname)s %(name)s: %(message)s')\n\n console_handler = logging.StreamHandler()\n console_handler.setLevel(log_level)\n console_handler.setFormatter(formatter)\n rootlogger.addHandler(console_handler)\n\n try:\n if not config[\"logging\"][\"console\"]:\n console_handler.setLevel(logging.CRITICAL)\n except KeyError:\n pass\n\n if logfile_path:\n logdir = os.path.dirname(os.path.realpath(logfile_path))\n if not os.path.isdir(logdir):\n os.makedirs(logdir)\n file_handler = logging.FileHandler(logfile_path)\n file_handler.setLevel(log_level)\n file_handler.setFormatter(formatter)\n rootlogger.addHandler(file_handler)\n _LOGGER.info(\"=\"*40)\n _LOGGER.info(_(\"Started application\"))\n\n\ndef get_logging_level(logging_level):\n \"\"\"Get the logger level based on the user configuration.\"\"\"\n if logging_level == 'critical':\n return logging.CRITICAL\n elif logging_level == 'error':\n return logging.ERROR\n elif logging_level == 'warning':\n return logging.WARNING\n elif logging_level == 'debug':\n return logging.DEBUG\n\n return logging.INFO\n\n\ndef parse_args(args):\n \"\"\"Parse command line arguments.\"\"\"\n parser = argparse.ArgumentParser(description='Run opsdroid.')\n parser.add_argument('--gen-config', action=\"store_true\",\n help='prints out an example configuration file')\n return parser.parse_args(args)\n\n\ndef check_dependencies():\n \"\"\"Check for system dependencies required by opsdroid.\"\"\"\n if sys.version_info.major < 3 or sys.version_info.minor < 5:\n logging.critical(_(\"Whoops! opsdroid requires python 3.5 or above.\"))\n sys.exit(1)\n\n\ndef welcome_message(config):\n \"\"\"Add welcome message if set to true in configuration.\"\"\"\n try:\n if config['welcome-message']:\n _LOGGER.info(\"=\" * 40)\n _LOGGER.info(_(\"You can customise your opsdroid by modifying \"\n \"your configuration.yaml\"))\n _LOGGER.info(_(\"Read more at: \"\n \"http://opsdroid.readthedocs.io/#configuration\"))\n _LOGGER.info(_(\"Watch the Get Started Videos at: \"\n \"http://bit.ly/2fnC0Fh\"))\n _LOGGER.info(_(\"Install Opsdroid Desktop at: \\n\"\n \"https://github.com/opsdroid/opsdroid-desktop/\"\n \"releases\"))\n _LOGGER.info(\"=\" * 40)\n except KeyError:\n _LOGGER.warning(_(\"'welcome-message: true/false' is missing in \"\n \"configuration.yaml\"))\n\n\ndef main():\n \"\"\"Parse the args and then start the application.\"\"\"\n args = parse_args(sys.argv[1:])\n\n if args.gen_config:\n with open(EXAMPLE_CONFIG_FILE, 'r') as conf:\n print(conf.read())\n sys.exit(0)\n\n check_dependencies()\n\n with OpsDroid() as opsdroid:\n opsdroid.load()\n configure_lang(opsdroid.config)\n configure_logging(opsdroid.config)\n welcome_message(opsdroid.config)\n opsdroid.web_server = Web(opsdroid)\n opsdroid.start_loop()\n\n\ndef init():\n \"\"\"Enter the application.\"\"\"\n if __name__ == \"__main__\":\n main()\n\n\ninit()\n", "path": "opsdroid/__main__.py"}]}
1,936
673
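
The opsdroid diff above is built on click's eager callbacks, which let print-and-exit flags run during option parsing, before the command body. A pared-down sketch of that mechanism; the `--gen-config` flag name follows the diff, and the echoed config text is a placeholder.

```python
import click


def print_example_config(ctx, param, value):
    """Eager callback: fires while options are parsed, before main()."""
    if not value or ctx.resilient_parsing:
        return
    click.echo("# an example configuration file would be printed here")
    ctx.exit(0)


@click.command()
@click.option("--gen-config", is_flag=True, callback=print_example_config,
              expose_value=False, is_eager=True,
              help="Print an example config and exit.")
def main():
    """Reached only when no eager flag short-circuited the startup."""
    click.echo("starting the application...")


if __name__ == "__main__":
    main()
```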
gh_patches_debug_6492
rasdani/github-patches
git_diff
mozilla__kitsune-2981
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add whitenoise and serve static files from the app Add whitenoise to serve static files in the dev (and stage?) instances. In production we'll probably go with another more efficient approach which is being investigated in #2949. Whitenoise activation should be configurable via the `ENABLE_WHITENOISE` env variable. </issue> <code> [start of wsgi/app.py] 1 """ 2 WSGI config for kitsune project. 3 4 It exposes the WSGI callable as a module-level variable named ``application``. 5 6 For more information on this file, see 7 https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/ 8 """ 9 import os 10 os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'kitsune.settings') # NOQA 11 12 from django.core.wsgi import get_wsgi_application 13 14 import newrelic.agent 15 from decouple import config 16 from raven.contrib.django.raven_compat.middleware.wsgi import Sentry 17 18 # For django-celery 19 os.environ['CELERY_LOADER'] = 'django' 20 21 application = get_wsgi_application() 22 application = Sentry(application) 23 24 # Add NewRelic 25 newrelic_ini = config('NEW_RELIC_CONFIG_FILE', default='newrelic.ini') 26 newrelic_license_key = config('NEW_RELIC_LICENSE_KEY', default=None) 27 if newrelic_ini and newrelic_license_key: 28 newrelic.agent.initialize(newrelic_ini) 29 application = newrelic.agent.wsgi_application()(application) 30 [end of wsgi/app.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/wsgi/app.py b/wsgi/app.py --- a/wsgi/app.py +++ b/wsgi/app.py @@ -21,6 +21,10 @@ application = get_wsgi_application() application = Sentry(application) +if config('ENABLE_WHITENOISE', default=False, cast=bool): + from whitenoise.django import DjangoWhiteNoise + application = DjangoWhiteNoise(application) + # Add NewRelic newrelic_ini = config('NEW_RELIC_CONFIG_FILE', default='newrelic.ini') newrelic_license_key = config('NEW_RELIC_LICENSE_KEY', default=None)
{"golden_diff": "diff --git a/wsgi/app.py b/wsgi/app.py\n--- a/wsgi/app.py\n+++ b/wsgi/app.py\n@@ -21,6 +21,10 @@\n application = get_wsgi_application()\n application = Sentry(application)\n \n+if config('ENABLE_WHITENOISE', default=False, cast=bool):\n+ from whitenoise.django import DjangoWhiteNoise\n+ application = DjangoWhiteNoise(application)\n+\n # Add NewRelic\n newrelic_ini = config('NEW_RELIC_CONFIG_FILE', default='newrelic.ini')\n newrelic_license_key = config('NEW_RELIC_LICENSE_KEY', default=None)\n", "issue": "Add whitenoise and serve static files from the app\nAdd whitenoise to serve static files in the dev (and stage?) instances. \r\n\r\nIn production we'll probably go with another more efficient approach which is being investigated in #2949.\r\n\r\nWhitenoise activation should be configurable via the `ENABLE_WHITENOISE` env variable.\n", "before_files": [{"content": "\"\"\"\nWSGI config for kitsune project.\n\nIt exposes the WSGI callable as a module-level variable named ``application``.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/\n\"\"\"\nimport os\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'kitsune.settings') # NOQA\n\nfrom django.core.wsgi import get_wsgi_application\n\nimport newrelic.agent\nfrom decouple import config\nfrom raven.contrib.django.raven_compat.middleware.wsgi import Sentry\n\n# For django-celery\nos.environ['CELERY_LOADER'] = 'django'\n\napplication = get_wsgi_application()\napplication = Sentry(application)\n\n# Add NewRelic\nnewrelic_ini = config('NEW_RELIC_CONFIG_FILE', default='newrelic.ini')\nnewrelic_license_key = config('NEW_RELIC_LICENSE_KEY', default=None)\nif newrelic_ini and newrelic_license_key:\n newrelic.agent.initialize(newrelic_ini)\n application = newrelic.agent.wsgi_application()(application)\n", "path": "wsgi/app.py"}]}
888
137
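
The kitsune diff wraps the WSGI callable behind an environment toggle. The sketch below repeats that wiring; the `whitenoise.django.DjangoWhiteNoise` import path is the one used in the diff (WhiteNoise has since renamed its entry points, so this reflects that era), and the settings module name is a placeholder.

```python
import os

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "myproject.settings")  # placeholder

from django.core.wsgi import get_wsgi_application
from decouple import config

application = get_wsgi_application()

# Static file serving is opt-in, controlled purely by ENABLE_WHITENOISE.
if config("ENABLE_WHITENOISE", default=False, cast=bool):
    from whitenoise.django import DjangoWhiteNoise  # pre-4.0 import path
    application = DjangoWhiteNoise(application)
```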
gh_patches_debug_14733
rasdani/github-patches
git_diff
mathesar-foundation__mathesar-3271
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add openAPI Specification for /database_roles/ endpoint Generate spec for `database_roles` endpoint </issue> <code> [start of config/settings/openapi.py] 1 def custom_preprocessing_hook(endpoints): 2 filtered = [] 3 for (path, path_regex, method, callback) in endpoints: 4 # Remove all but DRF API endpoints 5 if path.startswith("/api/db/v0/databases/") or path.startswith("/api/db/v0/data_files/") or path.startswith("/api/db/v0/schemas/") or path.startswith("/api/db/v0/tables/"): 6 filtered.append((path, path_regex, method, callback)) 7 return filtered 8 9 10 def remove_url_prefix_hook(result, **kwargs): 11 # Remove namespace and version URL prefix from the operation Id of the generated API schema 12 for path, path_info in result['paths'].items(): 13 for method, operation in path_info.items(): 14 operation_id = operation.get('operationId') 15 if operation_id: 16 if path.startswith('/api/db/v0/'): 17 operation['operationId'] = operation_id.replace('db_v0_', '') 18 elif path.startswith('/api/ui/v0/'): 19 operation['operationId'] = operation_id.replace('ui_v0_', '') 20 21 return result 22 [end of config/settings/openapi.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/config/settings/openapi.py b/config/settings/openapi.py --- a/config/settings/openapi.py +++ b/config/settings/openapi.py @@ -1,9 +1,16 @@ def custom_preprocessing_hook(endpoints): - filtered = [] - for (path, path_regex, method, callback) in endpoints: - # Remove all but DRF API endpoints - if path.startswith("/api/db/v0/databases/") or path.startswith("/api/db/v0/data_files/") or path.startswith("/api/db/v0/schemas/") or path.startswith("/api/db/v0/tables/"): - filtered.append((path, path_regex, method, callback)) + prefixes = [ + "/api/db/v0/databases/", + "/api/db/v0/data_files/", + "/api/db/v0/schemas/", + "/api/db/v0/tables/", + "/api/db/v0/links/", + "/api/db/v0/queries/", + "/api/ui/v0/databases/", + "/api/ui/v0/users/", + "/api/ui/v0/database_roles/" + ] + filtered = [(path, path_regex, method, callback) for path, path_regex, method, callback in endpoints if any(path.startswith(prefix) for prefix in prefixes)] return filtered
{"golden_diff": "diff --git a/config/settings/openapi.py b/config/settings/openapi.py\n--- a/config/settings/openapi.py\n+++ b/config/settings/openapi.py\n@@ -1,9 +1,16 @@\n def custom_preprocessing_hook(endpoints):\n- filtered = []\n- for (path, path_regex, method, callback) in endpoints:\n- # Remove all but DRF API endpoints\n- if path.startswith(\"/api/db/v0/databases/\") or path.startswith(\"/api/db/v0/data_files/\") or path.startswith(\"/api/db/v0/schemas/\") or path.startswith(\"/api/db/v0/tables/\"):\n- filtered.append((path, path_regex, method, callback))\n+ prefixes = [\n+ \"/api/db/v0/databases/\",\n+ \"/api/db/v0/data_files/\",\n+ \"/api/db/v0/schemas/\",\n+ \"/api/db/v0/tables/\",\n+ \"/api/db/v0/links/\",\n+ \"/api/db/v0/queries/\",\n+ \"/api/ui/v0/databases/\",\n+ \"/api/ui/v0/users/\",\n+ \"/api/ui/v0/database_roles/\"\n+ ]\n+ filtered = [(path, path_regex, method, callback) for path, path_regex, method, callback in endpoints if any(path.startswith(prefix) for prefix in prefixes)]\n return filtered\n", "issue": "Add openAPI Specification for /database_roles/ endpoint\nGenerate spec for `database_roles` endpoint\n", "before_files": [{"content": "def custom_preprocessing_hook(endpoints):\n filtered = []\n for (path, path_regex, method, callback) in endpoints:\n # Remove all but DRF API endpoints\n if path.startswith(\"/api/db/v0/databases/\") or path.startswith(\"/api/db/v0/data_files/\") or path.startswith(\"/api/db/v0/schemas/\") or path.startswith(\"/api/db/v0/tables/\"):\n filtered.append((path, path_regex, method, callback))\n return filtered\n\n\ndef remove_url_prefix_hook(result, **kwargs):\n # Remove namespace and version URL prefix from the operation Id of the generated API schema\n for path, path_info in result['paths'].items():\n for method, operation in path_info.items():\n operation_id = operation.get('operationId')\n if operation_id:\n if path.startswith('/api/db/v0/'):\n operation['operationId'] = operation_id.replace('db_v0_', '')\n elif path.startswith('/api/ui/v0/'):\n operation['operationId'] = operation_id.replace('ui_v0_', '')\n\n return result\n", "path": "config/settings/openapi.py"}]}
818
282
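
The mathesar diff swaps a chain of `or`-ed `startswith` checks for a prefix list plus `any()`. The same shape in isolation, parameterized over the prefixes and fed hypothetical endpoint tuples so it runs standalone:

```python
def filter_endpoints(endpoints, prefixes):
    """Keep only endpoints whose path begins with one of the prefixes."""
    return [
        (path, path_regex, method, callback)
        for path, path_regex, method, callback in endpoints
        if any(path.startswith(prefix) for prefix in prefixes)
    ]


# Hypothetical (path, path_regex, method, callback) tuples.
endpoints = [
    ("/api/db/v0/tables/", None, "GET", None),
    ("/api/ui/v0/database_roles/", None, "GET", None),
    ("/internal/health/", None, "GET", None),
]
kept = filter_endpoints(endpoints,
                        ["/api/db/v0/", "/api/ui/v0/database_roles/"])
assert [path for path, *_ in kept] == ["/api/db/v0/tables/",
                                       "/api/ui/v0/database_roles/"]
```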
gh_patches_debug_35341
rasdani/github-patches
git_diff
google__mobly-170
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Snippets are not compatible with UIAutomator 'am instrument' only sets up a UiAutomationConnection in -w mode (I don't know why). Snippets are not run in wait mode, so UiAutomationConnection is null. This crashes uiautomator, which needs that proxy object for privileged operations back to the shell. We need to start the snippets in -w mode, using something like `start_standing_subprocess`. </issue> <code> [start of mobly/controllers/android_device_lib/snippet_client.py] 1 #/usr/bin/env python3.4 2 # 3 # Copyright 2016 Google Inc. 4 # 5 # Licensed under the Apache License, Version 2.0 (the "License"); 6 # you may not use this file except in compliance with the License. 7 # You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, software 12 # distributed under the License is distributed on an "AS IS" BASIS, 13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 # See the License for the specific language governing permissions and 15 # limitations under the License. 16 """JSON RPC interface to Mobly Snippet Lib.""" 17 import logging 18 import re 19 20 from mobly.controllers.android_device_lib import adb 21 from mobly.controllers.android_device_lib import jsonrpc_client_base 22 23 _INSTRUMENTATION_RUNNER_PACKAGE = 'com.google.android.mobly.snippet.SnippetRunner' 24 25 _LAUNCH_CMD = 'am instrument -e action start -e port %s %s/' + _INSTRUMENTATION_RUNNER_PACKAGE 26 27 _STOP_CMD = 'am instrument -w -e action stop %s/' + _INSTRUMENTATION_RUNNER_PACKAGE 28 29 30 class Error(Exception): 31 pass 32 33 34 class SnippetClient(jsonrpc_client_base.JsonRpcClientBase): 35 """A client for interacting with snippet APKs using Mobly Snippet Lib. 36 37 See superclass documentation for a list of public attributes. 38 """ 39 40 def __init__(self, package, host_port, adb_proxy, log=logging.getLogger()): 41 """Initializes a SnippetClient. 42 43 Args: 44 package: (str) The package name of the apk where the snippets are 45 defined. 46 host_port: (int) The port at which to start the snippet client. Note 47 that the same port will currently be used for both the 48 device and host side of the connection. 49 adb_proxy: (adb.AdbProxy) The adb proxy to use to start the app. 50 """ 51 # TODO(adorokhine): Don't assume that a free host-side port is free on 52 # the device as well. Both sides should allocate a unique port. 53 super(SnippetClient, self).__init__( 54 host_port=host_port, 55 device_port=host_port, 56 app_name=package, 57 adb_proxy=adb_proxy, 58 log=log) 59 self.package = package 60 self.log = log 61 self._serial = self._adb.serial 62 63 def _do_start_app(self): 64 """Overrides superclass.""" 65 cmd = _LAUNCH_CMD % (self.device_port, self.package) 66 # Use info here so people know exactly what's happening here, which is 67 # helpful since they need to create their own instrumentations and 68 # manifest. 69 self.log.info('Launching snippet apk %s', self.package) 70 self._adb.shell(cmd) 71 72 def stop_app(self): 73 """Overrides superclass.""" 74 cmd = _STOP_CMD % self.package 75 self.log.debug('Stopping snippet apk %s', self.package) 76 out = self._adb.shell(_STOP_CMD % self.package).decode('utf-8') 77 if 'OK (0 tests)' not in out: 78 raise Error('Failed to stop existing apk. 
Unexpected output: %s' % 79 out) 80 81 def check_app_installed(self): 82 """Overrides superclass.""" 83 # Check that the Mobly Snippet app is installed. 84 if not self._adb_grep_wrapper( 85 r'pm list package | tr -d "\r" | grep "^package:%s$"' % 86 self.package): 87 raise jsonrpc_client_base.AppStartError( 88 '%s is not installed on %s' % (self.package, self._serial)) 89 # Check that the app is instrumented. 90 out = self._adb_grep_wrapper( 91 r'pm list instrumentation | tr -d "\r" | grep ^instrumentation:%s/%s' 92 % (self.package, _INSTRUMENTATION_RUNNER_PACKAGE)) 93 if not out: 94 raise jsonrpc_client_base.AppStartError( 95 '%s is installed on %s, but it is not instrumented.' % 96 (self.package, self._serial)) 97 match = re.search(r'^instrumentation:(.*)\/(.*) \(target=(.*)\)$', out) 98 target_name = match.group(3) 99 # Check that the instrumentation target is installed if it's not the 100 # same as the snippet package. 101 if target_name != self.package: 102 out = self._adb_grep_wrapper( 103 r'pm list package | tr -d "\r" | grep ^package:%s$' % 104 target_name) 105 if not out: 106 raise jsonrpc_client_base.AppStartError( 107 'Instrumentation target %s is not installed on %s' % 108 (target_name, self._serial)) 109 110 def _start_event_client(self): 111 event_client = SnippetClient( 112 package=self.package, 113 host_port=self.host_port, 114 adb_proxy=self._adb, 115 log=self.log) 116 event_client.connect(self.uid, 117 jsonrpc_client_base.JsonRpcCommand.CONTINUE) 118 return event_client 119 [end of mobly/controllers/android_device_lib/snippet_client.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mobly/controllers/android_device_lib/snippet_client.py b/mobly/controllers/android_device_lib/snippet_client.py --- a/mobly/controllers/android_device_lib/snippet_client.py +++ b/mobly/controllers/android_device_lib/snippet_client.py @@ -17,14 +17,18 @@ import logging import re -from mobly.controllers.android_device_lib import adb +from mobly import utils from mobly.controllers.android_device_lib import jsonrpc_client_base -_INSTRUMENTATION_RUNNER_PACKAGE = 'com.google.android.mobly.snippet.SnippetRunner' +_INSTRUMENTATION_RUNNER_PACKAGE = ( + 'com.google.android.mobly.snippet.SnippetRunner') -_LAUNCH_CMD = 'am instrument -e action start -e port %s %s/' + _INSTRUMENTATION_RUNNER_PACKAGE +_LAUNCH_CMD = ( + 'am instrument -w -e action start -e port %s %s/' + + _INSTRUMENTATION_RUNNER_PACKAGE) -_STOP_CMD = 'am instrument -w -e action stop %s/' + _INSTRUMENTATION_RUNNER_PACKAGE +_STOP_CMD = ( + 'am instrument -w -e action stop %s/' + _INSTRUMENTATION_RUNNER_PACKAGE) class Error(Exception): @@ -59,6 +63,7 @@ self.package = package self.log = log self._serial = self._adb.serial + self._proc = None def _do_start_app(self): """Overrides superclass.""" @@ -67,11 +72,18 @@ # helpful since they need to create their own instrumentations and # manifest. self.log.info('Launching snippet apk %s', self.package) - self._adb.shell(cmd) + adb_cmd = ['adb', '-s', self._adb.serial, 'shell', cmd] + self._proc = utils.start_standing_subprocess(adb_cmd, shell=False) def stop_app(self): """Overrides superclass.""" - cmd = _STOP_CMD % self.package + # Kill the pending 'adb shell am instrument -w' process if there is one. + # Although killing the snippet apk would abort this process anyway, we + # want to call stop_standing_subprocess() to perform a health check, + # print the failure stack trace if there was any, and reap it from the + # process table. + if self._proc: + utils.stop_standing_subprocess(self._proc) self.log.debug('Stopping snippet apk %s', self.package) out = self._adb.shell(_STOP_CMD % self.package).decode('utf-8') if 'OK (0 tests)' not in out:
{"golden_diff": "diff --git a/mobly/controllers/android_device_lib/snippet_client.py b/mobly/controllers/android_device_lib/snippet_client.py\n--- a/mobly/controllers/android_device_lib/snippet_client.py\n+++ b/mobly/controllers/android_device_lib/snippet_client.py\n@@ -17,14 +17,18 @@\n import logging\n import re\n \n-from mobly.controllers.android_device_lib import adb\n+from mobly import utils\n from mobly.controllers.android_device_lib import jsonrpc_client_base\n \n-_INSTRUMENTATION_RUNNER_PACKAGE = 'com.google.android.mobly.snippet.SnippetRunner'\n+_INSTRUMENTATION_RUNNER_PACKAGE = (\n+ 'com.google.android.mobly.snippet.SnippetRunner')\n \n-_LAUNCH_CMD = 'am instrument -e action start -e port %s %s/' + _INSTRUMENTATION_RUNNER_PACKAGE\n+_LAUNCH_CMD = (\n+ 'am instrument -w -e action start -e port %s %s/' +\n+ _INSTRUMENTATION_RUNNER_PACKAGE)\n \n-_STOP_CMD = 'am instrument -w -e action stop %s/' + _INSTRUMENTATION_RUNNER_PACKAGE\n+_STOP_CMD = (\n+ 'am instrument -w -e action stop %s/' + _INSTRUMENTATION_RUNNER_PACKAGE)\n \n \n class Error(Exception):\n@@ -59,6 +63,7 @@\n self.package = package\n self.log = log\n self._serial = self._adb.serial\n+ self._proc = None\n \n def _do_start_app(self):\n \"\"\"Overrides superclass.\"\"\"\n@@ -67,11 +72,18 @@\n # helpful since they need to create their own instrumentations and\n # manifest.\n self.log.info('Launching snippet apk %s', self.package)\n- self._adb.shell(cmd)\n+ adb_cmd = ['adb', '-s', self._adb.serial, 'shell', cmd]\n+ self._proc = utils.start_standing_subprocess(adb_cmd, shell=False)\n \n def stop_app(self):\n \"\"\"Overrides superclass.\"\"\"\n- cmd = _STOP_CMD % self.package\n+ # Kill the pending 'adb shell am instrument -w' process if there is one.\n+ # Although killing the snippet apk would abort this process anyway, we\n+ # want to call stop_standing_subprocess() to perform a health check,\n+ # print the failure stack trace if there was any, and reap it from the\n+ # process table.\n+ if self._proc:\n+ utils.stop_standing_subprocess(self._proc)\n self.log.debug('Stopping snippet apk %s', self.package)\n out = self._adb.shell(_STOP_CMD % self.package).decode('utf-8')\n if 'OK (0 tests)' not in out:\n", "issue": "Snippets are not compatible with UIAutomator\n'am instrument' only sets up a UiAutomationConnection in -w mode (I don't know why). Snippets are not run in wait mode, so UiAutomationConnection is null. 
This crashes uiautomator, which needs that proxy object for privileged operations back to the shell.\r\n\r\nWe need to start the snippets in -w mode, using something like `start_standing_subprocess`.\n", "before_files": [{"content": "#/usr/bin/env python3.4\n#\n# Copyright 2016 Google Inc.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"JSON RPC interface to Mobly Snippet Lib.\"\"\"\nimport logging\nimport re\n\nfrom mobly.controllers.android_device_lib import adb\nfrom mobly.controllers.android_device_lib import jsonrpc_client_base\n\n_INSTRUMENTATION_RUNNER_PACKAGE = 'com.google.android.mobly.snippet.SnippetRunner'\n\n_LAUNCH_CMD = 'am instrument -e action start -e port %s %s/' + _INSTRUMENTATION_RUNNER_PACKAGE\n\n_STOP_CMD = 'am instrument -w -e action stop %s/' + _INSTRUMENTATION_RUNNER_PACKAGE\n\n\nclass Error(Exception):\n pass\n\n\nclass SnippetClient(jsonrpc_client_base.JsonRpcClientBase):\n \"\"\"A client for interacting with snippet APKs using Mobly Snippet Lib.\n\n See superclass documentation for a list of public attributes.\n \"\"\"\n\n def __init__(self, package, host_port, adb_proxy, log=logging.getLogger()):\n \"\"\"Initializes a SnippetClient.\n \n Args:\n package: (str) The package name of the apk where the snippets are\n defined.\n host_port: (int) The port at which to start the snippet client. Note\n that the same port will currently be used for both the\n device and host side of the connection.\n adb_proxy: (adb.AdbProxy) The adb proxy to use to start the app.\n \"\"\"\n # TODO(adorokhine): Don't assume that a free host-side port is free on\n # the device as well. Both sides should allocate a unique port.\n super(SnippetClient, self).__init__(\n host_port=host_port,\n device_port=host_port,\n app_name=package,\n adb_proxy=adb_proxy,\n log=log)\n self.package = package\n self.log = log\n self._serial = self._adb.serial\n\n def _do_start_app(self):\n \"\"\"Overrides superclass.\"\"\"\n cmd = _LAUNCH_CMD % (self.device_port, self.package)\n # Use info here so people know exactly what's happening here, which is\n # helpful since they need to create their own instrumentations and\n # manifest.\n self.log.info('Launching snippet apk %s', self.package)\n self._adb.shell(cmd)\n\n def stop_app(self):\n \"\"\"Overrides superclass.\"\"\"\n cmd = _STOP_CMD % self.package\n self.log.debug('Stopping snippet apk %s', self.package)\n out = self._adb.shell(_STOP_CMD % self.package).decode('utf-8')\n if 'OK (0 tests)' not in out:\n raise Error('Failed to stop existing apk. 
Unexpected output: %s' %\n out)\n\n def check_app_installed(self):\n \"\"\"Overrides superclass.\"\"\"\n # Check that the Mobly Snippet app is installed.\n if not self._adb_grep_wrapper(\n r'pm list package | tr -d \"\\r\" | grep \"^package:%s$\"' %\n self.package):\n raise jsonrpc_client_base.AppStartError(\n '%s is not installed on %s' % (self.package, self._serial))\n # Check that the app is instrumented.\n out = self._adb_grep_wrapper(\n r'pm list instrumentation | tr -d \"\\r\" | grep ^instrumentation:%s/%s'\n % (self.package, _INSTRUMENTATION_RUNNER_PACKAGE))\n if not out:\n raise jsonrpc_client_base.AppStartError(\n '%s is installed on %s, but it is not instrumented.' %\n (self.package, self._serial))\n match = re.search(r'^instrumentation:(.*)\\/(.*) \\(target=(.*)\\)$', out)\n target_name = match.group(3)\n # Check that the instrumentation target is installed if it's not the\n # same as the snippet package.\n if target_name != self.package:\n out = self._adb_grep_wrapper(\n r'pm list package | tr -d \"\\r\" | grep ^package:%s$' %\n target_name)\n if not out:\n raise jsonrpc_client_base.AppStartError(\n 'Instrumentation target %s is not installed on %s' %\n (target_name, self._serial))\n\n def _start_event_client(self):\n event_client = SnippetClient(\n package=self.package,\n host_port=self.host_port,\n adb_proxy=self._adb,\n log=self.log)\n event_client.connect(self.uid,\n jsonrpc_client_base.JsonRpcCommand.CONTINUE)\n return event_client\n", "path": "mobly/controllers/android_device_lib/snippet_client.py"}]}
1,986
597
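
A hedged sketch of the `-w` launch the issue in the record above asks for. The record's golden diff is not visible here, so the helper name, the command layout, and the exact `mobly.utils` call below are illustrative assumptions, not the project's real fix:

```python
# Illustrative only: assumes mobly.utils.start_standing_subprocess accepts
# an argv list and returns a Popen-like handle the caller later kills.
from mobly import utils

_SNIPPET_RUNNER = 'com.google.android.mobly.snippet.SnippetRunner'
_LAUNCH_CMD = ('am instrument -w -e action start -e port %s %s/'
               + _SNIPPET_RUNNER)


def launch_snippet_in_foreground(serial, device_port, package):
    # "-w" keeps "am instrument" attached in the foreground, so the shell
    # process that owns the uiautomator proxy survives for the session.
    cmd = ['adb', '-s', serial, 'shell',
           _LAUNCH_CMD % (device_port, package)]
    # A standing subprocess is not waited on; keep the handle and stop it
    # when the snippet client shuts the app down.
    return utils.start_standing_subprocess(cmd)
```

The point of the standing subprocess is simply that nothing reaps the shell while the snippet session is live.
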
gh_patches_debug_57176
rasdani/github-patches
git_diff
celery__celery-4037
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> celery.contrib.sphinx fails with Sphinx 1.6.1 When using the `celery.contrib.sphinx` extension with Sphinx 1.6.1 with Celery 4.0.2 the following occurs: ``` Exception occurred: File "/home/ubuntu/virtualenvs/venv-system/lib/python2.7/site-packages/celery/contrib/sphinx.py", line 72, in setup app.domains['py'].directives['task'] = TaskDirective AttributeError: 'Sphinx' object has no attribute 'domains' The full traceback has been saved in /tmp/sphinx-err-oOWabx.log, if you want to report the issue to the developers. Please also report this if it was a user error, so that a better error message can be provided next time. A bug report can be filed in the tracker at <https://github.com/sphinx-doc/sphinx/issues>. Thanks! make: *** [html] Error 1 ``` The `domains` property seems to have been removed in sphinx-doc/sphinx#3656 and I think this line needs to be replaced with the [`add_directive` method](http://www.sphinx-doc.org/en/stable/extdev/appapi.html#sphinx.application.Sphinx.add_directive) (or more likely the [`add_directive_to_domain` method](http://www.sphinx-doc.org/en/stable/extdev/appapi.html#sphinx.application.Sphinx.add_directive_to_domain)). </issue> <code> [start of celery/contrib/sphinx.py] 1 # -*- coding: utf-8 -*- 2 """Sphinx documentation plugin used to document tasks. 3 4 Introduction 5 ============ 6 7 Usage 8 ----- 9 10 Add the extension to your :file:`docs/conf.py` configuration module: 11 12 .. code-block:: python 13 14 extensions = (..., 15 'celery.contrib.sphinx') 16 17 If you'd like to change the prefix for tasks in reference documentation 18 then you can change the ``celery_task_prefix`` configuration value: 19 20 .. code-block:: python 21 22 celery_task_prefix = '(task)' # < default 23 24 With the extension installed `autodoc` will automatically find 25 task decorated objects and generate the correct (as well as 26 add a ``(task)`` prefix), and you can also refer to the tasks 27 using `:task:proj.tasks.add` syntax. 28 29 Use ``.. autotask::`` to manually document a task. 
30 """ 31 from __future__ import absolute_import, unicode_literals 32 from inspect import formatargspec 33 from sphinx.domains.python import PyModulelevel 34 from sphinx.ext.autodoc import FunctionDocumenter 35 from celery.app.task import BaseTask 36 from celery.five import getfullargspec 37 38 39 class TaskDocumenter(FunctionDocumenter): 40 """Document task definitions.""" 41 42 objtype = 'task' 43 member_order = 11 44 45 @classmethod 46 def can_document_member(cls, member, membername, isattr, parent): 47 return isinstance(member, BaseTask) and getattr(member, '__wrapped__') 48 49 def format_args(self): 50 wrapped = getattr(self.object, '__wrapped__', None) 51 if wrapped is not None: 52 argspec = getfullargspec(wrapped) 53 fmt = formatargspec(*argspec) 54 fmt = fmt.replace('\\', '\\\\') 55 return fmt 56 return '' 57 58 def document_members(self, all_members=False): 59 pass 60 61 62 class TaskDirective(PyModulelevel): 63 """Sphinx task directive.""" 64 65 def get_signature_prefix(self, sig): 66 return self.env.config.celery_task_prefix 67 68 69 def setup(app): 70 """Setup Sphinx extension.""" 71 app.add_autodocumenter(TaskDocumenter) 72 app.domains['py'].directives['task'] = TaskDirective 73 app.add_config_value('celery_task_prefix', '(task)', True) 74 [end of celery/contrib/sphinx.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/celery/contrib/sphinx.py b/celery/contrib/sphinx.py --- a/celery/contrib/sphinx.py +++ b/celery/contrib/sphinx.py @@ -69,5 +69,5 @@ def setup(app): """Setup Sphinx extension.""" app.add_autodocumenter(TaskDocumenter) - app.domains['py'].directives['task'] = TaskDirective + app.add_directive_to_domain('py', 'task', TaskDirective) app.add_config_value('celery_task_prefix', '(task)', True)
{"golden_diff": "diff --git a/celery/contrib/sphinx.py b/celery/contrib/sphinx.py\n--- a/celery/contrib/sphinx.py\n+++ b/celery/contrib/sphinx.py\n@@ -69,5 +69,5 @@\n def setup(app):\n \"\"\"Setup Sphinx extension.\"\"\"\n app.add_autodocumenter(TaskDocumenter)\n- app.domains['py'].directives['task'] = TaskDirective\n+ app.add_directive_to_domain('py', 'task', TaskDirective)\n app.add_config_value('celery_task_prefix', '(task)', True)\n", "issue": "celery.contrib.sphinx fails with Sphinx 1.6.1\nWhen using the `celery.contrib.sphinx` extension with Sphinx 1.6.1 with Celery 4.0.2 the following occurs:\r\n\r\n```\r\nException occurred:\r\n File \"/home/ubuntu/virtualenvs/venv-system/lib/python2.7/site-packages/celery/contrib/sphinx.py\", line 72, in setup\r\n app.domains['py'].directives['task'] = TaskDirective\r\nAttributeError: 'Sphinx' object has no attribute 'domains'\r\nThe full traceback has been saved in /tmp/sphinx-err-oOWabx.log, if you want to report the issue to the developers.\r\nPlease also report this if it was a user error, so that a better error message can be provided next time.\r\nA bug report can be filed in the tracker at <https://github.com/sphinx-doc/sphinx/issues>. Thanks!\r\nmake: *** [html] Error 1\r\n```\r\n\r\nThe `domains` property seems to have been removed in sphinx-doc/sphinx#3656 and I think this line needs to be replaced with the [`add_directive` method](http://www.sphinx-doc.org/en/stable/extdev/appapi.html#sphinx.application.Sphinx.add_directive) (or more likely the [`add_directive_to_domain` method](http://www.sphinx-doc.org/en/stable/extdev/appapi.html#sphinx.application.Sphinx.add_directive_to_domain)).\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Sphinx documentation plugin used to document tasks.\n\nIntroduction\n============\n\nUsage\n-----\n\nAdd the extension to your :file:`docs/conf.py` configuration module:\n\n.. code-block:: python\n\n extensions = (...,\n 'celery.contrib.sphinx')\n\nIf you'd like to change the prefix for tasks in reference documentation\nthen you can change the ``celery_task_prefix`` configuration value:\n\n.. code-block:: python\n\n celery_task_prefix = '(task)' # < default\n\nWith the extension installed `autodoc` will automatically find\ntask decorated objects and generate the correct (as well as\nadd a ``(task)`` prefix), and you can also refer to the tasks\nusing `:task:proj.tasks.add` syntax.\n\nUse ``.. 
autotask::`` to manually document a task.\n\"\"\"\nfrom __future__ import absolute_import, unicode_literals\nfrom inspect import formatargspec\nfrom sphinx.domains.python import PyModulelevel\nfrom sphinx.ext.autodoc import FunctionDocumenter\nfrom celery.app.task import BaseTask\nfrom celery.five import getfullargspec\n\n\nclass TaskDocumenter(FunctionDocumenter):\n \"\"\"Document task definitions.\"\"\"\n\n objtype = 'task'\n member_order = 11\n\n @classmethod\n def can_document_member(cls, member, membername, isattr, parent):\n return isinstance(member, BaseTask) and getattr(member, '__wrapped__')\n\n def format_args(self):\n wrapped = getattr(self.object, '__wrapped__', None)\n if wrapped is not None:\n argspec = getfullargspec(wrapped)\n fmt = formatargspec(*argspec)\n fmt = fmt.replace('\\\\', '\\\\\\\\')\n return fmt\n return ''\n\n def document_members(self, all_members=False):\n pass\n\n\nclass TaskDirective(PyModulelevel):\n \"\"\"Sphinx task directive.\"\"\"\n\n def get_signature_prefix(self, sig):\n return self.env.config.celery_task_prefix\n\n\ndef setup(app):\n \"\"\"Setup Sphinx extension.\"\"\"\n app.add_autodocumenter(TaskDocumenter)\n app.domains['py'].directives['task'] = TaskDirective\n app.add_config_value('celery_task_prefix', '(task)', True)\n", "path": "celery/contrib/sphinx.py"}]}
1,474
128
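
A short sketch of the registration pattern from the golden diff above. `TaskDocumenter` and `TaskDirective` are the classes defined in the record's `celery/contrib/sphinx.py`; the `hasattr` fallback for pre-1.6 Sphinx is an extra assumption, not part of the actual patch:

```python
def setup(app):
    """Set up the extension without touching the removed app.domains."""
    app.add_autodocumenter(TaskDocumenter)
    if hasattr(app, 'add_directive_to_domain'):
        # Public Sphinx extension API; unaffected by sphinx-doc/sphinx#3656,
        # which dropped the Sphinx.domains attribute the old code poked.
        app.add_directive_to_domain('py', 'task', TaskDirective)
    else:
        app.domains['py'].directives['task'] = TaskDirective
    app.add_config_value('celery_task_prefix', '(task)', True)
```
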
gh_patches_debug_30866
rasdani/github-patches
git_diff
sktime__sktime-3723
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [MNT] PyPi publish action for 0.14 failed Summary by @fkiraly: The attempted 0.14.0 release has failed at pypi upload of successfully built wheels. Help on diagnosing and solving this is appreciated. Original post: --- Just to let you know: 0.14 has not been published on PyPI yet: https://github.com/sktime/sktime/actions/runs/3402037795 </issue> <code> [start of setup.py] 1 #! /usr/bin/env python 2 # -*- coding: utf-8 -*- 3 """Install script for sktime.""" 4 5 __author__ = ["mloning", "lmmentel"] 6 7 import codecs 8 9 import toml 10 from setuptools import find_packages, setup 11 12 pyproject = toml.load("pyproject.toml") 13 14 15 def long_description(): 16 """Read and return README as long description.""" 17 with codecs.open("README.md", encoding="utf-8-sig") as f: 18 return f.read() 19 20 21 # ground truth package metadata is loaded from pyproject.toml 22 # for context see: 23 # - [PEP 621 -- Storing project metadata in pyproject.toml] 24 # (https://www.python.org/dev/peps/pep-0621) 25 pyproject = toml.load("pyproject.toml") 26 27 28 def setup_package(): 29 """Set up package.""" 30 projectname = pyproject["project"]["name"] 31 setup( 32 author_email="[email protected]", 33 author=f"{projectname} developers", 34 classifiers=pyproject["project"]["classifiers"], 35 description=pyproject["project"]["description"], 36 download_url=pyproject["project"]["urls"]["download"], 37 extras_require=pyproject["project"]["optional-dependencies"], 38 include_package_data=True, 39 install_requires=pyproject["project"]["dependencies"], 40 keywords=pyproject["project"]["keywords"], 41 license=pyproject["project"]["license"], 42 long_description=long_description(), 43 maintainer_email="[email protected]", 44 maintainer=f"{projectname} developers", 45 name=projectname, 46 package_data={ 47 "sktime": [ 48 "*.csv", 49 "*.csv.gz", 50 "*.arff", 51 "*.arff.gz", 52 "*.txt", 53 "*.ts", 54 "*.tsv", 55 ] 56 }, 57 packages=find_packages( 58 where=".", 59 exclude=["tests", "tests.*"], 60 ), 61 project_urls=pyproject["project"]["urls"], 62 python_requires=pyproject["project"]["requires-python"], 63 setup_requires=pyproject["build-system"]["requires"], 64 url=pyproject["project"]["urls"]["repository"], 65 version=pyproject["project"]["version"], 66 zip_safe=False, 67 ) 68 69 70 if __name__ == "__main__": 71 setup_package() 72 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py deleted file mode 100644 --- a/setup.py +++ /dev/null @@ -1,71 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf-8 -*- -"""Install script for sktime.""" - -__author__ = ["mloning", "lmmentel"] - -import codecs - -import toml -from setuptools import find_packages, setup - -pyproject = toml.load("pyproject.toml") - - -def long_description(): - """Read and return README as long description.""" - with codecs.open("README.md", encoding="utf-8-sig") as f: - return f.read() - - -# ground truth package metadata is loaded from pyproject.toml -# for context see: -# - [PEP 621 -- Storing project metadata in pyproject.toml] -# (https://www.python.org/dev/peps/pep-0621) -pyproject = toml.load("pyproject.toml") - - -def setup_package(): - """Set up package.""" - projectname = pyproject["project"]["name"] - setup( - author_email="[email protected]", - author=f"{projectname} developers", - classifiers=pyproject["project"]["classifiers"], - description=pyproject["project"]["description"], - download_url=pyproject["project"]["urls"]["download"], - extras_require=pyproject["project"]["optional-dependencies"], - include_package_data=True, - install_requires=pyproject["project"]["dependencies"], - keywords=pyproject["project"]["keywords"], - license=pyproject["project"]["license"], - long_description=long_description(), - maintainer_email="[email protected]", - maintainer=f"{projectname} developers", - name=projectname, - package_data={ - "sktime": [ - "*.csv", - "*.csv.gz", - "*.arff", - "*.arff.gz", - "*.txt", - "*.ts", - "*.tsv", - ] - }, - packages=find_packages( - where=".", - exclude=["tests", "tests.*"], - ), - project_urls=pyproject["project"]["urls"], - python_requires=pyproject["project"]["requires-python"], - setup_requires=pyproject["build-system"]["requires"], - url=pyproject["project"]["urls"]["repository"], - version=pyproject["project"]["version"], - zip_safe=False, - ) - - -if __name__ == "__main__": - setup_package()
{"golden_diff": "diff --git a/setup.py b/setup.py\ndeleted file mode 100644\n--- a/setup.py\n+++ /dev/null\n@@ -1,71 +0,0 @@\n-#! /usr/bin/env python\n-# -*- coding: utf-8 -*-\n-\"\"\"Install script for sktime.\"\"\"\n-\n-__author__ = [\"mloning\", \"lmmentel\"]\n-\n-import codecs\n-\n-import toml\n-from setuptools import find_packages, setup\n-\n-pyproject = toml.load(\"pyproject.toml\")\n-\n-\n-def long_description():\n- \"\"\"Read and return README as long description.\"\"\"\n- with codecs.open(\"README.md\", encoding=\"utf-8-sig\") as f:\n- return f.read()\n-\n-\n-# ground truth package metadata is loaded from pyproject.toml\n-# for context see:\n-# - [PEP 621 -- Storing project metadata in pyproject.toml]\n-# (https://www.python.org/dev/peps/pep-0621)\n-pyproject = toml.load(\"pyproject.toml\")\n-\n-\n-def setup_package():\n- \"\"\"Set up package.\"\"\"\n- projectname = pyproject[\"project\"][\"name\"]\n- setup(\n- author_email=\"[email protected]\",\n- author=f\"{projectname} developers\",\n- classifiers=pyproject[\"project\"][\"classifiers\"],\n- description=pyproject[\"project\"][\"description\"],\n- download_url=pyproject[\"project\"][\"urls\"][\"download\"],\n- extras_require=pyproject[\"project\"][\"optional-dependencies\"],\n- include_package_data=True,\n- install_requires=pyproject[\"project\"][\"dependencies\"],\n- keywords=pyproject[\"project\"][\"keywords\"],\n- license=pyproject[\"project\"][\"license\"],\n- long_description=long_description(),\n- maintainer_email=\"[email protected]\",\n- maintainer=f\"{projectname} developers\",\n- name=projectname,\n- package_data={\n- \"sktime\": [\n- \"*.csv\",\n- \"*.csv.gz\",\n- \"*.arff\",\n- \"*.arff.gz\",\n- \"*.txt\",\n- \"*.ts\",\n- \"*.tsv\",\n- ]\n- },\n- packages=find_packages(\n- where=\".\",\n- exclude=[\"tests\", \"tests.*\"],\n- ),\n- project_urls=pyproject[\"project\"][\"urls\"],\n- python_requires=pyproject[\"project\"][\"requires-python\"],\n- setup_requires=pyproject[\"build-system\"][\"requires\"],\n- url=pyproject[\"project\"][\"urls\"][\"repository\"],\n- version=pyproject[\"project\"][\"version\"],\n- zip_safe=False,\n- )\n-\n-\n-if __name__ == \"__main__\":\n- setup_package()\n", "issue": "[MNT] PyPi publish action for 0.14 failed\nSummary by @fkiraly:\r\nThe attempted 0.14.0 release has failed at pypi upload of successfully built wheels.\r\n\r\nHelp on diagnosing and solving this is appreciated.\r\n\r\nOriginal post:\r\n---\r\nJust to let you know: 0.14 has not been published on PyPI yet:\r\n\r\nhttps://github.com/sktime/sktime/actions/runs/3402037795\n", "before_files": [{"content": "#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Install script for sktime.\"\"\"\n\n__author__ = [\"mloning\", \"lmmentel\"]\n\nimport codecs\n\nimport toml\nfrom setuptools import find_packages, setup\n\npyproject = toml.load(\"pyproject.toml\")\n\n\ndef long_description():\n \"\"\"Read and return README as long description.\"\"\"\n with codecs.open(\"README.md\", encoding=\"utf-8-sig\") as f:\n return f.read()\n\n\n# ground truth package metadata is loaded from pyproject.toml\n# for context see:\n# - [PEP 621 -- Storing project metadata in pyproject.toml]\n# (https://www.python.org/dev/peps/pep-0621)\npyproject = toml.load(\"pyproject.toml\")\n\n\ndef setup_package():\n \"\"\"Set up package.\"\"\"\n projectname = pyproject[\"project\"][\"name\"]\n setup(\n author_email=\"[email protected]\",\n author=f\"{projectname} developers\",\n classifiers=pyproject[\"project\"][\"classifiers\"],\n description=pyproject[\"project\"][\"description\"],\n download_url=pyproject[\"project\"][\"urls\"][\"download\"],\n extras_require=pyproject[\"project\"][\"optional-dependencies\"],\n include_package_data=True,\n install_requires=pyproject[\"project\"][\"dependencies\"],\n keywords=pyproject[\"project\"][\"keywords\"],\n license=pyproject[\"project\"][\"license\"],\n long_description=long_description(),\n maintainer_email=\"[email protected]\",\n maintainer=f\"{projectname} developers\",\n name=projectname,\n package_data={\n \"sktime\": [\n \"*.csv\",\n \"*.csv.gz\",\n \"*.arff\",\n \"*.arff.gz\",\n \"*.txt\",\n \"*.ts\",\n \"*.tsv\",\n ]\n },\n packages=find_packages(\n where=\".\",\n exclude=[\"tests\", \"tests.*\"],\n ),\n project_urls=pyproject[\"project\"][\"urls\"],\n python_requires=pyproject[\"project\"][\"requires-python\"],\n setup_requires=pyproject[\"build-system\"][\"requires\"],\n url=pyproject[\"project\"][\"urls\"][\"repository\"],\n version=pyproject[\"project\"][\"version\"],\n zip_safe=False,\n )\n\n\nif __name__ == \"__main__\":\n setup_package()\n", "path": "setup.py"}]}
1,256
584
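
The golden diff above removes `setup.py` entirely, leaving `pyproject.toml` as the single source of build metadata. A hedged pre-release sanity check in the spirit of the deleted script (which fields are worth printing is an assumption; the keys themselves are the ones the old `setup.py` read):

```python
import toml

# Confirm the metadata the build backend will consume still parses once
# setup.py is gone; these keys mirror what the deleted script forwarded.
project = toml.load("pyproject.toml")["project"]
print(project["name"], project["version"])         # wheel name and version
print(project["requires-python"])                  # interpreter constraint
print(len(project["dependencies"]), "runtime dependencies")
```
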
gh_patches_debug_24590
rasdani/github-patches
git_diff
apluslms__a-plus-1216
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Display personal deadline extensions on a student’s points page A student’s points page in A+ just shows the official deadlines for each module. Personalized deadlines are not shown. This has repeatedly caused confusion when a student believes there’s something wrong with their personalized deadline. It would be better if the student was able to see the actual deadlines on their points page (and possibly elsewhere?). This would also eliminate some confusion in communications between members of course staff, as staff members would also easily see the student’s DL extensions. I’m not sure off the top of my head how best to display this on the page. </issue> <code> [start of course/templatetags/course.py] 1 from typing import Any, Dict, List, Union 2 3 from django import template 4 from django.db import models 5 from django.utils.safestring import mark_safe 6 from django.utils.translation import get_language 7 8 from exercise.cache.content import CachedContent 9 from course.models import CourseInstance, UserTagging 10 from lib.localization_syntax import pick_localized 11 from userprofile.models import UserProfile 12 from ..cache.menu import CachedTopMenu 13 14 15 register = template.Library() 16 17 18 def _prepare_topmenu(context): 19 if 'topmenu' not in context: 20 request = context.get('request', None) 21 context['topmenu'] = CachedTopMenu(request.user if request else None) 22 return context['topmenu'] 23 24 25 @register.inclusion_tag("course/_course_dropdown_menu.html", takes_context=True) 26 def course_menu(context): 27 menu = _prepare_topmenu(context) 28 return { "instances": menu.courses() } 29 30 31 @register.inclusion_tag('course/_group_select.html', takes_context=True) 32 def group_select(context): 33 instance = context.get('instance', None) 34 if not instance: 35 return { 'groups': [] } 36 menu = _prepare_topmenu(context) 37 groups, selected = menu.groups(instance) 38 return { 39 'instance': instance, 40 'groups': groups, 41 'selected': selected, 42 } 43 44 45 @register.filter 46 def escape_slashes(string): 47 return str(string).replace('/', '\/') # noqa: W605 48 49 @register.filter 50 def parse_localization(entry): 51 return pick_localized(entry, get_language()) 52 53 54 @register.filter 55 def list_unselected(langs): 56 listed = list(filter(lambda x: x and x != get_language(), langs.split("|"))) 57 return listed 58 59 60 @register.filter 61 def is_visible(entry): 62 return CachedContent.is_visible(entry) 63 64 65 @register.filter 66 def is_visible_to(entry, user): 67 return entry.is_visible_to(user) 68 69 70 @register.filter 71 def is_listed(entry): 72 return CachedContent.is_listed(entry) 73 74 75 @register.filter 76 def len_listed(entries): 77 return len([e for e in entries if CachedContent.is_listed(e)]) 78 79 80 @register.filter 81 def is_in_maintenance(entry): 82 return CachedContent.is_in_maintenance(entry) 83 84 85 @register.filter 86 def exercises_open(entry, now): 87 return entry['opening_time'] <= now <= entry['closing_time'] 88 89 90 @register.filter 91 def exercises_submittable(entry, now): 92 if entry['late_allowed']: 93 return entry['opening_time'] <= now <= entry['late_time'] 94 return entry['opening_time'] <= now <= entry['closing_time'] 95 96 97 @register.filter 98 def has_opened(entry, now): 99 return entry['opening_time'] <= now 100 101 102 @register.filter 103 def url(model_object, name=None): 104 if name: 105 return model_object.get_url(name) 106 
return model_object.get_display_url() 107 108 109 @register.filter 110 def names(profiles): 111 return ", ".join(p.user.get_full_name() for p in profiles) 112 113 114 @register.inclusion_tag('course/_avatars.html') 115 def avatars(profiles): 116 return { 'profiles': profiles } 117 118 119 @register.inclusion_tag("course/_profiles.html") 120 def profiles( 121 profiles: Union[UserProfile, List[UserProfile], models.QuerySet[UserProfile]], 122 instance: CourseInstance, 123 is_teacher: bool 124 ) -> Dict[str, Any]: 125 if isinstance(profiles, UserProfile): 126 profiles = [profiles] 127 elif isinstance(profiles, models.QuerySet): 128 # Avoid re-fetching the queryset 129 profiles = list(profiles) 130 return { 131 'instance': instance, 132 'profiles': profiles, 133 'is_teacher': is_teacher, 134 } 135 136 137 @register.simple_tag 138 def tags(profile, instance): 139 tags = UserTagging.objects.get_all(profile, instance) 140 return mark_safe(' '.join(tag.html_label for tag in tags)) 141 142 143 @register.filter 144 def enrollment_audience(enrollment_audience_val): 145 # convert enrollment audience Enum value to the string description 146 return CourseInstance.ENROLLMENT_AUDIENCE[enrollment_audience_val] 147 148 149 @register.filter 150 def view_content_to(view_content_to_val): 151 # convert "view content to" Enum value to the string description 152 return CourseInstance.VIEW_ACCESS[view_content_to_val] 153 154 155 @register.filter 156 def is_banned_student(profile, course_instance): 157 return course_instance.is_banned(profile.user) 158 [end of course/templatetags/course.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/course/templatetags/course.py b/course/templatetags/course.py --- a/course/templatetags/course.py +++ b/course/templatetags/course.py @@ -1,4 +1,5 @@ from typing import Any, Dict, List, Union +from copy import deepcopy from django import template from django.db import models @@ -22,6 +23,11 @@ return context['topmenu'] +def _deadline_extended_exercise_open(entry, now): + personal_deadline = entry.get('personal_deadline') + return personal_deadline is not None and entry['opening_time'] <= now <= personal_deadline + + @register.inclusion_tag("course/_course_dropdown_menu.html", takes_context=True) def course_menu(context): menu = _prepare_topmenu(context) @@ -87,6 +93,17 @@ return entry['opening_time'] <= now <= entry['closing_time'] [email protected] +def deadline_extended_exercise_open(entry, now): + return _deadline_extended_exercise_open(entry, now) + + [email protected] +def deadline_extended_exercises_open(entry, now): + entries = deepcopy(entry['flatted']) + return any(_deadline_extended_exercise_open(entry, now) for entry in entries) + + @register.filter def exercises_submittable(entry, now): if entry['late_allowed']:
{"golden_diff": "diff --git a/course/templatetags/course.py b/course/templatetags/course.py\n--- a/course/templatetags/course.py\n+++ b/course/templatetags/course.py\n@@ -1,4 +1,5 @@\n from typing import Any, Dict, List, Union\n+from copy import deepcopy\n \n from django import template\n from django.db import models\n@@ -22,6 +23,11 @@\n return context['topmenu']\n \n \n+def _deadline_extended_exercise_open(entry, now):\n+ personal_deadline = entry.get('personal_deadline')\n+ return personal_deadline is not None and entry['opening_time'] <= now <= personal_deadline\n+\n+\n @register.inclusion_tag(\"course/_course_dropdown_menu.html\", takes_context=True)\n def course_menu(context):\n menu = _prepare_topmenu(context)\n@@ -87,6 +93,17 @@\n return entry['opening_time'] <= now <= entry['closing_time']\n \n \[email protected]\n+def deadline_extended_exercise_open(entry, now):\n+ return _deadline_extended_exercise_open(entry, now)\n+\n+\[email protected]\n+def deadline_extended_exercises_open(entry, now):\n+ entries = deepcopy(entry['flatted'])\n+ return any(_deadline_extended_exercise_open(entry, now) for entry in entries)\n+\n+\n @register.filter\n def exercises_submittable(entry, now):\n if entry['late_allowed']:\n", "issue": "Display personal deadline extensions on a student\u2019s points page\nA student\u2019s points page in A+ just shows the official deadlines for each module. Personalized deadlines are not shown. This has repeatedly caused confusion when a student believes there\u2019s something wrong with their personalized deadline. It would be better if the student was able to see the actual deadlines on their points page (and possibly elsewhere?). \r\n\r\nThis would also eliminate some confusion in communications between members of course staff, as staff members would also easily see the student\u2019s DL extensions. 
\r\n\r\nI\u2019m not sure off the top of my head how best to display this on the page.\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from typing import Any, Dict, List, Union\n\nfrom django import template\nfrom django.db import models\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import get_language\n\nfrom exercise.cache.content import CachedContent\nfrom course.models import CourseInstance, UserTagging\nfrom lib.localization_syntax import pick_localized\nfrom userprofile.models import UserProfile\nfrom ..cache.menu import CachedTopMenu\n\n\nregister = template.Library()\n\n\ndef _prepare_topmenu(context):\n if 'topmenu' not in context:\n request = context.get('request', None)\n context['topmenu'] = CachedTopMenu(request.user if request else None)\n return context['topmenu']\n\n\[email protected]_tag(\"course/_course_dropdown_menu.html\", takes_context=True)\ndef course_menu(context):\n menu = _prepare_topmenu(context)\n return { \"instances\": menu.courses() }\n\n\[email protected]_tag('course/_group_select.html', takes_context=True)\ndef group_select(context):\n instance = context.get('instance', None)\n if not instance:\n return { 'groups': [] }\n menu = _prepare_topmenu(context)\n groups, selected = menu.groups(instance)\n return {\n 'instance': instance,\n 'groups': groups,\n 'selected': selected,\n }\n\n\[email protected]\ndef escape_slashes(string):\n return str(string).replace('/', '\\/') # noqa: W605\n\[email protected]\ndef parse_localization(entry):\n return pick_localized(entry, get_language())\n\n\[email protected]\ndef list_unselected(langs):\n listed = list(filter(lambda x: x and x != get_language(), langs.split(\"|\")))\n return listed\n\n\[email protected]\ndef is_visible(entry):\n return CachedContent.is_visible(entry)\n\n\[email protected]\ndef is_visible_to(entry, user):\n return entry.is_visible_to(user)\n\n\[email protected]\ndef is_listed(entry):\n return CachedContent.is_listed(entry)\n\n\[email protected]\ndef len_listed(entries):\n return len([e for e in entries if CachedContent.is_listed(e)])\n\n\[email protected]\ndef is_in_maintenance(entry):\n return CachedContent.is_in_maintenance(entry)\n\n\[email protected]\ndef exercises_open(entry, now):\n return entry['opening_time'] <= now <= entry['closing_time']\n\n\[email protected]\ndef exercises_submittable(entry, now):\n if entry['late_allowed']:\n return entry['opening_time'] <= now <= entry['late_time']\n return entry['opening_time'] <= now <= entry['closing_time']\n\n\[email protected]\ndef has_opened(entry, now):\n return entry['opening_time'] <= now\n\n\[email protected]\ndef url(model_object, name=None):\n if name:\n return model_object.get_url(name)\n return model_object.get_display_url()\n\n\[email protected]\ndef names(profiles):\n return \", \".join(p.user.get_full_name() for p in profiles)\n\n\[email protected]_tag('course/_avatars.html')\ndef avatars(profiles):\n return { 'profiles': profiles }\n\n\[email protected]_tag(\"course/_profiles.html\")\ndef profiles(\n profiles: Union[UserProfile, List[UserProfile], models.QuerySet[UserProfile]],\n instance: CourseInstance,\n is_teacher: bool\n ) -> Dict[str, Any]:\n if isinstance(profiles, UserProfile):\n profiles = [profiles]\n elif isinstance(profiles, models.QuerySet):\n # Avoid re-fetching the queryset\n profiles = list(profiles)\n return {\n 'instance': instance,\n 'profiles': profiles,\n 'is_teacher': is_teacher,\n }\n\n\[email protected]_tag\ndef tags(profile, instance):\n tags = 
UserTagging.objects.get_all(profile, instance)\n return mark_safe(' '.join(tag.html_label for tag in tags))\n\n\[email protected]\ndef enrollment_audience(enrollment_audience_val):\n # convert enrollment audience Enum value to the string description\n return CourseInstance.ENROLLMENT_AUDIENCE[enrollment_audience_val]\n\n\[email protected]\ndef view_content_to(view_content_to_val):\n # convert \"view content to\" Enum value to the string description\n return CourseInstance.VIEW_ACCESS[view_content_to_val]\n\n\[email protected]\ndef is_banned_student(profile, course_instance):\n return course_instance.is_banned(profile.user)\n", "path": "course/templatetags/course.py"}]}
1,980
311
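
The core predicate the golden diff above introduces, lifted out of Django so the deadline logic can be exercised standalone; the entry dict keeps only the keys the record actually uses, the rest of the cached-entry shape is omitted:

```python
from datetime import datetime, timedelta


def deadline_extended_exercise_open(entry, now):
    # Open iff a personal deadline exists and "now" falls in its window.
    personal_deadline = entry.get('personal_deadline')
    return (personal_deadline is not None
            and entry['opening_time'] <= now <= personal_deadline)


now = datetime.utcnow()
entry = {
    'opening_time': now - timedelta(days=14),
    'closing_time': now - timedelta(days=1),       # official deadline passed
    'personal_deadline': now + timedelta(days=3),  # extension still running
}
print(deadline_extended_exercise_open(entry, now))  # True -> show extension
```
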
gh_patches_debug_13156
rasdani/github-patches
git_diff
googleapis__google-cloud-python-5599
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Added retry on internal error Added retry on internal error as suggested by the internal error message: '500 Error encountered during execution. Retrying may solve the problem.'. Rationalised the conditions structure so it would simplify addition of other retry conditions. </issue> <code> [start of bigquery/google/cloud/bigquery/retry.py] 1 2 # Copyright 2018 Google LLC 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 16 from google.api_core import retry 17 18 19 def _should_retry(exc): 20 """Predicate for determining when to retry. 21 22 We retry if and only if the 'reason' is 'backendError' 23 or 'rateLimitExceeded'. 24 """ 25 if not hasattr(exc, 'errors'): 26 return False 27 if len(exc.errors) == 0: 28 return False 29 reason = exc.errors[0]['reason'] 30 return reason == 'backendError' or reason == 'rateLimitExceeded' 31 32 33 DEFAULT_RETRY = retry.Retry(predicate=_should_retry) 34 """The default retry object. 35 36 Any method with a ``retry`` parameter will be retried automatically, 37 with reasonable defaults. To disable retry, pass ``retry=None``. 38 To modify the default retry behavior, call a ``with_XXX`` method 39 on ``DEFAULT_RETRY``. For example, to change the deadline to 30 seconds, 40 pass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``. 41 """ 42 [end of bigquery/google/cloud/bigquery/retry.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bigquery/google/cloud/bigquery/retry.py b/bigquery/google/cloud/bigquery/retry.py --- a/bigquery/google/cloud/bigquery/retry.py +++ b/bigquery/google/cloud/bigquery/retry.py @@ -16,6 +16,13 @@ from google.api_core import retry +_RETRYABLE_REASONS = frozenset([ + 'backendError', + 'rateLimitExceeded', + 'internalError', +]) + + def _should_retry(exc): """Predicate for determining when to retry. @@ -27,7 +34,7 @@ if len(exc.errors) == 0: return False reason = exc.errors[0]['reason'] - return reason == 'backendError' or reason == 'rateLimitExceeded' + return reason in _RETRYABLE_REASONS DEFAULT_RETRY = retry.Retry(predicate=_should_retry)
{"golden_diff": "diff --git a/bigquery/google/cloud/bigquery/retry.py b/bigquery/google/cloud/bigquery/retry.py\n--- a/bigquery/google/cloud/bigquery/retry.py\n+++ b/bigquery/google/cloud/bigquery/retry.py\n@@ -16,6 +16,13 @@\n from google.api_core import retry\n \n \n+_RETRYABLE_REASONS = frozenset([\n+ 'backendError',\n+ 'rateLimitExceeded',\n+ 'internalError',\n+])\n+\n+\n def _should_retry(exc):\n \"\"\"Predicate for determining when to retry.\n \n@@ -27,7 +34,7 @@\n if len(exc.errors) == 0:\n return False\n reason = exc.errors[0]['reason']\n- return reason == 'backendError' or reason == 'rateLimitExceeded'\n+ return reason in _RETRYABLE_REASONS\n \n \n DEFAULT_RETRY = retry.Retry(predicate=_should_retry)\n", "issue": "Added retry on internal error\nAdded retry on internal error as suggested by the internal error message: '500 Error encountered during execution. Retrying may solve the problem.'. \r\nRationalised the conditions structure so it would simplify addition of other retry conditions.\n", "before_files": [{"content": "\n# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom google.api_core import retry\n\n\ndef _should_retry(exc):\n \"\"\"Predicate for determining when to retry.\n\n We retry if and only if the 'reason' is 'backendError'\n or 'rateLimitExceeded'.\n \"\"\"\n if not hasattr(exc, 'errors'):\n return False\n if len(exc.errors) == 0:\n return False\n reason = exc.errors[0]['reason']\n return reason == 'backendError' or reason == 'rateLimitExceeded'\n\n\nDEFAULT_RETRY = retry.Retry(predicate=_should_retry)\n\"\"\"The default retry object.\n\nAny method with a ``retry`` parameter will be retried automatically,\nwith reasonable defaults. To disable retry, pass ``retry=None``.\nTo modify the default retry behavior, call a ``with_XXX`` method\non ``DEFAULT_RETRY``. For example, to change the deadline to 30 seconds,\npass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``.\n\"\"\"\n", "path": "bigquery/google/cloud/bigquery/retry.py"}]}
1,009
201
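
The golden diff above in runnable isolation; the stub exception class is an assumption standing in for google-api-core's error type so the predicate can be exercised without the library:

```python
_RETRYABLE_REASONS = frozenset([
    'backendError',
    'rateLimitExceeded',
    'internalError',   # "500 ... Retrying may solve the problem."
])


class _StubApiError(Exception):
    """Carries the .errors payload the real BigQuery errors expose."""

    def __init__(self, errors):
        super().__init__(errors)
        self.errors = errors


def _should_retry(exc):
    # Covers both the missing-attribute and empty-list cases at once.
    if not getattr(exc, 'errors', None):
        return False
    return exc.errors[0]['reason'] in _RETRYABLE_REASONS


print(_should_retry(_StubApiError([{'reason': 'internalError'}])))  # True
print(_should_retry(_StubApiError([{'reason': 'invalidQuery'}])))   # False
```
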
gh_patches_debug_17764
rasdani/github-patches
git_diff
joke2k__faker-2038
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> BE Vat Numbers should have a modulo 97 check and start wit 1 or 0 * Faker version: 25 * OS: OS. X Monterery VAT Numbers generated in Belgium (currently set-up in the provider ssn for locale nl_BE should start with 1 or 0 and have a controle number with a module 97 check. Refer to https://en.wikipedia.org/wiki/VAT_identification_number ### Steps to reproduce 1. Generate vat_id wit locale nl_BE ### Expected behavior Vat numbers should be starting with BE0 or BE1 and have a module 97 check as the last two numbers </issue> <code> [start of faker/providers/ssn/nl_BE/__init__.py] 1 from .. import Provider as SsnProvider 2 3 """ 4 For more info on rijksregisternummer, see https://nl.wikipedia.org/wiki/Rijksregisternummer 5 Dutch/French only for now ... 6 """ 7 8 9 class Provider(SsnProvider): 10 def ssn(self) -> str: 11 """ 12 Returns a 11 digits Belgian SSN called "rijksregisternummer" as a string 13 14 The first 6 digits represent the birthdate with (in order) year, month and day. 15 The second group of 3 digits is represents a sequence number (order of birth). 16 It is even for women and odd for men. 17 For men the range starts at 1 and ends 997, for women 2 until 998. 18 The third group of 2 digits is a checksum based on the previous 9 digits (modulo 97). 19 Divide those 9 digits by 97, subtract the remainder from 97 and that's the result. 20 For persons born in or after 2000, the 9 digit number needs to be proceeded by a 2 21 (add 2000000000) before the division by 97. 22 23 """ 24 25 # see http://nl.wikipedia.org/wiki/Burgerservicenummer (in Dutch) 26 def _checksum(digits): 27 res = 97 - (digits % 97) 28 return res 29 30 # Generate a date (random) 31 mydate = self.generator.date() 32 # Convert it to an int 33 elms = mydate.split("-") 34 # Adjust for year 2000 if necessary 35 if elms[0][0] == "2": 36 above = True 37 else: 38 above = False 39 # Only keep the last 2 digits of the year 40 elms[0] = elms[0][2:4] 41 # Simulate the gender/sequence - should be 3 digits 42 seq = self.generator.random_int(1, 998) 43 # Right justify sequence and append to list 44 seq_str = f"{seq:0>3}" 45 elms.append(seq_str) 46 # Now convert list to an integer so the checksum can be calculated 47 date_as_int = int("".join(elms)) 48 if above: 49 date_as_int += 2000000000 50 # Generate checksum 51 s = _checksum(date_as_int) 52 s_rjust = f"{s:0>2}" 53 # return result as a string 54 elms.append(s_rjust) 55 return "".join(elms) 56 57 vat_id_formats = ("BE##########",) 58 59 def vat_id(self) -> str: 60 """ 61 http://ec.europa.eu/taxation_customs/vies/faq.html#item_11 62 :return: A random Belgian VAT ID 63 """ 64 return self.bothify(self.random_element(self.vat_id_formats)) 65 [end of faker/providers/ssn/nl_BE/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/faker/providers/ssn/nl_BE/__init__.py b/faker/providers/ssn/nl_BE/__init__.py --- a/faker/providers/ssn/nl_BE/__init__.py +++ b/faker/providers/ssn/nl_BE/__init__.py @@ -57,8 +57,24 @@ vat_id_formats = ("BE##########",) def vat_id(self) -> str: + + vat_id_random_section = ( + '#######' + ) + + vat_id_possible_initial_numbers = ( + '0', + '1' + ) """ http://ec.europa.eu/taxation_customs/vies/faq.html#item_11 - :return: A random Belgian VAT ID + https://en.wikipedia.org/wiki/VAT_identification_number + :return: A random Belgian VAT ID starting with 0 or 1 and has a correct checksum with a modulo 97 check """ - return self.bothify(self.random_element(self.vat_id_formats)) + generated_initial_number = self.random_element(vat_id_possible_initial_numbers) + vat_without_check = self.bothify(generated_initial_number + vat_id_random_section) + vat_as_int = int(vat_without_check) + vat_check = 97 - (vat_as_int % 97) + vat_check_str = f"{vat_check:0>2}" + + return "BE" + vat_without_check + vat_check_str
{"golden_diff": "diff --git a/faker/providers/ssn/nl_BE/__init__.py b/faker/providers/ssn/nl_BE/__init__.py\n--- a/faker/providers/ssn/nl_BE/__init__.py\n+++ b/faker/providers/ssn/nl_BE/__init__.py\n@@ -57,8 +57,24 @@\n vat_id_formats = (\"BE##########\",)\n \n def vat_id(self) -> str:\n+\n+ vat_id_random_section = (\n+ '#######'\n+ )\n+\n+ vat_id_possible_initial_numbers = (\n+ '0',\n+ '1'\n+ )\n \"\"\"\n http://ec.europa.eu/taxation_customs/vies/faq.html#item_11\n- :return: A random Belgian VAT ID\n+ https://en.wikipedia.org/wiki/VAT_identification_number\n+ :return: A random Belgian VAT ID starting with 0 or 1 and has a correct checksum with a modulo 97 check\n \"\"\"\n- return self.bothify(self.random_element(self.vat_id_formats))\n+ generated_initial_number = self.random_element(vat_id_possible_initial_numbers)\n+ vat_without_check = self.bothify(generated_initial_number + vat_id_random_section)\n+ vat_as_int = int(vat_without_check)\n+ vat_check = 97 - (vat_as_int % 97)\n+ vat_check_str = f\"{vat_check:0>2}\"\n+\n+ return \"BE\" + vat_without_check + vat_check_str\n", "issue": "BE Vat Numbers should have a modulo 97 check and start wit 1 or 0\n* Faker version: 25\r\n* OS: OS. X Monterery\r\n\r\nVAT Numbers generated in Belgium (currently set-up in the provider ssn for locale nl_BE should start with 1 or 0 and have a controle number with a module 97 check.\r\n\r\nRefer to https://en.wikipedia.org/wiki/VAT_identification_number\r\n\r\n### Steps to reproduce\r\n\r\n1. Generate vat_id wit locale nl_BE\r\n\r\n\r\n### Expected behavior\r\n\r\nVat numbers should be starting with BE0 or BE1 and have a module 97 check as the last two numbers\r\n\r\n\r\n\n", "before_files": [{"content": "from .. import Provider as SsnProvider\n\n\"\"\"\nFor more info on rijksregisternummer, see https://nl.wikipedia.org/wiki/Rijksregisternummer\nDutch/French only for now ...\n\"\"\"\n\n\nclass Provider(SsnProvider):\n def ssn(self) -> str:\n \"\"\"\n Returns a 11 digits Belgian SSN called \"rijksregisternummer\" as a string\n\n The first 6 digits represent the birthdate with (in order) year, month and day.\n The second group of 3 digits is represents a sequence number (order of birth).\n It is even for women and odd for men.\n For men the range starts at 1 and ends 997, for women 2 until 998.\n The third group of 2 digits is a checksum based on the previous 9 digits (modulo 97).\n Divide those 9 digits by 97, subtract the remainder from 97 and that's the result.\n For persons born in or after 2000, the 9 digit number needs to be proceeded by a 2\n (add 2000000000) before the division by 97.\n\n \"\"\"\n\n # see http://nl.wikipedia.org/wiki/Burgerservicenummer (in Dutch)\n def _checksum(digits):\n res = 97 - (digits % 97)\n return res\n\n # Generate a date (random)\n mydate = self.generator.date()\n # Convert it to an int\n elms = mydate.split(\"-\")\n # Adjust for year 2000 if necessary\n if elms[0][0] == \"2\":\n above = True\n else:\n above = False\n # Only keep the last 2 digits of the year\n elms[0] = elms[0][2:4]\n # Simulate the gender/sequence - should be 3 digits\n seq = self.generator.random_int(1, 998)\n # Right justify sequence and append to list\n seq_str = f\"{seq:0>3}\"\n elms.append(seq_str)\n # Now convert list to an integer so the checksum can be calculated\n date_as_int = int(\"\".join(elms))\n if above:\n date_as_int += 2000000000\n # Generate checksum\n s = _checksum(date_as_int)\n s_rjust = f\"{s:0>2}\"\n # return result as a string\n elms.append(s_rjust)\n return \"\".join(elms)\n\n 
vat_id_formats = (\"BE##########\",)\n\n def vat_id(self) -> str:\n \"\"\"\n http://ec.europa.eu/taxation_customs/vies/faq.html#item_11\n :return: A random Belgian VAT ID\n \"\"\"\n return self.bothify(self.random_element(self.vat_id_formats))\n", "path": "faker/providers/ssn/nl_BE/__init__.py"}]}
1,448
330
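
The Belgian VAT construction from the golden diff above, reduced to plain `random` so the modulo-97 rule can be checked outside Faker (the function name is an illustrative assumption):

```python
import random


def be_vat_id(rng=random):
    body = rng.choice('01')                       # BE ids start with 0 or 1
    body += ''.join(rng.choice('0123456789') for _ in range(7))
    check = 97 - (int(body) % 97)                 # two-digit control number
    return f'BE{body}{check:02d}'


vat = be_vat_id()
digits = vat[2:]
# Validation mirrors construction: the last two digits must equal 97 minus
# the first eight digits modulo 97.
assert int(digits[8:]) == 97 - int(digits[:8]) % 97
print(vat)
```
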
gh_patches_debug_21801
rasdani/github-patches
git_diff
SciTools__cartopy-2287
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Remove "cfeature.LAND" from "Feature Creation" example ? In the nice example provided on the [Feature Creation page](https://scitools.org.uk/cartopy/docs/latest/gallery/feature_creation.html), you could probably remove the useless line: B) `ax.add_feature(cfeature.LAND)` because you have already called A) `ax.stock_img()` As a matter of fact, B) being called after A), it should theoretically be plotted above A). I see you are using a default `zorder=-1` for LAND, so it is probably below everything else (did not know you could use negative zorder values) ``` >>> cfeature.LAND.kwargs {'edgecolor': 'face', 'zorder': -1, 'facecolor': array([0.9375 , 0.9375 , 0.859375])} ``` If I use `ax.add_feature(cfeature.LAND, zorder=10)`, I do get land with a uniform color above the stock image on land (and the stock ocean elsewhere) </issue> <code> [start of examples/lines_and_polygons/feature_creation.py] 1 """ 2 Feature Creation 3 ---------------- 4 5 This example manually instantiates a 6 :class:`cartopy.feature.NaturalEarthFeature` to access administrative 7 boundaries (states and provinces). 8 9 Note that this example is intended to illustrate the ability to construct 10 Natural Earth features that cartopy does not necessarily know about 11 *a priori*. 12 In this instance however, it would be possible to make use of the 13 pre-defined :data:`cartopy.feature.STATES` constant. 14 15 """ 16 from matplotlib.offsetbox import AnchoredText 17 import matplotlib.pyplot as plt 18 19 import cartopy.crs as ccrs 20 import cartopy.feature as cfeature 21 22 23 def main(): 24 fig = plt.figure() 25 ax = fig.add_subplot(1, 1, 1, projection=ccrs.PlateCarree()) 26 ax.set_extent([80, 170, -45, 30], crs=ccrs.PlateCarree()) 27 28 # Put a background image on for nice sea rendering. 29 ax.stock_img() 30 31 # Create a feature for States/Admin 1 regions at 1:50m from Natural Earth 32 states_provinces = cfeature.NaturalEarthFeature( 33 category='cultural', 34 name='admin_1_states_provinces_lines', 35 scale='50m', 36 facecolor='none') 37 38 SOURCE = 'Natural Earth' 39 LICENSE = 'public domain' 40 41 ax.add_feature(cfeature.LAND) 42 ax.add_feature(cfeature.COASTLINE) 43 ax.add_feature(states_provinces, edgecolor='gray') 44 45 # Add a text annotation for the license information to the 46 # the bottom right corner. 47 text = AnchoredText('\u00A9 {}; license: {}' 48 ''.format(SOURCE, LICENSE), 49 loc=4, prop={'size': 12}, frameon=True) 50 ax.add_artist(text) 51 52 plt.show() 53 54 55 if __name__ == '__main__': 56 main() 57 [end of examples/lines_and_polygons/feature_creation.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/lines_and_polygons/feature_creation.py b/examples/lines_and_polygons/feature_creation.py --- a/examples/lines_and_polygons/feature_creation.py +++ b/examples/lines_and_polygons/feature_creation.py @@ -28,7 +28,7 @@ # Put a background image on for nice sea rendering. ax.stock_img() - # Create a feature for States/Admin 1 regions at 1:50m from Natural Earth + # Create a feature for States/Admin 1 regions at 1:50m from Natural Earth. states_provinces = cfeature.NaturalEarthFeature( category='cultural', name='admin_1_states_provinces_lines', @@ -38,9 +38,11 @@ SOURCE = 'Natural Earth' LICENSE = 'public domain' - ax.add_feature(cfeature.LAND) - ax.add_feature(cfeature.COASTLINE) + # Add our states feature. ax.add_feature(states_provinces, edgecolor='gray') + # Add land feature, overriding the default negative zorder so it shows + # above the background image. + ax.add_feature(cfeature.LAND, zorder=1, edgecolor='k') # Add a text annotation for the license information to the # the bottom right corner.
{"golden_diff": "diff --git a/examples/lines_and_polygons/feature_creation.py b/examples/lines_and_polygons/feature_creation.py\n--- a/examples/lines_and_polygons/feature_creation.py\n+++ b/examples/lines_and_polygons/feature_creation.py\n@@ -28,7 +28,7 @@\n # Put a background image on for nice sea rendering.\n ax.stock_img()\n \n- # Create a feature for States/Admin 1 regions at 1:50m from Natural Earth\n+ # Create a feature for States/Admin 1 regions at 1:50m from Natural Earth.\n states_provinces = cfeature.NaturalEarthFeature(\n category='cultural',\n name='admin_1_states_provinces_lines',\n@@ -38,9 +38,11 @@\n SOURCE = 'Natural Earth'\n LICENSE = 'public domain'\n \n- ax.add_feature(cfeature.LAND)\n- ax.add_feature(cfeature.COASTLINE)\n+ # Add our states feature.\n ax.add_feature(states_provinces, edgecolor='gray')\n+ # Add land feature, overriding the default negative zorder so it shows\n+ # above the background image.\n+ ax.add_feature(cfeature.LAND, zorder=1, edgecolor='k')\n \n # Add a text annotation for the license information to the\n # the bottom right corner.\n", "issue": "Remove \"cfeature.LAND\" from \"Feature Creation\" example ?\nIn the nice example provided on the [Feature Creation page](https://scitools.org.uk/cartopy/docs/latest/gallery/feature_creation.html), you could probably remove the useless line:\r\nB) `ax.add_feature(cfeature.LAND)`\r\nbecause you have already called\r\nA) `ax.stock_img()`\r\n\r\nAs a matter of fact, B) being called after A), it should theoretically be plotted above A). I see you are using a default `zorder=-1` for LAND, so it is probably below everything else (did not know you could use negative zorder values)\r\n```\r\n>>> cfeature.LAND.kwargs\r\n{'edgecolor': 'face', 'zorder': -1, 'facecolor': array([0.9375 , 0.9375 , 0.859375])}\r\n```\r\nIf I use `ax.add_feature(cfeature.LAND, zorder=10)`, I do get land with a uniform color above the stock image on land (and the stock ocean elsewhere)\n", "before_files": [{"content": "\"\"\"\nFeature Creation\n----------------\n\nThis example manually instantiates a\n:class:`cartopy.feature.NaturalEarthFeature` to access administrative\nboundaries (states and provinces).\n\nNote that this example is intended to illustrate the ability to construct\nNatural Earth features that cartopy does not necessarily know about\n*a priori*.\nIn this instance however, it would be possible to make use of the\npre-defined :data:`cartopy.feature.STATES` constant.\n\n\"\"\"\nfrom matplotlib.offsetbox import AnchoredText\nimport matplotlib.pyplot as plt\n\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeature\n\n\ndef main():\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1, projection=ccrs.PlateCarree())\n ax.set_extent([80, 170, -45, 30], crs=ccrs.PlateCarree())\n\n # Put a background image on for nice sea rendering.\n ax.stock_img()\n\n # Create a feature for States/Admin 1 regions at 1:50m from Natural Earth\n states_provinces = cfeature.NaturalEarthFeature(\n category='cultural',\n name='admin_1_states_provinces_lines',\n scale='50m',\n facecolor='none')\n\n SOURCE = 'Natural Earth'\n LICENSE = 'public domain'\n\n ax.add_feature(cfeature.LAND)\n ax.add_feature(cfeature.COASTLINE)\n ax.add_feature(states_provinces, edgecolor='gray')\n\n # Add a text annotation for the license information to the\n # the bottom right corner.\n text = AnchoredText('\\u00A9 {}; license: {}'\n ''.format(SOURCE, LICENSE),\n loc=4, prop={'size': 12}, frameon=True)\n ax.add_artist(text)\n\n plt.show()\n\n\nif __name__ == '__main__':\n 
main()\n", "path": "examples/lines_and_polygons/feature_creation.py"}]}
1,298
294
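
A minimal reproduction of the zorder point in this record: `cfeature.LAND` ships with `zorder=-1`, so even when drawn after `stock_img()` it ends up underneath; the golden diff lifts it with `zorder=1` and a black edge, mirrored here:

```python
import matplotlib.pyplot as plt

import cartopy.crs as ccrs
import cartopy.feature as cfeature

ax = plt.axes(projection=ccrs.PlateCarree())
ax.stock_img()                                          # background raster
ax.add_feature(cfeature.LAND, zorder=1, edgecolor='k')  # now drawn on top
plt.show()
```
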
gh_patches_debug_26290
rasdani/github-patches
git_diff
scrapy__scrapy-4052
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Exception when using DummyStatsCollector ### Description Using the DummyStatsCollector results in an exception: ``` 2019-09-09 13:51:23 [scrapy.utils.signal] ERROR: Error caught on signal handler: <bound method CoreStats.spider_closed of <scrapy.extensions.corestats.CoreStats object at 0x7f86269cac18>> Traceback (most recent call last): File ".../lib/python3.6/site-packages/twisted/internet/defer.py", line 150, in maybeDeferred result = f(*args, **kw) File ".../lib/python3.6/site-packages/pydispatch/robustapply.py", line 55, in robustApply return receiver(*arguments, **named) File ".../lib/python3.6/site-packages/scrapy/extensions/corestats.py", line 28, in spider_closed elapsed_time = finish_time - self.stats.get_value('start_time') TypeError: unsupported operand type(s) for -: 'datetime.datetime' and 'NoneType' ``` This problem has been introduced in aa46e1995cd5cb1099aba17535372b538bd656b3. ### Steps to Reproduce Set `STATS_CLASS = "scrapy.statscollectors.DummyStatsCollector"` in the settings module as described in the documentation (https://docs.scrapy.org/en/latest/topics/stats.html#dummystatscollector). **Expected behavior:** no exception **Actual behavior:** exception thrown **Reproduces how often:** always ### Versions At least master as of 534de7395da3a53b5a2c89960db9ec5d8fdab60c ### Fix A possible fix is to use the elapsed time as a default argument so that `get_value()` does not return None. I can prepare a PR if needed. ```diff --- a/scrapy/extensions/corestats.py +++ b/scrapy/extensions/corestats.py @@ -25,7 +25,7 @@ class CoreStats(object): def spider_closed(self, spider, reason): finish_time = datetime.datetime.utcnow() - elapsed_time = finish_time - self.stats.get_value('start_time') + elapsed_time = finish_time - self.stats.get_value('start_time', finish_time) elapsed_time_seconds = elapsed_time.total_seconds() self.stats.set_value('elapsed_time_seconds', elapsed_time_seconds, spider=spider) self.stats.set_value('finish_time', finish_time, spider=spider) ``` </issue> <code> [start of scrapy/extensions/corestats.py] 1 """ 2 Extension for collecting core stats like items scraped and start/finish times 3 """ 4 import datetime 5 6 from scrapy import signals 7 8 class CoreStats(object): 9 10 def __init__(self, stats): 11 self.stats = stats 12 13 @classmethod 14 def from_crawler(cls, crawler): 15 o = cls(crawler.stats) 16 crawler.signals.connect(o.spider_opened, signal=signals.spider_opened) 17 crawler.signals.connect(o.spider_closed, signal=signals.spider_closed) 18 crawler.signals.connect(o.item_scraped, signal=signals.item_scraped) 19 crawler.signals.connect(o.item_dropped, signal=signals.item_dropped) 20 crawler.signals.connect(o.response_received, signal=signals.response_received) 21 return o 22 23 def spider_opened(self, spider): 24 self.stats.set_value('start_time', datetime.datetime.utcnow(), spider=spider) 25 26 def spider_closed(self, spider, reason): 27 finish_time = datetime.datetime.utcnow() 28 elapsed_time = finish_time - self.stats.get_value('start_time') 29 elapsed_time_seconds = elapsed_time.total_seconds() 30 self.stats.set_value('elapsed_time_seconds', elapsed_time_seconds, spider=spider) 31 self.stats.set_value('finish_time', finish_time, spider=spider) 32 self.stats.set_value('finish_reason', reason, spider=spider) 33 34 def item_scraped(self, item, spider): 35 self.stats.inc_value('item_scraped_count', spider=spider) 36 37 def 
response_received(self, spider): 38 self.stats.inc_value('response_received_count', spider=spider) 39 40 def item_dropped(self, item, spider, exception): 41 reason = exception.__class__.__name__ 42 self.stats.inc_value('item_dropped_count', spider=spider) 43 self.stats.inc_value('item_dropped_reasons_count/%s' % reason, spider=spider) 44 [end of scrapy/extensions/corestats.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scrapy/extensions/corestats.py b/scrapy/extensions/corestats.py --- a/scrapy/extensions/corestats.py +++ b/scrapy/extensions/corestats.py @@ -1,14 +1,16 @@ """ Extension for collecting core stats like items scraped and start/finish times """ -import datetime +from datetime import datetime from scrapy import signals + class CoreStats(object): def __init__(self, stats): self.stats = stats + self.start_time = None @classmethod def from_crawler(cls, crawler): @@ -21,11 +23,12 @@ return o def spider_opened(self, spider): - self.stats.set_value('start_time', datetime.datetime.utcnow(), spider=spider) + self.start_time = datetime.utcnow() + self.stats.set_value('start_time', self.start_time, spider=spider) def spider_closed(self, spider, reason): - finish_time = datetime.datetime.utcnow() - elapsed_time = finish_time - self.stats.get_value('start_time') + finish_time = datetime.utcnow() + elapsed_time = finish_time - self.start_time elapsed_time_seconds = elapsed_time.total_seconds() self.stats.set_value('elapsed_time_seconds', elapsed_time_seconds, spider=spider) self.stats.set_value('finish_time', finish_time, spider=spider)
{"golden_diff": "diff --git a/scrapy/extensions/corestats.py b/scrapy/extensions/corestats.py\n--- a/scrapy/extensions/corestats.py\n+++ b/scrapy/extensions/corestats.py\n@@ -1,14 +1,16 @@\n \"\"\"\n Extension for collecting core stats like items scraped and start/finish times\n \"\"\"\n-import datetime\n+from datetime import datetime\n \n from scrapy import signals\n \n+\n class CoreStats(object):\n \n def __init__(self, stats):\n self.stats = stats\n+ self.start_time = None\n \n @classmethod\n def from_crawler(cls, crawler):\n@@ -21,11 +23,12 @@\n return o\n \n def spider_opened(self, spider):\n- self.stats.set_value('start_time', datetime.datetime.utcnow(), spider=spider)\n+ self.start_time = datetime.utcnow()\n+ self.stats.set_value('start_time', self.start_time, spider=spider)\n \n def spider_closed(self, spider, reason):\n- finish_time = datetime.datetime.utcnow()\n- elapsed_time = finish_time - self.stats.get_value('start_time')\n+ finish_time = datetime.utcnow()\n+ elapsed_time = finish_time - self.start_time\n elapsed_time_seconds = elapsed_time.total_seconds()\n self.stats.set_value('elapsed_time_seconds', elapsed_time_seconds, spider=spider)\n self.stats.set_value('finish_time', finish_time, spider=spider)\n", "issue": "Exception when using DummyStatsCollector\n### Description\r\n\r\nUsing the DummyStatsCollector results in an exception:\r\n\r\n```\r\n2019-09-09 13:51:23 [scrapy.utils.signal] ERROR: Error caught on signal handler: <bound method CoreStats.spider_closed of <scrapy.extensions.corestats.CoreStats object at 0x7f86269cac18>>\r\nTraceback (most recent call last):\r\n File \".../lib/python3.6/site-packages/twisted/internet/defer.py\", line 150, in maybeDeferred\r\n result = f(*args, **kw)\r\n File \".../lib/python3.6/site-packages/pydispatch/robustapply.py\", line 55, in robustApply\r\n return receiver(*arguments, **named)\r\n File \".../lib/python3.6/site-packages/scrapy/extensions/corestats.py\", line 28, in spider_closed\r\n elapsed_time = finish_time - self.stats.get_value('start_time')\r\nTypeError: unsupported operand type(s) for -: 'datetime.datetime' and 'NoneType'\r\n```\r\n\r\nThis problem has been introduced in aa46e1995cd5cb1099aba17535372b538bd656b3.\r\n\r\n### Steps to Reproduce\r\n\r\nSet `STATS_CLASS = \"scrapy.statscollectors.DummyStatsCollector\"` in the settings module as described in the documentation (https://docs.scrapy.org/en/latest/topics/stats.html#dummystatscollector).\r\n\r\n**Expected behavior:** no exception\r\n**Actual behavior:** exception thrown\r\n**Reproduces how often:** always\r\n\r\n### Versions\r\n\r\nAt least master as of 534de7395da3a53b5a2c89960db9ec5d8fdab60c\r\n\r\n### Fix\r\n\r\nA possible fix is to use the elapsed time as a default argument so that `get_value()` does not return None. 
I can prepare a PR if needed.\r\n\r\n```diff\r\n--- a/scrapy/extensions/corestats.py\r\n+++ b/scrapy/extensions/corestats.py\r\n@@ -25,7 +25,7 @@ class CoreStats(object):\r\n \r\n def spider_closed(self, spider, reason):\r\n finish_time = datetime.datetime.utcnow()\r\n- elapsed_time = finish_time - self.stats.get_value('start_time')\r\n+ elapsed_time = finish_time - self.stats.get_value('start_time', finish_time)\r\n elapsed_time_seconds = elapsed_time.total_seconds()\r\n self.stats.set_value('elapsed_time_seconds', elapsed_time_seconds, spider=spider)\r\n self.stats.set_value('finish_time', finish_time, spider=spider)\r\n```\n", "before_files": [{"content": "\"\"\"\nExtension for collecting core stats like items scraped and start/finish times\n\"\"\"\nimport datetime\n\nfrom scrapy import signals\n\nclass CoreStats(object):\n\n def __init__(self, stats):\n self.stats = stats\n\n @classmethod\n def from_crawler(cls, crawler):\n o = cls(crawler.stats)\n crawler.signals.connect(o.spider_opened, signal=signals.spider_opened)\n crawler.signals.connect(o.spider_closed, signal=signals.spider_closed)\n crawler.signals.connect(o.item_scraped, signal=signals.item_scraped)\n crawler.signals.connect(o.item_dropped, signal=signals.item_dropped)\n crawler.signals.connect(o.response_received, signal=signals.response_received)\n return o\n\n def spider_opened(self, spider):\n self.stats.set_value('start_time', datetime.datetime.utcnow(), spider=spider)\n\n def spider_closed(self, spider, reason):\n finish_time = datetime.datetime.utcnow()\n elapsed_time = finish_time - self.stats.get_value('start_time')\n elapsed_time_seconds = elapsed_time.total_seconds()\n self.stats.set_value('elapsed_time_seconds', elapsed_time_seconds, spider=spider)\n self.stats.set_value('finish_time', finish_time, spider=spider)\n self.stats.set_value('finish_reason', reason, spider=spider)\n\n def item_scraped(self, item, spider):\n self.stats.inc_value('item_scraped_count', spider=spider)\n\n def response_received(self, spider):\n self.stats.inc_value('response_received_count', spider=spider)\n\n def item_dropped(self, item, spider, exception):\n reason = exception.__class__.__name__\n self.stats.inc_value('item_dropped_count', spider=spider)\n self.stats.inc_value('item_dropped_reasons_count/%s' % reason, spider=spider)\n", "path": "scrapy/extensions/corestats.py"}]}
1,581
298
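A note on the failure mode in the row above: the traceback comes from `spider_closed` subtracting `None` from a `datetime`, because the dummy backend's `set_value()` is a no-op and its `get_value()` returns only the supplied default. The sketch below is a minimal, framework-free repro of that contract; `DummyStats` is an illustrative stand-in written here for clarity, not Scrapy's actual `DummyStatsCollector` class.

```python
# Minimal repro of the reported TypeError, assuming a stats backend whose
# set_value() discards data and whose get_value() returns only the default,
# which is how the issue describes DummyStatsCollector behaving.
from datetime import datetime


class DummyStats:  # illustrative stand-in, not scrapy.statscollectors code
    def set_value(self, key, value, spider=None):
        pass  # value is silently dropped

    def get_value(self, key, default=None, spider=None):
        return default  # 'start_time' was never stored, so this is None


stats = DummyStats()
stats.set_value("start_time", datetime.utcnow())
try:
    datetime.utcnow() - stats.get_value("start_time")
except TypeError as exc:
    print(exc)  # unsupported operand type(s) for -: 'datetime.datetime' and 'NoneType'
```

Note that the merged golden diff fixes this differently from the issue's proposal: `CoreStats` caches `start_time` as an attribute on the extension itself, so the elapsed time stays correct regardless of the stats backend, whereas the proposed `get_value('start_time', finish_time)` default would have silently reported an elapsed time of zero.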
gh_patches_debug_29
rasdani/github-patches
git_diff
MongoEngine__mongoengine-2424
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> When will be new release? When are you planning a new release? Some features has been added since last one in May. Thanks! </issue> <code> [start of mongoengine/__init__.py] 1 # Import submodules so that we can expose their __all__ 2 from mongoengine import connection 3 from mongoengine import document 4 from mongoengine import errors 5 from mongoengine import fields 6 from mongoengine import queryset 7 from mongoengine import signals 8 9 # Import everything from each submodule so that it can be accessed via 10 # mongoengine, e.g. instead of `from mongoengine.connection import connect`, 11 # users can simply use `from mongoengine import connect`, or even 12 # `from mongoengine import *` and then `connect('testdb')`. 13 from mongoengine.connection import * 14 from mongoengine.document import * 15 from mongoengine.errors import * 16 from mongoengine.fields import * 17 from mongoengine.queryset import * 18 from mongoengine.signals import * 19 20 21 __all__ = ( 22 list(document.__all__) 23 + list(fields.__all__) 24 + list(connection.__all__) 25 + list(queryset.__all__) 26 + list(signals.__all__) 27 + list(errors.__all__) 28 ) 29 30 31 VERSION = (0, 20, 0) 32 33 34 def get_version(): 35 """Return the VERSION as a string. 36 37 For example, if `VERSION == (0, 10, 7)`, return '0.10.7'. 38 """ 39 return ".".join(map(str, VERSION)) 40 41 42 __version__ = get_version() 43 [end of mongoengine/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mongoengine/__init__.py b/mongoengine/__init__.py --- a/mongoengine/__init__.py +++ b/mongoengine/__init__.py @@ -28,7 +28,7 @@ ) -VERSION = (0, 20, 0) +VERSION = (0, 21, 0) def get_version():
{"golden_diff": "diff --git a/mongoengine/__init__.py b/mongoengine/__init__.py\n--- a/mongoengine/__init__.py\n+++ b/mongoengine/__init__.py\n@@ -28,7 +28,7 @@\n )\n \n \n-VERSION = (0, 20, 0)\n+VERSION = (0, 21, 0)\n \n \n def get_version():\n", "issue": "When will be new release?\nWhen are you planning a new release? Some features has been added since last one in May. Thanks!\n", "before_files": [{"content": "# Import submodules so that we can expose their __all__\nfrom mongoengine import connection\nfrom mongoengine import document\nfrom mongoengine import errors\nfrom mongoengine import fields\nfrom mongoengine import queryset\nfrom mongoengine import signals\n\n# Import everything from each submodule so that it can be accessed via\n# mongoengine, e.g. instead of `from mongoengine.connection import connect`,\n# users can simply use `from mongoengine import connect`, or even\n# `from mongoengine import *` and then `connect('testdb')`.\nfrom mongoengine.connection import *\nfrom mongoengine.document import *\nfrom mongoengine.errors import *\nfrom mongoengine.fields import *\nfrom mongoengine.queryset import *\nfrom mongoengine.signals import *\n\n\n__all__ = (\n list(document.__all__)\n + list(fields.__all__)\n + list(connection.__all__)\n + list(queryset.__all__)\n + list(signals.__all__)\n + list(errors.__all__)\n)\n\n\nVERSION = (0, 20, 0)\n\n\ndef get_version():\n \"\"\"Return the VERSION as a string.\n\n For example, if `VERSION == (0, 10, 7)`, return '0.10.7'.\n \"\"\"\n return \".\".join(map(str, VERSION))\n\n\n__version__ = get_version()\n", "path": "mongoengine/__init__.py"}]}
922
86
gh_patches_debug_1519
rasdani/github-patches
git_diff
databricks__koalas-1959
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> plot does not work in Ubuntu Plotting with `matplotlib` 3.3.0+ causes an exception because of Pillow dependency added at https://github.com/matplotlib/matplotlib/commit/370e9a2d5d9e637abc90b3270d368642c69f66c6#diff-60f61ab7a8d1910d86d9fda2261620314edcae5894d5aaa236b821c7256badd7 on Ubuntu: ```python import databricks.koalas as ks df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6]}) df.plot... ``` ``` ImportError: /.../lib/python3.8/site-packages/PIL/_imaging.cpython-38-x86_64-linux-gnu.so: ELF load command address/offset not properly aligned ``` </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 3 # 4 # Copyright (C) 2019 Databricks, Inc. 5 # 6 # Licensed under the Apache License, Version 2.0 (the "License"); 7 # you may not use this file except in compliance with the License. 8 # You may obtain a copy of the License at 9 # 10 # http://www.apache.org/licenses/LICENSE-2.0 11 # 12 # Unless required by applicable law or agreed to in writing, software 13 # distributed under the License is distributed on an "AS IS" BASIS, 14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 # See the License for the specific language governing permissions and 16 # limitations under the License. 17 # 18 from __future__ import print_function 19 20 from io import open 21 import sys 22 from setuptools import setup 23 from os import path 24 25 DESCRIPTION = "Koalas: pandas API on Apache Spark" 26 27 this_directory = path.abspath(path.dirname(__file__)) 28 with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f: 29 LONG_DESCRIPTION = f.read() 30 31 try: 32 exec(open('databricks/koalas/version.py').read()) 33 except IOError: 34 print("Failed to load Koalas version file for packaging. You must be in Koalas root dir.", 35 file=sys.stderr) 36 sys.exit(-1) 37 VERSION = __version__ # noqa 38 39 setup( 40 name='koalas', 41 version=VERSION, 42 packages=[ 43 'databricks', 44 'databricks.koalas', 45 'databricks.koalas.missing', 46 'databricks.koalas.plot', 47 'databricks.koalas.spark', 48 'databricks.koalas.typedef', 49 'databricks.koalas.usage_logging'], 50 extras_require={ 51 'spark': ['pyspark>=2.4.0'], 52 'mlflow': ['mlflow>=1.0'], 53 'plotly': ['plotly>=4.8'], 54 }, 55 python_requires='>=3.5,<3.9', 56 install_requires=[ 57 'pandas>=0.23.2', 58 'pyarrow>=0.10', 59 'numpy>=1.14', 60 'matplotlib>=3.0.0', 61 ], 62 author="Databricks", 63 author_email="[email protected]", 64 license='http://www.apache.org/licenses/LICENSE-2.0', 65 url="https://github.com/databricks/koalas", 66 project_urls={ 67 'Bug Tracker': 'https://github.com/databricks/koalas/issues', 68 'Documentation': 'https://koalas.readthedocs.io/', 69 'Source Code': 'https://github.com/databricks/koalas' 70 }, 71 description=DESCRIPTION, 72 long_description=LONG_DESCRIPTION, 73 long_description_content_type='text/markdown', 74 classifiers=[ 75 'Programming Language :: Python :: 3.5', 76 'Programming Language :: Python :: 3.6', 77 'Programming Language :: Python :: 3.7', 78 'Programming Language :: Python :: 3.8', 79 ], 80 ) 81 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -57,7 +57,7 @@ 'pandas>=0.23.2', 'pyarrow>=0.10', 'numpy>=1.14', - 'matplotlib>=3.0.0', + 'matplotlib>=3.0.0,<3.3.0', ], author="Databricks", author_email="[email protected]",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -57,7 +57,7 @@\n 'pandas>=0.23.2',\n 'pyarrow>=0.10',\n 'numpy>=1.14',\n- 'matplotlib>=3.0.0',\n+ 'matplotlib>=3.0.0,<3.3.0',\n ],\n author=\"Databricks\",\n author_email=\"[email protected]\",\n", "issue": "plot does not work in Ubuntu\nPlotting with `matplotlib` 3.3.0+ causes an exception because of Pillow dependency added at https://github.com/matplotlib/matplotlib/commit/370e9a2d5d9e637abc90b3270d368642c69f66c6#diff-60f61ab7a8d1910d86d9fda2261620314edcae5894d5aaa236b821c7256badd7 on Ubuntu:\r\n\r\n```python\r\nimport databricks.koalas as ks\r\ndf = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6]})\r\ndf.plot...\r\n```\r\n\r\n```\r\nImportError: /.../lib/python3.8/site-packages/PIL/_imaging.cpython-38-x86_64-linux-gnu.so: ELF load command address/offset not properly aligned\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n\n#\n# Copyright (C) 2019 Databricks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom __future__ import print_function\n\nfrom io import open\nimport sys\nfrom setuptools import setup\nfrom os import path\n\nDESCRIPTION = \"Koalas: pandas API on Apache Spark\"\n\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n LONG_DESCRIPTION = f.read()\n\ntry:\n exec(open('databricks/koalas/version.py').read())\nexcept IOError:\n print(\"Failed to load Koalas version file for packaging. You must be in Koalas root dir.\",\n file=sys.stderr)\n sys.exit(-1)\nVERSION = __version__ # noqa\n\nsetup(\n name='koalas',\n version=VERSION,\n packages=[\n 'databricks',\n 'databricks.koalas',\n 'databricks.koalas.missing',\n 'databricks.koalas.plot',\n 'databricks.koalas.spark',\n 'databricks.koalas.typedef',\n 'databricks.koalas.usage_logging'],\n extras_require={\n 'spark': ['pyspark>=2.4.0'],\n 'mlflow': ['mlflow>=1.0'],\n 'plotly': ['plotly>=4.8'],\n },\n python_requires='>=3.5,<3.9',\n install_requires=[\n 'pandas>=0.23.2',\n 'pyarrow>=0.10',\n 'numpy>=1.14',\n 'matplotlib>=3.0.0',\n ],\n author=\"Databricks\",\n author_email=\"[email protected]\",\n license='http://www.apache.org/licenses/LICENSE-2.0',\n url=\"https://github.com/databricks/koalas\",\n project_urls={\n 'Bug Tracker': 'https://github.com/databricks/koalas/issues',\n 'Documentation': 'https://koalas.readthedocs.io/',\n 'Source Code': 'https://github.com/databricks/koalas'\n },\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n classifiers=[\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n)\n", "path": "setup.py"}]}
1,593
113
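The fix in the row above is an upper-bound pin (`matplotlib>=3.0.0,<3.3.0` in `setup.py`) rather than a repair of the broken Pillow wheel itself, since matplotlib 3.3.0 made Pillow a hard dependency. Below is a hedged sketch of a defensive check an application could add on top; the function name and message are illustrative, and the exact import that first touches Pillow may differ by matplotlib version.

```python
# Illustrative guard (not part of Koalas): surface a broken matplotlib/Pillow
# install with a clear message before any df.plot() call is attempted.
def assert_matplotlib_usable():
    try:
        import matplotlib.pyplot  # noqa: F401  (matplotlib>=3.3.0 pulls in Pillow)
    except ImportError as exc:
        raise RuntimeError(
            "matplotlib (or its Pillow dependency) failed to import; "
            "pinning 'matplotlib<3.3.0' works around this, as the fix above does"
        ) from exc


assert_matplotlib_usable()
```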
gh_patches_debug_30358
rasdani/github-patches
git_diff
nvaccess__nvda-10338
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> winVersion.isWin10: modify it to return early if major is not 10 Hi, This feature request should not be done for 2019.3 in order to avoid introducing possible regressions, but perhaps defer it to 2020.1: ### Is your feature request related to a problem? Please describe. In NVDA 2019.2, as part of UIA console support, winVersion module introduced isWin10 function which returns whether a system is running a specific version of Windows 10 or later. At the moment it checks major version in two places: * If "at least" flag is specified: return False if version is less than 10. * If "at least" flag is not specified: return False if major version is not 10. At least it is understandable. But I would argue that for sake of performance and keeping the subject line and body in sync, I propose the below modification. ### Describe the solution you'd like Instead of checking flags after defining version information map, return early if major version isn't even 10. Not only flag checks can be avoided on Windows 8.1 and earlier, it can make the function body in line with what the function title says. Thus I propose: 1. First, check winVersion.major and return False if it isn't 10. 2. Define version to build map. 3. Same as current function. ### Describe alternatives you've considered Keep the function as is. ### Additional context I'm sure there might be other opportunities to optimize this function, but that's in the future. Thanks. </issue> <code> [start of source/winVersion.py] 1 # A part of NonVisual Desktop Access (NVDA) 2 # Copyright (C) 2006-2019 NV Access Limited 3 # This file is covered by the GNU General Public License. 4 # See the file COPYING for more details. 5 6 import sys 7 import os 8 import winUser 9 10 winVersion=sys.getwindowsversion() 11 winVersionText="{v.major}.{v.minor}.{v.build}".format(v=winVersion) 12 if winVersion.service_pack_major!=0: 13 winVersionText+=" service pack %d"%winVersion.service_pack_major 14 if winVersion.service_pack_minor!=0: 15 winVersionText+=".%d"%winVersion.service_pack_minor 16 winVersionText+=" %s" % ("workstation","domain controller","server")[winVersion.product_type-1] 17 18 def isSupportedOS(): 19 # NVDA can only run on Windows 7 Service pack 1 and above 20 return (winVersion.major,winVersion.minor,winVersion.service_pack_major) >= (6,1,1) 21 22 def canRunVc2010Builds(): 23 return isSupportedOS() 24 25 UWP_OCR_DATA_PATH = os.path.expandvars(r"$windir\OCR") 26 def isUwpOcrAvailable(): 27 return os.path.isdir(UWP_OCR_DATA_PATH) 28 29 def isWin10(version=1507, atLeast=True): 30 """ 31 Returns True if NVDA is running on the supplied release version of Windows 10. If no argument is supplied, returns True for all public Windows 10 releases. 32 @param version: a release version of Windows 10 (such as 1903). 33 @param atLeast: return True if NVDA is running on at least this Windows 10 build (i.e. this version or higher). 
34 """ 35 from logHandler import log 36 win10VersionsToBuilds={ 37 1507: 10240, 38 1511: 10586, 39 1607: 14393, 40 1703: 15063, 41 1709: 16299, 42 1803: 17134, 43 1809: 17763, 44 1903: 18362, 45 1909: 18363, 46 } 47 if atLeast and winVersion.major < 10: 48 return False 49 elif not atLeast and winVersion.major != 10: 50 return False 51 try: 52 if atLeast: 53 return winVersion.build >= win10VersionsToBuilds[version] 54 else: 55 return winVersion.build == win10VersionsToBuilds[version] 56 except KeyError: 57 log.error("Unknown Windows 10 version {}".format(version)) 58 return False 59 60 61 def isFullScreenMagnificationAvailable(): 62 return (winVersion.major, winVersion.minor) >= (6, 2) 63 [end of source/winVersion.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/source/winVersion.py b/source/winVersion.py --- a/source/winVersion.py +++ b/source/winVersion.py @@ -26,34 +26,35 @@ def isUwpOcrAvailable(): return os.path.isdir(UWP_OCR_DATA_PATH) -def isWin10(version=1507, atLeast=True): + +WIN10_VERSIONS_TO_BUILDS = { + 1507: 10240, + 1511: 10586, + 1607: 14393, + 1703: 15063, + 1709: 16299, + 1803: 17134, + 1809: 17763, + 1903: 18362, + 1909: 18363, +} + + +def isWin10(version: int = 1507, atLeast: bool = True): """ Returns True if NVDA is running on the supplied release version of Windows 10. If no argument is supplied, returns True for all public Windows 10 releases. @param version: a release version of Windows 10 (such as 1903). @param atLeast: return True if NVDA is running on at least this Windows 10 build (i.e. this version or higher). """ - from logHandler import log - win10VersionsToBuilds={ - 1507: 10240, - 1511: 10586, - 1607: 14393, - 1703: 15063, - 1709: 16299, - 1803: 17134, - 1809: 17763, - 1903: 18362, - 1909: 18363, - } - if atLeast and winVersion.major < 10: - return False - elif not atLeast and winVersion.major != 10: + if winVersion.major != 10: return False try: if atLeast: - return winVersion.build >= win10VersionsToBuilds[version] + return winVersion.build >= WIN10_VERSIONS_TO_BUILDS[version] else: - return winVersion.build == win10VersionsToBuilds[version] + return winVersion.build == WIN10_VERSIONS_TO_BUILDS[version] except KeyError: + from logHandler import log log.error("Unknown Windows 10 version {}".format(version)) return False
{"golden_diff": "diff --git a/source/winVersion.py b/source/winVersion.py\n--- a/source/winVersion.py\n+++ b/source/winVersion.py\n@@ -26,34 +26,35 @@\n def isUwpOcrAvailable():\r\n \treturn os.path.isdir(UWP_OCR_DATA_PATH)\r\n \r\n-def isWin10(version=1507, atLeast=True):\r\n+\r\n+WIN10_VERSIONS_TO_BUILDS = {\r\n+\t1507: 10240,\r\n+\t1511: 10586,\r\n+\t1607: 14393,\r\n+\t1703: 15063,\r\n+\t1709: 16299,\r\n+\t1803: 17134,\r\n+\t1809: 17763,\r\n+\t1903: 18362,\r\n+\t1909: 18363,\r\n+}\r\n+\r\n+\r\n+def isWin10(version: int = 1507, atLeast: bool = True):\r\n \t\"\"\"\r\n \tReturns True if NVDA is running on the supplied release version of Windows 10. If no argument is supplied, returns True for all public Windows 10 releases.\r\n \t@param version: a release version of Windows 10 (such as 1903).\r\n \t@param atLeast: return True if NVDA is running on at least this Windows 10 build (i.e. this version or higher).\r\n \t\"\"\"\r\n-\tfrom logHandler import log\r\n-\twin10VersionsToBuilds={\r\n-\t\t1507: 10240,\r\n-\t\t1511: 10586,\r\n-\t\t1607: 14393,\r\n-\t\t1703: 15063,\r\n-\t\t1709: 16299,\r\n-\t\t1803: 17134,\r\n-\t\t1809: 17763,\r\n-\t\t1903: 18362,\r\n-\t\t1909: 18363,\r\n-\t}\r\n-\tif atLeast and winVersion.major < 10:\r\n-\t\treturn False\r\n-\telif not atLeast and winVersion.major != 10:\r\n+\tif winVersion.major != 10:\r\n \t\treturn False\r\n \ttry:\r\n \t\tif atLeast:\r\n-\t\t\treturn winVersion.build >= win10VersionsToBuilds[version]\r\n+\t\t\treturn winVersion.build >= WIN10_VERSIONS_TO_BUILDS[version]\r\n \t\telse:\r\n-\t\t\treturn winVersion.build == win10VersionsToBuilds[version]\r\n+\t\t\treturn winVersion.build == WIN10_VERSIONS_TO_BUILDS[version]\r\n \texcept KeyError:\r\n+\t\tfrom logHandler import log\r\n \t\tlog.error(\"Unknown Windows 10 version {}\".format(version))\r\n \t\treturn False\n", "issue": "winVersion.isWin10: modify it to return early if major is not 10\nHi,\r\n\r\nThis feature request should not be done for 2019.3 in order to avoid introducing possible regressions, but perhaps defer it to 2020.1:\r\n\r\n### Is your feature request related to a problem? Please describe.\r\nIn NVDA 2019.2, as part of UIA console support, winVersion module introduced isWin10 function which returns whether a system is running a specific version of Windows 10 or later. At the moment it checks major version in two places:\r\n\r\n* If \"at least\" flag is specified: return False if version is less than 10.\r\n* If \"at least\" flag is not specified: return False if major version is not 10.\r\n\r\nAt least it is understandable. But I would argue that for sake of performance and keeping the subject line and body in sync, I propose the below modification.\r\n\r\n### Describe the solution you'd like\r\nInstead of checking flags after defining version information map, return early if major version isn't even 10. Not only flag checks can be avoided on Windows 8.1 and earlier, it can make the function body in line with what the function title says.\r\n\r\nThus I propose:\r\n\r\n1. First, check winVersion.major and return False if it isn't 10.\r\n2. Define version to build map.\r\n3. 
Same as current function.\r\n\r\n### Describe alternatives you've considered\r\nKeep the function as is.\r\n\r\n### Additional context\r\nI'm sure there might be other opportunities to optimize this function, but that's in the future.\r\n\r\nThanks.\n", "before_files": [{"content": "# A part of NonVisual Desktop Access (NVDA)\r\n# Copyright (C) 2006-2019 NV Access Limited\r\n# This file is covered by the GNU General Public License.\r\n# See the file COPYING for more details.\r\n\r\nimport sys\r\nimport os\r\nimport winUser\r\n\r\nwinVersion=sys.getwindowsversion()\r\nwinVersionText=\"{v.major}.{v.minor}.{v.build}\".format(v=winVersion)\r\nif winVersion.service_pack_major!=0:\r\n\twinVersionText+=\" service pack %d\"%winVersion.service_pack_major\r\n\tif winVersion.service_pack_minor!=0:\r\n\t\twinVersionText+=\".%d\"%winVersion.service_pack_minor\r\nwinVersionText+=\" %s\" % (\"workstation\",\"domain controller\",\"server\")[winVersion.product_type-1]\r\n\r\ndef isSupportedOS():\r\n\t# NVDA can only run on Windows 7 Service pack 1 and above\r\n\treturn (winVersion.major,winVersion.minor,winVersion.service_pack_major) >= (6,1,1)\r\n\r\ndef canRunVc2010Builds():\r\n\treturn isSupportedOS()\r\n\r\nUWP_OCR_DATA_PATH = os.path.expandvars(r\"$windir\\OCR\")\r\ndef isUwpOcrAvailable():\r\n\treturn os.path.isdir(UWP_OCR_DATA_PATH)\r\n\r\ndef isWin10(version=1507, atLeast=True):\r\n\t\"\"\"\r\n\tReturns True if NVDA is running on the supplied release version of Windows 10. If no argument is supplied, returns True for all public Windows 10 releases.\r\n\t@param version: a release version of Windows 10 (such as 1903).\r\n\t@param atLeast: return True if NVDA is running on at least this Windows 10 build (i.e. this version or higher).\r\n\t\"\"\"\r\n\tfrom logHandler import log\r\n\twin10VersionsToBuilds={\r\n\t\t1507: 10240,\r\n\t\t1511: 10586,\r\n\t\t1607: 14393,\r\n\t\t1703: 15063,\r\n\t\t1709: 16299,\r\n\t\t1803: 17134,\r\n\t\t1809: 17763,\r\n\t\t1903: 18362,\r\n\t\t1909: 18363,\r\n\t}\r\n\tif atLeast and winVersion.major < 10:\r\n\t\treturn False\r\n\telif not atLeast and winVersion.major != 10:\r\n\t\treturn False\r\n\ttry:\r\n\t\tif atLeast:\r\n\t\t\treturn winVersion.build >= win10VersionsToBuilds[version]\r\n\t\telse:\r\n\t\t\treturn winVersion.build == win10VersionsToBuilds[version]\r\n\texcept KeyError:\r\n\t\tlog.error(\"Unknown Windows 10 version {}\".format(version))\r\n\t\treturn False\r\n\r\n\r\ndef isFullScreenMagnificationAvailable():\r\n\treturn (winVersion.major, winVersion.minor) >= (6, 2)\r\n", "path": "source/winVersion.py"}]}
1,651
658
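One behavioural consequence of the diff above is worth pinning down: the old code only rejected `major < 10` when `atLeast` was set, so a hypothetical major version above 10 fell through to the build comparison, while the patched function returns `False` for any major other than 10, exactly as the issue requests. The diff also hoists the version map to a module-level constant and defers the `logHandler` import to the error path, so the common case does no extra work. A hypothetical regression-test sketch follows; `winVersion` imports the Windows-only `winUser` module, so this assumes a Windows environment (or a stubbed `winUser`), and the test name and `FakeVersion` tuple are inventions for illustration.

```python
# Hypothetical pytest sketch (not from the NVDA repo) covering the new
# early return: any major version other than 10 is rejected outright,
# even with atLeast=True.
from collections import namedtuple

FakeVersion = namedtuple("FakeVersion", "major minor build")


def test_is_win10_rejects_other_majors(monkeypatch):
    import winVersion  # Windows-only import chain (winUser)

    monkeypatch.setattr(winVersion, "winVersion", FakeVersion(11, 0, 22000))
    assert winVersion.isWin10(1507, atLeast=True) is False
```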
gh_patches_debug_15883
rasdani/github-patches
git_diff
mampfes__hacs_waste_collection_schedule-1311
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> South Glos UK doesn't return all Recycling / Food Bin dates The South Gloucester UK source is only returning the Food Bin and Recycling bins every other week, when they should be every week. Calendar in HA shows like this for me: ![image](https://github.com/mampfes/hacs_waste_collection_schedule/assets/4178651/ac283ac4-584a-429d-a20b-3f0dcf3a28c6) The South Glos webpage shows: ![image](https://github.com/mampfes/hacs_waste_collection_schedule/assets/4178651/9ec0502d-0fc9-48a5-a16b-a44d69409f5f) And the service endpoint that you are querying replies with: ![image](https://github.com/mampfes/hacs_waste_collection_schedule/assets/4178651/5f7dc0ec-762f-40a1-93a7-8824ac8c91e9) I could put a PR in to treat the Rx dates as Recycling and Food as well? </issue> <code> [start of custom_components/waste_collection_schedule/waste_collection_schedule/source/southglos_gov_uk.py] 1 import datetime 2 import json 3 4 import requests 5 from waste_collection_schedule import Collection 6 7 TITLE = "South Gloucestershire Council" # Title will show up in README.md and info.md 8 DESCRIPTION = "Source script for southglos.gov.uk" # Describe your source 9 URL = "https://southglos.gov.uk" # Insert url to service homepage. URL will show up in README.md and info.md 10 TEST_CASES = { # Insert arguments for test cases to be used by test_sources.py script 11 "Test_001": {"uprn": "643346"}, 12 "Test_002": {"uprn": "641084"} 13 } 14 15 ICON_MAP = { # Optional: Dict of waste types and suitable mdi icons 16 "BLACK BIN": "mdi:trash-can", 17 "RECYCLING": "mdi:recycle", 18 "GARDEN WASTE": "mdi:leaf", 19 "FOOD BIN": "mdi:food" 20 } 21 22 23 class Source: 24 def __init__(self, uprn: str): # argX correspond to the args dict in the source configuration 25 self._uprn = uprn 26 27 def fetch(self): 28 session = requests.Session() 29 r = session.get( 30 f"https://webapps.southglos.gov.uk/Webservices/SGC.RefuseCollectionService/RefuseCollectionService.svc" 31 f"/getCollections/{self._uprn}") 32 r.raise_for_status() 33 output = r.text.strip('[]') 34 output = json.loads(output) 35 recycling_and_food_bin_dates = [output['C1'], output['C2'], output['C3']] 36 black_bin_dates = [output['R1'], output['R2'], output['R3']] 37 garden_bin_dates = [output['G1'], output['G2'], output['G3']] 38 entries = [] # List that holds collection schedule 39 40 for collection in recycling_and_food_bin_dates: 41 entries.append( 42 Collection( 43 date=datetime.datetime.strptime(collection, "%d/%m/%Y").date(), 44 t="RECYCLING", 45 icon=ICON_MAP.get("RECYCLING"), 46 ) 47 ) 48 entries.append( 49 Collection( 50 date=datetime.datetime.strptime(collection, "%d/%m/%Y").date(), 51 t="FOOD BIN", 52 icon=ICON_MAP.get("FOOD BIN"), 53 ) 54 ) 55 56 for collection in black_bin_dates: 57 entries.append( 58 Collection( 59 date=datetime.datetime.strptime(collection, "%d/%m/%Y").date(), 60 t="BLACK BIN", 61 icon=ICON_MAP.get("BLACK BIN"), 62 ) 63 ) 64 65 if garden_bin_dates[1] != '': # 66 for collection in garden_bin_dates: 67 entries.append( 68 Collection( 69 date=datetime.datetime.strptime(collection, "%d/%m/%Y").date(), 70 t="GARDEN WASTE", 71 icon=ICON_MAP.get("GARDEN WASTE"), 72 ) 73 ) 74 75 return entries 76 [end of custom_components/waste_collection_schedule/waste_collection_schedule/source/southglos_gov_uk.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/southglos_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/southglos_gov_uk.py --- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/southglos_gov_uk.py +++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/southglos_gov_uk.py @@ -32,8 +32,11 @@ r.raise_for_status() output = r.text.strip('[]') output = json.loads(output) - recycling_and_food_bin_dates = [output['C1'], output['C2'], output['C3']] + # Recycling and food are fields starting with C and R + recycling_and_food_bin_dates = [output['C1'], output['C2'], output['C3'], output['R1'], output['R2'], output['R3']] + # Black bin dates are fields starting R black_bin_dates = [output['R1'], output['R2'], output['R3']] + # Garden bin dates are fields starting G garden_bin_dates = [output['G1'], output['G2'], output['G3']] entries = [] # List that holds collection schedule
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/southglos_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/southglos_gov_uk.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/southglos_gov_uk.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/southglos_gov_uk.py\n@@ -32,8 +32,11 @@\n r.raise_for_status()\n output = r.text.strip('[]')\n output = json.loads(output)\n- recycling_and_food_bin_dates = [output['C1'], output['C2'], output['C3']]\n+ # Recycling and food are fields starting with C and R\n+ recycling_and_food_bin_dates = [output['C1'], output['C2'], output['C3'], output['R1'], output['R2'], output['R3']]\n+ # Black bin dates are fields starting R\n black_bin_dates = [output['R1'], output['R2'], output['R3']]\n+ # Garden bin dates are fields starting G\n garden_bin_dates = [output['G1'], output['G2'], output['G3']]\n entries = [] # List that holds collection schedule\n", "issue": "South Glos UK doesn't return all Recycling / Food Bin dates\nThe South Gloucester UK source is only returning the Food Bin and Recycling bins every other week, when they should be every week.\r\n\r\nCalendar in HA shows like this for me:\r\n![image](https://github.com/mampfes/hacs_waste_collection_schedule/assets/4178651/ac283ac4-584a-429d-a20b-3f0dcf3a28c6)\r\n\r\nThe South Glos webpage shows:\r\n![image](https://github.com/mampfes/hacs_waste_collection_schedule/assets/4178651/9ec0502d-0fc9-48a5-a16b-a44d69409f5f)\r\n\r\nAnd the service endpoint that you are querying replies with:\r\n![image](https://github.com/mampfes/hacs_waste_collection_schedule/assets/4178651/5f7dc0ec-762f-40a1-93a7-8824ac8c91e9)\r\n\r\nI could put a PR in to treat the Rx dates as Recycling and Food as well?\n", "before_files": [{"content": "import datetime\nimport json\n\nimport requests\nfrom waste_collection_schedule import Collection\n\nTITLE = \"South Gloucestershire Council\" # Title will show up in README.md and info.md\nDESCRIPTION = \"Source script for southglos.gov.uk\" # Describe your source\nURL = \"https://southglos.gov.uk\" # Insert url to service homepage. 
URL will show up in README.md and info.md\nTEST_CASES = { # Insert arguments for test cases to be used by test_sources.py script\n \"Test_001\": {\"uprn\": \"643346\"},\n \"Test_002\": {\"uprn\": \"641084\"}\n}\n\nICON_MAP = { # Optional: Dict of waste types and suitable mdi icons\n \"BLACK BIN\": \"mdi:trash-can\",\n \"RECYCLING\": \"mdi:recycle\",\n \"GARDEN WASTE\": \"mdi:leaf\",\n \"FOOD BIN\": \"mdi:food\"\n}\n\n\nclass Source:\n def __init__(self, uprn: str): # argX correspond to the args dict in the source configuration\n self._uprn = uprn\n\n def fetch(self):\n session = requests.Session()\n r = session.get(\n f\"https://webapps.southglos.gov.uk/Webservices/SGC.RefuseCollectionService/RefuseCollectionService.svc\"\n f\"/getCollections/{self._uprn}\")\n r.raise_for_status()\n output = r.text.strip('[]')\n output = json.loads(output)\n recycling_and_food_bin_dates = [output['C1'], output['C2'], output['C3']]\n black_bin_dates = [output['R1'], output['R2'], output['R3']]\n garden_bin_dates = [output['G1'], output['G2'], output['G3']]\n entries = [] # List that holds collection schedule\n\n for collection in recycling_and_food_bin_dates:\n entries.append(\n Collection(\n date=datetime.datetime.strptime(collection, \"%d/%m/%Y\").date(),\n t=\"RECYCLING\",\n icon=ICON_MAP.get(\"RECYCLING\"),\n )\n )\n entries.append(\n Collection(\n date=datetime.datetime.strptime(collection, \"%d/%m/%Y\").date(),\n t=\"FOOD BIN\",\n icon=ICON_MAP.get(\"FOOD BIN\"),\n )\n )\n\n for collection in black_bin_dates:\n entries.append(\n Collection(\n date=datetime.datetime.strptime(collection, \"%d/%m/%Y\").date(),\n t=\"BLACK BIN\",\n icon=ICON_MAP.get(\"BLACK BIN\"),\n )\n )\n\n if garden_bin_dates[1] != '': #\n for collection in garden_bin_dates:\n entries.append(\n Collection(\n date=datetime.datetime.strptime(collection, \"%d/%m/%Y\").date(),\n t=\"GARDEN WASTE\",\n icon=ICON_MAP.get(\"GARDEN WASTE\"),\n )\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/southglos_gov_uk.py"}]}
1,621
281
gh_patches_debug_64458
rasdani/github-patches
git_diff
aws-cloudformation__cfn-lint-1834
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> E3021 after upgrading to cfn_lint-0.44.0 cfn-lint version: cfn_lint-0.44.0 Hi! After upgrading to version cfn_lint 0.44.0 don't validate AWS::Events::Rule This was reported valid since yesterday: ```yaml TaskSchedule: Type: AWS::Events::Rule Properties: Description: xxxxxxxxx Name: !Join ["-", [!Ref EnvironmentName, scheduled, !Ref App]] ScheduleExpression: "cron(15 9 * * ? *)" State: "ENABLED" Targets: - Arn: Fn::ImportValue: !Sub ${EnvironmentName}-microservice-cluster-arn RoleArn: !GetAtt ServiceRole.Arn Id: my-task EcsParameters: TaskCount: 1 TaskDefinitionArn: !Ref TaskDefinition ``` The error reported is: `E3021 An Events Rule can have up to 5 Targets` There is one Target so should still be valid </issue> <code> [start of src/cfnlint/rules/resources/events/RuleTargetsLimit.py] 1 """ 2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 SPDX-License-Identifier: MIT-0 4 """ 5 from cfnlint.rules import CloudFormationLintRule 6 from cfnlint.rules import RuleMatch 7 8 9 class RuleTargetsLimit(CloudFormationLintRule): 10 """Check State Machine Definition""" 11 id = 'E3021' 12 shortdesc = 'Check Events Rule Targets are less than or equal to 5' 13 description = 'CloudWatch Events Rule can only support up to 5 targets' 14 source_url = 'https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/cloudwatch_limits_cwe.html' 15 tags = ['resources', 'events'] 16 max_count = 5 17 18 def __init__(self): 19 """Init""" 20 super(RuleTargetsLimit, self).__init__() 21 self.resource_property_types = ['AWS::Events::Rule'] 22 self.limits = {} 23 24 # pylint: disable=W0613 25 def check_value(self, value, path): 26 """Count them up """ 27 28 resource_name = path[1] 29 if len(path) > 4: 30 if path[4] == 'Fn::If': 31 resource_name = '%s.%s' % (path[1], path[5]) 32 33 if resource_name not in self.limits: 34 self.limits[resource_name] = { 35 'count': 0, 36 'path': path[:-1] 37 } 38 39 self.limits[resource_name]['count'] += 1 40 return [] 41 42 def match_resource_properties(self, properties, _, path, cfn): 43 """Check CloudFormation Properties""" 44 matches = [] 45 matches.extend( 46 cfn.check_value( 47 obj=properties, key='Targets', 48 path=path[:], 49 check_value=self.check_value 50 )) 51 52 for _, limit in self.limits.items(): 53 if limit['count'] > self.max_count: 54 message = 'An Events Rule can have up to {0} Targets' 55 matches.append(RuleMatch(limit['path'], message.format(self.max_count))) 56 57 return matches 58 [end of src/cfnlint/rules/resources/events/RuleTargetsLimit.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/cfnlint/rules/resources/events/RuleTargetsLimit.py b/src/cfnlint/rules/resources/events/RuleTargetsLimit.py --- a/src/cfnlint/rules/resources/events/RuleTargetsLimit.py +++ b/src/cfnlint/rules/resources/events/RuleTargetsLimit.py @@ -21,6 +21,9 @@ self.resource_property_types = ['AWS::Events::Rule'] self.limits = {} + def initialize(self, cfn): + self.limits = {} + # pylint: disable=W0613 def check_value(self, value, path): """Count them up """
{"golden_diff": "diff --git a/src/cfnlint/rules/resources/events/RuleTargetsLimit.py b/src/cfnlint/rules/resources/events/RuleTargetsLimit.py\n--- a/src/cfnlint/rules/resources/events/RuleTargetsLimit.py\n+++ b/src/cfnlint/rules/resources/events/RuleTargetsLimit.py\n@@ -21,6 +21,9 @@\n self.resource_property_types = ['AWS::Events::Rule']\n self.limits = {}\n \n+ def initialize(self, cfn):\n+ self.limits = {}\n+\n # pylint: disable=W0613\n def check_value(self, value, path):\n \"\"\"Count them up \"\"\"\n", "issue": "E3021 after upgrading to cfn_lint-0.44.0\ncfn-lint version: cfn_lint-0.44.0\r\n\r\nHi! \r\n\r\nAfter upgrading to version cfn_lint 0.44.0 don't validate AWS::Events::Rule\r\n\r\nThis was reported valid since yesterday:\r\n\r\n```yaml\r\n TaskSchedule:\r\n Type: AWS::Events::Rule\r\n Properties:\r\n Description: xxxxxxxxx\r\n Name: !Join [\"-\", [!Ref EnvironmentName, scheduled, !Ref App]]\r\n ScheduleExpression: \"cron(15 9 * * ? *)\"\r\n State: \"ENABLED\"\r\n Targets:\r\n - Arn:\r\n Fn::ImportValue: !Sub ${EnvironmentName}-microservice-cluster-arn\r\n RoleArn: !GetAtt ServiceRole.Arn\r\n Id: my-task\r\n EcsParameters:\r\n TaskCount: 1\r\n TaskDefinitionArn: !Ref TaskDefinition\r\n```\r\n\r\nThe error reported is: `E3021 An Events Rule can have up to 5 Targets`\r\n\r\nThere is one Target so should still be valid \r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass RuleTargetsLimit(CloudFormationLintRule):\n \"\"\"Check State Machine Definition\"\"\"\n id = 'E3021'\n shortdesc = 'Check Events Rule Targets are less than or equal to 5'\n description = 'CloudWatch Events Rule can only support up to 5 targets'\n source_url = 'https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/cloudwatch_limits_cwe.html'\n tags = ['resources', 'events']\n max_count = 5\n\n def __init__(self):\n \"\"\"Init\"\"\"\n super(RuleTargetsLimit, self).__init__()\n self.resource_property_types = ['AWS::Events::Rule']\n self.limits = {}\n\n # pylint: disable=W0613\n def check_value(self, value, path):\n \"\"\"Count them up \"\"\"\n\n resource_name = path[1]\n if len(path) > 4:\n if path[4] == 'Fn::If':\n resource_name = '%s.%s' % (path[1], path[5])\n\n if resource_name not in self.limits:\n self.limits[resource_name] = {\n 'count': 0,\n 'path': path[:-1]\n }\n\n self.limits[resource_name]['count'] += 1\n return []\n\n def match_resource_properties(self, properties, _, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n matches.extend(\n cfn.check_value(\n obj=properties, key='Targets',\n path=path[:],\n check_value=self.check_value\n ))\n\n for _, limit in self.limits.items():\n if limit['count'] > self.max_count:\n message = 'An Events Rule can have up to {0} Targets'\n matches.append(RuleMatch(limit['path'], message.format(self.max_count)))\n\n return matches\n", "path": "src/cfnlint/rules/resources/events/RuleTargetsLimit.py"}]}
1,342
137
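The fix in the row above is easy to misread: nothing about the five-target threshold itself changes; the diff only adds an `initialize()` hook that resets `self.limits`. That strongly suggests the false positive came from the counter persisting on a long-lived rule instance, so counts from earlier passes or templates accumulated until a single Target looked like six. Below is a minimal, framework-free sketch of that stateful-counter failure mode; the class and method names are illustrative, not cfn-lint's API, apart from the `initialize()` reset idea taken from the diff.

```python
# Illustrative stand-in for a lint rule whose per-template counter is never
# reset: the same instance is reused, so counts accumulate across passes.
class StatefulRule:
    def __init__(self):
        self.limits = {}

    def initialize(self):
        # mirror of the fix: clear per-run state before each template
        self.limits = {}

    def count_target(self, resource_name):
        self.limits.setdefault(resource_name, 0)
        self.limits[resource_name] += 1
        return self.limits[resource_name]


rule = StatefulRule()
for run in range(6):  # six lint passes over a template with ONE target
    # rule.initialize()  # uncomment to get the fixed behaviour (always 1)
    print(rule.count_target("TaskSchedule"))  # climbs 1..6 without the reset
```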
gh_patches_debug_32851
rasdani/github-patches
git_diff
liqd__a4-meinberlin-447
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Initiator dashboard: too many notification I get notifications of any projects of my organization, even it's not my project. I'm not sure if I want to see all changes in all projects of my organization. I think it will be too much. </issue> <code> [start of apps/notifications/signals.py] 1 from django.db.models import signals 2 from django.dispatch import receiver 3 4 from adhocracy4.actions.models import Action 5 from adhocracy4.actions.verbs import Verbs 6 from adhocracy4.follows.models import Follow 7 from adhocracy4.phases.models import Phase 8 from adhocracy4.projects.models import Project 9 from apps.organisations.models import Organisation 10 from . import emails 11 12 13 @receiver(signals.post_save, sender=Action) 14 def send_notifications(instance, created, **kwargs): 15 action = instance 16 verb = Verbs(action.verb) 17 18 if verb == Verbs.CREATE or verb == Verbs.ADD: 19 emails.NotifyCreatorEmail.send(action) 20 21 if action.project: 22 emails.NotifyModeratorsEmail.send(action) 23 emails.NotifyFollowersOnNewItemCreated.send(action) 24 25 elif verb == Verbs.SCHEDULE: 26 if isinstance(action.obj, Phase): 27 emails.NotifyFollowersOnPhaseIsOverSoonEmail.send(action) 28 29 30 @receiver(signals.m2m_changed, sender=Project.moderators.through) 31 def autofollow_project_moderators(instance, action, pk_set, reverse, **kwargs): 32 if action == 'post_add': 33 if not reverse: 34 project = instance 35 users_pks = pk_set 36 37 for user_pk in users_pks: 38 Follow.objects.update_or_create( 39 project=project, 40 creator_id=user_pk, 41 defaults={ 42 'enabled': True 43 } 44 ) 45 else: 46 user = instance 47 project_pks = pk_set 48 49 for project_pk in project_pks: 50 Follow.objects.update_or_create( 51 project_id=project_pk, 52 creator_id=user, 53 defaults={ 54 'enabled': True 55 } 56 ) 57 58 59 @receiver(signals.m2m_changed, sender=Organisation.initiators.through) 60 def autofollow_organisation_initiators(instance, action, pk_set, reverse, 61 **kwargs): 62 if action == 'post_add': 63 if not reverse: 64 organisation = instance 65 users_pks = pk_set 66 67 for project in Project.objects.filter(organisation=organisation): 68 for user_pk in users_pks: 69 Follow.objects.update_or_create( 70 project=project, 71 creator_id=user_pk, 72 defaults={ 73 'enabled': True 74 } 75 ) 76 else: 77 user = instance 78 organisation_pk_set = pk_set 79 80 for project in Project.objects.filter( 81 organisation_id__in=organisation_pk_set): 82 Follow.objects.update_or_create( 83 project=project, 84 creator=user, 85 defaults={ 86 'enabled': True 87 } 88 ) 89 90 91 @receiver(signals.post_save) 92 def autofollow_organisation_initiators_new_projects(sender, instance, created, 93 **kwargs): 94 if issubclass(sender, Project): 95 # we have to check if the senders inherits from Project to catch 96 # signals from external projects and bplans 97 project = instance 98 if created: 99 for user in project.organisation.initiators.all(): 100 Follow.objects.update_or_create( 101 project=project, 102 creator=user, 103 defaults={ 104 'enabled': True 105 } 106 ) 107 [end of apps/notifications/signals.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/apps/notifications/signals.py b/apps/notifications/signals.py --- a/apps/notifications/signals.py +++ b/apps/notifications/signals.py @@ -6,7 +6,6 @@ from adhocracy4.follows.models import Follow from adhocracy4.phases.models import Phase from adhocracy4.projects.models import Project -from apps.organisations.models import Organisation from . import emails @@ -54,53 +53,3 @@ 'enabled': True } ) - - -@receiver(signals.m2m_changed, sender=Organisation.initiators.through) -def autofollow_organisation_initiators(instance, action, pk_set, reverse, - **kwargs): - if action == 'post_add': - if not reverse: - organisation = instance - users_pks = pk_set - - for project in Project.objects.filter(organisation=organisation): - for user_pk in users_pks: - Follow.objects.update_or_create( - project=project, - creator_id=user_pk, - defaults={ - 'enabled': True - } - ) - else: - user = instance - organisation_pk_set = pk_set - - for project in Project.objects.filter( - organisation_id__in=organisation_pk_set): - Follow.objects.update_or_create( - project=project, - creator=user, - defaults={ - 'enabled': True - } - ) - - -@receiver(signals.post_save) -def autofollow_organisation_initiators_new_projects(sender, instance, created, - **kwargs): - if issubclass(sender, Project): - # we have to check if the senders inherits from Project to catch - # signals from external projects and bplans - project = instance - if created: - for user in project.organisation.initiators.all(): - Follow.objects.update_or_create( - project=project, - creator=user, - defaults={ - 'enabled': True - } - )
{"golden_diff": "diff --git a/apps/notifications/signals.py b/apps/notifications/signals.py\n--- a/apps/notifications/signals.py\n+++ b/apps/notifications/signals.py\n@@ -6,7 +6,6 @@\n from adhocracy4.follows.models import Follow\n from adhocracy4.phases.models import Phase\n from adhocracy4.projects.models import Project\n-from apps.organisations.models import Organisation\n from . import emails\n \n \n@@ -54,53 +53,3 @@\n 'enabled': True\n }\n )\n-\n-\n-@receiver(signals.m2m_changed, sender=Organisation.initiators.through)\n-def autofollow_organisation_initiators(instance, action, pk_set, reverse,\n- **kwargs):\n- if action == 'post_add':\n- if not reverse:\n- organisation = instance\n- users_pks = pk_set\n-\n- for project in Project.objects.filter(organisation=organisation):\n- for user_pk in users_pks:\n- Follow.objects.update_or_create(\n- project=project,\n- creator_id=user_pk,\n- defaults={\n- 'enabled': True\n- }\n- )\n- else:\n- user = instance\n- organisation_pk_set = pk_set\n-\n- for project in Project.objects.filter(\n- organisation_id__in=organisation_pk_set):\n- Follow.objects.update_or_create(\n- project=project,\n- creator=user,\n- defaults={\n- 'enabled': True\n- }\n- )\n-\n-\n-@receiver(signals.post_save)\n-def autofollow_organisation_initiators_new_projects(sender, instance, created,\n- **kwargs):\n- if issubclass(sender, Project):\n- # we have to check if the senders inherits from Project to catch\n- # signals from external projects and bplans\n- project = instance\n- if created:\n- for user in project.organisation.initiators.all():\n- Follow.objects.update_or_create(\n- project=project,\n- creator=user,\n- defaults={\n- 'enabled': True\n- }\n- )\n", "issue": "Initiator dashboard: too many notification\nI get notifications of any projects of my organization, even it's not my project. I'm not sure if I want to see all changes in all projects of my organization. I think it will be too much.\n", "before_files": [{"content": "from django.db.models import signals\nfrom django.dispatch import receiver\n\nfrom adhocracy4.actions.models import Action\nfrom adhocracy4.actions.verbs import Verbs\nfrom adhocracy4.follows.models import Follow\nfrom adhocracy4.phases.models import Phase\nfrom adhocracy4.projects.models import Project\nfrom apps.organisations.models import Organisation\nfrom . 
import emails\n\n\n@receiver(signals.post_save, sender=Action)\ndef send_notifications(instance, created, **kwargs):\n action = instance\n verb = Verbs(action.verb)\n\n if verb == Verbs.CREATE or verb == Verbs.ADD:\n emails.NotifyCreatorEmail.send(action)\n\n if action.project:\n emails.NotifyModeratorsEmail.send(action)\n emails.NotifyFollowersOnNewItemCreated.send(action)\n\n elif verb == Verbs.SCHEDULE:\n if isinstance(action.obj, Phase):\n emails.NotifyFollowersOnPhaseIsOverSoonEmail.send(action)\n\n\n@receiver(signals.m2m_changed, sender=Project.moderators.through)\ndef autofollow_project_moderators(instance, action, pk_set, reverse, **kwargs):\n if action == 'post_add':\n if not reverse:\n project = instance\n users_pks = pk_set\n\n for user_pk in users_pks:\n Follow.objects.update_or_create(\n project=project,\n creator_id=user_pk,\n defaults={\n 'enabled': True\n }\n )\n else:\n user = instance\n project_pks = pk_set\n\n for project_pk in project_pks:\n Follow.objects.update_or_create(\n project_id=project_pk,\n creator_id=user,\n defaults={\n 'enabled': True\n }\n )\n\n\n@receiver(signals.m2m_changed, sender=Organisation.initiators.through)\ndef autofollow_organisation_initiators(instance, action, pk_set, reverse,\n **kwargs):\n if action == 'post_add':\n if not reverse:\n organisation = instance\n users_pks = pk_set\n\n for project in Project.objects.filter(organisation=organisation):\n for user_pk in users_pks:\n Follow.objects.update_or_create(\n project=project,\n creator_id=user_pk,\n defaults={\n 'enabled': True\n }\n )\n else:\n user = instance\n organisation_pk_set = pk_set\n\n for project in Project.objects.filter(\n organisation_id__in=organisation_pk_set):\n Follow.objects.update_or_create(\n project=project,\n creator=user,\n defaults={\n 'enabled': True\n }\n )\n\n\n@receiver(signals.post_save)\ndef autofollow_organisation_initiators_new_projects(sender, instance, created,\n **kwargs):\n if issubclass(sender, Project):\n # we have to check if the senders inherits from Project to catch\n # signals from external projects and bplans\n project = instance\n if created:\n for user in project.organisation.initiators.all():\n Follow.objects.update_or_create(\n project=project,\n creator=user,\n defaults={\n 'enabled': True\n }\n )\n", "path": "apps/notifications/signals.py"}]}
1,459
452
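The surviving receiver in the record above leans on Django's `m2m_changed` contract: `reverse` says which side of the many-to-many relation fired the signal, and `pk_set` carries the primary keys of the opposite side. A minimal sketch of that contract, reusing the `Project` import path shown in the record (illustrative only, not the project's actual code):

```python
# Sketch of m2m_changed semantics. Forward side (reverse=False):
# `instance` is the Project and pk_set holds the added users' pks.
# Reverse side (reverse=True): `instance` is the user and pk_set
# holds the pks of the Projects the user was added to.
from django.db.models.signals import m2m_changed
from django.dispatch import receiver

from adhocracy4.projects.models import Project  # import assumed from the record


@receiver(m2m_changed, sender=Project.moderators.through)
def on_moderators_changed(instance, action, pk_set, reverse, **kwargs):
    if action != 'post_add':
        return
    if not reverse:
        project, user_pks = instance, pk_set   # Project.moderators.add(user)
    else:
        user, project_pks = instance, pk_set   # user.<reverse accessor>.add(project)
```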
gh_patches_debug_1020
rasdani/github-patches
git_diff
liqd__a4-meinberlin-1970
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> district, topic and localisation for external projects and bplans external projects also need district, topic and localisation as the other projects do. same is true for b-plans as not all of them come via imperia </issue> <code> [start of meinberlin/apps/bplan/tasks.py] 1 import json 2 import logging 3 import urllib 4 5 from background_task import background 6 7 from adhocracy4.administrative_districts.models import AdministrativeDistrict 8 from meinberlin.apps.bplan.models import Bplan 9 10 logger = logging.getLogger(__name__) 11 12 13 def get_features_from_bplan_api(endpoint): 14 url = 'https://bplan-prod.liqd.net/api/' + endpoint 15 req = urllib.request.Request(url) 16 res = urllib.request.urlopen(req) 17 res_body = res.read() 18 res_json = json.loads(res_body.decode("utf-8")) 19 20 return res_json.get('features') 21 22 23 def get_bplan_point_and_district_pk(bplan_identifier): 24 url_poi = 'bplan/points/' + \ 25 '?bplan={}'.format(bplan_identifier.replace(' ', '%20')) 26 27 try: 28 features = get_features_from_bplan_api(url_poi) 29 if features: 30 district_pk = features[0]['properties']['bezirk'] 31 point = features[0] 32 33 return point, district_pk 34 35 return None, None 36 37 except UnicodeEncodeError: 38 # catches bplan-identifiers with problematic chars 39 pass 40 41 42 def get_bplan_api_pk_to_a4_admin_district_dict(): 43 url_dis = 'bezirke/' 44 features = get_features_from_bplan_api(url_dis) 45 dis_dict = {} 46 if features: 47 for district in features: 48 49 dis_model = AdministrativeDistrict.objects.filter( 50 name=district['properties']['name'] 51 ) 52 if dis_model: 53 dis_dict[district['properties']['pk']] = \ 54 dis_model[0] 55 else: 56 dis_dict[district['properties']['pk']] = None 57 58 return dis_dict 59 60 61 @background(schedule=0) 62 def get_location_information(bplan_id): 63 bplan = Bplan.objects.get(pk=bplan_id) 64 point, district_pk = get_bplan_point_and_district_pk(bplan.identifier) 65 dis_dict = get_bplan_api_pk_to_a4_admin_district_dict() 66 67 if district_pk: 68 bplan.administrative_district = \ 69 dis_dict[district_pk] 70 else: 71 logger.error( 72 "The identifier '{}' for bplan '{}' seems to be wrong. " 73 "It doesn't exist on https://bplan-prod.liqd.net/api/" 74 .format(bplan.identifier, bplan) 75 ) 76 bplan.point = point 77 bplan.save(update_fields=['point', 'administrative_district']) 78 [end of meinberlin/apps/bplan/tasks.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/meinberlin/apps/bplan/tasks.py b/meinberlin/apps/bplan/tasks.py --- a/meinberlin/apps/bplan/tasks.py +++ b/meinberlin/apps/bplan/tasks.py @@ -74,4 +74,5 @@ .format(bplan.identifier, bplan) ) bplan.point = point - bplan.save(update_fields=['point', 'administrative_district']) + bplan.topics = ['URB'] + bplan.save(update_fields=['point', 'administrative_district', 'topics'])
{"golden_diff": "diff --git a/meinberlin/apps/bplan/tasks.py b/meinberlin/apps/bplan/tasks.py\n--- a/meinberlin/apps/bplan/tasks.py\n+++ b/meinberlin/apps/bplan/tasks.py\n@@ -74,4 +74,5 @@\n .format(bplan.identifier, bplan)\n )\n bplan.point = point\n- bplan.save(update_fields=['point', 'administrative_district'])\n+ bplan.topics = ['URB']\n+ bplan.save(update_fields=['point', 'administrative_district', 'topics'])\n", "issue": "district, topic and localisation for external projects and bplans\nexternal projects also need district, topic and localisation as the other projects do. \r\n\r\nsame is true for b-plans as not all of them come via imperia\n", "before_files": [{"content": "import json\nimport logging\nimport urllib\n\nfrom background_task import background\n\nfrom adhocracy4.administrative_districts.models import AdministrativeDistrict\nfrom meinberlin.apps.bplan.models import Bplan\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_features_from_bplan_api(endpoint):\n url = 'https://bplan-prod.liqd.net/api/' + endpoint\n req = urllib.request.Request(url)\n res = urllib.request.urlopen(req)\n res_body = res.read()\n res_json = json.loads(res_body.decode(\"utf-8\"))\n\n return res_json.get('features')\n\n\ndef get_bplan_point_and_district_pk(bplan_identifier):\n url_poi = 'bplan/points/' + \\\n '?bplan={}'.format(bplan_identifier.replace(' ', '%20'))\n\n try:\n features = get_features_from_bplan_api(url_poi)\n if features:\n district_pk = features[0]['properties']['bezirk']\n point = features[0]\n\n return point, district_pk\n\n return None, None\n\n except UnicodeEncodeError:\n # catches bplan-identifiers with problematic chars\n pass\n\n\ndef get_bplan_api_pk_to_a4_admin_district_dict():\n url_dis = 'bezirke/'\n features = get_features_from_bplan_api(url_dis)\n dis_dict = {}\n if features:\n for district in features:\n\n dis_model = AdministrativeDistrict.objects.filter(\n name=district['properties']['name']\n )\n if dis_model:\n dis_dict[district['properties']['pk']] = \\\n dis_model[0]\n else:\n dis_dict[district['properties']['pk']] = None\n\n return dis_dict\n\n\n@background(schedule=0)\ndef get_location_information(bplan_id):\n bplan = Bplan.objects.get(pk=bplan_id)\n point, district_pk = get_bplan_point_and_district_pk(bplan.identifier)\n dis_dict = get_bplan_api_pk_to_a4_admin_district_dict()\n\n if district_pk:\n bplan.administrative_district = \\\n dis_dict[district_pk]\n else:\n logger.error(\n \"The identifier '{}' for bplan '{}' seems to be wrong. \"\n \"It doesn't exist on https://bplan-prod.liqd.net/api/\"\n .format(bplan.identifier, bplan)\n )\n bplan.point = point\n bplan.save(update_fields=['point', 'administrative_district'])\n", "path": "meinberlin/apps/bplan/tasks.py"}]}
1,259
124
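The one-line nature of the fix above is explained by Django's `save(update_fields=...)` contract: only the fields named in the list are written to the database, so the newly assigned `topics` value must be added alongside the existing fields or the assignment is silently lost. A minimal sketch with a hypothetical stand-in model (the real one is `meinberlin.apps.bplan.models.Bplan`):

```python
# Hypothetical model illustrating the update_fields contract.
from django.db import models


class Plan(models.Model):
    point = models.TextField(null=True)
    topics = models.TextField(default='')


def tag_as_urban(plan):
    plan.topics = 'URB'
    # Without 'topics' in update_fields this assignment would never
    # reach the database, which is exactly what the patch corrects.
    plan.save(update_fields=['point', 'topics'])
```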
gh_patches_debug_3356
rasdani/github-patches
git_diff
pypa__setuptools-3106
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG] Entry points are missing when supplied as a string. ### setuptools version setuptools==60.9.0 ### Python version 3.8.12 ### OS Debian ### Additional environment information _No response_ ### Description When installing a Python package which uses `setuptools` and has at least one `entry_point` directly from Git the `entry_point` script is not created with `setuptools==60.9.0`. I have verified that this does work with `setuptools=60.8.2` but not `60.9.0`. ### Expected behavior The expected `entry_point` script `tap-postgres` is present in `venv/bin/`. ### How to Reproduce ``` python -m venv venv source venv/bin/activate pip install git+https://github.com/thread/pipelinewise-tap-postgres.git@ee9bedb4711bb27c9660892f56c8fb27f3770d2a ``` ### Output ```console ``` [BUG] Entry points are missing when supplied as a string. ### setuptools version setuptools==60.9.0 ### Python version 3.8.12 ### OS Debian ### Additional environment information _No response_ ### Description When installing a Python package which uses `setuptools` and has at least one `entry_point` directly from Git the `entry_point` script is not created with `setuptools==60.9.0`. I have verified that this does work with `setuptools=60.8.2` but not `60.9.0`. ### Expected behavior The expected `entry_point` script `tap-postgres` is present in `venv/bin/`. ### How to Reproduce ``` python -m venv venv source venv/bin/activate pip install git+https://github.com/thread/pipelinewise-tap-postgres.git@ee9bedb4711bb27c9660892f56c8fb27f3770d2a ``` ### Output ```console ``` </issue> <code> [start of setuptools/_entry_points.py] 1 import functools 2 import operator 3 import itertools 4 5 from .extern.jaraco.text import yield_lines 6 from .extern.jaraco.functools import pass_none 7 from ._importlib import metadata 8 from ._itertools import ensure_unique 9 from .extern.more_itertools import consume 10 11 12 def ensure_valid(ep): 13 """ 14 Exercise one of the dynamic properties to trigger 15 the pattern match. 16 """ 17 ep.extras 18 19 20 def load_group(value, group): 21 """ 22 Given a value of an entry point or series of entry points, 23 return each as an EntryPoint. 24 """ 25 # normalize to a single sequence of lines 26 lines = yield_lines(value) 27 text = f'[{group}]\n' + '\n'.join(lines) 28 return metadata.EntryPoints._from_text(text) 29 30 31 def by_group_and_name(ep): 32 return ep.group, ep.name 33 34 35 def validate(eps: metadata.EntryPoints): 36 """ 37 Ensure entry points are unique by group and name and validate each. 38 """ 39 consume(map(ensure_valid, ensure_unique(eps, key=by_group_and_name))) 40 return eps 41 42 43 @functools.singledispatch 44 def load(eps): 45 """ 46 Given a Distribution.entry_points, produce EntryPoints. 
47 """ 48 groups = itertools.chain.from_iterable( 49 load_group(value, group) 50 for group, value in eps.items()) 51 return validate(metadata.EntryPoints(groups)) 52 53 54 @load.register(str) 55 def _(eps): 56 return validate(metadata.EntryPoints._from_text(eps)) 57 58 59 load.register(type(None), lambda x: x) 60 61 62 @pass_none 63 def render(eps: metadata.EntryPoints): 64 by_group = operator.attrgetter('group') 65 groups = itertools.groupby(sorted(eps, key=by_group), by_group) 66 67 return '\n'.join( 68 f'[{group}]\n{render_items(items)}\n' 69 for group, items in groups 70 ) 71 72 73 def render_items(eps): 74 return '\n'.join( 75 f'{ep.name} = {ep.value}' 76 for ep in sorted(eps) 77 ) 78 [end of setuptools/_entry_points.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setuptools/_entry_points.py b/setuptools/_entry_points.py --- a/setuptools/_entry_points.py +++ b/setuptools/_entry_points.py @@ -53,7 +53,16 @@ @load.register(str) def _(eps): - return validate(metadata.EntryPoints._from_text(eps)) + r""" + >>> ep, = load('[console_scripts]\nfoo=bar') + >>> ep.group + 'console_scripts' + >>> ep.name + 'foo' + >>> ep.value + 'bar' + """ + return validate(metadata.EntryPoints(metadata.EntryPoints._from_text(eps))) load.register(type(None), lambda x: x)
{"golden_diff": "diff --git a/setuptools/_entry_points.py b/setuptools/_entry_points.py\n--- a/setuptools/_entry_points.py\n+++ b/setuptools/_entry_points.py\n@@ -53,7 +53,16 @@\n \n @load.register(str)\n def _(eps):\n- return validate(metadata.EntryPoints._from_text(eps))\n+ r\"\"\"\n+ >>> ep, = load('[console_scripts]\\nfoo=bar')\n+ >>> ep.group\n+ 'console_scripts'\n+ >>> ep.name\n+ 'foo'\n+ >>> ep.value\n+ 'bar'\n+ \"\"\"\n+ return validate(metadata.EntryPoints(metadata.EntryPoints._from_text(eps)))\n \n \n load.register(type(None), lambda x: x)\n", "issue": "[BUG] Entry points are missing when supplied as a string.\n### setuptools version\r\n\r\nsetuptools==60.9.0\r\n\r\n### Python version\r\n\r\n3.8.12\r\n\r\n### OS\r\n\r\nDebian\r\n\r\n### Additional environment information\r\n\r\n_No response_\r\n\r\n### Description\r\n\r\nWhen installing a Python package which uses `setuptools` and has at least one `entry_point` directly from Git the `entry_point` script is not created with `setuptools==60.9.0`.\r\n\r\nI have verified that this does work with `setuptools=60.8.2` but not `60.9.0`.\r\n\r\n### Expected behavior\r\n\r\nThe expected `entry_point` script `tap-postgres` is present in `venv/bin/`.\r\n\r\n### How to Reproduce\r\n\r\n```\r\npython -m venv venv\r\nsource venv/bin/activate\r\npip install git+https://github.com/thread/pipelinewise-tap-postgres.git@ee9bedb4711bb27c9660892f56c8fb27f3770d2a\r\n```\r\n\r\n### Output\r\n\r\n```console\r\n\r\n```\r\n\n[BUG] Entry points are missing when supplied as a string.\n### setuptools version\r\n\r\nsetuptools==60.9.0\r\n\r\n### Python version\r\n\r\n3.8.12\r\n\r\n### OS\r\n\r\nDebian\r\n\r\n### Additional environment information\r\n\r\n_No response_\r\n\r\n### Description\r\n\r\nWhen installing a Python package which uses `setuptools` and has at least one `entry_point` directly from Git the `entry_point` script is not created with `setuptools==60.9.0`.\r\n\r\nI have verified that this does work with `setuptools=60.8.2` but not `60.9.0`.\r\n\r\n### Expected behavior\r\n\r\nThe expected `entry_point` script `tap-postgres` is present in `venv/bin/`.\r\n\r\n### How to Reproduce\r\n\r\n```\r\npython -m venv venv\r\nsource venv/bin/activate\r\npip install git+https://github.com/thread/pipelinewise-tap-postgres.git@ee9bedb4711bb27c9660892f56c8fb27f3770d2a\r\n```\r\n\r\n### Output\r\n\r\n```console\r\n\r\n```\r\n\n", "before_files": [{"content": "import functools\nimport operator\nimport itertools\n\nfrom .extern.jaraco.text import yield_lines\nfrom .extern.jaraco.functools import pass_none\nfrom ._importlib import metadata\nfrom ._itertools import ensure_unique\nfrom .extern.more_itertools import consume\n\n\ndef ensure_valid(ep):\n \"\"\"\n Exercise one of the dynamic properties to trigger\n the pattern match.\n \"\"\"\n ep.extras\n\n\ndef load_group(value, group):\n \"\"\"\n Given a value of an entry point or series of entry points,\n return each as an EntryPoint.\n \"\"\"\n # normalize to a single sequence of lines\n lines = yield_lines(value)\n text = f'[{group}]\\n' + '\\n'.join(lines)\n return metadata.EntryPoints._from_text(text)\n\n\ndef by_group_and_name(ep):\n return ep.group, ep.name\n\n\ndef validate(eps: metadata.EntryPoints):\n \"\"\"\n Ensure entry points are unique by group and name and validate each.\n \"\"\"\n consume(map(ensure_valid, ensure_unique(eps, key=by_group_and_name)))\n return eps\n\n\[email protected]\ndef load(eps):\n \"\"\"\n Given a Distribution.entry_points, produce EntryPoints.\n \"\"\"\n groups = 
itertools.chain.from_iterable(\n load_group(value, group)\n for group, value in eps.items())\n return validate(metadata.EntryPoints(groups))\n\n\[email protected](str)\ndef _(eps):\n return validate(metadata.EntryPoints._from_text(eps))\n\n\nload.register(type(None), lambda x: x)\n\n\n@pass_none\ndef render(eps: metadata.EntryPoints):\n by_group = operator.attrgetter('group')\n groups = itertools.groupby(sorted(eps, key=by_group), by_group)\n\n return '\\n'.join(\n f'[{group}]\\n{render_items(items)}\\n'\n for group, items in groups\n )\n\n\ndef render_items(eps):\n return '\\n'.join(\n f'{ep.name} = {ep.value}'\n for ep in sorted(eps)\n )\n", "path": "setuptools/_entry_points.py"}]}
1,619
157
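One plausible reading of the patch above: `metadata.EntryPoints._from_text()` can hand back a one-shot iterable, so the `validate()` pass, which iterates the sequence to enforce uniqueness, exhausts it before installers ever read the entry points; re-wrapping the result in `metadata.EntryPoints(...)`, a tuple subclass, materialises it once. The failure class in isolation, with a plain generator standing in for the metadata types:

```python
# Generator exhaustion in miniature: a validation pass that iterates a
# one-shot iterable leaves nothing for the real consumer.
def parse(text):
    for line in text.splitlines():
        name, _, value = line.partition('=')
        yield name.strip(), value.strip()


eps = parse('foo = bar')
assert list(eps) == [('foo', 'bar')]   # "validation" consumes it
assert list(eps) == []                 # the installer then sees nothing

eps = tuple(parse('foo = bar'))        # materialise, as the patch does
assert list(eps) == list(eps) != []    # now safe to iterate repeatedly
```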
gh_patches_debug_14850
rasdani/github-patches
git_diff
liqd__adhocracy4-893
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> django-admin: place comment's is_blocked next to is_censored **URL:** **user:** **expected behaviour:** As is_removed, is_censored and is_blocked have a very similar behaviour, the is_blocked field should be next to the first two fields. In the detail as well as in the list view **behaviour:** **important screensize:** **device & browser:** **Comment/Question:** Screenshot? ![Screenshot from 2021-11-12 11-20-55](https://user-images.githubusercontent.com/8178179/141452772-f03df35f-b6e4-45d6-8703-c587ff0baef5.png) ![Screenshot from 2021-11-12 11-20-37](https://user-images.githubusercontent.com/8178179/141452773-c511dc84-3c92-49e4-abe8-dbb8022794be.png) </issue> <code> [start of adhocracy4/comments/admin.py] 1 from django.contrib import admin 2 3 from .models import Comment 4 5 6 @admin.register(Comment) 7 class CommentAdmin(admin.ModelAdmin): 8 fields = ( 9 'content_type', 'content_object', 'comment', 'is_removed', 10 'is_censored', 'is_moderator_marked', 'creator', 'comment_categories', 11 'is_blocked' 12 ) 13 readonly_fields = ('creator', 'content_type', 'content_object') 14 list_display = ( 15 '__str__', 'creator', 'is_removed', 'is_censored', 'created', 16 'is_blocked' 17 ) 18 search_fields = ('comment',) 19 date_hierarchy = 'created' 20 [end of adhocracy4/comments/admin.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/adhocracy4/comments/admin.py b/adhocracy4/comments/admin.py --- a/adhocracy4/comments/admin.py +++ b/adhocracy4/comments/admin.py @@ -7,13 +7,13 @@ class CommentAdmin(admin.ModelAdmin): fields = ( 'content_type', 'content_object', 'comment', 'is_removed', - 'is_censored', 'is_moderator_marked', 'creator', 'comment_categories', - 'is_blocked' + 'is_censored', 'is_blocked', 'is_moderator_marked', + 'creator', 'comment_categories' ) readonly_fields = ('creator', 'content_type', 'content_object') list_display = ( - '__str__', 'creator', 'is_removed', 'is_censored', 'created', - 'is_blocked' + '__str__', 'creator', 'is_removed', 'is_censored', + 'is_blocked', 'created' ) search_fields = ('comment',) date_hierarchy = 'created'
{"golden_diff": "diff --git a/adhocracy4/comments/admin.py b/adhocracy4/comments/admin.py\n--- a/adhocracy4/comments/admin.py\n+++ b/adhocracy4/comments/admin.py\n@@ -7,13 +7,13 @@\n class CommentAdmin(admin.ModelAdmin):\n fields = (\n 'content_type', 'content_object', 'comment', 'is_removed',\n- 'is_censored', 'is_moderator_marked', 'creator', 'comment_categories',\n- 'is_blocked'\n+ 'is_censored', 'is_blocked', 'is_moderator_marked',\n+ 'creator', 'comment_categories'\n )\n readonly_fields = ('creator', 'content_type', 'content_object')\n list_display = (\n- '__str__', 'creator', 'is_removed', 'is_censored', 'created',\n- 'is_blocked'\n+ '__str__', 'creator', 'is_removed', 'is_censored',\n+ 'is_blocked', 'created'\n )\n search_fields = ('comment',)\n date_hierarchy = 'created'\n", "issue": "django-admin: place comment's is_blocked next to is_censored\n**URL:** \r\n**user:** \r\n**expected behaviour:** As is_removed, is_censored and is_blocked have a very similar behaviour, the is_blocked field should be next to the first two fields. In the detail as well as in the list view\r\n**behaviour:** \r\n**important screensize:**\r\n**device & browser:** \r\n**Comment/Question:** \r\n\r\nScreenshot?\r\n![Screenshot from 2021-11-12 11-20-55](https://user-images.githubusercontent.com/8178179/141452772-f03df35f-b6e4-45d6-8703-c587ff0baef5.png)\r\n![Screenshot from 2021-11-12 11-20-37](https://user-images.githubusercontent.com/8178179/141452773-c511dc84-3c92-49e4-abe8-dbb8022794be.png)\r\n\r\n\n", "before_files": [{"content": "from django.contrib import admin\n\nfrom .models import Comment\n\n\[email protected](Comment)\nclass CommentAdmin(admin.ModelAdmin):\n fields = (\n 'content_type', 'content_object', 'comment', 'is_removed',\n 'is_censored', 'is_moderator_marked', 'creator', 'comment_categories',\n 'is_blocked'\n )\n readonly_fields = ('creator', 'content_type', 'content_object')\n list_display = (\n '__str__', 'creator', 'is_removed', 'is_censored', 'created',\n 'is_blocked'\n )\n search_fields = ('comment',)\n date_hierarchy = 'created'\n", "path": "adhocracy4/comments/admin.py"}]}
959
225
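The change requested above is purely presentational: Django's `ModelAdmin` renders `fields` and `list_display` in tuple order, so moving `is_blocked` next to `is_removed` and `is_censored` is a reordering with no migration or behaviour change. A stripped-down sketch of the same idea; the relative import is assumed:

```python
# Tuple order is render order in both the change form and the list view.
from django.contrib import admin

from .models import Comment  # assumed relative import


@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
    # the three moderation flags kept adjacent, as in the patch
    fields = ('comment', 'is_removed', 'is_censored', 'is_blocked')
    list_display = ('__str__', 'is_removed', 'is_censored', 'is_blocked')
```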
gh_patches_debug_3853
rasdani/github-patches
git_diff
spack__spack-29252
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Fix neovim install on M1 With #29228 and #29229 in, the `spack install neovim` fails with: ``` 1022 undef: _tigetstr 1023 undef: _cur_term 1024 undef: _setupterm >> 1025 Undefined symbols for architecture arm64: 1026 "_tigetstr", referenced from: 1027 _try_load_terminfo_key in libtermkey.a(driver-ti.o) 1028 "_cur_term", referenced from: 1029 _load_terminfo in libtermkey.a(driver-ti.o) 1030 "_setupterm", referenced from: 1031 _new_driver in libtermkey.a(driver-ti.o) 1032 _load_terminfo in libtermkey.a(driver-ti.o) 1033 ld: symbol(s) not found for architecture arm64 ``` While linking the `nvim` executable. These symbols seem to be coming from `ncurses`, but linking `ncurses` explicitly didn't seem to fix it. However, the current PR fixes it. One must turn off `termlib` in `ncurses` and then one must explicitly link it. Then `nvim` builds just fine. I am opening this PR as a Draft, because the `+termlib` seems hardwired in `lua`, so I don't know how to fix this properly. Also just adding `ncurses` in the cmake for `neovim` doesn't feel right, one should explicitly depend on `ncurses` and then find it using cmake. I don't have time to work on that. But this PR might be helpful to others to finish this work. Either way, neovim seems to work fine now. </issue> <code> [start of var/spack/repos/builtin/packages/libtermkey/package.py] 1 # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other 2 # Spack Project Developers. See the top-level COPYRIGHT file for details. 3 # 4 # SPDX-License-Identifier: (Apache-2.0 OR MIT) 5 6 from spack import * 7 8 9 class Libtermkey(Package): 10 """Easy keyboard entry processing for terminal programs""" 11 homepage = "http://www.leonerd.org.uk/code/libtermkey/" 12 url = "http://www.leonerd.org.uk/code/libtermkey/libtermkey-0.18.tar.gz" 13 14 version('0.22', sha256='6945bd3c4aaa83da83d80a045c5563da4edd7d0374c62c0d35aec09eb3014600') 15 version('0.18', sha256='239746de41c845af52bb3c14055558f743292dd6c24ac26c2d6567a5a6093926') 16 version('0.17', sha256='68949364ed5eaad857b3dea10071cde74b00b9f236dfbb702169f246c3cef389') 17 version('0.16', sha256='6c8136efa5d0b3277014a5d4519ea81190079c82656b7db1655a1bd147326a70') 18 version('0.15b', sha256='6825422c6297e4f81b2c48962b4512585ca8a50bf31f24b3234a1be71a9d7a6e') 19 version('0.14', sha256='3d114d4509499b80a583ea39cd35f18268aacf4a7bbf56c142cd032632005c79') 20 21 depends_on('libtool', type='build') 22 depends_on('ncurses') 23 depends_on('pkgconfig') 24 25 def install(self, spec, prefix): 26 make() 27 make("install", "PREFIX=" + prefix) 28 [end of var/spack/repos/builtin/packages/libtermkey/package.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/var/spack/repos/builtin/packages/libtermkey/package.py b/var/spack/repos/builtin/packages/libtermkey/package.py --- a/var/spack/repos/builtin/packages/libtermkey/package.py +++ b/var/spack/repos/builtin/packages/libtermkey/package.py @@ -19,7 +19,7 @@ version('0.14', sha256='3d114d4509499b80a583ea39cd35f18268aacf4a7bbf56c142cd032632005c79') depends_on('libtool', type='build') - depends_on('ncurses') + depends_on('unibilium') depends_on('pkgconfig') def install(self, spec, prefix):
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/libtermkey/package.py b/var/spack/repos/builtin/packages/libtermkey/package.py\n--- a/var/spack/repos/builtin/packages/libtermkey/package.py\n+++ b/var/spack/repos/builtin/packages/libtermkey/package.py\n@@ -19,7 +19,7 @@\n version('0.14', sha256='3d114d4509499b80a583ea39cd35f18268aacf4a7bbf56c142cd032632005c79')\n \n depends_on('libtool', type='build')\n- depends_on('ncurses')\n+ depends_on('unibilium')\n depends_on('pkgconfig')\n \n def install(self, spec, prefix):\n", "issue": "Fix neovim install on M1\nWith #29228 and #29229 in, the `spack install neovim` fails with:\r\n```\r\n 1022 undef: _tigetstr\r\n 1023 undef: _cur_term\r\n 1024 undef: _setupterm\r\n >> 1025 Undefined symbols for architecture arm64:\r\n 1026 \"_tigetstr\", referenced from:\r\n 1027 _try_load_terminfo_key in libtermkey.a(driver-ti.o)\r\n 1028 \"_cur_term\", referenced from:\r\n 1029 _load_terminfo in libtermkey.a(driver-ti.o)\r\n 1030 \"_setupterm\", referenced from:\r\n 1031 _new_driver in libtermkey.a(driver-ti.o)\r\n 1032 _load_terminfo in libtermkey.a(driver-ti.o)\r\n 1033 ld: symbol(s) not found for architecture arm64\r\n```\r\nWhile linking the `nvim` executable. These symbols seem to be coming from `ncurses`, but linking `ncurses` explicitly didn't seem to fix it. However, the current PR fixes it. One must turn off `termlib` in `ncurses` and then one must explicitly link it. Then `nvim` builds just fine. I am opening this PR as a Draft, because the `+termlib` seems hardwired in `lua`, so I don't know how to fix this properly. Also just adding `ncurses` in the cmake for `neovim` doesn't feel right, one should explicitly depend on `ncurses` and then find it using cmake. I don't have time to work on that. But this PR might be helpful to others to finish this work. Either way, neovim seems to work fine now.\n", "before_files": [{"content": "# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass Libtermkey(Package):\n \"\"\"Easy keyboard entry processing for terminal programs\"\"\"\n homepage = \"http://www.leonerd.org.uk/code/libtermkey/\"\n url = \"http://www.leonerd.org.uk/code/libtermkey/libtermkey-0.18.tar.gz\"\n\n version('0.22', sha256='6945bd3c4aaa83da83d80a045c5563da4edd7d0374c62c0d35aec09eb3014600')\n version('0.18', sha256='239746de41c845af52bb3c14055558f743292dd6c24ac26c2d6567a5a6093926')\n version('0.17', sha256='68949364ed5eaad857b3dea10071cde74b00b9f236dfbb702169f246c3cef389')\n version('0.16', sha256='6c8136efa5d0b3277014a5d4519ea81190079c82656b7db1655a1bd147326a70')\n version('0.15b', sha256='6825422c6297e4f81b2c48962b4512585ca8a50bf31f24b3234a1be71a9d7a6e')\n version('0.14', sha256='3d114d4509499b80a583ea39cd35f18268aacf4a7bbf56c142cd032632005c79')\n\n depends_on('libtool', type='build')\n depends_on('ncurses')\n depends_on('pkgconfig')\n\n def install(self, spec, prefix):\n make()\n make(\"install\", \"PREFIX=\" + prefix)\n", "path": "var/spack/repos/builtin/packages/libtermkey/package.py"}]}
1,633
191
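The accepted patch above simply swaps the terminfo provider from `ncurses` to `unibilium`, sidestepping the undefined curses symbols on arm64 (libtermkey's build can target either library). A hypothetical, more flexible alternative would expose the backend as a Spack variant so either provider can be selected at concretisation time; the variant name and values below are assumptions, not upstream API:

```python
# Hypothetical variant-based backend selection for libtermkey.
from spack import *


class Libtermkey(Package):
    """Easy keyboard entry processing for terminal programs"""

    variant('terminfo', default='unibilium',
            values=('unibilium', 'ncurses'), multi=False,
            description='terminfo backend (hypothetical knob)')

    depends_on('unibilium', when='terminfo=unibilium')
    depends_on('ncurses', when='terminfo=ncurses')
```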
gh_patches_debug_4404
rasdani/github-patches
git_diff
pallets__werkzeug-930
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add an 'extras_require' entry to setup.py for watchdog The reloader feature has an optional dependency on the watchdog package: http://werkzeug.pocoo.org/docs/0.11/serving/#reloader Whilst people can just add 'watchdog' to their requirements files, it's clearer to specifically add an `extras_require` entry for it to setup.py, which allows people to instead add eg `Werkzeug[watchdog]` to their requirements file, which more clearly shows why the dependency is required. In addition, should Werkzeug ever need a specific version of watchdog in the future (eg due to backwards incompatibilities), then the `extras_require` entry could be updated with a version range, causing zero breakage for people who'd used the `Werkzeug[watchdog]` form in their requirements file. I'll open a PR for this shortly :-) </issue> <code> [start of setup.py] 1 # -*- coding: utf-8 -*- 2 """ 3 Werkzeug 4 ======== 5 6 Werkzeug started as simple collection of various utilities for WSGI 7 applications and has become one of the most advanced WSGI utility 8 modules. It includes a powerful debugger, full featured request and 9 response objects, HTTP utilities to handle entity tags, cache control 10 headers, HTTP dates, cookie handling, file uploads, a powerful URL 11 routing system and a bunch of community contributed addon modules. 12 13 Werkzeug is unicode aware and doesn't enforce a specific template 14 engine, database adapter or anything else. It doesn't even enforce 15 a specific way of handling requests and leaves all that up to the 16 developer. It's most useful for end user applications which should work 17 on as many server environments as possible (such as blogs, wikis, 18 bulletin boards, etc.). 19 20 Details and example applications are available on the 21 `Werkzeug website <http://werkzeug.pocoo.org/>`_. 22 23 24 Features 25 -------- 26 27 - unicode awareness 28 29 - request and response objects 30 31 - various utility functions for dealing with HTTP headers such as 32 `Accept` and `Cache-Control` headers. 33 34 - thread local objects with proper cleanup at request end 35 36 - an interactive debugger 37 38 - A simple WSGI server with support for threading and forking 39 with an automatic reloader. 40 41 - a flexible URL routing system with REST support. 42 43 - fully WSGI compatible 44 45 46 Development Version 47 ------------------- 48 49 The Werkzeug development version can be installed by cloning the git 50 repository from `github`_:: 51 52 git clone [email protected]:pallets/werkzeug.git 53 54 .. 
_github: http://github.com/pallets/werkzeug 55 """ 56 import ast 57 import re 58 try: 59 from setuptools import setup, Command 60 except ImportError: 61 from distutils.core import setup, Command 62 63 64 _version_re = re.compile(r'__version__\s+=\s+(.*)') 65 66 with open('werkzeug/__init__.py', 'rb') as f: 67 version = str(ast.literal_eval(_version_re.search( 68 f.read().decode('utf-8')).group(1))) 69 70 71 class TestCommand(Command): 72 user_options = [] 73 74 def initialize_options(self): 75 pass 76 77 def finalize_options(self): 78 pass 79 80 def run(self): 81 import pytest 82 pytest.cmdline.main(args=[]) 83 84 85 setup( 86 name='Werkzeug', 87 version=version, 88 url='http://werkzeug.pocoo.org/', 89 license='BSD', 90 author='Armin Ronacher', 91 author_email='[email protected]', 92 description='The Swiss Army knife of Python web development', 93 long_description=__doc__, 94 classifiers=[ 95 'Development Status :: 5 - Production/Stable', 96 'Environment :: Web Environment', 97 'Intended Audience :: Developers', 98 'License :: OSI Approved :: BSD License', 99 'Operating System :: OS Independent', 100 'Programming Language :: Python', 101 'Programming Language :: Python :: 2', 102 'Programming Language :: Python :: 2.6', 103 'Programming Language :: Python :: 2.7', 104 'Programming Language :: Python :: 3', 105 'Programming Language :: Python :: 3.3', 106 'Programming Language :: Python :: 3.4', 107 'Programming Language :: Python :: 3.5', 108 'Topic :: Internet :: WWW/HTTP :: Dynamic Content', 109 'Topic :: Software Development :: Libraries :: Python Modules' 110 ], 111 packages=['werkzeug', 'werkzeug.debug', 'werkzeug.contrib'], 112 cmdclass=dict(test=TestCommand), 113 include_package_data=True, 114 zip_safe=False, 115 platforms='any' 116 ) 117 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -109,6 +109,9 @@ 'Topic :: Software Development :: Libraries :: Python Modules' ], packages=['werkzeug', 'werkzeug.debug', 'werkzeug.contrib'], + extras_require={ + 'watchdog': ['watchdog'], + }, cmdclass=dict(test=TestCommand), include_package_data=True, zip_safe=False,
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -109,6 +109,9 @@\n 'Topic :: Software Development :: Libraries :: Python Modules'\n ],\n packages=['werkzeug', 'werkzeug.debug', 'werkzeug.contrib'],\n+ extras_require={\n+ 'watchdog': ['watchdog'],\n+ },\n cmdclass=dict(test=TestCommand),\n include_package_data=True,\n zip_safe=False,\n", "issue": "Add an 'extras_require' entry to setup.py for watchdog\nThe reloader feature has an optional dependency on the watchdog package:\nhttp://werkzeug.pocoo.org/docs/0.11/serving/#reloader\n\nWhilst people can just add 'watchdog' to their requirements files, it's clearer to specifically add an `extras_require` entry for it to setup.py, which allows people to instead add eg `Werkzeug[watchdog]` to their requirements file, which more clearly shows why the dependency is required.\n\nIn addition, should Werkzeug ever need a specific version of watchdog in the future (eg due to backwards incompatibilities), then the `extras_require` entry could be updated with a version range, causing zero breakage for people who'd used the `Werkzeug[watchdog]` form in their requirements file.\n\nI'll open a PR for this shortly :-)\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nWerkzeug\n========\n\nWerkzeug started as simple collection of various utilities for WSGI\napplications and has become one of the most advanced WSGI utility\nmodules. It includes a powerful debugger, full featured request and\nresponse objects, HTTP utilities to handle entity tags, cache control\nheaders, HTTP dates, cookie handling, file uploads, a powerful URL\nrouting system and a bunch of community contributed addon modules.\n\nWerkzeug is unicode aware and doesn't enforce a specific template\nengine, database adapter or anything else. It doesn't even enforce\na specific way of handling requests and leaves all that up to the\ndeveloper. It's most useful for end user applications which should work\non as many server environments as possible (such as blogs, wikis,\nbulletin boards, etc.).\n\nDetails and example applications are available on the\n`Werkzeug website <http://werkzeug.pocoo.org/>`_.\n\n\nFeatures\n--------\n\n- unicode awareness\n\n- request and response objects\n\n- various utility functions for dealing with HTTP headers such as\n `Accept` and `Cache-Control` headers.\n\n- thread local objects with proper cleanup at request end\n\n- an interactive debugger\n\n- A simple WSGI server with support for threading and forking\n with an automatic reloader.\n\n- a flexible URL routing system with REST support.\n\n- fully WSGI compatible\n\n\nDevelopment Version\n-------------------\n\nThe Werkzeug development version can be installed by cloning the git\nrepository from `github`_::\n\n git clone [email protected]:pallets/werkzeug.git\n\n.. 
_github: http://github.com/pallets/werkzeug\n\"\"\"\nimport ast\nimport re\ntry:\n from setuptools import setup, Command\nexcept ImportError:\n from distutils.core import setup, Command\n\n\n_version_re = re.compile(r'__version__\\s+=\\s+(.*)')\n\nwith open('werkzeug/__init__.py', 'rb') as f:\n version = str(ast.literal_eval(_version_re.search(\n f.read().decode('utf-8')).group(1)))\n\n\nclass TestCommand(Command):\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n import pytest\n pytest.cmdline.main(args=[])\n\n\nsetup(\n name='Werkzeug',\n version=version,\n url='http://werkzeug.pocoo.org/',\n license='BSD',\n author='Armin Ronacher',\n author_email='[email protected]',\n description='The Swiss Army knife of Python web development',\n long_description=__doc__,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n 'Topic :: Software Development :: Libraries :: Python Modules'\n ],\n packages=['werkzeug', 'werkzeug.debug', 'werkzeug.contrib'],\n cmdclass=dict(test=TestCommand),\n include_package_data=True,\n zip_safe=False,\n platforms='any'\n)\n", "path": "setup.py"}]}
1,750
103
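The mechanism requested above is stock setuptools: an `extras_require` mapping makes `pip install "Werkzeug[watchdog]"` pull in the optional reloader backend, and the extra's requirement list is also where a future version range would live, exactly as the issue argues. A minimal sketch of the relevant slice of `setup.py`:

```python
# A trimmed setup.py showing only the optional-dependency declaration.
from setuptools import setup

setup(
    name='Werkzeug',
    version='0.11',
    packages=['werkzeug'],
    extras_require={
        # users opt in with: pip install "Werkzeug[watchdog]"
        'watchdog': ['watchdog'],  # a range like 'watchdog>=0.8' fits here
    },
)
```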
gh_patches_debug_39452
rasdani/github-patches
git_diff
wagtail__wagtail-1375
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> SnippetChooserPanel cannot take model string in model argument We should update `SnippetChooserPanel` to use `resolve_model_string` (https://github.com/torchbox/wagtail/blob/master/wagtail/wagtailcore/utils.py#L13-L37). This will make it behave more like `PageChooserPanel`. </issue> <code> [start of wagtail/wagtailsnippets/edit_handlers.py] 1 from __future__ import absolute_import, unicode_literals 2 3 from django.template.loader import render_to_string 4 from django.contrib.contenttypes.models import ContentType 5 from django.utils.safestring import mark_safe 6 from django.utils.encoding import force_text 7 8 from wagtail.wagtailadmin.edit_handlers import BaseChooserPanel 9 from .widgets import AdminSnippetChooser 10 11 12 class BaseSnippetChooserPanel(BaseChooserPanel): 13 object_type_name = 'item' 14 15 _content_type = None 16 17 @classmethod 18 def widget_overrides(cls): 19 return {cls.field_name: AdminSnippetChooser( 20 content_type=cls.content_type(), snippet_type_name=cls.snippet_type_name)} 21 22 @classmethod 23 def content_type(cls): 24 if cls._content_type is None: 25 # TODO: infer the content type by introspection on the foreign key rather than having to pass it explicitly 26 cls._content_type = ContentType.objects.get_for_model(cls.snippet_type) 27 28 return cls._content_type 29 30 def render_as_field(self): 31 instance_obj = self.get_chosen_item() 32 return mark_safe(render_to_string(self.field_template, { 33 'field': self.bound_field, 34 self.object_type_name: instance_obj, 35 'snippet_type_name': self.snippet_type_name, 36 })) 37 38 39 class SnippetChooserPanel(object): 40 def __init__(self, field_name, snippet_type): 41 self.field_name = field_name 42 self.snippet_type = snippet_type 43 44 def bind_to_model(self, model): 45 return type(str('_SnippetChooserPanel'), (BaseSnippetChooserPanel,), { 46 'model': model, 47 'field_name': self.field_name, 48 'snippet_type_name': force_text(self.snippet_type._meta.verbose_name), 49 'snippet_type': self.snippet_type, 50 }) 51 [end of wagtail/wagtailsnippets/edit_handlers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/wagtail/wagtailsnippets/edit_handlers.py b/wagtail/wagtailsnippets/edit_handlers.py --- a/wagtail/wagtailsnippets/edit_handlers.py +++ b/wagtail/wagtailsnippets/edit_handlers.py @@ -4,28 +4,42 @@ from django.contrib.contenttypes.models import ContentType from django.utils.safestring import mark_safe from django.utils.encoding import force_text +from django.core.exceptions import ImproperlyConfigured from wagtail.wagtailadmin.edit_handlers import BaseChooserPanel +from wagtail.wagtailcore.utils import resolve_model_string from .widgets import AdminSnippetChooser class BaseSnippetChooserPanel(BaseChooserPanel): object_type_name = 'item' - _content_type = None + _target_content_type = None @classmethod def widget_overrides(cls): return {cls.field_name: AdminSnippetChooser( - content_type=cls.content_type(), snippet_type_name=cls.snippet_type_name)} + content_type=cls.target_content_type(), snippet_type_name=cls.snippet_type_name)} @classmethod - def content_type(cls): - if cls._content_type is None: - # TODO: infer the content type by introspection on the foreign key rather than having to pass it explicitly - cls._content_type = ContentType.objects.get_for_model(cls.snippet_type) + def target_content_type(cls): + if cls._target_content_type is None: + if cls.snippet_type: + try: + model = resolve_model_string(cls.snippet_type) + except LookupError: + raise ImproperlyConfigured("{0}.snippet_type must be of the form 'app_label.model_name', given {1!r}".format( + cls.__name__, cls.snippet_type)) + except ValueError: + raise ImproperlyConfigured("{0}.snippet_type refers to model {1!r} that has not been installed".format( + cls.__name__, cls.snippet_type)) - return cls._content_type + cls._target_content_type = ContentType.objects.get_for_model(model) + else: + target_model = cls.model._meta.get_field(cls.field_name).rel.to + cls._target_content_type = ContentType.objects.get_for_model(target_model) + + return cls._target_content_type def render_as_field(self): instance_obj = self.get_chosen_item() @@ -35,6 +49,10 @@ 'snippet_type_name': self.snippet_type_name, })) + @property + def snippet_type_name(self): + return force_text(self.target_content_type()._meta.verbose_name) + class SnippetChooserPanel(object): def __init__(self, field_name, snippet_type): @@ -45,6 +63,5 @@ return type(str('_SnippetChooserPanel'), (BaseSnippetChooserPanel,), { 'model': model, 'field_name': self.field_name, - 'snippet_type_name': force_text(self.snippet_type._meta.verbose_name), 'snippet_type': self.snippet_type, })
{"golden_diff": "diff --git a/wagtail/wagtailsnippets/edit_handlers.py b/wagtail/wagtailsnippets/edit_handlers.py\n--- a/wagtail/wagtailsnippets/edit_handlers.py\n+++ b/wagtail/wagtailsnippets/edit_handlers.py\n@@ -4,28 +4,42 @@\n from django.contrib.contenttypes.models import ContentType\n from django.utils.safestring import mark_safe\n from django.utils.encoding import force_text\n+from django.core.exceptions import ImproperlyConfigured\n \n from wagtail.wagtailadmin.edit_handlers import BaseChooserPanel\n+from wagtail.wagtailcore.utils import resolve_model_string\n from .widgets import AdminSnippetChooser\n \n \n class BaseSnippetChooserPanel(BaseChooserPanel):\n object_type_name = 'item'\n \n- _content_type = None\n+ _target_content_type = None\n \n @classmethod\n def widget_overrides(cls):\n return {cls.field_name: AdminSnippetChooser(\n- content_type=cls.content_type(), snippet_type_name=cls.snippet_type_name)}\n+ content_type=cls.target_content_type(), snippet_type_name=cls.snippet_type_name)}\n \n @classmethod\n- def content_type(cls):\n- if cls._content_type is None:\n- # TODO: infer the content type by introspection on the foreign key rather than having to pass it explicitly\n- cls._content_type = ContentType.objects.get_for_model(cls.snippet_type)\n+ def target_content_type(cls):\n+ if cls._target_content_type is None:\n+ if cls.snippet_type:\n+ try:\n+ model = resolve_model_string(cls.snippet_type)\n+ except LookupError:\n+ raise ImproperlyConfigured(\"{0}.snippet_type must be of the form 'app_label.model_name', given {1!r}\".format(\n+ cls.__name__, cls.snippet_type))\n+ except ValueError:\n+ raise ImproperlyConfigured(\"{0}.snippet_type refers to model {1!r} that has not been installed\".format(\n+ cls.__name__, cls.snippet_type))\n \n- return cls._content_type\n+ cls._target_content_type = ContentType.objects.get_for_model(model)\n+ else:\n+ target_model = cls.model._meta.get_field(cls.field_name).rel.to\n+ cls._target_content_type = ContentType.objects.get_for_model(target_model)\n+\n+ return cls._target_content_type\n \n def render_as_field(self):\n instance_obj = self.get_chosen_item()\n@@ -35,6 +49,10 @@\n 'snippet_type_name': self.snippet_type_name,\n }))\n \n+ @property\n+ def snippet_type_name(self):\n+ return force_text(self.target_content_type()._meta.verbose_name)\n+\n \n class SnippetChooserPanel(object):\n def __init__(self, field_name, snippet_type):\n@@ -45,6 +63,5 @@\n return type(str('_SnippetChooserPanel'), (BaseSnippetChooserPanel,), {\n 'model': model,\n 'field_name': self.field_name,\n- 'snippet_type_name': force_text(self.snippet_type._meta.verbose_name),\n 'snippet_type': self.snippet_type,\n })\n", "issue": "SnippetChooserPanel cannot take model string in model argument\nWe should update `SnippetChooserPanel` to use `resolve_model_string` (https://github.com/torchbox/wagtail/blob/master/wagtail/wagtailcore/utils.py#L13-L37). 
This will make it behave more like `PageChooserPanel`.\n\n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nfrom django.template.loader import render_to_string\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils.safestring import mark_safe\nfrom django.utils.encoding import force_text\n\nfrom wagtail.wagtailadmin.edit_handlers import BaseChooserPanel\nfrom .widgets import AdminSnippetChooser\n\n\nclass BaseSnippetChooserPanel(BaseChooserPanel):\n object_type_name = 'item'\n\n _content_type = None\n\n @classmethod\n def widget_overrides(cls):\n return {cls.field_name: AdminSnippetChooser(\n content_type=cls.content_type(), snippet_type_name=cls.snippet_type_name)}\n\n @classmethod\n def content_type(cls):\n if cls._content_type is None:\n # TODO: infer the content type by introspection on the foreign key rather than having to pass it explicitly\n cls._content_type = ContentType.objects.get_for_model(cls.snippet_type)\n\n return cls._content_type\n\n def render_as_field(self):\n instance_obj = self.get_chosen_item()\n return mark_safe(render_to_string(self.field_template, {\n 'field': self.bound_field,\n self.object_type_name: instance_obj,\n 'snippet_type_name': self.snippet_type_name,\n }))\n\n\nclass SnippetChooserPanel(object):\n def __init__(self, field_name, snippet_type):\n self.field_name = field_name\n self.snippet_type = snippet_type\n\n def bind_to_model(self, model):\n return type(str('_SnippetChooserPanel'), (BaseSnippetChooserPanel,), {\n 'model': model,\n 'field_name': self.field_name,\n 'snippet_type_name': force_text(self.snippet_type._meta.verbose_name),\n 'snippet_type': self.snippet_type,\n })\n", "path": "wagtail/wagtailsnippets/edit_handlers.py"}]}
1,085
680
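The pattern in this last record's golden diff generalises beyond Wagtail: accept either a model class or an `'app_label.ModelName'` string, resolve it lazily, and convert the two resolution failures into `ImproperlyConfigured`. A standalone sketch of that conversion, using the same exception mapping and the 2015-era `wagtailcore` import path shown in the diff:

```python
# Sketch: normalising a "model or dotted string" argument the way the
# patch does, funnelling both failure modes into ImproperlyConfigured.
from django.core.exceptions import ImproperlyConfigured

from wagtail.wagtailcore.utils import resolve_model_string


def to_model(snippet_type, owner='SnippetChooserPanel'):
    try:
        return resolve_model_string(snippet_type)
    except LookupError:
        raise ImproperlyConfigured(
            "{0}.snippet_type must be of the form 'app_label.model_name', "
            "given {1!r}".format(owner, snippet_type))
    except ValueError:
        raise ImproperlyConfigured(
            "{0}.snippet_type refers to model {1!r} that has not been "
            "installed".format(owner, snippet_type))
```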