problem_id (stringlengths 18-22) | source (stringclasses 1 value) | task_type (stringclasses 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.71k-9.01k) | golden_diff (stringlengths 151-4.94k) | verification_info (stringlengths 465-11.3k) | num_tokens_prompt (int64 557-2.05k) | num_tokens_diff (int64 48-1.02k)
---|---|---|---|---|---|---|---|---
gh_patches_debug_2793 | rasdani/github-patches | git_diff | kornia__kornia-579 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Backslash not showing in equation for Tversky Loss
## 📚 Documentation
The backslashes in the denominator of the Tversky loss equation are not rendered correctly with MathJax. As shown in this screenshot, it only inserts a little space between P and G and thus does not correctly render the equation.

</issue>
<code>
[start of kornia/losses/tversky.py]
1 from typing import Optional
2
3 import torch
4 import torch.nn as nn
5 import torch.nn.functional as F
6
7 from kornia.utils import one_hot
8
9 # based on:
10 # https://github.com/kevinzakka/pytorch-goodies/blob/master/losses.py
11
12
13 def tversky_loss(input: torch.Tensor, target: torch.Tensor,
14 alpha: float, beta: float, eps: float = 1e-8) -> torch.Tensor:
15 r"""Function that computes Tversky loss.
16
17 See :class:`~kornia.losses.TverskyLoss` for details.
18 """
19 if not torch.is_tensor(input):
20 raise TypeError("Input type is not a torch.Tensor. Got {}"
21 .format(type(input)))
22
23 if not len(input.shape) == 4:
24 raise ValueError("Invalid input shape, we expect BxNxHxW. Got: {}"
25 .format(input.shape))
26
27 if not input.shape[-2:] == target.shape[-2:]:
28 raise ValueError("input and target shapes must be the same. Got: {} and {}"
29 .format(input.shape, input.shape))
30
31 if not input.device == target.device:
32 raise ValueError(
33 "input and target must be in the same device. Got: {} and {}" .format(
34 input.device, target.device))
35
36 # compute softmax over the classes axis
37 input_soft: torch.Tensor = F.softmax(input, dim=1)
38
39 # create the labels one hot tensor
40 target_one_hot: torch.Tensor = one_hot(
41 target, num_classes=input.shape[1],
42 device=input.device, dtype=input.dtype)
43
44 # compute the actual dice score
45 dims = (1, 2, 3)
46 intersection = torch.sum(input_soft * target_one_hot, dims)
47 fps = torch.sum(input_soft * (-target_one_hot + 1.), dims)
48 fns = torch.sum((-input_soft + 1.) * target_one_hot, dims)
49
50 numerator = intersection
51 denominator = intersection + alpha * fps + beta * fns
52 tversky_loss = numerator / (denominator + eps)
53 return torch.mean(-tversky_loss + 1.)
54
55
56 class TverskyLoss(nn.Module):
57 r"""Criterion that computes Tversky Coeficient loss.
58
59 According to [1], we compute the Tversky Coefficient as follows:
60
61 .. math::
62
63 \text{S}(P, G, \alpha; \beta) =
64 \frac{|PG|}{|PG| + \alpha |P \ G| + \beta |G \ P|}
65
66 where:
67 - :math:`P` and :math:`G` are the predicted and ground truth binary
68 labels.
69 - :math:`\alpha` and :math:`\beta` control the magnitude of the
70 penalties for FPs and FNs, respectively.
71
72 Notes:
73 - :math:`\alpha = \beta = 0.5` => dice coeff
74 - :math:`\alpha = \beta = 1` => tanimoto coeff
75 - :math:`\alpha + \beta = 1` => F beta coeff
76
77 Shape:
78 - Input: :math:`(N, C, H, W)` where C = number of classes.
79 - Target: :math:`(N, H, W)` where each value is
80 :math:`0 ≤ targets[i] ≤ C−1`.
81
82 Examples:
83 >>> N = 5 # num_classes
84 >>> loss = kornia.losses.TverskyLoss(alpha=0.5, beta=0.5)
85 >>> input = torch.randn(1, N, 3, 5, requires_grad=True)
86 >>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(N)
87 >>> output = loss(input, target)
88 >>> output.backward()
89
90 References:
91 [1]: https://arxiv.org/abs/1706.05721
92 """
93
94 def __init__(self, alpha: float, beta: float, eps: float = 1e-8) -> None:
95 super(TverskyLoss, self).__init__()
96 self.alpha: float = alpha
97 self.beta: float = beta
98 self.eps: float = eps
99
100 def forward( # type: ignore
101 self,
102 input: torch.Tensor,
103 target: torch.Tensor) -> torch.Tensor:
104 return tversky_loss(input, target, self.alpha, self.beta, self.eps)
105
[end of kornia/losses/tversky.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kornia/losses/tversky.py b/kornia/losses/tversky.py
--- a/kornia/losses/tversky.py
+++ b/kornia/losses/tversky.py
@@ -61,7 +61,7 @@
.. math::
\text{S}(P, G, \alpha; \beta) =
- \frac{|PG|}{|PG| + \alpha |P \ G| + \beta |G \ P|}
+ \frac{|PG|}{|PG| + \alpha |P \setminus G| + \beta |G \setminus P|}
where:
- :math:`P` and :math:`G` are the predicted and ground truth binary
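
For context on the fix: in LaTeX/MathJax, `\ ` is a control space, so `|P \ G|` typesets as "P G" with a small gap instead of a set difference. A minimal sketch of the before/after notation, mirroring the diff above:

```latex
% Broken form: "\ " renders only a thin space between P and G.
\frac{|PG|}{|PG| + \alpha |P \ G| + \beta |G \ P|}

% Fixed form: \setminus renders an explicit set-difference operator.
\frac{|PG|}{|PG| + \alpha |P \setminus G| + \beta |G \setminus P|}
```
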
| {"golden_diff": "diff --git a/kornia/losses/tversky.py b/kornia/losses/tversky.py\n--- a/kornia/losses/tversky.py\n+++ b/kornia/losses/tversky.py\n@@ -61,7 +61,7 @@\n .. math::\n \n \\text{S}(P, G, \\alpha; \\beta) =\n- \\frac{|PG|}{|PG| + \\alpha |P \\ G| + \\beta |G \\ P|}\n+ \\frac{|PG|}{|PG| + \\alpha |P \\setminus G| + \\beta |G \\setminus P|}\n \n where:\n - :math:`P` and :math:`G` are the predicted and ground truth binary\n", "issue": "Backslash not showing in equation for Tversky Loss\n## \ud83d\udcda Documentation\r\n\r\nThe backslashs in the denominator of the Tversky loss equation is not rendered correctly with MathJax. As shown in this screenshot it only inserts a little space between P and G and thus does not correctly render the equation.\r\n\r\n\r\n\r\n\r\n<!-- A clear and concise description of what content in https://kornia.readthedocs.io is an issue. If this has to do with the general https://kornia.org website, please file an issue at https://github.com/kornia/kornia.github.io/issues/new/choose instead. If this has to do with https://kornia.org/tutorials, please file an issue at https://github.com/kornia/tutorials/issues/new -->\r\n\r\n\n", "before_files": [{"content": "from typing import Optional\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom kornia.utils import one_hot\n\n# based on:\n# https://github.com/kevinzakka/pytorch-goodies/blob/master/losses.py\n\n\ndef tversky_loss(input: torch.Tensor, target: torch.Tensor,\n alpha: float, beta: float, eps: float = 1e-8) -> torch.Tensor:\n r\"\"\"Function that computes Tversky loss.\n\n See :class:`~kornia.losses.TverskyLoss` for details.\n \"\"\"\n if not torch.is_tensor(input):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\"\n .format(type(input)))\n\n if not len(input.shape) == 4:\n raise ValueError(\"Invalid input shape, we expect BxNxHxW. Got: {}\"\n .format(input.shape))\n\n if not input.shape[-2:] == target.shape[-2:]:\n raise ValueError(\"input and target shapes must be the same. Got: {} and {}\"\n .format(input.shape, input.shape))\n\n if not input.device == target.device:\n raise ValueError(\n \"input and target must be in the same device. Got: {} and {}\" .format(\n input.device, target.device))\n\n # compute softmax over the classes axis\n input_soft: torch.Tensor = F.softmax(input, dim=1)\n\n # create the labels one hot tensor\n target_one_hot: torch.Tensor = one_hot(\n target, num_classes=input.shape[1],\n device=input.device, dtype=input.dtype)\n\n # compute the actual dice score\n dims = (1, 2, 3)\n intersection = torch.sum(input_soft * target_one_hot, dims)\n fps = torch.sum(input_soft * (-target_one_hot + 1.), dims)\n fns = torch.sum((-input_soft + 1.) * target_one_hot, dims)\n\n numerator = intersection\n denominator = intersection + alpha * fps + beta * fns\n tversky_loss = numerator / (denominator + eps)\n return torch.mean(-tversky_loss + 1.)\n\n\nclass TverskyLoss(nn.Module):\n r\"\"\"Criterion that computes Tversky Coeficient loss.\n\n According to [1], we compute the Tversky Coefficient as follows:\n\n .. 
math::\n\n \\text{S}(P, G, \\alpha; \\beta) =\n \\frac{|PG|}{|PG| + \\alpha |P \\ G| + \\beta |G \\ P|}\n\n where:\n - :math:`P` and :math:`G` are the predicted and ground truth binary\n labels.\n - :math:`\\alpha` and :math:`\\beta` control the magnitude of the\n penalties for FPs and FNs, respectively.\n\n Notes:\n - :math:`\\alpha = \\beta = 0.5` => dice coeff\n - :math:`\\alpha = \\beta = 1` => tanimoto coeff\n - :math:`\\alpha + \\beta = 1` => F beta coeff\n\n Shape:\n - Input: :math:`(N, C, H, W)` where C = number of classes.\n - Target: :math:`(N, H, W)` where each value is\n :math:`0 \u2264 targets[i] \u2264 C\u22121`.\n\n Examples:\n >>> N = 5 # num_classes\n >>> loss = kornia.losses.TverskyLoss(alpha=0.5, beta=0.5)\n >>> input = torch.randn(1, N, 3, 5, requires_grad=True)\n >>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(N)\n >>> output = loss(input, target)\n >>> output.backward()\n\n References:\n [1]: https://arxiv.org/abs/1706.05721\n \"\"\"\n\n def __init__(self, alpha: float, beta: float, eps: float = 1e-8) -> None:\n super(TverskyLoss, self).__init__()\n self.alpha: float = alpha\n self.beta: float = beta\n self.eps: float = eps\n\n def forward( # type: ignore\n self,\n input: torch.Tensor,\n target: torch.Tensor) -> torch.Tensor:\n return tversky_loss(input, target, self.alpha, self.beta, self.eps)\n", "path": "kornia/losses/tversky.py"}]} | 1,959 | 170 |
gh_patches_debug_18960 | rasdani/github-patches | git_diff | wagtail__wagtail-9119 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Locked pages report renders all users in "locked_by" filter
### Issue Summary
Currently, the select field for the "locked_by" filter on the "Locked pages" report renders all users from the user table, unfiltered.
This breaks the view for one of our projects, which tries to list all 500k users...
Actually, only the users who have locked a page should be listed.
I will try to make a PR for that; in the meantime, this quickfix works for us (with wagtail 4.0, django 3.2):
```python
# in one of our apps.py
from django.apps import AppConfig
class BaseConfig(AppConfig):
name = "base"
def ready(self):
patch_locked_pages_filter()
def patch_locked_pages_filter():
import django_filters
from django.contrib.auth import get_user_model
from wagtail.admin.views.reports.locked_pages import LockedPagesView, LockedPagesReportFilterSet
def get_users_for_filter():
User = get_user_model()
return User.objects.filter(locked_pages__isnull=False).order_by(User.USERNAME_FIELD)
class PatchedLockedPagesReportFilterSet(LockedPagesReportFilterSet):
locked_by = django_filters.ModelChoiceFilter(
field_name="locked_by", queryset=lambda request: get_users_for_filter()
)
LockedPagesView.filterset_class = PatchedLockedPagesReportFilterSet
```
### Steps to Reproduce
1. Start a new project with `wagtail start myproject`
2. Create many users, no matter if they have access to the CMS or not
3. Open `/admin/reports/locked/` and find all those users in the "locked_by" dropdown on the right
- I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: yes
### Technical details
- Python version: 3.10
- Django version: 3.2
- Wagtail version: 4.0, 2.15,... (according to the git history seems to be there from the beginning)
</issue>
<code>
[start of wagtail/admin/views/reports/locked_pages.py]
1 import datetime
2
3 import django_filters
4 from django.conf import settings
5 from django.core.exceptions import PermissionDenied
6 from django.utils.translation import gettext_lazy as _
7
8 from wagtail.admin.filters import DateRangePickerWidget, WagtailFilterSet
9 from wagtail.models import Page, UserPagePermissionsProxy
10
11 from .base import PageReportView
12
13
14 class LockedPagesReportFilterSet(WagtailFilterSet):
15 locked_at = django_filters.DateFromToRangeFilter(widget=DateRangePickerWidget)
16
17 class Meta:
18 model = Page
19 fields = ["locked_by", "locked_at", "live"]
20
21
22 class LockedPagesView(PageReportView):
23 template_name = "wagtailadmin/reports/locked_pages.html"
24 title = _("Locked pages")
25 header_icon = "lock"
26 list_export = PageReportView.list_export + [
27 "locked_at",
28 "locked_by",
29 ]
30 filterset_class = LockedPagesReportFilterSet
31
32 def get_filename(self):
33 return "locked-pages-report-{}".format(
34 datetime.datetime.today().strftime("%Y-%m-%d")
35 )
36
37 def get_queryset(self):
38 pages = (
39 (
40 UserPagePermissionsProxy(self.request.user).editable_pages()
41 | Page.objects.filter(locked_by=self.request.user)
42 )
43 .filter(locked=True)
44 .specific(defer=True)
45 )
46
47 if getattr(settings, "WAGTAIL_I18N_ENABLED", False):
48 pages = pages.select_related("locale")
49
50 self.queryset = pages
51 return super().get_queryset()
52
53 def dispatch(self, request, *args, **kwargs):
54 if not UserPagePermissionsProxy(request.user).can_remove_locks():
55 raise PermissionDenied
56 return super().dispatch(request, *args, **kwargs)
57
[end of wagtail/admin/views/reports/locked_pages.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wagtail/admin/views/reports/locked_pages.py b/wagtail/admin/views/reports/locked_pages.py
--- a/wagtail/admin/views/reports/locked_pages.py
+++ b/wagtail/admin/views/reports/locked_pages.py
@@ -2,6 +2,7 @@
import django_filters
from django.conf import settings
+from django.contrib.auth import get_user_model
from django.core.exceptions import PermissionDenied
from django.utils.translation import gettext_lazy as _
@@ -11,8 +12,16 @@
from .base import PageReportView
+def get_users_for_filter():
+ User = get_user_model()
+ return User.objects.filter(locked_pages__isnull=False).order_by(User.USERNAME_FIELD)
+
+
class LockedPagesReportFilterSet(WagtailFilterSet):
locked_at = django_filters.DateFromToRangeFilter(widget=DateRangePickerWidget)
+ locked_by = django_filters.ModelChoiceFilter(
+ field_name="locked_by", queryset=lambda request: get_users_for_filter()
+ )
class Meta:
model = Page
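
The load-bearing detail in this fix is that django-filter accepts a callable for `queryset`, which it invokes with the current request each time the filter form is built, so the choices are computed lazily per request instead of once at import time. A minimal standalone sketch of the same pattern; the `locked_pages` reverse accessor is taken from the diff, and the FilterSet name is illustrative:

```python
import django_filters
from django.contrib.auth import get_user_model


def users_with_locks(request):
    # Re-evaluated on every form render, so newly added lockers appear
    # without a restart and the other ~500k users are never loaded.
    User = get_user_model()
    return User.objects.filter(locked_pages__isnull=False)


class LockedPageFilterSet(django_filters.FilterSet):
    locked_by = django_filters.ModelChoiceFilter(queryset=users_with_locks)
```
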
| {"golden_diff": "diff --git a/wagtail/admin/views/reports/locked_pages.py b/wagtail/admin/views/reports/locked_pages.py\n--- a/wagtail/admin/views/reports/locked_pages.py\n+++ b/wagtail/admin/views/reports/locked_pages.py\n@@ -2,6 +2,7 @@\n \n import django_filters\n from django.conf import settings\n+from django.contrib.auth import get_user_model\n from django.core.exceptions import PermissionDenied\n from django.utils.translation import gettext_lazy as _\n \n@@ -11,8 +12,16 @@\n from .base import PageReportView\n \n \n+def get_users_for_filter():\n+ User = get_user_model()\n+ return User.objects.filter(locked_pages__isnull=False).order_by(User.USERNAME_FIELD)\n+\n+\n class LockedPagesReportFilterSet(WagtailFilterSet):\n locked_at = django_filters.DateFromToRangeFilter(widget=DateRangePickerWidget)\n+ locked_by = django_filters.ModelChoiceFilter(\n+ field_name=\"locked_by\", queryset=lambda request: get_users_for_filter()\n+ )\n \n class Meta:\n model = Page\n", "issue": "Locked pages report renders all users in \"locked_by\" filter\n### Issue Summary\r\n\r\nCurrently, the select-field for the \"locked_by\" filter on the \"Locked pages\"-report renders all users unfiltered from the user table.\r\nThis breaks the view for one of our projects, which tries to list all 500k users...\r\nActually, only the users which have locked a page should be listed.\r\n\r\nI will try to make a PR for that, in the meantime this quickfix works for us (with wagtail 4.0, django 3.2):\r\n```python\r\n# in one of our apps.py\r\nfrom django.apps import AppConfig\r\n\r\nclass BaseConfig(AppConfig):\r\n name = \"base\"\r\n\r\n def ready(self):\r\n patch_locked_pages_filter()\r\n\r\ndef patch_locked_pages_filter():\r\n import django_filters\r\n from django.contrib.auth import get_user_model\r\n\r\n from wagtail.admin.views.reports.locked_pages import LockedPagesView, LockedPagesReportFilterSet\r\n\r\n def get_users_for_filter():\r\n User = get_user_model()\r\n return User.objects.filter(locked_pages__isnull=False).order_by(User.USERNAME_FIELD)\r\n\r\n class PatchedLockedPagesReportFilterSet(LockedPagesReportFilterSet):\r\n locked_by = django_filters.ModelChoiceFilter(\r\n field_name=\"locked_by\", queryset=lambda request: get_users_for_filter()\r\n )\r\n\r\n LockedPagesView.filterset_class = PatchedLockedPagesReportFilterSet\r\n\r\n```\r\n\r\n### Steps to Reproduce\r\n\r\n1. Start a new project with `wagtail start myproject`\r\n2. Create many users, no matter if they have access to the CMS or not\r\n3. Open `/admin/reports/locked/` and find all those users in the \"locked_by\" dropdown on the right\r\n\r\n- I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: yes\r\n\r\n### Technical details\r\n\r\n- Python version: 3.10\r\n- Django version: 3.2\r\n- Wagtail version: 4.0, 2.15,... 
(according to the git history seems to be there from the beginning)\r\n\n", "before_files": [{"content": "import datetime\n\nimport django_filters\nfrom django.conf import settings\nfrom django.core.exceptions import PermissionDenied\nfrom django.utils.translation import gettext_lazy as _\n\nfrom wagtail.admin.filters import DateRangePickerWidget, WagtailFilterSet\nfrom wagtail.models import Page, UserPagePermissionsProxy\n\nfrom .base import PageReportView\n\n\nclass LockedPagesReportFilterSet(WagtailFilterSet):\n locked_at = django_filters.DateFromToRangeFilter(widget=DateRangePickerWidget)\n\n class Meta:\n model = Page\n fields = [\"locked_by\", \"locked_at\", \"live\"]\n\n\nclass LockedPagesView(PageReportView):\n template_name = \"wagtailadmin/reports/locked_pages.html\"\n title = _(\"Locked pages\")\n header_icon = \"lock\"\n list_export = PageReportView.list_export + [\n \"locked_at\",\n \"locked_by\",\n ]\n filterset_class = LockedPagesReportFilterSet\n\n def get_filename(self):\n return \"locked-pages-report-{}\".format(\n datetime.datetime.today().strftime(\"%Y-%m-%d\")\n )\n\n def get_queryset(self):\n pages = (\n (\n UserPagePermissionsProxy(self.request.user).editable_pages()\n | Page.objects.filter(locked_by=self.request.user)\n )\n .filter(locked=True)\n .specific(defer=True)\n )\n\n if getattr(settings, \"WAGTAIL_I18N_ENABLED\", False):\n pages = pages.select_related(\"locale\")\n\n self.queryset = pages\n return super().get_queryset()\n\n def dispatch(self, request, *args, **kwargs):\n if not UserPagePermissionsProxy(request.user).can_remove_locks():\n raise PermissionDenied\n return super().dispatch(request, *args, **kwargs)\n", "path": "wagtail/admin/views/reports/locked_pages.py"}]} | 1,467 | 237 |
gh_patches_debug_207 | rasdani/github-patches | git_diff | chainer__chainer-239 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add type check to Identity Function
Related to #123
</issue>
<code>
[start of chainer/functions/identity.py]
1 from chainer import function
2
3
4 class Identity(function.Function):
5
6 """Identity function."""
7
8 def forward(self, xs):
9 return xs
10
11 def backward(self, xs, gys):
12 return gys
13
14
15 def identity(*inputs):
16 """Just returns input variables."""
17 return Identity()(*inputs)
18
[end of chainer/functions/identity.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/chainer/functions/identity.py b/chainer/functions/identity.py
--- a/chainer/functions/identity.py
+++ b/chainer/functions/identity.py
@@ -5,6 +5,9 @@
"""Identity function."""
+ def check_type_forward(self, in_types):
+ pass
+
def forward(self, xs):
return xs
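
For comparison, a sketch of what a non-trivial `check_type_forward` looks like with Chainer's type-check DSL; `Identity` accepts any number of inputs of any type, which is why the committed body is simply `pass`. The `Square` function below is hypothetical and exists only to illustrate the DSL:

```python
from chainer import function
from chainer.utils import type_check


class Square(function.Function):
    """Hypothetical one-input function used to illustrate type checks."""

    def check_type_forward(self, in_types):
        # Expect exactly one floating-point input.
        type_check.expect(in_types.size() == 1)
        type_check.expect(in_types[0].dtype.kind == 'f')

    def forward(self, xs):
        x = xs[0]
        return x * x,

    def backward(self, xs, gys):
        return 2 * xs[0] * gys[0],
```
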
| {"golden_diff": "diff --git a/chainer/functions/identity.py b/chainer/functions/identity.py\n--- a/chainer/functions/identity.py\n+++ b/chainer/functions/identity.py\n@@ -5,6 +5,9 @@\n \n \"\"\"Identity function.\"\"\"\n \n+ def check_type_forward(self, in_types):\n+ pass\n+\n def forward(self, xs):\n return xs\n", "issue": "Add type check to Identity Function\nRelated to #123\n\n", "before_files": [{"content": "from chainer import function\n\n\nclass Identity(function.Function):\n\n \"\"\"Identity function.\"\"\"\n\n def forward(self, xs):\n return xs\n\n def backward(self, xs, gys):\n return gys\n\n\ndef identity(*inputs):\n \"\"\"Just returns input variables.\"\"\"\n return Identity()(*inputs)\n", "path": "chainer/functions/identity.py"}]} | 647 | 81 |
gh_patches_debug_4138 | rasdani/github-patches | git_diff | google__turbinia-793 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
path_spec.parent is None exception
```
FsstatTask Task failed with exception: ['NoneType' object has no attribute 'type_indicator']
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/turbinia-20210208.4.dev19+g7c02e5e-py3.6.egg/turbinia/workers/__init__.py", line 881, in run_wrapper
self.evidence_setup(evidence)
File "/usr/local/lib/python3.6/dist-packages/turbinia-20210208.4.dev19+g7c02e5e-py3.6.egg/turbinia/workers/__init__.py", line 499, in evidence_setup
evidence.preprocess(self.tmp_dir, required_states=self.REQUIRED_STATES)
File "/usr/local/lib/python3.6/dist-packages/turbinia-20210208.4.dev19+g7c02e5e-py3.6.egg/turbinia/evidence.py", line 341, in preprocess
self._preprocess(tmp_dir, required_states)
File "/usr/local/lib/python3.6/dist-packages/turbinia-20210208.4.dev19+g7c02e5e-py3.6.egg/turbinia/evidence.py", line 575, in _preprocess
encryption_type = partitions.GetPartitionEncryptionType(path_spec)
File "/usr/local/lib/python3.6/dist-packages/turbinia-20210208.4.dev19+g7c02e5e-py3.6.egg/turbinia/processors/partitions.py", line 66, in GetPartitionEncryptionType
if path_spec.parent.type_indicator == dfvfs_definitions.TYPE_INDICATOR_BDE:
AttributeError: 'NoneType' object has no attribute 'type_indicator'
No state_manager initialized, not updating Task info
Trying last ditch attempt to close result
Task Result was auto-closed from task executor on turbinia-worker-6eaf93e7aee7c9cf-0 likely due to previous failures. Previous status: [FsstatTask Task failed with exception: ['NoneType' object has no attribute 'type_indicator']]
Task Result was auto-closed from task executor on turbinia-worker-6eaf93e7aee7c9cf-0 likely due to previous failures. Previous status: [FsstatTask Task failed with exception: ['NoneType' object has no attribute 'type_indicator']]
```
</issue>
<code>
[start of turbinia/processors/partitions.py]
1 # -*- coding: utf-8 -*-
2 # Copyright 2021 Google LLC
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # https://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Evidence processor to enumerate partitions."""
16
17 import logging
18
19 from dfvfs.helpers import volume_scanner
20 from dfvfs.lib import definitions as dfvfs_definitions
21 from dfvfs.lib import errors as dfvfs_errors
22
23 from turbinia.lib.dfvfs_classes import UnattendedVolumeScannerMediator
24 from turbinia import TurbiniaException
25
26 log = logging.getLogger('turbinia')
27
28
29 def Enumerate(evidence):
30 """Uses dfVFS to enumerate partitions in a disk / image.
31
32 Args:
33 evidence: Evidence object to be scanned.
34
35 Raises:
36 TurbiniaException if source evidence can't be scanned.
37
38 Returns:
39 list[dfVFS.path_spec]: path specs for identified partitions
40 """
41 dfvfs_definitions.PREFERRED_GPT_BACK_END = (
42 dfvfs_definitions.TYPE_INDICATOR_GPT)
43 mediator = UnattendedVolumeScannerMediator()
44 mediator.credentials = evidence.credentials
45 path_specs = []
46 try:
47 scanner = volume_scanner.VolumeScanner(mediator=mediator)
48 path_specs = scanner.GetBasePathSpecs(evidence.local_path)
49 except dfvfs_errors.ScannerError as e:
50 raise TurbiniaException(
51 'Could not enumerate partitions [{0!s}]: {1!s}'.format(
52 evidence.local_path, e))
53
54 return path_specs
55
56
57 def GetPartitionEncryptionType(path_spec):
58 """Checks a partition for encryption.
59
60 Args:
61 path_spec (dfVFS.path_spec): Partition path_spec.
62
63 Returns:
64 String representing the type of encryption, or None.
65 """
66 encryption_type = None
67 if path_spec.parent.type_indicator == dfvfs_definitions.TYPE_INDICATOR_BDE:
68 encryption_type = 'BDE'
69 return encryption_type
70
71
72 def GetPathSpecByLocation(path_specs, location):
73 """Finds a path_spec from a list of path_specs for a given location.
74
75 Args:
76 path_specs (list[dfVFS.path_spec]): List of path_specs from volume scanner.
77 location (str): dfVFS location to search for.
78
79 Returns:
80 dfVFS.path_spec for the given location or None if not found.
81 """
82 for path_spec in path_specs:
83 child_path_spec = path_spec
84 fs_location = getattr(path_spec, 'location', None)
85 while path_spec.HasParent():
86 type_indicator = path_spec.type_indicator
87 if type_indicator in (dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION,
88 dfvfs_definitions.TYPE_INDICATOR_GPT):
89 if fs_location in ('\\', '/'):
90 fs_location = getattr(path_spec, 'location', None)
91 break
92 path_spec = path_spec.parent
93 if fs_location == location:
94 return child_path_spec
95 return None
96
[end of turbinia/processors/partitions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/turbinia/processors/partitions.py b/turbinia/processors/partitions.py
--- a/turbinia/processors/partitions.py
+++ b/turbinia/processors/partitions.py
@@ -64,6 +64,10 @@
String representing the type of encryption, or None.
"""
encryption_type = None
+
+ if not path_spec.HasParent():
+ return None
+
if path_spec.parent.type_indicator == dfvfs_definitions.TYPE_INDICATOR_BDE:
encryption_type = 'BDE'
return encryption_type
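
The guard mirrors how the rest of this module walks path_spec chains: `GetPathSpecByLocation` already calls `HasParent()` before touching `.parent`. A small sketch of exercising the patched function with a parentless path_spec; the dfVFS factory call follows the library's usual idiom, and the image path is a placeholder:

```python
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.path import factory as path_spec_factory

from turbinia.processors.partitions import GetPartitionEncryptionType

# An OS path_spec sits at the root of a chain, so it has no parent.
os_path_spec = path_spec_factory.Factory.NewPathSpec(
    dfvfs_definitions.TYPE_INDICATOR_OS, location='/tmp/disk.img')

assert not os_path_spec.HasParent()
# Before the patch this raised AttributeError; now it returns None.
assert GetPartitionEncryptionType(os_path_spec) is None
```
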
| {"golden_diff": "diff --git a/turbinia/processors/partitions.py b/turbinia/processors/partitions.py\n--- a/turbinia/processors/partitions.py\n+++ b/turbinia/processors/partitions.py\n@@ -64,6 +64,10 @@\n String representing the type of encryption, or None.\n \"\"\"\n encryption_type = None\n+\n+ if not path_spec.HasParent():\n+ return None\n+\n if path_spec.parent.type_indicator == dfvfs_definitions.TYPE_INDICATOR_BDE:\n encryption_type = 'BDE'\n return encryption_type\n", "issue": "path_spec.parent is None exception\n```\r\nFsstatTask Task failed with exception: ['NoneType' object has no attribute 'type_indicator']\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.6/dist-packages/turbinia-20210208.4.dev19+g7c02e5e-py3.6.egg/turbinia/workers/__init__.py\", line 881, in run_wrapper\r\n self.evidence_setup(evidence)\r\n File \"/usr/local/lib/python3.6/dist-packages/turbinia-20210208.4.dev19+g7c02e5e-py3.6.egg/turbinia/workers/__init__.py\", line 499, in evidence_setup\r\n evidence.preprocess(self.tmp_dir, required_states=self.REQUIRED_STATES)\r\n File \"/usr/local/lib/python3.6/dist-packages/turbinia-20210208.4.dev19+g7c02e5e-py3.6.egg/turbinia/evidence.py\", line 341, in preprocess\r\n self._preprocess(tmp_dir, required_states)\r\n File \"/usr/local/lib/python3.6/dist-packages/turbinia-20210208.4.dev19+g7c02e5e-py3.6.egg/turbinia/evidence.py\", line 575, in _preprocess\r\n encryption_type = partitions.GetPartitionEncryptionType(path_spec)\r\n File \"/usr/local/lib/python3.6/dist-packages/turbinia-20210208.4.dev19+g7c02e5e-py3.6.egg/turbinia/processors/partitions.py\", line 66, in GetPartitionEncryptionType\r\n if path_spec.parent.type_indicator == dfvfs_definitions.TYPE_INDICATOR_BDE:\r\nAttributeError: 'NoneType' object has no attribute 'type_indicator'\r\n\r\nNo state_manager initialized, not updating Task info\r\nTrying last ditch attempt to close result\r\nTask Result was auto-closed from task executor on turbinia-worker-6eaf93e7aee7c9cf-0 likely due to previous failures. Previous status: [FsstatTask Task failed with exception: ['NoneType' object has no attribute 'type_indicator']]\r\nTask Result was auto-closed from task executor on turbinia-worker-6eaf93e7aee7c9cf-0 likely due to previous failures. 
Previous status: [FsstatTask Task failed with exception: ['NoneType' object has no attribute 'type_indicator']]\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Evidence processor to enumerate partitions.\"\"\"\n\nimport logging\n\nfrom dfvfs.helpers import volume_scanner\nfrom dfvfs.lib import definitions as dfvfs_definitions\nfrom dfvfs.lib import errors as dfvfs_errors\n\nfrom turbinia.lib.dfvfs_classes import UnattendedVolumeScannerMediator\nfrom turbinia import TurbiniaException\n\nlog = logging.getLogger('turbinia')\n\n\ndef Enumerate(evidence):\n \"\"\"Uses dfVFS to enumerate partitions in a disk / image.\n\n Args:\n evidence: Evidence object to be scanned.\n\n Raises:\n TurbiniaException if source evidence can't be scanned.\n\n Returns:\n list[dfVFS.path_spec]: path specs for identified partitions\n \"\"\"\n dfvfs_definitions.PREFERRED_GPT_BACK_END = (\n dfvfs_definitions.TYPE_INDICATOR_GPT)\n mediator = UnattendedVolumeScannerMediator()\n mediator.credentials = evidence.credentials\n path_specs = []\n try:\n scanner = volume_scanner.VolumeScanner(mediator=mediator)\n path_specs = scanner.GetBasePathSpecs(evidence.local_path)\n except dfvfs_errors.ScannerError as e:\n raise TurbiniaException(\n 'Could not enumerate partitions [{0!s}]: {1!s}'.format(\n evidence.local_path, e))\n\n return path_specs\n\n\ndef GetPartitionEncryptionType(path_spec):\n \"\"\"Checks a partition for encryption.\n\n Args:\n path_spec (dfVFS.path_spec): Partition path_spec.\n\n Returns:\n String representing the type of encryption, or None.\n \"\"\"\n encryption_type = None\n if path_spec.parent.type_indicator == dfvfs_definitions.TYPE_INDICATOR_BDE:\n encryption_type = 'BDE'\n return encryption_type\n\n\ndef GetPathSpecByLocation(path_specs, location):\n \"\"\"Finds a path_spec from a list of path_specs for a given location.\n\n Args:\n path_specs (list[dfVFS.path_spec]): List of path_specs from volume scanner.\n location (str): dfVFS location to search for.\n\n Returns:\n dfVFS.path_spec for the given location or None if not found.\n \"\"\"\n for path_spec in path_specs:\n child_path_spec = path_spec\n fs_location = getattr(path_spec, 'location', None)\n while path_spec.HasParent():\n type_indicator = path_spec.type_indicator\n if type_indicator in (dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION,\n dfvfs_definitions.TYPE_INDICATOR_GPT):\n if fs_location in ('\\\\', '/'):\n fs_location = getattr(path_spec, 'location', None)\n break\n path_spec = path_spec.parent\n if fs_location == location:\n return child_path_spec\n return None\n", "path": "turbinia/processors/partitions.py"}]} | 2,033 | 128 |
gh_patches_debug_10351 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-3993 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
In Bplans required field (start-date & end-date) for publication is not marked as such
**URL:** https://meinberlin-dev.liqd.net/dashboard/projects/sabinas-bebauungsplan/bplan/
**user:** A Bplan initiator
**expected behaviour:** As a Bplan initiator, I want the form to tell me which fields are required so that I can publish
**behaviour:** Because the start and end date are not marked as required for publication, it's unclear that not filling them in is hindering publication
**important screensize:** any
**device & browser:** ubuntu chrome
**Comment/Question:**
These are just missing the red exclamation point icons. It's a simple fix in forms.py
Screenshot?

</issue>
<code>
[start of meinberlin/apps/bplan/forms.py]
1 from django import forms
2
3 from meinberlin.apps.extprojects.forms import ExternalProjectCreateForm
4 from meinberlin.apps.extprojects.forms import ExternalProjectForm
5
6 from . import models
7
8
9 class StatementForm(forms.ModelForm):
10 class Meta:
11 model = models.Statement
12 fields = ['name', 'email', 'statement',
13 'street_number', 'postal_code_city']
14
15
16 class BplanProjectCreateForm(ExternalProjectCreateForm):
17
18 class Meta:
19 model = models.Bplan
20 fields = ['name', 'description', 'tile_image', 'tile_image_copyright']
21
22
23 class BplanProjectForm(ExternalProjectForm):
24
25 class Meta:
26 model = models.Bplan
27 fields = ['name', 'identifier', 'url', 'description', 'tile_image',
28 'tile_image_copyright', 'is_archived', 'office_worker_email',
29 'start_date', 'end_date']
30 required_for_project_publish = ['name', 'url', 'description',
31 'office_worker_email']
32
33 def __init__(self, *args, **kwargs):
34 super().__init__(*args, **kwargs)
35 self.fields['name'].widget.attrs.update({
36 'autocomplete': 'off', 'autofill': 'off'
37 })
38
[end of meinberlin/apps/bplan/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/meinberlin/apps/bplan/forms.py b/meinberlin/apps/bplan/forms.py
--- a/meinberlin/apps/bplan/forms.py
+++ b/meinberlin/apps/bplan/forms.py
@@ -28,7 +28,8 @@
'tile_image_copyright', 'is_archived', 'office_worker_email',
'start_date', 'end_date']
required_for_project_publish = ['name', 'url', 'description',
- 'office_worker_email']
+ 'office_worker_email',
+ 'start_date', 'end_date']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
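
For intuition about what the extended list drives, a hypothetical helper; meinberlin's real publication check lives in its dashboard/form base classes, and this only illustrates the idea behind `required_for_project_publish`:

```python
def missing_for_publish(form):
    """Return names of publish-required fields that are still empty.

    Hypothetical sketch; assumes the form has already been cleaned.
    """
    required = getattr(form.Meta, 'required_for_project_publish', [])
    return [name for name in required if not form.cleaned_data.get(name)]
```
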
| {"golden_diff": "diff --git a/meinberlin/apps/bplan/forms.py b/meinberlin/apps/bplan/forms.py\n--- a/meinberlin/apps/bplan/forms.py\n+++ b/meinberlin/apps/bplan/forms.py\n@@ -28,7 +28,8 @@\n 'tile_image_copyright', 'is_archived', 'office_worker_email',\n 'start_date', 'end_date']\n required_for_project_publish = ['name', 'url', 'description',\n- 'office_worker_email']\n+ 'office_worker_email',\n+ 'start_date', 'end_date']\n \n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n", "issue": "In Bplans required field (start-date & end-date) for publication is not marked as such\n**URL:** https://meinberlin-dev.liqd.net/dashboard/projects/sabinas-bebauungsplan/bplan/\r\n**user:** A Bplan initiator\r\n**expected behaviour:** As a Bplan initiator, I want the form to tell me which fields are required so that I can publish\r\n**behaviour:** Because the start- and enddate are not marked as required for publication, it's unclear that not filling these in is hindering publication\r\n**important screensize:** any\r\n**device & browser:** ubuntu chrome\r\n**Comment/Question:**\r\nThese are just missing red the exclamation point icons. It's a simple fix in forms.py\r\n\r\nScreenshot?\r\n\r\n\r\n\n", "before_files": [{"content": "from django import forms\n\nfrom meinberlin.apps.extprojects.forms import ExternalProjectCreateForm\nfrom meinberlin.apps.extprojects.forms import ExternalProjectForm\n\nfrom . import models\n\n\nclass StatementForm(forms.ModelForm):\n class Meta:\n model = models.Statement\n fields = ['name', 'email', 'statement',\n 'street_number', 'postal_code_city']\n\n\nclass BplanProjectCreateForm(ExternalProjectCreateForm):\n\n class Meta:\n model = models.Bplan\n fields = ['name', 'description', 'tile_image', 'tile_image_copyright']\n\n\nclass BplanProjectForm(ExternalProjectForm):\n\n class Meta:\n model = models.Bplan\n fields = ['name', 'identifier', 'url', 'description', 'tile_image',\n 'tile_image_copyright', 'is_archived', 'office_worker_email',\n 'start_date', 'end_date']\n required_for_project_publish = ['name', 'url', 'description',\n 'office_worker_email']\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['name'].widget.attrs.update({\n 'autocomplete': 'off', 'autofill': 'off'\n })\n", "path": "meinberlin/apps/bplan/forms.py"}]} | 1,093 | 150 |
gh_patches_debug_12785 | rasdani/github-patches | git_diff | learningequality__kolibri-8076 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Catch ValueError during network discovery for when JSON parsing fails
### Observed behavior
When trying to import content channels (from local network or internet), no error message is produced even though the connection isn't established. (The "trying to connect" message stays on forever) This happens when there is a typo in the address. (I used kolibri-demo.learningequality.omg) When there's no internet, an error message DOES get produced.
### Expected behavior
An error message should've been produced.
### User-facing consequences
The user will not know how long they have to wait to see if the connection is being established.
### Errors and logs
…
### Steps to reproduce
Import content channels from local network or internet > input "kolibri-demo.learningequality.omg" or something else with a typo in it.
### Context
Kolibri version: 0.14.3
Operating system: Android 5.1
Browser: Android App
Device: BLU Advance 5.0
</issue>
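A likely mechanism for the silent hang: with a typo such as `.omg`, DNS can still resolve (wildcard DNS, captive portals, ISP search pages), the server answers with an HTML page, and `response.json()` in the requests version used here raises `ValueError`, which is not a `RequestException` and so escapes the connection loop below. A minimal reproduction sketch; the URL is a placeholder for any endpoint returning HTML:

```python
import requests

response = requests.get("http://example.com/")  # returns HTML, not JSON
try:
    info = response.json()
except ValueError:  # JSON decode errors subclass ValueError here
    print("body was not valid JSON, so this is not a Kolibri/Studio endpoint")
```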
<code>
[start of kolibri/core/discovery/utils/network/client.py]
1 import logging
2
3 import requests
4 from six.moves.urllib.parse import urljoin
5
6 from . import errors
7 from .urls import get_normalized_url_variations
8
9 logger = logging.getLogger(__name__)
10
11
12 class NetworkClient(object):
13 DEFAULT_TIMEOUT_IN_SECS = 5
14
15 def __init__(self, base_url=None, address=None, timeout=None, **kwargs):
16 """If an explicit base_url is already known, provide that. If a vague address is provided, we can try to infer the base_url"""
17 if not base_url and not address:
18 raise Exception(
19 "You must provide either a `base_url` or `address` argument"
20 )
21 self.timeout = timeout or self.DEFAULT_TIMEOUT_IN_SECS
22 self.session = requests.Session(**kwargs)
23 if base_url:
24 self.base_url = self._attempt_connections([base_url])
25 else:
26 # normalize the URL and try a number of variations until we find one that's able to connect
27 logger.info(
28 "Attempting connections to variations of the URL: {}".format(address)
29 )
30 self.base_url = self._attempt_connections(
31 get_normalized_url_variations(address)
32 )
33
34 def _attempt_connections(self, urls):
35 # try each of the URLs in turn, returning the first one that succeeds
36 for url in urls:
37 try:
38 logger.info("Attempting connection to: {}".format(url))
39 response = self.get(
40 "/api/public/info/",
41 base_url=url,
42 timeout=self.timeout,
43 allow_redirects=True,
44 )
45 # check that we successfully connected, and if we were redirected that it's still the right endpoint
46 if response.status_code == 200 and response.url.rstrip("/").endswith(
47 "/api/public/info"
48 ):
49 self.info = response.json()
50 if self.info["application"] not in ["studio", "kolibri"]:
51 raise requests.RequestException(
52 "Server is not running Kolibri or Studio"
53 )
54 logger.info("Success! We connected to: {}".format(response.url))
55 return response.url.rstrip("/").replace("api/public/info", "")
56 except (requests.RequestException) as e:
57 logger.info("Unable to connect: {}".format(e))
58
59 # we weren't able to connect to any of the URL variations, so all we can do is throw
60 raise errors.NetworkLocationNotFound()
61
62 def get(self, path, **kwargs):
63 return self.request("get", path, **kwargs)
64
65 def head(self, path, **kwargs):
66 return self.request("head", path, **kwargs)
67
68 def request(self, method, path, base_url=None, **kwargs):
69 base_url = base_url or self.base_url
70 url = urljoin(base_url, path)
71 response = getattr(self.session, method)(url, **kwargs)
72 response.raise_for_status()
73 return response
74
[end of kolibri/core/discovery/utils/network/client.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kolibri/core/discovery/utils/network/client.py b/kolibri/core/discovery/utils/network/client.py
--- a/kolibri/core/discovery/utils/network/client.py
+++ b/kolibri/core/discovery/utils/network/client.py
@@ -55,6 +55,10 @@
return response.url.rstrip("/").replace("api/public/info", "")
except (requests.RequestException) as e:
logger.info("Unable to connect: {}".format(e))
+ except ValueError:
+ logger.info(
+ "Invalid JSON returned when attempting to connect to a remote server"
+ )
# we weren't able to connect to any of the URL variations, so all we can do is throw
raise errors.NetworkLocationNotFound()
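
A hedged sketch of a unit test for the new branch; the module paths match this file, while the mock wiring and test name are illustrative:

```python
from unittest import mock

from kolibri.core.discovery.utils.network import errors
from kolibri.core.discovery.utils.network.client import NetworkClient


def test_invalid_json_falls_through_to_not_found():
    fake_response = mock.Mock(
        status_code=200, url="http://host/api/public/info/")
    fake_response.json.side_effect = ValueError("not JSON")

    with mock.patch.object(NetworkClient, "get", return_value=fake_response):
        try:
            NetworkClient(base_url="http://host/")
        except errors.NetworkLocationNotFound:
            pass  # expected: the ValueError was logged and swallowed
        else:
            raise AssertionError("expected NetworkLocationNotFound")
```
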
| {"golden_diff": "diff --git a/kolibri/core/discovery/utils/network/client.py b/kolibri/core/discovery/utils/network/client.py\n--- a/kolibri/core/discovery/utils/network/client.py\n+++ b/kolibri/core/discovery/utils/network/client.py\n@@ -55,6 +55,10 @@\n return response.url.rstrip(\"/\").replace(\"api/public/info\", \"\")\n except (requests.RequestException) as e:\n logger.info(\"Unable to connect: {}\".format(e))\n+ except ValueError:\n+ logger.info(\n+ \"Invalid JSON returned when attempting to connect to a remote server\"\n+ )\n \n # we weren't able to connect to any of the URL variations, so all we can do is throw\n raise errors.NetworkLocationNotFound()\n", "issue": "Catch ValueError during network discovery for when JSON parsing fails\n<!--\r\nInstructions:\r\n * Fill out the sections below, replace \u2026's with information about your issue\r\n * Use the 'preview' function above this text box to verify formatting before submitting\r\n-->\r\n\r\n### Observed behavior\r\n<!--\r\nDescription of the behavior that was observed, including screenshots or other references when applicable\r\n-->\r\n\r\nWhen trying to import content channels (from local network or internet), no error message is produced even though the connection isn't established. (The \"trying to connect\" message stays on forever) This happens when there is a typo in the address. (I used kolibri-demo.learningequality.omg) When there's no internet, an error message DOES get produced. \r\n\r\n### Expected behavior\r\n<!--\r\nDescription of what behavior was expected but did not occur\r\n-->\r\n\r\nAn error message should've been produced.\r\n\r\n### User-facing consequences\r\n<!--\r\nImplications and real-world consequences for learners, coaches, admins, and other users of the application\r\n-->\r\n\r\nThe user will not know how long they have to wait to see if the connection is being established.\r\n\r\n### Errors and logs\r\n<!--\r\nRelevant logs from:\r\n * the command line\r\n * ~/.kolibri/logs/kolibri.txt\r\n * the browser console\r\n\r\nPlease wrap errors in triple backticks for clean formatting like this:\r\n```\r\n01:10 info: something happened\r\n01:12 error: something bad happened\r\n```\r\n-->\r\n\r\n\u2026\r\n\r\n### Steps to reproduce\r\n<!--\r\nPrecise steps that someone else can follow in order to see this behavior\r\n-->\r\n\r\nImport content channels from local network or internet >input \"kolibri-demo.learningequality.omg\" or something else with a typo in it.\r\n\r\n### Context\r\n<!--\r\nTell us about your environment, including:\r\n * Kolibri version\r\n * Operating system\r\n * Browser\r\n-->\r\n\r\nKolibri version: 0.14.3\r\nOperating system: Android 5.1\r\nBrowser: Android App\r\nDevice: BLU Advance 5.0\r\n\n", "before_files": [{"content": "import logging\n\nimport requests\nfrom six.moves.urllib.parse import urljoin\n\nfrom . import errors\nfrom .urls import get_normalized_url_variations\n\nlogger = logging.getLogger(__name__)\n\n\nclass NetworkClient(object):\n DEFAULT_TIMEOUT_IN_SECS = 5\n\n def __init__(self, base_url=None, address=None, timeout=None, **kwargs):\n \"\"\"If an explicit base_url is already known, provide that. 
If a vague address is provided, we can try to infer the base_url\"\"\"\n if not base_url and not address:\n raise Exception(\n \"You must provide either a `base_url` or `address` argument\"\n )\n self.timeout = timeout or self.DEFAULT_TIMEOUT_IN_SECS\n self.session = requests.Session(**kwargs)\n if base_url:\n self.base_url = self._attempt_connections([base_url])\n else:\n # normalize the URL and try a number of variations until we find one that's able to connect\n logger.info(\n \"Attempting connections to variations of the URL: {}\".format(address)\n )\n self.base_url = self._attempt_connections(\n get_normalized_url_variations(address)\n )\n\n def _attempt_connections(self, urls):\n # try each of the URLs in turn, returning the first one that succeeds\n for url in urls:\n try:\n logger.info(\"Attempting connection to: {}\".format(url))\n response = self.get(\n \"/api/public/info/\",\n base_url=url,\n timeout=self.timeout,\n allow_redirects=True,\n )\n # check that we successfully connected, and if we were redirected that it's still the right endpoint\n if response.status_code == 200 and response.url.rstrip(\"/\").endswith(\n \"/api/public/info\"\n ):\n self.info = response.json()\n if self.info[\"application\"] not in [\"studio\", \"kolibri\"]:\n raise requests.RequestException(\n \"Server is not running Kolibri or Studio\"\n )\n logger.info(\"Success! We connected to: {}\".format(response.url))\n return response.url.rstrip(\"/\").replace(\"api/public/info\", \"\")\n except (requests.RequestException) as e:\n logger.info(\"Unable to connect: {}\".format(e))\n\n # we weren't able to connect to any of the URL variations, so all we can do is throw\n raise errors.NetworkLocationNotFound()\n\n def get(self, path, **kwargs):\n return self.request(\"get\", path, **kwargs)\n\n def head(self, path, **kwargs):\n return self.request(\"head\", path, **kwargs)\n\n def request(self, method, path, base_url=None, **kwargs):\n base_url = base_url or self.base_url\n url = urljoin(base_url, path)\n response = getattr(self.session, method)(url, **kwargs)\n response.raise_for_status()\n return response\n", "path": "kolibri/core/discovery/utils/network/client.py"}]} | 1,702 | 161 |
gh_patches_debug_18834 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3308 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider kopps is broken
During the global build at 2021-06-16-14-42-20, spider **kopps** failed with **0 features** and **1 error**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-06-16-14-42-20/logs/kopps.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-16-14-42-20/output/kopps.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-16-14-42-20/output/kopps.geojson))
</issue>
<code>
[start of locations/spiders/kopps.py]
1 import scrapy
2 import re
3 from locations.items import GeojsonPointItem
4
5 DAY_MAPPING = {
6 "Mon": "Mo",
7 "Tues": "Tu",
8 "Wed": "We",
9 "Thur": "Th",
10 "Fri": "Fr",
11 "Sat": "Sa",
12 "Sun": "Su"
13 }
14 class KoppsSpider(scrapy.Spider):
15 name = "kopps"
16 item_attributes = { 'brand': "Kopps" }
17 allowed_domains = ["www.kopps.com"]
18 download_delay = 1.5
19 start_urls = (
20 'https://www.kopps.com/',
21 )
22
23 def parse_day(self, day):
24 if re.search('-', day):
25 days = day.split('-')
26 osm_days = []
27 if len(days) == 2:
28 for day in days:
29 osm_day = DAY_MAPPING[day.strip()]
30 osm_days.append(osm_day)
31 return "-".join(osm_days)
32
33 def parse_times(self, times):
34 if times.strip() == 'Open 24 hours':
35 return '24/7'
36 hours_to = [x.strip() for x in times.split('-')]
37 cleaned_times = []
38
39 for hour in hours_to:
40 if re.search('pm$', hour):
41 hour = re.sub('pm', '', hour).strip()
42 hour_min = hour.split(":")
43 if int(hour_min[0]) < 12:
44 hour_min[0] = str(12 + int(hour_min[0]))
45 cleaned_times.append(":".join(hour_min))
46
47 if re.search('am$', hour):
48 hour = re.sub('am', '', hour).strip()
49 hour_min = hour.split(":")
50 if len(hour_min[0]) <2:
51 hour_min[0] = hour_min[0].zfill(2)
52 else:
53 hour_min[0] = str( int(hour_min[0]))
54
55 cleaned_times.append(":".join(hour_min))
56 return "-".join(cleaned_times)
57
58 def parse_hours(self, lis):
59 hours = []
60 for li in lis:
61 day_times = li.xpath('normalize-space(./text())').extract_first()
62 day = re.findall(r"^[a-zA-Z-]+" , day_times)
63 if(len(day)>0):
64 day = day[0]
65 else:
66 day = 'Mon-Sun'
67 times = re.findall(r"[0-9]{2}:[0-9]{2}[a|p]m - [0-9]{2}:[0-9]{2}[a|p]m" ,day_times)
68 times = times[0]
69 if times and day:
70 parsed_time = self.parse_times(times)
71 parsed_day = self.parse_day(day)
72 hours.append(parsed_day + ' ' + parsed_time)
73
74 return "; ".join(hours)
75
76
77 def parse(self, response):
78 locations = response.xpath('//div[@id="locations"]/div/div')
79
80 for location in locations:
81 properties = {
82 'addr_full': location.xpath('normalize-space(./div/address/a/text())').extract_first(),
83 'phone': location.xpath('normalize-space(./div/ul/li/span/a/text())').extract_first(),
84 'city': location.xpath('./div/address/a/text()').extract()[1].replace(' ' ,'').split(',')[0].replace('\r\n' ,''),
85 'state': location.xpath('./div/address/a/text()').extract()[1].lstrip().split(',')[1].split(' ')[1],
86 'postcode': location.xpath('./div/address/a/text()').extract()[1].lstrip().split(',')[1].split(' ')[2].replace('\r\n' ,''),
87 'ref': location.xpath('normalize-space(./div/address/a/@href)').extract_first(),
88 'website': response.url,
89 'lat':re.findall(r"\/[0-9]{2}[^(\/)]+z",location.xpath('normalize-space(./div/address/a/@href)').extract_first())[0][1:].split(',')[0],
90 'lon': re.findall(r"\/[0-9]{2}[^(\/)]+z",location.xpath('normalize-space(./div/address/a/@href)').extract_first())[0][1:].split(',')[1],
91 }
92
93 hours = self.parse_hours(location.xpath('./div/ul/li[3]/span'))
94 if hours:
95 properties['opening_hours'] = hours
96
97 yield GeojsonPointItem(**properties)
[end of locations/spiders/kopps.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/kopps.py b/locations/spiders/kopps.py
--- a/locations/spiders/kopps.py
+++ b/locations/spiders/kopps.py
@@ -26,8 +26,9 @@
osm_days = []
if len(days) == 2:
for day in days:
- osm_day = DAY_MAPPING[day.strip()]
- osm_days.append(osm_day)
+ if day.strip() in DAY_MAPPING:
+ osm_day = DAY_MAPPING[day.strip()]
+ osm_days.append(osm_day)
return "-".join(osm_days)
def parse_times(self, times):
@@ -69,7 +70,8 @@
if times and day:
parsed_time = self.parse_times(times)
parsed_day = self.parse_day(day)
- hours.append(parsed_day + ' ' + parsed_time)
+ if parsed_day and parsed_time:
+ hours.append(parsed_day + ' ' + parsed_time)
return "; ".join(hours)
| {"golden_diff": "diff --git a/locations/spiders/kopps.py b/locations/spiders/kopps.py\n--- a/locations/spiders/kopps.py\n+++ b/locations/spiders/kopps.py\n@@ -26,8 +26,9 @@\n osm_days = []\n if len(days) == 2:\n for day in days:\n- osm_day = DAY_MAPPING[day.strip()]\n- osm_days.append(osm_day)\n+ if day.strip() in DAY_MAPPING:\n+ osm_day = DAY_MAPPING[day.strip()]\n+ osm_days.append(osm_day)\n return \"-\".join(osm_days)\n \n def parse_times(self, times):\n@@ -69,7 +70,8 @@\n if times and day:\n parsed_time = self.parse_times(times)\n parsed_day = self.parse_day(day)\n- hours.append(parsed_day + ' ' + parsed_time)\n+ if parsed_day and parsed_time:\n+ hours.append(parsed_day + ' ' + parsed_time)\n \n return \"; \".join(hours)\n", "issue": "Spider kopps is broken\nDuring the global build at 2021-06-16-14-42-20, spider **kopps** failed with **0 features** and **1 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-06-16-14-42-20/logs/kopps.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-16-14-42-20/output/kopps.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-16-14-42-20/output/kopps.geojson))\n", "before_files": [{"content": "import scrapy\nimport re\nfrom locations.items import GeojsonPointItem\n\nDAY_MAPPING = {\n \"Mon\": \"Mo\",\n \"Tues\": \"Tu\",\n \"Wed\": \"We\",\n \"Thur\": \"Th\",\n \"Fri\": \"Fr\",\n \"Sat\": \"Sa\",\n \"Sun\": \"Su\"\n}\nclass KoppsSpider(scrapy.Spider):\n name = \"kopps\"\n item_attributes = { 'brand': \"Kopps\" }\n allowed_domains = [\"www.kopps.com\"]\n download_delay = 1.5\n start_urls = (\n 'https://www.kopps.com/',\n )\n\n def parse_day(self, day):\n if re.search('-', day):\n days = day.split('-')\n osm_days = []\n if len(days) == 2:\n for day in days:\n osm_day = DAY_MAPPING[day.strip()]\n osm_days.append(osm_day)\n return \"-\".join(osm_days)\n\n def parse_times(self, times):\n if times.strip() == 'Open 24 hours':\n return '24/7'\n hours_to = [x.strip() for x in times.split('-')]\n cleaned_times = []\n\n for hour in hours_to:\n if re.search('pm$', hour):\n hour = re.sub('pm', '', hour).strip()\n hour_min = hour.split(\":\")\n if int(hour_min[0]) < 12:\n hour_min[0] = str(12 + int(hour_min[0]))\n cleaned_times.append(\":\".join(hour_min))\n\n if re.search('am$', hour):\n hour = re.sub('am', '', hour).strip()\n hour_min = hour.split(\":\")\n if len(hour_min[0]) <2:\n hour_min[0] = hour_min[0].zfill(2)\n else:\n hour_min[0] = str( int(hour_min[0]))\n\n cleaned_times.append(\":\".join(hour_min))\n return \"-\".join(cleaned_times)\n\n def parse_hours(self, lis):\n hours = []\n for li in lis:\n day_times = li.xpath('normalize-space(./text())').extract_first()\n day = re.findall(r\"^[a-zA-Z-]+\" , day_times)\n if(len(day)>0):\n day = day[0]\n else:\n day = 'Mon-Sun'\n times = re.findall(r\"[0-9]{2}:[0-9]{2}[a|p]m - [0-9]{2}:[0-9]{2}[a|p]m\" ,day_times)\n times = times[0]\n if times and day:\n parsed_time = self.parse_times(times)\n parsed_day = self.parse_day(day)\n hours.append(parsed_day + ' ' + parsed_time)\n\n return \"; \".join(hours)\n\n\n def parse(self, response):\n locations = response.xpath('//div[@id=\"locations\"]/div/div')\n\n for location in locations:\n properties = {\n 'addr_full': location.xpath('normalize-space(./div/address/a/text())').extract_first(),\n 'phone': location.xpath('normalize-space(./div/ul/li/span/a/text())').extract_first(),\n 'city': location.xpath('./div/address/a/text()').extract()[1].replace(' ' ,'').split(',')[0].replace('\\r\\n' 
,''),\n 'state': location.xpath('./div/address/a/text()').extract()[1].lstrip().split(',')[1].split(' ')[1],\n 'postcode': location.xpath('./div/address/a/text()').extract()[1].lstrip().split(',')[1].split(' ')[2].replace('\\r\\n' ,''),\n 'ref': location.xpath('normalize-space(./div/address/a/@href)').extract_first(),\n 'website': response.url,\n 'lat':re.findall(r\"\\/[0-9]{2}[^(\\/)]+z\",location.xpath('normalize-space(./div/address/a/@href)').extract_first())[0][1:].split(',')[0],\n 'lon': re.findall(r\"\\/[0-9]{2}[^(\\/)]+z\",location.xpath('normalize-space(./div/address/a/@href)').extract_first())[0][1:].split(',')[1],\n }\n\n hours = self.parse_hours(location.xpath('./div/ul/li[3]/span'))\n if hours:\n properties['opening_hours'] = hours\n\n yield GeojsonPointItem(**properties)", "path": "locations/spiders/kopps.py"}]} | 1,867 | 229 |
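
A note on the failure mode in the record above: the guards added by the golden diff (`if day.strip() in DAY_MAPPING`, `if parsed_day and parsed_time`) strongly suggest the spider was dying on an unhandled lookup error when a scraped day label fell outside `DAY_MAPPING`. The snippet below is a minimal, self-contained sketch of that guard pattern; it reuses the spider's mapping but is a simplified stand-in, not the spider itself.

```python
# Self-contained sketch of the guard pattern from the golden diff above.
DAY_MAPPING = {"Mon": "Mo", "Tues": "Tu", "Wed": "We", "Thur": "Th",
               "Fri": "Fr", "Sat": "Sa", "Sun": "Su"}

def parse_day_range(day_range):
    osm_days = []
    for day in day_range.split("-"):
        if day.strip() in DAY_MAPPING:  # membership test instead of a raw lookup
            osm_days.append(DAY_MAPPING[day.strip()])
    return "-".join(osm_days)

assert parse_day_range("Mon-Fri") == "Mo-Fr"
assert parse_day_range("Monday-Friday") == ""  # unknown labels are skipped, not fatal
```
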
gh_patches_debug_12705 | rasdani/github-patches | git_diff | Lightning-Universe__lightning-bolts-315 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
add caching for used dataset
## 🚀 Feature
At the moment the datasets are downloaded for each CI run (it seems so, as I observed some download failures)
### Motivation
Speeds up runs and avoids random connection failures
</issue>
<code>
[start of pl_bolts/models/mnist_module.py]
1 import os
2 from argparse import ArgumentParser
3 from warnings import warn
4
5 import torch
6 from pytorch_lightning import LightningModule, Trainer
7 from torch.nn import functional as F
8 from torch.utils.data import DataLoader, random_split
9
10 try:
11 from torchvision import transforms
12 from torchvision.datasets import MNIST
13 except ModuleNotFoundError:
14 warn('You want to use `torchvision` which is not installed yet,' # pragma: no-cover
15 ' install it with `pip install torchvision`.')
16
17
18 class LitMNIST(LightningModule):
19 def __init__(self, hidden_dim=128, learning_rate=1e-3, batch_size=32, num_workers=4, data_dir='', **kwargs):
20 super().__init__()
21 self.save_hyperparameters()
22
23 self.l1 = torch.nn.Linear(28 * 28, self.hparams.hidden_dim)
24 self.l2 = torch.nn.Linear(self.hparams.hidden_dim, 10)
25
26 self.mnist_train = None
27 self.mnist_val = None
28
29 def forward(self, x):
30 x = x.view(x.size(0), -1)
31 x = torch.relu(self.l1(x))
32 x = torch.relu(self.l2(x))
33 return x
34
35 def training_step(self, batch, batch_idx):
36 x, y = batch
37 y_hat = self(x)
38 loss = F.cross_entropy(y_hat, y)
39 self.log('train_loss', loss)
40 return loss
41
42 def validation_step(self, batch, batch_idx):
43 x, y = batch
44 y_hat = self(x)
45 loss = F.cross_entropy(y_hat, y)
46 self.log('val_loss', loss)
47
48 def test_step(self, batch, batch_idx):
49 x, y = batch
50 y_hat = self(x)
51 loss = F.cross_entropy(y_hat, y)
52 self.log('test_loss', loss)
53
54 def configure_optimizers(self):
55 return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
56
57 def prepare_data(self):
58 MNIST(self.hparams.data_dir, train=True, download=True, transform=transforms.ToTensor())
59
60 def train_dataloader(self):
61 dataset = MNIST(self.hparams.data_dir, train=True, download=False, transform=transforms.ToTensor())
62 mnist_train, _ = random_split(dataset, [55000, 5000])
63 loader = DataLoader(mnist_train, batch_size=self.hparams.batch_size, num_workers=self.hparams.num_workers)
64 return loader
65
66 def val_dataloader(self):
67 dataset = MNIST(self.hparams.data_dir, train=True, download=False, transform=transforms.ToTensor())
68 _, mnist_val = random_split(dataset, [55000, 5000])
69 loader = DataLoader(mnist_val, batch_size=self.hparams.batch_size, num_workers=self.hparams.num_workers)
70 return loader
71
72 def test_dataloader(self):
73 test_dataset = MNIST(os.getcwd(), train=False, download=True, transform=transforms.ToTensor())
74 loader = DataLoader(test_dataset, batch_size=self.hparams.batch_size, num_workers=self.hparams.num_workers)
75 return loader
76
77 @staticmethod
78 def add_model_specific_args(parent_parser):
79 parser = ArgumentParser(parents=[parent_parser], add_help=False)
80 parser.add_argument('--batch_size', type=int, default=32)
81 parser.add_argument('--num_workers', type=int, default=4)
82 parser.add_argument('--hidden_dim', type=int, default=128)
83 parser.add_argument('--data_dir', type=str, default='')
84 parser.add_argument('--learning_rate', type=float, default=0.0001)
85 return parser
86
87
88 def cli_main():
89 # args
90 parser = ArgumentParser()
91 parser = Trainer.add_argparse_args(parser)
92 parser = LitMNIST.add_model_specific_args(parser)
93 args = parser.parse_args()
94
95 # model
96 model = LitMNIST(**vars(args))
97
98 # training
99 trainer = Trainer.from_argparse_args(args)
100 trainer.fit(model)
101
102
103 if __name__ == '__main__': # pragma: no cover
104 cli_main()
105
[end of pl_bolts/models/mnist_module.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pl_bolts/models/mnist_module.py b/pl_bolts/models/mnist_module.py
--- a/pl_bolts/models/mnist_module.py
+++ b/pl_bolts/models/mnist_module.py
@@ -1,4 +1,3 @@
-import os
from argparse import ArgumentParser
from warnings import warn
@@ -70,7 +69,7 @@
return loader
def test_dataloader(self):
- test_dataset = MNIST(os.getcwd(), train=False, download=True, transform=transforms.ToTensor())
+ test_dataset = MNIST(self.hparams.data_dir, train=False, download=True, transform=transforms.ToTensor())
loader = DataLoader(test_dataset, batch_size=self.hparams.batch_size, num_workers=self.hparams.num_workers)
return loader
| {"golden_diff": "diff --git a/pl_bolts/models/mnist_module.py b/pl_bolts/models/mnist_module.py\n--- a/pl_bolts/models/mnist_module.py\n+++ b/pl_bolts/models/mnist_module.py\n@@ -1,4 +1,3 @@\n-import os\n from argparse import ArgumentParser\n from warnings import warn\n \n@@ -70,7 +69,7 @@\n return loader\n \n def test_dataloader(self):\n- test_dataset = MNIST(os.getcwd(), train=False, download=True, transform=transforms.ToTensor())\n+ test_dataset = MNIST(self.hparams.data_dir, train=False, download=True, transform=transforms.ToTensor())\n loader = DataLoader(test_dataset, batch_size=self.hparams.batch_size, num_workers=self.hparams.num_workers)\n return loader\n", "issue": "add caching for used dataset\n## \ud83d\ude80 Feature\r\n\r\natm for each CI run the datasets are downloaded (seems so as I observed som downloading failers)\r\n\r\n### Motivation\r\n\r\nSpeed-up runs and avoids random connection failers\r\n\n", "before_files": [{"content": "import os\nfrom argparse import ArgumentParser\nfrom warnings import warn\n\nimport torch\nfrom pytorch_lightning import LightningModule, Trainer\nfrom torch.nn import functional as F\nfrom torch.utils.data import DataLoader, random_split\n\ntry:\n from torchvision import transforms\n from torchvision.datasets import MNIST\nexcept ModuleNotFoundError:\n warn('You want to use `torchvision` which is not installed yet,' # pragma: no-cover\n ' install it with `pip install torchvision`.')\n\n\nclass LitMNIST(LightningModule):\n def __init__(self, hidden_dim=128, learning_rate=1e-3, batch_size=32, num_workers=4, data_dir='', **kwargs):\n super().__init__()\n self.save_hyperparameters()\n\n self.l1 = torch.nn.Linear(28 * 28, self.hparams.hidden_dim)\n self.l2 = torch.nn.Linear(self.hparams.hidden_dim, 10)\n\n self.mnist_train = None\n self.mnist_val = None\n\n def forward(self, x):\n x = x.view(x.size(0), -1)\n x = torch.relu(self.l1(x))\n x = torch.relu(self.l2(x))\n return x\n\n def training_step(self, batch, batch_idx):\n x, y = batch\n y_hat = self(x)\n loss = F.cross_entropy(y_hat, y)\n self.log('train_loss', loss)\n return loss\n\n def validation_step(self, batch, batch_idx):\n x, y = batch\n y_hat = self(x)\n loss = F.cross_entropy(y_hat, y)\n self.log('val_loss', loss)\n\n def test_step(self, batch, batch_idx):\n x, y = batch\n y_hat = self(x)\n loss = F.cross_entropy(y_hat, y)\n self.log('test_loss', loss)\n\n def configure_optimizers(self):\n return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)\n\n def prepare_data(self):\n MNIST(self.hparams.data_dir, train=True, download=True, transform=transforms.ToTensor())\n\n def train_dataloader(self):\n dataset = MNIST(self.hparams.data_dir, train=True, download=False, transform=transforms.ToTensor())\n mnist_train, _ = random_split(dataset, [55000, 5000])\n loader = DataLoader(mnist_train, batch_size=self.hparams.batch_size, num_workers=self.hparams.num_workers)\n return loader\n\n def val_dataloader(self):\n dataset = MNIST(self.hparams.data_dir, train=True, download=False, transform=transforms.ToTensor())\n _, mnist_val = random_split(dataset, [55000, 5000])\n loader = DataLoader(mnist_val, batch_size=self.hparams.batch_size, num_workers=self.hparams.num_workers)\n return loader\n\n def test_dataloader(self):\n test_dataset = MNIST(os.getcwd(), train=False, download=True, transform=transforms.ToTensor())\n loader = DataLoader(test_dataset, batch_size=self.hparams.batch_size, num_workers=self.hparams.num_workers)\n return loader\n\n @staticmethod\n def 
add_model_specific_args(parent_parser):\n parser = ArgumentParser(parents=[parent_parser], add_help=False)\n parser.add_argument('--batch_size', type=int, default=32)\n parser.add_argument('--num_workers', type=int, default=4)\n parser.add_argument('--hidden_dim', type=int, default=128)\n parser.add_argument('--data_dir', type=str, default='')\n parser.add_argument('--learning_rate', type=float, default=0.0001)\n return parser\n\n\ndef cli_main():\n # args\n parser = ArgumentParser()\n parser = Trainer.add_argparse_args(parser)\n parser = LitMNIST.add_model_specific_args(parser)\n args = parser.parse_args()\n\n # model\n model = LitMNIST(**vars(args))\n\n # training\n trainer = Trainer.from_argparse_args(args)\n trainer.fit(model)\n\n\nif __name__ == '__main__': # pragma: no cover\n cli_main()\n", "path": "pl_bolts/models/mnist_module.py"}]} | 1,696 | 170 |
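
The fix in the record above is small but worth spelling out: `test_dataloader` resolved its dataset root via `os.getcwd()` while the other splits used the configurable `hparams.data_dir`, so a CI cache mounted at `data_dir` never covered the test set. The sketch below shows the resulting pattern outside Lightning; it assumes `torchvision` is installed, and `DATA_DIR` is an environment variable invented here to stand for the cache location.

```python
import os

from torchvision import transforms
from torchvision.datasets import MNIST

# One configurable root for every split; point it at a persistent CI cache volume.
DATA_DIR = os.environ.get("DATA_DIR", "./datasets")

def mnist_split(train: bool) -> MNIST:
    # download=True is effectively a no-op once the files already sit in DATA_DIR,
    # so repeated CI runs stop hitting the network (and its random failures).
    return MNIST(DATA_DIR, train=train, download=True,
                 transform=transforms.ToTensor())
```
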
gh_patches_debug_5031 | rasdani/github-patches | git_diff | svthalia__concrexit-2190 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Visual quirk in the login failure message
### Describe the bug
When you fail to log in on desktop (or probably any wider screen) the message saying this shows up on the left and the login fields are offset to the right. I would expect the message to appear on top, while the login fields were to stay in the same horizontal position.
### How to reproduce
Steps to reproduce the behaviour:
1. Go to the login page
2. Enter invalid credentials
### Expected behaviour
The login fields do not move to the right. The error message is either shown above the login fields or next to them.
### Screenshots

### Additional context
Not sure in what other situations these message boxes are used, but it may occur in other places too. Haven't checked.
</issue>
<code>
[start of website/thaliawebsite/templatetags/alert.py]
1 from django import template
2
3 register = template.Library()
4
5
6 @register.inclusion_tag("includes/alert.html")
7 def alert(alert_type="info", message=None, dismissible=False, extra_classes=""):
8 if dismissible:
9 extra_classes += " alert-dimissable"
10 return {
11 "type": alert_type,
12 "message": message,
13 "dismissible": dismissible,
14 "extra_classes": extra_classes,
15 }
16
[end of website/thaliawebsite/templatetags/alert.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/thaliawebsite/templatetags/alert.py b/website/thaliawebsite/templatetags/alert.py
--- a/website/thaliawebsite/templatetags/alert.py
+++ b/website/thaliawebsite/templatetags/alert.py
@@ -6,7 +6,7 @@
@register.inclusion_tag("includes/alert.html")
def alert(alert_type="info", message=None, dismissible=False, extra_classes=""):
if dismissible:
- extra_classes += " alert-dimissable"
+ extra_classes += " alert-dismissible"
return {
"type": alert_type,
"message": message,
| {"golden_diff": "diff --git a/website/thaliawebsite/templatetags/alert.py b/website/thaliawebsite/templatetags/alert.py\n--- a/website/thaliawebsite/templatetags/alert.py\n+++ b/website/thaliawebsite/templatetags/alert.py\n@@ -6,7 +6,7 @@\n @register.inclusion_tag(\"includes/alert.html\")\n def alert(alert_type=\"info\", message=None, dismissible=False, extra_classes=\"\"):\n if dismissible:\n- extra_classes += \" alert-dimissable\"\n+ extra_classes += \" alert-dismissible\"\n return {\n \"type\": alert_type,\n \"message\": message,\n", "issue": "Visual quirk in the login failure message\n### Describe the bug\r\nWhen you fail to log in on desktop (or probably any wider screen) the message saying this shows up on the left and the login fields are offset to the right. I would expect the message to appear on top, while the login fields were to stay in the same horizontal position.\r\n\r\n### How to reproduce\r\nSteps to reproduce the behaviour:\r\n1. Go to the login page\r\n2. Enter invalid credentials\r\n\r\n### Expected behaviour\r\nThe login fields do not move to the right. The error message is either shown above the login fields or next to them.\r\n\r\n### Screenshots\r\n\r\n\r\n\r\n### Additional context\r\nNot sure in what other situations these message boxes are used, but it may occur in other places too. Haven't checked.\r\n\n", "before_files": [{"content": "from django import template\n\nregister = template.Library()\n\n\[email protected]_tag(\"includes/alert.html\")\ndef alert(alert_type=\"info\", message=None, dismissible=False, extra_classes=\"\"):\n if dismissible:\n extra_classes += \" alert-dimissable\"\n return {\n \"type\": alert_type,\n \"message\": message,\n \"dismissible\": dismissible,\n \"extra_classes\": extra_classes,\n }\n", "path": "website/thaliawebsite/templatetags/alert.py"}]} | 905 | 145 |
gh_patches_debug_31883 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-1385 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[PS-1] RPC service `push_model` implementation
</issue>
<code>
[start of elasticdl/python/ps/parameters.py]
1 import tensorflow as tf
2
3 from elasticdl.python.common.ndarray import tensor_to_ndarray
4 from elasticdl.python.ps.embedding_table import create_embedding_table
5
6
7 class Parameters(object):
8 """
9 There are two kinds of parameters:
10
11 1. non-embedding parameters, or dense tensor parameters. We save it
12 in a hashmap `non-embedding_params`, the key is the parameter name,
13 the value is a tf.Variable` object.
14 2. embedding parameters, or row-sparse parameters. We save it in a
15 hashmap `embedding_params`, the key is the embedding layer name,
16 the value is an `EmbeddingTable` object.
17
18 """
19
20 def __init__(self):
21 self.init_status = False
22 self.non_embedding_params = {}
23 self.embedding_params = {}
24
25 def get_embedding_param(self, name, indices):
26 if name not in self.embedding_params:
27 raise ValueError(
28 "Please initialize embedding param %s first!", name
29 )
30 return self.embedding_params[name].get(indices)
31
32 def set_embedding_param(self, name, indices, values):
33 if name not in self.embedding_params:
34 raise ValueError(
35 "Please initialize embedding param %s first!", name
36 )
37 self.embedding_params[name].set(indices, values)
38
39 def init_from_model_pb(self, model_pb):
40 if not self.init_status:
41 tensors_pb = model_pb.param
42 embeddings_pb = model_pb.embedding_table_info
43 self._init_non_embedding_params(tensors_pb)
44 self._init_embedding_params(embeddings_pb)
45 self.init_status = True
46
47 def _init_non_embedding_params(self, tensors_pb):
48 for pb in tensors_pb:
49 name = pb.name
50 arr = tensor_to_ndarray(pb)
51 var = tf.Variable(name=name, initial_value=arr, trainable=True)
52 self.non_embedding_params[name] = var
53
54 def _init_embedding_params(self, embeddings_pb):
55 for pb in embeddings_pb:
56 self.embedding_params[pb.name] = create_embedding_table(pb)
57
[end of elasticdl/python/ps/parameters.py]
[start of elasticdl/python/ps/servicer.py]
1 from google.protobuf import empty_pb2
2
3 from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc
4
5
6 class PserverServicer(elasticdl_pb2_grpc.PserverServicer):
7 """PS service implementation"""
8
9 def __init__(
10 self,
11 parameters,
12 grads_to_wait,
13 optimizer,
14 lr_staleness_modulation=False,
15 use_async=False,
16 ):
17 self._parameters = parameters
18 self._grads_to_wait = grads_to_wait
19 self._optimizer = optimizer
20 self._lr_staleness_modulation = lr_staleness_modulation
21 self._use_async = use_async
22 self._version = 0
23
24 def pull_variable(self, request, _):
25 # TODO: implement this RPC service
26 return elasticdl_pb2.PullVariableResponse()
27
28 def pull_embedding_vector(self, request, _):
29 # TODO: implement this RPC service
30 return elasticdl_pb2.Tensor()
31
32 def push_model(self, request, _):
33 # TODO: implement this RPC service
34 return empty_pb2.Empty()
35
36 def push_gradient(self, request, _):
37 # TODO: implement this RPC service
38 return elasticdl_pb2.PushGradientResponse()
39
[end of elasticdl/python/ps/servicer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticdl/python/ps/parameters.py b/elasticdl/python/ps/parameters.py
--- a/elasticdl/python/ps/parameters.py
+++ b/elasticdl/python/ps/parameters.py
@@ -18,6 +18,7 @@
"""
def __init__(self):
+ self.version = 0
self.init_status = False
self.non_embedding_params = {}
self.embedding_params = {}
@@ -42,6 +43,7 @@
embeddings_pb = model_pb.embedding_table_info
self._init_non_embedding_params(tensors_pb)
self._init_embedding_params(embeddings_pb)
+ self.version = model_pb.version
self.init_status = True
def _init_non_embedding_params(self, tensors_pb):
diff --git a/elasticdl/python/ps/servicer.py b/elasticdl/python/ps/servicer.py
--- a/elasticdl/python/ps/servicer.py
+++ b/elasticdl/python/ps/servicer.py
@@ -1,3 +1,5 @@
+import threading
+
from google.protobuf import empty_pb2
from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc
@@ -20,6 +22,7 @@
self._lr_staleness_modulation = lr_staleness_modulation
self._use_async = use_async
self._version = 0
+ self._lock = threading.Lock()
def pull_variable(self, request, _):
# TODO: implement this RPC service
@@ -30,7 +33,8 @@
return elasticdl_pb2.Tensor()
def push_model(self, request, _):
- # TODO: implement this RPC service
+ with self._lock:
+ self._parameters.init_from_model_pb(request)
return empty_pb2.Empty()
def push_gradient(self, request, _):
| {"golden_diff": "diff --git a/elasticdl/python/ps/parameters.py b/elasticdl/python/ps/parameters.py\n--- a/elasticdl/python/ps/parameters.py\n+++ b/elasticdl/python/ps/parameters.py\n@@ -18,6 +18,7 @@\n \"\"\"\n \n def __init__(self):\n+ self.version = 0\n self.init_status = False\n self.non_embedding_params = {}\n self.embedding_params = {}\n@@ -42,6 +43,7 @@\n embeddings_pb = model_pb.embedding_table_info\n self._init_non_embedding_params(tensors_pb)\n self._init_embedding_params(embeddings_pb)\n+ self.version = model_pb.version\n self.init_status = True\n \n def _init_non_embedding_params(self, tensors_pb):\ndiff --git a/elasticdl/python/ps/servicer.py b/elasticdl/python/ps/servicer.py\n--- a/elasticdl/python/ps/servicer.py\n+++ b/elasticdl/python/ps/servicer.py\n@@ -1,3 +1,5 @@\n+import threading\n+\n from google.protobuf import empty_pb2\n \n from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc\n@@ -20,6 +22,7 @@\n self._lr_staleness_modulation = lr_staleness_modulation\n self._use_async = use_async\n self._version = 0\n+ self._lock = threading.Lock()\n \n def pull_variable(self, request, _):\n # TODO: implement this RPC service\n@@ -30,7 +33,8 @@\n return elasticdl_pb2.Tensor()\n \n def push_model(self, request, _):\n- # TODO: implement this RPC service\n+ with self._lock:\n+ self._parameters.init_from_model_pb(request)\n return empty_pb2.Empty()\n \n def push_gradient(self, request, _):\n", "issue": "[PS-1] RPC service `push_model` implementation\n\n", "before_files": [{"content": "import tensorflow as tf\n\nfrom elasticdl.python.common.ndarray import tensor_to_ndarray\nfrom elasticdl.python.ps.embedding_table import create_embedding_table\n\n\nclass Parameters(object):\n \"\"\"\n There are two kinds of parameters:\n\n 1. non-embedding parameters, or dense tensor parameters. We save it\n in a hashmap `non-embedding_params`, the key is the parameter name,\n the value is a tf.Variable` object.\n 2. embedding parameters, or row-sparse parameters. 
We save it in a\n hashmap `embedding_params`, the key is the embedding layer name,\n the value is an `EmbeddingTable` object.\n\n \"\"\"\n\n def __init__(self):\n self.init_status = False\n self.non_embedding_params = {}\n self.embedding_params = {}\n\n def get_embedding_param(self, name, indices):\n if name not in self.embedding_params:\n raise ValueError(\n \"Please initialize embedding param %s first!\", name\n )\n return self.embedding_params[name].get(indices)\n\n def set_embedding_param(self, name, indices, values):\n if name not in self.embedding_params:\n raise ValueError(\n \"Please initialize embedding param %s first!\", name\n )\n self.embedding_params[name].set(indices, values)\n\n def init_from_model_pb(self, model_pb):\n if not self.init_status:\n tensors_pb = model_pb.param\n embeddings_pb = model_pb.embedding_table_info\n self._init_non_embedding_params(tensors_pb)\n self._init_embedding_params(embeddings_pb)\n self.init_status = True\n\n def _init_non_embedding_params(self, tensors_pb):\n for pb in tensors_pb:\n name = pb.name\n arr = tensor_to_ndarray(pb)\n var = tf.Variable(name=name, initial_value=arr, trainable=True)\n self.non_embedding_params[name] = var\n\n def _init_embedding_params(self, embeddings_pb):\n for pb in embeddings_pb:\n self.embedding_params[pb.name] = create_embedding_table(pb)\n", "path": "elasticdl/python/ps/parameters.py"}, {"content": "from google.protobuf import empty_pb2\n\nfrom elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc\n\n\nclass PserverServicer(elasticdl_pb2_grpc.PserverServicer):\n \"\"\"PS service implementation\"\"\"\n\n def __init__(\n self,\n parameters,\n grads_to_wait,\n optimizer,\n lr_staleness_modulation=False,\n use_async=False,\n ):\n self._parameters = parameters\n self._grads_to_wait = grads_to_wait\n self._optimizer = optimizer\n self._lr_staleness_modulation = lr_staleness_modulation\n self._use_async = use_async\n self._version = 0\n\n def pull_variable(self, request, _):\n # TODO: implement this RPC service\n return elasticdl_pb2.PullVariableResponse()\n\n def pull_embedding_vector(self, request, _):\n # TODO: implement this RPC service\n return elasticdl_pb2.Tensor()\n\n def push_model(self, request, _):\n # TODO: implement this RPC service\n return empty_pb2.Empty()\n\n def push_gradient(self, request, _):\n # TODO: implement this RPC service\n return elasticdl_pb2.PushGradientResponse()\n", "path": "elasticdl/python/ps/servicer.py"}]} | 1,445 | 414 |
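
The golden diff above implements `push_model` as "initialize once, under a lock": the servicer gains a `threading.Lock`, and `Parameters.init_from_model_pb` records the model version and flips `init_status` so later pushes become no-ops. The following is a runnable reduction of that control flow, with a plain dict standing in for the protobuf message and simplified stand-ins for the real classes:

```python
import threading

class Parameters:
    """Stand-in for elasticdl's Parameters: initialize from a model at most once."""
    def __init__(self):
        self.version = 0
        self.init_status = False

    def init_from_model_pb(self, model_pb):
        if not self.init_status:  # idempotent: repeated pushes are ignored
            self.version = model_pb["version"]
            self.init_status = True

class PserverServicer:
    def __init__(self, parameters):
        self._parameters = parameters
        self._lock = threading.Lock()  # serializes concurrent push_model RPCs

    def push_model(self, request, _context=None):
        with self._lock:
            self._parameters.init_from_model_pb(request)

ps = PserverServicer(Parameters())
ps.push_model({"version": 3})
ps.push_model({"version": 7})  # ignored: parameters already initialized
assert ps._parameters.version == 3
```
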
gh_patches_debug_6 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-1273 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PEP257 docstrings for file "./docs/__init__.py"
Cover the `./docs/__init__.py` file with docstrings and follow [PEP257](https://www.python.org/dev/peps/pep-0257/). We use [pydocstyle](https://pypi.org/project/pydocstyle/) for validation.
Current validation log:
```
./docs/__init__.py:1 at module level:
D104: Missing docstring in public package
```
Subtask for #742
</issue>
<code>
[start of docs/__init__.py]
[end of docs/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/__init__.py b/docs/__init__.py
--- a/docs/__init__.py
+++ b/docs/__init__.py
@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+
+"""Main package for docs."""
| {"golden_diff": "diff --git a/docs/__init__.py b/docs/__init__.py\n--- a/docs/__init__.py\n+++ b/docs/__init__.py\n@@ -0,0 +1,3 @@\n+# -*- coding: utf-8 -*-\n+\n+\"\"\"Main package for docs.\"\"\"\n", "issue": "PEP257 docstrings for file \"./docs/__init__.py\"\nCover `./docs/__init__.py` file with docstrings and follow [PEP257](https://www.python.org/dev/peps/pep-0257/). We use [pydocstyle](https://pypi.org/project/pydocstyle/) for validation.\r\n\r\nCurrent validation log:\r\n\r\n```\r\n./docs/__init__.py:1 at module level:\r\n D104: Missing docstring in public package\r\n```\r\n\r\nSubtask for #742 \n", "before_files": [{"content": "", "path": "docs/__init__.py"}]} | 653 | 59 |
gh_patches_debug_17994 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-6938 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Anthropologie spider produces transposed coordinates
https://www.alltheplaces.xyz/map/#7.69/-75.171/39.95

The cause is the upstream data:
https://www.anthropologie.com/stores/rittenhouse-square-philadelphia

It might be worth doing any of the following:
- Suspend the lat/long from the parser for now
- Contact the company (I'll probably do that shortly) about the bug
- Adding some kind of high-level validation that checks a scraper's results against the coordinate bounds expected for it?
</issue>
<code>
[start of locations/spiders/anthropologie.py]
1 from scrapy.spiders import SitemapSpider
2
3 from locations.structured_data_spider import StructuredDataSpider
4
5
6 class AnthropologieSpider(SitemapSpider, StructuredDataSpider):
7 name = "anthropologie"
8 item_attributes = {"brand": "Anthropologie", "brand_wikidata": "Q4773903"}
9 allowed_domains = ["anthropologie.com"]
10 sitemap_urls = ["https://www.anthropologie.com/store_sitemap.xml"]
11 sitemap_rules = [("/stores/", "parse_sd")]
12 requires_proxy = True
13
[end of locations/spiders/anthropologie.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/anthropologie.py b/locations/spiders/anthropologie.py
--- a/locations/spiders/anthropologie.py
+++ b/locations/spiders/anthropologie.py
@@ -1,5 +1,6 @@
from scrapy.spiders import SitemapSpider
+from locations.items import set_closed
from locations.structured_data_spider import StructuredDataSpider
@@ -10,3 +11,17 @@
sitemap_urls = ["https://www.anthropologie.com/store_sitemap.xml"]
sitemap_rules = [("/stores/", "parse_sd")]
requires_proxy = True
+
+ def pre_process_data(self, ld_data, **kwargs):
+ ld_data["geo"]["latitude"], ld_data["geo"]["longitude"] = (
+ ld_data["geo"]["longitude"],
+ ld_data["geo"]["latitude"],
+ )
+
+ def post_process_item(self, item, response, ld_data, **kwargs):
+ item["branch"] = item.pop("name").removeprefix(" - Anthropologie Store")
+
+ if item["branch"].startswith("Closed - ") or item["branch"].endswith(" - Closed"):
+ set_closed(item)
+
+ yield item
| {"golden_diff": "diff --git a/locations/spiders/anthropologie.py b/locations/spiders/anthropologie.py\n--- a/locations/spiders/anthropologie.py\n+++ b/locations/spiders/anthropologie.py\n@@ -1,5 +1,6 @@\n from scrapy.spiders import SitemapSpider\n \n+from locations.items import set_closed\n from locations.structured_data_spider import StructuredDataSpider\n \n \n@@ -10,3 +11,17 @@\n sitemap_urls = [\"https://www.anthropologie.com/store_sitemap.xml\"]\n sitemap_rules = [(\"/stores/\", \"parse_sd\")]\n requires_proxy = True\n+\n+ def pre_process_data(self, ld_data, **kwargs):\n+ ld_data[\"geo\"][\"latitude\"], ld_data[\"geo\"][\"longitude\"] = (\n+ ld_data[\"geo\"][\"longitude\"],\n+ ld_data[\"geo\"][\"latitude\"],\n+ )\n+\n+ def post_process_item(self, item, response, ld_data, **kwargs):\n+ item[\"branch\"] = item.pop(\"name\").removeprefix(\" - Anthropologie Store\")\n+\n+ if item[\"branch\"].startswith(\"Closed - \") or item[\"branch\"].endswith(\" - Closed\"):\n+ set_closed(item)\n+\n+ yield item\n", "issue": "Anthropologie spider produces transposed coordinates\nhttps://www.alltheplaces.xyz/map/#7.69/-75.171/39.95\r\n\r\n\r\n\r\nThe cause is the upstream data:\r\n\r\nhttps://www.anthropologie.com/stores/rittenhouse-square-philadelphia\r\n\r\n\r\nIt might be worth doing any of the following:\r\n\r\n- Suspend the lat/long from the parser for now\r\n- Contact the company (I'll probably do that shortly) about the bug\r\n- Any kind of high level validations that can check the expected bounds for a scraper, vs the results?\r\n\r\n\n", "before_files": [{"content": "from scrapy.spiders import SitemapSpider\n\nfrom locations.structured_data_spider import StructuredDataSpider\n\n\nclass AnthropologieSpider(SitemapSpider, StructuredDataSpider):\n name = \"anthropologie\"\n item_attributes = {\"brand\": \"Anthropologie\", \"brand_wikidata\": \"Q4773903\"}\n allowed_domains = [\"anthropologie.com\"]\n sitemap_urls = [\"https://www.anthropologie.com/store_sitemap.xml\"]\n sitemap_rules = [(\"/stores/\", \"parse_sd\")]\n requires_proxy = True\n", "path": "locations/spiders/anthropologie.py"}]} | 910 | 269 |
gh_patches_debug_25775 | rasdani/github-patches | git_diff | apache__tvm-2759 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[TEST][FLAKY] test_dlpack
Both #2749 and #2353 encountered a segmentation fault in test_dlpack.
http://ci.tvm.ai:8080/blue/organizations/jenkins/tvm/detail/PR-2749/2/pipeline
http://ci.tvm.ai:8080/blue/organizations/jenkins/tvm/detail/PR-2353/48/pipeline
cc @eqy, could you help look at this?
</issue>
<code>
[start of python/tvm/_ffi/_ctypes/ndarray.py]
1 # pylint: disable=invalid-name
2 """Runtime NDArray api"""
3 from __future__ import absolute_import
4
5 import ctypes
6 from ..base import _LIB, check_call, c_str
7 from ..runtime_ctypes import TVMArrayHandle, TVMNDArrayContainerHandle
8 from .types import RETURN_SWITCH, C_TO_PY_ARG_SWITCH, _wrap_arg_func, _return_handle
9
10
11 TVMPyCapsuleDestructor = ctypes.CFUNCTYPE(None, ctypes.c_void_p)
12 _c_str_dltensor = c_str('dltensor')
13 _c_str_used_dltensor = c_str('used_dltensor')
14
15
16 # used for PyCapsule manipulation
17 if hasattr(ctypes, 'pythonapi'):
18 ctypes.pythonapi.PyCapsule_GetName.restype = ctypes.c_char_p
19 ctypes.pythonapi.PyCapsule_GetPointer.restype = ctypes.c_void_p
20 ctypes.pythonapi.PyCapsule_New.restype = ctypes.py_object
21
22
23 def _from_dlpack(dltensor):
24 dltensor = ctypes.py_object(dltensor)
25 if ctypes.pythonapi.PyCapsule_IsValid(dltensor, _c_str_dltensor):
26 ptr = ctypes.pythonapi.PyCapsule_GetPointer(dltensor, _c_str_dltensor)
27 handle = TVMArrayHandle()
28 check_call(_LIB.TVMArrayFromDLPack(ptr, ctypes.byref(handle)))
29 ctypes.pythonapi.PyCapsule_SetName(dltensor, _c_str_used_dltensor)
30 ctypes.pythonapi.PyCapsule_SetDestructor(dltensor, TVMPyCapsuleDestructor(0))
31 return _make_array(handle, False, False)
32 raise ValueError("Expect a dltensor field, PyCapsule can only be consumed once")
33
34
35 def _dlpack_deleter(pycapsule):
36 pycapsule = ctypes.cast(pycapsule, ctypes.py_object)
37 if ctypes.pythonapi.PyCapsule_IsValid(pycapsule, _c_str_dltensor):
38 ptr = ctypes.pythonapi.PyCapsule_GetPointer(pycapsule, _c_str_dltensor)
39 _LIB.TVMDLManagedTensorCallDeleter(ptr)
40 ctypes.pythonapi.PyCapsule_SetDestructor(dltensor, TVMPyCapsuleDestructor(0))
41
42 _c_dlpack_deleter = TVMPyCapsuleDestructor(_dlpack_deleter)
43
44
45 class NDArrayBase(object):
46 """A simple Device/CPU Array object in runtime."""
47 __slots__ = ["handle", "is_view"]
48 # pylint: disable=no-member
49 def __init__(self, handle, is_view=False):
50 """Initialize the function with handle
51
52 Parameters
53 ----------
54 handle : TVMArrayHandle
55 the handle to the underlying C++ TVMArray
56 """
57 self.handle = handle
58 self.is_view = is_view
59
60 def __del__(self):
61 if not self.is_view and _LIB:
62 check_call(_LIB.TVMArrayFree(self.handle))
63
64 @property
65 def _tvm_handle(self):
66 return ctypes.cast(self.handle, ctypes.c_void_p).value
67
68 def to_dlpack(self):
69 """Produce an array from a DLPack Tensor without copying memory
70
71 Returns
72 -------
73 dlpack : DLPack tensor view of the array data
74 """
75 handle = ctypes.c_void_p()
76 check_call(_LIB.TVMArrayToDLPack(self.handle, ctypes.byref(handle)))
77 return ctypes.pythonapi.PyCapsule_New(handle, _c_str_dltensor, _c_dlpack_deleter)
78
79
80 def _make_array(handle, is_view, is_container):
81 global _TVM_ND_CLS
82 handle = ctypes.cast(handle, TVMArrayHandle)
83 fcreate = _CLASS_NDARRAY
84 if is_container and _TVM_ND_CLS:
85 array_type_info = ctypes.cast(handle, TVMNDArrayContainerHandle).array_type_info.value
86 if array_type_info > 0:
87 fcreate = _TVM_ND_CLS[array_type_info]
88 return fcreate(handle, is_view)
89
90 _TVM_COMPATS = ()
91
92 def _reg_extension(cls, fcreate):
93 global _TVM_COMPATS
94 _TVM_COMPATS += (cls,)
95 if fcreate:
96 fret = lambda x: fcreate(_return_handle(x))
97 RETURN_SWITCH[cls._tvm_tcode] = fret
98 C_TO_PY_ARG_SWITCH[cls._tvm_tcode] = _wrap_arg_func(fret, cls._tvm_tcode)
99
100 _TVM_ND_CLS = {}
101
102 def _reg_ndarray(cls, fcreate):
103 global _TVM_ND_CLS
104 _TVM_ND_CLS[cls._array_type_code] = fcreate
105
106 _CLASS_NDARRAY = None
107
108 def _set_class_ndarray(cls):
109 global _CLASS_NDARRAY
110 _CLASS_NDARRAY = cls
111
[end of python/tvm/_ffi/_ctypes/ndarray.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/tvm/_ffi/_ctypes/ndarray.py b/python/tvm/_ffi/_ctypes/ndarray.py
--- a/python/tvm/_ffi/_ctypes/ndarray.py
+++ b/python/tvm/_ffi/_ctypes/ndarray.py
@@ -24,6 +24,8 @@
dltensor = ctypes.py_object(dltensor)
if ctypes.pythonapi.PyCapsule_IsValid(dltensor, _c_str_dltensor):
ptr = ctypes.pythonapi.PyCapsule_GetPointer(dltensor, _c_str_dltensor)
+ # enforce type to make sure it works for all ctypes
+ ptr = ctypes.cast(ptr, ctypes.c_void_p)
handle = TVMArrayHandle()
check_call(_LIB.TVMArrayFromDLPack(ptr, ctypes.byref(handle)))
ctypes.pythonapi.PyCapsule_SetName(dltensor, _c_str_used_dltensor)
@@ -36,6 +38,8 @@
pycapsule = ctypes.cast(pycapsule, ctypes.py_object)
if ctypes.pythonapi.PyCapsule_IsValid(pycapsule, _c_str_dltensor):
ptr = ctypes.pythonapi.PyCapsule_GetPointer(pycapsule, _c_str_dltensor)
+ # enforce type to make sure it works for all ctypes
+ ptr = ctypes.cast(ctypes.c_void_p, ptr)
_LIB.TVMDLManagedTensorCallDeleter(ptr)
ctypes.pythonapi.PyCapsule_SetDestructor(dltensor, TVMPyCapsuleDestructor(0))
| {"golden_diff": "diff --git a/python/tvm/_ffi/_ctypes/ndarray.py b/python/tvm/_ffi/_ctypes/ndarray.py\n--- a/python/tvm/_ffi/_ctypes/ndarray.py\n+++ b/python/tvm/_ffi/_ctypes/ndarray.py\n@@ -24,6 +24,8 @@\n dltensor = ctypes.py_object(dltensor)\n if ctypes.pythonapi.PyCapsule_IsValid(dltensor, _c_str_dltensor):\n ptr = ctypes.pythonapi.PyCapsule_GetPointer(dltensor, _c_str_dltensor)\n+ # enforce type to make sure it works for all ctypes\n+ ptr = ctypes.cast(ptr, ctypes.c_void_p)\n handle = TVMArrayHandle()\n check_call(_LIB.TVMArrayFromDLPack(ptr, ctypes.byref(handle)))\n ctypes.pythonapi.PyCapsule_SetName(dltensor, _c_str_used_dltensor)\n@@ -36,6 +38,8 @@\n pycapsule = ctypes.cast(pycapsule, ctypes.py_object)\n if ctypes.pythonapi.PyCapsule_IsValid(pycapsule, _c_str_dltensor):\n ptr = ctypes.pythonapi.PyCapsule_GetPointer(pycapsule, _c_str_dltensor)\n+ # enforce type to make sure it works for all ctypes\n+ ptr = ctypes.cast(ctypes.c_void_p, ptr)\n _LIB.TVMDLManagedTensorCallDeleter(ptr)\n ctypes.pythonapi.PyCapsule_SetDestructor(dltensor, TVMPyCapsuleDestructor(0))\n", "issue": "[TEST][FLAKY] test_dlpack\nBoth #2749 and #2353 encountered seg fault error at test_dlpack.\r\nhttp://ci.tvm.ai:8080/blue/organizations/jenkins/tvm/detail/PR-2749/2/pipeline\r\nhttp://ci.tvm.ai:8080/blue/organizations/jenkins/tvm/detail/PR-2353/48/pipeline\r\n\r\ncc @eqy , could you help look at this?\n", "before_files": [{"content": "# pylint: disable=invalid-name\n\"\"\"Runtime NDArray api\"\"\"\nfrom __future__ import absolute_import\n\nimport ctypes\nfrom ..base import _LIB, check_call, c_str\nfrom ..runtime_ctypes import TVMArrayHandle, TVMNDArrayContainerHandle\nfrom .types import RETURN_SWITCH, C_TO_PY_ARG_SWITCH, _wrap_arg_func, _return_handle\n\n\nTVMPyCapsuleDestructor = ctypes.CFUNCTYPE(None, ctypes.c_void_p)\n_c_str_dltensor = c_str('dltensor')\n_c_str_used_dltensor = c_str('used_dltensor')\n\n\n# used for PyCapsule manipulation\nif hasattr(ctypes, 'pythonapi'):\n ctypes.pythonapi.PyCapsule_GetName.restype = ctypes.c_char_p\n ctypes.pythonapi.PyCapsule_GetPointer.restype = ctypes.c_void_p\n ctypes.pythonapi.PyCapsule_New.restype = ctypes.py_object\n\n\ndef _from_dlpack(dltensor):\n dltensor = ctypes.py_object(dltensor)\n if ctypes.pythonapi.PyCapsule_IsValid(dltensor, _c_str_dltensor):\n ptr = ctypes.pythonapi.PyCapsule_GetPointer(dltensor, _c_str_dltensor)\n handle = TVMArrayHandle()\n check_call(_LIB.TVMArrayFromDLPack(ptr, ctypes.byref(handle)))\n ctypes.pythonapi.PyCapsule_SetName(dltensor, _c_str_used_dltensor)\n ctypes.pythonapi.PyCapsule_SetDestructor(dltensor, TVMPyCapsuleDestructor(0))\n return _make_array(handle, False, False)\n raise ValueError(\"Expect a dltensor field, PyCapsule can only be consumed once\")\n\n\ndef _dlpack_deleter(pycapsule):\n pycapsule = ctypes.cast(pycapsule, ctypes.py_object)\n if ctypes.pythonapi.PyCapsule_IsValid(pycapsule, _c_str_dltensor):\n ptr = ctypes.pythonapi.PyCapsule_GetPointer(pycapsule, _c_str_dltensor)\n _LIB.TVMDLManagedTensorCallDeleter(ptr)\n ctypes.pythonapi.PyCapsule_SetDestructor(dltensor, TVMPyCapsuleDestructor(0))\n\n_c_dlpack_deleter = TVMPyCapsuleDestructor(_dlpack_deleter)\n\n\nclass NDArrayBase(object):\n \"\"\"A simple Device/CPU Array object in runtime.\"\"\"\n __slots__ = [\"handle\", \"is_view\"]\n # pylint: disable=no-member\n def __init__(self, handle, is_view=False):\n \"\"\"Initialize the function with handle\n\n Parameters\n ----------\n handle : TVMArrayHandle\n the handle to the underlying C++ TVMArray\n \"\"\"\n self.handle = 
handle\n self.is_view = is_view\n\n def __del__(self):\n if not self.is_view and _LIB:\n check_call(_LIB.TVMArrayFree(self.handle))\n\n @property\n def _tvm_handle(self):\n return ctypes.cast(self.handle, ctypes.c_void_p).value\n\n def to_dlpack(self):\n \"\"\"Produce an array from a DLPack Tensor without copying memory\n\n Returns\n -------\n dlpack : DLPack tensor view of the array data\n \"\"\"\n handle = ctypes.c_void_p()\n check_call(_LIB.TVMArrayToDLPack(self.handle, ctypes.byref(handle)))\n return ctypes.pythonapi.PyCapsule_New(handle, _c_str_dltensor, _c_dlpack_deleter)\n\n\ndef _make_array(handle, is_view, is_container):\n global _TVM_ND_CLS\n handle = ctypes.cast(handle, TVMArrayHandle)\n fcreate = _CLASS_NDARRAY\n if is_container and _TVM_ND_CLS:\n array_type_info = ctypes.cast(handle, TVMNDArrayContainerHandle).array_type_info.value\n if array_type_info > 0:\n fcreate = _TVM_ND_CLS[array_type_info]\n return fcreate(handle, is_view)\n\n_TVM_COMPATS = ()\n\ndef _reg_extension(cls, fcreate):\n global _TVM_COMPATS\n _TVM_COMPATS += (cls,)\n if fcreate:\n fret = lambda x: fcreate(_return_handle(x))\n RETURN_SWITCH[cls._tvm_tcode] = fret\n C_TO_PY_ARG_SWITCH[cls._tvm_tcode] = _wrap_arg_func(fret, cls._tvm_tcode)\n\n_TVM_ND_CLS = {}\n\ndef _reg_ndarray(cls, fcreate):\n global _TVM_ND_CLS\n _TVM_ND_CLS[cls._array_type_code] = fcreate\n\n_CLASS_NDARRAY = None\n\ndef _set_class_ndarray(cls):\n global _CLASS_NDARRAY\n _CLASS_NDARRAY = cls\n", "path": "python/tvm/_ffi/_ctypes/ndarray.py"}]} | 1,947 | 348 |
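
The mechanism behind this flaky segfault is a ctypes subtlety: a foreign function whose `restype` is `c_void_p` hands back a plain Python `int`, and when that int is later passed to another C function with no `argtypes` declared, ctypes marshals it as the platform's default C `int`, which can truncate a 64-bit pointer. Hence the golden diff re-wraps the pointer with `ctypes.cast(..., ctypes.c_void_p)` before passing it on. A standalone demonstration of the re-wrapping pattern (assumes a Unix-like libc; not TVM code):

```python
import ctypes

libc = ctypes.CDLL(None)                 # load the running process's libc (Unix-like)
libc.malloc.restype = ctypes.c_void_p

raw = libc.malloc(16)
assert isinstance(raw, int)              # restype=c_void_p still yields a bare int

ptr = ctypes.cast(raw, ctypes.c_void_p)  # enforce the pointer type, as the patch does
libc.free(ptr)                           # now passed as a real void*, never truncated
```
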
gh_patches_debug_15795 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-6544 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make scheduling Unes (featured items) easier by changing the publication date field type.
As of today, when creating a Une you have to fill in the "Date de publication" field as plain text in a format like "2023/08/21 10:00", which is fairly unpleasant to type.

The suggestions shown are Unes I have already created.
By adding the "datetime-local" type to this input, we could use the browsers'/OS's native widgets for this kind of field.

</issue>
<code>
[start of zds/featured/forms.py]
1 from crispy_forms.bootstrap import StrictButton
2 from crispy_forms.helper import FormHelper
3 from crispy_forms.layout import Layout, Field, ButtonHolder
4 from django import forms
5 from django.urls import reverse
6 from django.utils.translation import gettext_lazy as _
7
8 from zds.featured.models import FeaturedResource, FeaturedMessage
9
10
11 class FeaturedResourceForm(forms.ModelForm):
12 class Meta:
13 model = FeaturedResource
14
15 fields = ["title", "type", "authors", "image_url", "url"]
16
17 widgets = {
18 "title": forms.TextInput(attrs={"placeholder": _("Titre de la Une")}),
19 "type": forms.TextInput(attrs={"placeholder": _("ex: Un projet, Un article, Un tutoriel...")}),
20 "authors": forms.TextInput(attrs={"placeholder": _("Des auteurs (ou pas) ?")}),
21 "image_url": forms.URLInput(
22 attrs={"placeholder": _("Lien vers l'image de la Une (dimensions: 228x228px).")}
23 ),
24 "url": forms.URLInput(attrs={"placeholder": _("Lien vers la ressource.")}),
25 }
26
27 major_update = forms.BooleanField(
28 label=_("Mise à jour majeure (fera passer la Une en première position lors d'un changement)"),
29 initial=False,
30 required=False,
31 )
32
33 pubdate = forms.DateTimeField(
34 label=_("Date de publication (exemple: 25/12/2015 15:00 ou 2015-12-25T15:00)"),
35 input_formats=[
36 "%d/%m/%Y %H:%M:%S",
37 "%Y-%m-%d %H:%M:%S", # full format with second
38 "%Y-%m-%dT%H:%M", # datetime field format
39 "%Y-%m-%d %H:%M",
40 "%d/%m/%Y %H:%M", # without second
41 "%Y-%m-%d",
42 "%d/%m/%Y", # day only
43 ],
44 widget=forms.DateTimeInput(
45 attrs={"placeholder": _("Exemple : 25/12/2016 10:00"), "type": "text"},
46 format="%d/%m/%Y %H:%M", # datetime field format
47 ),
48 )
49
50 request = forms.IntegerField(widget=forms.HiddenInput(), required=False)
51
52 def __init__(self, *args, **kwargs):
53 hide_major_update_field = kwargs.pop("hide_major_update_field", False)
54
55 super().__init__(*args, **kwargs)
56 self.helper = FormHelper()
57 self.helper.form_class = "content-wrapper"
58 self.helper.form_method = "post"
59 self.helper.form_action = reverse("featured:resource-create")
60
61 fields = [Field("request"), Field("title"), Field("type"), Field("authors"), Field("image_url"), Field("url")]
62
63 if not hide_major_update_field:
64 fields.append(Field("major_update"))
65
66 fields.extend(
67 [
68 Field("pubdate"),
69 ButtonHolder(
70 StrictButton(_("Enregistrer"), type="submit"),
71 ),
72 ]
73 )
74
75 self.helper.layout = Layout(*fields)
76
77
78 class FeaturedMessageForm(forms.ModelForm):
79 class Meta:
80 model = FeaturedMessage
81
82 fields = ["hook", "message", "url"]
83
84 widgets = {
85 "hook": forms.TextInput(attrs={"placeholder": _('Mot d\'accroche court ("Nouveau !")')}),
86 "message": forms.TextInput(attrs={"placeholder": _("Message à afficher")}),
87 "url": forms.URLInput(attrs={"placeholder": _("Lien vers la description de la ressource")}),
88 }
89
90 def __init__(self, *args, **kwargs):
91 super().__init__(*args, **kwargs)
92 self.helper = FormHelper()
93 self.helper.form_class = "content-wrapper"
94 self.helper.form_method = "post"
95 self.helper.form_action = reverse("featured:message-create")
96
97 self.helper.layout = Layout(
98 Field("hook"),
99 Field("message"),
100 Field("url"),
101 ButtonHolder(
102 StrictButton(_("Enregistrer"), type="submit"),
103 ),
104 )
105
[end of zds/featured/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zds/featured/forms.py b/zds/featured/forms.py
--- a/zds/featured/forms.py
+++ b/zds/featured/forms.py
@@ -31,20 +31,8 @@
)
pubdate = forms.DateTimeField(
- label=_("Date de publication (exemple: 25/12/2015 15:00 ou 2015-12-25T15:00)"),
- input_formats=[
- "%d/%m/%Y %H:%M:%S",
- "%Y-%m-%d %H:%M:%S", # full format with second
- "%Y-%m-%dT%H:%M", # datetime field format
- "%Y-%m-%d %H:%M",
- "%d/%m/%Y %H:%M", # without second
- "%Y-%m-%d",
- "%d/%m/%Y", # day only
- ],
- widget=forms.DateTimeInput(
- attrs={"placeholder": _("Exemple : 25/12/2016 10:00"), "type": "text"},
- format="%d/%m/%Y %H:%M", # datetime field format
- ),
+ label=_("Date de publication (exemple: 25/12/2015 15:00)"),
+ widget=forms.DateTimeInput(attrs={"type": "datetime-local"}),
)
request = forms.IntegerField(widget=forms.HiddenInput(), required=False)
| {"golden_diff": "diff --git a/zds/featured/forms.py b/zds/featured/forms.py\n--- a/zds/featured/forms.py\n+++ b/zds/featured/forms.py\n@@ -31,20 +31,8 @@\n )\n \n pubdate = forms.DateTimeField(\n- label=_(\"Date de publication (exemple: 25/12/2015 15:00 ou 2015-12-25T15:00)\"),\n- input_formats=[\n- \"%d/%m/%Y %H:%M:%S\",\n- \"%Y-%m-%d %H:%M:%S\", # full format with second\n- \"%Y-%m-%dT%H:%M\", # datetime field format\n- \"%Y-%m-%d %H:%M\",\n- \"%d/%m/%Y %H:%M\", # without second\n- \"%Y-%m-%d\",\n- \"%d/%m/%Y\", # day only\n- ],\n- widget=forms.DateTimeInput(\n- attrs={\"placeholder\": _(\"Exemple : 25/12/2016 10:00\"), \"type\": \"text\"},\n- format=\"%d/%m/%Y %H:%M\", # datetime field format\n- ),\n+ label=_(\"Date de publication (exemple: 25/12/2015 15:00)\"),\n+ widget=forms.DateTimeInput(attrs={\"type\": \"datetime-local\"}),\n )\n \n request = forms.IntegerField(widget=forms.HiddenInput(), required=False)\n", "issue": "Faciliter la programmation des Unes en modifiant le type de la date de publication.\n\u00c0 ce jour, quand on cr\u00e9er une Unes il faut remplir le champ \"Date de publication\" avec un format texte de style \"2023/08/21 10:00\". C'est assez d\u00e9sagr\u00e9able \u00e0 remplir. \r\n\r\n\r\nLes propositions sont des Unes que j'ai d\u00e9j\u00e0 faite.\r\n\r\nEn ajoutant le type \"datetime-local\" \u00e0 cette input on pourrait acc\u00e8der aux interfaces natives des navigateurs/OS pour ce genre d'input.\r\n\r\n\n", "before_files": [{"content": "from crispy_forms.bootstrap import StrictButton\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Field, ButtonHolder\nfrom django import forms\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\n\nfrom zds.featured.models import FeaturedResource, FeaturedMessage\n\n\nclass FeaturedResourceForm(forms.ModelForm):\n    class Meta:\n        model = FeaturedResource\n\n        fields = [\"title\", \"type\", \"authors\", \"image_url\", \"url\"]\n\n        widgets = {\n            \"title\": forms.TextInput(attrs={\"placeholder\": _(\"Titre de la Une\")}),\n            \"type\": forms.TextInput(attrs={\"placeholder\": _(\"ex: Un projet, Un article, Un tutoriel...\")}),\n            \"authors\": forms.TextInput(attrs={\"placeholder\": _(\"Des auteurs (ou pas)\u00a0?\")}),\n            \"image_url\": forms.URLInput(\n                attrs={\"placeholder\": _(\"Lien vers l'image de la Une (dimensions: 228x228px).\")}\n            ),\n            \"url\": forms.URLInput(attrs={\"placeholder\": _(\"Lien vers la ressource.\")}),\n        }\n\n    major_update = forms.BooleanField(\n        label=_(\"Mise \u00e0 jour majeure (fera passer la Une en premi\u00e8re position lors d'un changement)\"),\n        initial=False,\n        required=False,\n    )\n\n    pubdate = forms.DateTimeField(\n        label=_(\"Date de publication (exemple: 25/12/2015 15:00 ou 2015-12-25T15:00)\"),\n        input_formats=[\n            \"%d/%m/%Y %H:%M:%S\",\n            \"%Y-%m-%d %H:%M:%S\",  # full format with second\n            \"%Y-%m-%dT%H:%M\",  # datetime field format\n            \"%Y-%m-%d %H:%M\",\n            \"%d/%m/%Y %H:%M\",  # without second\n            \"%Y-%m-%d\",\n            \"%d/%m/%Y\",  # day only\n        ],\n        widget=forms.DateTimeInput(\n            attrs={\"placeholder\": _(\"Exemple : 25/12/2016 10:00\"), \"type\": \"text\"},\n            format=\"%d/%m/%Y %H:%M\",  # datetime field format\n        ),\n    )\n\n    request = forms.IntegerField(widget=forms.HiddenInput(), required=False)\n\n    def __init__(self, *args, **kwargs):\n        hide_major_update_field = kwargs.pop(\"hide_major_update_field\", False)\n\n        super().__init__(*args, **kwargs)\n        self.helper = FormHelper()\n        self.helper.form_class = \"content-wrapper\"\n        self.helper.form_method = \"post\"\n        self.helper.form_action = reverse(\"featured:resource-create\")\n\n        fields = [Field(\"request\"), Field(\"title\"), Field(\"type\"), Field(\"authors\"), Field(\"image_url\"), Field(\"url\")]\n\n        if not hide_major_update_field:\n            fields.append(Field(\"major_update\"))\n\n        fields.extend(\n            [\n                Field(\"pubdate\"),\n                ButtonHolder(\n                    StrictButton(_(\"Enregistrer\"), type=\"submit\"),\n                ),\n            ]\n        )\n\n        self.helper.layout = Layout(*fields)\n\n\nclass FeaturedMessageForm(forms.ModelForm):\n    class Meta:\n        model = FeaturedMessage\n\n        fields = [\"hook\", \"message\", \"url\"]\n\n        widgets = {\n            \"hook\": forms.TextInput(attrs={\"placeholder\": _('Mot d\\'accroche court (\"Nouveau\u00a0!\")')}),\n            \"message\": forms.TextInput(attrs={\"placeholder\": _(\"Message \u00e0 afficher\")}),\n            \"url\": forms.URLInput(attrs={\"placeholder\": _(\"Lien vers la description de la ressource\")}),\n        }\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.helper = FormHelper()\n        self.helper.form_class = \"content-wrapper\"\n        self.helper.form_method = \"post\"\n        self.helper.form_action = reverse(\"featured:message-create\")\n\n        self.helper.layout = Layout(\n            Field(\"hook\"),\n            Field(\"message\"),\n            Field(\"url\"),\n            ButtonHolder(\n                StrictButton(_(\"Enregistrer\"), type=\"submit\"),\n            ),\n        )\n", "path": "zds/featured/forms.py"}]} | 1,883 | 347 |
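
For readers skimming the patch above: the whole fix is swapping a free-text `DateTimeInput` for the browser-native `datetime-local` widget. A minimal, self-contained sketch of that pattern follows (plain Django with no crispy-forms; the standalone `settings.configure()` boilerplate and the form name are illustrative, and ISO-8601 parsing of the submitted value assumes Django >= 3.1):

```python
import django
from django.conf import settings

settings.configure(USE_TZ=False)  # just enough config to use forms standalone
django.setup()

from django import forms

class PublicationForm(forms.Form):
    # type="datetime-local" makes browsers render their native date/time
    # picker, so users never type "25/12/2015 15:00" by hand.
    pubdate = forms.DateTimeField(
        widget=forms.DateTimeInput(attrs={"type": "datetime-local"})
    )

# Browsers submit the value as ISO 8601 ("YYYY-MM-DDTHH:MM"), which
# Django >= 3.1 parses without any custom input_formats list.
form = PublicationForm(data={"pubdate": "2023-08-21T10:00"})
assert form.is_valid(), form.errors
print(form.cleaned_data["pubdate"])  # 2023-08-21 10:00:00
```

This is also why the golden diff can delete the whole `input_formats` list: once the browser controls the submitted format, only the ISO shape ever arrives.
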
gh_patches_debug_8191 | rasdani/github-patches | git_diff | TheAlgorithms__Python-10361 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
missing_number algorithm dosen't work as intended (bit_manipulation/missing_number.py)
### Repository commit
d0c54acd75cedf14cff353869482a0487fea1697
### Python version (python --version)
Python 3.12.0
### Dependencies version (pip freeze)
setuptools==68.2.2
wheel==0.41.2
### Expected behavior
for array [1,3,4,5,6] the output should be 2
### Actual behavior
the output got is 4
</issue>
<code>
[start of bit_manipulation/missing_number.py]
1 def find_missing_number(nums: list[int]) -> int:
2 """
3 Finds the missing number in a list of consecutive integers.
4
5 Args:
6 nums: A list of integers.
7
8 Returns:
9 The missing number.
10
11 Example:
12 >>> find_missing_number([0, 1, 3, 4])
13 2
14 """
15 n = len(nums)
16 missing_number = n
17
18 for i in range(n):
19 missing_number ^= i ^ nums[i]
20
21 return missing_number
22
[end of bit_manipulation/missing_number.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bit_manipulation/missing_number.py b/bit_manipulation/missing_number.py
--- a/bit_manipulation/missing_number.py
+++ b/bit_manipulation/missing_number.py
@@ -11,11 +11,18 @@
Example:
>>> find_missing_number([0, 1, 3, 4])
2
+ >>> find_missing_number([1, 3, 4, 5, 6])
+ 2
+ >>> find_missing_number([6, 5, 4, 2, 1])
+ 3
+ >>> find_missing_number([6, 1, 5, 3, 4])
+ 2
"""
- n = len(nums)
- missing_number = n
+ low = min(nums)
+ high = max(nums)
+ missing_number = high
- for i in range(n):
- missing_number ^= i ^ nums[i]
+ for i in range(low, high):
+ missing_number ^= i ^ nums[i - low]
return missing_number
| {"golden_diff": "diff --git a/bit_manipulation/missing_number.py b/bit_manipulation/missing_number.py\n--- a/bit_manipulation/missing_number.py\n+++ b/bit_manipulation/missing_number.py\n@@ -11,11 +11,18 @@\n Example:\n >>> find_missing_number([0, 1, 3, 4])\n 2\n+ >>> find_missing_number([1, 3, 4, 5, 6])\n+ 2\n+ >>> find_missing_number([6, 5, 4, 2, 1])\n+ 3\n+ >>> find_missing_number([6, 1, 5, 3, 4])\n+ 2\n \"\"\"\n- n = len(nums)\n- missing_number = n\n+ low = min(nums)\n+ high = max(nums)\n+ missing_number = high\n \n- for i in range(n):\n- missing_number ^= i ^ nums[i]\n+ for i in range(low, high):\n+ missing_number ^= i ^ nums[i - low]\n \n return missing_number\n", "issue": "missing_number algorithm dosen't work as intended (bit_manipulation/missing_number.py)\n### Repository commit\n\nd0c54acd75cedf14cff353869482a0487fea1697\n\n### Python version (python --version)\n\nPython 3.12.0\n\n### Dependencies version (pip freeze)\n\nsetuptools==68.2.2\r\nwheel==0.41.2\n\n### Expected behavior\n\nfor array [1,3,4,5,6] the output should be 2\n\n### Actual behavior\n\nthe output got is 4\n", "before_files": [{"content": "def find_missing_number(nums: list[int]) -> int:\n \"\"\"\n Finds the missing number in a list of consecutive integers.\n\n Args:\n nums: A list of integers.\n\n Returns:\n The missing number.\n\n Example:\n >>> find_missing_number([0, 1, 3, 4])\n 2\n \"\"\"\n n = len(nums)\n missing_number = n\n\n for i in range(n):\n missing_number ^= i ^ nums[i]\n\n return missing_number\n", "path": "bit_manipulation/missing_number.py"}]} | 820 | 244 |
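
A compact way to see why the golden diff above anchors the XOR at `min(nums)`/`max(nums)` instead of `len(nums)`: the index-based trick only telescopes when the run of consecutive integers starts at 0. A standalone sketch mirroring the patched function (test values taken from the issue and the diff's doctests):

```python
def find_missing_number(nums):
    # XOR every expected value in [low, high] against every observed value;
    # matching pairs cancel and only the missing number survives.
    low, high = min(nums), max(nums)
    missing = high
    for i in range(low, high):
        missing ^= i ^ nums[i - low]
    return missing

assert find_missing_number([0, 1, 3, 4]) == 2
assert find_missing_number([1, 3, 4, 5, 6]) == 2  # the failing case in the issue
assert find_missing_number([6, 5, 4, 2, 1]) == 3  # XOR is order-independent
print("all cases pass")
```

The pre-patch version seeded the accumulator with `len(nums)` and XORed raw indexes, which silently assumes the sequence starts at 0; that is exactly how `[1, 3, 4, 5, 6]` produced the reported answer of 4.
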
gh_patches_debug_15727 | rasdani/github-patches | git_diff | crytic__slither-561 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AssertionError when obtaining address of library
```solidity
library UnsafeMath {
function add(uint a, uint b) external returns (uint) {
return a + b;
}
}
contract Test {
function getUnsafeMathAddr() public view returns (address) {
return address(UnsafeMath);
}
}
```
https://solidity.readthedocs.io/en/latest/contracts.html#libraries:~:text=It%20is%20possible%20to%20obtain%20the%20address%20of%20a%20library
</issue>
<code>
[start of slither/slithir/operations/type_conversion.py]
1 from slither.core.solidity_types.type import Type
2 from slither.slithir.operations.lvalue import OperationWithLValue
3 from slither.slithir.utils.utils import is_valid_lvalue, is_valid_rvalue
4
5
6 class TypeConversion(OperationWithLValue):
7
8 def __init__(self, result, variable, variable_type):
9 super().__init__()
10 assert is_valid_rvalue(variable)
11 assert is_valid_lvalue(result)
12 assert isinstance(variable_type, Type)
13
14 self._variable = variable
15 self._type = variable_type
16 self._lvalue = result
17
18
19 @property
20 def variable(self):
21 return self._variable
22
23 @property
24 def type(self):
25 return self._type
26
27 @property
28 def read(self):
29 return [self.variable]
30
31 def __str__(self):
32 return str(self.lvalue) +' = CONVERT {} to {}'.format(self.variable, self.type)
33
[end of slither/slithir/operations/type_conversion.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/slither/slithir/operations/type_conversion.py b/slither/slithir/operations/type_conversion.py
--- a/slither/slithir/operations/type_conversion.py
+++ b/slither/slithir/operations/type_conversion.py
@@ -1,3 +1,4 @@
+from slither.core.declarations import Contract
from slither.core.solidity_types.type import Type
from slither.slithir.operations.lvalue import OperationWithLValue
from slither.slithir.utils.utils import is_valid_lvalue, is_valid_rvalue
@@ -7,7 +8,7 @@
def __init__(self, result, variable, variable_type):
super().__init__()
- assert is_valid_rvalue(variable)
+ assert is_valid_rvalue(variable) or isinstance(variable, Contract)
assert is_valid_lvalue(result)
assert isinstance(variable_type, Type)
| {"golden_diff": "diff --git a/slither/slithir/operations/type_conversion.py b/slither/slithir/operations/type_conversion.py\n--- a/slither/slithir/operations/type_conversion.py\n+++ b/slither/slithir/operations/type_conversion.py\n@@ -1,3 +1,4 @@\n+from slither.core.declarations import Contract\n from slither.core.solidity_types.type import Type\n from slither.slithir.operations.lvalue import OperationWithLValue\n from slither.slithir.utils.utils import is_valid_lvalue, is_valid_rvalue\n@@ -7,7 +8,7 @@\n \n def __init__(self, result, variable, variable_type):\n super().__init__()\n- assert is_valid_rvalue(variable)\n+ assert is_valid_rvalue(variable) or isinstance(variable, Contract)\n assert is_valid_lvalue(result)\n assert isinstance(variable_type, Type)\n", "issue": "AssertionError when obtaining address of library\n```solidity\r\nlibrary UnsafeMath {\r\n function add(uint a, uint b) external returns (uint) {\r\n return a + b;\r\n }\r\n}\r\n\r\ncontract Test {\r\n function getUnsafeMathAddr() public view returns (address) {\r\n return address(UnsafeMath);\r\n }\r\n}\r\n```\r\n\r\nhttps://solidity.readthedocs.io/en/latest/contracts.html#libraries:~:text=It%20is%20possible%20to%20obtain%20the%20address%20of%20a%20library\n", "before_files": [{"content": "from slither.core.solidity_types.type import Type\nfrom slither.slithir.operations.lvalue import OperationWithLValue\nfrom slither.slithir.utils.utils import is_valid_lvalue, is_valid_rvalue\n\n\nclass TypeConversion(OperationWithLValue):\n\n def __init__(self, result, variable, variable_type):\n super().__init__()\n assert is_valid_rvalue(variable)\n assert is_valid_lvalue(result)\n assert isinstance(variable_type, Type)\n\n self._variable = variable\n self._type = variable_type\n self._lvalue = result\n \n\n @property\n def variable(self):\n return self._variable\n\n @property\n def type(self):\n return self._type\n\n @property\n def read(self):\n return [self.variable]\n\n def __str__(self):\n return str(self.lvalue) +' = CONVERT {} to {}'.format(self.variable, self.type)\n", "path": "slither/slithir/operations/type_conversion.py"}]} | 932 | 189 |
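
The shape of that one-line fix, widening an operand assertion so an existing IR operation accepts one more kind of value, is easy to demonstrate with stubs. In this sketch the class names mirror slither's, but these are toy stand-ins rather than slither's real types:

```python
class Contract:
    """Toy stand-in for slither.core.declarations.Contract."""

class Variable:
    """Toy stand-in for a slithir rvalue."""

def is_valid_rvalue(v):
    return isinstance(v, Variable)

class TypeConversion:
    def __init__(self, variable):
        # `address(UnsafeMath)` converts a Contract, not a Variable, so the
        # operand check must accept both kinds of value.
        assert is_valid_rvalue(variable) or isinstance(variable, Contract)
        self._variable = variable

TypeConversion(Variable())   # always worked
TypeConversion(Contract())   # raised AssertionError before the widened check
```
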
gh_patches_debug_22747 | rasdani/github-patches | git_diff | scrapy__scrapy-2275 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Idea: warn users when trying to use TextResponse functionality with plain Response
Currently, if we try to use TextResponse functionality like response.text or css()/xpath() methods with a plain Response (e.g. in case of binary content), we get an AttributeError:
```
>>> response.css
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-1-7d6e256164d4> in <module>()
----> 1 response.css
AttributeError: 'Response' object has no attribute 'css'
>>> response.xpath
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-2-4f61f6e9fc6e> in <module>()
----> 1 response.xpath
AttributeError: 'Response' object has no attribute 'xpath'
>>> response.text
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-3-be6a4a00df5e> in <module>()
----> 1 response.text
AttributeError: 'Response' object has no attribute 'text'
```
Would it make sense to add a few methods/properties to explain what's going on for new users?
I was thinking instead of AttributeError, a better behavior could be a ValueError with a message giving a bit more context.
So, in plain `Response`, we could have:
```
def css(self, *args, **kw):
raise ValueError('Response content is not text')
def xpath(self, *args, **kw):
raise ValueError('Response content is not text')
@property
def text(self, *args, **kw):
raise ValueError('Response content is not text')
```
This would be nice, because we'd had to explain fewer things when teaching people about responses and also about using `.css` and `.xpath` methods.
What do you think?
</issue>
<code>
[start of scrapy/http/response/__init__.py]
1 """
2 This module implements the Response class which is used to represent HTTP
3 responses in Scrapy.
4
5 See documentation in docs/topics/request-response.rst
6 """
7 from six.moves.urllib.parse import urljoin
8
9 from scrapy.http.headers import Headers
10 from scrapy.utils.trackref import object_ref
11 from scrapy.http.common import obsolete_setter
12
13 class Response(object_ref):
14
15 def __init__(self, url, status=200, headers=None, body=b'', flags=None, request=None):
16 self.headers = Headers(headers or {})
17 self.status = int(status)
18 self._set_body(body)
19 self._set_url(url)
20 self.request = request
21 self.flags = [] if flags is None else list(flags)
22
23 @property
24 def meta(self):
25 try:
26 return self.request.meta
27 except AttributeError:
28 raise AttributeError(
29 "Response.meta not available, this response "
30 "is not tied to any request"
31 )
32
33 def _get_url(self):
34 return self._url
35
36 def _set_url(self, url):
37 if isinstance(url, str):
38 self._url = url
39 else:
40 raise TypeError('%s url must be str, got %s:' % (type(self).__name__,
41 type(url).__name__))
42
43 url = property(_get_url, obsolete_setter(_set_url, 'url'))
44
45 def _get_body(self):
46 return self._body
47
48 def _set_body(self, body):
49 if body is None:
50 self._body = b''
51 elif not isinstance(body, bytes):
52 raise TypeError(
53 "Response body must be bytes. "
54 "If you want to pass unicode body use TextResponse "
55 "or HtmlResponse.")
56 else:
57 self._body = body
58
59 body = property(_get_body, obsolete_setter(_set_body, 'body'))
60
61 def __str__(self):
62 return "<%d %s>" % (self.status, self.url)
63
64 __repr__ = __str__
65
66 def copy(self):
67 """Return a copy of this Response"""
68 return self.replace()
69
70 def replace(self, *args, **kwargs):
71 """Create a new Response with the same attributes except for those
72 given new values.
73 """
74 for x in ['url', 'status', 'headers', 'body', 'request', 'flags']:
75 kwargs.setdefault(x, getattr(self, x))
76 cls = kwargs.pop('cls', self.__class__)
77 return cls(*args, **kwargs)
78
79 def urljoin(self, url):
80 """Join this Response's url with a possible relative url to form an
81 absolute interpretation of the latter."""
82 return urljoin(self.url, url)
83
[end of scrapy/http/response/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scrapy/http/response/__init__.py b/scrapy/http/response/__init__.py
--- a/scrapy/http/response/__init__.py
+++ b/scrapy/http/response/__init__.py
@@ -9,6 +9,8 @@
from scrapy.http.headers import Headers
from scrapy.utils.trackref import object_ref
from scrapy.http.common import obsolete_setter
+from scrapy.exceptions import NotSupported
+
class Response(object_ref):
@@ -80,3 +82,22 @@
"""Join this Response's url with a possible relative url to form an
absolute interpretation of the latter."""
return urljoin(self.url, url)
+
+ @property
+ def text(self):
+ """For subclasses of TextResponse, this will return the body
+ as text (unicode object in Python 2 and str in Python 3)
+ """
+ raise AttributeError("Response content isn't text")
+
+ def css(self, *a, **kw):
+ """Shortcut method implemented only by responses whose content
+ is text (subclasses of TextResponse).
+ """
+ raise NotSupported("Response content isn't text")
+
+ def xpath(self, *a, **kw):
+ """Shortcut method implemented only by responses whose content
+ is text (subclasses of TextResponse).
+ """
+ raise NotSupported("Response content isn't text")
| {"golden_diff": "diff --git a/scrapy/http/response/__init__.py b/scrapy/http/response/__init__.py\n--- a/scrapy/http/response/__init__.py\n+++ b/scrapy/http/response/__init__.py\n@@ -9,6 +9,8 @@\n from scrapy.http.headers import Headers\n from scrapy.utils.trackref import object_ref\n from scrapy.http.common import obsolete_setter\n+from scrapy.exceptions import NotSupported\n+\n \n class Response(object_ref):\n \n@@ -80,3 +82,22 @@\n         \"\"\"Join this Response's url with a possible relative url to form an\n         absolute interpretation of the latter.\"\"\"\n         return urljoin(self.url, url)\n+\n+    @property\n+    def text(self):\n+        \"\"\"For subclasses of TextResponse, this will return the body\n+        as text (unicode object in Python 2 and str in Python 3)\n+        \"\"\"\n+        raise AttributeError(\"Response content isn't text\")\n+\n+    def css(self, *a, **kw):\n+        \"\"\"Shortcut method implemented only by responses whose content\n+        is text (subclasses of TextResponse).\n+        \"\"\"\n+        raise NotSupported(\"Response content isn't text\")\n+\n+    def xpath(self, *a, **kw):\n+        \"\"\"Shortcut method implemented only by responses whose content\n+        is text (subclasses of TextResponse).\n+        \"\"\"\n+        raise NotSupported(\"Response content isn't text\")\n", "issue": "Idea: warn users when trying to use TextResponse functionality with plain Response\nCurrently, if we try to use TextResponse functionality like response.text or css()/xpath() methods with a plain Response (e.g. in case of binary content), we get an AttributeError:\n\n```\n>>> response.css\n---------------------------------------------------------------------------\nAttributeError                            Traceback (most recent call last)\n<ipython-input-1-7d6e256164d4> in <module>()\n----> 1 response.css\n\nAttributeError: 'Response' object has no attribute 'css'\n>>> response.xpath\n---------------------------------------------------------------------------\nAttributeError                            Traceback (most recent call last)\n<ipython-input-2-4f61f6e9fc6e> in <module>()\n----> 1 response.xpath\n\nAttributeError: 'Response' object has no attribute 'xpath'\n>>> response.text\n---------------------------------------------------------------------------\nAttributeError                            Traceback (most recent call last)\n<ipython-input-3-be6a4a00df5e> in <module>()\n----> 1 response.text\n\nAttributeError: 'Response' object has no attribute 'text'\n```\n\nWould it make sense to add a few methods/properties to explain what's going on for new users?\n\nI was thinking instead of AttributeError, a better behavior could be a ValueError with a message giving a bit more context.\n\nSo, in plain `Response`, we could have:\n\n```\ndef css(self, *args, **kw):\n    raise ValueError('Response content is not text')\n\ndef xpath(self, *args, **kw):\n    raise ValueError('Response content is not text')\n\n@property\ndef text(self, *args, **kw):\n    raise ValueError('Response content is not text')\n```\n\nThis would be nice, because we'd had to explain fewer things when teaching people about responses and also about using `.css` and `.xpath` methods.\n\nWhat do you think?\n\n", "before_files": [{"content": "\"\"\"\nThis module implements the Response class which is used to represent HTTP\nresponses in Scrapy.\n\nSee documentation in docs/topics/request-response.rst\n\"\"\"\nfrom six.moves.urllib.parse import urljoin\n\nfrom scrapy.http.headers import Headers\nfrom scrapy.utils.trackref import object_ref\nfrom scrapy.http.common import obsolete_setter\n\nclass Response(object_ref):\n\n    def __init__(self, url, status=200, headers=None, body=b'', flags=None, request=None):\n        self.headers = Headers(headers or {})\n        self.status = int(status)\n        self._set_body(body)\n        self._set_url(url)\n        self.request = request\n        self.flags = [] if flags is None else list(flags)\n\n    @property\n    def meta(self):\n        try:\n            return self.request.meta\n        except AttributeError:\n            raise AttributeError(\n                \"Response.meta not available, this response \"\n                \"is not tied to any request\"\n            )\n\n    def _get_url(self):\n        return self._url\n\n    def _set_url(self, url):\n        if isinstance(url, str):\n            self._url = url\n        else:\n            raise TypeError('%s url must be str, got %s:' % (type(self).__name__,\n                type(url).__name__))\n\n    url = property(_get_url, obsolete_setter(_set_url, 'url'))\n\n    def _get_body(self):\n        return self._body\n\n    def _set_body(self, body):\n        if body is None:\n            self._body = b''\n        elif not isinstance(body, bytes):\n            raise TypeError(\n                \"Response body must be bytes. \"\n                \"If you want to pass unicode body use TextResponse \"\n                \"or HtmlResponse.\")\n        else:\n            self._body = body\n\n    body = property(_get_body, obsolete_setter(_set_body, 'body'))\n\n    def __str__(self):\n        return \"<%d %s>\" % (self.status, self.url)\n\n    __repr__ = __str__\n\n    def copy(self):\n        \"\"\"Return a copy of this Response\"\"\"\n        return self.replace()\n\n    def replace(self, *args, **kwargs):\n        \"\"\"Create a new Response with the same attributes except for those\n        given new values.\n        \"\"\"\n        for x in ['url', 'status', 'headers', 'body', 'request', 'flags']:\n            kwargs.setdefault(x, getattr(self, x))\n        cls = kwargs.pop('cls', self.__class__)\n        return cls(*args, **kwargs)\n\n    def urljoin(self, url):\n        \"\"\"Join this Response's url with a possible relative url to form an\n        absolute interpretation of the latter.\"\"\"\n        return urljoin(self.url, url)\n", "path": "scrapy/http/response/__init__.py"}]} | 1,661 | 302 |
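
One subtlety in the patch above is worth spelling out: `.text` keeps raising `AttributeError` while `css()`/`xpath()` raise `NotSupported`. That split is deliberate, because `hasattr(response, "text")` is a common duck-typing check and `hasattr` only swallows `AttributeError`. A toy reproduction (stub classes only; scrapy's real `NotSupported` lives in `scrapy.exceptions`):

```python
class NotSupported(Exception):
    """Stub for scrapy.exceptions.NotSupported."""

class Response:
    @property
    def text(self):
        # Stays an AttributeError so hasattr(response, "text") returns
        # False instead of propagating an exception out of hasattr().
        raise AttributeError("Response content isn't text")

    def css(self, *a, **kw):
        raise NotSupported("Response content isn't text")

class TextResponse(Response):
    @property
    def text(self):
        return "<html>...</html>"  # the real class decodes self.body

    def css(self, query):
        return [query]  # the real class returns a parsel SelectorList

print(hasattr(Response(), "text"))       # False: duck typing still works
print(hasattr(TextResponse(), "text"))   # True
try:
    Response().css("a::attr(href)")
except NotSupported as exc:
    print(exc)                           # Response content isn't text
```
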
gh_patches_debug_66309 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-1463 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
No module named 'elasticdl.python.elasticdl.layers' on master
```
Traceback (most recent call last):
File "/usr/local/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/local/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/elasticdl/python/master/main.py", line 28, in <module>
from elasticdl.python.elasticdl.layers.embedding import Embedding
ModuleNotFoundError: No module named 'elasticdl.python.elasticdl.layers'
```
Seems `layers` directory is not installed to `/usr/local/lib/python3.7/site-packages/elasticdl-develop-py3.7.egg/elasticdl/python/elasticdl` after running `python setup.py install`
Steps to reproduce:
1. In a Python Docker container, clone ElasticDL and run `python setup.py install`
1. remove the cloned source
1. execute a demo job by: `elasticdl train ...`
</issue>
<code>
[start of elasticdl/python/elasticdl/__init__.py]
[end of elasticdl/python/elasticdl/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticdl/python/elasticdl/__init__.py b/elasticdl/python/elasticdl/__init__.py
--- a/elasticdl/python/elasticdl/__init__.py
+++ b/elasticdl/python/elasticdl/__init__.py
@@ -0,0 +1 @@
+from elasticdl.python.elasticdl import layers # noqa: F401
| {"golden_diff": "diff --git a/elasticdl/python/elasticdl/__init__.py b/elasticdl/python/elasticdl/__init__.py\n--- a/elasticdl/python/elasticdl/__init__.py\n+++ b/elasticdl/python/elasticdl/__init__.py\n@@ -0,0 +1 @@\n+from elasticdl.python.elasticdl import layers # noqa: F401\n", "issue": "No module named 'elasticdl.python.elasticdl.layers' on master\n```\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.7/runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"/usr/local/lib/python3.7/runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"/elasticdl/python/master/main.py\", line 28, in <module>\r\n from elasticdl.python.elasticdl.layers.embedding import Embedding\r\nModuleNotFoundError: No module named 'elasticdl.python.elasticdl.layers'\r\n```\r\n\r\nSeems `layers` directory is not installed to `/usr/local/lib/python3.7/site-packages/elasticdl-develop-py3.7.egg/elasticdl/python/elasticdl` after running `python setup.py install`\r\n\r\nSteps to reproduce:\r\n\r\n1. In a Python Docker container, clone ElasticDL and run `python setup.py install`\r\n1. remove the cloned source\r\n1. execute a demo job by: `elasticdl train ...`\n", "before_files": [{"content": "", "path": "elasticdl/python/elasticdl/__init__.py"}]} | 778 | 84 |
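
The one-line golden diff looks unrelated to packaging at first glance, so a note on the mechanism: importing the subpackage from the parent's `__init__.py` makes `import elasticdl.python.elasticdl` fail immediately if `layers/` did not ship with the installed distribution (for example, if a missing `__init__.py` kept setuptools' package discovery from picking it up). A small check script one could run in the installed environment; this is illustrative and not part of elasticdl itself:

```python
import importlib
import pathlib

# If layers/ was dropped from the installed egg, this import now fails
# right away instead of deep inside master/main.py.
pkg = importlib.import_module("elasticdl.python.elasticdl")

layers_dir = pathlib.Path(pkg.__file__).parent / "layers"
print("layers shipped:", layers_dir.is_dir())
print("is a package:", (layers_dir / "__init__.py").exists())

importlib.import_module("elasticdl.python.elasticdl.layers.embedding")
print("embedding import OK")
```
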
gh_patches_debug_30901 | rasdani/github-patches | git_diff | lk-geimfari__mimesis-677 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Auto add builtin provider to Generic based on passed locale
# Feature request
An idea is very simple:
```python
generic = Generic('ru', auto_add_builtin=True)
generic.russia_provider.inn()
```
Instead of this:
```python
from mimesis import Generic
from mimesis.builtins import RussiaSpecProvider
generic = Generic('ru')
generic.add_provider(RussiaSpecProvider)
generic.russia_provider.inn()
```
Optionally we can make builtin's name customizable:
```python
generic = Generic('ru', auto_add_builtin=True, builtin_custom_name='russia')
generic.russia.inn()
```
</issue>
<code>
[start of mimesis/providers/generic.py]
1 # -*- coding: utf-8 -*-
2
3 """Provides all at one."""
4
5 import inspect
6 from typing import Any, List, Type
7
8 from mimesis.providers.address import Address
9 from mimesis.providers.base import BaseDataProvider, BaseProvider
10 from mimesis.providers.business import Business
11 from mimesis.providers.choice import Choice
12 from mimesis.providers.clothing import Clothing
13 from mimesis.providers.code import Code
14 from mimesis.providers.cryptographic import Cryptographic
15 from mimesis.providers.date import Datetime
16 from mimesis.providers.development import Development
17 from mimesis.providers.file import File
18 from mimesis.providers.food import Food
19 from mimesis.providers.hardware import Hardware
20 from mimesis.providers.internet import Internet
21 from mimesis.providers.numbers import Numbers
22 from mimesis.providers.path import Path
23 from mimesis.providers.payment import Payment
24 from mimesis.providers.person import Person
25 from mimesis.providers.science import Science
26 from mimesis.providers.structure import Structure
27 from mimesis.providers.text import Text
28 from mimesis.providers.transport import Transport
29 from mimesis.providers.units import UnitSystem
30
31 __all__ = ['Generic']
32
33
34 class Generic(BaseDataProvider):
35 """Class which contain all providers at one."""
36
37 def __init__(self, *args, **kwargs) -> None:
38 """Initialize attributes lazily.
39
40 :param args: Arguments.
41 :param kwargs: Keyword arguments.
42 """
43 super().__init__(*args, **kwargs)
44 self._person = Person
45 self._address = Address
46 self._datetime = Datetime
47 self._business = Business
48 self._text = Text
49 self._food = Food
50 self._science = Science
51 self.transport = Transport(seed=self.seed)
52 self.code = Code(seed=self.seed)
53 self.unit_system = UnitSystem(seed=self.seed)
54 self.file = File(seed=self.seed)
55 self.numbers = Numbers(seed=self.seed)
56 self.development = Development(seed=self.seed)
57 self.hardware = Hardware(seed=self.seed)
58 self.clothing = Clothing(seed=self.seed)
59 self.internet = Internet(seed=self.seed)
60 self.path = Path(seed=self.seed)
61 self.payment = Payment(seed=self.seed)
62 self.cryptographic = Cryptographic(seed=self.seed)
63 self.structure = Structure(seed=self.seed)
64 self.choice = Choice(seed=self.seed)
65
66 class Meta:
67 """Class for metadata."""
68
69 name = 'generic'
70
71 def __getattr__(self, attrname: str) -> Any:
72 """Get attribute without underscore.
73
74 :param attrname: Attribute name.
75 :return: An attribute.
76 """
77 attribute = object.__getattribute__(
78 self, '_' + attrname)
79 if attribute and callable(attribute):
80 self.__dict__[attrname] = attribute(
81 self.locale,
82 self.seed,
83 )
84 return self.__dict__[attrname]
85
86 def __dir__(self) -> List[str]:
87 """Available data providers.
88
89 The list of result will be used in AbstractField to
90 determine method's class.
91
92 :return: List of attributes.
93 """
94 attributes = []
95 exclude = BaseDataProvider().__dict__.keys()
96
97 for a in self.__dict__:
98 if a not in exclude:
99 if a.startswith('_'):
100 attribute = a.replace('_', '', 1)
101 attributes.append(attribute)
102 else:
103 attributes.append(a)
104 return attributes
105
106 def add_provider(self, cls: Type[BaseProvider]) -> None:
107 """Add a custom provider to Generic() object.
108
109 :param cls: Custom provider.
110 :return: None
111 :raises TypeError: if cls is not class.
112 """
113 if inspect.isclass(cls):
114 if not issubclass(cls, BaseProvider):
115 raise TypeError('The provider must be a '
116 'subclass of BaseProvider')
117 try:
118 meta = getattr(cls, 'Meta')
119 name = getattr(meta, 'name')
120 except AttributeError:
121 name = cls.__name__.lower()
122 setattr(self, name, cls(seed=self.seed))
123 else:
124 raise TypeError('The provider must be a class')
125
126 def add_providers(self, *providers: Type[BaseProvider]) -> None:
127 """Add a lot of custom providers to Generic() object.
128
129 :param providers: Custom providers.
130 :return: None
131 """
132 for provider in providers:
133 self.add_provider(provider)
134
[end of mimesis/providers/generic.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mimesis/providers/generic.py b/mimesis/providers/generic.py
--- a/mimesis/providers/generic.py
+++ b/mimesis/providers/generic.py
@@ -5,6 +5,17 @@
import inspect
from typing import Any, List, Type
+from mimesis.builtins import (
+ BrazilSpecProvider,
+ DenmarkSpecProvider,
+ GermanySpecProvider,
+ ItalySpecProvider,
+ NetherlandsSpecProvider,
+ PolandSpecProvider,
+ RussiaSpecProvider,
+ UkraineSpecProvider,
+ USASpecProvider,
+)
from mimesis.providers.address import Address
from mimesis.providers.base import BaseDataProvider, BaseProvider
from mimesis.providers.business import Business
@@ -48,6 +59,21 @@
self._text = Text
self._food = Food
self._science = Science
+
+ _spec_providers = {
+ 'de': DenmarkSpecProvider,
+ 'ge': GermanySpecProvider,
+ 'en': USASpecProvider,
+ 'it': ItalySpecProvider,
+ 'nl': NetherlandsSpecProvider,
+ 'pl': PolandSpecProvider,
+ 'pt-br': BrazilSpecProvider,
+ 'ru': RussiaSpecProvider,
+ 'uk': UkraineSpecProvider,
+ }
+ if self.locale in _spec_providers:
+ self.add_provider(_spec_providers[self.locale])
+
self.transport = Transport(seed=self.seed)
self.code = Code(seed=self.seed)
self.unit_system = UnitSystem(seed=self.seed)
@@ -108,7 +134,8 @@
:param cls: Custom provider.
:return: None
- :raises TypeError: if cls is not class.
+ :raises TypeError: if cls is not class or is not a subclass
+ of BaseProvider.
"""
if inspect.isclass(cls):
if not issubclass(cls, BaseProvider):
| {"golden_diff": "diff --git a/mimesis/providers/generic.py b/mimesis/providers/generic.py\n--- a/mimesis/providers/generic.py\n+++ b/mimesis/providers/generic.py\n@@ -5,6 +5,17 @@\n import inspect\n from typing import Any, List, Type\n \n+from mimesis.builtins import (\n+    BrazilSpecProvider,\n+    DenmarkSpecProvider,\n+    GermanySpecProvider,\n+    ItalySpecProvider,\n+    NetherlandsSpecProvider,\n+    PolandSpecProvider,\n+    RussiaSpecProvider,\n+    UkraineSpecProvider,\n+    USASpecProvider,\n+)\n from mimesis.providers.address import Address\n from mimesis.providers.base import BaseDataProvider, BaseProvider\n from mimesis.providers.business import Business\n@@ -48,6 +59,21 @@\n         self._text = Text\n         self._food = Food\n         self._science = Science\n+\n+        _spec_providers = {\n+            'de': DenmarkSpecProvider,\n+            'ge': GermanySpecProvider,\n+            'en': USASpecProvider,\n+            'it': ItalySpecProvider,\n+            'nl': NetherlandsSpecProvider,\n+            'pl': PolandSpecProvider,\n+            'pt-br': BrazilSpecProvider,\n+            'ru': RussiaSpecProvider,\n+            'uk': UkraineSpecProvider,\n+        }\n+        if self.locale in _spec_providers:\n+            self.add_provider(_spec_providers[self.locale])\n+\n         self.transport = Transport(seed=self.seed)\n         self.code = Code(seed=self.seed)\n         self.unit_system = UnitSystem(seed=self.seed)\n@@ -108,7 +134,8 @@\n \n         :param cls: Custom provider.\n         :return: None\n-        :raises TypeError: if cls is not class.\n+        :raises TypeError: if cls is not class or is not a subclass\n+            of BaseProvider.\n         \"\"\"\n         if inspect.isclass(cls):\n             if not issubclass(cls, BaseProvider):\n", "issue": "Auto add builtin provider to Generic based on passed locale\n# Feature request\r\n\r\nAn idea is very simple:\r\n\r\n```python\r\ngeneric = Generic('ru', auto_add_builtin=True)\r\ngeneric.russia_provider.inn()\r\n```\r\n\r\nInstead of this:\r\n\r\n```python\r\nfrom mimesis import Generic\r\nfrom mimesis.builtins import RussiaSpecProvider\r\n\r\ngeneric = Generic('ru')\r\ngeneric.add_provider(RussiaSpecProvider)\r\ngeneric.russia_provider.inn()\r\n```\r\n\r\nOptionally we can make builtin's name customizable: \r\n\r\n```python\r\ngeneric = Generic('ru', auto_add_builtin=True, builtin_custom_name='russia')\r\ngeneric.russia.inn()\r\n```\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Provides all at one.\"\"\"\n\nimport inspect\nfrom typing import Any, List, Type\n\nfrom mimesis.providers.address import Address\nfrom mimesis.providers.base import BaseDataProvider, BaseProvider\nfrom mimesis.providers.business import Business\nfrom mimesis.providers.choice import Choice\nfrom mimesis.providers.clothing import Clothing\nfrom mimesis.providers.code import Code\nfrom mimesis.providers.cryptographic import Cryptographic\nfrom mimesis.providers.date import Datetime\nfrom mimesis.providers.development import Development\nfrom mimesis.providers.file import File\nfrom mimesis.providers.food import Food\nfrom mimesis.providers.hardware import Hardware\nfrom mimesis.providers.internet import Internet\nfrom mimesis.providers.numbers import Numbers\nfrom mimesis.providers.path import Path\nfrom mimesis.providers.payment import Payment\nfrom mimesis.providers.person import Person\nfrom mimesis.providers.science import Science\nfrom mimesis.providers.structure import Structure\nfrom mimesis.providers.text import Text\nfrom mimesis.providers.transport import Transport\nfrom mimesis.providers.units import UnitSystem\n\n__all__ = ['Generic']\n\n\nclass Generic(BaseDataProvider):\n    \"\"\"Class which contain all providers at one.\"\"\"\n\n    def __init__(self, *args, **kwargs) -> None:\n        \"\"\"Initialize attributes lazily.\n\n        :param args: Arguments.\n        :param kwargs: Keyword arguments.\n        \"\"\"\n        super().__init__(*args, **kwargs)\n        self._person = Person\n        self._address = Address\n        self._datetime = Datetime\n        self._business = Business\n        self._text = Text\n        self._food = Food\n        self._science = Science\n        self.transport = Transport(seed=self.seed)\n        self.code = Code(seed=self.seed)\n        self.unit_system = UnitSystem(seed=self.seed)\n        self.file = File(seed=self.seed)\n        self.numbers = Numbers(seed=self.seed)\n        self.development = Development(seed=self.seed)\n        self.hardware = Hardware(seed=self.seed)\n        self.clothing = Clothing(seed=self.seed)\n        self.internet = Internet(seed=self.seed)\n        self.path = Path(seed=self.seed)\n        self.payment = Payment(seed=self.seed)\n        self.cryptographic = Cryptographic(seed=self.seed)\n        self.structure = Structure(seed=self.seed)\n        self.choice = Choice(seed=self.seed)\n\n    class Meta:\n        \"\"\"Class for metadata.\"\"\"\n\n        name = 'generic'\n\n    def __getattr__(self, attrname: str) -> Any:\n        \"\"\"Get attribute without underscore.\n\n        :param attrname: Attribute name.\n        :return: An attribute.\n        \"\"\"\n        attribute = object.__getattribute__(\n            self, '_' + attrname)\n        if attribute and callable(attribute):\n            self.__dict__[attrname] = attribute(\n                self.locale,\n                self.seed,\n            )\n            return self.__dict__[attrname]\n\n    def __dir__(self) -> List[str]:\n        \"\"\"Available data providers.\n\n        The list of result will be used in AbstractField to\n        determine method's class.\n\n        :return: List of attributes.\n        \"\"\"\n        attributes = []\n        exclude = BaseDataProvider().__dict__.keys()\n\n        for a in self.__dict__:\n            if a not in exclude:\n                if a.startswith('_'):\n                    attribute = a.replace('_', '', 1)\n                    attributes.append(attribute)\n                else:\n                    attributes.append(a)\n        return attributes\n\n    def add_provider(self, cls: Type[BaseProvider]) -> None:\n        \"\"\"Add a custom provider to Generic() object.\n\n        :param cls: Custom provider.\n        :return: None\n        :raises TypeError: if cls is not class.\n        \"\"\"\n        if inspect.isclass(cls):\n            if not issubclass(cls, BaseProvider):\n                raise TypeError('The provider must be a '\n                                'subclass of BaseProvider')\n            try:\n                meta = getattr(cls, 'Meta')\n                name = getattr(meta, 'name')\n            except AttributeError:\n                name = cls.__name__.lower()\n            setattr(self, name, cls(seed=self.seed))\n        else:\n            raise TypeError('The provider must be a class')\n\n    def add_providers(self, *providers: Type[BaseProvider]) -> None:\n        \"\"\"Add a lot of custom providers to Generic() object.\n\n        :param providers: Custom providers.\n        :return: None\n        \"\"\"\n        for provider in providers:\n            self.add_provider(provider)\n", "path": "mimesis/providers/generic.py"}]} | 1,908 | 427 |
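
The locale-to-provider dispatch table in the diff above is a reusable pattern; here it is in isolation with stub providers (the real classes come from `mimesis.builtins`, and the attribute name is taken from each provider's `Meta.name`, mirroring what `add_provider` does):

```python
class RussiaSpecProvider:
    class Meta:
        name = "russia_provider"

class UkraineSpecProvider:
    class Meta:
        name = "ukraine_provider"

SPEC_PROVIDERS = {
    "ru": RussiaSpecProvider,
    "uk": UkraineSpecProvider,
}

class Generic:
    def __init__(self, locale):
        self.locale = locale
        cls = SPEC_PROVIDERS.get(locale)
        if cls is not None:  # auto-attach the locale's builtin provider
            setattr(self, cls.Meta.name, cls())

g = Generic("ru")
print(hasattr(g, "russia_provider"))              # True: no manual add_provider() call
print(hasattr(Generic("en"), "russia_provider"))  # False: wrong locale, nothing attached
```
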
gh_patches_debug_10035 | rasdani/github-patches | git_diff | comic__grand-challenge.org-1433 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Helptext for supported file formats is not up-to-date
</issue>
<code>
[start of app/grandchallenge/cases/forms.py]
1 from typing import List
2
3 from crispy_forms.helper import FormHelper
4 from crispy_forms.layout import Submit
5 from django import forms
6 from django.conf import settings
7 from django.core.exceptions import ValidationError
8
9 from grandchallenge.cases.models import RawImageFile, RawImageUploadSession
10 from grandchallenge.jqfileupload.widgets import uploader
11 from grandchallenge.jqfileupload.widgets.uploader import (
12 StagedAjaxFile,
13 UploadedAjaxFileList,
14 )
15
16
17 class UploadRawImagesForm(forms.ModelForm):
18 files = UploadedAjaxFileList(
19 widget=uploader.AjaxUploadWidget(multifile=True, auto_commit=False),
20 label="Image files",
21 help_text=(
22 "The total size of all files uploaded in a single session "
23 "cannot exceed 10 GB.<br>"
24 "The following file formats are supported: "
25 ".mha, .mhd, .raw, .zraw, .dcm, .tiff, .png, .jpeg and .jpg."
26 ),
27 )
28
29 def __init__(self, *args, user, linked_task=None, **kwargs):
30 super().__init__(*args, **kwargs)
31 self.helper = FormHelper()
32 self.helper.add_input(Submit("save", "Submit"))
33 self.fields["files"].widget.user = user
34 self._linked_task = linked_task
35
36 def clean_files(self):
37 files = self.cleaned_data["files"]
38
39 if len({f.name for f in files}) != len(files):
40 raise ValidationError("Filenames must be unique.")
41
42 if sum([f.size for f in files]) > settings.UPLOAD_SESSION_MAX_BYTES:
43 raise ValidationError(
44 "Total size of all files exceeds the upload limit."
45 )
46
47 return files
48
49 def save(self, commit=True):
50 instance = super().save(commit=False) # type: RawImageUploadSession
51
52 # Create links between the created session and all uploaded files
53 uploaded_files = self.cleaned_data[
54 "files"
55 ] # type: List[StagedAjaxFile]
56
57 raw_files = [
58 RawImageFile(
59 upload_session=instance,
60 filename=uploaded_file.name,
61 staged_file_id=uploaded_file.uuid,
62 )
63 for uploaded_file in uploaded_files
64 ]
65
66 if commit:
67 instance.save()
68 RawImageFile.objects.bulk_create(raw_files)
69 instance.process_images(linked_task=self._linked_task)
70
71 return instance
72
73 class Meta:
74 model = RawImageUploadSession
75 fields = ["files"]
76
[end of app/grandchallenge/cases/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/grandchallenge/cases/forms.py b/app/grandchallenge/cases/forms.py
--- a/app/grandchallenge/cases/forms.py
+++ b/app/grandchallenge/cases/forms.py
@@ -22,7 +22,10 @@
"The total size of all files uploaded in a single session "
"cannot exceed 10 GB.<br>"
"The following file formats are supported: "
- ".mha, .mhd, .raw, .zraw, .dcm, .tiff, .png, .jpeg and .jpg."
+ ".mha, .mhd, .raw, .zraw, .dcm, .tiff, .png, .jpeg and .jpg.<br>"
+ "The following file formats can be uploaded and will be converted to "
+ "tif: Aperio(.svs), Hamamatsu(.vms, .vmu, .ndpi), Leica(.scn), MIRAX"
+ "(.mrxs) and Ventana(.bif)."
),
)
| {"golden_diff": "diff --git a/app/grandchallenge/cases/forms.py b/app/grandchallenge/cases/forms.py\n--- a/app/grandchallenge/cases/forms.py\n+++ b/app/grandchallenge/cases/forms.py\n@@ -22,7 +22,10 @@\n \"The total size of all files uploaded in a single session \"\n \"cannot exceed 10 GB.<br>\"\n \"The following file formats are supported: \"\n- \".mha, .mhd, .raw, .zraw, .dcm, .tiff, .png, .jpeg and .jpg.\"\n+ \".mha, .mhd, .raw, .zraw, .dcm, .tiff, .png, .jpeg and .jpg.<br>\"\n+ \"The following file formats can be uploaded and will be converted to \"\n+ \"tif: Aperio(.svs), Hamamatsu(.vms, .vmu, .ndpi), Leica(.scn), MIRAX\"\n+ \"(.mrxs) and Ventana(.bif).\"\n ),\n )\n", "issue": "Helptext for supported file formats is not up-to-date\n\n", "before_files": [{"content": "from typing import List\n\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Submit\nfrom django import forms\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\n\nfrom grandchallenge.cases.models import RawImageFile, RawImageUploadSession\nfrom grandchallenge.jqfileupload.widgets import uploader\nfrom grandchallenge.jqfileupload.widgets.uploader import (\n StagedAjaxFile,\n UploadedAjaxFileList,\n)\n\n\nclass UploadRawImagesForm(forms.ModelForm):\n files = UploadedAjaxFileList(\n widget=uploader.AjaxUploadWidget(multifile=True, auto_commit=False),\n label=\"Image files\",\n help_text=(\n \"The total size of all files uploaded in a single session \"\n \"cannot exceed 10 GB.<br>\"\n \"The following file formats are supported: \"\n \".mha, .mhd, .raw, .zraw, .dcm, .tiff, .png, .jpeg and .jpg.\"\n ),\n )\n\n def __init__(self, *args, user, linked_task=None, **kwargs):\n super().__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.add_input(Submit(\"save\", \"Submit\"))\n self.fields[\"files\"].widget.user = user\n self._linked_task = linked_task\n\n def clean_files(self):\n files = self.cleaned_data[\"files\"]\n\n if len({f.name for f in files}) != len(files):\n raise ValidationError(\"Filenames must be unique.\")\n\n if sum([f.size for f in files]) > settings.UPLOAD_SESSION_MAX_BYTES:\n raise ValidationError(\n \"Total size of all files exceeds the upload limit.\"\n )\n\n return files\n\n def save(self, commit=True):\n instance = super().save(commit=False) # type: RawImageUploadSession\n\n # Create links between the created session and all uploaded files\n uploaded_files = self.cleaned_data[\n \"files\"\n ] # type: List[StagedAjaxFile]\n\n raw_files = [\n RawImageFile(\n upload_session=instance,\n filename=uploaded_file.name,\n staged_file_id=uploaded_file.uuid,\n )\n for uploaded_file in uploaded_files\n ]\n\n if commit:\n instance.save()\n RawImageFile.objects.bulk_create(raw_files)\n instance.process_images(linked_task=self._linked_task)\n\n return instance\n\n class Meta:\n model = RawImageUploadSession\n fields = [\"files\"]\n", "path": "app/grandchallenge/cases/forms.py"}]} | 1,229 | 236 |
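
Help text that hand-lists file formats is exactly the kind of string that drifts (which is what this issue reports). One defensive option is to render it from a single constant so the docs cannot disagree with the validator. A sketch follows, with made-up constant names rather than grand-challenge's actual settings:

```python
SUPPORTED_EXTENSIONS = (
    ".mha", ".mhd", ".raw", ".zraw", ".dcm", ".tiff", ".png", ".jpeg", ".jpg",
)
CONVERTED_TO_TIF = (
    "Aperio (.svs)", "Hamamatsu (.vms, .vmu, .ndpi)",
    "Leica (.scn)", "MIRAX (.mrxs)", "Ventana (.bif)",
)

def upload_help_text():
    # Both lists live in one place, so updating a format updates the help
    # text shown to users at the same time.
    return (
        "The following file formats are supported: "
        + ", ".join(SUPPORTED_EXTENSIONS)
        + ". These formats are converted to tif on upload: "
        + ", ".join(CONVERTED_TO_TIF) + "."
    )

print(upload_help_text())
```
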
gh_patches_debug_58053 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3312 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider upsstore is broken
During the global build at 2021-10-13-14-42-23, spider **upsstore** failed with **5176 features** and **5 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-10-13-14-42-23/logs/upsstore.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-13-14-42-23/output/upsstore.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-13-14-42-23/output/upsstore.geojson))
</issue>
<code>
[start of locations/spiders/upsstore.py]
1 import scrapy
2 import json
3 import re
4 from locations.items import GeojsonPointItem
5 from locations.hours import OpeningHours
6
7 DAY_MAPPING = {
8 "MONDAY": "Mo",
9 "TUESDAY": "Tu",
10 "WEDNESDAY": "We",
11 "THURSDAY": "Th",
12 "FRIDAY": "Fr",
13 "SATURDAY": "Sa",
14 "SUNDAY": "Su"
15 }
16
17
18 class UpsStoreSpider(scrapy.Spider):
19 name = "upsstore"
20 item_attributes = { 'brand': "UPS Store" }
21 allowed_domains = ["theupsstore.com"]
22 download_delay = 0.1
23 start_urls = (
24 'https://locations.theupsstore.com/',
25 )
26
27 def parse_hours(self, hours):
28 """
29 :param hours:
30 :return:
31 """
32 hours = json.loads(hours)
33 o = OpeningHours()
34
35 for day in hours["hours"]["days"]:
36 if not day["isClosed"]:
37 interval = day["intervals"][0]
38
39 o.add_range(DAY_MAPPING[day["day"]],
40 open_time=str(interval["start"]),
41 close_time=str(interval["end"]),
42 time_format="%H%M")
43 return o.as_opening_hours()
44
45 def parse_store(self, response):
46 ref = response.xpath('//input[@id="store_id"]/@value').extract_first()
47 if not ref:
48 ref = re.search(r'store(\d+)@theupsstore.com',
49 response.xpath('//a[@itemprop="email"]/text()').extract_first()).groups()
50
51 properties = {
52 'name': response.xpath('//span[@class="LocationName-geo"]/text()').extract_first(),
53 'phone': response.xpath('//span[@itemprop="telephone"]/text()').extract_first(),
54 'addr_full': response.xpath('//meta[@itemprop="streetAddress"]/@content').extract_first(),
55 'city': response.xpath('//meta[@itemprop="addressLocality"]/@content').extract_first(),
56 'state': response.xpath('//abbr[@itemprop="addressRegion"]/text()').extract_first(),
57 'country': response.xpath('//abbr[@itemprop="addressCountry"]/text()').extract_first(),
58 'postcode': response.xpath('//span[@itemprop="postalCode"]/text()').extract_first(),
59 'ref': ref,
60 'website': response.url,
61 'lat': float(response.xpath('//meta[@itemprop="latitude"]/@content').extract_first()),
62 'lon': float(response.xpath('//meta[@itemprop="longitude"]/@content').extract_first()),
63 }
64
65 hours = response.xpath('//script[@id="location_info_hours"]/text()').extract_first()
66 try:
67 hours = self.parse_hours(hours)
68 if hours:
69 properties['opening_hours'] = hours
70 except:
71 pass
72
73 yield GeojsonPointItem(**properties)
74
75 def parse(self, response):
76 urls = response.xpath('//a[@class="Directory-listLink"]/@href').extract()
77
78 if urls:
79 for url in urls:
80 if len(url.split('/')) == 3:
81 callback = self.parse_store
82 else:
83 callback = self.parse
84
85 yield scrapy.Request(
86 response.urljoin(url),
87 callback=callback,
88 )
89
90 else:
91 urls = response.xpath('//a[@class="Link"]/@href').extract()
92 for url in urls:
93 yield scrapy.Request(
94 response.urljoin(url),
95 callback=self.parse_store,
96 )
[end of locations/spiders/upsstore.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/upsstore.py b/locations/spiders/upsstore.py
--- a/locations/spiders/upsstore.py
+++ b/locations/spiders/upsstore.py
@@ -43,6 +43,9 @@
return o.as_opening_hours()
def parse_store(self, response):
+ if "Permanently Closed" in response.text:
+ return
+
ref = response.xpath('//input[@id="store_id"]/@value').extract_first()
if not ref:
ref = re.search(r'store(\d+)@theupsstore.com',
| {"golden_diff": "diff --git a/locations/spiders/upsstore.py b/locations/spiders/upsstore.py\n--- a/locations/spiders/upsstore.py\n+++ b/locations/spiders/upsstore.py\n@@ -43,6 +43,9 @@\n return o.as_opening_hours()\n \n def parse_store(self, response):\n+ if \"Permanently Closed\" in response.text:\n+ return\n+\n ref = response.xpath('//input[@id=\"store_id\"]/@value').extract_first()\n if not ref:\n ref = re.search(r'store(\\d+)@theupsstore.com',\n", "issue": "Spider upsstore is broken\nDuring the global build at 2021-10-13-14-42-23, spider **upsstore** failed with **5176 features** and **5 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-10-13-14-42-23/logs/upsstore.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-13-14-42-23/output/upsstore.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-13-14-42-23/output/upsstore.geojson))\n", "before_files": [{"content": "import scrapy\nimport json\nimport re\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\nDAY_MAPPING = {\n \"MONDAY\": \"Mo\",\n \"TUESDAY\": \"Tu\",\n \"WEDNESDAY\": \"We\",\n \"THURSDAY\": \"Th\",\n \"FRIDAY\": \"Fr\",\n \"SATURDAY\": \"Sa\",\n \"SUNDAY\": \"Su\"\n}\n\n\nclass UpsStoreSpider(scrapy.Spider):\n name = \"upsstore\"\n item_attributes = { 'brand': \"UPS Store\" }\n allowed_domains = [\"theupsstore.com\"]\n download_delay = 0.1\n start_urls = (\n 'https://locations.theupsstore.com/',\n )\n\n def parse_hours(self, hours):\n \"\"\"\n :param hours:\n :return:\n \"\"\"\n hours = json.loads(hours)\n o = OpeningHours()\n\n for day in hours[\"hours\"][\"days\"]:\n if not day[\"isClosed\"]:\n interval = day[\"intervals\"][0]\n\n o.add_range(DAY_MAPPING[day[\"day\"]],\n open_time=str(interval[\"start\"]),\n close_time=str(interval[\"end\"]),\n time_format=\"%H%M\")\n return o.as_opening_hours()\n\n def parse_store(self, response):\n ref = response.xpath('//input[@id=\"store_id\"]/@value').extract_first()\n if not ref:\n ref = re.search(r'store(\\d+)@theupsstore.com',\n response.xpath('//a[@itemprop=\"email\"]/text()').extract_first()).groups()\n\n properties = {\n 'name': response.xpath('//span[@class=\"LocationName-geo\"]/text()').extract_first(),\n 'phone': response.xpath('//span[@itemprop=\"telephone\"]/text()').extract_first(),\n 'addr_full': response.xpath('//meta[@itemprop=\"streetAddress\"]/@content').extract_first(),\n 'city': response.xpath('//meta[@itemprop=\"addressLocality\"]/@content').extract_first(),\n 'state': response.xpath('//abbr[@itemprop=\"addressRegion\"]/text()').extract_first(),\n 'country': response.xpath('//abbr[@itemprop=\"addressCountry\"]/text()').extract_first(),\n 'postcode': response.xpath('//span[@itemprop=\"postalCode\"]/text()').extract_first(),\n 'ref': ref,\n 'website': response.url,\n 'lat': float(response.xpath('//meta[@itemprop=\"latitude\"]/@content').extract_first()),\n 'lon': float(response.xpath('//meta[@itemprop=\"longitude\"]/@content').extract_first()),\n }\n\n hours = response.xpath('//script[@id=\"location_info_hours\"]/text()').extract_first()\n try:\n hours = self.parse_hours(hours)\n if hours:\n properties['opening_hours'] = hours\n except:\n pass\n\n yield GeojsonPointItem(**properties)\n\n def parse(self, response):\n urls = response.xpath('//a[@class=\"Directory-listLink\"]/@href').extract()\n\n if urls:\n for url in urls:\n if len(url.split('/')) == 3:\n callback = self.parse_store\n else:\n callback = self.parse\n\n yield 
scrapy.Request(\n response.urljoin(url),\n callback=callback,\n )\n\n else:\n urls = response.xpath('//a[@class=\"Link\"]/@href').extract()\n for url in urls:\n yield scrapy.Request(\n response.urljoin(url),\n callback=self.parse_store,\n )", "path": "locations/spiders/upsstore.py"}]} | 1,650 | 134 |
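For orientation, a minimal sketch of the early-return guard this record's golden diff adds to `parse_store`; the `FakeResponse` stand-in below is hypothetical and only illustrates the behavior, since the real spider receives a scrapy response:

```python
# Sketch of the "Permanently Closed" guard from the golden diff above.
class FakeResponse:  # hypothetical stand-in for a scrapy response
    def __init__(self, text):
        self.text = text

def parse_store(response):
    if "Permanently Closed" in response.text:
        return  # skip closed stores instead of failing on missing fields
    return "parsed store"

print(parse_store(FakeResponse("Permanently Closed")))       # None -> store skipped
print(parse_store(FakeResponse("<html>open store</html>")))  # 'parsed store'
```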
gh_patches_debug_309 | rasdani/github-patches | git_diff | wemake-services__wemake-python-styleguide-195 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix documentation main page's header
The header is gone:
<img width="1032" alt="2018-10-03 0 18 01" src="https://user-images.githubusercontent.com/4660275/46377643-d0ce1080-c6a1-11e8-950b-d2d0c515dee1.png">
</issue>
<code>
[start of wemake_python_styleguide/visitors/ast/numbers.py]
1 # -*- coding: utf-8 -*-
2
3 import ast
4 from typing import Optional
5
6 from wemake_python_styleguide.constants import MAGIC_NUMBERS_WHITELIST
7 from wemake_python_styleguide.violations.best_practices import (
8 MagicNumberViolation,
9 )
10 from wemake_python_styleguide.visitors.base import BaseNodeVisitor
11
12
13 class MagicNumberVisitor(BaseNodeVisitor):
14 """Checks magic numbers used in the code."""
15
16 _ALLOWED_PARENTS = (
17 ast.Assign,
18
19 # Constructor usages:
20 ast.FunctionDef,
21 ast.arguments,
22
23 # Primitives:
24 ast.List,
25 ast.Dict,
26 ast.Set,
27 ast.Tuple,
28 )
29
30 _PROXY_PARENTS = (
31 ast.UnaryOp,
32 )
33
34 def _get_real_parent(self, node: Optional[ast.AST]) -> Optional[ast.AST]:
35 """
36 Returns real number's parent.
37
38 What can go wrong?
39
40 1. Number can be negative: ``x = -1``,
41 so ``1`` has ``UnaryOp`` as parent, but should return ``Assign``
42
43 """
44 parent = getattr(node, 'parent', None)
45 if isinstance(parent, self._PROXY_PARENTS):
46 return self._get_real_parent(parent)
47 return parent
48
49 def _check_is_magic(self, node: ast.Num) -> None:
50 parent = self._get_real_parent(node)
51 if isinstance(parent, self._ALLOWED_PARENTS):
52 return
53
54 if node.n in MAGIC_NUMBERS_WHITELIST:
55 return
56
57 if isinstance(node.n, int) and node.n <= 10:
58 return
59
60 self.add_violation(MagicNumberViolation(node, text=str(node.n)))
61
62 def visit_Num(self, node: ast.Num) -> None:
63 """
64 Checks numbers not to be magic constants inside the code.
65
66 Raises:
67 MagicNumberViolation
68
69 """
70 self._check_is_magic(node)
71 self.generic_visit(node)
72
[end of wemake_python_styleguide/visitors/ast/numbers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wemake_python_styleguide/visitors/ast/numbers.py b/wemake_python_styleguide/visitors/ast/numbers.py
--- a/wemake_python_styleguide/visitors/ast/numbers.py
+++ b/wemake_python_styleguide/visitors/ast/numbers.py
@@ -27,6 +27,7 @@
ast.Tuple,
)
+ # TODO: make consistent naming rules for class attributes:
_PROXY_PARENTS = (
ast.UnaryOp,
)
| {"golden_diff": "diff --git a/wemake_python_styleguide/visitors/ast/numbers.py b/wemake_python_styleguide/visitors/ast/numbers.py\n--- a/wemake_python_styleguide/visitors/ast/numbers.py\n+++ b/wemake_python_styleguide/visitors/ast/numbers.py\n@@ -27,6 +27,7 @@\n ast.Tuple,\n )\n \n+ # TODO: make consistent naming rules for class attributes:\n _PROXY_PARENTS = (\n ast.UnaryOp,\n )\n", "issue": "Fix documentation main page's header\nThe header is gone:\r\n<img width=\"1032\" alt=\"2018-10-03 0 18 01\" src=\"https://user-images.githubusercontent.com/4660275/46377643-d0ce1080-c6a1-11e8-950b-d2d0c515dee1.png\">\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport ast\nfrom typing import Optional\n\nfrom wemake_python_styleguide.constants import MAGIC_NUMBERS_WHITELIST\nfrom wemake_python_styleguide.violations.best_practices import (\n MagicNumberViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseNodeVisitor\n\n\nclass MagicNumberVisitor(BaseNodeVisitor):\n \"\"\"Checks magic numbers used in the code.\"\"\"\n\n _ALLOWED_PARENTS = (\n ast.Assign,\n\n # Constructor usages:\n ast.FunctionDef,\n ast.arguments,\n\n # Primitives:\n ast.List,\n ast.Dict,\n ast.Set,\n ast.Tuple,\n )\n\n _PROXY_PARENTS = (\n ast.UnaryOp,\n )\n\n def _get_real_parent(self, node: Optional[ast.AST]) -> Optional[ast.AST]:\n \"\"\"\n Returns real number's parent.\n\n What can go wrong?\n\n 1. Number can be negative: ``x = -1``,\n so ``1`` has ``UnaryOp`` as parent, but should return ``Assign``\n\n \"\"\"\n parent = getattr(node, 'parent', None)\n if isinstance(parent, self._PROXY_PARENTS):\n return self._get_real_parent(parent)\n return parent\n\n def _check_is_magic(self, node: ast.Num) -> None:\n parent = self._get_real_parent(node)\n if isinstance(parent, self._ALLOWED_PARENTS):\n return\n\n if node.n in MAGIC_NUMBERS_WHITELIST:\n return\n\n if isinstance(node.n, int) and node.n <= 10:\n return\n\n self.add_violation(MagicNumberViolation(node, text=str(node.n)))\n\n def visit_Num(self, node: ast.Num) -> None:\n \"\"\"\n Checks numbers not to be magic constants inside the code.\n\n Raises:\n MagicNumberViolation\n\n \"\"\"\n self._check_is_magic(node)\n self.generic_visit(node)\n", "path": "wemake_python_styleguide/visitors/ast/numbers.py"}]} | 1,219 | 118 |
gh_patches_debug_32176 | rasdani/github-patches | git_diff | Qiskit__qiskit-2288 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error when number of qubits is of type numpy.int64
<!-- ⚠️ If you do not respect this template, your issue will be closed -->
<!-- ⚠️ Make sure to browse the opened and closed issues to confirm this idea does not exist. -->
### What is the expected enhancement?
In `qiskit/validation/base.py`, function `check_types`: currently, if `n_qubits` or `memory_slots` are of type `numpy.int64`, then an error is triggered, because type `int` is expected.
I find it too strict. Especially considering that if the number of qubits is originated in a `numpy` array, then its default type is `numpy.int64`. Terra can allow additional types, or convert the type internally.
</issue>
<code>
[start of qiskit/circuit/register.py]
1 # -*- coding: utf-8 -*-
2
3 # This code is part of Qiskit.
4 #
5 # (C) Copyright IBM 2017.
6 #
7 # This code is licensed under the Apache License, Version 2.0. You may
8 # obtain a copy of this license in the LICENSE.txt file in the root directory
9 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
10 #
11 # Any modifications or derivative works of this code must retain this
12 # copyright notice, and modified files need to carry a notice indicating
13 # that they have been altered from the originals.
14
15 """
16 Base register reference object.
17 """
18 import re
19 import logging
20 import itertools
21
22 from qiskit.exceptions import QiskitError, QiskitIndexError
23
24 logger = logging.getLogger(__name__)
25
26
27 class Register:
28 """Implement a generic register."""
29
30 # Counter for the number of instances in this class.
31 instances_counter = itertools.count()
32 # Prefix to use for auto naming.
33 prefix = 'reg'
34
35 def __init__(self, size, name=None):
36 """Create a new generic register.
37 """
38
39 if name is None:
40 name = '%s%i' % (self.prefix, next(self.instances_counter))
41
42 if not isinstance(name, str):
43 raise QiskitError("The circuit name should be a string "
44 "(or None for autogenerate a name).")
45
46 test = re.compile('[a-z][a-zA-Z0-9_]*')
47 if test.match(name) is None:
48 raise QiskitError("%s is an invalid OPENQASM register name." % name)
49
50 self.name = name
51 self.size = size
52 if size <= 0:
53 raise QiskitError("register size must be positive")
54
55 def __repr__(self):
56 """Return the official string representing the register."""
57 return "%s(%d, '%s')" % (self.__class__.__qualname__,
58 self.size, self.name)
59
60 def __len__(self):
61 """Return register size"""
62 return self.size
63
64 def check_range(self, j):
65 """Check that j is a valid index into self."""
66 if isinstance(j, int):
67 if j < 0 or j >= self.size:
68 raise QiskitIndexError("register index out of range")
69 elif isinstance(j, slice):
70 if j.start < 0 or j.stop >= self.size or (j.step is not None and
71 j.step <= 0):
72 raise QiskitIndexError("register index slice out of range")
73
74 def __getitem__(self, key):
75 """
76 Arg:
77 key (int|slice|list): index of the bit/qubit to be retrieved.
78
79 Returns:
80 tuple[Register, int]: a tuple in the form `(self, key)` if key is int.
81 If key is a slice, return a `list((self,key))`.
82
83 Raises:
84 QiskitError: if the `key` is not an integer.
85 QiskitIndexError: if the `key` is not in the range
86 `(0, self.size)`.
87 """
88 if not isinstance(key, (int, slice, list)):
89 raise QiskitError("expected integer or slice index into register")
90 if isinstance(key, int) and key < 0:
91 key = self.size + key
92 self.check_range(key)
93 if isinstance(key, slice):
94 return [(self, ind) for ind in range(*key.indices(len(self)))]
95 elif isinstance(key, list): # list of qubit indices
96 if max(key) < len(self):
97 return [(self, ind) for ind in key]
98 else:
99 raise QiskitError('register index out of range')
100 else:
101 return self, key
102
103 def __iter__(self):
104 """
105 Returns:
106 iterator: an iterator over the bits/qubits of the register, in the
107 form `tuple (Register, int)`.
108 """
109 return zip([self]*self.size, range(self.size))
110
111 def __eq__(self, other):
112 """Two Registers are the same if they are of the same type
113 (i.e. quantum/classical), and have the same name and size.
114
115 Args:
116 other (Register): other Register
117
118 Returns:
119 bool: are self and other equal.
120 """
121 res = False
122 if type(self) is type(other) and \
123 self.name == other.name and \
124 self.size == other.size:
125 res = True
126 return res
127
128 def __hash__(self):
129 """Make object hashable, based on the name and size to hash."""
130 return hash((type(self), self.name, self.size))
131
[end of qiskit/circuit/register.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/qiskit/circuit/register.py b/qiskit/circuit/register.py
--- a/qiskit/circuit/register.py
+++ b/qiskit/circuit/register.py
@@ -36,21 +36,29 @@
"""Create a new generic register.
"""
+ # validate (or cast) size
+ try:
+ size = int(size)
+ except Exception:
+ raise QiskitError("size needs to be castable to an int")
+ if size <= 0:
+ raise QiskitError("register size must be positive")
+
+ # validate (or cast) name
if name is None:
name = '%s%i' % (self.prefix, next(self.instances_counter))
-
- if not isinstance(name, str):
- raise QiskitError("The circuit name should be a string "
- "(or None for autogenerate a name).")
-
- test = re.compile('[a-z][a-zA-Z0-9_]*')
- if test.match(name) is None:
- raise QiskitError("%s is an invalid OPENQASM register name." % name)
+ else:
+ try:
+ name = str(name)
+ except Exception:
+ raise QiskitError("The circuit name should be castable to a string "
+ "(or None for autogenerate a name).")
+ name_format = re.compile('[a-z][a-zA-Z0-9_]*')
+ if name_format.match(name) is None:
+ raise QiskitError("%s is an invalid OPENQASM register name." % name)
self.name = name
self.size = size
- if size <= 0:
- raise QiskitError("register size must be positive")
def __repr__(self):
"""Return the official string representing the register."""
@@ -106,7 +114,7 @@
iterator: an iterator over the bits/qubits of the register, in the
form `tuple (Register, int)`.
"""
- return zip([self]*self.size, range(self.size))
+ return zip([self] * self.size, range(self.size))
def __eq__(self, other):
"""Two Registers are the same if they are of the same type
| {"golden_diff": "diff --git a/qiskit/circuit/register.py b/qiskit/circuit/register.py\n--- a/qiskit/circuit/register.py\n+++ b/qiskit/circuit/register.py\n@@ -36,21 +36,29 @@\n \"\"\"Create a new generic register.\n \"\"\"\n \n+ # validate (or cast) size\n+ try:\n+ size = int(size)\n+ except Exception:\n+ raise QiskitError(\"size needs to be castable to an int\")\n+ if size <= 0:\n+ raise QiskitError(\"register size must be positive\")\n+\n+ # validate (or cast) name\n if name is None:\n name = '%s%i' % (self.prefix, next(self.instances_counter))\n-\n- if not isinstance(name, str):\n- raise QiskitError(\"The circuit name should be a string \"\n- \"(or None for autogenerate a name).\")\n-\n- test = re.compile('[a-z][a-zA-Z0-9_]*')\n- if test.match(name) is None:\n- raise QiskitError(\"%s is an invalid OPENQASM register name.\" % name)\n+ else:\n+ try:\n+ name = str(name)\n+ except Exception:\n+ raise QiskitError(\"The circuit name should be castable to a string \"\n+ \"(or None for autogenerate a name).\")\n+ name_format = re.compile('[a-z][a-zA-Z0-9_]*')\n+ if name_format.match(name) is None:\n+ raise QiskitError(\"%s is an invalid OPENQASM register name.\" % name)\n \n self.name = name\n self.size = size\n- if size <= 0:\n- raise QiskitError(\"register size must be positive\")\n \n def __repr__(self):\n \"\"\"Return the official string representing the register.\"\"\"\n@@ -106,7 +114,7 @@\n iterator: an iterator over the bits/qubits of the register, in the\n form `tuple (Register, int)`.\n \"\"\"\n- return zip([self]*self.size, range(self.size))\n+ return zip([self] * self.size, range(self.size))\n \n def __eq__(self, other):\n \"\"\"Two Registers are the same if they are of the same type\n", "issue": "Error when number of qubits is of type numpy.int64\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues to confirm this idea does not exist. -->\r\n\r\n### What is the expected enhancement?\r\n\r\nIn `qiskit/validation/base.py`, function `check_types`: currently, if `n_qubits` or `memory_slots` are of type `numpy.int64`, then an error is triggered, because type `int` is expected.\r\n\r\nI find it too strict. Especially considering that if the number of qubits is originated in a `numpy` array, then its default type is `numpy.int64`. Terra can allow additional types, or convert the type internally.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017.\n#\n# This code is licensed under the Apache License, Version 2.0. 
You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"\nBase register reference object.\n\"\"\"\nimport re\nimport logging\nimport itertools\n\nfrom qiskit.exceptions import QiskitError, QiskitIndexError\n\nlogger = logging.getLogger(__name__)\n\n\nclass Register:\n \"\"\"Implement a generic register.\"\"\"\n\n # Counter for the number of instances in this class.\n instances_counter = itertools.count()\n # Prefix to use for auto naming.\n prefix = 'reg'\n\n def __init__(self, size, name=None):\n \"\"\"Create a new generic register.\n \"\"\"\n\n if name is None:\n name = '%s%i' % (self.prefix, next(self.instances_counter))\n\n if not isinstance(name, str):\n raise QiskitError(\"The circuit name should be a string \"\n \"(or None for autogenerate a name).\")\n\n test = re.compile('[a-z][a-zA-Z0-9_]*')\n if test.match(name) is None:\n raise QiskitError(\"%s is an invalid OPENQASM register name.\" % name)\n\n self.name = name\n self.size = size\n if size <= 0:\n raise QiskitError(\"register size must be positive\")\n\n def __repr__(self):\n \"\"\"Return the official string representing the register.\"\"\"\n return \"%s(%d, '%s')\" % (self.__class__.__qualname__,\n self.size, self.name)\n\n def __len__(self):\n \"\"\"Return register size\"\"\"\n return self.size\n\n def check_range(self, j):\n \"\"\"Check that j is a valid index into self.\"\"\"\n if isinstance(j, int):\n if j < 0 or j >= self.size:\n raise QiskitIndexError(\"register index out of range\")\n elif isinstance(j, slice):\n if j.start < 0 or j.stop >= self.size or (j.step is not None and\n j.step <= 0):\n raise QiskitIndexError(\"register index slice out of range\")\n\n def __getitem__(self, key):\n \"\"\"\n Arg:\n key (int|slice|list): index of the bit/qubit to be retrieved.\n\n Returns:\n tuple[Register, int]: a tuple in the form `(self, key)` if key is int.\n If key is a slice, return a `list((self,key))`.\n\n Raises:\n QiskitError: if the `key` is not an integer.\n QiskitIndexError: if the `key` is not in the range\n `(0, self.size)`.\n \"\"\"\n if not isinstance(key, (int, slice, list)):\n raise QiskitError(\"expected integer or slice index into register\")\n if isinstance(key, int) and key < 0:\n key = self.size + key\n self.check_range(key)\n if isinstance(key, slice):\n return [(self, ind) for ind in range(*key.indices(len(self)))]\n elif isinstance(key, list): # list of qubit indices\n if max(key) < len(self):\n return [(self, ind) for ind in key]\n else:\n raise QiskitError('register index out of range')\n else:\n return self, key\n\n def __iter__(self):\n \"\"\"\n Returns:\n iterator: an iterator over the bits/qubits of the register, in the\n form `tuple (Register, int)`.\n \"\"\"\n return zip([self]*self.size, range(self.size))\n\n def __eq__(self, other):\n \"\"\"Two Registers are the same if they are of the same type\n (i.e. 
quantum/classical), and have the same name and size.\n\n Args:\n other (Register): other Register\n\n Returns:\n bool: are self and other equal.\n \"\"\"\n res = False\n if type(self) is type(other) and \\\n self.name == other.name and \\\n self.size == other.size:\n res = True\n return res\n\n def __hash__(self):\n \"\"\"Make object hashable, based on the name and size to hash.\"\"\"\n return hash((type(self), self.name, self.size))\n", "path": "qiskit/circuit/register.py"}]} | 2,002 | 510 |
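The behavioral change in this record is easiest to see as a small cast-first validation sketch; this is plain Python mirroring the patched `Register.__init__`, with generic exception types standing in for `QiskitError`:

```python
import numpy as np

def validate_size(size):
    # Mirrors the patch: cast to int first, then range-check.
    try:
        size = int(size)
    except Exception:
        raise TypeError("size needs to be castable to an int")
    if size <= 0:
        raise ValueError("register size must be positive")
    return size

print(validate_size(np.int64(5)))  # 5 -- numpy integer sizes are now accepted
print(validate_size(3))            # 3 -- plain ints still work unchanged
```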
gh_patches_debug_40866 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-5273 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
</issue>
<code>
[start of colossalai/kernel/triton/kvcache_copy.py]
1 import torch
2 import triton
3 import triton.language as tl
4
5
6 # Triton 2.1.0
7 @triton.jit
8 def _copy_to_kvcache_seqlen1_kernel(
9 KV, # K or V
10 KVCache, # KCache or VCache
11 BLOCK_TABLES,
12 context_lengths,
13 stride_kt,
14 stride_kh,
15 stride_kd,
16 stride_cacheb,
17 stride_cacheh,
18 stride_cached,
19 stride_cachebs,
20 stride_bts,
21 stride_btb,
22 block_size,
23 HEAD_DIM: tl.constexpr,
24 ):
25 cur_seq_idx = tl.program_id(0)
26 cur_kv_head_idx = tl.program_id(1)
27
28 cur_kv_seq_len = tl.load(context_lengths + cur_seq_idx)
29 last_bt_block_idx = cur_kv_seq_len // block_size
30 block_table_ptr = BLOCK_TABLES + cur_seq_idx * stride_bts
31 block_id = tl.load(block_table_ptr + last_bt_block_idx * stride_btb)
32 offsets_in_last_block = (cur_kv_seq_len % block_size) * stride_cachebs
33 offsets_dmodel = tl.arange(0, HEAD_DIM)
34 offsets_kv = cur_seq_idx * stride_kt + cur_kv_head_idx * stride_kh + offsets_dmodel * stride_kd
35 kv = tl.load(KV + offsets_kv)
36 offsets_kvcache = (
37 block_id * stride_cacheb
38 + cur_kv_head_idx * stride_cacheh
39 + offsets_dmodel * stride_cached
40 + offsets_in_last_block
41 )
42 tl.store(KVCache + offsets_kvcache, kv)
43 return
44
45
46 # Used with blocked kv cache.
47 # Copy k or v to block k/v cache during decoding stage
48 def copy_kv_to_blocked_cache(
49 k: torch.Tensor, # [bsz, 1, num_kv_heads, head_dim], k or v during decoding stage
50 k_cache: torch.Tensor, # [num_blocks, num_kv_heads, head_dim, block_size], blocked k or v cache (for now, the shapes of them are the same)
51 context_lengths: torch.Tensor, # [bsz], past kv seq len (not incorporating the current kv of length 1)
52 block_tables: torch.Tensor, # [bsz, max_blocks_per_sequence]
53 ):
54 assert k.dim() == 4, "Unsupported shape of k (supposed to be used for decoding stage)"
55 assert k.size(1) == 1, "Unsupported kv seq len (supposed to be used for decoding stage)"
56 assert k.size(-1) == k_cache.size(-2), "Incompatible head dim"
57 assert k.dtype == k_cache.dtype, "Expected consistent dtype for tensor and cache."
58 bsz, _, num_kv_heads, head_dim = k.shape
59 assert context_lengths.shape[0] == block_tables.shape[0] == bsz, (
60 f"Got incompatible batch size (number of seqs):\n"
61 f" Conext lengths bsz {context_lengths.shape[0]}, Block tables bsz {block_tables.shape[0]}, "
62 f"batch size {bsz}"
63 )
64
65 # Modify if the shape of kv cahce is changed.
66 block_size = k_cache.size(-1)
67 # [bsz, 1, num_kv_heads, head_dim] -> [bsz, num_kv_heads, head_dim]
68 k = k.squeeze(dim=1)
69
70 num_warps = 8 if head_dim > 128 else 4
71
72 grid = (bsz, num_kv_heads)
73 _copy_to_kvcache_seqlen1_kernel[grid](
74 k,
75 k_cache,
76 block_tables,
77 context_lengths,
78 k.stride(0),
79 k.stride(1),
80 k.stride(2),
81 k_cache.stride(0),
82 k_cache.stride(1),
83 k_cache.stride(2),
84 k_cache.stride(3),
85 block_tables.stride(0),
86 block_tables.stride(1),
87 block_size,
88 HEAD_DIM=head_dim,
89 num_warps=num_warps,
90 )
91
[end of colossalai/kernel/triton/kvcache_copy.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/colossalai/kernel/triton/kvcache_copy.py b/colossalai/kernel/triton/kvcache_copy.py
--- a/colossalai/kernel/triton/kvcache_copy.py
+++ b/colossalai/kernel/triton/kvcache_copy.py
@@ -25,11 +25,11 @@
cur_seq_idx = tl.program_id(0)
cur_kv_head_idx = tl.program_id(1)
- cur_kv_seq_len = tl.load(context_lengths + cur_seq_idx)
- last_bt_block_idx = cur_kv_seq_len // block_size
+ past_kv_seq_len = tl.load(context_lengths + cur_seq_idx) - 1
+ last_bt_block_idx = past_kv_seq_len // block_size
block_table_ptr = BLOCK_TABLES + cur_seq_idx * stride_bts
block_id = tl.load(block_table_ptr + last_bt_block_idx * stride_btb)
- offsets_in_last_block = (cur_kv_seq_len % block_size) * stride_cachebs
+ offsets_in_last_block = (past_kv_seq_len % block_size) * stride_cachebs
offsets_dmodel = tl.arange(0, HEAD_DIM)
offsets_kv = cur_seq_idx * stride_kt + cur_kv_head_idx * stride_kh + offsets_dmodel * stride_kd
kv = tl.load(KV + offsets_kv)
@@ -43,23 +43,30 @@
return
-# Used with blocked kv cache.
-# Copy k or v to block k/v cache during decoding stage
def copy_kv_to_blocked_cache(
- k: torch.Tensor, # [bsz, 1, num_kv_heads, head_dim], k or v during decoding stage
- k_cache: torch.Tensor, # [num_blocks, num_kv_heads, head_dim, block_size], blocked k or v cache (for now, the shapes of them are the same)
- context_lengths: torch.Tensor, # [bsz], past kv seq len (not incorporating the current kv of length 1)
- block_tables: torch.Tensor, # [bsz, max_blocks_per_sequence]
+ k: torch.Tensor,
+ k_cache: torch.Tensor,
+ kv_lengths: torch.Tensor,
+ block_tables: torch.Tensor,
):
+ """
+ Copy keys or values to the blocked key/value cache during decoding stage.
+
+ Parameters:
+ - k (torch.Tensor): [bsz, 1, num_kv_heads, head_dim] - Keys or values during decoding with seq len 1.
+ - k_cache (torch.Tensor): [num_blocks, num_kv_heads, head_dim, block_size] - Blocked key or value cache.
+ - kv_lengths (torch.Tensor): [bsz] - Past key/value sequence lengths plus current sequence length for each sequence.
+ - block_tables (torch.Tensor): [bsz, max_blocks_per_sequence] - Block tables for each sequence.
+ """
assert k.dim() == 4, "Unsupported shape of k (supposed to be used for decoding stage)"
assert k.size(1) == 1, "Unsupported kv seq len (supposed to be used for decoding stage)"
assert k.size(-1) == k_cache.size(-2), "Incompatible head dim"
assert k.dtype == k_cache.dtype, "Expected consistent dtype for tensor and cache."
bsz, _, num_kv_heads, head_dim = k.shape
- assert context_lengths.shape[0] == block_tables.shape[0] == bsz, (
+ assert kv_lengths.shape[0] == block_tables.shape[0] == bsz, (
f"Got incompatible batch size (number of seqs):\n"
- f" Conext lengths bsz {context_lengths.shape[0]}, Block tables bsz {block_tables.shape[0]}, "
- f"batch size {bsz}"
+ f" Past kv sequence lengths bsz {kv_lengths.shape[0]}; "
+ f" block tables bsz {block_tables.shape[0]}, input k batch size {bsz}"
)
# Modify if the shape of kv cahce is changed.
@@ -74,7 +81,7 @@
k,
k_cache,
block_tables,
- context_lengths,
+ kv_lengths,
k.stride(0),
k.stride(1),
k.stride(2),
| {"golden_diff": "diff --git a/colossalai/kernel/triton/kvcache_copy.py b/colossalai/kernel/triton/kvcache_copy.py\n--- a/colossalai/kernel/triton/kvcache_copy.py\n+++ b/colossalai/kernel/triton/kvcache_copy.py\n@@ -25,11 +25,11 @@\n cur_seq_idx = tl.program_id(0)\n cur_kv_head_idx = tl.program_id(1)\n \n- cur_kv_seq_len = tl.load(context_lengths + cur_seq_idx)\n- last_bt_block_idx = cur_kv_seq_len // block_size\n+ past_kv_seq_len = tl.load(context_lengths + cur_seq_idx) - 1\n+ last_bt_block_idx = past_kv_seq_len // block_size\n block_table_ptr = BLOCK_TABLES + cur_seq_idx * stride_bts\n block_id = tl.load(block_table_ptr + last_bt_block_idx * stride_btb)\n- offsets_in_last_block = (cur_kv_seq_len % block_size) * stride_cachebs\n+ offsets_in_last_block = (past_kv_seq_len % block_size) * stride_cachebs\n offsets_dmodel = tl.arange(0, HEAD_DIM)\n offsets_kv = cur_seq_idx * stride_kt + cur_kv_head_idx * stride_kh + offsets_dmodel * stride_kd\n kv = tl.load(KV + offsets_kv)\n@@ -43,23 +43,30 @@\n return\n \n \n-# Used with blocked kv cache.\n-# Copy k or v to block k/v cache during decoding stage\n def copy_kv_to_blocked_cache(\n- k: torch.Tensor, # [bsz, 1, num_kv_heads, head_dim], k or v during decoding stage\n- k_cache: torch.Tensor, # [num_blocks, num_kv_heads, head_dim, block_size], blocked k or v cache (for now, the shapes of them are the same)\n- context_lengths: torch.Tensor, # [bsz], past kv seq len (not incorporating the current kv of length 1)\n- block_tables: torch.Tensor, # [bsz, max_blocks_per_sequence]\n+ k: torch.Tensor,\n+ k_cache: torch.Tensor,\n+ kv_lengths: torch.Tensor,\n+ block_tables: torch.Tensor,\n ):\n+ \"\"\"\n+ Copy keys or values to the blocked key/value cache during decoding stage.\n+\n+ Parameters:\n+ - k (torch.Tensor): [bsz, 1, num_kv_heads, head_dim] - Keys or values during decoding with seq len 1.\n+ - k_cache (torch.Tensor): [num_blocks, num_kv_heads, head_dim, block_size] - Blocked key or value cache.\n+ - kv_lengths (torch.Tensor): [bsz] - Past key/value sequence lengths plus current sequence length for each sequence.\n+ - block_tables (torch.Tensor): [bsz, max_blocks_per_sequence] - Block tables for each sequence.\n+ \"\"\"\n assert k.dim() == 4, \"Unsupported shape of k (supposed to be used for decoding stage)\"\n assert k.size(1) == 1, \"Unsupported kv seq len (supposed to be used for decoding stage)\"\n assert k.size(-1) == k_cache.size(-2), \"Incompatible head dim\"\n assert k.dtype == k_cache.dtype, \"Expected consistent dtype for tensor and cache.\"\n bsz, _, num_kv_heads, head_dim = k.shape\n- assert context_lengths.shape[0] == block_tables.shape[0] == bsz, (\n+ assert kv_lengths.shape[0] == block_tables.shape[0] == bsz, (\n f\"Got incompatible batch size (number of seqs):\\n\"\n- f\" Conext lengths bsz {context_lengths.shape[0]}, Block tables bsz {block_tables.shape[0]}, \"\n- f\"batch size {bsz}\"\n+ f\" Past kv sequence lengths bsz {kv_lengths.shape[0]}; \"\n+ f\" block tables bsz {block_tables.shape[0]}, input k batch size {bsz}\"\n )\n \n # Modify if the shape of kv cahce is changed.\n@@ -74,7 +81,7 @@\n k,\n k_cache,\n block_tables,\n- context_lengths,\n+ kv_lengths,\n k.stride(0),\n k.stride(1),\n k.stride(2),\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import torch\nimport triton\nimport triton.language as tl\n\n\n# Triton 2.1.0\[email protected]\ndef _copy_to_kvcache_seqlen1_kernel(\n KV, # K or V\n KVCache, # KCache or VCache\n BLOCK_TABLES,\n 
context_lengths,\n stride_kt,\n stride_kh,\n stride_kd,\n stride_cacheb,\n stride_cacheh,\n stride_cached,\n stride_cachebs,\n stride_bts,\n stride_btb,\n block_size,\n HEAD_DIM: tl.constexpr,\n):\n cur_seq_idx = tl.program_id(0)\n cur_kv_head_idx = tl.program_id(1)\n\n cur_kv_seq_len = tl.load(context_lengths + cur_seq_idx)\n last_bt_block_idx = cur_kv_seq_len // block_size\n block_table_ptr = BLOCK_TABLES + cur_seq_idx * stride_bts\n block_id = tl.load(block_table_ptr + last_bt_block_idx * stride_btb)\n offsets_in_last_block = (cur_kv_seq_len % block_size) * stride_cachebs\n offsets_dmodel = tl.arange(0, HEAD_DIM)\n offsets_kv = cur_seq_idx * stride_kt + cur_kv_head_idx * stride_kh + offsets_dmodel * stride_kd\n kv = tl.load(KV + offsets_kv)\n offsets_kvcache = (\n block_id * stride_cacheb\n + cur_kv_head_idx * stride_cacheh\n + offsets_dmodel * stride_cached\n + offsets_in_last_block\n )\n tl.store(KVCache + offsets_kvcache, kv)\n return\n\n\n# Used with blocked kv cache.\n# Copy k or v to block k/v cache during decoding stage\ndef copy_kv_to_blocked_cache(\n k: torch.Tensor, # [bsz, 1, num_kv_heads, head_dim], k or v during decoding stage\n k_cache: torch.Tensor, # [num_blocks, num_kv_heads, head_dim, block_size], blocked k or v cache (for now, the shapes of them are the same)\n context_lengths: torch.Tensor, # [bsz], past kv seq len (not incorporating the current kv of length 1)\n block_tables: torch.Tensor, # [bsz, max_blocks_per_sequence]\n):\n assert k.dim() == 4, \"Unsupported shape of k (supposed to be used for decoding stage)\"\n assert k.size(1) == 1, \"Unsupported kv seq len (supposed to be used for decoding stage)\"\n assert k.size(-1) == k_cache.size(-2), \"Incompatible head dim\"\n assert k.dtype == k_cache.dtype, \"Expected consistent dtype for tensor and cache.\"\n bsz, _, num_kv_heads, head_dim = k.shape\n assert context_lengths.shape[0] == block_tables.shape[0] == bsz, (\n f\"Got incompatible batch size (number of seqs):\\n\"\n f\" Conext lengths bsz {context_lengths.shape[0]}, Block tables bsz {block_tables.shape[0]}, \"\n f\"batch size {bsz}\"\n )\n\n # Modify if the shape of kv cahce is changed.\n block_size = k_cache.size(-1)\n # [bsz, 1, num_kv_heads, head_dim] -> [bsz, num_kv_heads, head_dim]\n k = k.squeeze(dim=1)\n\n num_warps = 8 if head_dim > 128 else 4\n\n grid = (bsz, num_kv_heads)\n _copy_to_kvcache_seqlen1_kernel[grid](\n k,\n k_cache,\n block_tables,\n context_lengths,\n k.stride(0),\n k.stride(1),\n k.stride(2),\n k_cache.stride(0),\n k_cache.stride(1),\n k_cache.stride(2),\n k_cache.stride(3),\n block_tables.stride(0),\n block_tables.stride(1),\n block_size,\n HEAD_DIM=head_dim,\n num_warps=num_warps,\n )\n", "path": "colossalai/kernel/triton/kvcache_copy.py"}]} | 1,626 | 949 |
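The heart of this fix is the `- 1` applied when locating the cache slot for the current token; a pure-Python sketch of the index arithmetic, with an illustrative block size of 16:

```python
# Block/offset math from the kernel, before vs. after the fix.
block_size = 16
kv_len = 16  # past kv length plus the current token

# before: points one slot past the data (block 1, offset 0)
old_block, old_offset = kv_len // block_size, kv_len % block_size

# after: the current token lands in the last slot of block 0
past_kv_seq_len = kv_len - 1
new_block, new_offset = past_kv_seq_len // block_size, past_kv_seq_len % block_size

print(old_block, old_offset)  # 1 0
print(new_block, new_offset)  # 0 15
```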
gh_patches_debug_14637 | rasdani/github-patches | git_diff | googleapis__google-auth-library-python-124 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add public properties to google.oauth2.credentials.Credentials
Resolves #124
</issue>
<code>
[start of google/oauth2/credentials.py]
1 # Copyright 2016 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """OAuth 2.0 Credentials.
16
17 This module provides credentials based on OAuth 2.0 access and refresh tokens.
18 These credentials usually access resources on behalf of a user (resource
19 owner).
20
21 Specifically, this is intended to use access tokens acquired using the
22 `Authorization Code grant`_ and can refresh those tokens using a
23 optional `refresh token`_.
24
25 Obtaining the initial access and refresh token is outside of the scope of this
26 module. Consult `rfc6749 section 4.1`_ for complete details on the
27 Authorization Code grant flow.
28
29 .. _Authorization Code grant: https://tools.ietf.org/html/rfc6749#section-1.3.1
30 .. _refresh token: https://tools.ietf.org/html/rfc6749#section-6
31 .. _rfc6749 section 4.1: https://tools.ietf.org/html/rfc6749#section-4.1
32 """
33
34 from google.auth import _helpers
35 from google.auth import credentials
36 from google.oauth2 import _client
37
38
39 class Credentials(credentials.Scoped, credentials.Credentials):
40 """Credentials using OAuth 2.0 access and refresh tokens."""
41
42 def __init__(self, token, refresh_token=None, token_uri=None,
43 client_id=None, client_secret=None, scopes=None):
44 """
45 Args:
46 token (Optional(str)): The OAuth 2.0 access token. Can be None
47 if refresh information is provided.
48 refresh_token (str): The OAuth 2.0 refresh token. If specified,
49 credentials can be refreshed.
50 token_uri (str): The OAuth 2.0 authorization server's token
51 endpoint URI. Must be specified for refresh, can be left as
52 None if the token can not be refreshed.
53 client_id (str): The OAuth 2.0 client ID. Must be specified for
54 refresh, can be left as None if the token can not be refreshed.
55 client_secret(str): The OAuth 2.0 client secret. Must be specified
56 for refresh, can be left as None if the token can not be
57 refreshed.
58 scopes (Sequence[str]): The scopes that were originally used
59 to obtain authorization. This is a purely informative parameter
60 that can be used by :meth:`has_scopes`. OAuth 2.0 credentials
61 can not request additional scopes after authorization.
62 """
63 super(Credentials, self).__init__()
64 self.token = token
65 self._refresh_token = refresh_token
66 self._scopes = scopes
67 self._token_uri = token_uri
68 self._client_id = client_id
69 self._client_secret = client_secret
70
71 @property
72 def requires_scopes(self):
73 """False: OAuth 2.0 credentials have their scopes set when
74 the initial token is requested and can not be changed."""
75 return False
76
77 def with_scopes(self, scopes):
78 """Unavailable, OAuth 2.0 credentials can not be re-scoped.
79
80 OAuth 2.0 credentials have their scopes set when the initial token is
81 requested and can not be changed.
82 """
83 raise NotImplementedError(
84 'OAuth 2.0 Credentials can not modify their scopes.')
85
86 @_helpers.copy_docstring(credentials.Credentials)
87 def refresh(self, request):
88 access_token, refresh_token, expiry, _ = _client.refresh_grant(
89 request, self._token_uri, self._refresh_token, self._client_id,
90 self._client_secret)
91
92 self.token = access_token
93 self.expiry = expiry
94 self._refresh_token = refresh_token
95
[end of google/oauth2/credentials.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/google/oauth2/credentials.py b/google/oauth2/credentials.py
--- a/google/oauth2/credentials.py
+++ b/google/oauth2/credentials.py
@@ -68,6 +68,27 @@
self._client_id = client_id
self._client_secret = client_secret
+ @property
+ def refresh_token(self):
+ """Optional[str]: The OAuth 2.0 refresh token."""
+ return self._refresh_token
+
+ @property
+ def token_uri(self):
+ """Optional[str]: The OAuth 2.0 authorization server's token endpoint
+ URI."""
+ return self._token_uri
+
+ @property
+ def client_id(self):
+ """Optional[str]: The OAuth 2.0 client ID."""
+ return self._client_id
+
+ @property
+ def client_secret(self):
+ """Optional[str]: The OAuth 2.0 client secret."""
+ return self._client_secret
+
@property
def requires_scopes(self):
"""False: OAuth 2.0 credentials have their scopes set when
| {"golden_diff": "diff --git a/google/oauth2/credentials.py b/google/oauth2/credentials.py\n--- a/google/oauth2/credentials.py\n+++ b/google/oauth2/credentials.py\n@@ -68,6 +68,27 @@\n self._client_id = client_id\n self._client_secret = client_secret\n \n+ @property\n+ def refresh_token(self):\n+ \"\"\"Optional[str]: The OAuth 2.0 refresh token.\"\"\"\n+ return self._refresh_token\n+\n+ @property\n+ def token_uri(self):\n+ \"\"\"Optional[str]: The OAuth 2.0 authorization server's token endpoint\n+ URI.\"\"\"\n+ return self._token_uri\n+\n+ @property\n+ def client_id(self):\n+ \"\"\"Optional[str]: The OAuth 2.0 client ID.\"\"\"\n+ return self._client_id\n+\n+ @property\n+ def client_secret(self):\n+ \"\"\"Optional[str]: The OAuth 2.0 client secret.\"\"\"\n+ return self._client_secret\n+\n @property\n def requires_scopes(self):\n \"\"\"False: OAuth 2.0 credentials have their scopes set when\n", "issue": "Add public properties to google.oauth2.credentials.Credentials\nResolves #124 \n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"OAuth 2.0 Credentials.\n\nThis module provides credentials based on OAuth 2.0 access and refresh tokens.\nThese credentials usually access resources on behalf of a user (resource\nowner).\n\nSpecifically, this is intended to use access tokens acquired using the\n`Authorization Code grant`_ and can refresh those tokens using a\noptional `refresh token`_.\n\nObtaining the initial access and refresh token is outside of the scope of this\nmodule. Consult `rfc6749 section 4.1`_ for complete details on the\nAuthorization Code grant flow.\n\n.. _Authorization Code grant: https://tools.ietf.org/html/rfc6749#section-1.3.1\n.. _refresh token: https://tools.ietf.org/html/rfc6749#section-6\n.. _rfc6749 section 4.1: https://tools.ietf.org/html/rfc6749#section-4.1\n\"\"\"\n\nfrom google.auth import _helpers\nfrom google.auth import credentials\nfrom google.oauth2 import _client\n\n\nclass Credentials(credentials.Scoped, credentials.Credentials):\n \"\"\"Credentials using OAuth 2.0 access and refresh tokens.\"\"\"\n\n def __init__(self, token, refresh_token=None, token_uri=None,\n client_id=None, client_secret=None, scopes=None):\n \"\"\"\n Args:\n token (Optional(str)): The OAuth 2.0 access token. Can be None\n if refresh information is provided.\n refresh_token (str): The OAuth 2.0 refresh token. If specified,\n credentials can be refreshed.\n token_uri (str): The OAuth 2.0 authorization server's token\n endpoint URI. Must be specified for refresh, can be left as\n None if the token can not be refreshed.\n client_id (str): The OAuth 2.0 client ID. Must be specified for\n refresh, can be left as None if the token can not be refreshed.\n client_secret(str): The OAuth 2.0 client secret. Must be specified\n for refresh, can be left as None if the token can not be\n refreshed.\n scopes (Sequence[str]): The scopes that were originally used\n to obtain authorization. 
This is a purely informative parameter\n that can be used by :meth:`has_scopes`. OAuth 2.0 credentials\n can not request additional scopes after authorization.\n \"\"\"\n super(Credentials, self).__init__()\n self.token = token\n self._refresh_token = refresh_token\n self._scopes = scopes\n self._token_uri = token_uri\n self._client_id = client_id\n self._client_secret = client_secret\n\n @property\n def requires_scopes(self):\n \"\"\"False: OAuth 2.0 credentials have their scopes set when\n the initial token is requested and can not be changed.\"\"\"\n return False\n\n def with_scopes(self, scopes):\n \"\"\"Unavailable, OAuth 2.0 credentials can not be re-scoped.\n\n OAuth 2.0 credentials have their scopes set when the initial token is\n requested and can not be changed.\n \"\"\"\n raise NotImplementedError(\n 'OAuth 2.0 Credentials can not modify their scopes.')\n\n @_helpers.copy_docstring(credentials.Credentials)\n def refresh(self, request):\n access_token, refresh_token, expiry, _ = _client.refresh_grant(\n request, self._token_uri, self._refresh_token, self._client_id,\n self._client_secret)\n\n self.token = access_token\n self.expiry = expiry\n self._refresh_token = refresh_token\n", "path": "google/oauth2/credentials.py"}]} | 1,634 | 246 |
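The accessors added by this record follow the standard read-only property pattern; a minimal self-contained sketch of that pattern, using a toy class rather than the real `google.oauth2` one:

```python
# Read-only property pattern used by the patch: a private attribute set in
# __init__, exposed through a getter-only @property.
class Creds:  # toy stand-in for google.oauth2.credentials.Credentials
    def __init__(self, refresh_token=None):
        self._refresh_token = refresh_token

    @property
    def refresh_token(self):
        """Optional[str]: The OAuth 2.0 refresh token."""
        return self._refresh_token

c = Creds(refresh_token="abc")
print(c.refresh_token)        # 'abc'
# c.refresh_token = "other"   # AttributeError: no setter is defined
```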
gh_patches_debug_5344 | rasdani/github-patches | git_diff | nilearn__nilearn-2822 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use plot_event in a example
The function `plot_event` has currently no example linked to its [doc](https://nilearn.github.io/modules/generated/nilearn.plotting.plot_event.html#nilearn.plotting.plot_event).
It wouldn't be too costly to use it in one example somewhere.
</issue>
<code>
[start of examples/04_glm_first_level/write_events_file.py]
1 """Example of a events.tsv file generation: the neurospin/localizer events.
2 =============================================================================
3
4 The protocol described is the so-called "archi standard" localizer
5 event sequence. See Pinel et al., BMC neuroscience 2007 for reference.
6 """
7
8 print(__doc__)
9
10 #########################################################################
11 # Define the onset times in seconds. Those are typically extracted
12 # from the stimulation software used.
13 import numpy as np
14 onset = np.array([
15 0., 2.4, 8.7, 11.4, 15., 18., 20.7, 23.7, 26.7, 29.7, 33., 35.4, 39.,
16 41.7, 44.7, 48., 56.4, 59.7, 62.4, 69., 71.4, 75., 83.4, 87., 89.7,
17 96., 108., 116.7, 119.4, 122.7, 125.4, 131.4, 135., 137.7, 140.4,
18 143.4, 146.7, 149.4, 153., 156., 159., 162., 164.4, 167.7, 170.4,
19 173.7, 176.7, 188.4, 191.7, 195., 198., 201., 203.7, 207., 210.,
20 212.7, 215.7, 218.7, 221.4, 224.7, 227.7, 230.7, 234., 236.7, 246.,
21 248.4, 251.7, 254.7, 257.4, 260.4, 264., 266.7, 269.7, 275.4, 278.4,
22 284.4, 288., 291., 293.4, 296.7])
23
24 #########################################################################
25 # Associated trial types: these are numbered between 0 and 9, hence
26 # correspond to 10 different conditions.
27 trial_idx = np.array(
28 [7, 7, 0, 2, 9, 4, 9, 3, 5, 9, 1, 6, 8, 8, 6, 6, 8, 0, 3, 4, 5, 8, 6,
29 2, 9, 1, 6, 5, 9, 1, 7, 8, 6, 6, 1, 2, 9, 0, 7, 1, 8, 2, 7, 8, 3, 6,
30 0, 0, 6, 8, 7, 7, 1, 1, 1, 5, 5, 0, 7, 0, 4, 2, 7, 9, 8, 0, 6, 3, 3,
31 7, 1, 0, 0, 4, 1, 9, 8, 4, 9, 9])
32
33 #########################################################################
34 # We may want to map these indices to explicit condition names.
35 # For that, we define a list of 10 strings.
36 condition_ids = ['horizontal checkerboard',
37 'vertical checkerboard',
38 'right button press, auditory instructions',
39 'left button press, auditory instructions',
40 'right button press, visual instructions',
41 'left button press, visual instructions',
42 'mental computation, auditory instructions',
43 'mental computation, visual instructions',
44 'visual sentence',
45 'auditory sentence']
46
47 trial_type = np.array([condition_ids[i] for i in trial_idx])
48
49 #########################################################################
50 # We also define a duration (required by BIDS conventions).
51 duration = np.ones_like(onset)
52
53
54 #########################################################################
55 # Form an event dataframe from these information.
56 import pandas as pd
57 events = pd.DataFrame({'trial_type': trial_type,
58 'onset': onset,
59 'duration': duration})
60
61 #########################################################################
62 # Export them to a tsv file.
63 tsvfile = 'localizer_events.tsv'
64 events.to_csv(tsvfile, sep='\t', index=False)
65 print("Created the events file in %s " % tsvfile)
66
[end of examples/04_glm_first_level/write_events_file.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/04_glm_first_level/write_events_file.py b/examples/04_glm_first_level/write_events_file.py
--- a/examples/04_glm_first_level/write_events_file.py
+++ b/examples/04_glm_first_level/write_events_file.py
@@ -63,3 +63,10 @@
tsvfile = 'localizer_events.tsv'
events.to_csv(tsvfile, sep='\t', index=False)
print("Created the events file in %s " % tsvfile)
+
+#########################################################################
+# Optionally, the events can be visualized using the plot_event function.
+from matplotlib import pyplot as plt
+from nilearn.plotting import plot_event
+plot_event(events, figsize=(15, 5))
+plt.show()
| {"golden_diff": "diff --git a/examples/04_glm_first_level/write_events_file.py b/examples/04_glm_first_level/write_events_file.py\n--- a/examples/04_glm_first_level/write_events_file.py\n+++ b/examples/04_glm_first_level/write_events_file.py\n@@ -63,3 +63,10 @@\n tsvfile = 'localizer_events.tsv'\n events.to_csv(tsvfile, sep='\\t', index=False)\n print(\"Created the events file in %s \" % tsvfile)\n+\n+#########################################################################\n+# Optionally, the events can be visualized using the plot_event function.\n+from matplotlib import pyplot as plt\n+from nilearn.plotting import plot_event\n+plot_event(events, figsize=(15, 5))\n+plt.show()\n", "issue": "Use plot_event in a example\nThe function `plot_event` has currently no example linked to its [doc](https://nilearn.github.io/modules/generated/nilearn.plotting.plot_event.html#nilearn.plotting.plot_event). \r\nIt wouldn't be too costly to use it in one example somewhere.\n", "before_files": [{"content": "\"\"\"Example of a events.tsv file generation: the neurospin/localizer events.\n=============================================================================\n\nThe protocol described is the so-called \"archi standard\" localizer\nevent sequence. See Pinel et al., BMC neuroscience 2007 for reference.\n\"\"\"\n\nprint(__doc__)\n\n#########################################################################\n# Define the onset times in seconds. Those are typically extracted\n# from the stimulation software used.\nimport numpy as np\nonset = np.array([\n 0., 2.4, 8.7, 11.4, 15., 18., 20.7, 23.7, 26.7, 29.7, 33., 35.4, 39.,\n 41.7, 44.7, 48., 56.4, 59.7, 62.4, 69., 71.4, 75., 83.4, 87., 89.7,\n 96., 108., 116.7, 119.4, 122.7, 125.4, 131.4, 135., 137.7, 140.4,\n 143.4, 146.7, 149.4, 153., 156., 159., 162., 164.4, 167.7, 170.4,\n 173.7, 176.7, 188.4, 191.7, 195., 198., 201., 203.7, 207., 210.,\n 212.7, 215.7, 218.7, 221.4, 224.7, 227.7, 230.7, 234., 236.7, 246.,\n 248.4, 251.7, 254.7, 257.4, 260.4, 264., 266.7, 269.7, 275.4, 278.4,\n 284.4, 288., 291., 293.4, 296.7])\n\n#########################################################################\n# Associated trial types: these are numbered between 0 and 9, hence\n# correspond to 10 different conditions.\ntrial_idx = np.array(\n [7, 7, 0, 2, 9, 4, 9, 3, 5, 9, 1, 6, 8, 8, 6, 6, 8, 0, 3, 4, 5, 8, 6,\n 2, 9, 1, 6, 5, 9, 1, 7, 8, 6, 6, 1, 2, 9, 0, 7, 1, 8, 2, 7, 8, 3, 6,\n 0, 0, 6, 8, 7, 7, 1, 1, 1, 5, 5, 0, 7, 0, 4, 2, 7, 9, 8, 0, 6, 3, 3,\n 7, 1, 0, 0, 4, 1, 9, 8, 4, 9, 9])\n\n#########################################################################\n# We may want to map these indices to explicit condition names.\n# For that, we define a list of 10 strings.\ncondition_ids = ['horizontal checkerboard',\n 'vertical checkerboard',\n 'right button press, auditory instructions',\n 'left button press, auditory instructions',\n 'right button press, visual instructions',\n 'left button press, visual instructions',\n 'mental computation, auditory instructions',\n 'mental computation, visual instructions',\n 'visual sentence',\n 'auditory sentence']\n\ntrial_type = np.array([condition_ids[i] for i in trial_idx])\n\n#########################################################################\n# We also define a duration (required by BIDS conventions).\nduration = np.ones_like(onset)\n\n\n#########################################################################\n# Form an event dataframe from these information.\nimport pandas as pd\nevents = pd.DataFrame({'trial_type': 
trial_type,\n 'onset': onset,\n 'duration': duration})\n\n#########################################################################\n# Export them to a tsv file.\ntsvfile = 'localizer_events.tsv'\nevents.to_csv(tsvfile, sep='\\t', index=False)\nprint(\"Created the events file in %s \" % tsvfile)\n", "path": "examples/04_glm_first_level/write_events_file.py"}]} | 1,836 | 168 |
gh_patches_debug_4117 | rasdani/github-patches | git_diff | kivy__kivy-6178 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
MacOS: Clipboard nspaste makes the app crash when copying text
<!--
The issue tracker is a tool to address bugs.
Please use the #support Discord channel at https://chat.kivy.org/ or Stack Overflow for
support questions, more information at https://git.io/vM1yQ.
Before opening a new issue, make sure you do the following:
* check that your issue isn't already filed: https://git.io/vM1iE
* prepare a short, runnable example that reproduces the issue
* reproduce the problem with the latest development version of Kivy
* double-check that the issue is indeed a bug and not a support request
-->
### Versions
* Python: 3.7.1
* OS: MacOS 10.13.6
* Kivy: 1.10.1
* Kivy installation method: pypi
### Description
When I try to copy text in a TextInput, the app crashes. Pasting, however, works fine.
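
For reference, a minimal app along the lines of the sketch below should be enough to reproduce it — select text in the field and press Cmd+C (this sketch is mine, not from the original report):

```python
# Hypothetical minimal reproduction; assumes a stock Kivy install on macOS.
from kivy.app import App
from kivy.uix.textinput import TextInput


class ReproApp(App):
    def build(self):
        # Select any text in the input and press Cmd+C to trigger the crash.
        return TextInput(text="select me and copy")


ReproApp().run()
```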
### Code and Logs
```log
Traceback (most recent call last):
File "main.py", line 56, in <module>
app.run()
File "/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/app.py", line 826, in run
runTouchApp()
File "/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/base.py", line 502, in runTouchApp
EventLoop.window.mainloop()
File "/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/core/window/window_sdl2.py", line 727, in mainloop
self._mainloop()
File "/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/core/window/window_sdl2.py", line 662, in _mainloop
self.modifiers):
File "kivy/_event.pyx", line 703, in kivy._event.EventDispatcher.dispatch
File "kivy/_event.pyx", line 1214, in kivy._event.EventObservers.dispatch
File "kivy/_event.pyx", line 1138, in kivy._event.EventObservers._dispatch
File "/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/core/window/__init__.py", line 162, in _on_window_key_down
return self.dispatch('on_key_down', keycode, text, modifiers)
File "kivy/_event.pyx", line 703, in kivy._event.EventDispatcher.dispatch
File "kivy/_event.pyx", line 1214, in kivy._event.EventObservers.dispatch
File "kivy/_event.pyx", line 1138, in kivy._event.EventObservers._dispatch
File "/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/uix/textinput.py", line 2434, in keyboard_on_key_down
self.copy()
File "/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/uix/textinput.py", line 1727, in copy
return Clipboard.copy(self.selection_text)
File "/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/core/clipboard/__init__.py", line 73, in copy
self._copy(data)
File "/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/core/clipboard/__init__.py", line 87, in _copy
self.put(data, self._clip_mime_type)
File "/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/core/clipboard/clipboard_nspaste.py", line 40, in put
pb.writeObjects_([data])
File "pyobjus/pyobjus.pyx", line 393, in pyobjus.ObjcMethod.__call__
File "pyobjus/pyobjus_conversions.pxi", line 617, in pyobjus.convert_py_arg_to_cy
File "pyobjus/pyobjus_conversions.pxi", line 441, in pyobjus.convert_py_to_nsobject
File "pyobjus/pyobjus.pyx", line 393, in pyobjus.ObjcMethod.__call__
File "pyobjus/pyobjus_conversions.pxi", line 617, in pyobjus.convert_py_arg_to_cy
File "pyobjus/pyobjus_conversions.pxi", line 452, in pyobjus.convert_py_to_nsobject
File "pyobjus/pyobjus.pyx", line 974, in pyobjus.objc_create_delegate
pyobjus.ObjcException: You've passed b'kivyproject' as delegate, but there is no @protocol methods declared.
```
</issue>
<code>
[start of kivy/core/clipboard/clipboard_nspaste.py]
1 '''
2 Clipboard OsX: implementation of clipboard using Appkit
3 '''
4
5 __all__ = ('ClipboardNSPaste', )
6
7 from kivy.core.clipboard import ClipboardBase
8 from kivy.utils import platform
9
10 if platform != 'macosx':
11 raise SystemError('Unsupported platform for appkit clipboard.')
12 try:
13 from pyobjus import autoclass
14 from pyobjus.dylib_manager import load_framework, INCLUDE
15 load_framework(INCLUDE.AppKit)
16 except ImportError:
17 raise SystemError('Pyobjus not installed. Please run the following'
18 ' command to install it. `pip install --user pyobjus`')
19
20 NSPasteboard = autoclass('NSPasteboard')
21 NSString = autoclass('NSString')
22
23
24 class ClipboardNSPaste(ClipboardBase):
25
26 def __init__(self):
27 super(ClipboardNSPaste, self).__init__()
28 self._clipboard = NSPasteboard.generalPasteboard()
29
30 def get(self, mimetype='text/plain'):
31 pb = self._clipboard
32 data = pb.stringForType_('public.utf8-plain-text')
33 if not data:
34 return ""
35 return data.UTF8String()
36
37 def put(self, data, mimetype='text/plain'):
38 pb = self._clipboard
39 pb.clearContents()
40 pb.writeObjects_([data])
41
42 def get_types(self):
43 return list('text/plain',)
44
[end of kivy/core/clipboard/clipboard_nspaste.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kivy/core/clipboard/clipboard_nspaste.py b/kivy/core/clipboard/clipboard_nspaste.py
--- a/kivy/core/clipboard/clipboard_nspaste.py
+++ b/kivy/core/clipboard/clipboard_nspaste.py
@@ -37,7 +37,8 @@
def put(self, data, mimetype='text/plain'):
pb = self._clipboard
pb.clearContents()
- pb.writeObjects_([data])
+ utf8 = NSString.alloc().initWithUTF8String_(data)
+ pb.setString_forType_(utf8, 'public.utf8-plain-text')
def get_types(self):
return list('text/plain',)
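
Design note on this fix: handing `writeObjects_` a raw Python object forces pyobjus to improvise an Objective-C bridge for it, which is exactly what fails with the delegate error in the traceback above; converting explicitly to an `NSString` and calling `setString_forType_` keeps the pasteboard call fully typed and avoids that conversion path entirely.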
| {"golden_diff": "diff --git a/kivy/core/clipboard/clipboard_nspaste.py b/kivy/core/clipboard/clipboard_nspaste.py\n--- a/kivy/core/clipboard/clipboard_nspaste.py\n+++ b/kivy/core/clipboard/clipboard_nspaste.py\n@@ -37,7 +37,8 @@\n def put(self, data, mimetype='text/plain'):\n pb = self._clipboard\n pb.clearContents()\n- pb.writeObjects_([data])\n+ utf8 = NSString.alloc().initWithUTF8String_(data)\n+ pb.setString_forType_(utf8, 'public.utf8-plain-text')\n \n def get_types(self):\n return list('text/plain',)\n", "issue": "MacOS: Clipboard nspaste make app crash when copying text\n<!--\r\nThe issue tracker is a tool to address bugs.\r\nPlease use the #support Discord channel at https://chat.kivy.org/ or Stack Overflow for\r\nsupport questions, more information at https://git.io/vM1yQ.\r\n\r\nBefore opening a new issue, make sure you do the following:\r\n * check that your issue isn't already filed: https://git.io/vM1iE\r\n * prepare a short, runnable example that reproduces the issue\r\n * reproduce the problem with the latest development version of Kivy\r\n * double-check that the issue is indeed a bug and not a support request\r\n-->\r\n\r\n### Versions\r\n\r\n* Python: 3.7.1\r\n* OS: MacOS 10.13.6\r\n* Kivy: 1.10.1\r\n* Kivy installation method: pypi\r\n\r\n### Description\r\n\r\nWhen I try copy text in TextInput, this make app crash. But paste is OK.\r\n\r\n### Code and Logs\r\n\r\n```log\r\nTraceback (most recent call last):\r\n File \"main.py\", line 56, in <module>\r\n app.run()\r\n File \"/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/app.py\", line 826, in run\r\n runTouchApp()\r\n File \"/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/base.py\", line 502, in runTouchApp\r\n EventLoop.window.mainloop()\r\n File \"/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/core/window/window_sdl2.py\", line 727, in mainloop\r\n self._mainloop()\r\n File \"/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/core/window/window_sdl2.py\", line 662, in _mainloop\r\n self.modifiers):\r\n File \"kivy/_event.pyx\", line 703, in kivy._event.EventDispatcher.dispatch\r\n File \"kivy/_event.pyx\", line 1214, in kivy._event.EventObservers.dispatch\r\n File \"kivy/_event.pyx\", line 1138, in kivy._event.EventObservers._dispatch\r\n File \"/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/core/window/__init__.py\", line 162, in _on_window_key_down\r\n return self.dispatch('on_key_down', keycode, text, modifiers)\r\n File \"kivy/_event.pyx\", line 703, in kivy._event.EventDispatcher.dispatch\r\n File \"kivy/_event.pyx\", line 1214, in kivy._event.EventObservers.dispatch\r\n File \"kivy/_event.pyx\", line 1138, in kivy._event.EventObservers._dispatch\r\n File \"/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/uix/textinput.py\", line 2434, in keyboard_on_key_down\r\n self.copy()\r\n File \"/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/uix/textinput.py\", line 1727, in copy\r\n return Clipboard.copy(self.selection_text)\r\n File \"/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/core/clipboard/__init__.py\", line 73, in copy\r\n self._copy(data)\r\n File \"/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/core/clipboard/__init__.py\", line 87, in _copy\r\n self.put(data, self._clip_mime_type)\r\n File \"/Users/ivc/kivy/.env3/lib/python3.7/site-packages/kivy/core/clipboard/clipboard_nspaste.py\", line 40, in put\r\n pb.writeObjects_([data])\r\n File \"pyobjus/pyobjus.pyx\", line 393, in pyobjus.ObjcMethod.__call__\r\n File 
\"pyobjus/pyobjus_conversions.pxi\", line 617, in pyobjus.convert_py_arg_to_cy\r\n File \"pyobjus/pyobjus_conversions.pxi\", line 441, in pyobjus.convert_py_to_nsobject\r\n File \"pyobjus/pyobjus.pyx\", line 393, in pyobjus.ObjcMethod.__call__\r\n File \"pyobjus/pyobjus_conversions.pxi\", line 617, in pyobjus.convert_py_arg_to_cy\r\n File \"pyobjus/pyobjus_conversions.pxi\", line 452, in pyobjus.convert_py_to_nsobject\r\n File \"pyobjus/pyobjus.pyx\", line 974, in pyobjus.objc_create_delegate\r\n pyobjus.ObjcException: You've passed b'kivyproject' as delegate, but there is no @protocol methods declared.\r\n```\r\n\n", "before_files": [{"content": "'''\nClipboard OsX: implementation of clipboard using Appkit\n'''\n\n__all__ = ('ClipboardNSPaste', )\n\nfrom kivy.core.clipboard import ClipboardBase\nfrom kivy.utils import platform\n\nif platform != 'macosx':\n raise SystemError('Unsupported platform for appkit clipboard.')\ntry:\n from pyobjus import autoclass\n from pyobjus.dylib_manager import load_framework, INCLUDE\n load_framework(INCLUDE.AppKit)\nexcept ImportError:\n raise SystemError('Pyobjus not installed. Please run the following'\n ' command to install it. `pip install --user pyobjus`')\n\nNSPasteboard = autoclass('NSPasteboard')\nNSString = autoclass('NSString')\n\n\nclass ClipboardNSPaste(ClipboardBase):\n\n def __init__(self):\n super(ClipboardNSPaste, self).__init__()\n self._clipboard = NSPasteboard.generalPasteboard()\n\n def get(self, mimetype='text/plain'):\n pb = self._clipboard\n data = pb.stringForType_('public.utf8-plain-text')\n if not data:\n return \"\"\n return data.UTF8String()\n\n def put(self, data, mimetype='text/plain'):\n pb = self._clipboard\n pb.clearContents()\n pb.writeObjects_([data])\n\n def get_types(self):\n return list('text/plain',)\n", "path": "kivy/core/clipboard/clipboard_nspaste.py"}]} | 2,006 | 149 |
gh_patches_debug_33844 | rasdani/github-patches | git_diff | getredash__redash-4354 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make Cypress tests work with [email protected]
Running our tests with [email protected] doesn't work. Need to figure out what happened, until then pinning the version to 3.4.1 (#4284).
</issue>
<code>
[start of redash/app.py]
1 from flask import Flask
2 from werkzeug.contrib.fixers import ProxyFix
3
4 from . import settings
5
6
7 class Redash(Flask):
8 """A custom Flask app for Redash"""
9 def __init__(self, *args, **kwargs):
10 kwargs.update({
11 'template_folder': settings.STATIC_ASSETS_PATH,
12 'static_folder': settings.STATIC_ASSETS_PATH,
13 'static_url_path': '/static',
14 })
15 super(Redash, self).__init__(__name__, *args, **kwargs)
16 # Make sure we get the right referral address even behind proxies like nginx.
17 self.wsgi_app = ProxyFix(self.wsgi_app, settings.PROXIES_COUNT)
18 # Configure Redash using our settings
19 self.config.from_object('redash.settings')
20
21
22 def create_app():
23 from . import authentication, extensions, handlers, limiter, mail, migrate, security
24 from .handlers import chrome_logger
25 from .handlers.webpack import configure_webpack
26 from .metrics import request as request_metrics
27 from .models import db, users
28 from .utils import sentry
29 from .version_check import reset_new_version_status
30
31 sentry.init()
32 app = Redash()
33
34 # Check and update the cached version for use by the client
35 app.before_first_request(reset_new_version_status)
36
37 security.init_app(app)
38 request_metrics.init_app(app)
39 db.init_app(app)
40 migrate.init_app(app, db)
41 mail.init_app(app)
42 authentication.init_app(app)
43 limiter.init_app(app)
44 handlers.init_app(app)
45 configure_webpack(app)
46 extensions.init_app(app)
47 chrome_logger.init_app(app)
48 users.init_app(app)
49
50 return app
51
[end of redash/app.py]
[start of redash/handlers/chrome_logger.py]
1 import time
2 import chromelogger
3 from flask import g, request
4 from flask_sqlalchemy import get_debug_queries
5
6
7 def log_queries():
8 total_duration = 0.0
9 queries_count = 0
10
11 chromelogger.group("SQL Queries")
12
13 for q in get_debug_queries():
14 total_duration += q.duration
15 queries_count += 1
16 chromelogger.info(q.statement % q.parameters)
17 chromelogger.info("Runtime: {:.2f}ms".format(1000 * q.duration))
18
19 chromelogger.info("{} queries executed in {:.2f}ms.".format(queries_count, total_duration*1000))
20
21 chromelogger.group_end("SQL Queries")
22
23
24 def chrome_log(response):
25 request_duration = (time.time() - g.start_time) * 1000
26 queries_duration = g.get('queries_duration', 0.0)
27 queries_count = g.get('queries_count', 0)
28
29 group_name = '{} {} ({}, {:.2f}ms runtime, {} queries in {:.2f}ms)'.format(
30 request.method, request.path, response.status_code, request_duration, queries_count, queries_duration)
31
32 chromelogger.group_collapsed(group_name)
33
34 endpoint = (request.endpoint or 'unknown').replace('.', '_')
35 chromelogger.info('Endpoint: {}'.format(endpoint))
36 chromelogger.info('Content Type: {}'.format(response.content_type))
37 chromelogger.info('Content Length: {}'.format(response.content_length or -1))
38
39 log_queries()
40
41 chromelogger.group_end(group_name)
42
43 header = chromelogger.get_header()
44 if header is not None:
45 response.headers.add(*header)
46
47 return response
48
49
50 def init_app(app):
51 if not app.debug:
52 return
53
54 app.after_request(chrome_log)
55
[end of redash/handlers/chrome_logger.py]
</code>
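
For orientation, the `chrome_logger.init_app()` hook involved here is an instance of Flask's standard response-hook pattern; a stripped-down sketch of that mechanism (illustrative only, not Redash code) looks like this:

```python
# Minimal sketch of the after_request pattern chrome_logger relies on:
# a registered hook runs on every response and may attach extra headers.
from flask import Flask

app = Flask(__name__)


@app.after_request
def annotate(response):
    # chrome_logger adds the header from chromelogger.get_header() here.
    response.headers["X-Debug"] = "1"
    return response
```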
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/redash/app.py b/redash/app.py
--- a/redash/app.py
+++ b/redash/app.py
@@ -21,7 +21,6 @@
def create_app():
from . import authentication, extensions, handlers, limiter, mail, migrate, security
- from .handlers import chrome_logger
from .handlers.webpack import configure_webpack
from .metrics import request as request_metrics
from .models import db, users
@@ -44,7 +43,6 @@
handlers.init_app(app)
configure_webpack(app)
extensions.init_app(app)
- chrome_logger.init_app(app)
users.init_app(app)
return app
diff --git a/redash/handlers/chrome_logger.py b/redash/handlers/chrome_logger.py
deleted file mode 100644
--- a/redash/handlers/chrome_logger.py
+++ /dev/null
@@ -1,54 +0,0 @@
-import time
-import chromelogger
-from flask import g, request
-from flask_sqlalchemy import get_debug_queries
-
-
-def log_queries():
- total_duration = 0.0
- queries_count = 0
-
- chromelogger.group("SQL Queries")
-
- for q in get_debug_queries():
- total_duration += q.duration
- queries_count += 1
- chromelogger.info(q.statement % q.parameters)
- chromelogger.info("Runtime: {:.2f}ms".format(1000 * q.duration))
-
- chromelogger.info("{} queries executed in {:.2f}ms.".format(queries_count, total_duration*1000))
-
- chromelogger.group_end("SQL Queries")
-
-
-def chrome_log(response):
- request_duration = (time.time() - g.start_time) * 1000
- queries_duration = g.get('queries_duration', 0.0)
- queries_count = g.get('queries_count', 0)
-
- group_name = '{} {} ({}, {:.2f}ms runtime, {} queries in {:.2f}ms)'.format(
- request.method, request.path, response.status_code, request_duration, queries_count, queries_duration)
-
- chromelogger.group_collapsed(group_name)
-
- endpoint = (request.endpoint or 'unknown').replace('.', '_')
- chromelogger.info('Endpoint: {}'.format(endpoint))
- chromelogger.info('Content Type: {}'.format(response.content_type))
- chromelogger.info('Content Length: {}'.format(response.content_length or -1))
-
- log_queries()
-
- chromelogger.group_end(group_name)
-
- header = chromelogger.get_header()
- if header is not None:
- response.headers.add(*header)
-
- return response
-
-
-def init_app(app):
- if not app.debug:
- return
-
- app.after_request(chrome_log)
| {"golden_diff": "diff --git a/redash/app.py b/redash/app.py\n--- a/redash/app.py\n+++ b/redash/app.py\n@@ -21,7 +21,6 @@\n \n def create_app():\n from . import authentication, extensions, handlers, limiter, mail, migrate, security\n- from .handlers import chrome_logger\n from .handlers.webpack import configure_webpack\n from .metrics import request as request_metrics\n from .models import db, users\n@@ -44,7 +43,6 @@\n handlers.init_app(app)\n configure_webpack(app)\n extensions.init_app(app)\n- chrome_logger.init_app(app)\n users.init_app(app)\n \n return app\ndiff --git a/redash/handlers/chrome_logger.py b/redash/handlers/chrome_logger.py\ndeleted file mode 100644\n--- a/redash/handlers/chrome_logger.py\n+++ /dev/null\n@@ -1,54 +0,0 @@\n-import time\n-import chromelogger\n-from flask import g, request\n-from flask_sqlalchemy import get_debug_queries\n-\n-\n-def log_queries():\n- total_duration = 0.0\n- queries_count = 0\n-\n- chromelogger.group(\"SQL Queries\")\n-\n- for q in get_debug_queries():\n- total_duration += q.duration\n- queries_count += 1\n- chromelogger.info(q.statement % q.parameters)\n- chromelogger.info(\"Runtime: {:.2f}ms\".format(1000 * q.duration))\n-\n- chromelogger.info(\"{} queries executed in {:.2f}ms.\".format(queries_count, total_duration*1000))\n-\n- chromelogger.group_end(\"SQL Queries\")\n-\n-\n-def chrome_log(response):\n- request_duration = (time.time() - g.start_time) * 1000\n- queries_duration = g.get('queries_duration', 0.0)\n- queries_count = g.get('queries_count', 0)\n-\n- group_name = '{} {} ({}, {:.2f}ms runtime, {} queries in {:.2f}ms)'.format(\n- request.method, request.path, response.status_code, request_duration, queries_count, queries_duration)\n-\n- chromelogger.group_collapsed(group_name)\n-\n- endpoint = (request.endpoint or 'unknown').replace('.', '_')\n- chromelogger.info('Endpoint: {}'.format(endpoint))\n- chromelogger.info('Content Type: {}'.format(response.content_type))\n- chromelogger.info('Content Length: {}'.format(response.content_length or -1))\n-\n- log_queries()\n-\n- chromelogger.group_end(group_name)\n-\n- header = chromelogger.get_header()\n- if header is not None:\n- response.headers.add(*header)\n-\n- return response\n-\n-\n-def init_app(app):\n- if not app.debug:\n- return\n-\n- app.after_request(chrome_log)\n", "issue": "Make Cypress tests work with [email protected]\nRunning our tests with [email protected] doesn't work. Need to figure out what happened, until then pinning the version to 3.4.1 (#4284).\n", "before_files": [{"content": "from flask import Flask\nfrom werkzeug.contrib.fixers import ProxyFix\n\nfrom . import settings\n\n\nclass Redash(Flask):\n \"\"\"A custom Flask app for Redash\"\"\"\n def __init__(self, *args, **kwargs):\n kwargs.update({\n 'template_folder': settings.STATIC_ASSETS_PATH,\n 'static_folder': settings.STATIC_ASSETS_PATH,\n 'static_url_path': '/static',\n })\n super(Redash, self).__init__(__name__, *args, **kwargs)\n # Make sure we get the right referral address even behind proxies like nginx.\n self.wsgi_app = ProxyFix(self.wsgi_app, settings.PROXIES_COUNT)\n # Configure Redash using our settings\n self.config.from_object('redash.settings')\n\n\ndef create_app():\n from . 
import authentication, extensions, handlers, limiter, mail, migrate, security\n from .handlers import chrome_logger\n from .handlers.webpack import configure_webpack\n from .metrics import request as request_metrics\n from .models import db, users\n from .utils import sentry\n from .version_check import reset_new_version_status\n\n sentry.init()\n app = Redash()\n\n # Check and update the cached version for use by the client\n app.before_first_request(reset_new_version_status)\n\n security.init_app(app)\n request_metrics.init_app(app)\n db.init_app(app)\n migrate.init_app(app, db)\n mail.init_app(app)\n authentication.init_app(app)\n limiter.init_app(app)\n handlers.init_app(app)\n configure_webpack(app)\n extensions.init_app(app)\n chrome_logger.init_app(app)\n users.init_app(app)\n\n return app\n", "path": "redash/app.py"}, {"content": "import time\nimport chromelogger\nfrom flask import g, request\nfrom flask_sqlalchemy import get_debug_queries\n\n\ndef log_queries():\n total_duration = 0.0\n queries_count = 0\n\n chromelogger.group(\"SQL Queries\")\n\n for q in get_debug_queries():\n total_duration += q.duration\n queries_count += 1\n chromelogger.info(q.statement % q.parameters)\n chromelogger.info(\"Runtime: {:.2f}ms\".format(1000 * q.duration))\n\n chromelogger.info(\"{} queries executed in {:.2f}ms.\".format(queries_count, total_duration*1000))\n\n chromelogger.group_end(\"SQL Queries\")\n\n\ndef chrome_log(response):\n request_duration = (time.time() - g.start_time) * 1000\n queries_duration = g.get('queries_duration', 0.0)\n queries_count = g.get('queries_count', 0)\n\n group_name = '{} {} ({}, {:.2f}ms runtime, {} queries in {:.2f}ms)'.format(\n request.method, request.path, response.status_code, request_duration, queries_count, queries_duration)\n\n chromelogger.group_collapsed(group_name)\n\n endpoint = (request.endpoint or 'unknown').replace('.', '_')\n chromelogger.info('Endpoint: {}'.format(endpoint))\n chromelogger.info('Content Type: {}'.format(response.content_type))\n chromelogger.info('Content Length: {}'.format(response.content_length or -1))\n\n log_queries()\n\n chromelogger.group_end(group_name)\n\n header = chromelogger.get_header()\n if header is not None:\n response.headers.add(*header)\n\n return response\n\n\ndef init_app(app):\n if not app.debug:\n return\n\n app.after_request(chrome_log)\n", "path": "redash/handlers/chrome_logger.py"}]} | 1,572 | 643 |
gh_patches_debug_7863 | rasdani/github-patches | git_diff | facebookresearch__hydra-1363 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Nevergrad-Plugin] Add support for Python 3.9
Python 3.9 support is pending the scikit 2.4.0 release. Relevant comment: scikit-learn/scikit-learn#18621 (comment)
Related to #1062
</issue>
<code>
[start of plugins/hydra_nevergrad_sweeper/setup.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 # type: ignore
3 from setuptools import find_namespace_packages, setup
4
5 with open("README.md", "r") as fh:
6 LONG_DESC = fh.read()
7 setup(
8 name="hydra-nevergrad-sweeper",
9 version="1.1.0rc1",
10 author="Jeremy Rapin, Omry Yadan, Jieru Hu",
11 author_email="[email protected], [email protected], [email protected]",
12 description="Hydra Nevergrad Sweeper plugin",
13 long_description=LONG_DESC,
14 long_description_content_type="text/markdown",
15 url="https://github.com/facebookresearch/hydra/",
16 packages=find_namespace_packages(include=["hydra_plugins.*"]),
17 classifiers=[
18 "License :: OSI Approved :: MIT License",
19 "Programming Language :: Python :: 3.6",
20 "Programming Language :: Python :: 3.7",
21 "Programming Language :: Python :: 3.8",
22 # "Programming Language :: Python :: 3.9",
23 "Operating System :: OS Independent",
24 "Development Status :: 4 - Beta",
25 ],
26 install_requires=["hydra-core>=1.0.0", "nevergrad>=0.4.1.post4"],
27 include_package_data=True,
28 )
29
[end of plugins/hydra_nevergrad_sweeper/setup.py]
</code>
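
One caveat worth flagging: Trove classifiers like the ones above are purely informational and do not gate installation. If the plugin actually needed to block unsupported interpreters until the scikit release lands, that would take a `python_requires` constraint — sketched hypothetically below (this constraint is not in the real setup.py):

```python
# Hypothetical: enforcing an interpreter ceiling, which classifiers alone
# cannot do. Illustration only; the actual fix just updates a classifier.
from setuptools import setup

setup(
    name="hydra-nevergrad-sweeper",
    python_requires=">=3.6,<3.9",  # would hard-block pip installs on 3.9
    # ... remaining arguments as in the file above ...
)
```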
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugins/hydra_nevergrad_sweeper/setup.py b/plugins/hydra_nevergrad_sweeper/setup.py
--- a/plugins/hydra_nevergrad_sweeper/setup.py
+++ b/plugins/hydra_nevergrad_sweeper/setup.py
@@ -19,7 +19,7 @@
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
- # "Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.9",
"Operating System :: OS Independent",
"Development Status :: 4 - Beta",
],
| {"golden_diff": "diff --git a/plugins/hydra_nevergrad_sweeper/setup.py b/plugins/hydra_nevergrad_sweeper/setup.py\n--- a/plugins/hydra_nevergrad_sweeper/setup.py\n+++ b/plugins/hydra_nevergrad_sweeper/setup.py\n@@ -19,7 +19,7 @@\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n- # \"Programming Language :: Python :: 3.9\",\n+ \"Programming Language :: Python :: 3.9\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 4 - Beta\",\n ],\n", "issue": "[Nevergrad-Plugin] Add support for Python 3.9\nPython 3.9 support pending on scikit 2.4.0 release. Relevant comment: scikit-learn/scikit-learn#18621 (comment)\r\n\r\nRelated to #1062\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# type: ignore\nfrom setuptools import find_namespace_packages, setup\n\nwith open(\"README.md\", \"r\") as fh:\n LONG_DESC = fh.read()\n setup(\n name=\"hydra-nevergrad-sweeper\",\n version=\"1.1.0rc1\",\n author=\"Jeremy Rapin, Omry Yadan, Jieru Hu\",\n author_email=\"[email protected], [email protected], [email protected]\",\n description=\"Hydra Nevergrad Sweeper plugin\",\n long_description=LONG_DESC,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/facebookresearch/hydra/\",\n packages=find_namespace_packages(include=[\"hydra_plugins.*\"]),\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n # \"Programming Language :: Python :: 3.9\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 4 - Beta\",\n ],\n install_requires=[\"hydra-core>=1.0.0\", \"nevergrad>=0.4.1.post4\"],\n include_package_data=True,\n )\n", "path": "plugins/hydra_nevergrad_sweeper/setup.py"}]} | 947 | 156 |
gh_patches_debug_13339 | rasdani/github-patches | git_diff | pypi__warehouse-1491 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[requires.io] dependency update on master branch
</issue>
<code>
[start of warehouse/celery.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import celery.backends
14
15 # We need to trick Celery into supporting rediss:// URLs which is how redis-py
16 # signals that you should use Redis with TLS.
17 celery.backends.BACKEND_ALIASES["rediss"] = "warehouse.celery:TLSRedisBackend" # noqa
18
19 from celery import Celery, Task
20 from celery.backends.redis import RedisBackend as _RedisBackend
21 from celery.signals import celeryd_init
22 from pyramid import scripting
23 from pyramid.threadlocal import get_current_request
24 from raven.contrib.celery import register_signal, register_logger_signal
25
26 from warehouse.config import Environment, configure
27
28
29 @celeryd_init.connect
30 def _configure_celery(*args, **kwargs):
31 config = configure()
32 register_logger_signal(config.registry["raven.client"])
33 register_signal(config.registry["raven.client"])
34
35
36 class TLSRedisBackend(_RedisBackend):
37
38 def _params_from_url(self, url, defaults):
39 params = super()._params_from_url(url, defaults)
40 params.update({"connection_class": self.redis.SSLConnection})
41 return params
42
43
44 class WarehouseTask(Task):
45
46 abstract = True
47
48 def __call__(self, *args, **kwargs):
49 registry = self.app.pyramid_config.registry
50 pyramid_env = scripting.prepare(registry=registry)
51
52 try:
53 return super().__call__(pyramid_env["request"], *args, **kwargs)
54 finally:
55 pyramid_env["closer"]()
56
57 def apply_async(self, *args, **kwargs):
58 # The API design of Celery makes this threadlocal pretty impossible to
59 # avoid :(
60 request = get_current_request()
61
62 # If for whatever reason we were unable to get a request we'll just
63 # skip this and call the original method to send this immediately.
64 if request is None or not hasattr(request, "tm"):
65 return super().apply_async(*args, **kwargs)
66
67 # This will break things that expect to get an AsyncResult because
68 # we're no longer going to be returning an async result from this when
69 # called from within a request, response cycle. Ideally we shouldn't be
70 # waiting for responses in a request/response cycle anyways though.
71 request.tm.get().addAfterCommitHook(
72 self._after_commit_hook,
73 args=args,
74 kws=kwargs,
75 )
76
77 def _after_commit_hook(self, success, *args, **kwargs):
78 if success:
79 super().apply_async(*args, **kwargs)
80
81
82 app = Celery("warehouse")
83 app.Task = WarehouseTask
84
85
86 task = app.task
87
88
89 def includeme(config):
90 s = config.registry.settings
91 app.pyramid_config = config
92 app.conf.update(
93 BROKER_URL=s["celery.broker_url"],
94 BROKER_USE_SSL=s["warehouse.env"] == Environment.production,
95 CELERY_DISABLE_RATE_LIMITS=True,
96 CELERY_RESULT_BACKEND=s["celery.result_url"],
97 CELERY_RESULT_SERIALIZER="json",
98 CELERY_TASK_SERIALIZER="json",
99 CELERY_ACCEPT_CONTENT=["json", "msgpack"],
100 CELERY_MESSAGE_COMPRESSION="gzip",
101 CELERY_QUEUE_HA_POLICY="all",
102 )
103
[end of warehouse/celery.py]
</code>
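
The fragile part of `warehouse/celery.py` is the monkey-patch at the top: newer Celery releases keep `BACKEND_ALIASES` in `celery.app.backends` rather than `celery.backends`, which is exactly what the golden diff further down changes. A version-tolerant variant could look like this sketch — my own illustration, not Warehouse code:

```python
# Hypothetical compatibility shim: register the TLS Redis backend alias
# wherever this Celery version keeps its alias table.
try:
    from celery.app.backends import BACKEND_ALIASES  # newer Celery layout
except ImportError:
    from celery.backends import BACKEND_ALIASES  # older Celery layout

BACKEND_ALIASES["rediss"] = "warehouse.celery:TLSRedisBackend"
```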
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/warehouse/celery.py b/warehouse/celery.py
--- a/warehouse/celery.py
+++ b/warehouse/celery.py
@@ -10,11 +10,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-import celery.backends
+import celery.app.backends
# We need to trick Celery into supporting rediss:// URLs which is how redis-py
# signals that you should use Redis with TLS.
-celery.backends.BACKEND_ALIASES["rediss"] = "warehouse.celery:TLSRedisBackend" # noqa
+celery.app.backends.BACKEND_ALIASES["rediss"] = "warehouse.celery:TLSRedisBackend" # noqa
from celery import Celery, Task
from celery.backends.redis import RedisBackend as _RedisBackend
| {"golden_diff": "diff --git a/warehouse/celery.py b/warehouse/celery.py\n--- a/warehouse/celery.py\n+++ b/warehouse/celery.py\n@@ -10,11 +10,11 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-import celery.backends\n+import celery.app.backends\n \n # We need to trick Celery into supporting rediss:// URLs which is how redis-py\n # signals that you should use Redis with TLS.\n-celery.backends.BACKEND_ALIASES[\"rediss\"] = \"warehouse.celery:TLSRedisBackend\" # noqa\n+celery.app.backends.BACKEND_ALIASES[\"rediss\"] = \"warehouse.celery:TLSRedisBackend\" # noqa\n \n from celery import Celery, Task\n from celery.backends.redis import RedisBackend as _RedisBackend\n", "issue": "[requires.io] dependency update on master branch\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport celery.backends\n\n# We need to trick Celery into supporting rediss:// URLs which is how redis-py\n# signals that you should use Redis with TLS.\ncelery.backends.BACKEND_ALIASES[\"rediss\"] = \"warehouse.celery:TLSRedisBackend\" # noqa\n\nfrom celery import Celery, Task\nfrom celery.backends.redis import RedisBackend as _RedisBackend\nfrom celery.signals import celeryd_init\nfrom pyramid import scripting\nfrom pyramid.threadlocal import get_current_request\nfrom raven.contrib.celery import register_signal, register_logger_signal\n\nfrom warehouse.config import Environment, configure\n\n\n@celeryd_init.connect\ndef _configure_celery(*args, **kwargs):\n config = configure()\n register_logger_signal(config.registry[\"raven.client\"])\n register_signal(config.registry[\"raven.client\"])\n\n\nclass TLSRedisBackend(_RedisBackend):\n\n def _params_from_url(self, url, defaults):\n params = super()._params_from_url(url, defaults)\n params.update({\"connection_class\": self.redis.SSLConnection})\n return params\n\n\nclass WarehouseTask(Task):\n\n abstract = True\n\n def __call__(self, *args, **kwargs):\n registry = self.app.pyramid_config.registry\n pyramid_env = scripting.prepare(registry=registry)\n\n try:\n return super().__call__(pyramid_env[\"request\"], *args, **kwargs)\n finally:\n pyramid_env[\"closer\"]()\n\n def apply_async(self, *args, **kwargs):\n # The API design of Celery makes this threadlocal pretty impossible to\n # avoid :(\n request = get_current_request()\n\n # If for whatever reason we were unable to get a request we'll just\n # skip this and call the original method to send this immediately.\n if request is None or not hasattr(request, \"tm\"):\n return super().apply_async(*args, **kwargs)\n\n # This will break things that expect to get an AsyncResult because\n # we're no longer going to be returning an async result from this when\n # called from within a request, response cycle. 
Ideally we shouldn't be\n # waiting for responses in a request/response cycle anyways though.\n request.tm.get().addAfterCommitHook(\n self._after_commit_hook,\n args=args,\n kws=kwargs,\n )\n\n def _after_commit_hook(self, success, *args, **kwargs):\n if success:\n super().apply_async(*args, **kwargs)\n\n\napp = Celery(\"warehouse\")\napp.Task = WarehouseTask\n\n\ntask = app.task\n\n\ndef includeme(config):\n s = config.registry.settings\n app.pyramid_config = config\n app.conf.update(\n BROKER_URL=s[\"celery.broker_url\"],\n BROKER_USE_SSL=s[\"warehouse.env\"] == Environment.production,\n CELERY_DISABLE_RATE_LIMITS=True,\n CELERY_RESULT_BACKEND=s[\"celery.result_url\"],\n CELERY_RESULT_SERIALIZER=\"json\",\n CELERY_TASK_SERIALIZER=\"json\",\n CELERY_ACCEPT_CONTENT=[\"json\", \"msgpack\"],\n CELERY_MESSAGE_COMPRESSION=\"gzip\",\n CELERY_QUEUE_HA_POLICY=\"all\",\n )\n", "path": "warehouse/celery.py"}]} | 1,546 | 189 |
gh_patches_debug_17543 | rasdani/github-patches | git_diff | coreruleset__coreruleset-3002 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Move data files from util/regexp-assemble directory to the top level
### Description
Data files used to generate regular expressions have so far lived in a hard-to-find location that depends on the tooling.
With the new crs-toolchain, this is no longer necessary.
So let's move the data files to the top level directory.
### Requirements
- move all data files to the top level dir
- review dependencies and check that all references are updated
</issue>
<code>
[start of util/regexp-assemble/lib/context.py]
1 import argparse
2 from pathlib import Path
3 import logging
4
5
6
7 class Context(object):
8 def __init__(self, root_directory: Path, namespace: argparse.Namespace=None):
9 self.root_directory = root_directory
10 self.rules_directory = self.root_directory / "rules"
11 self.util_directory = self.root_directory / "util"
12 self.regexp_assemble_directory = self.util_directory / "regexp-assemble"
13 self.data_files_directory = self.regexp_assemble_directory / "data"
14 self.include_files_directory = self.regexp_assemble_directory / "data" / "include"
15 self.regexp_assemble_pl_path = self.regexp_assemble_directory / "lib" / "regexp-assemble.pl"
16 self.single_rule_id = namespace.rule_id if namespace else None
17 self.single_chain_offset = None
18 if namespace and "chain_offset" in namespace:
19 self.single_chain_offset = namespace.chain_offset
20
21 self._dump_to_debug_log()
22
23 assert (
24 self.rules_directory.exists()
25 and self.util_directory.exists()
26 and self.regexp_assemble_directory.exists()
27 and self.data_files_directory.exists()
28 and self.include_files_directory.exists()
29 )
30
31
32 def _dump_to_debug_log(self):
33 logger = logging.getLogger()
34 logger.debug("Root directory: %s", self.root_directory)
35 logger.debug("Rules directory: %s", self.rules_directory)
36 logger.debug("Data files directory: %s", self.data_files_directory)
37 logger.debug("Include files directory: %s", self.include_files_directory)
38 logger.debug("Parsed rule ID: %s", self.single_rule_id)
39 logger.debug("Parsed chain offset: %s", self.single_chain_offset)
40
[end of util/regexp-assemble/lib/context.py]
</code>
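
As a sanity check for the move, the assertions in `Context.__init__` above suggest the new layout can be validated with nothing more than `pathlib` — a quick illustrative snippet (the checkout path is a placeholder):

```python
# Illustrative only: verify the data files now resolve from the repo root.
from pathlib import Path

root = Path("~/src/coreruleset").expanduser()  # hypothetical checkout location
assert (root / "data").is_dir()  # previously util/regexp-assemble/data
assert (root / "data" / "include").is_dir()
```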
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/util/regexp-assemble/lib/context.py b/util/regexp-assemble/lib/context.py
--- a/util/regexp-assemble/lib/context.py
+++ b/util/regexp-assemble/lib/context.py
@@ -10,8 +10,8 @@
self.rules_directory = self.root_directory / "rules"
self.util_directory = self.root_directory / "util"
self.regexp_assemble_directory = self.util_directory / "regexp-assemble"
- self.data_files_directory = self.regexp_assemble_directory / "data"
- self.include_files_directory = self.regexp_assemble_directory / "data" / "include"
+ self.data_files_directory = self.root_directory / "data"
+ self.include_files_directory = self.root_directory / "data" / "include"
self.regexp_assemble_pl_path = self.regexp_assemble_directory / "lib" / "regexp-assemble.pl"
self.single_rule_id = namespace.rule_id if namespace else None
self.single_chain_offset = None
| {"golden_diff": "diff --git a/util/regexp-assemble/lib/context.py b/util/regexp-assemble/lib/context.py\n--- a/util/regexp-assemble/lib/context.py\n+++ b/util/regexp-assemble/lib/context.py\n@@ -10,8 +10,8 @@\n self.rules_directory = self.root_directory / \"rules\"\n self.util_directory = self.root_directory / \"util\"\n self.regexp_assemble_directory = self.util_directory / \"regexp-assemble\"\n- self.data_files_directory = self.regexp_assemble_directory / \"data\"\n- self.include_files_directory = self.regexp_assemble_directory / \"data\" / \"include\"\n+ self.data_files_directory = self.root_directory / \"data\"\n+ self.include_files_directory = self.root_directory / \"data\" / \"include\"\n self.regexp_assemble_pl_path = self.regexp_assemble_directory / \"lib\" / \"regexp-assemble.pl\"\n self.single_rule_id = namespace.rule_id if namespace else None\n self.single_chain_offset = None\n", "issue": "Move data files from util/regexp-assemble directory to the top level\n### Description\r\n\r\nData files used to generate regular expressions have been somehow in a difficult-to-find place, dependent on the tool.\r\n\r\nNow with the new crs-toolchain, this is not needed anymore.\r\n\r\nSo let's move the data files to the top level directory.\r\n\r\n### Requirements\r\n\r\n- move all data files to the top level dir\r\n- review dependencies and check that all references are updated\n", "before_files": [{"content": "import argparse\nfrom pathlib import Path\nimport logging\n\n\n\nclass Context(object):\n def __init__(self, root_directory: Path, namespace: argparse.Namespace=None):\n self.root_directory = root_directory\n self.rules_directory = self.root_directory / \"rules\"\n self.util_directory = self.root_directory / \"util\"\n self.regexp_assemble_directory = self.util_directory / \"regexp-assemble\"\n self.data_files_directory = self.regexp_assemble_directory / \"data\"\n self.include_files_directory = self.regexp_assemble_directory / \"data\" / \"include\"\n self.regexp_assemble_pl_path = self.regexp_assemble_directory / \"lib\" / \"regexp-assemble.pl\"\n self.single_rule_id = namespace.rule_id if namespace else None\n self.single_chain_offset = None\n if namespace and \"chain_offset\" in namespace:\n self.single_chain_offset = namespace.chain_offset\n\n self._dump_to_debug_log()\n\n assert (\n self.rules_directory.exists()\n and self.util_directory.exists()\n and self.regexp_assemble_directory.exists()\n and self.data_files_directory.exists()\n and self.include_files_directory.exists()\n )\n\n\n def _dump_to_debug_log(self):\n logger = logging.getLogger()\n logger.debug(\"Root directory: %s\", self.root_directory)\n logger.debug(\"Rules directory: %s\", self.rules_directory)\n logger.debug(\"Data files directory: %s\", self.data_files_directory)\n logger.debug(\"Include files directory: %s\", self.include_files_directory)\n logger.debug(\"Parsed rule ID: %s\", self.single_rule_id)\n logger.debug(\"Parsed chain offset: %s\", self.single_chain_offset)\n", "path": "util/regexp-assemble/lib/context.py"}]} | 1,059 | 216 |
gh_patches_debug_4420 | rasdani/github-patches | git_diff | ephios-dev__ephios-220 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
List of own upcoming shifts
As a user, I want to see a list of shifts that I have been confirmed for on the main page.
</issue>
<code>
[start of ephios/event_management/templatetags/event_extras.py]
1 from django import template
2 from django.utils.safestring import mark_safe
3
4 from ephios.event_management.models import AbstractParticipation
5
6 register = template.Library()
7
8
9 @register.filter(name="shift_status")
10 def shift_status(shift, user):
11 participation = user.as_participant().participation_for(shift)
12 if participation is not None:
13 color = {
14 AbstractParticipation.States.USER_DECLINED: "text-danger",
15 AbstractParticipation.States.RESPONSIBLE_REJECTED: "text-danger",
16 AbstractParticipation.States.REQUESTED: "text-warning",
17 AbstractParticipation.States.CONFIRMED: "text-success",
18 }[participation.state]
19 return mark_safe(f'<span class="{color}">{participation.get_state_display()}</span><br>')
20 return ""
21
22
23 @register.filter(name="can_sign_up")
24 def can_sign_up(shift, user):
25 return shift.signup_method.can_sign_up(user.as_participant())
26
27
28 @register.filter(name="render_shift_state")
29 def render_shift_state(shift, request):
30 return shift.signup_method.render_shift_state(request)
31
32
33 @register.filter(name="signup_errors")
34 def signup_errors(shift, user):
35 return shift.signup_method.get_signup_errors(user.as_participant())
36
37
38 @register.filter(name="can_decline")
39 def can_decline(shift, user):
40 return shift.signup_method.can_decline(user.as_participant())
41
42
43 @register.filter(name="decline_errors")
44 def decline_errors(shift, user):
45 return shift.signup_method.get_decline_errors(user.as_participant())
46
[end of ephios/event_management/templatetags/event_extras.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ephios/event_management/templatetags/event_extras.py b/ephios/event_management/templatetags/event_extras.py
--- a/ephios/event_management/templatetags/event_extras.py
+++ b/ephios/event_management/templatetags/event_extras.py
@@ -43,3 +43,10 @@
@register.filter(name="decline_errors")
def decline_errors(shift, user):
return shift.signup_method.get_decline_errors(user.as_participant())
+
+
[email protected](name="confirmed_shifts")
+def confirmed_shifts(user):
+ return user.get_shifts(
+ with_participation_state_in=[AbstractParticipation.States.CONFIRMED]
+ ).order_by("start_time")
| {"golden_diff": "diff --git a/ephios/event_management/templatetags/event_extras.py b/ephios/event_management/templatetags/event_extras.py\n--- a/ephios/event_management/templatetags/event_extras.py\n+++ b/ephios/event_management/templatetags/event_extras.py\n@@ -43,3 +43,10 @@\n @register.filter(name=\"decline_errors\")\n def decline_errors(shift, user):\n return shift.signup_method.get_decline_errors(user.as_participant())\n+\n+\[email protected](name=\"confirmed_shifts\")\n+def confirmed_shifts(user):\n+ return user.get_shifts(\n+ with_participation_state_in=[AbstractParticipation.States.CONFIRMED]\n+ ).order_by(\"start_time\")\n", "issue": "List of own upcoming shifts\nAs a user, I want to see a list of shifts that I have been confirmed for on the main page.\n", "before_files": [{"content": "from django import template\nfrom django.utils.safestring import mark_safe\n\nfrom ephios.event_management.models import AbstractParticipation\n\nregister = template.Library()\n\n\[email protected](name=\"shift_status\")\ndef shift_status(shift, user):\n participation = user.as_participant().participation_for(shift)\n if participation is not None:\n color = {\n AbstractParticipation.States.USER_DECLINED: \"text-danger\",\n AbstractParticipation.States.RESPONSIBLE_REJECTED: \"text-danger\",\n AbstractParticipation.States.REQUESTED: \"text-warning\",\n AbstractParticipation.States.CONFIRMED: \"text-success\",\n }[participation.state]\n return mark_safe(f'<span class=\"{color}\">{participation.get_state_display()}</span><br>')\n return \"\"\n\n\[email protected](name=\"can_sign_up\")\ndef can_sign_up(shift, user):\n return shift.signup_method.can_sign_up(user.as_participant())\n\n\[email protected](name=\"render_shift_state\")\ndef render_shift_state(shift, request):\n return shift.signup_method.render_shift_state(request)\n\n\[email protected](name=\"signup_errors\")\ndef signup_errors(shift, user):\n return shift.signup_method.get_signup_errors(user.as_participant())\n\n\[email protected](name=\"can_decline\")\ndef can_decline(shift, user):\n return shift.signup_method.can_decline(user.as_participant())\n\n\[email protected](name=\"decline_errors\")\ndef decline_errors(shift, user):\n return shift.signup_method.get_decline_errors(user.as_participant())\n", "path": "ephios/event_management/templatetags/event_extras.py"}]} | 987 | 166 |
gh_patches_debug_15929 | rasdani/github-patches | git_diff | microsoft__DeepSpeed-4405 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[REQUEST] Add timeout as entry-point option or environment variable
**Is your feature request related to a problem? Please describe.**
I am using Hugging Face `transformers` for my deep learning, and it has a nice option to restrict specific processing to the main process only. This is useful if a function caches the result: the main process does the processing while the other processes wait, and when main is done, the other processes can just load from the cache. That's pretty neat.
The problem arises with long-running processes. In a distributed environment (torch or deepspeed, for instance), the communication between processes has a default timeout: if no communication has occurred for `timeout` seconds, the whole program will exit.
**Describe the solution you'd like**
Both [`torch`](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) and [`deepspeed`](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) provide options in their Python init methods to set the timeout to a higher value than the default 30 minutes, but this option is not available from the command line or through an environment variable, which is what I would like.
**Describe alternatives you've considered**
I could make a custom fork but I think that this is something that more people might need as soon as they scale to larger projects.
**Additional context**
I can work on this, depending on what you suggest as a solution (a CLI argument for the `deepspeed` command or an environment variable).
</issue>
<code>
[start of deepspeed/constants.py]
1 # Copyright (c) Microsoft Corporation.
2 # SPDX-License-Identifier: Apache-2.0
3
4 # DeepSpeed Team
5
6 from datetime import timedelta
7
8 #############################################
9 # Torch distributed constants
10 #############################################
11 TORCH_DISTRIBUTED_DEFAULT_PORT = 29500
12
13 # Default process group wide timeout, if applicable.
14 # This only applies to the gloo and nccl backends
15 # (only if NCCL_BLOCKING_WAIT or NCCL_ASYNC_ERROR_HANDLING is set to 1).
16 # To make an attempt at backwards compatibility with THD, we use an
17 # extraordinarily high default timeout, given that THD did not have timeouts.
18 default_pg_timeout = timedelta(minutes=30)
19 INFERENCE_GENERIC_MODE = 'generic'
20 INFERENCE_SPECIALIZED_MODE = 'specialized'
21
[end of deepspeed/constants.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/deepspeed/constants.py b/deepspeed/constants.py
--- a/deepspeed/constants.py
+++ b/deepspeed/constants.py
@@ -3,6 +3,7 @@
# DeepSpeed Team
+import os
from datetime import timedelta
#############################################
@@ -15,6 +16,6 @@
# (only if NCCL_BLOCKING_WAIT or NCCL_ASYNC_ERROR_HANDLING is set to 1).
# To make an attempt at backwards compatibility with THD, we use an
# extraordinarily high default timeout, given that THD did not have timeouts.
-default_pg_timeout = timedelta(minutes=30)
+default_pg_timeout = timedelta(minutes=int(os.getenv("DEEPSPEED_TIMEOUT", default=30)))
INFERENCE_GENERIC_MODE = 'generic'
INFERENCE_SPECIALIZED_MODE = 'specialized'
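A quick check of the behaviour the accepted diff introduces; the variable name comes from the diff above, everything else is illustrative:

```python
import os
from datetime import timedelta

# With DEEPSPEED_TIMEOUT unset, the default stays at 30 minutes.
assert timedelta(minutes=int(os.getenv("DEEPSPEED_TIMEOUT", default=30))) == timedelta(minutes=30)

# Exporting DEEPSPEED_TIMEOUT=120 before launch raises it to two hours.
os.environ["DEEPSPEED_TIMEOUT"] = "120"
assert timedelta(minutes=int(os.getenv("DEEPSPEED_TIMEOUT", default=30))) == timedelta(hours=2)
```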
| {"golden_diff": "diff --git a/deepspeed/constants.py b/deepspeed/constants.py\n--- a/deepspeed/constants.py\n+++ b/deepspeed/constants.py\n@@ -3,6 +3,7 @@\n \n # DeepSpeed Team\n \n+import os\n from datetime import timedelta\n \n #############################################\n@@ -15,6 +16,6 @@\n # (only if NCCL_BLOCKING_WAIT or NCCL_ASYNC_ERROR_HANDLING is set to 1).\n # To make an attempt at backwards compatibility with THD, we use an\n # extraordinarily high default timeout, given that THD did not have timeouts.\n-default_pg_timeout = timedelta(minutes=30)\n+default_pg_timeout = timedelta(minutes=int(os.getenv(\"DEEPSPEED_TIMEOUT\", default=30)))\n INFERENCE_GENERIC_MODE = 'generic'\n INFERENCE_SPECIALIZED_MODE = 'specialized'\n", "issue": "[REQUEST] Add timeout as entry-point option or environment variable\n**Is your feature request related to a problem? Please describe.**\r\nI am using Hugging Face `transformers` for my deep learning, and it has a nice option to restrict specific processing to the main process only. This is useful if a function caches the result: the main process does the processing while the other processes wait, and when main is done, the other processes can just load from the cache. That's pretty neat.\r\n\r\nThe problem arises when these are long running processes. In distributed environment (torch or deepspeed, for instance), the communication between processes has a default timeout. If no communication has occurred for `timeout` seconds, the whole program will exit. \r\n\r\n**Describe the solution you'd like**\r\n\r\nBoth [`torch`](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) and [`deepspeed`](https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) provide options in the Python init methods to set the timeout parameter to a higher value than the default 30 minutes, but this option is not available from the command-line or through an environment, which is what I would like.\r\n\r\n**Describe alternatives you've considered**\r\nI could make a custom fork but I think that this is something that more people might need as soon as they scale to larger projects.\r\n\r\n**Additional context**\r\n\r\nI can work on this, depending on what you suggest as a solution (CLI argument for the `deepspeed` command or as environment variable).\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# SPDX-License-Identifier: Apache-2.0\n\n# DeepSpeed Team\n\nfrom datetime import timedelta\n\n#############################################\n# Torch distributed constants\n#############################################\nTORCH_DISTRIBUTED_DEFAULT_PORT = 29500\n\n# Default process group wide timeout, if applicable.\n# This only applies to the gloo and nccl backends\n# (only if NCCL_BLOCKING_WAIT or NCCL_ASYNC_ERROR_HANDLING is set to 1).\n# To make an attempt at backwards compatibility with THD, we use an\n# extraordinarily high default timeout, given that THD did not have timeouts.\ndefault_pg_timeout = timedelta(minutes=30)\nINFERENCE_GENERIC_MODE = 'generic'\nINFERENCE_SPECIALIZED_MODE = 'specialized'\n", "path": "deepspeed/constants.py"}]} | 1,053 | 175 |
gh_patches_debug_28442 | rasdani/github-patches | git_diff | pypa__pipenv-1326 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pipenv starts slow when IPython is installed.
IPython is imported when importing dotenv.
(ref: theskumar/python-dotenv#84 and [import profile](https://paste.ubuntu.com/26409167/))
Since pipenv uses patched version of dotenv, pipenv should port upstream fix
or patch `dotenv/__init__.py` to stop importing dotenv.ipython.
##### Describe your environment
1. Ubuntu 17.10
1. Python version: 3.7.0a4
1. Pipenv version: 9.0.3
##### Steps to replicate
* Install Python 3.7.0a4 or newer
* ` PYTHONPROFILEIMPORTTIME=1 path/to/pipenv --version 2>pipenv-version`
</issue>
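The slow path is the eager `from .ipython import ...` executed when the package is first imported. One way the cost can be deferred, shown purely as a sketch of the pattern rather than the patch that was eventually adopted, is to resolve the IPython-dependent module only when IPython itself invokes the hook (module path illustrative):

```python
import importlib


def load_ipython_extension(ipython):
    # Deferred: dotenv.ipython (and therefore IPython) is only imported
    # when the %dotenv magic is actually registered, so a plain
    # `import dotenv` no longer pays the IPython import cost.
    impl = importlib.import_module("dotenv.ipython")
    impl.load_ipython_extension(ipython)
```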
<code>
[start of pipenv/patched/dotenv/__init__.py]
1 from .cli import get_cli_string
2 from .main import load_dotenv, get_key, set_key, unset_key, find_dotenv
3 try:
4 from .ipython import load_ipython_extension
5 except ImportError:
6 pass
7
8 __all__ = ['get_cli_string', 'load_dotenv', 'get_key', 'set_key', 'unset_key', 'find_dotenv', 'load_ipython_extension']
9
[end of pipenv/patched/dotenv/__init__.py]
[start of pipenv/patched/dotenv/ipython.py]
1 from __future__ import print_function
2 from .main import load_dotenv, find_dotenv
3
4 from IPython.core.magic import Magics, magics_class, line_magic
5 from IPython.core.magic_arguments import (argument, magic_arguments,
6 parse_argstring)
7
8
9 @magics_class
10 class IPythonDotEnv(Magics):
11
12 @magic_arguments()
13 @argument(
14 '-o', '--override', action='store_true',
15 help="Indicate to override existing variables"
16 )
17 @argument(
18 '-v', '--verbose', action='store_true',
19 help="Indicate function calls to be verbose"
20 )
21 @argument('dotenv_path', nargs='?', type=str, default='.env',
22 help='Search in increasingly higher folders for the `dotenv_path`')
23 @line_magic
24 def dotenv(self, line):
25 args = parse_argstring(self.dotenv, line)
26 # Locate the .env file
27 dotenv_path = args.dotenv_path
28 try:
29 dotenv_path = find_dotenv(dotenv_path, True, True)
30 except IOError:
31 print("cannot find .env file")
32 return
33
34 # Load the .env file
35 load_dotenv(dotenv_path, verbose=args.verbose, override=args.override)
36
37
38 def load_ipython_extension(ipython):
39 """Register the %dotenv magic."""
40 ipython.register_magics(IPythonDotEnv)
41
[end of pipenv/patched/dotenv/ipython.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/pipenv/patched/dotenv/__init__.py b/pipenv/patched/dotenv/__init__.py
--- a/pipenv/patched/dotenv/__init__.py
+++ b/pipenv/patched/dotenv/__init__.py
@@ -1,8 +1,4 @@
from .cli import get_cli_string
from .main import load_dotenv, get_key, set_key, unset_key, find_dotenv
-try:
- from .ipython import load_ipython_extension
-except ImportError:
- pass
-__all__ = ['get_cli_string', 'load_dotenv', 'get_key', 'set_key', 'unset_key', 'find_dotenv', 'load_ipython_extension']
+__all__ = ['get_cli_string', 'load_dotenv', 'get_key', 'set_key', 'unset_key', 'find_dotenv']
diff --git a/pipenv/patched/dotenv/ipython.py b/pipenv/patched/dotenv/ipython.py
deleted file mode 100644
--- a/pipenv/patched/dotenv/ipython.py
+++ /dev/null
@@ -1,40 +0,0 @@
-from __future__ import print_function
-from .main import load_dotenv, find_dotenv
-
-from IPython.core.magic import Magics, magics_class, line_magic
-from IPython.core.magic_arguments import (argument, magic_arguments,
- parse_argstring)
-
-
-@magics_class
-class IPythonDotEnv(Magics):
-
- @magic_arguments()
- @argument(
- '-o', '--override', action='store_true',
- help="Indicate to override existing variables"
- )
- @argument(
- '-v', '--verbose', action='store_true',
- help="Indicate function calls to be verbose"
- )
- @argument('dotenv_path', nargs='?', type=str, default='.env',
- help='Search in increasingly higher folders for the `dotenv_path`')
- @line_magic
- def dotenv(self, line):
- args = parse_argstring(self.dotenv, line)
- # Locate the .env file
- dotenv_path = args.dotenv_path
- try:
- dotenv_path = find_dotenv(dotenv_path, True, True)
- except IOError:
- print("cannot find .env file")
- return
-
- # Load the .env file
- load_dotenv(dotenv_path, verbose=args.verbose, override=args.override)
-
-
-def load_ipython_extension(ipython):
- """Register the %dotenv magic."""
- ipython.register_magics(IPythonDotEnv)
| {"golden_diff": "diff --git a/pipenv/patched/dotenv/__init__.py b/pipenv/patched/dotenv/__init__.py\n--- a/pipenv/patched/dotenv/__init__.py\n+++ b/pipenv/patched/dotenv/__init__.py\n@@ -1,8 +1,4 @@\n from .cli import get_cli_string\n from .main import load_dotenv, get_key, set_key, unset_key, find_dotenv\n-try:\n- from .ipython import load_ipython_extension\n-except ImportError:\n- pass\n \n-__all__ = ['get_cli_string', 'load_dotenv', 'get_key', 'set_key', 'unset_key', 'find_dotenv', 'load_ipython_extension']\n+__all__ = ['get_cli_string', 'load_dotenv', 'get_key', 'set_key', 'unset_key', 'find_dotenv']\ndiff --git a/pipenv/patched/dotenv/ipython.py b/pipenv/patched/dotenv/ipython.py\ndeleted file mode 100644\n--- a/pipenv/patched/dotenv/ipython.py\n+++ /dev/null\n@@ -1,40 +0,0 @@\n-from __future__ import print_function\n-from .main import load_dotenv, find_dotenv\n-\n-from IPython.core.magic import Magics, magics_class, line_magic\n-from IPython.core.magic_arguments import (argument, magic_arguments,\n- parse_argstring)\n-\n-\n-@magics_class\n-class IPythonDotEnv(Magics):\n-\n- @magic_arguments()\n- @argument(\n- '-o', '--override', action='store_true',\n- help=\"Indicate to override existing variables\"\n- )\n- @argument(\n- '-v', '--verbose', action='store_true',\n- help=\"Indicate function calls to be verbose\"\n- )\n- @argument('dotenv_path', nargs='?', type=str, default='.env',\n- help='Search in increasingly higher folders for the `dotenv_path`')\n- @line_magic\n- def dotenv(self, line):\n- args = parse_argstring(self.dotenv, line)\n- # Locate the .env file\n- dotenv_path = args.dotenv_path\n- try:\n- dotenv_path = find_dotenv(dotenv_path, True, True)\n- except IOError:\n- print(\"cannot find .env file\")\n- return\n-\n- # Load the .env file\n- load_dotenv(dotenv_path, verbose=args.verbose, override=args.override)\n-\n-\n-def load_ipython_extension(ipython):\n- \"\"\"Register the %dotenv magic.\"\"\"\n- ipython.register_magics(IPythonDotEnv)\n", "issue": "pipenv starts slow when IPython is installed.\nIPython is imported when importing dotenv. \r\n(ref: theskumar/python-dotenv#84 and [import profile](https://paste.ubuntu.com/26409167/))\r\n\r\nSince pipenv uses patched version of dotenv, pipenv should port upstream fix\r\nor patch `dotenv/__init__.py` to stop importing dotenv.ipython.\r\n\r\n##### Describe your environment\r\n\r\n1. Ubuntu 17.10\r\n1. Python version: 3.7.0a4\r\n1. 
Pipenv version: 9.0.3\r\n\r\n##### Steps to replicate\r\n\r\n* Install Python 3.7.0a4 or newer\r\n* ` PYTHONPROFILEIMPORTTIME=1 path/to/pipenv --version 2>pipenv-version`\n", "before_files": [{"content": "from .cli import get_cli_string\nfrom .main import load_dotenv, get_key, set_key, unset_key, find_dotenv\ntry:\n from .ipython import load_ipython_extension\nexcept ImportError:\n pass\n\n__all__ = ['get_cli_string', 'load_dotenv', 'get_key', 'set_key', 'unset_key', 'find_dotenv', 'load_ipython_extension']\n", "path": "pipenv/patched/dotenv/__init__.py"}, {"content": "from __future__ import print_function\nfrom .main import load_dotenv, find_dotenv\n\nfrom IPython.core.magic import Magics, magics_class, line_magic\nfrom IPython.core.magic_arguments import (argument, magic_arguments,\n parse_argstring)\n\n\n@magics_class\nclass IPythonDotEnv(Magics):\n\n @magic_arguments()\n @argument(\n '-o', '--override', action='store_true',\n help=\"Indicate to override existing variables\"\n )\n @argument(\n '-v', '--verbose', action='store_true',\n help=\"Indicate function calls to be verbose\"\n )\n @argument('dotenv_path', nargs='?', type=str, default='.env',\n help='Search in increasingly higher folders for the `dotenv_path`')\n @line_magic\n def dotenv(self, line):\n args = parse_argstring(self.dotenv, line)\n # Locate the .env file\n dotenv_path = args.dotenv_path\n try:\n dotenv_path = find_dotenv(dotenv_path, True, True)\n except IOError:\n print(\"cannot find .env file\")\n return\n\n # Load the .env file\n load_dotenv(dotenv_path, verbose=args.verbose, override=args.override)\n\n\ndef load_ipython_extension(ipython):\n \"\"\"Register the %dotenv magic.\"\"\"\n ipython.register_magics(IPythonDotEnv)\n", "path": "pipenv/patched/dotenv/ipython.py"}]} | 1,213 | 595 |
gh_patches_debug_4224 | rasdani/github-patches | git_diff | pypa__pip-5146 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Development version number triggers a false positive warning
* Pip version: 10.0.0b1
* Python version: 3.6.4
* Operating system: Linux
### Description:
Say a package `foo` depends on `bar>=1.0.0`. If the installed version of `bar` is a development version such as `1.0.1.dev42`, pip issues an incompatible version warning upon installation of `foo`. Pip shouldn't issue any warning since `1.0.1.dev42>=1.0.0`. The weird thing is that pip is satisfied with that version when scanning the dependencies of `foo`, but issues that warning anyway.
For that matter, the real life scenario is installing a development library with a `setuptools_scm`-generated version number and then installing a library that depends on it.
### What I've run:
```
% tree
.
├── bar
│ └── setup.py
└── foo
└── setup.py
2 directories, 2 files
```
```
% cat bar/setup.py
from setuptools import setup
setup(
name='bar',
version='1.0.1.dev42')
```
```
% cat foo/setup.py
from setuptools import setup
setup(
name='foo',
install_requires=['bar>=1.0.0'],
version='3.14.15')
```
```
# setting up virtual environment
% python3 -m venv compat
% source compat/bin/activate
% pip install pip==10.0.0b1
```
```
% pip install ./bar
Processing ./bar
Installing collected packages: bar
Running setup.py install for bar ... done
Successfully installed bar-1.0.1.dev42
```
```
% pip install ./foo
Processing ./foo
Requirement already satisfied: bar>=1.0.0 in ./compat/lib/python3.6/site-packages (from foo==3.14.15) (1.0.1.dev42)
foo 3.14.15 has requirement bar>=1.0.0, but you'll have bar 1.0.1.dev42 which is incompatible.
Installing collected packages: foo
Running setup.py install for foo ... done
Successfully installed foo-3.14.15
```
</issue>
<code>
[start of src/pip/_internal/operations/check.py]
1 """Validation of dependencies of packages
2 """
3
4 from collections import namedtuple
5
6 from pip._vendor.packaging.utils import canonicalize_name
7
8 from pip._internal.operations.prepare import make_abstract_dist
9
10 from pip._internal.utils.misc import get_installed_distributions
11 from pip._internal.utils.typing import MYPY_CHECK_RUNNING
12
13 if MYPY_CHECK_RUNNING:
14 from pip._internal.req.req_install import InstallRequirement
15 from typing import Any, Dict, Iterator, Set, Tuple, List
16
17 # Shorthands
18 PackageSet = Dict[str, 'PackageDetails']
19 Missing = Tuple[str, Any]
20 Conflicting = Tuple[str, str, Any]
21
22 MissingDict = Dict[str, List[Missing]]
23 ConflictingDict = Dict[str, List[Conflicting]]
24 CheckResult = Tuple[MissingDict, ConflictingDict]
25
26 PackageDetails = namedtuple('PackageDetails', ['version', 'requires'])
27
28
29 def create_package_set_from_installed(**kwargs):
30 # type: (**Any) -> PackageSet
31 """Converts a list of distributions into a PackageSet.
32 """
33 retval = {}
34 for dist in get_installed_distributions(**kwargs):
35 name = canonicalize_name(dist.project_name)
36 retval[name] = PackageDetails(dist.version, dist.requires())
37 return retval
38
39
40 def check_package_set(package_set):
41 # type: (PackageSet) -> CheckResult
42 """Check if a package set is consistent
43 """
44 missing = dict()
45 conflicting = dict()
46
47 for package_name in package_set:
48 # Info about dependencies of package_name
49 missing_deps = set() # type: Set[Missing]
50 conflicting_deps = set() # type: Set[Conflicting]
51
52 for req in package_set[package_name].requires:
53 name = canonicalize_name(req.project_name) # type: str
54
55 # Check if it's missing
56 if name not in package_set:
57 missed = True
58 if req.marker is not None:
59 missed = req.marker.evaluate()
60 if missed:
61 missing_deps.add((name, req))
62 continue
63
64 # Check if there's a conflict
65 version = package_set[name].version # type: str
66 if version not in req.specifier:
67 conflicting_deps.add((name, version, req))
68
69 def str_key(x):
70 return str(x)
71
72 if missing_deps:
73 missing[package_name] = sorted(missing_deps, key=str_key)
74 if conflicting_deps:
75 conflicting[package_name] = sorted(conflicting_deps, key=str_key)
76
77 return missing, conflicting
78
79
80 def check_install_conflicts(to_install):
81 # type: (List[InstallRequirement]) -> Tuple[PackageSet, CheckResult]
82 """For checking if the dependency graph would be consistent after \
83 installing given requirements
84 """
85 # Start from the current state
86 state = create_package_set_from_installed()
87 _simulate_installation_of(to_install, state)
88 return state, check_package_set(state)
89
90
91 # NOTE from @pradyunsg
92 # This required a minor update in dependency link handling logic over at
93 # operations.prepare.IsSDist.dist() to get it working
94 def _simulate_installation_of(to_install, state):
95 # type: (List[InstallRequirement], PackageSet) -> None
96 """Computes the version of packages after installing to_install.
97 """
98
99 # Modify it as installing requirement_set would (assuming no errors)
100 for inst_req in to_install:
101 dist = make_abstract_dist(inst_req).dist(finder=None)
102 state[dist.key] = PackageDetails(dist.version, dist.requires())
103
[end of src/pip/_internal/operations/check.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/src/pip/_internal/operations/check.py b/src/pip/_internal/operations/check.py
--- a/src/pip/_internal/operations/check.py
+++ b/src/pip/_internal/operations/check.py
@@ -63,7 +63,7 @@
# Check if there's a conflict
version = package_set[name].version # type: str
- if version not in req.specifier:
+ if not req.specifier.contains(version, prereleases=True):
conflicting_deps.add((name, version, req))
def str_key(x):
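The one-line change hinges on how `packaging` treats pre- and dev-releases: the plain `in` operator excludes them by default, while `contains(..., prereleases=True)` matches what the resolver accepted. A self-contained check, requiring only the `packaging` package:

```python
from packaging.specifiers import SpecifierSet

spec = SpecifierSet(">=1.0.0")

assert "1.0.1.dev42" not in spec                       # default: dev releases excluded
assert spec.contains("1.0.1.dev42", prereleases=True)  # the patched, resolver-consistent check
```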
| {"golden_diff": "diff --git a/src/pip/_internal/operations/check.py b/src/pip/_internal/operations/check.py\n--- a/src/pip/_internal/operations/check.py\n+++ b/src/pip/_internal/operations/check.py\n@@ -63,7 +63,7 @@\n \n # Check if there's a conflict\n version = package_set[name].version # type: str\n- if version not in req.specifier:\n+ if not req.specifier.contains(version, prereleases=True):\n conflicting_deps.add((name, version, req))\n \n def str_key(x):\n", "issue": "Development version number triggers a false positive warning\n* Pip version: 10.0.0b1\r\n* Python version: 3.6.4\r\n* Operating system: Linux\r\n\r\n### Description:\r\n\r\nSay a package `foo` depends on `bar>=1.0.0`. If the installed version of `bar` is a development version such as `1.0.1.dev42`, pip issues an incompatible version warning upon installation of `foo`. Pip shouldn't issue any warning since `1.0.1.dev42>=1.0.0`. The weird thing is that pip is satisfied with that version when scanning the dependencies of `foo`, but issues that warning anyway.\r\n\r\nFor that matter, the real life scenario is installing a development library with a `setuptools_scm`-generated version number and then installing a library that depends on it.\r\n\r\n### What I've run:\r\n\r\n```\r\n% tree\r\n.\r\n\u251c\u2500\u2500 bar\r\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 setup.py\r\n\u2514\u2500\u2500 foo\r\n \u2514\u2500\u2500 setup.py\r\n\r\n2 directories, 2 files\r\n```\r\n\r\n```\r\n% cat bar/setup.py\r\nfrom setuptools import setup\r\n\r\nsetup(\r\n name='bar',\r\n version='1.0.1.dev42')\r\n```\r\n\r\n```\r\n% cat foo/setup.py\r\nfrom setuptools import setup\r\n\r\nsetup(\r\n name='foo',\r\n install_requires=['bar>=1.0.0'],\r\n version='3.14.15')\r\n```\r\n\r\n```\r\n# setting up virtual environment\r\n% python3 -m venv compat\r\n% source compat/bin/activate\r\n% pip install pip==10.0.0b1\r\n```\r\n\r\n```\r\n% pip install ./bar\r\nProcessing ./bar\r\nInstalling collected packages: bar\r\n Running setup.py install for bar ... done\r\nSuccessfully installed bar-1.0.1.dev42\r\n```\r\n\r\n```\r\n% pip install ./foo\r\nProcessing ./foo\r\nRequirement already satisfied: bar>=1.0.0 in ./compat/lib/python3.6/site-packages (from foo==3.14.15) (1.0.1.dev42)\r\nfoo 3.14.15 has requirement bar>=1.0.0, but you'll have bar 1.0.1.dev42 which is incompatible.\r\nInstalling collected packages: foo\r\n Running setup.py install for foo ... 
done\r\nSuccessfully installed foo-3.14.15\r\n```\r\n\n", "before_files": [{"content": "\"\"\"Validation of dependencies of packages\n\"\"\"\n\nfrom collections import namedtuple\n\nfrom pip._vendor.packaging.utils import canonicalize_name\n\nfrom pip._internal.operations.prepare import make_abstract_dist\n\nfrom pip._internal.utils.misc import get_installed_distributions\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\n\nif MYPY_CHECK_RUNNING:\n from pip._internal.req.req_install import InstallRequirement\n from typing import Any, Dict, Iterator, Set, Tuple, List\n\n # Shorthands\n PackageSet = Dict[str, 'PackageDetails']\n Missing = Tuple[str, Any]\n Conflicting = Tuple[str, str, Any]\n\n MissingDict = Dict[str, List[Missing]]\n ConflictingDict = Dict[str, List[Conflicting]]\n CheckResult = Tuple[MissingDict, ConflictingDict]\n\nPackageDetails = namedtuple('PackageDetails', ['version', 'requires'])\n\n\ndef create_package_set_from_installed(**kwargs):\n # type: (**Any) -> PackageSet\n \"\"\"Converts a list of distributions into a PackageSet.\n \"\"\"\n retval = {}\n for dist in get_installed_distributions(**kwargs):\n name = canonicalize_name(dist.project_name)\n retval[name] = PackageDetails(dist.version, dist.requires())\n return retval\n\n\ndef check_package_set(package_set):\n # type: (PackageSet) -> CheckResult\n \"\"\"Check if a package set is consistent\n \"\"\"\n missing = dict()\n conflicting = dict()\n\n for package_name in package_set:\n # Info about dependencies of package_name\n missing_deps = set() # type: Set[Missing]\n conflicting_deps = set() # type: Set[Conflicting]\n\n for req in package_set[package_name].requires:\n name = canonicalize_name(req.project_name) # type: str\n\n # Check if it's missing\n if name not in package_set:\n missed = True\n if req.marker is not None:\n missed = req.marker.evaluate()\n if missed:\n missing_deps.add((name, req))\n continue\n\n # Check if there's a conflict\n version = package_set[name].version # type: str\n if version not in req.specifier:\n conflicting_deps.add((name, version, req))\n\n def str_key(x):\n return str(x)\n\n if missing_deps:\n missing[package_name] = sorted(missing_deps, key=str_key)\n if conflicting_deps:\n conflicting[package_name] = sorted(conflicting_deps, key=str_key)\n\n return missing, conflicting\n\n\ndef check_install_conflicts(to_install):\n # type: (List[InstallRequirement]) -> Tuple[PackageSet, CheckResult]\n \"\"\"For checking if the dependency graph would be consistent after \\\n installing given requirements\n \"\"\"\n # Start from the current state\n state = create_package_set_from_installed()\n _simulate_installation_of(to_install, state)\n return state, check_package_set(state)\n\n\n# NOTE from @pradyunsg\n# This required a minor update in dependency link handling logic over at\n# operations.prepare.IsSDist.dist() to get it working\ndef _simulate_installation_of(to_install, state):\n # type: (List[InstallRequirement], PackageSet) -> None\n \"\"\"Computes the version of packages after installing to_install.\n \"\"\"\n\n # Modify it as installing requirement_set would (assuming no errors)\n for inst_req in to_install:\n dist = make_abstract_dist(inst_req).dist(finder=None)\n state[dist.key] = PackageDetails(dist.version, dist.requires())\n", "path": "src/pip/_internal/operations/check.py"}]} | 2,020 | 127 |
gh_patches_debug_63956 | rasdani/github-patches | git_diff | redis__redis-py-1780 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Module installation fails due to missing dependency
https://github.com/redis/redis-py/blob/039488d97ec545b37e903d1b791a88bac8f77973/redis/connection.py#L1
The deprecated `distutils` was replaced with the `packaging` module as part of release v4.0.0b1.
`packaging` is not a built-in Python module but was not added to setup.py as a dependency, which causes applications that require redis-py to fail if `packaging` isn't already installed on the machine.
The `packaging` module should probably be added as a dependency in setup.py to resolve this.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 from setuptools import find_packages, setup
3
4 import redis
5
6 setup(
7 name="redis",
8 description="Python client for Redis database and key-value store",
9 long_description=open("README.md").read().strip(),
10 long_description_content_type="text/markdown",
11 keywords=["Redis", "key-value store", "database"],
12 license="MIT",
13 version=redis.__version__,
14 packages=find_packages(
15 include=[
16 "redis",
17 "redis.commands",
18 "redis.commands.bf",
19 "redis.commands.json",
20 "redis.commands.search",
21 "redis.commands.timeseries",
22 "redis.commands.graph",
23 ]
24 ),
25 url="https://github.com/redis/redis-py",
26 author="Redis Inc.",
27 author_email="[email protected]",
28 python_requires=">=3.6",
29 install_requires=[
30 "deprecated==1.2.3",
31 "packaging==21.3",
32 ],
33 classifiers=[
34 "Development Status :: 5 - Production/Stable",
35 "Environment :: Console",
36 "Intended Audience :: Developers",
37 "License :: OSI Approved :: MIT License",
38 "Operating System :: OS Independent",
39 "Programming Language :: Python",
40 "Programming Language :: Python :: 3",
41 "Programming Language :: Python :: 3 :: Only",
42 "Programming Language :: Python :: 3.6",
43 "Programming Language :: Python :: 3.7",
44 "Programming Language :: Python :: 3.8",
45 "Programming Language :: Python :: 3.9",
46 "Programming Language :: Python :: 3.10",
47 "Programming Language :: Python :: Implementation :: CPython",
48 "Programming Language :: Python :: Implementation :: PyPy",
49 ],
50 extras_require={
51 "hiredis": ["hiredis>=1.0.0"],
52 },
53 )
54
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -26,9 +26,12 @@
author="Redis Inc.",
author_email="[email protected]",
python_requires=">=3.6",
+ setup_requires=[
+ "packaging>=21.3",
+ ],
install_requires=[
- "deprecated==1.2.3",
- "packaging==21.3",
+ "deprecated>=1.2.3",
+ "packaging>=21.3",
],
classifiers=[
"Development Status :: 5 - Production/Stable",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -26,9 +26,12 @@\n author=\"Redis Inc.\",\n author_email=\"[email protected]\",\n python_requires=\">=3.6\",\n+ setup_requires=[\n+ \"packaging>=21.3\",\n+ ],\n install_requires=[\n- \"deprecated==1.2.3\",\n- \"packaging==21.3\",\n+ \"deprecated>=1.2.3\",\n+ \"packaging>=21.3\",\n ],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n", "issue": "Module installation fails due to missing dependency\nhttps://github.com/redis/redis-py/blob/039488d97ec545b37e903d1b791a88bac8f77973/redis/connection.py#L1\r\nthe deprecated distutils was replaced with the packaging module as part of release v4.0.0b1\r\npackaging is not a builtin python module but was not added to setup.py as a dependency which causes applications that require redis-py to fail if packaging isn't already installed on the machine.\r\nthe packaging module should probably be added as a dependency in setup.py to resolve this\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom setuptools import find_packages, setup\n\nimport redis\n\nsetup(\n name=\"redis\",\n description=\"Python client for Redis database and key-value store\",\n long_description=open(\"README.md\").read().strip(),\n long_description_content_type=\"text/markdown\",\n keywords=[\"Redis\", \"key-value store\", \"database\"],\n license=\"MIT\",\n version=redis.__version__,\n packages=find_packages(\n include=[\n \"redis\",\n \"redis.commands\",\n \"redis.commands.bf\",\n \"redis.commands.json\",\n \"redis.commands.search\",\n \"redis.commands.timeseries\",\n \"redis.commands.graph\",\n ]\n ),\n url=\"https://github.com/redis/redis-py\",\n author=\"Redis Inc.\",\n author_email=\"[email protected]\",\n python_requires=\">=3.6\",\n install_requires=[\n \"deprecated==1.2.3\",\n \"packaging==21.3\",\n ],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n extras_require={\n \"hiredis\": [\"hiredis>=1.0.0\"],\n },\n)\n", "path": "setup.py"}]} | 1,163 | 142 |
gh_patches_debug_20147 | rasdani/github-patches | git_diff | kartoza__prj.app-447 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
After creating a new organization it should appear in the pending approval menu
Please make sure that, when a user adds an organization, the Pending Approval menu is updated.
http://staging.changelog.qgis.org/en/qgis/pending-certifyingorganisation/list/
</issue>
<code>
[start of django_project/core/custom_middleware.py]
1 # coding=utf-8
2 # flake8: noqa
3 """
4 core.custom_middleware
5 """
6 from base.models import Project, Version
7 from changes.models import Category, SponsorshipLevel, SponsorshipPeriod, Entry
8
9
10 class NavContextMiddleware(object):
11 """
12 Adds the required navigation variables to each response
13 """
14
15 def __init__(self):
16 pass
17
18 @staticmethod
19 def process_template_response(request, response):
20 """
21 Add 'the_project', 'the_entry', 'the_version' to context for the
22 navigation.
23
24 Justification: To make the navigation functional, we need to know
25 which Project (or Version, Committee etc) the current context
26 relates to. This is required for URLs. Rather than include lots of
27 if/else in the navigation template, it seems cleaner to add the
28 above variables to the context here.
29
30 :param request: Http Request obj
31 :param response: Http Response obj
32 :return: context :rtype: dict
33 """
34 context = response.context_data
35
36 if context.get('project', None):
37 context['the_project'] = context.get('project')
38 versions = Version.objects.filter(project=context.get('project'))
39 context['has_pending_versions'] = (
40 Version.unapproved_objects.filter(
41 project=context.get('project')).exists())
42 context['has_pending_categories'] = (
43 Category.unapproved_objects.filter(
44 project=context.get('project')).exists())
45 context['has_pending_sponsor_lvl'] = (
46 SponsorshipLevel.unapproved_objects.filter(
47 project=context.get('project')).exists())
48 context['has_pending_sponsor_period'] = (
49 SponsorshipPeriod.unapproved_objects.filter(
50 project=context.get('project')).exists())
51 if versions:
52 context['has_pending_entries'] = (
53 Entry.unapproved_objects.filter(
54 version__in=versions).exists())
55
56 else:
57 if request.user.is_staff:
58 context['the_projects'] = Project.objects.all()
59 else:
60 context['the_projects'] = Project.approved_objects.filter(
61 private=False
62 )
63
64 if context.get('version', None):
65 context['the_version'] = context.get('version')
66 context['the_project'] = context.get('version').project
67
68 if context.get('committee', None):
69 context['the_committee'] = context.get('committee')
70 context['the_project'] = context.get('committee').project
71
72 if context.get('ballot', None):
73 context['the_committee'] = context.get('ballot').committee
74 context['the_project'] = context.get('ballot').committee.project
75
76 if context.get('category', None):
77 context['the_project'] = context.get('category').project
78
79 if context.get('ballots', None):
80 try:
81 context['the_project'] = \
82 context.get('ballots')[0].committee.project
83 except (KeyError, IndexError):
84 pass
85
86 if context.get('entry', None):
87 context['the_entry'] = context.get('entry')
88 context['the_version'] = context.get('entry').version
89 context['the_project'] = context.get('entry').version.project
90
91 if context.get('committees', None):
92 try:
93 context['the_project'] = context.get('committees')[0].project
94 except (KeyError, IndexError):
95 pass
96
97 if context.get('versions', None):
98 try:
99 context['the_project'] = context.get('versions')[0].project
100 except (KeyError, IndexError):
101 pass
102
103 if context.get('entries', None):
104 try:
105 context['the_version'] = context.get('entries')[0].version
106 context['the_project'] = \
107 context.get('entries')[0].version.project
108 except (KeyError, IndexError):
109 pass
110
111 if context.get('categories', None):
112 try:
113 context['the_project'] = \
114 context.get('categories')[0].project
115 except (KeyError, IndexError):
116 pass
117
118 return response
119
[end of django_project/core/custom_middleware.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/django_project/core/custom_middleware.py b/django_project/core/custom_middleware.py
--- a/django_project/core/custom_middleware.py
+++ b/django_project/core/custom_middleware.py
@@ -5,6 +5,7 @@
"""
from base.models import Project, Version
from changes.models import Category, SponsorshipLevel, SponsorshipPeriod, Entry
+from certification.models import CertifyingOrganisation
class NavContextMiddleware(object):
@@ -48,6 +49,9 @@
context['has_pending_sponsor_period'] = (
SponsorshipPeriod.unapproved_objects.filter(
project=context.get('project')).exists())
+ context['has_pending_organisations'] = (
+ CertifyingOrganisation.unapproved_objects.filter(
+ project=context.get('project')).exists())
if versions:
context['has_pending_entries'] = (
Entry.unapproved_objects.filter(
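With the flag exposed, the navigation layer can gate the menu entry the same way the other pending counters are used. A sketch of the consuming side; apart from `has_pending_organisations` (from the diff) and the URL (from the issue), all names are illustrative:

```python
def pending_menu_items(context):
    """Hypothetical helper: build the 'pending approval' menu entries."""
    items = []
    if context.get("has_pending_organisations"):
        items.append(("Pending certifying organisations",
                      "/en/qgis/pending-certifyingorganisation/list/"))
    return items
```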
| {"golden_diff": "diff --git a/django_project/core/custom_middleware.py b/django_project/core/custom_middleware.py\n--- a/django_project/core/custom_middleware.py\n+++ b/django_project/core/custom_middleware.py\n@@ -5,6 +5,7 @@\n \"\"\"\n from base.models import Project, Version\n from changes.models import Category, SponsorshipLevel, SponsorshipPeriod, Entry\n+from certification.models import CertifyingOrganisation\n \n \n class NavContextMiddleware(object):\n@@ -48,6 +49,9 @@\n context['has_pending_sponsor_period'] = (\n SponsorshipPeriod.unapproved_objects.filter(\n project=context.get('project')).exists())\n+ context['has_pending_organisations'] = (\n+ CertifyingOrganisation.unapproved_objects.filter(\n+ project=context.get('project')).exists())\n if versions:\n context['has_pending_entries'] = (\n Entry.unapproved_objects.filter(\n", "issue": "After creating a new organization it should appear in the pending approval menu\nPlease make sure if a user adds an organization the Pending Approval menu is updated\r\n\r\nhttp://staging.changelog.qgis.org/en/qgis/pending-certifyingorganisation/list/\n", "before_files": [{"content": "# coding=utf-8\n# flake8: noqa\n\"\"\"\ncore.custom_middleware\n\"\"\"\nfrom base.models import Project, Version\nfrom changes.models import Category, SponsorshipLevel, SponsorshipPeriod, Entry\n\n\nclass NavContextMiddleware(object):\n \"\"\"\n Adds the required navigation variables to each response\n \"\"\"\n\n def __init__(self):\n pass\n\n @staticmethod\n def process_template_response(request, response):\n \"\"\"\n Add 'the_project', 'the_entry', 'the_version' to context for the\n navigation.\n\n Justification: To make the navigation functional, we need to know\n which Project (or Version, Committee etc) the current context\n relates to. This is required for URLs. 
Rather than include lots of\n if/else in the navigation template, it seems cleaner to add the\n above variables to the context here.\n\n :param request: Http Request obj\n :param response: Http Response obj\n :return: context :rtype: dict\n \"\"\"\n context = response.context_data\n\n if context.get('project', None):\n context['the_project'] = context.get('project')\n versions = Version.objects.filter(project=context.get('project'))\n context['has_pending_versions'] = (\n Version.unapproved_objects.filter(\n project=context.get('project')).exists())\n context['has_pending_categories'] = (\n Category.unapproved_objects.filter(\n project=context.get('project')).exists())\n context['has_pending_sponsor_lvl'] = (\n SponsorshipLevel.unapproved_objects.filter(\n project=context.get('project')).exists())\n context['has_pending_sponsor_period'] = (\n SponsorshipPeriod.unapproved_objects.filter(\n project=context.get('project')).exists())\n if versions:\n context['has_pending_entries'] = (\n Entry.unapproved_objects.filter(\n version__in=versions).exists())\n\n else:\n if request.user.is_staff:\n context['the_projects'] = Project.objects.all()\n else:\n context['the_projects'] = Project.approved_objects.filter(\n private=False\n )\n\n if context.get('version', None):\n context['the_version'] = context.get('version')\n context['the_project'] = context.get('version').project\n\n if context.get('committee', None):\n context['the_committee'] = context.get('committee')\n context['the_project'] = context.get('committee').project\n\n if context.get('ballot', None):\n context['the_committee'] = context.get('ballot').committee\n context['the_project'] = context.get('ballot').committee.project\n\n if context.get('category', None):\n context['the_project'] = context.get('category').project\n\n if context.get('ballots', None):\n try:\n context['the_project'] = \\\n context.get('ballots')[0].committee.project\n except (KeyError, IndexError):\n pass\n\n if context.get('entry', None):\n context['the_entry'] = context.get('entry')\n context['the_version'] = context.get('entry').version\n context['the_project'] = context.get('entry').version.project\n\n if context.get('committees', None):\n try:\n context['the_project'] = context.get('committees')[0].project\n except (KeyError, IndexError):\n pass\n\n if context.get('versions', None):\n try:\n context['the_project'] = context.get('versions')[0].project\n except (KeyError, IndexError):\n pass\n\n if context.get('entries', None):\n try:\n context['the_version'] = context.get('entries')[0].version\n context['the_project'] = \\\n context.get('entries')[0].version.project\n except (KeyError, IndexError):\n pass\n\n if context.get('categories', None):\n try:\n context['the_project'] = \\\n context.get('categories')[0].project\n except (KeyError, IndexError):\n pass\n\n return response\n", "path": "django_project/core/custom_middleware.py"}]} | 1,701 | 191 |
gh_patches_debug_3596 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-2170 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Identity spoofing via secondary email
See https://github.com/pennersr/django-allauth/issues/2265
cc: @CarolingerSeilchenspringer @MagdaN @fuzzylogic2000
</issue>
<code>
[start of meinberlin/apps/users/adapters.py]
1 import re
2 from urllib.parse import quote
3
4 from allauth.account.adapter import DefaultAccountAdapter
5 from django.conf import settings
6 from django.utils.http import is_safe_url
7
8 from adhocracy4.emails.mixins import SyncEmailMixin
9 from meinberlin.apps.contrib.emails import Email
10 from meinberlin.apps.users import USERNAME_INVALID_MESSAGE
11 from meinberlin.apps.users import USERNAME_REGEX
12
13
14 class UserAccountEmail(SyncEmailMixin, Email):
15 def get_receivers(self):
16 return [self.object]
17
18 @property
19 def template_name(self):
20 return self.kwargs['template_name']
21
22 def get_context(self):
23 context = super().get_context()
24 context['contact_email'] = settings.CONTACT_EMAIL
25 return context
26
27
28 class AccountAdapter(DefaultAccountAdapter):
29 username_regex = re.compile(USERNAME_REGEX)
30 error_messages = dict(
31 DefaultAccountAdapter.error_messages,
32 invalid_username=USERNAME_INVALID_MESSAGE
33 )
34
35 def get_email_confirmation_url(self, request, emailconfirmation):
36 url = super().get_email_confirmation_url(request, emailconfirmation)
37 if 'next' in request.POST and is_safe_url(request.POST['next']):
38 return '{}?next={}'.format(url, quote(request.POST['next']))
39 else:
40 return url
41
42 def send_mail(self, template_prefix, email, context):
43 user = context['user']
44 return UserAccountEmail.send(
45 user,
46 template_name=template_prefix,
47 **context
48 )
49
50 def get_email_confirmation_redirect_url(self, request):
51 if 'next' in request.GET and is_safe_url(request.GET['next']):
52 return request.GET['next']
53 else:
54 return super().get_email_confirmation_redirect_url(request)
55
[end of meinberlin/apps/users/adapters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/meinberlin/apps/users/adapters.py b/meinberlin/apps/users/adapters.py
--- a/meinberlin/apps/users/adapters.py
+++ b/meinberlin/apps/users/adapters.py
@@ -40,9 +40,8 @@
return url
def send_mail(self, template_prefix, email, context):
- user = context['user']
return UserAccountEmail.send(
- user,
+ email,
template_name=template_prefix,
**context
)
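The substance of the fix is which recipient `send_mail` trusts. allauth hands the adapter the address under confirmation; mailing `context['user']` instead lets an account holder attach someone else's address and have the confirmation delivered to themselves, as the linked allauth issue describes. A reduced sketch of the two shapes (`deliver` is a stand-in for the real mail backend):

```python
def deliver(to, template):
    print(f"would send {template} to {to}")  # stand-in for the real mail backend


def send_mail_vulnerable(template_prefix, email, context):
    # Pre-fix shape: the message goes to the account owner,
    # not to the address that is supposed to prove itself.
    deliver(to=context["user"], template=template_prefix)


def send_mail_fixed(template_prefix, email, context):
    # Post-fix shape: the confirmation goes to the address being verified.
    deliver(to=email, template=template_prefix)
```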
| {"golden_diff": "diff --git a/meinberlin/apps/users/adapters.py b/meinberlin/apps/users/adapters.py\n--- a/meinberlin/apps/users/adapters.py\n+++ b/meinberlin/apps/users/adapters.py\n@@ -40,9 +40,8 @@\n return url\n \n def send_mail(self, template_prefix, email, context):\n- user = context['user']\n return UserAccountEmail.send(\n- user,\n+ email,\n template_name=template_prefix,\n **context\n )\n", "issue": "Identity spoofing via secondary email\nSee https://github.com/pennersr/django-allauth/issues/2265\r\n\r\ncc: @CarolingerSeilchenspringer @MagdaN @fuzzylogic2000 \n", "before_files": [{"content": "import re\nfrom urllib.parse import quote\n\nfrom allauth.account.adapter import DefaultAccountAdapter\nfrom django.conf import settings\nfrom django.utils.http import is_safe_url\n\nfrom adhocracy4.emails.mixins import SyncEmailMixin\nfrom meinberlin.apps.contrib.emails import Email\nfrom meinberlin.apps.users import USERNAME_INVALID_MESSAGE\nfrom meinberlin.apps.users import USERNAME_REGEX\n\n\nclass UserAccountEmail(SyncEmailMixin, Email):\n def get_receivers(self):\n return [self.object]\n\n @property\n def template_name(self):\n return self.kwargs['template_name']\n\n def get_context(self):\n context = super().get_context()\n context['contact_email'] = settings.CONTACT_EMAIL\n return context\n\n\nclass AccountAdapter(DefaultAccountAdapter):\n username_regex = re.compile(USERNAME_REGEX)\n error_messages = dict(\n DefaultAccountAdapter.error_messages,\n invalid_username=USERNAME_INVALID_MESSAGE\n )\n\n def get_email_confirmation_url(self, request, emailconfirmation):\n url = super().get_email_confirmation_url(request, emailconfirmation)\n if 'next' in request.POST and is_safe_url(request.POST['next']):\n return '{}?next={}'.format(url, quote(request.POST['next']))\n else:\n return url\n\n def send_mail(self, template_prefix, email, context):\n user = context['user']\n return UserAccountEmail.send(\n user,\n template_name=template_prefix,\n **context\n )\n\n def get_email_confirmation_redirect_url(self, request):\n if 'next' in request.GET and is_safe_url(request.GET['next']):\n return request.GET['next']\n else:\n return super().get_email_confirmation_redirect_url(request)\n", "path": "meinberlin/apps/users/adapters.py"}]} | 1,057 | 114 |
gh_patches_debug_10828 | rasdani/github-patches | git_diff | open-mmlab__mmdeploy-700 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pytorch2onnx fails with mmedit models
Error with the master branch:
```
TypeError: forward_dummy() got an unexpected keyword argument 'img_metas'
```
</issue>
<code>
[start of mmdeploy/apis/pytorch2onnx.py]
1 # Copyright (c) OpenMMLab. All rights reserved.
2 import os.path as osp
3 from typing import Any, Optional, Union
4
5 import mmcv
6 import torch
7
8 from mmdeploy.apis.core.pipeline_manager import no_mp
9 from mmdeploy.utils import (get_backend, get_dynamic_axes, get_input_shape,
10 get_onnx_config, load_config)
11 from .core import PIPELINE_MANAGER
12 from .onnx import export
13
14
15 @PIPELINE_MANAGER.register_pipeline()
16 def torch2onnx(img: Any,
17 work_dir: str,
18 save_file: str,
19 deploy_cfg: Union[str, mmcv.Config],
20 model_cfg: Union[str, mmcv.Config],
21 model_checkpoint: Optional[str] = None,
22 device: str = 'cuda:0'):
23 """Convert PyTorch model to ONNX model.
24
25 Examples:
26 >>> from mmdeploy.apis import torch2onnx
27 >>> img = 'demo.jpg'
28 >>> work_dir = 'work_dir'
29 >>> save_file = 'fcos.onnx'
30 >>> deploy_cfg = ('configs/mmdet/detection/'
31 'detection_onnxruntime_dynamic.py')
32 >>> model_cfg = ('mmdetection/configs/fcos/'
33 'fcos_r50_caffe_fpn_gn-head_1x_coco.py')
34 >>> model_checkpoint = ('checkpoints/'
35 'fcos_r50_caffe_fpn_gn-head_1x_coco-821213aa.pth')
36 >>> device = 'cpu'
37 >>> torch2onnx(img, work_dir, save_file, deploy_cfg, \
38 model_cfg, model_checkpoint, device)
39
40 Args:
41 img (str | np.ndarray | torch.Tensor): Input image used to assist
42 converting model.
43 work_dir (str): A working directory to save files.
44 save_file (str): Filename to save onnx model.
45 deploy_cfg (str | mmcv.Config): Deployment config file or
46 Config object.
47 model_cfg (str | mmcv.Config): Model config file or Config object.
48 model_checkpoint (str): A checkpoint path of PyTorch model,
49 defaults to `None`.
50 device (str): A string specifying device type, defaults to 'cuda:0'.
51 """
52 # load deploy_cfg if necessary
53 deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)
54 mmcv.mkdir_or_exist(osp.abspath(work_dir))
55
56 input_shape = get_input_shape(deploy_cfg)
57
58 # create model an inputs
59 from mmdeploy.apis import build_task_processor
60 task_processor = build_task_processor(model_cfg, deploy_cfg, device)
61
62 torch_model = task_processor.init_pytorch_model(model_checkpoint)
63 data, model_inputs = task_processor.create_input(img, input_shape)
64 input_metas = dict(img_metas=data.get('img_metas', None))
65 if not isinstance(model_inputs, torch.Tensor) and len(model_inputs) == 1:
66 model_inputs = model_inputs[0]
67
68 # export to onnx
69 context_info = dict()
70 context_info['deploy_cfg'] = deploy_cfg
71 output_prefix = osp.join(work_dir,
72 osp.splitext(osp.basename(save_file))[0])
73 backend = get_backend(deploy_cfg).value
74
75 onnx_cfg = get_onnx_config(deploy_cfg)
76 opset_version = onnx_cfg.get('opset_version', 11)
77
78 input_names = onnx_cfg['input_names']
79 output_names = onnx_cfg['output_names']
80 axis_names = input_names + output_names
81 dynamic_axes = get_dynamic_axes(deploy_cfg, axis_names)
82 verbose = not onnx_cfg.get('strip_doc_string', True) or onnx_cfg.get(
83 'verbose', False)
84 keep_initializers_as_inputs = onnx_cfg.get('keep_initializers_as_inputs',
85 True)
86 optimize = onnx_cfg.get('optimize', False)
87 with no_mp():
88 export(
89 torch_model,
90 model_inputs,
91 input_metas=input_metas,
92 output_path_prefix=output_prefix,
93 backend=backend,
94 input_names=input_names,
95 output_names=output_names,
96 context_info=context_info,
97 opset_version=opset_version,
98 dynamic_axes=dynamic_axes,
99 verbose=verbose,
100 keep_initializers_as_inputs=keep_initializers_as_inputs,
101 optimize=optimize)
102
[end of mmdeploy/apis/pytorch2onnx.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/mmdeploy/apis/pytorch2onnx.py b/mmdeploy/apis/pytorch2onnx.py
--- a/mmdeploy/apis/pytorch2onnx.py
+++ b/mmdeploy/apis/pytorch2onnx.py
@@ -61,7 +61,11 @@
torch_model = task_processor.init_pytorch_model(model_checkpoint)
data, model_inputs = task_processor.create_input(img, input_shape)
- input_metas = dict(img_metas=data.get('img_metas', None))
+ if 'img_metas' in data:
+ input_metas = dict(img_metas=data['img_metas'])
+ else:
+ # codebases like mmedit do not have img_metas argument
+ input_metas = None
if not isinstance(model_inputs, torch.Tensor) and len(model_inputs) == 1:
model_inputs = model_inputs[0]
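Stripped of context, the guard is just conditional keyword construction: codebases whose models lack an `img_metas` argument, such as mmedit's `forward_dummy` per the issue, never receive one. An equivalent compact form, as a sketch:

```python
data = {"img": ...}  # e.g. what an mmedit create_input might return: no img_metas key

# None downstream means "call the exported forward without extra metas".
input_metas = {"img_metas": data["img_metas"]} if "img_metas" in data else None
assert input_metas is None
```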
| {"golden_diff": "diff --git a/mmdeploy/apis/pytorch2onnx.py b/mmdeploy/apis/pytorch2onnx.py\n--- a/mmdeploy/apis/pytorch2onnx.py\n+++ b/mmdeploy/apis/pytorch2onnx.py\n@@ -61,7 +61,11 @@\n \n torch_model = task_processor.init_pytorch_model(model_checkpoint)\n data, model_inputs = task_processor.create_input(img, input_shape)\n- input_metas = dict(img_metas=data.get('img_metas', None))\n+ if 'img_metas' in data:\n+ input_metas = dict(img_metas=data['img_metas'])\n+ else:\n+ # codebases like mmedit do not have img_metas argument\n+ input_metas = None\n if not isinstance(model_inputs, torch.Tensor) and len(model_inputs) == 1:\n model_inputs = model_inputs[0]\n", "issue": "pytorch2onnx fails with mmedit models\nerror with master branch\r\n```\r\nTypeError: forward_dummy() got an unexpected keyword argument 'img_metas'\r\n```\n", "before_files": [{"content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os.path as osp\nfrom typing import Any, Optional, Union\n\nimport mmcv\nimport torch\n\nfrom mmdeploy.apis.core.pipeline_manager import no_mp\nfrom mmdeploy.utils import (get_backend, get_dynamic_axes, get_input_shape,\n get_onnx_config, load_config)\nfrom .core import PIPELINE_MANAGER\nfrom .onnx import export\n\n\n@PIPELINE_MANAGER.register_pipeline()\ndef torch2onnx(img: Any,\n work_dir: str,\n save_file: str,\n deploy_cfg: Union[str, mmcv.Config],\n model_cfg: Union[str, mmcv.Config],\n model_checkpoint: Optional[str] = None,\n device: str = 'cuda:0'):\n \"\"\"Convert PyTorch model to ONNX model.\n\n Examples:\n >>> from mmdeploy.apis import torch2onnx\n >>> img = 'demo.jpg'\n >>> work_dir = 'work_dir'\n >>> save_file = 'fcos.onnx'\n >>> deploy_cfg = ('configs/mmdet/detection/'\n 'detection_onnxruntime_dynamic.py')\n >>> model_cfg = ('mmdetection/configs/fcos/'\n 'fcos_r50_caffe_fpn_gn-head_1x_coco.py')\n >>> model_checkpoint = ('checkpoints/'\n 'fcos_r50_caffe_fpn_gn-head_1x_coco-821213aa.pth')\n >>> device = 'cpu'\n >>> torch2onnx(img, work_dir, save_file, deploy_cfg, \\\n model_cfg, model_checkpoint, device)\n\n Args:\n img (str | np.ndarray | torch.Tensor): Input image used to assist\n converting model.\n work_dir (str): A working directory to save files.\n save_file (str): Filename to save onnx model.\n deploy_cfg (str | mmcv.Config): Deployment config file or\n Config object.\n model_cfg (str | mmcv.Config): Model config file or Config object.\n model_checkpoint (str): A checkpoint path of PyTorch model,\n defaults to `None`.\n device (str): A string specifying device type, defaults to 'cuda:0'.\n \"\"\"\n # load deploy_cfg if necessary\n deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)\n mmcv.mkdir_or_exist(osp.abspath(work_dir))\n\n input_shape = get_input_shape(deploy_cfg)\n\n # create model an inputs\n from mmdeploy.apis import build_task_processor\n task_processor = build_task_processor(model_cfg, deploy_cfg, device)\n\n torch_model = task_processor.init_pytorch_model(model_checkpoint)\n data, model_inputs = task_processor.create_input(img, input_shape)\n input_metas = dict(img_metas=data.get('img_metas', None))\n if not isinstance(model_inputs, torch.Tensor) and len(model_inputs) == 1:\n model_inputs = model_inputs[0]\n\n # export to onnx\n context_info = dict()\n context_info['deploy_cfg'] = deploy_cfg\n output_prefix = osp.join(work_dir,\n osp.splitext(osp.basename(save_file))[0])\n backend = get_backend(deploy_cfg).value\n\n onnx_cfg = get_onnx_config(deploy_cfg)\n opset_version = onnx_cfg.get('opset_version', 11)\n\n input_names = 
onnx_cfg['input_names']\n output_names = onnx_cfg['output_names']\n axis_names = input_names + output_names\n dynamic_axes = get_dynamic_axes(deploy_cfg, axis_names)\n verbose = not onnx_cfg.get('strip_doc_string', True) or onnx_cfg.get(\n 'verbose', False)\n keep_initializers_as_inputs = onnx_cfg.get('keep_initializers_as_inputs',\n True)\n optimize = onnx_cfg.get('optimize', False)\n with no_mp():\n export(\n torch_model,\n model_inputs,\n input_metas=input_metas,\n output_path_prefix=output_prefix,\n backend=backend,\n input_names=input_names,\n output_names=output_names,\n context_info=context_info,\n opset_version=opset_version,\n dynamic_axes=dynamic_axes,\n verbose=verbose,\n keep_initializers_as_inputs=keep_initializers_as_inputs,\n optimize=optimize)\n", "path": "mmdeploy/apis/pytorch2onnx.py"}]} | 1,701 | 194 |
gh_patches_debug_25470 | rasdani/github-patches | git_diff | hylang__hy-2188 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Monkey-patching `py.path.local.pyimport` should no longer be necessary
Hi
I noticed **py** is used in conftest.py but not declared in any configuration files.
In addition, the py library is deprecated; its [documentation](https://pypi.org/project/py/) says "py.path: uniform local and svn path objects -> please use pathlib/pathlib2 instead".
Maybe it is necessary to migrate to a new dependency (pathlib/pathlib2) and add it to the configuration files.
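For illustration, a rough sketch of how the monkey-patch's module-name resolution could be expressed with only the standard library (`pathlib` + `importlib`); this is a hypothetical example, not code from the repository:
``` python
import importlib
from pathlib import Path


def import_from_path(path: Path, pkgroot: Path):
    # e.g. pkgroot/tests/foo/__init__.py -> "tests.foo"
    names = list(path.with_suffix("").relative_to(pkgroot).parts)
    if names[-1] == "__init__":
        names.pop()
    return importlib.import_module(".".join(names))
```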
</issue>
<code>
[start of conftest.py]
1 import sys
2 import os
3 import importlib
4 from operator import or_
5 from functools import reduce
6
7 import py
8 import pytest
9 import hy
10 from hy._compat import PY3_8, PY3_10
11
12 NATIVE_TESTS = os.path.join("", "tests", "native_tests", "")
13
14 _fspath_pyimport = py.path.local.pyimport
15
16 # https://github.com/hylang/hy/issues/2029
17 os.environ.pop("HYSTARTUP", None)
18
19
20 def pytest_ignore_collect(path, config):
21 versions = [
22 (sys.version_info < (3, 8), "sub_py3_7_only"),
23 (PY3_8, "py3_8_only"),
24 (PY3_10, "py3_10_only"),
25 ]
26
27 return reduce(
28 or_,
29 (name in path.basename and not condition for condition, name in versions),
30 ) or None
31
32
33 def pyimport_patch_mismatch(self, **kwargs):
34 """Lame fix for https://github.com/pytest-dev/py/issues/195"""
35 try:
36 return _fspath_pyimport(self, **kwargs)
37 except py.path.local.ImportMismatchError:
38 pkgpath = self.pypkgpath()
39 if pkgpath is None:
40 pkgroot = self.dirpath()
41 modname = self.purebasename
42 else:
43 pkgroot = pkgpath.dirpath()
44 names = self.new(ext="").relto(pkgroot).split(self.sep)
45 if names[-1] == "__init__":
46 names.pop()
47 modname = ".".join(names)
48
49 res = importlib.import_module(modname)
50
51 return res
52
53
54 py.path.local.pyimport = pyimport_patch_mismatch
55
56
57 def pytest_collect_file(parent, path):
58 if (path.ext == ".hy"
59 and NATIVE_TESTS in path.dirname + os.sep
60 and path.basename != "__init__.hy"):
61
62 if hasattr(pytest.Module, "from_parent"):
63 pytest_mod = pytest.Module.from_parent(parent, fspath=path)
64 else:
65 pytest_mod = pytest.Module(path, parent)
66 return pytest_mod
67
[end of conftest.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conftest.py b/conftest.py
--- a/conftest.py
+++ b/conftest.py
@@ -4,15 +4,12 @@
from operator import or_
from functools import reduce
-import py
import pytest
import hy
from hy._compat import PY3_8, PY3_10
NATIVE_TESTS = os.path.join("", "tests", "native_tests", "")
-_fspath_pyimport = py.path.local.pyimport
-
# https://github.com/hylang/hy/issues/2029
os.environ.pop("HYSTARTUP", None)
@@ -30,30 +27,6 @@
) or None
-def pyimport_patch_mismatch(self, **kwargs):
- """Lame fix for https://github.com/pytest-dev/py/issues/195"""
- try:
- return _fspath_pyimport(self, **kwargs)
- except py.path.local.ImportMismatchError:
- pkgpath = self.pypkgpath()
- if pkgpath is None:
- pkgroot = self.dirpath()
- modname = self.purebasename
- else:
- pkgroot = pkgpath.dirpath()
- names = self.new(ext="").relto(pkgroot).split(self.sep)
- if names[-1] == "__init__":
- names.pop()
- modname = ".".join(names)
-
- res = importlib.import_module(modname)
-
- return res
-
-
-py.path.local.pyimport = pyimport_patch_mismatch
-
-
def pytest_collect_file(parent, path):
if (path.ext == ".hy"
and NATIVE_TESTS in path.dirname + os.sep
| {"golden_diff": "diff --git a/conftest.py b/conftest.py\n--- a/conftest.py\n+++ b/conftest.py\n@@ -4,15 +4,12 @@\n from operator import or_\n from functools import reduce\n \n-import py\n import pytest\n import hy\n from hy._compat import PY3_8, PY3_10\n \n NATIVE_TESTS = os.path.join(\"\", \"tests\", \"native_tests\", \"\")\n \n-_fspath_pyimport = py.path.local.pyimport\n-\n # https://github.com/hylang/hy/issues/2029\n os.environ.pop(\"HYSTARTUP\", None)\n \n@@ -30,30 +27,6 @@\n ) or None\n \n \n-def pyimport_patch_mismatch(self, **kwargs):\n- \"\"\"Lame fix for https://github.com/pytest-dev/py/issues/195\"\"\"\n- try:\n- return _fspath_pyimport(self, **kwargs)\n- except py.path.local.ImportMismatchError:\n- pkgpath = self.pypkgpath()\n- if pkgpath is None:\n- pkgroot = self.dirpath()\n- modname = self.purebasename\n- else:\n- pkgroot = pkgpath.dirpath()\n- names = self.new(ext=\"\").relto(pkgroot).split(self.sep)\n- if names[-1] == \"__init__\":\n- names.pop()\n- modname = \".\".join(names)\n-\n- res = importlib.import_module(modname)\n-\n- return res\n-\n-\n-py.path.local.pyimport = pyimport_patch_mismatch\n-\n-\n def pytest_collect_file(parent, path):\n if (path.ext == \".hy\"\n and NATIVE_TESTS in path.dirname + os.sep\n", "issue": "Monkey-patching `py.path.local.pyimport` should no longer be necessary\nHi\r\nI noticed **py** is used in conftest.py but not declared in any configuration files .\r\nIn addition, py as a Python library is deprecated as its [documentation](https://pypi.org/project/py/) \"py.path: uniform local and svn path objects -> please use pathlib/pathlib2 instead\"\r\n\r\nMaybe it is necessary to migrate to new dependency-pathlib2 and add it to configuration files.\n", "before_files": [{"content": "import sys\nimport os\nimport importlib\nfrom operator import or_\nfrom functools import reduce\n\nimport py\nimport pytest\nimport hy\nfrom hy._compat import PY3_8, PY3_10\n\nNATIVE_TESTS = os.path.join(\"\", \"tests\", \"native_tests\", \"\")\n\n_fspath_pyimport = py.path.local.pyimport\n\n# https://github.com/hylang/hy/issues/2029\nos.environ.pop(\"HYSTARTUP\", None)\n\n\ndef pytest_ignore_collect(path, config):\n versions = [\n (sys.version_info < (3, 8), \"sub_py3_7_only\"),\n (PY3_8, \"py3_8_only\"),\n (PY3_10, \"py3_10_only\"),\n ]\n\n return reduce(\n or_,\n (name in path.basename and not condition for condition, name in versions),\n ) or None\n\n\ndef pyimport_patch_mismatch(self, **kwargs):\n \"\"\"Lame fix for https://github.com/pytest-dev/py/issues/195\"\"\"\n try:\n return _fspath_pyimport(self, **kwargs)\n except py.path.local.ImportMismatchError:\n pkgpath = self.pypkgpath()\n if pkgpath is None:\n pkgroot = self.dirpath()\n modname = self.purebasename\n else:\n pkgroot = pkgpath.dirpath()\n names = self.new(ext=\"\").relto(pkgroot).split(self.sep)\n if names[-1] == \"__init__\":\n names.pop()\n modname = \".\".join(names)\n\n res = importlib.import_module(modname)\n\n return res\n\n\npy.path.local.pyimport = pyimport_patch_mismatch\n\n\ndef pytest_collect_file(parent, path):\n if (path.ext == \".hy\"\n and NATIVE_TESTS in path.dirname + os.sep\n and path.basename != \"__init__.hy\"):\n\n if hasattr(pytest.Module, \"from_parent\"):\n pytest_mod = pytest.Module.from_parent(parent, fspath=path)\n else:\n pytest_mod = pytest.Module(path, parent)\n return pytest_mod\n", "path": "conftest.py"}]} | 1,215 | 372 |
gh_patches_debug_9777 | rasdani/github-patches | git_diff | kivy__kivy-1397 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
simplelistadapter should accept objects inheriting from list or tuple
I'd find it useful if it were possible to extend the list object that I pass to the SimpleListAdapter, but an exception is raised.
Reproduce :
``` python
from kivy.adapters.simplelistadapter import SimpleListAdapter
class ExtendedList(list):
pass
list_adapter = SimpleListAdapter(data=ExtendedList())
```
A solution :
In kivy/adapters/simplelistadapter.py
``` python
47 if type(kwargs['data']) not in (tuple, list):
48 raise Exception('list adapter: data must be a tuple or list')
```
May be replaced by:
``` python
if not isinstance(kwargs['data'], list) and not isinstance(kwargs['data'], tuple):
```
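For what it's worth, `isinstance` also accepts a tuple of types, so the same check can be written more compactly (just a sketch of an alternative, not necessarily the form the maintainers would choose):
``` python
if not isinstance(kwargs['data'], (list, tuple)):
    raise Exception('list adapter: data must be a tuple or list')
```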
</issue>
<code>
[start of kivy/adapters/simplelistadapter.py]
1 '''
2 SimpleListAdapter
3 =================
4
5 .. versionadded:: 1.5
6
7 .. warning::
8
9 This code is still experimental, and its API is subject to change in a
10 future version.
11
12 The :class:`~kivy.adapters.simplelistadapter.SimpleListAdapter` is used for
13 basic lists. For example, it can be used for displaying a list of read-only
14 strings that do not require user interaction.
15
16 '''
17
18 __all__ = ('SimpleListAdapter', )
19
20 from kivy.adapters.adapter import Adapter
21 from kivy.properties import ListProperty
22 from kivy.lang import Builder
23
24
25 class SimpleListAdapter(Adapter):
26 '''A :class:`~kivy.adapters.simplelistadapter.SimpleListAdapter` is an
27 adapter around a Python list.
28
29 From :class:`~kivy.adapters.adapter.Adapter`, the
30 :class:`~kivy.adapters.simplelistadapter.ListAdapter` gets cls, template,
31 and args_converter properties.
32 '''
33
34 data = ListProperty([])
35 '''The data list property contains a list of objects (which can be strings)
36 that will be used directly if no args_converter function is provided. If
37 there is an args_converter, the data objects will be passed to it for
38 instantiating the item view class instances.
39
40 :data:`data` is a :class:`~kivy.properties.ListProperty` and
41 defaults to [].
42 '''
43
44 def __init__(self, **kwargs):
45 if 'data' not in kwargs:
46 raise Exception('list adapter: input must include data argument')
47 if type(kwargs['data']) not in (tuple, list):
48 raise Exception('list adapter: data must be a tuple or list')
49 super(SimpleListAdapter, self).__init__(**kwargs)
50
51 def get_count(self):
52 return len(self.data)
53
54 def get_data_item(self, index):
55 if index < 0 or index >= len(self.data):
56 return None
57 return self.data[index]
58
59 # Returns a view instance for an item.
60 def get_view(self, index):
61 item = self.get_data_item(index)
62
63 if item is None:
64 return None
65
66 item_args = self.args_converter(index, item)
67
68 if self.cls:
69 instance = self.cls(**item_args)
70 return instance
71 else:
72 return Builder.template(self.template, **item_args)
73
[end of kivy/adapters/simplelistadapter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kivy/adapters/simplelistadapter.py b/kivy/adapters/simplelistadapter.py
--- a/kivy/adapters/simplelistadapter.py
+++ b/kivy/adapters/simplelistadapter.py
@@ -44,7 +44,8 @@
def __init__(self, **kwargs):
if 'data' not in kwargs:
raise Exception('list adapter: input must include data argument')
- if type(kwargs['data']) not in (tuple, list):
+ if not isinstance(kwargs['data'], list) and \
+ not isinstance(kwargs['data'], tuple):
raise Exception('list adapter: data must be a tuple or list')
super(SimpleListAdapter, self).__init__(**kwargs)
| {"golden_diff": "diff --git a/kivy/adapters/simplelistadapter.py b/kivy/adapters/simplelistadapter.py\n--- a/kivy/adapters/simplelistadapter.py\n+++ b/kivy/adapters/simplelistadapter.py\n@@ -44,7 +44,8 @@\n def __init__(self, **kwargs):\n if 'data' not in kwargs:\n raise Exception('list adapter: input must include data argument')\n- if type(kwargs['data']) not in (tuple, list):\n+ if not isinstance(kwargs['data'], list) and \\\n+ not isinstance(kwargs['data'], tuple):\n raise Exception('list adapter: data must be a tuple or list')\n super(SimpleListAdapter, self).__init__(**kwargs)\n", "issue": "simplelistadapter should accept objects inheriting from list or tuple\nI'll found it usefull if it was possible to extend the list object that I pass to the simplelistadapter, but an exception is raised.\n\nReproduce :\n\n``` python\nfrom kivy.adapters.simplelistadapter import SimpleListAdapter\nclass ExtendedList(list):\n pass\n\nlist_adapter = SimpleListAdapter(data=ExtendedList())\n```\n\nA solution :\nIn kivy/adapters/simplelistadapter.py\n\n``` python\n 47 if type(kwargs['data']) not in (tuple, list): \n 48 raise Exception('list adapter: data must be a tuple or list') \n```\n\nMay be replaced by:\n\n``` python\nif not isinstance(kwargs['data'], list) and not isinstance(kwargs['data'], tuple)\n```\n\n", "before_files": [{"content": "'''\nSimpleListAdapter\n=================\n\n.. versionadded:: 1.5\n\n.. warning::\n\n This code is still experimental, and its API is subject to change in a\n future version.\n\nThe :class:`~kivy.adapters.simplelistadapter.SimpleListAdapter` is used for\nbasic lists. For example, it can be used for displaying a list of read-only\nstrings that do not require user interaction.\n\n'''\n\n__all__ = ('SimpleListAdapter', )\n\nfrom kivy.adapters.adapter import Adapter\nfrom kivy.properties import ListProperty\nfrom kivy.lang import Builder\n\n\nclass SimpleListAdapter(Adapter):\n '''A :class:`~kivy.adapters.simplelistadapter.SimpleListAdapter` is an\n adapter around a Python list.\n\n From :class:`~kivy.adapters.adapter.Adapter`, the\n :class:`~kivy.adapters.simplelistadapter.ListAdapter` gets cls, template,\n and args_converter properties.\n '''\n\n data = ListProperty([])\n '''The data list property contains a list of objects (which can be strings)\n that will be used directly if no args_converter function is provided. If\n there is an args_converter, the data objects will be passed to it for\n instantiating the item view class instances.\n\n :data:`data` is a :class:`~kivy.properties.ListProperty` and\n defaults to [].\n '''\n\n def __init__(self, **kwargs):\n if 'data' not in kwargs:\n raise Exception('list adapter: input must include data argument')\n if type(kwargs['data']) not in (tuple, list):\n raise Exception('list adapter: data must be a tuple or list')\n super(SimpleListAdapter, self).__init__(**kwargs)\n\n def get_count(self):\n return len(self.data)\n\n def get_data_item(self, index):\n if index < 0 or index >= len(self.data):\n return None\n return self.data[index]\n\n # Returns a view instance for an item.\n def get_view(self, index):\n item = self.get_data_item(index)\n\n if item is None:\n return None\n\n item_args = self.args_converter(index, item)\n\n if self.cls:\n instance = self.cls(**item_args)\n return instance\n else:\n return Builder.template(self.template, **item_args)\n", "path": "kivy/adapters/simplelistadapter.py"}]} | 1,334 | 155 |
gh_patches_debug_24859 | rasdani/github-patches | git_diff | zulip__zulip-16242 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Enable translations for hotspots subsystem
There are unused translations in the hotspots subsystem that could be enabled, since finished translations are already available. At the moment the UI shows a mix of English and the configured user language.
Affected file: zerver/lib/hotspots.py
Example (mixed English/German):

</issue>
<code>
[start of zerver/lib/hotspots.py]
1 # See https://zulip.readthedocs.io/en/latest/subsystems/hotspots.html
2 # for documentation on this subsystem.
3 from typing import Dict, List
4
5 from django.conf import settings
6 from django.utils.translation import ugettext as _
7
8 from zerver.models import UserHotspot, UserProfile
9
10 ALL_HOTSPOTS: Dict[str, Dict[str, str]] = {
11 'intro_reply': {
12 'title': _('Reply to a message'),
13 'description': _('Click anywhere on a message to reply.'),
14 },
15 'intro_streams': {
16 'title': _('Catch up on a stream'),
17 'description': _('Messages sent to a stream are seen by everyone subscribed '
18 'to that stream. Try clicking on one of the stream links below.'),
19 },
20 'intro_topics': {
21 'title': _('Topics'),
22 'description': _('Every message has a topic. Topics keep conversations '
23 'easy to follow, and make it easy to reply to conversations that start '
24 'while you are offline.'),
25 },
26 'intro_gear': {
27 'title': _('Settings'),
28 'description': _('Go to Settings to configure your '
29 'notifications and display settings.'),
30 },
31 'intro_compose': {
32 'title': _('Compose'),
33 'description': _('Click here to start a new conversation. Pick a topic '
34 '(2-3 words is best), and give it a go!'),
35 },
36 }
37
38 def get_next_hotspots(user: UserProfile) -> List[Dict[str, object]]:
39 # For manual testing, it can be convenient to set
40 # ALWAYS_SEND_ALL_HOTSPOTS=True in `zproject/dev_settings.py` to
41 # make it easy to click on all of the hotspots. Note that
42 # ALWAYS_SEND_ALL_HOTSPOTS has some bugs; see ReadTheDocs (link
43 # above) for details.
44 if settings.ALWAYS_SEND_ALL_HOTSPOTS:
45 return [{
46 'name': hotspot,
47 'title': ALL_HOTSPOTS[hotspot]['title'],
48 'description': ALL_HOTSPOTS[hotspot]['description'],
49 'delay': 0,
50 } for hotspot in ALL_HOTSPOTS]
51
52 if user.tutorial_status == UserProfile.TUTORIAL_FINISHED:
53 return []
54
55 seen_hotspots = frozenset(UserHotspot.objects.filter(user=user).values_list('hotspot', flat=True))
56 for hotspot in ['intro_reply', 'intro_streams', 'intro_topics', 'intro_gear', 'intro_compose']:
57 if hotspot not in seen_hotspots:
58 return [{
59 'name': hotspot,
60 'title': ALL_HOTSPOTS[hotspot]['title'],
61 'description': ALL_HOTSPOTS[hotspot]['description'],
62 'delay': 0.5,
63 }]
64
65 user.tutorial_status = UserProfile.TUTORIAL_FINISHED
66 user.save(update_fields=['tutorial_status'])
67 return []
68
69 def copy_hotpots(source_profile: UserProfile, target_profile: UserProfile) -> None:
70 for userhotspot in frozenset(UserHotspot.objects.filter(user=source_profile)):
71 UserHotspot.objects.create(user=target_profile, hotspot=userhotspot.hotspot,
72 timestamp=userhotspot.timestamp)
73
74 target_profile.tutorial_status = source_profile.tutorial_status
75 target_profile.onboarding_steps = source_profile.onboarding_steps
76 target_profile.save(update_fields=['tutorial_status', 'onboarding_steps'])
77
[end of zerver/lib/hotspots.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zerver/lib/hotspots.py b/zerver/lib/hotspots.py
--- a/zerver/lib/hotspots.py
+++ b/zerver/lib/hotspots.py
@@ -3,7 +3,7 @@
from typing import Dict, List
from django.conf import settings
-from django.utils.translation import ugettext as _
+from django.utils.translation import ugettext_lazy as _
from zerver.models import UserHotspot, UserProfile
@@ -44,8 +44,8 @@
if settings.ALWAYS_SEND_ALL_HOTSPOTS:
return [{
'name': hotspot,
- 'title': ALL_HOTSPOTS[hotspot]['title'],
- 'description': ALL_HOTSPOTS[hotspot]['description'],
+ 'title': str(ALL_HOTSPOTS[hotspot]['title']),
+ 'description': str(ALL_HOTSPOTS[hotspot]['description']),
'delay': 0,
} for hotspot in ALL_HOTSPOTS]
@@ -57,8 +57,8 @@
if hotspot not in seen_hotspots:
return [{
'name': hotspot,
- 'title': ALL_HOTSPOTS[hotspot]['title'],
- 'description': ALL_HOTSPOTS[hotspot]['description'],
+ 'title': str(ALL_HOTSPOTS[hotspot]['title']),
+ 'description': str(ALL_HOTSPOTS[hotspot]['description']),
'delay': 0.5,
}]
| {"golden_diff": "diff --git a/zerver/lib/hotspots.py b/zerver/lib/hotspots.py\n--- a/zerver/lib/hotspots.py\n+++ b/zerver/lib/hotspots.py\n@@ -3,7 +3,7 @@\n from typing import Dict, List\n \n from django.conf import settings\n-from django.utils.translation import ugettext as _\n+from django.utils.translation import ugettext_lazy as _\n \n from zerver.models import UserHotspot, UserProfile\n \n@@ -44,8 +44,8 @@\n if settings.ALWAYS_SEND_ALL_HOTSPOTS:\n return [{\n 'name': hotspot,\n- 'title': ALL_HOTSPOTS[hotspot]['title'],\n- 'description': ALL_HOTSPOTS[hotspot]['description'],\n+ 'title': str(ALL_HOTSPOTS[hotspot]['title']),\n+ 'description': str(ALL_HOTSPOTS[hotspot]['description']),\n 'delay': 0,\n } for hotspot in ALL_HOTSPOTS]\n \n@@ -57,8 +57,8 @@\n if hotspot not in seen_hotspots:\n return [{\n 'name': hotspot,\n- 'title': ALL_HOTSPOTS[hotspot]['title'],\n- 'description': ALL_HOTSPOTS[hotspot]['description'],\n+ 'title': str(ALL_HOTSPOTS[hotspot]['title']),\n+ 'description': str(ALL_HOTSPOTS[hotspot]['description']),\n 'delay': 0.5,\n }]\n", "issue": "Enable translations for hotspots subsystem\nThere are unused translations at the hotspots subsystem, which could be enabled due to finished and available translations. At the moment there is a mix of English and the configured user language.\r\n\r\nAffected file: zerver/lib/hotspots.py\r\n\r\nExample (mixed English/German):\r\n\r\n\n", "before_files": [{"content": "# See https://zulip.readthedocs.io/en/latest/subsystems/hotspots.html\n# for documentation on this subsystem.\nfrom typing import Dict, List\n\nfrom django.conf import settings\nfrom django.utils.translation import ugettext as _\n\nfrom zerver.models import UserHotspot, UserProfile\n\nALL_HOTSPOTS: Dict[str, Dict[str, str]] = {\n 'intro_reply': {\n 'title': _('Reply to a message'),\n 'description': _('Click anywhere on a message to reply.'),\n },\n 'intro_streams': {\n 'title': _('Catch up on a stream'),\n 'description': _('Messages sent to a stream are seen by everyone subscribed '\n 'to that stream. Try clicking on one of the stream links below.'),\n },\n 'intro_topics': {\n 'title': _('Topics'),\n 'description': _('Every message has a topic. Topics keep conversations '\n 'easy to follow, and make it easy to reply to conversations that start '\n 'while you are offline.'),\n },\n 'intro_gear': {\n 'title': _('Settings'),\n 'description': _('Go to Settings to configure your '\n 'notifications and display settings.'),\n },\n 'intro_compose': {\n 'title': _('Compose'),\n 'description': _('Click here to start a new conversation. Pick a topic '\n '(2-3 words is best), and give it a go!'),\n },\n}\n\ndef get_next_hotspots(user: UserProfile) -> List[Dict[str, object]]:\n # For manual testing, it can be convenient to set\n # ALWAYS_SEND_ALL_HOTSPOTS=True in `zproject/dev_settings.py` to\n # make it easy to click on all of the hotspots. 
Note that\n # ALWAYS_SEND_ALL_HOTSPOTS has some bugs; see ReadTheDocs (link\n # above) for details.\n if settings.ALWAYS_SEND_ALL_HOTSPOTS:\n return [{\n 'name': hotspot,\n 'title': ALL_HOTSPOTS[hotspot]['title'],\n 'description': ALL_HOTSPOTS[hotspot]['description'],\n 'delay': 0,\n } for hotspot in ALL_HOTSPOTS]\n\n if user.tutorial_status == UserProfile.TUTORIAL_FINISHED:\n return []\n\n seen_hotspots = frozenset(UserHotspot.objects.filter(user=user).values_list('hotspot', flat=True))\n for hotspot in ['intro_reply', 'intro_streams', 'intro_topics', 'intro_gear', 'intro_compose']:\n if hotspot not in seen_hotspots:\n return [{\n 'name': hotspot,\n 'title': ALL_HOTSPOTS[hotspot]['title'],\n 'description': ALL_HOTSPOTS[hotspot]['description'],\n 'delay': 0.5,\n }]\n\n user.tutorial_status = UserProfile.TUTORIAL_FINISHED\n user.save(update_fields=['tutorial_status'])\n return []\n\ndef copy_hotpots(source_profile: UserProfile, target_profile: UserProfile) -> None:\n for userhotspot in frozenset(UserHotspot.objects.filter(user=source_profile)):\n UserHotspot.objects.create(user=target_profile, hotspot=userhotspot.hotspot,\n timestamp=userhotspot.timestamp)\n\n target_profile.tutorial_status = source_profile.tutorial_status\n target_profile.onboarding_steps = source_profile.onboarding_steps\n target_profile.save(update_fields=['tutorial_status', 'onboarding_steps'])\n", "path": "zerver/lib/hotspots.py"}]} | 1,526 | 315 |
gh_patches_debug_13021 | rasdani/github-patches | git_diff | mkdocs__mkdocs-173 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update PyPI description
At the moment I wouldn't be tempted if I first saw this page.
https://pypi.python.org/pypi/mkdocs
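One possible direction (the wording below is only an illustration, not proposed copy) is to replace the placeholder metadata in setup.py with a real summary:
``` python
description = 'Project documentation with Markdown.'
long_description = (
    "MkDocs is a simple static site generator geared towards building "
    "project documentation from Markdown source files."
)
```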
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 from __future__ import print_function
5 from setuptools import setup
6 import re
7 import os
8 import sys
9
10
11 name = 'mkdocs'
12 package = 'mkdocs'
13 description = 'In progress.'
14 url = 'http://www.mkdocs.org'
15 author = 'Tom Christie'
16 author_email = '[email protected]'
17 license = 'BSD'
18 install_requires = [
19 'Jinja2>=2.7.1',
20 'Markdown>=2.3.1,<2.5',
21 'PyYAML>=3.10',
22 'watchdog>=0.7.0',
23 'ghp-import>=0.4.1'
24 ]
25
26 long_description = """Work in progress."""
27
28
29 def get_version(package):
30 """
31 Return package version as listed in `__version__` in `init.py`.
32 """
33 init_py = open(os.path.join(package, '__init__.py')).read()
34 return re.search("^__version__ = ['\"]([^'\"]+)['\"]", init_py, re.MULTILINE).group(1)
35
36
37 def get_packages(package):
38 """
39 Return root package and all sub-packages.
40 """
41 return [dirpath
42 for dirpath, dirnames, filenames in os.walk(package)
43 if os.path.exists(os.path.join(dirpath, '__init__.py'))]
44
45
46 def get_package_data(package):
47 """
48 Return all files under the root package, that are not in a
49 package themselves.
50 """
51 walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
52 for dirpath, dirnames, filenames in os.walk(package)
53 if not os.path.exists(os.path.join(dirpath, '__init__.py'))]
54
55 filepaths = []
56 for base, filenames in walk:
57 filepaths.extend([os.path.join(base, filename)
58 for filename in filenames])
59 return {package: filepaths}
60
61
62 if sys.argv[-1] == 'publish':
63 os.system("python setup.py sdist upload")
64 args = {'version': get_version(package)}
65 print("You probably want to also tag the version now:")
66 print(" git tag -a %(version)s -m 'version %(version)s'" % args)
67 print(" git push --tags")
68 sys.exit()
69
70
71 setup(
72 name=name,
73 version=get_version(package),
74 url=url,
75 license=license,
76 description=description,
77 long_description=long_description,
78 author=author,
79 author_email=author_email,
80 packages=get_packages(package),
81 package_data=get_package_data(package),
82 install_requires=install_requires,
83 entry_points={
84 'console_scripts': [
85 'mkdocs = mkdocs.main:run_main',
86 ],
87 },
88 classifiers=[
89 'Development Status :: 5 - Production/Stable',
90 'Environment :: Console',
91 'Environment :: Web Environment',
92 'Intended Audience :: Developers',
93 'License :: OSI Approved :: BSD License',
94 'Operating System :: OS Independent',
95 'Programming Language :: Python',
96 'Programming Language :: Python :: 2',
97 'Programming Language :: Python :: 2.6',
98 'Programming Language :: Python :: 2.7',
99 'Programming Language :: Python :: 3',
100 'Programming Language :: Python :: 3.3',
101 'Programming Language :: Python :: 3.4',
102 'Topic :: Documentation',
103 'Topic :: Text Processing',
104 ]
105 )
106
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -10,7 +10,7 @@
name = 'mkdocs'
package = 'mkdocs'
-description = 'In progress.'
+description = 'Project documentation with Markdown.'
url = 'http://www.mkdocs.org'
author = 'Tom Christie'
author_email = '[email protected]'
@@ -23,7 +23,12 @@
'ghp-import>=0.4.1'
]
-long_description = """Work in progress."""
+long_description = (
+ "MkDocs is a fast, simple and downright gorgeous static site generator "
+ "that's geared towards building project documentation. Documentation "
+ "source files are written in Markdown, and configured with a single YAML "
+ "configuration file."
+)
def get_version(package):
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -10,7 +10,7 @@\n \n name = 'mkdocs'\n package = 'mkdocs'\n-description = 'In progress.'\n+description = 'Project documentation with Markdown.'\n url = 'http://www.mkdocs.org'\n author = 'Tom Christie'\n author_email = '[email protected]'\n@@ -23,7 +23,12 @@\n 'ghp-import>=0.4.1'\n ]\n \n-long_description = \"\"\"Work in progress.\"\"\"\n+long_description = (\n+ \"MkDocs is a fast, simple and downright gorgeous static site generator \"\n+ \"that's geared towards building project documentation. Documentation \"\n+ \"source files are written in Markdown, and configured with a single YAML \"\n+ \"configuration file.\"\n+)\n \n \n def get_version(package):\n", "issue": "Update PyPI description\nAt the moment I wouldn't be tempted if I first seen this page.\n\nhttps://pypi.python.org/pypi/mkdocs\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\nfrom setuptools import setup\nimport re\nimport os\nimport sys\n\n\nname = 'mkdocs'\npackage = 'mkdocs'\ndescription = 'In progress.'\nurl = 'http://www.mkdocs.org'\nauthor = 'Tom Christie'\nauthor_email = '[email protected]'\nlicense = 'BSD'\ninstall_requires = [\n 'Jinja2>=2.7.1',\n 'Markdown>=2.3.1,<2.5',\n 'PyYAML>=3.10',\n 'watchdog>=0.7.0',\n 'ghp-import>=0.4.1'\n]\n\nlong_description = \"\"\"Work in progress.\"\"\"\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n init_py = open(os.path.join(package, '__init__.py')).read()\n return re.search(\"^__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py, re.MULTILINE).group(1)\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\ndef get_package_data(package):\n \"\"\"\n Return all files under the root package, that are not in a\n package themselves.\n \"\"\"\n walk = [(dirpath.replace(package + os.sep, '', 1), filenames)\n for dirpath, dirnames, filenames in os.walk(package)\n if not os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n filepaths = []\n for base, filenames in walk:\n filepaths.extend([os.path.join(base, filename)\n for filename in filenames])\n return {package: filepaths}\n\n\nif sys.argv[-1] == 'publish':\n os.system(\"python setup.py sdist upload\")\n args = {'version': get_version(package)}\n print(\"You probably want to also tag the version now:\")\n print(\" git tag -a %(version)s -m 'version %(version)s'\" % args)\n print(\" git push --tags\")\n sys.exit()\n\n\nsetup(\n name=name,\n version=get_version(package),\n url=url,\n license=license,\n description=description,\n long_description=long_description,\n author=author,\n author_email=author_email,\n packages=get_packages(package),\n package_data=get_package_data(package),\n install_requires=install_requires,\n entry_points={\n 'console_scripts': [\n 'mkdocs = mkdocs.main:run_main',\n ],\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: 
Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Documentation',\n 'Topic :: Text Processing',\n ]\n)\n", "path": "setup.py"}]} | 1,508 | 190 |
gh_patches_debug_3389 | rasdani/github-patches | git_diff | freedomofpress__securedrop-5011 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Please make the rqrequeue service quieter
## Description
The rqrequeue service feels compelled to report that it has nothing to do, resulting in an endless stream of "No interrupted jobs found in started job registry." messages. This is not helpful during normal operations, and annoying during development.
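A minimal sketch of the kind of change being requested — demoting the routine "nothing to do" message from `info` to `debug` so it only appears when verbose logging is enabled (assuming the standard `logging` module already used by the service):
``` python
import logging

if not job_ids:
    # Routine polls that find no work are debug-level noise, not news.
    logging.debug("No interrupted jobs found in started job registry.")
```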
</issue>
<code>
[start of securedrop/worker.py]
1 import logging
2 import os
3 from typing import Optional, List
4
5 from redis import Redis
6 from rq.queue import Queue
7 from rq.worker import Worker, WorkerStatus
8 from rq.exceptions import InvalidJobOperation, NoSuchJobError
9 from rq.registry import StartedJobRegistry
10
11 from sdconfig import config
12
13
14 def create_queue(name=None, timeout=3600):
15 # type: (str, int) -> Queue
16 """
17 Create an rq ``Queue`` named ``name`` with default timeout ``timeout``.
18
19 If ``name`` is omitted, ``config.RQ_WORKER_NAME`` is used.
20 """
21 if name is None:
22 name = config.RQ_WORKER_NAME
23 q = Queue(name=name, connection=Redis(), default_timeout=timeout)
24 return q
25
26
27 def rq_workers(queue=None):
28 # type: (Queue) -> List[Worker]
29 """
30 Returns the list of current rq ``Worker``s.
31 """
32
33 return Worker.all(connection=Redis(), queue=queue)
34
35
36 def worker_for_job(job_id):
37 # type: (str) -> Optional[Worker]
38 """
39 If the job is being run, return its ``Worker``.
40 """
41 for worker in rq_workers():
42 # If the worker process no longer exists, skip it. From "man 2
43 # kill": "If sig is 0, then no signal is sent, but existence
44 # and permission checks are still performed; this can be used
45 # to check for the existence of a process ID or process group
46 # ID that the caller is permitted to signal."
47 try:
48 os.kill(worker.pid, 0)
49 except OSError:
50 continue
51
52 # If it's running and working on the given job, return it.
53 if worker.state == WorkerStatus.BUSY and job_id == worker.get_current_job_id():
54 return worker
55 return None
56
57
58 def requeue_interrupted_jobs(queue_name=None):
59 # type: (str) -> None
60 """
61 Requeues jobs found in the given queue's started job registry.
62
63 Only restarts those that aren't already queued or being run.
64
65 When rq starts a job, it records it in the queue's started job
66 registry. If the server is rebooted before the job completes, the
67 job is not automatically restarted from the information in the
68 registry. For tasks like secure deletion of files, this means that
69 information thought to be deleted is still present in the case of
70 seizure or compromise. We have manage.py tasks to clean such files
71 up, but this utility attempts to reduce the need for manual
72 intervention by automatically resuming interrupted jobs.
73
74 This function is predicated on a risky assumption: that all jobs
75 are idempotent. At time of writing, we use rq for securely
76 deleting submission files and hashing submissions for the ETag
77 header. Both of these can be safely repeated. If we add rq tasks
78 that cannot, this function should be improved to omit those.
79 """
80 queue = create_queue(queue_name)
81 started_job_registry = StartedJobRegistry(queue=queue)
82
83 queued_job_ids = queue.get_job_ids()
84 logging.debug("queued jobs: {}".format(queued_job_ids))
85 started_job_ids = started_job_registry.get_job_ids()
86 logging.debug("started jobs: {}".format(started_job_ids))
87 job_ids = [j for j in started_job_ids if j not in queued_job_ids]
88 logging.debug("candidate job ids: {}".format(job_ids))
89
90 if not job_ids:
91 logging.info("No interrupted jobs found in started job registry.")
92
93 for job_id in job_ids:
94 logging.debug("Considering job %s", job_id)
95 try:
96 job = started_job_registry.job_class.fetch(job_id, started_job_registry.connection)
97 except NoSuchJobError as e:
98 logging.error(
99 "Could not find details for job %s: %s", job_id, e
100 )
101 continue
102
103 logging.debug(
104 "Job %s enqueued at %s, started at %s", job_id, job.enqueued_at, job.started_at
105 )
106
107 worker = worker_for_job(job_id)
108 if worker:
109 logging.info(
110 "Skipping job %s, which is already being run by worker %s", job_id, worker.key
111 )
112 continue
113
114 logging.info("Requeuing job %s", job)
115
116 try:
117 started_job_registry.remove(job)
118 except InvalidJobOperation as e:
119 logging.error("Could not remove job %s from started job registry: %s", job, e)
120 continue
121
122 try:
123 queue.enqueue_job(job)
124 logging.debug("Job now enqueued at %s, started at %s", job.enqueued_at, job.started_at)
125 except Exception as e:
126 logging.error("Could not requeue job %s: %s", job, e)
127 continue
128
[end of securedrop/worker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/securedrop/worker.py b/securedrop/worker.py
--- a/securedrop/worker.py
+++ b/securedrop/worker.py
@@ -88,7 +88,7 @@
logging.debug("candidate job ids: {}".format(job_ids))
if not job_ids:
- logging.info("No interrupted jobs found in started job registry.")
+ logging.debug("No interrupted jobs found in started job registry.")
for job_id in job_ids:
logging.debug("Considering job %s", job_id)
| {"golden_diff": "diff --git a/securedrop/worker.py b/securedrop/worker.py\n--- a/securedrop/worker.py\n+++ b/securedrop/worker.py\n@@ -88,7 +88,7 @@\n logging.debug(\"candidate job ids: {}\".format(job_ids))\n \n if not job_ids:\n- logging.info(\"No interrupted jobs found in started job registry.\")\n+ logging.debug(\"No interrupted jobs found in started job registry.\")\n \n for job_id in job_ids:\n logging.debug(\"Considering job %s\", job_id)\n", "issue": "Please make the rqrequeue service quieter\n## Description\r\n\r\nThe rqrequeue service feels compelled to report that it has nothing to do, resulting in an endless stream of \"No interrupted jobs found in started job registry.\" messages. This is not helpful during normal operations, and annoying during development.\n", "before_files": [{"content": "import logging\nimport os\nfrom typing import Optional, List\n\nfrom redis import Redis\nfrom rq.queue import Queue\nfrom rq.worker import Worker, WorkerStatus\nfrom rq.exceptions import InvalidJobOperation, NoSuchJobError\nfrom rq.registry import StartedJobRegistry\n\nfrom sdconfig import config\n\n\ndef create_queue(name=None, timeout=3600):\n # type: (str, int) -> Queue\n \"\"\"\n Create an rq ``Queue`` named ``name`` with default timeout ``timeout``.\n\n If ``name`` is omitted, ``config.RQ_WORKER_NAME`` is used.\n \"\"\"\n if name is None:\n name = config.RQ_WORKER_NAME\n q = Queue(name=name, connection=Redis(), default_timeout=timeout)\n return q\n\n\ndef rq_workers(queue=None):\n # type: (Queue) -> List[Worker]\n \"\"\"\n Returns the list of current rq ``Worker``s.\n \"\"\"\n\n return Worker.all(connection=Redis(), queue=queue)\n\n\ndef worker_for_job(job_id):\n # type: (str) -> Optional[Worker]\n \"\"\"\n If the job is being run, return its ``Worker``.\n \"\"\"\n for worker in rq_workers():\n # If the worker process no longer exists, skip it. From \"man 2\n # kill\": \"If sig is 0, then no signal is sent, but existence\n # and permission checks are still performed; this can be used\n # to check for the existence of a process ID or process group\n # ID that the caller is permitted to signal.\"\n try:\n os.kill(worker.pid, 0)\n except OSError:\n continue\n\n # If it's running and working on the given job, return it.\n if worker.state == WorkerStatus.BUSY and job_id == worker.get_current_job_id():\n return worker\n return None\n\n\ndef requeue_interrupted_jobs(queue_name=None):\n # type: (str) -> None\n \"\"\"\n Requeues jobs found in the given queue's started job registry.\n\n Only restarts those that aren't already queued or being run.\n\n When rq starts a job, it records it in the queue's started job\n registry. If the server is rebooted before the job completes, the\n job is not automatically restarted from the information in the\n registry. For tasks like secure deletion of files, this means that\n information thought to be deleted is still present in the case of\n seizure or compromise. We have manage.py tasks to clean such files\n up, but this utility attempts to reduce the need for manual\n intervention by automatically resuming interrupted jobs.\n\n This function is predicated on a risky assumption: that all jobs\n are idempotent. At time of writing, we use rq for securely\n deleting submission files and hashing submissions for the ETag\n header. Both of these can be safely repeated. 
If we add rq tasks\n that cannot, this function should be improved to omit those.\n \"\"\"\n queue = create_queue(queue_name)\n started_job_registry = StartedJobRegistry(queue=queue)\n\n queued_job_ids = queue.get_job_ids()\n logging.debug(\"queued jobs: {}\".format(queued_job_ids))\n started_job_ids = started_job_registry.get_job_ids()\n logging.debug(\"started jobs: {}\".format(started_job_ids))\n job_ids = [j for j in started_job_ids if j not in queued_job_ids]\n logging.debug(\"candidate job ids: {}\".format(job_ids))\n\n if not job_ids:\n logging.info(\"No interrupted jobs found in started job registry.\")\n\n for job_id in job_ids:\n logging.debug(\"Considering job %s\", job_id)\n try:\n job = started_job_registry.job_class.fetch(job_id, started_job_registry.connection)\n except NoSuchJobError as e:\n logging.error(\n \"Could not find details for job %s: %s\", job_id, e\n )\n continue\n\n logging.debug(\n \"Job %s enqueued at %s, started at %s\", job_id, job.enqueued_at, job.started_at\n )\n\n worker = worker_for_job(job_id)\n if worker:\n logging.info(\n \"Skipping job %s, which is already being run by worker %s\", job_id, worker.key\n )\n continue\n\n logging.info(\"Requeuing job %s\", job)\n\n try:\n started_job_registry.remove(job)\n except InvalidJobOperation as e:\n logging.error(\"Could not remove job %s from started job registry: %s\", job, e)\n continue\n\n try:\n queue.enqueue_job(job)\n logging.debug(\"Job now enqueued at %s, started at %s\", job.enqueued_at, job.started_at)\n except Exception as e:\n logging.error(\"Could not requeue job %s: %s\", job, e)\n continue\n", "path": "securedrop/worker.py"}]} | 1,922 | 118 |
gh_patches_debug_25831 | rasdani/github-patches | git_diff | larq__larq-93 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Docs: Add links to source code
This is really handy if people want to understand what's going on behind the scenes or want to implement more advanced stuff.
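For example, a docs generator can usually derive the link from the object itself; a rough sketch using the standard `inspect` module (the repository URL and path handling here are assumptions):
``` python
import inspect


def source_link(obj, repo="https://github.com/larq/larq/blob/master"):
    # A real implementation would make this path relative to the repo root.
    path = inspect.getsourcefile(obj)
    _, lineno = inspect.getsourcelines(obj)
    return f"{repo}/{path}#L{lineno}"
```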
</issue>
<code>
[start of generate_api_docs.py]
1 """https://github.com/NiklasRosenstein/pydoc-markdown/blob/master/pydocmd/__main__.py"""
2
3 import os
4 import sys
5 import yaml
6
7 from pydocmd.document import Index
8 from pydocmd.imp import dir_object
9 from pydocmd.loader import PythonLoader
10 from pydocmd.preprocessor import Preprocessor
11
12
13 with open("apidocs.yml", "r") as stream:
14 api_structure = yaml.safe_load(stream)
15
16 # Build the index and document structure first, we load the actual
17 # docstrings at a later point.
18 print("Building index...")
19 index = Index()
20
21
22 def add_sections(doc, object_names, depth=1):
23 if isinstance(object_names, list):
24 [add_sections(doc, x, depth) for x in object_names]
25 elif isinstance(object_names, dict):
26 for key, subsections in object_names.items():
27 add_sections(doc, key, depth)
28 add_sections(doc, subsections, depth + 1)
29 elif isinstance(object_names, str):
30 # Check how many levels of recursion we should be going.
31 expand_depth = len(object_names)
32 object_names = object_names.rstrip("+")
33 expand_depth -= len(object_names)
34
35 def create_sections(name, level):
36 if level > expand_depth:
37 return
38 index.new_section(doc, name, depth=depth + level, header_type="markdown")
39 for sub in dir_object(name, "line", False):
40 sub = name + "." + sub
41 create_sections(sub, level + 1)
42
43 create_sections(object_names, 0)
44 else:
45 raise RuntimeError(object_names)
46
47
48 # Make sure that we can find modules from the current working directory,
49 # and have them take precedence over installed modules.
50 sys.path.insert(0, ".")
51
52 for pages in api_structure:
53 for fname, object_names in pages.items():
54 doc = index.new_document(fname)
55 add_sections(doc, object_names)
56
57 loader = PythonLoader({})
58 preproc = Preprocessor({})
59
60 preproc.link_lookup = {}
61 for file, doc in index.documents.items():
62 for section in doc.sections:
63 preproc.link_lookup[section.identifier] = file
64 # Load the docstrings and fill the sections.
65 print("Started generating documentation...")
66 for doc in index.documents.values():
67 for section in filter(lambda s: s.identifier, doc.sections):
68 loader.load_section(section)
69 preproc.preprocess_section(section)
70
71 # Write out all the generated documents.
72 os.makedirs(os.path.join("docs", "api"), exist_ok=True)
73 for fname, doc in index.documents.items():
74 with open(os.path.join("docs", "api", fname), "w") as fp:
75 for section in doc.sections:
76 section.render(fp)
77
[end of generate_api_docs.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/generate_api_docs.py b/generate_api_docs.py
--- a/generate_api_docs.py
+++ b/generate_api_docs.py
@@ -1,5 +1,6 @@
"""https://github.com/NiklasRosenstein/pydoc-markdown/blob/master/pydocmd/__main__.py"""
+import inspect
import os
import sys
import yaml
@@ -10,6 +11,23 @@
from pydocmd.preprocessor import Preprocessor
+def callable_to_source_link(obj, scope):
+ path = scope.__file__.lstrip(".")
+ source = inspect.getsourcelines(obj)
+ line = source[-1] + 1 if source[0][0].startswith("@") else source[-1]
+ link = f"https://github.com/plumerai/larq/blob/master{path}#L{line}"
+ return f'<a class="headerlink code-link" style="float:right;" href="{link}" title="Source Code"></a>'
+
+
+class PythonLoaderWithSource(PythonLoader):
+ def load_section(self, section):
+ super().load_section(section)
+ obj = section.loader_context["obj"]
+ if callable(obj):
+ scope = section.loader_context["scope"]
+ section.title += callable_to_source_link(obj, scope)
+
+
with open("apidocs.yml", "r") as stream:
api_structure = yaml.safe_load(stream)
@@ -54,7 +72,7 @@
doc = index.new_document(fname)
add_sections(doc, object_names)
-loader = PythonLoader({})
+loader = PythonLoaderWithSource({})
preproc = Preprocessor({})
preproc.link_lookup = {}
| {"golden_diff": "diff --git a/generate_api_docs.py b/generate_api_docs.py\n--- a/generate_api_docs.py\n+++ b/generate_api_docs.py\n@@ -1,5 +1,6 @@\n \"\"\"https://github.com/NiklasRosenstein/pydoc-markdown/blob/master/pydocmd/__main__.py\"\"\"\n \n+import inspect\n import os\n import sys\n import yaml\n@@ -10,6 +11,23 @@\n from pydocmd.preprocessor import Preprocessor\n \n \n+def callable_to_source_link(obj, scope):\n+ path = scope.__file__.lstrip(\".\")\n+ source = inspect.getsourcelines(obj)\n+ line = source[-1] + 1 if source[0][0].startswith(\"@\") else source[-1]\n+ link = f\"https://github.com/plumerai/larq/blob/master{path}#L{line}\"\n+ return f'<a class=\"headerlink code-link\" style=\"float:right;\" href=\"{link}\" title=\"Source Code\"></a>'\n+\n+\n+class PythonLoaderWithSource(PythonLoader):\n+ def load_section(self, section):\n+ super().load_section(section)\n+ obj = section.loader_context[\"obj\"]\n+ if callable(obj):\n+ scope = section.loader_context[\"scope\"]\n+ section.title += callable_to_source_link(obj, scope)\n+\n+\n with open(\"apidocs.yml\", \"r\") as stream:\n api_structure = yaml.safe_load(stream)\n \n@@ -54,7 +72,7 @@\n doc = index.new_document(fname)\n add_sections(doc, object_names)\n \n-loader = PythonLoader({})\n+loader = PythonLoaderWithSource({})\n preproc = Preprocessor({})\n \n preproc.link_lookup = {}\n", "issue": "Docs: Add links to source code\nThis is really handy if people want to understand what's going on behind the scenes or want to implement more advanced stuff\n", "before_files": [{"content": "\"\"\"https://github.com/NiklasRosenstein/pydoc-markdown/blob/master/pydocmd/__main__.py\"\"\"\n\nimport os\nimport sys\nimport yaml\n\nfrom pydocmd.document import Index\nfrom pydocmd.imp import dir_object\nfrom pydocmd.loader import PythonLoader\nfrom pydocmd.preprocessor import Preprocessor\n\n\nwith open(\"apidocs.yml\", \"r\") as stream:\n api_structure = yaml.safe_load(stream)\n\n# Build the index and document structure first, we load the actual\n# docstrings at a later point.\nprint(\"Building index...\")\nindex = Index()\n\n\ndef add_sections(doc, object_names, depth=1):\n if isinstance(object_names, list):\n [add_sections(doc, x, depth) for x in object_names]\n elif isinstance(object_names, dict):\n for key, subsections in object_names.items():\n add_sections(doc, key, depth)\n add_sections(doc, subsections, depth + 1)\n elif isinstance(object_names, str):\n # Check how many levels of recursion we should be going.\n expand_depth = len(object_names)\n object_names = object_names.rstrip(\"+\")\n expand_depth -= len(object_names)\n\n def create_sections(name, level):\n if level > expand_depth:\n return\n index.new_section(doc, name, depth=depth + level, header_type=\"markdown\")\n for sub in dir_object(name, \"line\", False):\n sub = name + \".\" + sub\n create_sections(sub, level + 1)\n\n create_sections(object_names, 0)\n else:\n raise RuntimeError(object_names)\n\n\n# Make sure that we can find modules from the current working directory,\n# and have them take precedence over installed modules.\nsys.path.insert(0, \".\")\n\nfor pages in api_structure:\n for fname, object_names in pages.items():\n doc = index.new_document(fname)\n add_sections(doc, object_names)\n\nloader = PythonLoader({})\npreproc = Preprocessor({})\n\npreproc.link_lookup = {}\nfor file, doc in index.documents.items():\n for section in doc.sections:\n preproc.link_lookup[section.identifier] = file\n# Load the docstrings and fill the sections.\nprint(\"Started generating 
documentation...\")\nfor doc in index.documents.values():\n for section in filter(lambda s: s.identifier, doc.sections):\n loader.load_section(section)\n preproc.preprocess_section(section)\n\n# Write out all the generated documents.\nos.makedirs(os.path.join(\"docs\", \"api\"), exist_ok=True)\nfor fname, doc in index.documents.items():\n with open(os.path.join(\"docs\", \"api\", fname), \"w\") as fp:\n for section in doc.sections:\n section.render(fp)\n", "path": "generate_api_docs.py"}]} | 1,282 | 368 |
gh_patches_debug_38033 | rasdani/github-patches | git_diff | google__clusterfuzz-1524 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support authentication with Cloud IAP
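For context, Cloud IAP attaches a signed JWT to each proxied request in the `X-Goog-IAP-JWT-Assertion` header; verifying it might look roughly like the sketch below. The header name and key URL are IAP's documented values, but the function and its wiring are assumptions, not ClusterFuzz code:
``` python
import requests
from google.auth import jwt


def validate_iap_jwt(iap_jwt, expected_audience):
    # IAP publishes its signing keys at this well-known URL.
    certs = requests.get(
        'https://www.gstatic.com/iap/verify/public_key').json()
    decoded = jwt.decode(iap_jwt, certs=certs, audience=expected_audience)
    return decoded['email']
```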
</issue>
<code>
[start of src/appengine/libs/auth.py]
1 # Copyright 2019 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Authentication helpers."""
15
16 import collections
17
18 from firebase_admin import auth
19 from google.cloud import ndb
20 import webapp2
21
22 from base import utils
23 from config import local_config
24 from datastore import data_types
25 from metrics import logs
26 from system import environment
27
28 User = collections.namedtuple('User', ['email'])
29
30
31 class AuthError(Exception):
32 """Auth error."""
33
34
35 def auth_domain():
36 """Get the auth domain."""
37 domain = local_config.ProjectConfig().get('firebase.auth_domain')
38 if domain:
39 return domain
40
41 return utils.get_application_id() + '.firebaseapp.com'
42
43
44 def is_current_user_admin():
45 """Returns whether or not the current logged in user is an admin."""
46 if environment.is_local_development():
47 return True
48
49 user = get_current_user()
50 if not user:
51 return False
52
53 key = ndb.Key(data_types.Admin, user.email)
54 return bool(key.get())
55
56
57 def get_current_user():
58 """Get the current logged in user, or None."""
59 if environment.is_local_development():
60 return User('user@localhost')
61
62 loas_user = environment.get_value('LOAS_PEER_USERNAME')
63 if loas_user:
64 return User(loas_user + '@google.com')
65
66 current_request = get_current_request()
67 oauth_email = getattr(current_request, '_oauth_email', None)
68 if oauth_email:
69 return User(oauth_email)
70
71 cached_email = getattr(current_request, '_cached_email', None)
72 if cached_email:
73 return User(cached_email)
74
75 session_cookie = get_session_cookie()
76 if not session_cookie:
77 return None
78
79 try:
80 decoded_claims = decode_claims(get_session_cookie())
81 except AuthError:
82 logs.log_warn('Invalid session cookie.')
83 return None
84
85 if not decoded_claims.get('email_verified'):
86 return None
87
88 email = decoded_claims.get('email')
89 if not email:
90 return None
91
92 # We cache the email for this request if we've validated the user to make
93 # subsequent get_current_user() calls fast.
94 setattr(current_request, '_cached_email', email)
95 return User(email)
96
97
98 def create_session_cookie(id_token, expires_in):
99 """Create a new session cookie."""
100 try:
101 return auth.create_session_cookie(id_token, expires_in=expires_in)
102 except auth.AuthError:
103 raise AuthError('Failed to create session cookie.')
104
105
106 def get_current_request():
107 """Get the current request."""
108 return webapp2.get_request()
109
110
111 def get_session_cookie():
112 """Get the current session cookie."""
113 return get_current_request().cookies.get('session')
114
115
116 def revoke_session_cookie(session_cookie):
117 """Revoke a session cookie."""
118 decoded_claims = decode_claims(session_cookie)
119 auth.revoke_refresh_tokens(decoded_claims['sub'])
120
121
122 def decode_claims(session_cookie):
123 """Decode the claims for the current session cookie."""
124 try:
125 return auth.verify_session_cookie(session_cookie, check_revoked=True)
126 except (ValueError, auth.AuthError):
127 raise AuthError('Invalid session cookie.')
128
[end of src/appengine/libs/auth.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/appengine/libs/auth.py b/src/appengine/libs/auth.py
--- a/src/appengine/libs/auth.py
+++ b/src/appengine/libs/auth.py
@@ -13,12 +13,17 @@
# limitations under the License.
"""Authentication helpers."""
+from builtins import str
import collections
+import jwt
from firebase_admin import auth
from google.cloud import ndb
+from googleapiclient.discovery import build
+import requests
import webapp2
+from base import memoize
from base import utils
from config import local_config
from datastore import data_types
@@ -54,6 +59,68 @@
return bool(key.get())
+@memoize.wrap(memoize.FifoInMemory(1))
+def _project_number_from_id(project_id):
+ """Get the project number from project ID."""
+ resource_manager = build('cloudresourcemanager', 'v1')
+ result = resource_manager.projects().get(projectId=project_id).execute()
+ if 'projectNumber' not in result:
+ raise AuthError('Failed to get project number.')
+
+ return result['projectNumber']
+
+
+@memoize.wrap(memoize.FifoInMemory(1))
+def _get_iap_key(key_id):
+ """Retrieves a public key from the list published by Identity-Aware Proxy,
+ re-fetching the key file if necessary.
+ """
+ resp = requests.get('https://www.gstatic.com/iap/verify/public_key')
+ if resp.status_code != 200:
+ raise AuthError('Unable to fetch IAP keys: {} / {} / {}'.format(
+ resp.status_code, resp.headers, resp.text))
+
+ result = resp.json()
+ key = result.get(key_id)
+ if not key:
+ raise AuthError('Key {!r} not found'.format(key_id))
+
+ return key
+
+
+def _validate_iap_jwt(iap_jwt):
+ """Validate JWT assertion."""
+ project_id = utils.get_application_id()
+ expected_audience = '/projects/{}/apps/{}'.format(
+ _project_number_from_id(project_id), project_id)
+
+ try:
+ key_id = jwt.get_unverified_header(iap_jwt).get('kid')
+ if not key_id:
+ raise AuthError('No key ID.')
+
+ key = _get_iap_key(key_id)
+ decoded_jwt = jwt.decode(
+ iap_jwt,
+ key,
+ algorithms=['ES256'],
+ issuer='https://cloud.google.com/iap',
+ audience=expected_audience)
+ return decoded_jwt['email']
+ except (jwt.exceptions.InvalidTokenError,
+ requests.exceptions.RequestException) as e:
+ raise AuthError('JWT assertion decode error: ' + str(e))
+
+
+def get_iap_email(current_request):
+ """Get Cloud IAP email."""
+ jwt_assertion = current_request.headers.get('X-Goog-IAP-JWT-Assertion')
+ if not jwt_assertion:
+ return None
+
+ return _validate_iap_jwt(jwt_assertion)
+
+
def get_current_user():
"""Get the current logged in user, or None."""
if environment.is_local_development():
@@ -64,6 +131,10 @@
return User(loas_user + '@google.com')
current_request = get_current_request()
+ iap_email = get_iap_email(current_request)
+ if iap_email:
+ return User(iap_email)
+
oauth_email = getattr(current_request, '_oauth_email', None)
if oauth_email:
return User(oauth_email)
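
A minimal sketch of how the new Cloud IAP path could be unit-tested, with `_validate_iap_jwt` stubbed out via `unittest.mock`. The import path (`libs.auth` for `src/appengine/libs/auth.py`) and the test names are assumptions for illustration, not part of the ClusterFuzz test suite:

```python
import unittest
from unittest import mock

from libs import auth  # assumed import path for src/appengine/libs/auth.py


class GetIapEmailTest(unittest.TestCase):
  """Sketch of tests for the get_iap_email() helper added above."""

  def test_missing_assertion_header_returns_none(self):
    request = mock.Mock()
    request.headers = {}  # no X-Goog-IAP-JWT-Assertion header present
    self.assertIsNone(auth.get_iap_email(request))

  @mock.patch.object(auth, '_validate_iap_jwt', return_value='[email protected]')
  def test_valid_assertion_returns_email(self, mock_validate):
    request = mock.Mock()
    request.headers = {'X-Goog-IAP-JWT-Assertion': 'some-jwt'}
    self.assertEqual('[email protected]', auth.get_iap_email(request))
    mock_validate.assert_called_once_with('some-jwt')
```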
| {"golden_diff": "diff --git a/src/appengine/libs/auth.py b/src/appengine/libs/auth.py\n--- a/src/appengine/libs/auth.py\n+++ b/src/appengine/libs/auth.py\n@@ -13,12 +13,17 @@\n # limitations under the License.\n \"\"\"Authentication helpers.\"\"\"\n \n+from builtins import str\n import collections\n+import jwt\n \n from firebase_admin import auth\n from google.cloud import ndb\n+from googleapiclient.discovery import build\n+import requests\n import webapp2\n \n+from base import memoize\n from base import utils\n from config import local_config\n from datastore import data_types\n@@ -54,6 +59,68 @@\n return bool(key.get())\n \n \[email protected](memoize.FifoInMemory(1))\n+def _project_number_from_id(project_id):\n+ \"\"\"Get the project number from project ID.\"\"\"\n+ resource_manager = build('cloudresourcemanager', 'v1')\n+ result = resource_manager.projects().get(projectId=project_id).execute()\n+ if 'projectNumber' not in result:\n+ raise AuthError('Failed to get project number.')\n+\n+ return result['projectNumber']\n+\n+\[email protected](memoize.FifoInMemory(1))\n+def _get_iap_key(key_id):\n+ \"\"\"Retrieves a public key from the list published by Identity-Aware Proxy,\n+ re-fetching the key file if necessary.\n+ \"\"\"\n+ resp = requests.get('https://www.gstatic.com/iap/verify/public_key')\n+ if resp.status_code != 200:\n+ raise AuthError('Unable to fetch IAP keys: {} / {} / {}'.format(\n+ resp.status_code, resp.headers, resp.text))\n+\n+ result = resp.json()\n+ key = result.get(key_id)\n+ if not key:\n+ raise AuthError('Key {!r} not found'.format(key_id))\n+\n+ return key\n+\n+\n+def _validate_iap_jwt(iap_jwt):\n+ \"\"\"Validate JWT assertion.\"\"\"\n+ project_id = utils.get_application_id()\n+ expected_audience = '/projects/{}/apps/{}'.format(\n+ _project_number_from_id(project_id), project_id)\n+\n+ try:\n+ key_id = jwt.get_unverified_header(iap_jwt).get('kid')\n+ if not key_id:\n+ raise AuthError('No key ID.')\n+\n+ key = _get_iap_key(key_id)\n+ decoded_jwt = jwt.decode(\n+ iap_jwt,\n+ key,\n+ algorithms=['ES256'],\n+ issuer='https://cloud.google.com/iap',\n+ audience=expected_audience)\n+ return decoded_jwt['email']\n+ except (jwt.exceptions.InvalidTokenError,\n+ requests.exceptions.RequestException) as e:\n+ raise AuthError('JWT assertion decode error: ' + str(e))\n+\n+\n+def get_iap_email(current_request):\n+ \"\"\"Get Cloud IAP email.\"\"\"\n+ jwt_assertion = current_request.headers.get('X-Goog-IAP-JWT-Assertion')\n+ if not jwt_assertion:\n+ return None\n+\n+ return _validate_iap_jwt(jwt_assertion)\n+\n+\n def get_current_user():\n \"\"\"Get the current logged in user, or None.\"\"\"\n if environment.is_local_development():\n@@ -64,6 +131,10 @@\n return User(loas_user + '@google.com')\n \n current_request = get_current_request()\n+ iap_email = get_iap_email(current_request)\n+ if iap_email:\n+ return User(iap_email)\n+\n oauth_email = getattr(current_request, '_oauth_email', None)\n if oauth_email:\n return User(oauth_email)\n", "issue": "Support authentication with Cloud IAP\n\n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express 
or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Authentication helpers.\"\"\"\n\nimport collections\n\nfrom firebase_admin import auth\nfrom google.cloud import ndb\nimport webapp2\n\nfrom base import utils\nfrom config import local_config\nfrom datastore import data_types\nfrom metrics import logs\nfrom system import environment\n\nUser = collections.namedtuple('User', ['email'])\n\n\nclass AuthError(Exception):\n \"\"\"Auth error.\"\"\"\n\n\ndef auth_domain():\n \"\"\"Get the auth domain.\"\"\"\n domain = local_config.ProjectConfig().get('firebase.auth_domain')\n if domain:\n return domain\n\n return utils.get_application_id() + '.firebaseapp.com'\n\n\ndef is_current_user_admin():\n \"\"\"Returns whether or not the current logged in user is an admin.\"\"\"\n if environment.is_local_development():\n return True\n\n user = get_current_user()\n if not user:\n return False\n\n key = ndb.Key(data_types.Admin, user.email)\n return bool(key.get())\n\n\ndef get_current_user():\n \"\"\"Get the current logged in user, or None.\"\"\"\n if environment.is_local_development():\n return User('user@localhost')\n\n loas_user = environment.get_value('LOAS_PEER_USERNAME')\n if loas_user:\n return User(loas_user + '@google.com')\n\n current_request = get_current_request()\n oauth_email = getattr(current_request, '_oauth_email', None)\n if oauth_email:\n return User(oauth_email)\n\n cached_email = getattr(current_request, '_cached_email', None)\n if cached_email:\n return User(cached_email)\n\n session_cookie = get_session_cookie()\n if not session_cookie:\n return None\n\n try:\n decoded_claims = decode_claims(get_session_cookie())\n except AuthError:\n logs.log_warn('Invalid session cookie.')\n return None\n\n if not decoded_claims.get('email_verified'):\n return None\n\n email = decoded_claims.get('email')\n if not email:\n return None\n\n # We cache the email for this request if we've validated the user to make\n # subsequent get_current_user() calls fast.\n setattr(current_request, '_cached_email', email)\n return User(email)\n\n\ndef create_session_cookie(id_token, expires_in):\n \"\"\"Create a new session cookie.\"\"\"\n try:\n return auth.create_session_cookie(id_token, expires_in=expires_in)\n except auth.AuthError:\n raise AuthError('Failed to create session cookie.')\n\n\ndef get_current_request():\n \"\"\"Get the current request.\"\"\"\n return webapp2.get_request()\n\n\ndef get_session_cookie():\n \"\"\"Get the current session cookie.\"\"\"\n return get_current_request().cookies.get('session')\n\n\ndef revoke_session_cookie(session_cookie):\n \"\"\"Revoke a session cookie.\"\"\"\n decoded_claims = decode_claims(session_cookie)\n auth.revoke_refresh_tokens(decoded_claims['sub'])\n\n\ndef decode_claims(session_cookie):\n \"\"\"Decode the claims for the current session cookie.\"\"\"\n try:\n return auth.verify_session_cookie(session_cookie, check_revoked=True)\n except (ValueError, auth.AuthError):\n raise AuthError('Invalid session cookie.')\n", "path": "src/appengine/libs/auth.py"}]} | 1,613 | 811 |
gh_patches_debug_28600 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-3822 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[beta][v20] Reading a notification does not invalidate the cache
Server: Beta
Version: v20-RC2/99bee1d
System: Mac OS X
Browser: 52.0.2743.116 (64-bit)
---
1. Generate a notification.
2. Read it from the site.
3. Fetch the list of notifications through the API.
4. Unless the 15-minute timeout has expired in the meantime, the notification is still marked as unread in the API response.
</issue>
<code>
[start of zds/notification/api/views.py]
1 # coding: utf-8
2 from dry_rest_permissions.generics import DRYPermissions
3 from rest_framework import filters
4 from rest_framework.generics import ListAPIView
5 from rest_framework.permissions import IsAuthenticated
6 from rest_framework_extensions.cache.decorators import cache_response
7 from rest_framework_extensions.etag.decorators import etag
8 from rest_framework_extensions.key_constructor import bits
9 from rest_framework_extensions.key_constructor.constructors import DefaultKeyConstructor
10
11 from zds.api.bits import DJRF3xPaginationKeyBit
12 from zds.notification.api.serializers import NotificationSerializer
13 from zds.notification.models import Notification
14
15
16 class PagingNotificationListKeyConstructor(DefaultKeyConstructor):
17 pagination = DJRF3xPaginationKeyBit()
18 search = bits.QueryParamsKeyBit(['search', 'ordering', 'type'])
19 list_sql_query = bits.ListSqlQueryKeyBit()
20 unique_view_id = bits.UniqueViewIdKeyBit()
21 user = bits.UserKeyBit()
22
23
24 class NotificationListAPI(ListAPIView):
25 """
26 List of notification.
27 """
28
29 filter_backends = (filters.SearchFilter, filters.OrderingFilter)
30 search_fields = ('title',)
31 ordering_fields = ('pubdate', 'title',)
32 list_key_func = PagingNotificationListKeyConstructor()
33 serializer_class = NotificationSerializer
34 permission_classes = (IsAuthenticated, DRYPermissions,)
35
36 @etag(list_key_func)
37 @cache_response(key_func=list_key_func)
38 def get(self, request, *args, **kwargs):
39 """
40 Lists all notifications of a user.
41 ---
42
43 parameters:
44 - name: Authorization
45 description: Bearer token to make an authenticated request.
46 required: true
47 paramType: header
48 - name: page
49 description: Restricts output to the given page number.
50 required: false
51 paramType: query
52 - name: page_size
53 description: Sets the number of notifications per page.
54 required: false
55 paramType: query
56 - name: search
57 description: Filters by title.
58 required: false
59 paramType: query
60 - name: ordering
61 description: Sorts the results. You can order by (-)pubdate or (-)title.
62 paramType: query
63 - name: type
64 description: Filters by notification type.
65 paramType: query
66 - name: subscription_type
67 description: Filters by subscription type.
68 paramType: query
69 - name: expand
70 description: Returns an object instead of an identifier representing the given field.
71 required: false
72 paramType: query
73 responseMessages:
74 - code: 401
75 message: Not Authenticated
76 - code: 404
77 message: Not Found
78 """
79 return self.list(request, *args, **kwargs)
80
81 def get_queryset(self):
82 queryset = Notification.objects.get_notifications_of(self.request.user)
83 subscription_type = self.request.query_params.get('subscription_type', None)
84 if subscription_type:
85 queryset = queryset.filter(subscription__content_type__model=subscription_type)
86 _type = self.request.query_params.get('type', None)
87 if _type:
88 queryset = queryset.filter(content_type__model=_type)
89 return queryset
90
[end of zds/notification/api/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zds/notification/api/views.py b/zds/notification/api/views.py
--- a/zds/notification/api/views.py
+++ b/zds/notification/api/views.py
@@ -1,4 +1,8 @@
# coding: utf-8
+import datetime
+from django.core.cache import cache
+from django.db.models.signals import post_delete
+from django.db.models.signals import post_save
from dry_rest_permissions.generics import DRYPermissions
from rest_framework import filters
from rest_framework.generics import ListAPIView
@@ -8,7 +12,7 @@
from rest_framework_extensions.key_constructor import bits
from rest_framework_extensions.key_constructor.constructors import DefaultKeyConstructor
-from zds.api.bits import DJRF3xPaginationKeyBit
+from zds.api.bits import DJRF3xPaginationKeyBit, UpdatedAtKeyBit
from zds.notification.api.serializers import NotificationSerializer
from zds.notification.models import Notification
@@ -19,6 +23,15 @@
list_sql_query = bits.ListSqlQueryKeyBit()
unique_view_id = bits.UniqueViewIdKeyBit()
user = bits.UserKeyBit()
+ updated_at = UpdatedAtKeyBit('api_updated_notification')
+
+
+def change_api_notification_updated_at(sender=None, instance=None, *args, **kwargs):
+ cache.set('api_updated_notification', datetime.datetime.utcnow())
+
+
+post_save.connect(receiver=change_api_notification_updated_at, sender=Notification)
+post_delete.connect(receiver=change_api_notification_updated_at, sender=Notification)
class NotificationListAPI(ListAPIView):
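
The diff imports `UpdatedAtKeyBit` from `zds.api.bits` without showing it. A plausible sketch of such a bit, based on django-rest-framework-extensions' `KeyBitBase` (the actual implementation in `zds/api/bits.py` may differ):

```python
import datetime

from django.core.cache import cache
from rest_framework_extensions.key_constructor.bits import KeyBitBase


class UpdatedAtKeyBit(KeyBitBase):
    """Key bit whose value is the timestamp stored under a cache key.

    Whenever that timestamp is bumped, every cache key and ETag built
    from this bit becomes stale.
    """

    def __init__(self, key, *args, **kwargs):
        self.key = key
        super(UpdatedAtKeyBit, self).__init__(*args, **kwargs)

    def get_data(self, params, view_instance, view_method, request, args, kwargs):
        value = cache.get(self.key)
        if value is None:
            value = datetime.datetime.utcnow()
            cache.set(self.key, value)
        return str(value)
```

With a bit like this in place, the `post_save`/`post_delete` handlers in the diff refresh `api_updated_notification` as soon as a notification is created, read (and therefore saved), or deleted, so both the cached response and the ETag are invalidated without waiting for the 15-minute timeout.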
| {"golden_diff": "diff --git a/zds/notification/api/views.py b/zds/notification/api/views.py\n--- a/zds/notification/api/views.py\n+++ b/zds/notification/api/views.py\n@@ -1,4 +1,8 @@\n # coding: utf-8\n+import datetime\n+from django.core.cache import cache\n+from django.db.models.signals import post_delete\n+from django.db.models.signals import post_save\n from dry_rest_permissions.generics import DRYPermissions\n from rest_framework import filters\n from rest_framework.generics import ListAPIView\n@@ -8,7 +12,7 @@\n from rest_framework_extensions.key_constructor import bits\n from rest_framework_extensions.key_constructor.constructors import DefaultKeyConstructor\n \n-from zds.api.bits import DJRF3xPaginationKeyBit\n+from zds.api.bits import DJRF3xPaginationKeyBit, UpdatedAtKeyBit\n from zds.notification.api.serializers import NotificationSerializer\n from zds.notification.models import Notification\n \n@@ -19,6 +23,15 @@\n list_sql_query = bits.ListSqlQueryKeyBit()\n unique_view_id = bits.UniqueViewIdKeyBit()\n user = bits.UserKeyBit()\n+ updated_at = UpdatedAtKeyBit('api_updated_notification')\n+\n+\n+def change_api_notification_updated_at(sender=None, instance=None, *args, **kwargs):\n+ cache.set('api_updated_notification', datetime.datetime.utcnow())\n+\n+\n+post_save.connect(receiver=change_api_notification_updated_at, sender=Notification)\n+post_delete.connect(receiver=change_api_notification_updated_at, sender=Notification)\n \n \n class NotificationListAPI(ListAPIView):\n", "issue": "[beta][v20] Lire une notification n'invalide pas le cache\nServeur : Beta\nVersion : v20-RC2/99bee1d\nSyst\u00e8me : Mac OS X\nNavigateur : 52.0.2743.116 (64-bit)\n\n---\n1. G\u00e9n\u00e9rez une notification.\n2. Lisez l\u00e0 depuis le site.\n3. R\u00e9cup\u00e9rez la liste des notifications par l'API.\n4. 
Si le timeout de 15 minutes n'est pas pass\u00e9 par l\u00e0, la notification est toujours marqu\u00e9e comme non lue dans la r\u00e9ponse de l'API.\n\n", "before_files": [{"content": "# coding: utf-8\nfrom dry_rest_permissions.generics import DRYPermissions\nfrom rest_framework import filters\nfrom rest_framework.generics import ListAPIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework_extensions.cache.decorators import cache_response\nfrom rest_framework_extensions.etag.decorators import etag\nfrom rest_framework_extensions.key_constructor import bits\nfrom rest_framework_extensions.key_constructor.constructors import DefaultKeyConstructor\n\nfrom zds.api.bits import DJRF3xPaginationKeyBit\nfrom zds.notification.api.serializers import NotificationSerializer\nfrom zds.notification.models import Notification\n\n\nclass PagingNotificationListKeyConstructor(DefaultKeyConstructor):\n pagination = DJRF3xPaginationKeyBit()\n search = bits.QueryParamsKeyBit(['search', 'ordering', 'type'])\n list_sql_query = bits.ListSqlQueryKeyBit()\n unique_view_id = bits.UniqueViewIdKeyBit()\n user = bits.UserKeyBit()\n\n\nclass NotificationListAPI(ListAPIView):\n \"\"\"\n List of notification.\n \"\"\"\n\n filter_backends = (filters.SearchFilter, filters.OrderingFilter)\n search_fields = ('title',)\n ordering_fields = ('pubdate', 'title',)\n list_key_func = PagingNotificationListKeyConstructor()\n serializer_class = NotificationSerializer\n permission_classes = (IsAuthenticated, DRYPermissions,)\n\n @etag(list_key_func)\n @cache_response(key_func=list_key_func)\n def get(self, request, *args, **kwargs):\n \"\"\"\n Lists all notifications of a user.\n ---\n\n parameters:\n - name: Authorization\n description: Bearer token to make an authenticated request.\n required: true\n paramType: header\n - name: page\n description: Restricts output to the given page number.\n required: false\n paramType: query\n - name: page_size\n description: Sets the number of notifications per page.\n required: false\n paramType: query\n - name: search\n description: Filters by title.\n required: false\n paramType: query\n - name: ordering\n description: Sorts the results. You can order by (-)pubdate or (-)title.\n paramType: query\n - name: type\n description: Filters by notification type.\n paramType: query\n - name: subscription_type\n description: Filters by subscription type.\n paramType: query\n - name: expand\n description: Returns an object instead of an identifier representing the given field.\n required: false\n paramType: query\n responseMessages:\n - code: 401\n message: Not Authenticated\n - code: 404\n message: Not Found\n \"\"\"\n return self.list(request, *args, **kwargs)\n\n def get_queryset(self):\n queryset = Notification.objects.get_notifications_of(self.request.user)\n subscription_type = self.request.query_params.get('subscription_type', None)\n if subscription_type:\n queryset = queryset.filter(subscription__content_type__model=subscription_type)\n _type = self.request.query_params.get('type', None)\n if _type:\n queryset = queryset.filter(content_type__model=_type)\n return queryset\n", "path": "zds/notification/api/views.py"}]} | 1,524 | 333 |
gh_patches_debug_35738 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-1682 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Increase streaming unit tests
Reach parity with the existing C# streaming unit tests.
</issue>
<code>
[start of libraries/botframework-streaming/botframework/streaming/receive_request.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 from typing import List
5
6 from botframework.streaming.payloads import ContentStream
7
8
9 class ReceiveRequest:
10 def __init__(
11 self, *, verb: str = None, path: str = None, streams: List[ContentStream]
12 ):
13 self.verb = verb
14 self.path = path
15 self.streams: List[ContentStream] = streams or []
16
17 async def read_body_as_str(self) -> str:
18 try:
19 content_stream = self.streams[0] if self.streams else None
20
21 if not content_stream:
22 # TODO: maybe raise an error
23 return ""
24
25 # TODO: encoding double check
26 stream = await content_stream.stream.read_until_end()
27 return bytes(stream).decode("utf-8-sig")
28 except Exception as error:
29 raise error
30
[end of libraries/botframework-streaming/botframework/streaming/receive_request.py]
[start of libraries/botframework-streaming/botframework/streaming/streaming_response.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 import json
5 from uuid import UUID, uuid4
6 from typing import List, Union
7
8 from msrest.serialization import Model
9 from botframework.streaming.payloads import ResponseMessageStream
10 from botframework.streaming.payloads.models import Serializable
11
12
13 class StreamingResponse:
14 def __init__(
15 self, *, status_code: int = None, streams: List[ResponseMessageStream] = None
16 ):
17 self.status_code = status_code
18 self.streams = streams
19
20 def add_stream(self, content: object, identifier: UUID = None):
21 if not content:
22 raise TypeError("content can't be None")
23
24 if self.streams is None:
25 self.streams: List[ResponseMessageStream] = []
26
27 self.streams.append(
28 ResponseMessageStream(id=identifier or uuid4(), content=content)
29 )
30
31 def set_body(self, body: Union[str, Serializable, Model]):
32 # TODO: verify if msrest.serialization.Model is necessary
33 if not body:
34 return
35
36 if isinstance(body, Serializable):
37 body = body.to_json()
38 elif isinstance(body, Model):
39 body = json.dumps(body.as_dict())
40
41 self.add_stream(list(body.encode()))
42
43 @staticmethod
44 def create_response(status_code: int, body: object) -> "StreamingResponse":
45 response = StreamingResponse(status_code=status_code)
46
47 if body:
48 response.add_stream(body)
49
50 return response
51
[end of libraries/botframework-streaming/botframework/streaming/streaming_response.py]
[start of libraries/botframework-streaming/botframework/streaming/receive_response.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 from typing import List, Union, Type
5
6 from msrest.serialization import Model
7 from botframework.streaming.payloads import ContentStream
8 from botframework.streaming.payloads.models import Serializable
9
10
11 class ReceiveResponse:
12 def __init__(self, status_code: int = None, streams: List[ContentStream] = None):
13 self.status_code = status_code
14 self.streams = streams
15
16 def read_body_as_json(
17 self, cls: Union[Type[Model], Type[Serializable]]
18 ) -> Union[Model, Serializable]:
19 try:
20 body_str = self.read_body_as_str()
21 body = None
22
23 if issubclass(cls, Serializable):
24 body = cls().from_json(body_str)
25 elif isinstance(cls, Model):
26 body = cls.deserialize(body_str)
27 return body
28 except Exception as error:
29 raise error
30
31 def read_body_as_str(self) -> str:
32 try:
33 content_stream = self.read_body()
34
35 if not content_stream:
36 return ""
37
38 # TODO: encoding double check
39 return content_stream.decode("utf8")
40 except Exception as error:
41 raise error
42
43 def read_body(self) -> bytes:
44 try:
45 content_stream = self.streams[0] if self.streams else None
46
47 if not content_stream:
48 return None
49
50 return bytes(content_stream.stream)
51 except Exception as error:
52 raise error
53
[end of libraries/botframework-streaming/botframework/streaming/receive_response.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libraries/botframework-streaming/botframework/streaming/receive_request.py b/libraries/botframework-streaming/botframework/streaming/receive_request.py
--- a/libraries/botframework-streaming/botframework/streaming/receive_request.py
+++ b/libraries/botframework-streaming/botframework/streaming/receive_request.py
@@ -8,7 +8,7 @@
class ReceiveRequest:
def __init__(
- self, *, verb: str = None, path: str = None, streams: List[ContentStream]
+ self, *, verb: str = None, path: str = None, streams: List[ContentStream] = None
):
self.verb = verb
self.path = path
diff --git a/libraries/botframework-streaming/botframework/streaming/receive_response.py b/libraries/botframework-streaming/botframework/streaming/receive_response.py
--- a/libraries/botframework-streaming/botframework/streaming/receive_response.py
+++ b/libraries/botframework-streaming/botframework/streaming/receive_response.py
@@ -9,9 +9,9 @@
class ReceiveResponse:
- def __init__(self, status_code: int = None, streams: List[ContentStream] = None):
+ def __init__(self, status_code: int = 0, streams: List[ContentStream] = None):
self.status_code = status_code
- self.streams = streams
+ self.streams = streams or []
def read_body_as_json(
self, cls: Union[Type[Model], Type[Serializable]]
diff --git a/libraries/botframework-streaming/botframework/streaming/streaming_response.py b/libraries/botframework-streaming/botframework/streaming/streaming_response.py
--- a/libraries/botframework-streaming/botframework/streaming/streaming_response.py
+++ b/libraries/botframework-streaming/botframework/streaming/streaming_response.py
@@ -2,6 +2,7 @@
# Licensed under the MIT License.
import json
+from http import HTTPStatus
from uuid import UUID, uuid4
from typing import List, Union
@@ -12,7 +13,7 @@
class StreamingResponse:
def __init__(
- self, *, status_code: int = None, streams: List[ResponseMessageStream] = None
+ self, *, status_code: int = 0, streams: List[ResponseMessageStream] = None
):
self.status_code = status_code
self.streams = streams
@@ -48,3 +49,20 @@
response.add_stream(body)
return response
+
+ @staticmethod
+ def not_found(body: object = None) -> "StreamingResponse":
+ return StreamingResponse.create_response(HTTPStatus.NOT_FOUND, body)
+
+ @staticmethod
+ def forbidden(body: object = None) -> "StreamingResponse":
+ return StreamingResponse.create_response(HTTPStatus.FORBIDDEN, body)
+
+ # pylint: disable=invalid-name
+ @staticmethod
+ def ok(body: object = None) -> "StreamingResponse":
+ return StreamingResponse.create_response(HTTPStatus.OK, body)
+
+ @staticmethod
+ def internal_server_error(body: object = None) -> "StreamingResponse":
+ return StreamingResponse.create_response(HTTPStatus.INTERNAL_SERVER_ERROR, body)
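
Toward the parity goal in the issue, a sketch of unit tests exercising the new factory helpers. It assumes `StreamingResponse` is re-exported from the `botframework.streaming` package root; the test names are illustrative:

```python
import unittest
from http import HTTPStatus

from botframework.streaming import StreamingResponse


class TestStreamingResponseFactories(unittest.TestCase):
    def test_not_found_sets_status_and_no_streams(self):
        response = StreamingResponse.not_found()
        self.assertEqual(HTTPStatus.NOT_FOUND, response.status_code)
        self.assertIsNone(response.streams)  # no body, so no stream added

    def test_ok_with_body_adds_one_stream(self):
        response = StreamingResponse.ok("hello")
        self.assertEqual(HTTPStatus.OK, response.status_code)
        self.assertEqual(1, len(response.streams))

    def test_internal_server_error_status(self):
        response = StreamingResponse.internal_server_error()
        self.assertEqual(HTTPStatus.INTERNAL_SERVER_ERROR, response.status_code)
```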
| {"golden_diff": "diff --git a/libraries/botframework-streaming/botframework/streaming/receive_request.py b/libraries/botframework-streaming/botframework/streaming/receive_request.py\n--- a/libraries/botframework-streaming/botframework/streaming/receive_request.py\n+++ b/libraries/botframework-streaming/botframework/streaming/receive_request.py\n@@ -8,7 +8,7 @@\n \n class ReceiveRequest:\n def __init__(\n- self, *, verb: str = None, path: str = None, streams: List[ContentStream]\n+ self, *, verb: str = None, path: str = None, streams: List[ContentStream] = None\n ):\n self.verb = verb\n self.path = path\ndiff --git a/libraries/botframework-streaming/botframework/streaming/receive_response.py b/libraries/botframework-streaming/botframework/streaming/receive_response.py\n--- a/libraries/botframework-streaming/botframework/streaming/receive_response.py\n+++ b/libraries/botframework-streaming/botframework/streaming/receive_response.py\n@@ -9,9 +9,9 @@\n \n \n class ReceiveResponse:\n- def __init__(self, status_code: int = None, streams: List[ContentStream] = None):\n+ def __init__(self, status_code: int = 0, streams: List[ContentStream] = None):\n self.status_code = status_code\n- self.streams = streams\n+ self.streams = streams or []\n \n def read_body_as_json(\n self, cls: Union[Type[Model], Type[Serializable]]\ndiff --git a/libraries/botframework-streaming/botframework/streaming/streaming_response.py b/libraries/botframework-streaming/botframework/streaming/streaming_response.py\n--- a/libraries/botframework-streaming/botframework/streaming/streaming_response.py\n+++ b/libraries/botframework-streaming/botframework/streaming/streaming_response.py\n@@ -2,6 +2,7 @@\n # Licensed under the MIT License.\n \n import json\n+from http import HTTPStatus\n from uuid import UUID, uuid4\n from typing import List, Union\n \n@@ -12,7 +13,7 @@\n \n class StreamingResponse:\n def __init__(\n- self, *, status_code: int = None, streams: List[ResponseMessageStream] = None\n+ self, *, status_code: int = 0, streams: List[ResponseMessageStream] = None\n ):\n self.status_code = status_code\n self.streams = streams\n@@ -48,3 +49,20 @@\n response.add_stream(body)\n \n return response\n+\n+ @staticmethod\n+ def not_found(body: object = None) -> \"StreamingResponse\":\n+ return StreamingResponse.create_response(HTTPStatus.NOT_FOUND, body)\n+\n+ @staticmethod\n+ def forbidden(body: object = None) -> \"StreamingResponse\":\n+ return StreamingResponse.create_response(HTTPStatus.FORBIDDEN, body)\n+\n+ # pylint: disable=invalid-name\n+ @staticmethod\n+ def ok(body: object = None) -> \"StreamingResponse\":\n+ return StreamingResponse.create_response(HTTPStatus.OK, body)\n+\n+ @staticmethod\n+ def internal_server_error(body: object = None) -> \"StreamingResponse\":\n+ return StreamingResponse.create_response(HTTPStatus.INTERNAL_SERVER_ERROR, body)\n", "issue": "Increase streaming unit tests\nreach parity with C# unit tests\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nfrom typing import List\n\nfrom botframework.streaming.payloads import ContentStream\n\n\nclass ReceiveRequest:\n def __init__(\n self, *, verb: str = None, path: str = None, streams: List[ContentStream]\n ):\n self.verb = verb\n self.path = path\n self.streams: List[ContentStream] = streams or []\n\n async def read_body_as_str(self) -> str:\n try:\n content_stream = self.streams[0] if self.streams else None\n\n if not content_stream:\n # TODO: maybe raise an error\n return \"\"\n\n # TODO: encoding double check\n stream = await content_stream.stream.read_until_end()\n return bytes(stream).decode(\"utf-8-sig\")\n except Exception as error:\n raise error\n", "path": "libraries/botframework-streaming/botframework/streaming/receive_request.py"}, {"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport json\nfrom uuid import UUID, uuid4\nfrom typing import List, Union\n\nfrom msrest.serialization import Model\nfrom botframework.streaming.payloads import ResponseMessageStream\nfrom botframework.streaming.payloads.models import Serializable\n\n\nclass StreamingResponse:\n def __init__(\n self, *, status_code: int = None, streams: List[ResponseMessageStream] = None\n ):\n self.status_code = status_code\n self.streams = streams\n\n def add_stream(self, content: object, identifier: UUID = None):\n if not content:\n raise TypeError(\"content can't be None\")\n\n if self.streams is None:\n self.streams: List[ResponseMessageStream] = []\n\n self.streams.append(\n ResponseMessageStream(id=identifier or uuid4(), content=content)\n )\n\n def set_body(self, body: Union[str, Serializable, Model]):\n # TODO: verify if msrest.serialization.Model is necessary\n if not body:\n return\n\n if isinstance(body, Serializable):\n body = body.to_json()\n elif isinstance(body, Model):\n body = json.dumps(body.as_dict())\n\n self.add_stream(list(body.encode()))\n\n @staticmethod\n def create_response(status_code: int, body: object) -> \"StreamingResponse\":\n response = StreamingResponse(status_code=status_code)\n\n if body:\n response.add_stream(body)\n\n return response\n", "path": "libraries/botframework-streaming/botframework/streaming/streaming_response.py"}, {"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nfrom typing import List, Union, Type\n\nfrom msrest.serialization import Model\nfrom botframework.streaming.payloads import ContentStream\nfrom botframework.streaming.payloads.models import Serializable\n\n\nclass ReceiveResponse:\n def __init__(self, status_code: int = None, streams: List[ContentStream] = None):\n self.status_code = status_code\n self.streams = streams\n\n def read_body_as_json(\n self, cls: Union[Type[Model], Type[Serializable]]\n ) -> Union[Model, Serializable]:\n try:\n body_str = self.read_body_as_str()\n body = None\n\n if issubclass(cls, Serializable):\n body = cls().from_json(body_str)\n elif isinstance(cls, Model):\n body = cls.deserialize(body_str)\n return body\n except Exception as error:\n raise error\n\n def read_body_as_str(self) -> str:\n try:\n content_stream = self.read_body()\n\n if not content_stream:\n return \"\"\n\n # TODO: encoding double check\n return content_stream.decode(\"utf8\")\n except Exception as error:\n raise error\n\n def read_body(self) -> bytes:\n try:\n content_stream = self.streams[0] if self.streams else None\n\n if not content_stream:\n return None\n\n return bytes(content_stream.stream)\n except Exception as error:\n raise error\n", "path": "libraries/botframework-streaming/botframework/streaming/receive_response.py"}]} | 1,719 | 736 |
gh_patches_debug_14007 | rasdani/github-patches | git_diff | pyqtgraph__pyqtgraph-1219 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CSV export broken
### Short description
Exporting to CSV fails when a curve name contains characters that cannot be encoded with the platform's default encoding.
### Code to reproduce
```python
from pyqtgraph.Qt import QtGui, QtCore
import numpy as np
import pyqtgraph as pg
#QtGui.QApplication.setGraphicsSystem('raster')
app = QtGui.QApplication([])
win = pg.GraphicsLayoutWidget(show=True, title="Basic plotting examples")
win.resize(1000,600)
win.setWindowTitle('pyqtgraph example: Plotting')
pg.setConfigOptions(antialias=True)
pw = win.addPlot(title="Scatter plot, axis labels, log scale")
pw.addLegend()
pw.plot(np.random.normal(size=100), pen=(255,0,0), name="\u00A0下加热体")
QtGui.QApplication.instance().exec_()
```
### Expected behavior
The CSV export succeeds.
### Real behavior
The CSV export fails with a `UnicodeEncodeError`:
```
---------------------------------------------------------------------------
UnicodeEncodeError Traceback (most recent call last)
c:\program files\python37\lib\site-packages\pyqtgraph\exporters\Exporter.py in fileSaveFinished(self, fileName)
75 fileName = fileName + '.' + selectedExt.lstrip('.')
76
---> 77 self.export(fileName=fileName, **self.fileDialog.opts)
78
79 def getScene(self):
c:\program files\python37\lib\site-packages\pyqtgraph\exporters\CSVExporter.py in export(self, fileName)
58
59 with open(fileName, 'w') as fd:
---> 60 fd.write(sep.join(header) + '\n')
61 i = 0
62 numFormat = '%%0.%dg' % self.params['precision']
UnicodeEncodeError: 'gbk' codec can't encode character '\xa0' in position 1: illegal multibyte sequence
```
### Tested environment(s)
* PyQtGraph version: 0.11.0.dev0+g2203933
* Qt Python binding: PyQt5 5.13.2 Qt 5.13.2
* Python version: Python 3.7.5
* NumPy version: 1.17.4
* Operating system: Windows 7 X64
* Installation method: pip git+
### Additional context
I use "\u00A0" because I want to add some space before the label name in the legend.
Could the CSV export use "utf-8" instead of the platform default ("gbk" here)?
</issue>
<code>
[start of pyqtgraph/exporters/CSVExporter.py]
1 # -*- coding: utf-8 -*-
2 from ..Qt import QtGui, QtCore
3 from .Exporter import Exporter
4 from ..parametertree import Parameter
5 from .. import PlotItem
6
7 __all__ = ['CSVExporter']
8
9
10 class CSVExporter(Exporter):
11 Name = "CSV from plot data"
12 windows = []
13 def __init__(self, item):
14 Exporter.__init__(self, item)
15 self.params = Parameter(name='params', type='group', children=[
16 {'name': 'separator', 'type': 'list', 'value': 'comma', 'values': ['comma', 'tab']},
17 {'name': 'precision', 'type': 'int', 'value': 10, 'limits': [0, None]},
18 {'name': 'columnMode', 'type': 'list', 'values': ['(x,y) per plot', '(x,y,y,y) for all plots']}
19 ])
20
21 def parameters(self):
22 return self.params
23
24 def export(self, fileName=None):
25
26 if not isinstance(self.item, PlotItem):
27 raise Exception("Must have a PlotItem selected for CSV export.")
28
29 if fileName is None:
30 self.fileSaveDialog(filter=["*.csv", "*.tsv"])
31 return
32
33 data = []
34 header = []
35
36 appendAllX = self.params['columnMode'] == '(x,y) per plot'
37
38 for i, c in enumerate(self.item.curves):
39 cd = c.getData()
40 if cd[0] is None:
41 continue
42 data.append(cd)
43 if hasattr(c, 'implements') and c.implements('plotData') and c.name() is not None:
44 name = c.name().replace('"', '""') + '_'
45 xName, yName = '"'+name+'x"', '"'+name+'y"'
46 else:
47 xName = 'x%04d' % i
48 yName = 'y%04d' % i
49 if appendAllX or i == 0:
50 header.extend([xName, yName])
51 else:
52 header.extend([yName])
53
54 if self.params['separator'] == 'comma':
55 sep = ','
56 else:
57 sep = '\t'
58
59 with open(fileName, 'w') as fd:
60 fd.write(sep.join(header) + '\n')
61 i = 0
62 numFormat = '%%0.%dg' % self.params['precision']
63 numRows = max([len(d[0]) for d in data])
64 for i in range(numRows):
65 for j, d in enumerate(data):
66 # write x value if this is the first column, or if we want
67 # x for all rows
68 if appendAllX or j == 0:
69 if d is not None and i < len(d[0]):
70 fd.write(numFormat % d[0][i] + sep)
71 else:
72 fd.write(' %s' % sep)
73
74 # write y value
75 if d is not None and i < len(d[1]):
76 fd.write(numFormat % d[1][i] + sep)
77 else:
78 fd.write(' %s' % sep)
79 fd.write('\n')
80
81
82 CSVExporter.register()
83
84
85
[end of pyqtgraph/exporters/CSVExporter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyqtgraph/exporters/CSVExporter.py b/pyqtgraph/exporters/CSVExporter.py
--- a/pyqtgraph/exporters/CSVExporter.py
+++ b/pyqtgraph/exporters/CSVExporter.py
@@ -3,6 +3,7 @@
from .Exporter import Exporter
from ..parametertree import Parameter
from .. import PlotItem
+from ..python2_3 import asUnicode
__all__ = ['CSVExporter']
@@ -57,7 +58,7 @@
sep = '\t'
with open(fileName, 'w') as fd:
- fd.write(sep.join(header) + '\n')
+ fd.write(sep.join(map(asUnicode, header)) + '\n')
i = 0
numFormat = '%%0.%dg' % self.params['precision']
numRows = max([len(d[0]) for d in data])
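
The patch above makes the header join robust to non-string entries; the reporter's encoding question is a separate concern. On Python 3 it could be addressed by opening the file with an explicit encoding rather than the locale default, as in this standalone sketch (an assumption for illustration, not part of the patch):

```python
import io

header = ['"\u00a0下加热体_x"', '"\u00a0下加热体_y"']
sep = ','

# An explicit encoding avoids the locale-dependent default
# ('gbk' on a Chinese-locale Windows install, which cannot encode '\xa0'):
with io.open('export.csv', 'w', encoding='utf-8') as fd:
    fd.write(sep.join(header) + '\n')
```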
| {"golden_diff": "diff --git a/pyqtgraph/exporters/CSVExporter.py b/pyqtgraph/exporters/CSVExporter.py\n--- a/pyqtgraph/exporters/CSVExporter.py\n+++ b/pyqtgraph/exporters/CSVExporter.py\n@@ -3,6 +3,7 @@\n from .Exporter import Exporter\n from ..parametertree import Parameter\n from .. import PlotItem\n+from ..python2_3 import asUnicode\n \n __all__ = ['CSVExporter']\n \n@@ -57,7 +58,7 @@\n sep = '\\t'\n \n with open(fileName, 'w') as fd:\n- fd.write(sep.join(header) + '\\n')\n+ fd.write(sep.join(map(asUnicode, header)) + '\\n')\n i = 0\n numFormat = '%%0.%dg' % self.params['precision']\n numRows = max([len(d[0]) for d in data])\n", "issue": "CSV export broken\n### Short description\r\nExport CSV failed when the plot name has decode error characters.\r\n\r\n### Code to reproduce\r\n```python\r\nfrom pyqtgraph.Qt import QtGui, QtCore\r\nimport numpy as np\r\nimport pyqtgraph as pg\r\n\r\n#QtGui.QApplication.setGraphicsSystem('raster')\r\napp = QtGui.QApplication([])\r\nwin = pg.GraphicsLayoutWidget(show=True, title=\"Basic plotting examples\")\r\nwin.resize(1000,600)\r\nwin.setWindowTitle('pyqtgraph example: Plotting')\r\n\r\n\r\npg.setConfigOptions(antialias=True)\r\n\r\npw = win.addPlot(title=\"Scatter plot, axis labels, log scale\")\r\npw.addLegend()\r\npw .plot(np.random.normal(size=100), pen=(255,0,0), name=\"\\u00A0\u4e0b\u52a0\u70ed\u4f53\")\r\n\r\nQtGui.QApplication.instance().exec_()\r\n```\r\n\r\n### Expected behavior\r\nExport CSV Success\r\n\r\n### Real behavior\r\nExport CSV Failed\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nUnicodeEncodeError Traceback (most recent call last)\r\nc:\\program files\\python37\\lib\\site-packages\\pyqtgraph\\exporters\\Exporter.py in fileSaveFinished(self, fileName)\r\n 75 fileName = fileName + '.' + selectedExt.lstrip('.')\r\n 76\r\n---> 77 self.export(fileName=fileName, **self.fileDialog.opts)\r\n 78\r\n 79 def getScene(self):\r\n\r\nc:\\program files\\python37\\lib\\site-packages\\pyqtgraph\\exporters\\CSVExporter.py in export(self, fileName)\r\n 58\r\n 59 with open(fileName, 'w') as fd:\r\n---> 60 fd.write(sep.join(header) + '\\n')\r\n 61 i = 0\r\n 62 numFormat = '%%0.%dg' % self.params['precision']\r\n\r\nUnicodeEncodeError: 'gbk' codec can't encode character '\\xa0' in position 1: illegal multibyte sequence\r\n```\r\n\r\n### Tested environment(s)\r\n\r\n * PyQtGraph version: 0.11.0.dev0+g2203933\r\n * Qt Python binding: PyQt5 5.13.2 Qt 5.13.2\r\n * Python version: Python 3.7.5 \r\n * NumPy version: 1.17.4\r\n * Operating system: Windows 7 X64\r\n * Installation method: pip git+\r\n\r\n### Additional context\r\nI use \"\\u00A0\" because i want to add some space before label name in the legend.\r\nCould i use the csv export by \"utf-8\" but not \"gbk\" ?\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom ..Qt import QtGui, QtCore\nfrom .Exporter import Exporter\nfrom ..parametertree import Parameter\nfrom .. 
import PlotItem\n\n__all__ = ['CSVExporter']\n \n \nclass CSVExporter(Exporter):\n Name = \"CSV from plot data\"\n windows = []\n def __init__(self, item):\n Exporter.__init__(self, item)\n self.params = Parameter(name='params', type='group', children=[\n {'name': 'separator', 'type': 'list', 'value': 'comma', 'values': ['comma', 'tab']},\n {'name': 'precision', 'type': 'int', 'value': 10, 'limits': [0, None]},\n {'name': 'columnMode', 'type': 'list', 'values': ['(x,y) per plot', '(x,y,y,y) for all plots']}\n ])\n \n def parameters(self):\n return self.params\n \n def export(self, fileName=None):\n \n if not isinstance(self.item, PlotItem):\n raise Exception(\"Must have a PlotItem selected for CSV export.\")\n \n if fileName is None:\n self.fileSaveDialog(filter=[\"*.csv\", \"*.tsv\"])\n return\n\n data = []\n header = []\n\n appendAllX = self.params['columnMode'] == '(x,y) per plot'\n\n for i, c in enumerate(self.item.curves):\n cd = c.getData()\n if cd[0] is None:\n continue\n data.append(cd)\n if hasattr(c, 'implements') and c.implements('plotData') and c.name() is not None:\n name = c.name().replace('\"', '\"\"') + '_'\n xName, yName = '\"'+name+'x\"', '\"'+name+'y\"'\n else:\n xName = 'x%04d' % i\n yName = 'y%04d' % i\n if appendAllX or i == 0:\n header.extend([xName, yName])\n else:\n header.extend([yName])\n\n if self.params['separator'] == 'comma':\n sep = ','\n else:\n sep = '\\t'\n\n with open(fileName, 'w') as fd:\n fd.write(sep.join(header) + '\\n')\n i = 0\n numFormat = '%%0.%dg' % self.params['precision']\n numRows = max([len(d[0]) for d in data])\n for i in range(numRows):\n for j, d in enumerate(data):\n # write x value if this is the first column, or if we want\n # x for all rows\n if appendAllX or j == 0:\n if d is not None and i < len(d[0]):\n fd.write(numFormat % d[0][i] + sep)\n else:\n fd.write(' %s' % sep)\n\n # write y value\n if d is not None and i < len(d[1]):\n fd.write(numFormat % d[1][i] + sep)\n else:\n fd.write(' %s' % sep)\n fd.write('\\n')\n\n\nCSVExporter.register() \n \n \n", "path": "pyqtgraph/exporters/CSVExporter.py"}]} | 1,971 | 198 |
gh_patches_debug_36559 | rasdani/github-patches | git_diff | svthalia__concrexit-2930 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add API endpoint for event slugs
### Is your feature request related to a problem? Please describe.
For the app, we want to fetch events by their slug; this is currently not possible.
### Describe the solution you'd like
Add an API endpoint for event slugs.
</issue>
<code>
[start of website/events/api/v2/urls.py]
1 """Events app API v2 urls."""
2 from django.urls import path
3
4 from events.api.v2.views import (
5 EventDetailView,
6 EventListView,
7 EventRegistrationDetailView,
8 EventRegistrationFieldsView,
9 EventRegistrationsView,
10 ExternalEventDetailView,
11 ExternalEventListView,
12 MarkPresentAPIView,
13 )
14
15 app_name = "events"
16
17 urlpatterns = [
18 path("events/", EventListView.as_view(), name="events-list"),
19 path(
20 "events/<int:pk>/",
21 EventDetailView.as_view(),
22 name="event-detail",
23 ),
24 path(
25 "events/<int:pk>/registrations/",
26 EventRegistrationsView.as_view(),
27 name="event-registrations",
28 ),
29 path(
30 "events/<int:event_id>/registrations/<int:pk>/",
31 EventRegistrationDetailView.as_view(),
32 name="event-registration-detail",
33 ),
34 path(
35 "events/<int:event_id>/registrations/<int:registration_id>/fields/",
36 EventRegistrationFieldsView.as_view(),
37 name="event-registration-fields",
38 ),
39 path(
40 "events/<int:pk>/mark-present/<uuid:token>/",
41 MarkPresentAPIView.as_view(),
42 name="mark-present",
43 ),
44 path(
45 "events/external/", ExternalEventListView.as_view(), name="external-events-list"
46 ),
47 path(
48 "events/external/<int:pk>/",
49 ExternalEventDetailView.as_view(),
50 name="external-event-detail",
51 ),
52 ]
53
[end of website/events/api/v2/urls.py]
[start of website/events/api/v2/serializers/event.py]
1 from rest_framework import serializers
2
3 from activemembers.api.v2.serializers.member_group import MemberGroupSerializer
4 from documents.api.v2.serializers.document import DocumentSerializer
5 from events import services
6 from events.api.v2.serializers.event_registration import EventRegistrationSerializer
7 from events.models import Event
8 from payments.api.v2.serializers.payment_amount import PaymentAmountSerializer
9 from thaliawebsite.api.v2.serializers import CleanedHTMLSerializer
10 from thaliawebsite.api.v2.serializers.cleaned_model_serializer import (
11 CleanedModelSerializer,
12 )
13 from utils.snippets import create_google_maps_url
14
15
16 class EventSerializer(CleanedModelSerializer):
17 """Serializer for events."""
18
19 class Meta:
20 model = Event
21 fields = (
22 "pk",
23 "title",
24 "description",
25 "caption",
26 "start",
27 "end",
28 "category",
29 "registration_start",
30 "registration_end",
31 "cancel_deadline",
32 "optional_registrations",
33 "location",
34 "price",
35 "fine",
36 "num_participants",
37 "max_participants",
38 "no_registration_message",
39 "registration_status",
40 "cancel_too_late_message",
41 "has_fields",
42 "food_event",
43 "maps_url",
44 "user_permissions",
45 "user_registration",
46 "organisers",
47 "documents",
48 )
49
50 description = CleanedHTMLSerializer()
51 organisers = MemberGroupSerializer(many=True)
52 user_registration = serializers.SerializerMethodField("_user_registration")
53 num_participants = serializers.SerializerMethodField("_num_participants")
54 maps_url = serializers.SerializerMethodField("_maps_url")
55 registration_status = serializers.SerializerMethodField("_registration_status")
56 price = PaymentAmountSerializer()
57 fine = PaymentAmountSerializer()
58 documents = DocumentSerializer(many=True)
59 user_permissions = serializers.SerializerMethodField("_user_permissions")
60
61 def _user_registration(self, instance: Event):
62 if self.context["request"].member and len(instance.member_registration) > 0:
63 registration = instance.member_registration[-1]
64 return EventRegistrationSerializer(
65 registration,
66 context=self.context,
67 fields=(
68 "pk",
69 "present",
70 "queue_position",
71 "is_cancelled",
72 "is_late_cancellation",
73 "date",
74 "payment",
75 ),
76 ).data
77 return None
78
79 def _registration_status(self, instance: Event):
80 if self.context["request"].member and len(instance.member_registration) > 0:
81 registration = instance.member_registration[-1]
82 else:
83 registration = None
84 status = services.registration_status(
85 instance, registration, self.context["request"].member
86 )
87 cancel_status = services.cancel_status(instance, registration)
88
89 status_str = services.registration_status_string(status, instance, registration)
90 cancel_str = services.cancel_info_string(instance, cancel_status, status)
91 if services.show_cancel_status(status) and cancel_str != "":
92 return f"{status_str} {cancel_str}"
93 return f"{status_str}"
94
95 def _num_participants(self, instance: Event):
96 if instance.max_participants:
97 return min(instance.participant_count, instance.max_participants)
98 return instance.participant_count
99
100 def _user_permissions(self, instance):
101 member = self.context["request"].member
102 return services.event_permissions(member, instance, registration_prefetch=True)
103
104 def _maps_url(self, instance):
105 return create_google_maps_url(instance.map_location, zoom=13, size="450x250")
106
[end of website/events/api/v2/serializers/event.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/events/api/v2/serializers/event.py b/website/events/api/v2/serializers/event.py
--- a/website/events/api/v2/serializers/event.py
+++ b/website/events/api/v2/serializers/event.py
@@ -1,4 +1,5 @@
from rest_framework import serializers
+from rest_framework.reverse import reverse
from activemembers.api.v2.serializers.member_group import MemberGroupSerializer
from documents.api.v2.serializers.document import DocumentSerializer
@@ -20,6 +21,8 @@
model = Event
fields = (
"pk",
+ "slug",
+ "url",
"title",
"description",
"caption",
@@ -57,6 +60,7 @@
fine = PaymentAmountSerializer()
documents = DocumentSerializer(many=True)
user_permissions = serializers.SerializerMethodField("_user_permissions")
+ url = serializers.SerializerMethodField("_url")
def _user_registration(self, instance: Event):
if self.context["request"].member and len(instance.member_registration) > 0:
@@ -101,5 +105,18 @@
member = self.context["request"].member
return services.event_permissions(member, instance, registration_prefetch=True)
+ def _url(self, instance: Event):
+ if instance.slug is None:
+ return reverse(
+ "events:event",
+ kwargs={"pk": instance.pk},
+ request=self.context["request"],
+ )
+ return reverse(
+ "events:event",
+ kwargs={"slug": instance.slug},
+ request=self.context["request"],
+ )
+
def _maps_url(self, instance):
return create_google_maps_url(instance.map_location, zoom=13, size="450x250")
diff --git a/website/events/api/v2/urls.py b/website/events/api/v2/urls.py
--- a/website/events/api/v2/urls.py
+++ b/website/events/api/v2/urls.py
@@ -21,6 +21,11 @@
EventDetailView.as_view(),
name="event-detail",
),
+ path(
+ "events/<slug:slug>/",
+ EventDetailView.as_view(lookup_field="slug"),
+ name="event-detail",
+ ),
path(
"events/<int:pk>/registrations/",
EventRegistrationsView.as_view(),
| {"golden_diff": "diff --git a/website/events/api/v2/serializers/event.py b/website/events/api/v2/serializers/event.py\n--- a/website/events/api/v2/serializers/event.py\n+++ b/website/events/api/v2/serializers/event.py\n@@ -1,4 +1,5 @@\n from rest_framework import serializers\n+from rest_framework.reverse import reverse\n \n from activemembers.api.v2.serializers.member_group import MemberGroupSerializer\n from documents.api.v2.serializers.document import DocumentSerializer\n@@ -20,6 +21,8 @@\n model = Event\n fields = (\n \"pk\",\n+ \"slug\",\n+ \"url\",\n \"title\",\n \"description\",\n \"caption\",\n@@ -57,6 +60,7 @@\n fine = PaymentAmountSerializer()\n documents = DocumentSerializer(many=True)\n user_permissions = serializers.SerializerMethodField(\"_user_permissions\")\n+ url = serializers.SerializerMethodField(\"_url\")\n \n def _user_registration(self, instance: Event):\n if self.context[\"request\"].member and len(instance.member_registration) > 0:\n@@ -101,5 +105,18 @@\n member = self.context[\"request\"].member\n return services.event_permissions(member, instance, registration_prefetch=True)\n \n+ def _url(self, instance: Event):\n+ if instance.slug is None:\n+ return reverse(\n+ \"events:event\",\n+ kwargs={\"pk\": instance.pk},\n+ request=self.context[\"request\"],\n+ )\n+ return reverse(\n+ \"events:event\",\n+ kwargs={\"slug\": instance.slug},\n+ request=self.context[\"request\"],\n+ )\n+\n def _maps_url(self, instance):\n return create_google_maps_url(instance.map_location, zoom=13, size=\"450x250\")\ndiff --git a/website/events/api/v2/urls.py b/website/events/api/v2/urls.py\n--- a/website/events/api/v2/urls.py\n+++ b/website/events/api/v2/urls.py\n@@ -21,6 +21,11 @@\n EventDetailView.as_view(),\n name=\"event-detail\",\n ),\n+ path(\n+ \"events/<slug:slug>/\",\n+ EventDetailView.as_view(lookup_field=\"slug\"),\n+ name=\"event-detail\",\n+ ),\n path(\n \"events/<int:pk>/registrations/\",\n EventRegistrationsView.as_view(),\n", "issue": "Add API endpoint for event slugs\n### Is your feature request related to a problem? 
Please describe.\r\nFor the app we want to get events based on their slug, this is currently not possible.\r\n\r\n### Describe the solution you'd like\r\nAdd an API endpoint for event slugs.\r\n\n", "before_files": [{"content": "\"\"\"Events app API v2 urls.\"\"\"\nfrom django.urls import path\n\nfrom events.api.v2.views import (\n EventDetailView,\n EventListView,\n EventRegistrationDetailView,\n EventRegistrationFieldsView,\n EventRegistrationsView,\n ExternalEventDetailView,\n ExternalEventListView,\n MarkPresentAPIView,\n)\n\napp_name = \"events\"\n\nurlpatterns = [\n path(\"events/\", EventListView.as_view(), name=\"events-list\"),\n path(\n \"events/<int:pk>/\",\n EventDetailView.as_view(),\n name=\"event-detail\",\n ),\n path(\n \"events/<int:pk>/registrations/\",\n EventRegistrationsView.as_view(),\n name=\"event-registrations\",\n ),\n path(\n \"events/<int:event_id>/registrations/<int:pk>/\",\n EventRegistrationDetailView.as_view(),\n name=\"event-registration-detail\",\n ),\n path(\n \"events/<int:event_id>/registrations/<int:registration_id>/fields/\",\n EventRegistrationFieldsView.as_view(),\n name=\"event-registration-fields\",\n ),\n path(\n \"events/<int:pk>/mark-present/<uuid:token>/\",\n MarkPresentAPIView.as_view(),\n name=\"mark-present\",\n ),\n path(\n \"events/external/\", ExternalEventListView.as_view(), name=\"external-events-list\"\n ),\n path(\n \"events/external/<int:pk>/\",\n ExternalEventDetailView.as_view(),\n name=\"external-event-detail\",\n ),\n]\n", "path": "website/events/api/v2/urls.py"}, {"content": "from rest_framework import serializers\n\nfrom activemembers.api.v2.serializers.member_group import MemberGroupSerializer\nfrom documents.api.v2.serializers.document import DocumentSerializer\nfrom events import services\nfrom events.api.v2.serializers.event_registration import EventRegistrationSerializer\nfrom events.models import Event\nfrom payments.api.v2.serializers.payment_amount import PaymentAmountSerializer\nfrom thaliawebsite.api.v2.serializers import CleanedHTMLSerializer\nfrom thaliawebsite.api.v2.serializers.cleaned_model_serializer import (\n CleanedModelSerializer,\n)\nfrom utils.snippets import create_google_maps_url\n\n\nclass EventSerializer(CleanedModelSerializer):\n \"\"\"Serializer for events.\"\"\"\n\n class Meta:\n model = Event\n fields = (\n \"pk\",\n \"title\",\n \"description\",\n \"caption\",\n \"start\",\n \"end\",\n \"category\",\n \"registration_start\",\n \"registration_end\",\n \"cancel_deadline\",\n \"optional_registrations\",\n \"location\",\n \"price\",\n \"fine\",\n \"num_participants\",\n \"max_participants\",\n \"no_registration_message\",\n \"registration_status\",\n \"cancel_too_late_message\",\n \"has_fields\",\n \"food_event\",\n \"maps_url\",\n \"user_permissions\",\n \"user_registration\",\n \"organisers\",\n \"documents\",\n )\n\n description = CleanedHTMLSerializer()\n organisers = MemberGroupSerializer(many=True)\n user_registration = serializers.SerializerMethodField(\"_user_registration\")\n num_participants = serializers.SerializerMethodField(\"_num_participants\")\n maps_url = serializers.SerializerMethodField(\"_maps_url\")\n registration_status = serializers.SerializerMethodField(\"_registration_status\")\n price = PaymentAmountSerializer()\n fine = PaymentAmountSerializer()\n documents = DocumentSerializer(many=True)\n user_permissions = serializers.SerializerMethodField(\"_user_permissions\")\n\n def _user_registration(self, instance: Event):\n if self.context[\"request\"].member and 
len(instance.member_registration) > 0:\n registration = instance.member_registration[-1]\n return EventRegistrationSerializer(\n registration,\n context=self.context,\n fields=(\n \"pk\",\n \"present\",\n \"queue_position\",\n \"is_cancelled\",\n \"is_late_cancellation\",\n \"date\",\n \"payment\",\n ),\n ).data\n return None\n\n def _registration_status(self, instance: Event):\n if self.context[\"request\"].member and len(instance.member_registration) > 0:\n registration = instance.member_registration[-1]\n else:\n registration = None\n status = services.registration_status(\n instance, registration, self.context[\"request\"].member\n )\n cancel_status = services.cancel_status(instance, registration)\n\n status_str = services.registration_status_string(status, instance, registration)\n cancel_str = services.cancel_info_string(instance, cancel_status, status)\n if services.show_cancel_status(status) and cancel_str != \"\":\n return f\"{status_str} {cancel_str}\"\n return f\"{status_str}\"\n\n def _num_participants(self, instance: Event):\n if instance.max_participants:\n return min(instance.participant_count, instance.max_participants)\n return instance.participant_count\n\n def _user_permissions(self, instance):\n member = self.context[\"request\"].member\n return services.event_permissions(member, instance, registration_prefetch=True)\n\n def _maps_url(self, instance):\n return create_google_maps_url(instance.map_location, zoom=13, size=\"450x250\")\n", "path": "website/events/api/v2/serializers/event.py"}]} | 1,977 | 525 |
gh_patches_debug_33090 | rasdani/github-patches | git_diff | psychopy__psychopy-947 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Polygon setEdges does not update the ShapeStim vertices
If I make a polygon object:
```python
poly = visual.Polygon(win, edges=3, lineWidth=3, radius=3)
poly.draw()
win.flip()
```
and then want to change the shape on the fly in code, I would have thought I would do:
```python
poly.setEdges(5)
poly.draw()
win.flip()
```
This doesn't actually change the shape that gets shown, but the following code does:
```python
poly.setEdges(5)
poly.setVertices(poly.vertices)
poly.draw()
win.flip()
```
I think this is because `poly.setEdges` calls `poly._calcVertices`, which sets the `poly.vertices` attribute, but `poly.setEdges` doesn't pass the new array to the `poly.setVertices` method, which I gather is inherited from `ShapeStim`.
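If that is right, the fix I would expect is for the `edges` setter to push the recomputed vertices through to `ShapeStim`, exactly as the `radius` setter already does. A minimal sketch of the change inside `Polygon` (not standalone code):
```python
@attributeSetter
def edges(self, edges):
    """Int or float. Number of edges of the polygon."""
    self.__dict__['edges'] = edges
    self._calcVertices()
    # hypothetical one-line addition, mirroring what the radius setter does:
    # hand the freshly computed vertex array to ShapeStim
    self.setVertices(self.vertices, log=False)
```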
</issue>
<code>
[start of psychopy/visual/polygon.py]
1
2 #!/usr/bin/env python2
3
4 '''Creates a regular polygon (triangles, pentagrams, ...)
5 as a special case of a :class:`~psychopy.visual.ShapeStim`'''
6
7 # Part of the PsychoPy library
8 # Copyright (C) 2015 Jonathan Peirce
9 # Distributed under the terms of the GNU General Public License (GPL).
10
11 import psychopy # so we can get the __path__
12
13 from psychopy.visual.shape import ShapeStim
14 from psychopy.tools.attributetools import attributeSetter, setAttribute
15
16 import numpy
17
18
19 class Polygon(ShapeStim):
20 """Creates a regular polygon (triangles, pentagrams, ...) as a special case of a :class:`~psychopy.visual.ShapeStim`
21
22 (New in version 1.72.00)
23 """
24 def __init__(self, win, edges=3, radius=.5, **kwargs):
25 """
26 Polygon accepts all input parameters that :class:`~psychopy.visual.ShapeStim` accepts, except for vertices and closeShape.
27 """
28 #what local vars are defined (these are the init params) for use by __repr__
29 self._initParams = dir()
30 self._initParams.remove('self')
31 #kwargs isn't a parameter, but a list of params
32 self._initParams.remove('kwargs')
33 self._initParams.extend(kwargs)
34 self.autoLog = False #but will be changed if needed at end of init
35 self.__dict__['edges'] = edges
36 self.radius = numpy.asarray(radius)
37 self._calcVertices()
38 kwargs['closeShape'] = True # Make sure nobody messes around here
39 kwargs['vertices'] = self.vertices
40 super(Polygon, self).__init__(win, **kwargs)
41
42 def _calcVertices(self):
43 d = numpy.pi*2/ self.edges
44 self.vertices = numpy.asarray([
45 numpy.asarray(
46 (numpy.sin(e*d), numpy.cos(e*d))
47 ) * self.radius
48 for e in xrange(int(round(self.edges)))
49 ])
50
51 @attributeSetter
52 def edges(self, edges):
53 """Int or float. Number of edges of the polygon. Floats are rounded to int.
54 :ref:`Operations <attrib-operations>` supported."""
55 self.__dict__['edges'] = edges
56 self._calcVertices()
57 def setEdges(self, edges, operation='', log=None):
58 """Usually you can use 'stim.attribute = value' syntax instead,
59 but use this method if you need to suppress the log message"""
60 setAttribute(self, 'edges', edges, log, operation)
61
62 @attributeSetter
63 def radius(self, radius):
64 """float, int, tuple, list or 2x1 array
65 Radius of the Polygon (distance from the center to the corners).
66 May be a -2tuple or list to stretch the polygon asymmetrically.
67
68 :ref:`Operations <attrib-operations>` supported.
69
70 Usually there's a setAttribute(value, log=False) method for each attribute. Use this if you want to disable logging."""
71 self.__dict__['radius'] = numpy.array(radius)
72 self._calcVertices()
73 self.setVertices(self.vertices, log=False)
74 def setRadius(self, radius, operation='', log=None):
75 """Usually you can use 'stim.attribute = value' syntax instead,
76 but use this method if you need to suppress the log message"""
77 setAttribute(self, 'radius', radius, log, operation)
[end of psychopy/visual/polygon.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/psychopy/visual/polygon.py b/psychopy/visual/polygon.py
--- a/psychopy/visual/polygon.py
+++ b/psychopy/visual/polygon.py
@@ -47,13 +47,14 @@
) * self.radius
for e in xrange(int(round(self.edges)))
])
-
+
@attributeSetter
def edges(self, edges):
"""Int or float. Number of edges of the polygon. Floats are rounded to int.
:ref:`Operations <attrib-operations>` supported."""
self.__dict__['edges'] = edges
self._calcVertices()
+ self.setVertices(self.vertices, log=False)
def setEdges(self, edges, operation='', log=None):
"""Usually you can use 'stim.attribute = value' syntax instead,
but use this method if you need to suppress the log message"""
@@ -66,7 +67,7 @@
May be a -2tuple or list to stretch the polygon asymmetrically.
:ref:`Operations <attrib-operations>` supported.
-
+
Usually there's a setAttribute(value, log=False) method for each attribute. Use this if you want to disable logging."""
self.__dict__['radius'] = numpy.array(radius)
self._calcVertices()
@@ -74,4 +75,4 @@
def setRadius(self, radius, operation='', log=None):
"""Usually you can use 'stim.attribute = value' syntax instead,
but use this method if you need to suppress the log message"""
- setAttribute(self, 'radius', radius, log, operation)
\ No newline at end of file
+ setAttribute(self, 'radius', radius, log, operation)
| {"golden_diff": "diff --git a/psychopy/visual/polygon.py b/psychopy/visual/polygon.py\n--- a/psychopy/visual/polygon.py\n+++ b/psychopy/visual/polygon.py\n@@ -47,13 +47,14 @@\n ) * self.radius\n for e in xrange(int(round(self.edges)))\n ])\n- \n+\n @attributeSetter\n def edges(self, edges):\n \"\"\"Int or float. Number of edges of the polygon. Floats are rounded to int.\n :ref:`Operations <attrib-operations>` supported.\"\"\"\n self.__dict__['edges'] = edges\n self._calcVertices()\n+ self.setVertices(self.vertices, log=False)\n def setEdges(self, edges, operation='', log=None):\n \"\"\"Usually you can use 'stim.attribute = value' syntax instead,\n but use this method if you need to suppress the log message\"\"\"\n@@ -66,7 +67,7 @@\n May be a -2tuple or list to stretch the polygon asymmetrically.\n \n :ref:`Operations <attrib-operations>` supported.\n- \n+\n Usually there's a setAttribute(value, log=False) method for each attribute. Use this if you want to disable logging.\"\"\"\n self.__dict__['radius'] = numpy.array(radius)\n self._calcVertices()\n@@ -74,4 +75,4 @@\n def setRadius(self, radius, operation='', log=None):\n \"\"\"Usually you can use 'stim.attribute = value' syntax instead,\n but use this method if you need to suppress the log message\"\"\"\n- setAttribute(self, 'radius', radius, log, operation)\n\\ No newline at end of file\n+ setAttribute(self, 'radius', radius, log, operation)\n", "issue": "Polygon setEdges does not update the ShapeStim vertices\nIf I make a polygon object:\n\n``` python\npoly = visual.Polygon(win, edges=3, lineWidth=3, radius=3)\npoly.draw()\nwin.flip()\n```\n\nand then want to change the shape on the fly in code, I would have though I would do:\n\n``` python\npoly.setEdges(5)\npoly.draw()\nwin.flip()\n```\n\nThis doesn't actually change the shape that gets shown though, but the following code does:\n\n``` python\npoly.setEdges(5)\npoly.setVertices(poly.vertices)\npoly.draw()\nwin.flip()\n```\n\nI think this is because `poly.setEdges` calls `poly._calcVertices` which sets the `poly.vertices` attribute, but `poly.setEdges` doesn't pass the new array to the `poly.setVertices` method, which I gather is inherited from `ShapeStim`.\n\n", "before_files": [{"content": "\n#!/usr/bin/env python2\n\n'''Creates a regular polygon (triangles, pentagrams, ...)\nas a special case of a :class:`~psychopy.visual.ShapeStim`'''\n\n# Part of the PsychoPy library\n# Copyright (C) 2015 Jonathan Peirce\n# Distributed under the terms of the GNU General Public License (GPL).\n\nimport psychopy # so we can get the __path__\n\nfrom psychopy.visual.shape import ShapeStim\nfrom psychopy.tools.attributetools import attributeSetter, setAttribute\n\nimport numpy\n\n\nclass Polygon(ShapeStim):\n \"\"\"Creates a regular polygon (triangles, pentagrams, ...) 
as a special case of a :class:`~psychopy.visual.ShapeStim`\n\n (New in version 1.72.00)\n \"\"\"\n def __init__(self, win, edges=3, radius=.5, **kwargs):\n \"\"\"\n Polygon accepts all input parameters that :class:`~psychopy.visual.ShapeStim` accepts, except for vertices and closeShape.\n \"\"\"\n #what local vars are defined (these are the init params) for use by __repr__\n self._initParams = dir()\n self._initParams.remove('self')\n #kwargs isn't a parameter, but a list of params\n self._initParams.remove('kwargs')\n self._initParams.extend(kwargs)\n self.autoLog = False #but will be changed if needed at end of init\n self.__dict__['edges'] = edges\n self.radius = numpy.asarray(radius)\n self._calcVertices()\n kwargs['closeShape'] = True # Make sure nobody messes around here\n kwargs['vertices'] = self.vertices\n super(Polygon, self).__init__(win, **kwargs)\n\n def _calcVertices(self):\n d = numpy.pi*2/ self.edges\n self.vertices = numpy.asarray([\n numpy.asarray(\n (numpy.sin(e*d), numpy.cos(e*d))\n ) * self.radius\n for e in xrange(int(round(self.edges)))\n ])\n \n @attributeSetter\n def edges(self, edges):\n \"\"\"Int or float. Number of edges of the polygon. Floats are rounded to int.\n :ref:`Operations <attrib-operations>` supported.\"\"\"\n self.__dict__['edges'] = edges\n self._calcVertices()\n def setEdges(self, edges, operation='', log=None):\n \"\"\"Usually you can use 'stim.attribute = value' syntax instead,\n but use this method if you need to suppress the log message\"\"\"\n setAttribute(self, 'edges', edges, log, operation)\n\n @attributeSetter\n def radius(self, radius):\n \"\"\"float, int, tuple, list or 2x1 array\n Radius of the Polygon (distance from the center to the corners).\n May be a -2tuple or list to stretch the polygon asymmetrically.\n\n :ref:`Operations <attrib-operations>` supported.\n \n Usually there's a setAttribute(value, log=False) method for each attribute. Use this if you want to disable logging.\"\"\"\n self.__dict__['radius'] = numpy.array(radius)\n self._calcVertices()\n self.setVertices(self.vertices, log=False)\n def setRadius(self, radius, operation='', log=None):\n \"\"\"Usually you can use 'stim.attribute = value' syntax instead,\n but use this method if you need to suppress the log message\"\"\"\n setAttribute(self, 'radius', radius, log, operation)", "path": "psychopy/visual/polygon.py"}]} | 1,612 | 375 |
gh_patches_debug_50470 | rasdani/github-patches | git_diff | cython__cython-4942 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Annotated attributes of cclass are not supporting pointers
<!--
**PLEASE READ THIS FIRST:**
- Do not use the bug and feature tracker for support requests. Use the `cython-users` mailing list instead.
- Did you search for similar issues already? Please do, it helps to save us precious time that we otherwise could not invest into development.
- Did you try the latest master branch or pre-release? It might already have what you want to report. Also see the [Changelog](https://github.com/cython/cython/blob/master/CHANGES.rst) regarding recent changes.
-->
**Describe the bug**
Compilation fails when an attribute of a cclass is declared using an annotated type containing a pointer.
**To Reproduce**
Following code:
```python
import cython
@cython.cclass
class Foo:
a: cython.pointer(cython.int)
def bar(self):
self.a = cython.NULL
```
fails during compilation with error:
```
$ cython -3 test.py
Error compiling Cython file:
------------------------------------------------------------
...
@cython.cclass
class Foo:
a: cython.pointer(cython.int)
def bar(self):
self.a = cython.NULL
^
------------------------------------------------------------
test.py:8:23: Cannot convert 'void *' to Python object
```
**Expected behavior**
Compilation should be successful.
**Environment (please complete the following information):**
- OS: Linux
- Python version: Python 3.9.2
- Cython version: master
**Additional context**
When `declare()` statement or `cython.p_int` type is used, compilation is successful:
```python
import cython
@cython.cclass
class Foo:
a = cython.declare(cython.pointer(cython.int))
def bar(self):
self.a = cython.NULL
```
```python
import cython
@cython.cclass
class Foo:
a: cython.p_int
def bar(self):
self.a = cython.NULL
```
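Concretely, once annotations accept pointer types, I would expect the tutorial's queue example to be writable like this (a sketch; it assumes the fix and a `cqueue` declaration file as in the docs):
```python
import cython
from cython.cimports import cqueue

@cython.cclass
class Queue:
    # currently this annotation is treated as a Python object, so pointer
    # assignments fail; with the fix it should declare a C-level attribute
    _c_queue: cython.pointer(cqueue.Queue)

    def __cinit__(self):
        self._c_queue = cqueue.queue_new()
```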
</issue>
<code>
[start of docs/examples/tutorial/clibraries/queue.py]
1 from cython.cimports import cqueue
2
3 @cython.cclass
4 class Queue:
5 _c_queue = cython.declare(cython.pointer(cqueue.Queue))
6
7 def __cinit__(self):
8 self._c_queue = cqueue.queue_new()
9
[end of docs/examples/tutorial/clibraries/queue.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/examples/tutorial/clibraries/queue.py b/docs/examples/tutorial/clibraries/queue.py
--- a/docs/examples/tutorial/clibraries/queue.py
+++ b/docs/examples/tutorial/clibraries/queue.py
@@ -2,7 +2,7 @@
@cython.cclass
class Queue:
- _c_queue = cython.declare(cython.pointer(cqueue.Queue))
+ _c_queue: cython.pointer(cqueue.Queue)
def __cinit__(self):
self._c_queue = cqueue.queue_new()
| {"golden_diff": "diff --git a/docs/examples/tutorial/clibraries/queue.py b/docs/examples/tutorial/clibraries/queue.py\n--- a/docs/examples/tutorial/clibraries/queue.py\n+++ b/docs/examples/tutorial/clibraries/queue.py\n@@ -2,7 +2,7 @@\n \n @cython.cclass\n class Queue:\n- _c_queue = cython.declare(cython.pointer(cqueue.Queue))\n+ _c_queue: cython.pointer(cqueue.Queue)\n \n def __cinit__(self):\n self._c_queue = cqueue.queue_new()\n", "issue": "[BUG] Annotated attributes of cclass are not supporting pointers\n<!--\r\n**PLEASE READ THIS FIRST:**\r\n- Do not use the bug and feature tracker for support requests. Use the `cython-users` mailing list instead.\r\n- Did you search for similar issues already? Please do, it helps to save us precious time that we otherwise could not invest into development.\r\n- Did you try the latest master branch or pre-release? It might already have what you want to report. Also see the [Changelog](https://github.com/cython/cython/blob/master/CHANGES.rst) regarding recent changes.\r\n-->\r\n\r\n**Describe the bug**\r\nThe compilation is failing, when attribute of cclass is declared using annotated type containing pointer.\r\n\r\n**To Reproduce**\r\nFollowing code:\r\n```python\r\nimport cython\r\n\r\[email protected]\r\nclass Foo:\r\n a: cython.pointer(cython.int)\r\n\r\n def bar(self):\r\n self.a = cython.NULL\r\n```\r\nfails during compilation with error:\r\n```\r\n$ cython -3 test.py\r\n\r\nError compiling Cython file:\r\n------------------------------------------------------------\r\n...\r\[email protected]\r\nclass Foo:\r\n a: cython.pointer(cython.int)\r\n\r\n def bar(self):\r\n self.a = cython.NULL\r\n ^\r\n------------------------------------------------------------\r\n\r\ntest.py:8:23: Cannot convert 'void *' to Python object\r\n```\r\n\r\n**Expected behavior**\r\nCompilation should be successfull.\r\n\r\n**Environment (please complete the following information):**\r\n - OS: Linux\r\n - Python version: Python 3.9.2\r\n - Cython version: master\r\n\r\n**Additional context**\r\nWhen `declare()` statement or `cython.p_int` type is used, compilation is successful:\r\n\r\n```python\r\nimport cython\r\n\r\[email protected]\r\nclass Foo:\r\n a = cython.declare(cython.pointer(cython.int))\r\n\r\n def bar(self):\r\n self.a = cython.NULL\r\n```\r\n\r\n```python\r\nimport cython\r\n\r\[email protected]\r\nclass Foo:\r\n a: cython.p_int\r\n\r\n def bar(self):\r\n self.a = cython.NULL\r\n```\r\n\n", "before_files": [{"content": "from cython.cimports import cqueue\n\[email protected]\nclass Queue:\n _c_queue = cython.declare(cython.pointer(cqueue.Queue))\n\n def __cinit__(self):\n self._c_queue = cqueue.queue_new()\n", "path": "docs/examples/tutorial/clibraries/queue.py"}]} | 1,041 | 115 |
gh_patches_debug_6285 | rasdani/github-patches | git_diff | encode__httpx-1503 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CertTypes `keyfile` and `password` should be Optional types.
`SSLContext.load_cert_chain` can take `None` as argument values ([docs](https://docs.python.org/3/library/ssl.html#ssl.SSLContext.load_cert_chain)), so I guess this:
https://github.com/encode/httpx/blob/c09e61d50c8f169187cada6dbf14b89c7763c63f/httpx/_types.py#L54
should be rewritten as follows:
```python
CertTypes = Union[str, Tuple[str, Optional[str]], Tuple[str, Optional[str], Optional[str]]]
```
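With the proposed definition, all of the following would type-check, matching what `load_cert_chain` accepts at runtime (the file names are placeholders):
```python
from httpx._types import CertTypes

cert_only: CertTypes = "client.pem"
cert_no_key: CertTypes = ("client.pem", None)  # keyfile omitted
cert_no_password: CertTypes = ("client.pem", "client.key", None)  # password omitted
```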
</issue>
<code>
[start of httpx/_types.py]
1 """
2 Type definitions for type checking purposes.
3 """
4
5 import ssl
6 from http.cookiejar import CookieJar
7 from typing import (
8 IO,
9 TYPE_CHECKING,
10 AsyncIterable,
11 Callable,
12 Dict,
13 Iterable,
14 List,
15 Mapping,
16 Optional,
17 Sequence,
18 Tuple,
19 Union,
20 )
21
22 if TYPE_CHECKING: # pragma: no cover
23 from ._auth import Auth # noqa: F401
24 from ._config import Proxy, Timeout # noqa: F401
25 from ._models import URL, Cookies, Headers, QueryParams, Request # noqa: F401
26
27
28 PrimitiveData = Optional[Union[str, int, float, bool]]
29
30 RawURL = Tuple[bytes, bytes, Optional[int], bytes]
31
32 URLTypes = Union["URL", str]
33
34 QueryParamTypes = Union[
35 "QueryParams",
36 Mapping[str, Union[PrimitiveData, Sequence[PrimitiveData]]],
37 List[Tuple[str, PrimitiveData]],
38 Tuple[Tuple[str, PrimitiveData], ...],
39 str,
40 bytes,
41 None,
42 ]
43
44 HeaderTypes = Union[
45 "Headers",
46 Dict[str, str],
47 Dict[bytes, bytes],
48 Sequence[Tuple[str, str]],
49 Sequence[Tuple[bytes, bytes]],
50 ]
51
52 CookieTypes = Union["Cookies", CookieJar, Dict[str, str], List[Tuple[str, str]]]
53
54 CertTypes = Union[str, Tuple[str, str], Tuple[str, str, str]]
55 VerifyTypes = Union[str, bool, ssl.SSLContext]
56 TimeoutTypes = Union[
57 Optional[float],
58 Tuple[Optional[float], Optional[float], Optional[float], Optional[float]],
59 "Timeout",
60 ]
61 ProxiesTypes = Union[URLTypes, "Proxy", Dict[URLTypes, Union[None, URLTypes, "Proxy"]]]
62
63 AuthTypes = Union[
64 Tuple[Union[str, bytes], Union[str, bytes]],
65 Callable[["Request"], "Request"],
66 "Auth",
67 None,
68 ]
69
70 ByteStream = Union[Iterable[bytes], AsyncIterable[bytes]]
71 RequestContent = Union[str, bytes, ByteStream]
72 ResponseContent = Union[str, bytes, ByteStream]
73
74 RequestData = dict
75
76 FileContent = Union[IO[str], IO[bytes], str, bytes]
77 FileTypes = Union[
78 # file (or text)
79 FileContent,
80 # (filename, file (or text))
81 Tuple[Optional[str], FileContent],
82 # (filename, file (or text), content_type)
83 Tuple[Optional[str], FileContent, Optional[str]],
84 ]
85 RequestFiles = Union[Mapping[str, FileTypes], Sequence[Tuple[str, FileTypes]]]
86
[end of httpx/_types.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/httpx/_types.py b/httpx/_types.py
--- a/httpx/_types.py
+++ b/httpx/_types.py
@@ -51,7 +51,14 @@
CookieTypes = Union["Cookies", CookieJar, Dict[str, str], List[Tuple[str, str]]]
-CertTypes = Union[str, Tuple[str, str], Tuple[str, str, str]]
+CertTypes = Union[
+ # certfile
+ str,
+ # (certfile, keyfile)
+ Tuple[str, Optional[str]],
+ # (certfile, keyfile, password)
+ Tuple[str, Optional[str], Optional[str]],
+]
VerifyTypes = Union[str, bool, ssl.SSLContext]
TimeoutTypes = Union[
Optional[float],
| {"golden_diff": "diff --git a/httpx/_types.py b/httpx/_types.py\n--- a/httpx/_types.py\n+++ b/httpx/_types.py\n@@ -51,7 +51,14 @@\n \n CookieTypes = Union[\"Cookies\", CookieJar, Dict[str, str], List[Tuple[str, str]]]\n \n-CertTypes = Union[str, Tuple[str, str], Tuple[str, str, str]]\n+CertTypes = Union[\n+ # certfile\n+ str,\n+ # (certfile, keyfile)\n+ Tuple[str, Optional[str]],\n+ # (certfile, keyfile, password)\n+ Tuple[str, Optional[str], Optional[str]],\n+]\n VerifyTypes = Union[str, bool, ssl.SSLContext]\n TimeoutTypes = Union[\n Optional[float],\n", "issue": "CertTypes `keyfile` and `password` should be Optional types.\n`SSLContext.load_cert_chain` can take `None` as arguments values ([docs](https://docs.python.org/3/library/ssl.html#ssl.SSLContext.load_cert_chain)) so I guess this:\r\nhttps://github.com/encode/httpx/blob/c09e61d50c8f169187cada6dbf14b89c7763c63f/httpx/_types.py#L54\r\nshould be rewritten as follows:\r\n```python\r\nCertTypes = Union[str, Tuple[str, Optional[str]], Tuple[str, Optional[str], Optional[str]]] \r\n```\n", "before_files": [{"content": "\"\"\"\nType definitions for type checking purposes.\n\"\"\"\n\nimport ssl\nfrom http.cookiejar import CookieJar\nfrom typing import (\n IO,\n TYPE_CHECKING,\n AsyncIterable,\n Callable,\n Dict,\n Iterable,\n List,\n Mapping,\n Optional,\n Sequence,\n Tuple,\n Union,\n)\n\nif TYPE_CHECKING: # pragma: no cover\n from ._auth import Auth # noqa: F401\n from ._config import Proxy, Timeout # noqa: F401\n from ._models import URL, Cookies, Headers, QueryParams, Request # noqa: F401\n\n\nPrimitiveData = Optional[Union[str, int, float, bool]]\n\nRawURL = Tuple[bytes, bytes, Optional[int], bytes]\n\nURLTypes = Union[\"URL\", str]\n\nQueryParamTypes = Union[\n \"QueryParams\",\n Mapping[str, Union[PrimitiveData, Sequence[PrimitiveData]]],\n List[Tuple[str, PrimitiveData]],\n Tuple[Tuple[str, PrimitiveData], ...],\n str,\n bytes,\n None,\n]\n\nHeaderTypes = Union[\n \"Headers\",\n Dict[str, str],\n Dict[bytes, bytes],\n Sequence[Tuple[str, str]],\n Sequence[Tuple[bytes, bytes]],\n]\n\nCookieTypes = Union[\"Cookies\", CookieJar, Dict[str, str], List[Tuple[str, str]]]\n\nCertTypes = Union[str, Tuple[str, str], Tuple[str, str, str]]\nVerifyTypes = Union[str, bool, ssl.SSLContext]\nTimeoutTypes = Union[\n Optional[float],\n Tuple[Optional[float], Optional[float], Optional[float], Optional[float]],\n \"Timeout\",\n]\nProxiesTypes = Union[URLTypes, \"Proxy\", Dict[URLTypes, Union[None, URLTypes, \"Proxy\"]]]\n\nAuthTypes = Union[\n Tuple[Union[str, bytes], Union[str, bytes]],\n Callable[[\"Request\"], \"Request\"],\n \"Auth\",\n None,\n]\n\nByteStream = Union[Iterable[bytes], AsyncIterable[bytes]]\nRequestContent = Union[str, bytes, ByteStream]\nResponseContent = Union[str, bytes, ByteStream]\n\nRequestData = dict\n\nFileContent = Union[IO[str], IO[bytes], str, bytes]\nFileTypes = Union[\n # file (or text)\n FileContent,\n # (filename, file (or text))\n Tuple[Optional[str], FileContent],\n # (filename, file (or text), content_type)\n Tuple[Optional[str], FileContent, Optional[str]],\n]\nRequestFiles = Union[Mapping[str, FileTypes], Sequence[Tuple[str, FileTypes]]]\n", "path": "httpx/_types.py"}]} | 1,410 | 171 |
gh_patches_debug_14526 | rasdani/github-patches | git_diff | pfnet__pytorch-pfn-extras-372 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Typing: ManualScheduleTrigger `points` should accept `int`
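For example, mypy rejects this perfectly valid call, because `List` is invariant and a `List[int]` is not accepted where `List[float]` is expected (a minimal repro; the exact error wording may differ):
```python
from typing import List

from pytorch_pfn_extras.training.triggers.manual_schedule_trigger import (
    ManualScheduleTrigger,
)

points: List[int] = [1000, 2000, 3000]
trigger = ManualScheduleTrigger(points, 'iteration')
# mypy: Argument 1 to "ManualScheduleTrigger" has incompatible type
#       "List[int]"; expected "Union[float, List[float]]"
```
Annotating `points` as `Union[float, Sequence[float]]` would accept it, since `Sequence` is covariant and mypy promotes `int` to `float` in covariant positions.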
</issue>
<code>
[start of pytorch_pfn_extras/training/triggers/manual_schedule_trigger.py]
1 # mypy: ignore-errors
2
3 from typing import List, Union, TYPE_CHECKING
4
5 from pytorch_pfn_extras.training import trigger
6
7
8 if TYPE_CHECKING:
9 from pytorch_pfn_extras.training.manager import _BaseExtensionsManager
10 from pytorch_pfn_extras.training._trigger_util import UnitLiteral
11
12
13 class ManualScheduleTrigger(trigger.Trigger):
14
15 """Trigger invoked at specified point(s) of iterations or epochs.
16
17 This trigger accepts iterations or epochs indicated by given point(s).
18 There are two ways to specify the point(s): iteration and epoch.
19 ``iteration`` means the number of updates, while ``epoch`` means the number
20 of sweeps over the training dataset. Fractional values are allowed
21 if the point is a number of epochs; the trigger uses the ``iteration``
22 and ``epoch_detail`` attributes defined by the manager.
23
24 Args:
25 points (int, float, or list of int or float): time of the trigger.
26 Must be an integer or list of integer if unit is ``'iteration'``.
27 unit (str): Unit of the time specified by ``points``. It must be
28 either ``'iteration'`` or ``'epoch'``.
29
30 """
31
32 def __init__(self, points: Union[float, List[float]], unit: 'UnitLiteral'):
33 if unit not in ('epoch', 'iteration'):
34 raise ValueError(
35 'Trigger unit must be either \'epoch\' or \'iteration\'.')
36
37 self.points = (points if isinstance(points, list) else [points])
38 self.unit = unit
39
40 def __call__(self, manager: '_BaseExtensionsManager') -> bool:
41 """Decides whether the extension should be called on this iteration.
42
43 Args:
44 manager (~pytorch_pfn_extras.training.ExtensionsManager):
45 Manager object that this trigger is associated with.
46 The iteration information in this manager is used to
47 determine if the trigger should fire.
48
49 Returns:
50 bool: True if the corresponding extension should be invoked in this
51 iteration.
52
53 """
54 fire = self.may_fire(manager.iteration, manager._iters_per_epoch)
55 return fire
56
57 def may_fire(self, iteration: int, epoch_length: int) -> bool:
58 if self.unit == 'epoch':
59 fire = any(
60 int(p * epoch_length) == iteration for p in self.points)
61 else:
62 fire = any(p == iteration for p in self.points)
63 return fire
64
[end of pytorch_pfn_extras/training/triggers/manual_schedule_trigger.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pytorch_pfn_extras/training/triggers/manual_schedule_trigger.py b/pytorch_pfn_extras/training/triggers/manual_schedule_trigger.py
--- a/pytorch_pfn_extras/training/triggers/manual_schedule_trigger.py
+++ b/pytorch_pfn_extras/training/triggers/manual_schedule_trigger.py
@@ -1,6 +1,6 @@
# mypy: ignore-errors
-from typing import List, Union, TYPE_CHECKING
+from typing import Sequence, Union, TYPE_CHECKING
from pytorch_pfn_extras.training import trigger
@@ -29,7 +29,7 @@
"""
- def __init__(self, points: Union[float, List[float]], unit: 'UnitLiteral'):
+ def __init__(self, points: Union[float, Sequence[float]], unit: 'UnitLiteral'):
if unit not in ('epoch', 'iteration'):
raise ValueError(
'Trigger unit must be either \'epoch\' or \'iteration\'.')
| {"golden_diff": "diff --git a/pytorch_pfn_extras/training/triggers/manual_schedule_trigger.py b/pytorch_pfn_extras/training/triggers/manual_schedule_trigger.py\n--- a/pytorch_pfn_extras/training/triggers/manual_schedule_trigger.py\n+++ b/pytorch_pfn_extras/training/triggers/manual_schedule_trigger.py\n@@ -1,6 +1,6 @@\n # mypy: ignore-errors\n \n-from typing import List, Union, TYPE_CHECKING\n+from typing import Sequence, Union, TYPE_CHECKING\n \n from pytorch_pfn_extras.training import trigger\n \n@@ -29,7 +29,7 @@\n \n \"\"\"\n \n- def __init__(self, points: Union[float, List[float]], unit: 'UnitLiteral'):\n+ def __init__(self, points: Union[float, Sequence[float]], unit: 'UnitLiteral'):\n if unit not in ('epoch', 'iteration'):\n raise ValueError(\n 'Trigger unit must be either \\'epoch\\' or \\'iteration\\'.')\n", "issue": "Typing: ManualScheduleTrigger `points` should accept `int`\n\n", "before_files": [{"content": "# mypy: ignore-errors\n\nfrom typing import List, Union, TYPE_CHECKING\n\nfrom pytorch_pfn_extras.training import trigger\n\n\nif TYPE_CHECKING:\n from pytorch_pfn_extras.training.manager import _BaseExtensionsManager\n from pytorch_pfn_extras.training._trigger_util import UnitLiteral\n\n\nclass ManualScheduleTrigger(trigger.Trigger):\n\n \"\"\"Trigger invoked at specified point(s) of iterations or epochs.\n\n This trigger accepts iterations or epochs indicated by given point(s).\n There are two ways to specify the point(s): iteration and epoch.\n ``iteration`` means the number of updates, while ``epoch`` means the number\n of sweeps over the training dataset. Fractional values are allowed\n if the point is a number of epochs; the trigger uses the ``iteration``\n and ``epoch_detail`` attributes defined by the manager.\n\n Args:\n points (int, float, or list of int or float): time of the trigger.\n Must be an integer or list of integer if unit is ``'iteration'``.\n unit (str): Unit of the time specified by ``points``. It must be\n either ``'iteration'`` or ``'epoch'``.\n\n \"\"\"\n\n def __init__(self, points: Union[float, List[float]], unit: 'UnitLiteral'):\n if unit not in ('epoch', 'iteration'):\n raise ValueError(\n 'Trigger unit must be either \\'epoch\\' or \\'iteration\\'.')\n\n self.points = (points if isinstance(points, list) else [points])\n self.unit = unit\n\n def __call__(self, manager: '_BaseExtensionsManager') -> bool:\n \"\"\"Decides whether the extension should be called on this iteration.\n\n Args:\n manager (~pytorch_pfn_extras.training.ExtensionsManager):\n Manager object that this trigger is associated with.\n The iteration information in this manager is used to\n determine if the trigger should fire.\n\n Returns:\n bool: True if the corresponding extension should be invoked in this\n iteration.\n\n \"\"\"\n fire = self.may_fire(manager.iteration, manager._iters_per_epoch)\n return fire\n\n def may_fire(self, iteration: int, epoch_length: int) -> bool:\n if self.unit == 'epoch':\n fire = any(\n int(p * epoch_length) == iteration for p in self.points)\n else:\n fire = any(p == iteration for p in self.points)\n return fire\n", "path": "pytorch_pfn_extras/training/triggers/manual_schedule_trigger.py"}]} | 1,217 | 211 |
gh_patches_debug_21898 | rasdani/github-patches | git_diff | falconry__falcon-2008 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unclear which `HTTPError` was instantiated from `deprecated_args()` warnings
In Falcon 3.0.x, instantiating `HTTPError` or its subclasses with positional arguments (except, of course, the allowed ones) generates a `DeprecatedWarning` via the `deprecated_args()` decorator.
However, it is unclear from the warning which class/function was invoked; it just says "calls [with more than N] positional args are deprecated". Brought up by @laurent-chriqui (see the linked PR).
Ideally, as a developer, I would like the warning to read along the lines of
```
DeprecatedWarning: Calls to HTTPNotFound.__init__(...) with positional args are deprecated. Please specify them as keyword arguments instead.
```
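For concreteness, here is a minimal, self-contained repro against the decorator itself (the class is a made-up stand-in for `HTTPError`):
```python
import warnings

from falcon.util.deprecation import deprecated_args

class FakeError:
    @deprecated_args(allowed_positional=1)  # status may stay positional
    def __init__(self, status, title=None, description=None):
        pass

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    FakeError("404 Not Found", "Not Found")  # title passed positionally

print(caught[0].message)
# Calls with more than 1 positional args are deprecated.
# Please specify them as keyword arguments instead.
```
Nothing in that message identifies `FakeError.__init__` as the call site.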
</issue>
<code>
[start of falcon/util/deprecation.py]
1 # Copyright 2013 by Rackspace Hosting, Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Miscellaneous deprecation utilities.
16
17 This module provides decorators to mark functions and classes as deprecated.
18 """
19
20 import functools
21 import warnings
22
23
24 __all__ = (
25 'DeprecatedWarning',
26 'deprecated',
27 'deprecated_args',
28 )
29
30
31 # NOTE(kgriffs): We don't want our deprecations to be ignored by default,
32 # so create our own type.
33 #
34 # TODO(kgriffs): Revisit this decision if users complain.
35 class DeprecatedWarning(UserWarning):
36 pass
37
38
39 def deprecated(instructions, is_property=False, method_name=None):
40 """Flag a method as deprecated.
41
42 This function returns a decorator which can be used to mark deprecated
43 functions. Applying this decorator will result in a warning being
44 emitted when the function is used.
45
46 Args:
47 instructions (str): Specific guidance for the developer, e.g.:
48 'Please migrate to add_proxy(...)'.
49 is_property (bool): If the deprecated object is a property. It
50 will omit the ``(...)`` from the generated documentation.
51 method_name (str, optional): Set to override the name of the
52 deprecated function or property in the generated
53 documentation (default ``None``). This is useful when
54 decorating an alias that carries the target's ``__name__``.
55
56 """
57
58 def decorator(func):
59
60 object_name = 'property' if is_property else 'function'
61 post_name = '' if is_property else '(...)'
62 message = 'Call to deprecated {} {}{}. {}'.format(
63 object_name, method_name or func.__name__, post_name, instructions
64 )
65
66 @functools.wraps(func)
67 def wrapper(*args, **kwargs):
68 warnings.warn(message, category=DeprecatedWarning, stacklevel=2)
69
70 return func(*args, **kwargs)
71
72 return wrapper
73
74 return decorator
75
76
77 def deprecated_args(*, allowed_positional, is_method=True):
78 """Flag a method call with positional args as deprecated.
79
80 Keyword Args:
81 allowed_positional (int): Number of allowed positional arguments
82 is_method (bool, optional): The decorated function is a method. Will
83 add one to the number of allowed positional args to account for
84 ``self``. Defaults to True.
85 """
86
87 template = (
88 'Calls with{} positional args are deprecated.'
89 ' Please specify them as keyword arguments instead.'
90 )
91 text = ' more than {}'.format(allowed_positional) if allowed_positional else ''
92 warn_text = template.format(text)
93 if is_method:
94 allowed_positional += 1
95
96 def deprecated_args(fn):
97 @functools.wraps(fn)
98 def wraps(*args, **kwargs):
99 if len(args) > allowed_positional:
100 warnings.warn(warn_text, DeprecatedWarning, stacklevel=2)
101 return fn(*args, **kwargs)
102
103 return wraps
104
105 return deprecated_args
106
[end of falcon/util/deprecation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/falcon/util/deprecation.py b/falcon/util/deprecation.py
--- a/falcon/util/deprecation.py
+++ b/falcon/util/deprecation.py
@@ -85,11 +85,11 @@
"""
template = (
- 'Calls with{} positional args are deprecated.'
+ 'Calls to {{fn}}(...) with{arg_text} positional args are deprecated.'
' Please specify them as keyword arguments instead.'
)
text = ' more than {}'.format(allowed_positional) if allowed_positional else ''
- warn_text = template.format(text)
+ warn_text = template.format(arg_text=text)
if is_method:
allowed_positional += 1
@@ -97,7 +97,11 @@
@functools.wraps(fn)
def wraps(*args, **kwargs):
if len(args) > allowed_positional:
- warnings.warn(warn_text, DeprecatedWarning, stacklevel=2)
+ warnings.warn(
+ warn_text.format(fn=fn.__qualname__),
+ DeprecatedWarning,
+ stacklevel=2,
+ )
return fn(*args, **kwargs)
return wraps
| {"golden_diff": "diff --git a/falcon/util/deprecation.py b/falcon/util/deprecation.py\n--- a/falcon/util/deprecation.py\n+++ b/falcon/util/deprecation.py\n@@ -85,11 +85,11 @@\n \"\"\"\n \n template = (\n- 'Calls with{} positional args are deprecated.'\n+ 'Calls to {{fn}}(...) with{arg_text} positional args are deprecated.'\n ' Please specify them as keyword arguments instead.'\n )\n text = ' more than {}'.format(allowed_positional) if allowed_positional else ''\n- warn_text = template.format(text)\n+ warn_text = template.format(arg_text=text)\n if is_method:\n allowed_positional += 1\n \n@@ -97,7 +97,11 @@\n @functools.wraps(fn)\n def wraps(*args, **kwargs):\n if len(args) > allowed_positional:\n- warnings.warn(warn_text, DeprecatedWarning, stacklevel=2)\n+ warnings.warn(\n+ warn_text.format(fn=fn.__qualname__),\n+ DeprecatedWarning,\n+ stacklevel=2,\n+ )\n return fn(*args, **kwargs)\n \n return wraps\n", "issue": "Unclear which `HTTPError` was instantiated from `deprecated_args()` warnings\nIn Falcon 3.0.x, instantiating `HTTPError` or its subclasses with positional arguments (of course except the allowed ones) generates a `DeprecatedWarning` via the `deprecated_args()` decorator.\r\n\r\nHowever, it is unclear from the warning which class/function was invoked, it just says \"calls [with more than N] positional args are deprecated\". Brought up by @laurent-chriqui (see the linked PR).\r\n\r\nIdeally, as a developer, I would like the warning to read along the lines of\r\n```\r\nDeprecatedWarning: Calls to HTTPNotFound.__init__(...) with positional args are deprecated. Please specify them as keyword arguments instead.\r\n```\n", "before_files": [{"content": "# Copyright 2013 by Rackspace Hosting, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Miscellaneous deprecation utilities.\n\nThis module provides decorators to mark functions and classes as deprecated.\n\"\"\"\n\nimport functools\nimport warnings\n\n\n__all__ = (\n 'DeprecatedWarning',\n 'deprecated',\n 'deprecated_args',\n)\n\n\n# NOTE(kgriffs): We don't want our deprecations to be ignored by default,\n# so create our own type.\n#\n# TODO(kgriffs): Revisit this decision if users complain.\nclass DeprecatedWarning(UserWarning):\n pass\n\n\ndef deprecated(instructions, is_property=False, method_name=None):\n \"\"\"Flag a method as deprecated.\n\n This function returns a decorator which can be used to mark deprecated\n functions. Applying this decorator will result in a warning being\n emitted when the function is used.\n\n Args:\n instructions (str): Specific guidance for the developer, e.g.:\n 'Please migrate to add_proxy(...)'.\n is_property (bool): If the deprecated object is a property. It\n will omit the ``(...)`` from the generated documentation.\n method_name (str, optional): Set to override the name of the\n deprecated function or property in the generated\n documentation (default ``None``). 
This is useful when\n decorating an alias that carries the target's ``__name__``.\n\n \"\"\"\n\n def decorator(func):\n\n object_name = 'property' if is_property else 'function'\n post_name = '' if is_property else '(...)'\n message = 'Call to deprecated {} {}{}. {}'.format(\n object_name, method_name or func.__name__, post_name, instructions\n )\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n warnings.warn(message, category=DeprecatedWarning, stacklevel=2)\n\n return func(*args, **kwargs)\n\n return wrapper\n\n return decorator\n\n\ndef deprecated_args(*, allowed_positional, is_method=True):\n \"\"\"Flag a method call with positional args as deprecated.\n\n Keyword Args:\n allowed_positional (int): Number of allowed positional arguments\n is_method (bool, optional): The decorated function is a method. Will\n add one to the number of allowed positional args to account for\n ``self``. Defaults to True.\n \"\"\"\n\n template = (\n 'Calls with{} positional args are deprecated.'\n ' Please specify them as keyword arguments instead.'\n )\n text = ' more than {}'.format(allowed_positional) if allowed_positional else ''\n warn_text = template.format(text)\n if is_method:\n allowed_positional += 1\n\n def deprecated_args(fn):\n @functools.wraps(fn)\n def wraps(*args, **kwargs):\n if len(args) > allowed_positional:\n warnings.warn(warn_text, DeprecatedWarning, stacklevel=2)\n return fn(*args, **kwargs)\n\n return wraps\n\n return deprecated_args\n", "path": "falcon/util/deprecation.py"}]} | 1,654 | 256 |
gh_patches_debug_3352 | rasdani/github-patches | git_diff | streamlink__streamlink-3395 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Change `author_email` in setup.py
https://github.com/streamlink/streamlink/blob/08e582580f3411b2de2c368f8b0cc7108264f990/setup.py#L83
@gravyboat
you registered `[email protected]` a couple of years ago, right? Can this be used instead?
What's the email address of the `streamlink` account on PyPI?
https://pypi.org/user/streamlink/
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 import codecs
3 from os import environ, path
4 from sys import argv, path as sys_path
5
6 from setuptools import find_packages, setup
7
8 import versioneer
9
10
11 deps = [
12 "requests>=2.21.0,<3.0",
13 "isodate",
14 "websocket-client",
15 # Support for SOCKS proxies
16 "PySocks!=1.5.7,>=1.5.6",
17 ]
18
19 # for encrypted streams
20 if environ.get("STREAMLINK_USE_PYCRYPTO"):
21 deps.append("pycrypto")
22 else:
23 # this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6
24 deps.append("pycryptodome>=3.4.3,<4")
25
26 # for localization
27 if environ.get("STREAMLINK_USE_PYCOUNTRY"):
28 deps.append("pycountry")
29 else:
30 deps.append("iso-639")
31 deps.append("iso3166")
32
33 # When we build an egg for the Win32 bootstrap we don"t want dependency
34 # information built into it.
35 if environ.get("NO_DEPS"):
36 deps = []
37
38 this_directory = path.abspath(path.dirname(__file__))
39 srcdir = path.join(this_directory, "src/")
40 sys_path.insert(0, srcdir)
41
42 with codecs.open(path.join(this_directory, "README.md"), 'r', "utf8") as f:
43 long_description = f.read()
44
45
46 def is_wheel_for_windows():
47 if "bdist_wheel" in argv:
48 names = ["win32", "win-amd64", "cygwin"]
49 length = len(argv)
50 for pos in range(argv.index("bdist_wheel") + 1, length):
51 if argv[pos] == "--plat-name" and pos + 1 < length:
52 return argv[pos + 1] in names
53 elif argv[pos][:12] == "--plat-name=":
54 return argv[pos][12:] in names
55 return False
56
57
58 entry_points = {
59 "console_scripts": ["streamlink=streamlink_cli.main:main"]
60 }
61
62 if is_wheel_for_windows():
63 entry_points["gui_scripts"] = ["streamlinkw=streamlink_cli.main:main"]
64
65
66 setup(name="streamlink",
67 version=versioneer.get_version(),
68 cmdclass=versioneer.get_cmdclass(),
69 description="Streamlink is a command-line utility that extracts streams "
70 "from various services and pipes them into a video player of "
71 "choice.",
72 long_description=long_description,
73 long_description_content_type="text/markdown",
74 url="https://github.com/streamlink/streamlink",
75 project_urls={
76 "Documentation": "https://streamlink.github.io/",
77 "Tracker": "https://github.com/streamlink/streamlink/issues",
78 "Source": "https://github.com/streamlink/streamlink",
79 "Funding": "https://opencollective.com/streamlink"
80 },
81 author="Streamlink",
82 # temp until we have a mailing list / global email
83 author_email="[email protected]",
84 license="Simplified BSD",
85 packages=find_packages("src"),
86 package_dir={"": "src"},
87 entry_points=entry_points,
88 install_requires=deps,
89 test_suite="tests",
90 python_requires=">=3.6, <4",
91 classifiers=["Development Status :: 5 - Production/Stable",
92 "License :: OSI Approved :: BSD License",
93 "Environment :: Console",
94 "Intended Audience :: End Users/Desktop",
95 "Operating System :: POSIX",
96 "Operating System :: Microsoft :: Windows",
97 "Operating System :: MacOS",
98 "Programming Language :: Python :: 3",
99 "Programming Language :: Python :: 3 :: Only",
100 "Programming Language :: Python :: 3.6",
101 "Programming Language :: Python :: 3.7",
102 "Programming Language :: Python :: 3.8",
103 "Programming Language :: Python :: 3.9",
104 "Topic :: Internet :: WWW/HTTP",
105 "Topic :: Multimedia :: Sound/Audio",
106 "Topic :: Multimedia :: Video",
107 "Topic :: Utilities"])
108
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -80,7 +80,7 @@
},
author="Streamlink",
# temp until we have a mailing list / global email
- author_email="[email protected]",
+ author_email="[email protected]",
license="Simplified BSD",
packages=find_packages("src"),
package_dir={"": "src"},
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -80,7 +80,7 @@\n },\n author=\"Streamlink\",\n # temp until we have a mailing list / global email\n- author_email=\"[email protected]\",\n+ author_email=\"[email protected]\",\n license=\"Simplified BSD\",\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n", "issue": "Change `author_email` in setup.py\nhttps://github.com/streamlink/streamlink/blob/08e582580f3411b2de2c368f8b0cc7108264f990/setup.py#L83\r\n\r\n@gravyboat \r\nyou've registered `[email protected]` a couple of years ago, right? Can this be used instead?\r\n\r\nWhat's the email address of the `streamlink` account on pypi?\r\nhttps://pypi.org/user/streamlink/\n", "before_files": [{"content": "#!/usr/bin/env python\nimport codecs\nfrom os import environ, path\nfrom sys import argv, path as sys_path\n\nfrom setuptools import find_packages, setup\n\nimport versioneer\n\n\ndeps = [\n \"requests>=2.21.0,<3.0\",\n \"isodate\",\n \"websocket-client\",\n # Support for SOCKS proxies\n \"PySocks!=1.5.7,>=1.5.6\",\n]\n\n# for encrypted streams\nif environ.get(\"STREAMLINK_USE_PYCRYPTO\"):\n deps.append(\"pycrypto\")\nelse:\n # this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6\n deps.append(\"pycryptodome>=3.4.3,<4\")\n\n# for localization\nif environ.get(\"STREAMLINK_USE_PYCOUNTRY\"):\n deps.append(\"pycountry\")\nelse:\n deps.append(\"iso-639\")\n deps.append(\"iso3166\")\n\n# When we build an egg for the Win32 bootstrap we don\"t want dependency\n# information built into it.\nif environ.get(\"NO_DEPS\"):\n deps = []\n\nthis_directory = path.abspath(path.dirname(__file__))\nsrcdir = path.join(this_directory, \"src/\")\nsys_path.insert(0, srcdir)\n\nwith codecs.open(path.join(this_directory, \"README.md\"), 'r', \"utf8\") as f:\n long_description = f.read()\n\n\ndef is_wheel_for_windows():\n if \"bdist_wheel\" in argv:\n names = [\"win32\", \"win-amd64\", \"cygwin\"]\n length = len(argv)\n for pos in range(argv.index(\"bdist_wheel\") + 1, length):\n if argv[pos] == \"--plat-name\" and pos + 1 < length:\n return argv[pos + 1] in names\n elif argv[pos][:12] == \"--plat-name=\":\n return argv[pos][12:] in names\n return False\n\n\nentry_points = {\n \"console_scripts\": [\"streamlink=streamlink_cli.main:main\"]\n}\n\nif is_wheel_for_windows():\n entry_points[\"gui_scripts\"] = [\"streamlinkw=streamlink_cli.main:main\"]\n\n\nsetup(name=\"streamlink\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n description=\"Streamlink is a command-line utility that extracts streams \"\n \"from various services and pipes them into a video player of \"\n \"choice.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/streamlink/streamlink\",\n project_urls={\n \"Documentation\": \"https://streamlink.github.io/\",\n \"Tracker\": \"https://github.com/streamlink/streamlink/issues\",\n \"Source\": \"https://github.com/streamlink/streamlink\",\n \"Funding\": \"https://opencollective.com/streamlink\"\n },\n author=\"Streamlink\",\n # temp until we have a mailing list / global email\n author_email=\"[email protected]\",\n license=\"Simplified BSD\",\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n entry_points=entry_points,\n install_requires=deps,\n test_suite=\"tests\",\n python_requires=\">=3.6, <4\",\n classifiers=[\"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: BSD License\",\n 
\"Environment :: Console\",\n \"Intended Audience :: End Users/Desktop\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Multimedia :: Sound/Audio\",\n \"Topic :: Multimedia :: Video\",\n \"Topic :: Utilities\"])\n", "path": "setup.py"}]} | 1,758 | 103 |
gh_patches_debug_33078 | rasdani/github-patches | git_diff | getsentry__sentry-python-1641 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Django Signals integration breaks on partial objects for python <3.10
### How do you use Sentry?
Self-hosted/on-premise
### Version
1.9.9
### Steps to Reproduce
1. Use python older than 3.10.
2. Register a partial function as a signal handler.
### Expected Result
Signal is traced correctly.
### Actual Result
Exception is raised from `_get_receiver_name` function as `partial` objects don't have `__module__` before python 3.10 (and even there it's undocumented from what I can see).
It fails in our tests where we don't even register any signals so either Django itself or some kind of integration (Sentry?) registers such signals by default.
The whole signals integration is missing a `capture_internal_exceptions` context too I believe.
</issue>
<code>
[start of sentry_sdk/integrations/django/signals_handlers.py]
1 # -*- coding: utf-8 -*-
2 from __future__ import absolute_import
3
4 from django.dispatch import Signal
5
6 from sentry_sdk import Hub
7 from sentry_sdk._types import MYPY
8
9
10 if MYPY:
11 from typing import Any
12 from typing import Callable
13 from typing import List
14
15
16 def patch_signals():
17 # type: () -> None
18 """Patch django signal receivers to create a span"""
19
20 old_live_receivers = Signal._live_receivers
21
22 def _get_receiver_name(receiver):
23 # type: (Callable[..., Any]) -> str
24 name = receiver.__module__ + "."
25 if hasattr(receiver, "__name__"):
26 return name + receiver.__name__
27 return name + str(receiver)
28
29 def _sentry_live_receivers(self, sender):
30 # type: (Signal, Any) -> List[Callable[..., Any]]
31 hub = Hub.current
32 receivers = old_live_receivers(self, sender)
33
34 def sentry_receiver_wrapper(receiver):
35 # type: (Callable[..., Any]) -> Callable[..., Any]
36 def wrapper(*args, **kwargs):
37 # type: (Any, Any) -> Any
38 with hub.start_span(
39 op="django.signals",
40 description=_get_receiver_name(receiver),
41 ) as span:
42 span.set_data("signal", _get_receiver_name(receiver))
43 return receiver(*args, **kwargs)
44
45 return wrapper
46
47 for idx, receiver in enumerate(receivers):
48 receivers[idx] = sentry_receiver_wrapper(receiver)
49
50 return receivers
51
52 Signal._live_receivers = _sentry_live_receivers
53
[end of sentry_sdk/integrations/django/signals_handlers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sentry_sdk/integrations/django/signals_handlers.py b/sentry_sdk/integrations/django/signals_handlers.py
--- a/sentry_sdk/integrations/django/signals_handlers.py
+++ b/sentry_sdk/integrations/django/signals_handlers.py
@@ -13,19 +13,32 @@
from typing import List
+def _get_receiver_name(receiver):
+ # type: (Callable[..., Any]) -> str
+ name = ""
+
+ if hasattr(receiver, "__qualname__"):
+ name += receiver.__qualname__
+ elif hasattr(receiver, "__name__"): # Python 2.7 has no __qualname__
+ name += receiver.__name__
+
+ if (
+ name == ""
+ ): # certain functions (like partials) dont have a name so return the string representation
+ return str(receiver)
+
+ if hasattr(receiver, "__module__"): # prepend with module, if there is one
+ name = receiver.__module__ + "." + name
+
+ return name
+
+
def patch_signals():
# type: () -> None
"""Patch django signal receivers to create a span"""
old_live_receivers = Signal._live_receivers
- def _get_receiver_name(receiver):
- # type: (Callable[..., Any]) -> str
- name = receiver.__module__ + "."
- if hasattr(receiver, "__name__"):
- return name + receiver.__name__
- return name + str(receiver)
-
def _sentry_live_receivers(self, sender):
# type: (Signal, Any) -> List[Callable[..., Any]]
hub = Hub.current
@@ -35,11 +48,12 @@
# type: (Callable[..., Any]) -> Callable[..., Any]
def wrapper(*args, **kwargs):
# type: (Any, Any) -> Any
+ signal_name = _get_receiver_name(receiver)
with hub.start_span(
op="django.signals",
- description=_get_receiver_name(receiver),
+ description=signal_name,
) as span:
- span.set_data("signal", _get_receiver_name(receiver))
+ span.set_data("signal", signal_name)
return receiver(*args, **kwargs)
return wrapper
| {"golden_diff": "diff --git a/sentry_sdk/integrations/django/signals_handlers.py b/sentry_sdk/integrations/django/signals_handlers.py\n--- a/sentry_sdk/integrations/django/signals_handlers.py\n+++ b/sentry_sdk/integrations/django/signals_handlers.py\n@@ -13,19 +13,32 @@\n from typing import List\n \n \n+def _get_receiver_name(receiver):\n+ # type: (Callable[..., Any]) -> str\n+ name = \"\"\n+\n+ if hasattr(receiver, \"__qualname__\"):\n+ name += receiver.__qualname__\n+ elif hasattr(receiver, \"__name__\"): # Python 2.7 has no __qualname__\n+ name += receiver.__name__\n+\n+ if (\n+ name == \"\"\n+ ): # certain functions (like partials) dont have a name so return the string representation\n+ return str(receiver)\n+\n+ if hasattr(receiver, \"__module__\"): # prepend with module, if there is one\n+ name = receiver.__module__ + \".\" + name\n+\n+ return name\n+\n+\n def patch_signals():\n # type: () -> None\n \"\"\"Patch django signal receivers to create a span\"\"\"\n \n old_live_receivers = Signal._live_receivers\n \n- def _get_receiver_name(receiver):\n- # type: (Callable[..., Any]) -> str\n- name = receiver.__module__ + \".\"\n- if hasattr(receiver, \"__name__\"):\n- return name + receiver.__name__\n- return name + str(receiver)\n-\n def _sentry_live_receivers(self, sender):\n # type: (Signal, Any) -> List[Callable[..., Any]]\n hub = Hub.current\n@@ -35,11 +48,12 @@\n # type: (Callable[..., Any]) -> Callable[..., Any]\n def wrapper(*args, **kwargs):\n # type: (Any, Any) -> Any\n+ signal_name = _get_receiver_name(receiver)\n with hub.start_span(\n op=\"django.signals\",\n- description=_get_receiver_name(receiver),\n+ description=signal_name,\n ) as span:\n- span.set_data(\"signal\", _get_receiver_name(receiver))\n+ span.set_data(\"signal\", signal_name)\n return receiver(*args, **kwargs)\n \n return wrapper\n", "issue": "Django Signals integration breaks on partial objects for python <3.10\n### How do you use Sentry?\n\nSelf-hosted/on-premise\n\n### Version\n\n1.9.9\n\n### Steps to Reproduce\n\n1. Use python older than 3.10.\r\n2. Register a partial function as a signal handler.\n\n### Expected Result\n\nSignal is traced correctly.\n\n### Actual Result\n\nException is raised from `_get_receiver_name` function as `partial` objects don't have `__module__` before python 3.10 (and even there it's undocumented from what I can see).\r\n\r\nIt fails in our tests where we don't even register any signals so either Django itself or some kind of integration (Sentry?) 
registers such signals by default.\r\n\r\nThe whole signals integration is missing a `capture_internal_exceptions` context too I believe.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\n\nfrom django.dispatch import Signal\n\nfrom sentry_sdk import Hub\nfrom sentry_sdk._types import MYPY\n\n\nif MYPY:\n from typing import Any\n from typing import Callable\n from typing import List\n\n\ndef patch_signals():\n # type: () -> None\n \"\"\"Patch django signal receivers to create a span\"\"\"\n\n old_live_receivers = Signal._live_receivers\n\n def _get_receiver_name(receiver):\n # type: (Callable[..., Any]) -> str\n name = receiver.__module__ + \".\"\n if hasattr(receiver, \"__name__\"):\n return name + receiver.__name__\n return name + str(receiver)\n\n def _sentry_live_receivers(self, sender):\n # type: (Signal, Any) -> List[Callable[..., Any]]\n hub = Hub.current\n receivers = old_live_receivers(self, sender)\n\n def sentry_receiver_wrapper(receiver):\n # type: (Callable[..., Any]) -> Callable[..., Any]\n def wrapper(*args, **kwargs):\n # type: (Any, Any) -> Any\n with hub.start_span(\n op=\"django.signals\",\n description=_get_receiver_name(receiver),\n ) as span:\n span.set_data(\"signal\", _get_receiver_name(receiver))\n return receiver(*args, **kwargs)\n\n return wrapper\n\n for idx, receiver in enumerate(receivers):\n receivers[idx] = sentry_receiver_wrapper(receiver)\n\n return receivers\n\n Signal._live_receivers = _sentry_live_receivers\n", "path": "sentry_sdk/integrations/django/signals_handlers.py"}]} | 1,172 | 507 |
gh_patches_debug_43 | rasdani/github-patches | git_diff | python-discord__site-268 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Ugly prefix on all ID links.
Currently, all the headers created by the wiki have IDs prefixed with `wiki-toc`. As such, when you want to link to a header, the link will look something like https://pythondiscord.com/pages/contributing/site/#wiki-toc-development-environment.
It would be better if this simply said `#development-environment`, so let's change that.
</issue>
<code>
[start of pydis_site/__init__.py]
[end of pydis_site/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pydis_site/__init__.py b/pydis_site/__init__.py
--- a/pydis_site/__init__.py
+++ b/pydis_site/__init__.py
@@ -0,0 +1,4 @@
+from wiki.plugins.macros.mdx import toc
+
+# Remove the toc header prefix. There's no option for this, so we gotta monkey patch it.
+toc.HEADER_ID_PREFIX = ''
| {"golden_diff": "diff --git a/pydis_site/__init__.py b/pydis_site/__init__.py\n--- a/pydis_site/__init__.py\n+++ b/pydis_site/__init__.py\n@@ -0,0 +1,4 @@\n+from wiki.plugins.macros.mdx import toc\n+\n+# Remove the toc header prefix. There's no option for this, so we gotta monkey patch it.\n+toc.HEADER_ID_PREFIX = ''\n", "issue": "Ugly prefix on all ID links.\nCurrently, all the headers that are created by the wiki will have id's that are prefixed with `wiki-toc`. As such, when you want to link a header, the link will look something like https://pythondiscord.com/pages/contributing/site/#wiki-toc-development-environment.\r\n\r\nIt would be better if this simply said `#development-environment`, so let's change that.\n", "before_files": [{"content": "", "path": "pydis_site/__init__.py"}]} | 629 | 92 |
gh_patches_debug_8884 | rasdani/github-patches | git_diff | getsentry__sentry-3421 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Audit log crash on IPv6
Affected version: 8.5.0
I was just saving some preferences and faced this error:
```
DataError: ERREUR: syntaxe en entrée invalide pour le type inet : « 2001 »
LINE 1: [email protected]', 2, NULL, 1, NULL, 11, '2001', 'e...
^
SQL: INSERT INTO "sentry_auditlogentry" ("organization_id", "actor_label", "actor_id", "actor_key_id", "target_object", "target_user_id", "event", "ip_address", "data", "datetime") VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s) RETURNING "sentry_auditlogentry"."id"
```
It looks like IPv6 addresses are not handled properly.
I also reproduced the error by deleting a project.
Oddly, it wasn't triggered on project creation, where my IPv4 address was logged instead of my IPv6 address.
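A minimal sketch of the parsing behaviour described above (the header value is hypothetical; the logic mirrors the middleware code shown below):
```
# The middleware assumes "ip:port" and keeps only what precedes the first
# colon, which truncates an IPv6 address down to its first hextet.
forwarded_for = "2001:db8::1, 10.0.0.1"  # hypothetical X-Forwarded-For value
real_ip = forwarded_for.split(",")[0]    # take the first entry in the list
if ':' in real_ip:
    real_ip = real_ip.split(':', 1)[0]
print(real_ip)  # -> "2001", the invalid inet value from the SQL error above
```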
</issue>
<code>
[start of src/sentry/middleware/proxy.py]
1 from __future__ import absolute_import
2
3
4 class SetRemoteAddrFromForwardedFor(object):
5 def process_request(self, request):
6 try:
7 real_ip = request.META['HTTP_X_FORWARDED_FOR']
8 except KeyError:
9 pass
10 else:
11 # HTTP_X_FORWARDED_FOR can be a comma-separated list of IPs.
12 # Take just the first one.
13 real_ip = real_ip.split(",")[0]
14 if ':' in real_ip:
15 real_ip = real_ip.split(':', 1)[0]
16 request.META['REMOTE_ADDR'] = real_ip
17
18
19 class ContentLengthHeaderMiddleware(object):
20 """
21 Ensure that we have a proper Content-Length/Transfer-Encoding header
22 """
23
24 def process_response(self, request, response):
25 if 'Transfer-Encoding' in response or 'Content-Length' in response:
26 return response
27
28 if not response.streaming:
29 response['Content-Length'] = str(len(response.content))
30
31 return response
32
[end of src/sentry/middleware/proxy.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/sentry/middleware/proxy.py b/src/sentry/middleware/proxy.py
--- a/src/sentry/middleware/proxy.py
+++ b/src/sentry/middleware/proxy.py
@@ -11,7 +11,8 @@
# HTTP_X_FORWARDED_FOR can be a comma-separated list of IPs.
# Take just the first one.
real_ip = real_ip.split(",")[0]
- if ':' in real_ip:
+ if ':' in real_ip and '.' in real_ip:
+ # Strip the port number off of an IPv4 FORWARDED_FOR entry.
real_ip = real_ip.split(':', 1)[0]
request.META['REMOTE_ADDR'] = real_ip
| {"golden_diff": "diff --git a/src/sentry/middleware/proxy.py b/src/sentry/middleware/proxy.py\n--- a/src/sentry/middleware/proxy.py\n+++ b/src/sentry/middleware/proxy.py\n@@ -11,7 +11,8 @@\n # HTTP_X_FORWARDED_FOR can be a comma-separated list of IPs.\n # Take just the first one.\n real_ip = real_ip.split(\",\")[0]\n- if ':' in real_ip:\n+ if ':' in real_ip and '.' in real_ip:\n+ # Strip the port number off of an IPv4 FORWARDED_FOR entry.\n real_ip = real_ip.split(':', 1)[0]\n request.META['REMOTE_ADDR'] = real_ip\n", "issue": "Audit log crash on IPv6\nAffected version: 8.5.0\n\nI was just saving some preferences and faced this error:\n\n```\nDataError: ERREUR: syntaxe en entr\u00e9e invalide pour le type inet : \u00ab 2001 \u00bb\nLINE 1: [email protected]', 2, NULL, 1, NULL, 11, '2001', 'e...\n ^\n\nSQL: INSERT INTO \"sentry_auditlogentry\" (\"organization_id\", \"actor_label\", \"actor_id\", \"actor_key_id\", \"target_object\", \"target_user_id\", \"event\", \"ip_address\", \"data\", \"datetime\") VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s) RETURNING \"sentry_auditlogentry\".\"id\"\n```\n\nLooks like IPv6 addresses are not handled properly.\n\nAlso reproduced by deleting a project.\nOddly this wasn't triggered on project creation and my IPv4 was logged instead of my IPv6.\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\n\nclass SetRemoteAddrFromForwardedFor(object):\n def process_request(self, request):\n try:\n real_ip = request.META['HTTP_X_FORWARDED_FOR']\n except KeyError:\n pass\n else:\n # HTTP_X_FORWARDED_FOR can be a comma-separated list of IPs.\n # Take just the first one.\n real_ip = real_ip.split(\",\")[0]\n if ':' in real_ip:\n real_ip = real_ip.split(':', 1)[0]\n request.META['REMOTE_ADDR'] = real_ip\n\n\nclass ContentLengthHeaderMiddleware(object):\n \"\"\"\n Ensure that we have a proper Content-Length/Transfer-Encoding header\n \"\"\"\n\n def process_response(self, request, response):\n if 'Transfer-Encoding' in response or 'Content-Length' in response:\n return response\n\n if not response.streaming:\n response['Content-Length'] = str(len(response.content))\n\n return response\n", "path": "src/sentry/middleware/proxy.py"}]} | 1,030 | 155 |
gh_patches_debug_36367 | rasdani/github-patches | git_diff | searx__searx-335 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Flickr engine is broken
The HTML seems to have changed, but it seems there is a [REST API](https://api.flickr.com/services/rest?sort=relevance&parse_tags=1&content_type=7&extras=can_comment%2Ccount_comments%2Ccount_faves%2Cisfavorite%2Clicense%2Cmedia%2Cneeds_interstitial%2Cowner_name%2Cpath_alias%2Crealname%2Crotation%2Curl_c%2Curl_l%2Curl_m%2Curl_n%2Curl_q%2Curl_s%2Curl_sq%2Curl_t%2Curl_z&per_page=25&page=1&lang=en-US&rb=1&text=proxy&viewerNSID=&method=flickr.photos.search&csrf=&api_key=3e5918155f464baad83cce2efcf8b57e&format=json&hermes=1&hermesClient=1&reqId=rgb38n1&nojsoncallback=1)
Among the request parameters there is an api_key: I don't know how long it stays valid or under which conditions.
The call to this URL is triggered inside another minified JavaScript file.
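For reference, a hypothetical sketch of querying the REST endpoint above; every parameter name and the api_key are taken from the URL in this report, and it is unknown whether that key still works:
```
# Sketch only: reuses the api_key embedded in the URL above; its validity
# and rate limits are unknown.
import requests

params = {
    'method': 'flickr.photos.search',
    'api_key': '3e5918155f464baad83cce2efcf8b57e',
    'text': 'proxy',
    'format': 'json',
    'nojsoncallback': 1,
}
resp = requests.get('https://api.flickr.com/services/rest', params=params)
print(resp.json().get('photos', {}).get('total'))
```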
</issue>
<code>
[start of searx/engines/flickr_noapi.py]
1 #!/usr/bin/env python
2
3 """
4 Flickr (Images)
5
6 @website https://www.flickr.com
7 @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html)
8
9 @using-api no
10 @results HTML
11 @stable no
12 @parse url, title, thumbnail, img_src
13 """
14
15 from urllib import urlencode
16 from json import loads
17 import re
18 from searx.engines import logger
19
20
21 logger = logger.getChild('flickr-noapi')
22
23 categories = ['images']
24
25 url = 'https://www.flickr.com/'
26 search_url = url + 'search?{query}&page={page}'
27 photo_url = 'https://www.flickr.com/photos/{userid}/{photoid}'
28 regex = re.compile(r"\"search-photos-models\",\"photos\":(.*}),\"totalItems\":", re.DOTALL)
29 image_sizes = ('o', 'k', 'h', 'b', 'c', 'z', 'n', 'm', 't', 'q', 's')
30
31 paging = True
32
33
34 def build_flickr_url(user_id, photo_id):
35 return photo_url.format(userid=user_id, photoid=photo_id)
36
37
38 def request(query, params):
39 params['url'] = search_url.format(query=urlencode({'text': query}),
40 page=params['pageno'])
41 return params
42
43
44 def response(resp):
45 results = []
46
47 matches = regex.search(resp.text)
48
49 if matches is None:
50 return results
51
52 match = matches.group(1)
53 search_results = loads(match)
54
55 if '_data' not in search_results:
56 return []
57
58 photos = search_results['_data']
59
60 for photo in photos:
61
62 # In paged configuration, the first pages' photos
63 # are represented by a None object
64 if photo is None:
65 continue
66
67 img_src = None
68 # From the biggest to the lowest format
69 for image_size in image_sizes:
70 if image_size in photo['sizes']:
71 img_src = photo['sizes'][image_size]['url']
72 break
73
74 if not img_src:
75 logger.debug('cannot find valid image size: {0}'.format(repr(photo)))
76 continue
77
78 if 'id' not in photo['owner']:
79 continue
80
81 # For a bigger thumbnail, keep only the url_z, not the url_n
82 if 'n' in photo['sizes']:
83 thumbnail_src = photo['sizes']['n']['url']
84 elif 'z' in photo['sizes']:
85 thumbnail_src = photo['sizes']['z']['url']
86 else:
87 thumbnail_src = img_src
88
89 url = build_flickr_url(photo['owner']['id'], photo['id'])
90
91 title = photo.get('title', '')
92
93 content = '<span class="photo-author">' +\
94 photo['owner']['username'] +\
95 '</span><br />'
96
97 if 'description' in photo:
98 content = content +\
99 '<span class="description">' +\
100 photo['description'] +\
101 '</span>'
102
103 # append result
104 results.append({'url': url,
105 'title': title,
106 'img_src': img_src,
107 'thumbnail_src': thumbnail_src,
108 'content': content,
109 'template': 'images.html'})
110
111 return results
112
[end of searx/engines/flickr_noapi.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/searx/engines/flickr_noapi.py b/searx/engines/flickr_noapi.py
--- a/searx/engines/flickr_noapi.py
+++ b/searx/engines/flickr_noapi.py
@@ -25,7 +25,7 @@
url = 'https://www.flickr.com/'
search_url = url + 'search?{query}&page={page}'
photo_url = 'https://www.flickr.com/photos/{userid}/{photoid}'
-regex = re.compile(r"\"search-photos-models\",\"photos\":(.*}),\"totalItems\":", re.DOTALL)
+regex = re.compile(r"\"search-photos-lite-models\",\"photos\":(.*}),\"totalItems\":", re.DOTALL)
image_sizes = ('o', 'k', 'h', 'b', 'c', 'z', 'n', 'm', 't', 'q', 's')
paging = True
@@ -38,6 +38,7 @@
def request(query, params):
params['url'] = search_url.format(query=urlencode({'text': query}),
page=params['pageno'])
+
return params
@@ -75,10 +76,10 @@
logger.debug('cannot find valid image size: {0}'.format(repr(photo)))
continue
- if 'id' not in photo['owner']:
+ if 'ownerNsid' not in photo:
continue
-# For a bigger thumbnail, keep only the url_z, not the url_n
+ # For a bigger thumbnail, keep only the url_z, not the url_n
if 'n' in photo['sizes']:
thumbnail_src = photo['sizes']['n']['url']
elif 'z' in photo['sizes']:
@@ -86,20 +87,14 @@
else:
thumbnail_src = img_src
- url = build_flickr_url(photo['owner']['id'], photo['id'])
+ url = build_flickr_url(photo['ownerNsid'], photo['id'])
title = photo.get('title', '')
content = '<span class="photo-author">' +\
- photo['owner']['username'] +\
+ photo['username'] +\
'</span><br />'
- if 'description' in photo:
- content = content +\
- '<span class="description">' +\
- photo['description'] +\
- '</span>'
-
# append result
results.append({'url': url,
'title': title,
| {"golden_diff": "diff --git a/searx/engines/flickr_noapi.py b/searx/engines/flickr_noapi.py\n--- a/searx/engines/flickr_noapi.py\n+++ b/searx/engines/flickr_noapi.py\n@@ -25,7 +25,7 @@\n url = 'https://www.flickr.com/'\n search_url = url + 'search?{query}&page={page}'\n photo_url = 'https://www.flickr.com/photos/{userid}/{photoid}'\n-regex = re.compile(r\"\\\"search-photos-models\\\",\\\"photos\\\":(.*}),\\\"totalItems\\\":\", re.DOTALL)\n+regex = re.compile(r\"\\\"search-photos-lite-models\\\",\\\"photos\\\":(.*}),\\\"totalItems\\\":\", re.DOTALL)\n image_sizes = ('o', 'k', 'h', 'b', 'c', 'z', 'n', 'm', 't', 'q', 's')\n \n paging = True\n@@ -38,6 +38,7 @@\n def request(query, params):\n params['url'] = search_url.format(query=urlencode({'text': query}),\n page=params['pageno'])\n+\n return params\n \n \n@@ -75,10 +76,10 @@\n logger.debug('cannot find valid image size: {0}'.format(repr(photo)))\n continue\n \n- if 'id' not in photo['owner']:\n+ if 'ownerNsid' not in photo:\n continue\n \n-# For a bigger thumbnail, keep only the url_z, not the url_n\n+ # For a bigger thumbnail, keep only the url_z, not the url_n\n if 'n' in photo['sizes']:\n thumbnail_src = photo['sizes']['n']['url']\n elif 'z' in photo['sizes']:\n@@ -86,20 +87,14 @@\n else:\n thumbnail_src = img_src\n \n- url = build_flickr_url(photo['owner']['id'], photo['id'])\n+ url = build_flickr_url(photo['ownerNsid'], photo['id'])\n \n title = photo.get('title', '')\n \n content = '<span class=\"photo-author\">' +\\\n- photo['owner']['username'] +\\\n+ photo['username'] +\\\n '</span><br />'\n \n- if 'description' in photo:\n- content = content +\\\n- '<span class=\"description\">' +\\\n- photo['description'] +\\\n- '</span>'\n-\n # append result\n results.append({'url': url,\n 'title': title,\n", "issue": "Flickr engine is broken\nThe html seems to have changed, but it's seems there is [REST API](https://api.flickr.com/services/rest?sort=relevance&parse_tags=1&content_type=7&extras=can_comment%2Ccount_comments%2Ccount_faves%2Cisfavorite%2Clicense%2Cmedia%2Cneeds_interstitial%2Cowner_name%2Cpath_alias%2Crealname%2Crotation%2Curl_c%2Curl_l%2Curl_m%2Curl_n%2Curl_q%2Curl_s%2Curl_sq%2Curl_t%2Curl_z&per_page=25&page=1&lang=en-US&rb=1&text=proxy&viewerNSID=&method=flickr.photos.search&csrf=&api_key=3e5918155f464baad83cce2efcf8b57e&format=json&hermes=1&hermesClient=1&reqId=rgb38n1&nojsoncallback=1)\n\nIn all parameters there is an api_key : I don't know how long it is valid, in which condition.\nThe call to this URL is trigger inside another minified javascript.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\n Flickr (Images)\n\n @website https://www.flickr.com\n @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html)\n\n @using-api no\n @results HTML\n @stable no\n @parse url, title, thumbnail, img_src\n\"\"\"\n\nfrom urllib import urlencode\nfrom json import loads\nimport re\nfrom searx.engines import logger\n\n\nlogger = logger.getChild('flickr-noapi')\n\ncategories = ['images']\n\nurl = 'https://www.flickr.com/'\nsearch_url = url + 'search?{query}&page={page}'\nphoto_url = 'https://www.flickr.com/photos/{userid}/{photoid}'\nregex = re.compile(r\"\\\"search-photos-models\\\",\\\"photos\\\":(.*}),\\\"totalItems\\\":\", re.DOTALL)\nimage_sizes = ('o', 'k', 'h', 'b', 'c', 'z', 'n', 'm', 't', 'q', 's')\n\npaging = True\n\n\ndef build_flickr_url(user_id, photo_id):\n return photo_url.format(userid=user_id, photoid=photo_id)\n\n\ndef request(query, params):\n params['url'] = 
search_url.format(query=urlencode({'text': query}),\n page=params['pageno'])\n return params\n\n\ndef response(resp):\n results = []\n\n matches = regex.search(resp.text)\n\n if matches is None:\n return results\n\n match = matches.group(1)\n search_results = loads(match)\n\n if '_data' not in search_results:\n return []\n\n photos = search_results['_data']\n\n for photo in photos:\n\n # In paged configuration, the first pages' photos\n # are represented by a None object\n if photo is None:\n continue\n\n img_src = None\n # From the biggest to the lowest format\n for image_size in image_sizes:\n if image_size in photo['sizes']:\n img_src = photo['sizes'][image_size]['url']\n break\n\n if not img_src:\n logger.debug('cannot find valid image size: {0}'.format(repr(photo)))\n continue\n\n if 'id' not in photo['owner']:\n continue\n\n# For a bigger thumbnail, keep only the url_z, not the url_n\n if 'n' in photo['sizes']:\n thumbnail_src = photo['sizes']['n']['url']\n elif 'z' in photo['sizes']:\n thumbnail_src = photo['sizes']['z']['url']\n else:\n thumbnail_src = img_src\n\n url = build_flickr_url(photo['owner']['id'], photo['id'])\n\n title = photo.get('title', '')\n\n content = '<span class=\"photo-author\">' +\\\n photo['owner']['username'] +\\\n '</span><br />'\n\n if 'description' in photo:\n content = content +\\\n '<span class=\"description\">' +\\\n photo['description'] +\\\n '</span>'\n\n # append result\n results.append({'url': url,\n 'title': title,\n 'img_src': img_src,\n 'thumbnail_src': thumbnail_src,\n 'content': content,\n 'template': 'images.html'})\n\n return results\n", "path": "searx/engines/flickr_noapi.py"}]} | 1,766 | 558 |
gh_patches_debug_35825 | rasdani/github-patches | git_diff | facebookresearch__hydra-2174 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add more details to Callback doc
(from Zulip chat)
> When are the Callbacks executed in the hydra stack exactly? More specifically, are they executed after the launchers in the environments that the launchers provide? (Would be awesome to add a sentence about this in the docs)
</issue>
<code>
[start of hydra/experimental/callback.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 import logging
3 from typing import Any
4
5 from omegaconf import DictConfig
6
7 from hydra.core.utils import JobReturn
8
9 logger = logging.getLogger(__name__)
10
11
12 class Callback:
13 def on_run_start(self, config: DictConfig, **kwargs: Any) -> None:
14 """
15 Called in RUN mode before job starts.
16 """
17 ...
18
19 def on_run_end(self, config: DictConfig, **kwargs: Any) -> None:
20 """
21 Called in RUN mode after job ends.
22 """
23 ...
24
25 def on_multirun_start(self, config: DictConfig, **kwargs: Any) -> None:
26 """
27 Called in MULTIRUN mode before any job starts.
28 """
29 ...
30
31 def on_multirun_end(self, config: DictConfig, **kwargs: Any) -> None:
32 """
33 Called in MULTIRUN mode after all jobs end.
34 """
35 ...
36
37 def on_job_start(self, config: DictConfig, **kwargs: Any) -> None:
38 """
39 Called in both RUN and MULTIRUN modes, once for each Hydra job (before running
40 application code).
41 """
42 ...
43
44 def on_job_end(
45 self, config: DictConfig, job_return: JobReturn, **kwargs: Any
46 ) -> None:
47 """
48 Called in both RUN and MULTIRUN modes, once for each Hydra job (after running
49 application code).
50 """
51 ...
52
[end of hydra/experimental/callback.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hydra/experimental/callback.py b/hydra/experimental/callback.py
--- a/hydra/experimental/callback.py
+++ b/hydra/experimental/callback.py
@@ -12,32 +12,37 @@
class Callback:
def on_run_start(self, config: DictConfig, **kwargs: Any) -> None:
"""
- Called in RUN mode before job starts.
+ Called in RUN mode before job/application code starts. `config` is composed with overrides.
+ Some `hydra.runtime` configs are not populated yet.
+ See hydra.core.utils.run_job for more info.
"""
...
def on_run_end(self, config: DictConfig, **kwargs: Any) -> None:
"""
- Called in RUN mode after job ends.
+ Called in RUN mode after job/application code returns.
"""
...
def on_multirun_start(self, config: DictConfig, **kwargs: Any) -> None:
"""
Called in MULTIRUN mode before any job starts.
+ When using a launcher, this will be executed on local machine before any Sweeper/Launcher is initialized.
"""
...
def on_multirun_end(self, config: DictConfig, **kwargs: Any) -> None:
"""
- Called in MULTIRUN mode after all jobs end.
+ Called in MULTIRUN mode after all jobs returns.
+ When using a launcher, this will be executed on local machine.
"""
...
def on_job_start(self, config: DictConfig, **kwargs: Any) -> None:
"""
- Called in both RUN and MULTIRUN modes, once for each Hydra job (before running
- application code).
+ Called in both RUN and MULTIRUN modes, once for each Hydra job (before running application code).
+ This is called from within `hydra.core.utils.run_job`. In the case of remote launching, this will be executed
+ on the remote server along with your application code.
"""
...
@@ -47,5 +52,10 @@
"""
Called in both RUN and MULTIRUN modes, once for each Hydra job (after running
application code).
+ This is called from within `hydra.core.utils.run_job`. In the case of remote launching, this will be executed
+ on the remote server after your application code.
+
+ `job_return` contains info that could be useful for logging or post-processing.
+ See hydra.core.utils.JobReturn for more.
"""
...
| {"golden_diff": "diff --git a/hydra/experimental/callback.py b/hydra/experimental/callback.py\n--- a/hydra/experimental/callback.py\n+++ b/hydra/experimental/callback.py\n@@ -12,32 +12,37 @@\n class Callback:\n def on_run_start(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n- Called in RUN mode before job starts.\n+ Called in RUN mode before job/application code starts. `config` is composed with overrides.\n+ Some `hydra.runtime` configs are not populated yet.\n+ See hydra.core.utils.run_job for more info.\n \"\"\"\n ...\n \n def on_run_end(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n- Called in RUN mode after job ends.\n+ Called in RUN mode after job/application code returns.\n \"\"\"\n ...\n \n def on_multirun_start(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n Called in MULTIRUN mode before any job starts.\n+ When using a launcher, this will be executed on local machine before any Sweeper/Launcher is initialized.\n \"\"\"\n ...\n \n def on_multirun_end(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n- Called in MULTIRUN mode after all jobs end.\n+ Called in MULTIRUN mode after all jobs returns.\n+ When using a launcher, this will be executed on local machine.\n \"\"\"\n ...\n \n def on_job_start(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n- Called in both RUN and MULTIRUN modes, once for each Hydra job (before running\n- application code).\n+ Called in both RUN and MULTIRUN modes, once for each Hydra job (before running application code).\n+ This is called from within `hydra.core.utils.run_job`. In the case of remote launching, this will be executed\n+ on the remote server along with your application code.\n \"\"\"\n ...\n \n@@ -47,5 +52,10 @@\n \"\"\"\n Called in both RUN and MULTIRUN modes, once for each Hydra job (after running\n application code).\n+ This is called from within `hydra.core.utils.run_job`. In the case of remote launching, this will be executed\n+ on the remote server after your application code.\n+\n+ `job_return` contains info that could be useful for logging or post-processing.\n+ See hydra.core.utils.JobReturn for more.\n \"\"\"\n ...\n", "issue": "Add more details to Callback doc\n(from zulip chat)\r\n> When are the Callbacks executed in the hydra stack exactly? More specifically, are they executed after the launchers in the environments that the launchers provide? (Would be awesome to add a sentence about this in the docs)\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nimport logging\nfrom typing import Any\n\nfrom omegaconf import DictConfig\n\nfrom hydra.core.utils import JobReturn\n\nlogger = logging.getLogger(__name__)\n\n\nclass Callback:\n def on_run_start(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n Called in RUN mode before job starts.\n \"\"\"\n ...\n\n def on_run_end(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n Called in RUN mode after job ends.\n \"\"\"\n ...\n\n def on_multirun_start(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n Called in MULTIRUN mode before any job starts.\n \"\"\"\n ...\n\n def on_multirun_end(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n Called in MULTIRUN mode after all jobs end.\n \"\"\"\n ...\n\n def on_job_start(self, config: DictConfig, **kwargs: Any) -> None:\n \"\"\"\n Called in both RUN and MULTIRUN modes, once for each Hydra job (before running\n application code).\n \"\"\"\n ...\n\n def on_job_end(\n self, config: DictConfig, job_return: JobReturn, **kwargs: Any\n ) -> None:\n \"\"\"\n Called in both RUN and MULTIRUN modes, once for each Hydra job (after running\n application code).\n \"\"\"\n ...\n", "path": "hydra/experimental/callback.py"}]} | 1,016 | 553 |
gh_patches_debug_29649 | rasdani/github-patches | git_diff | bridgecrewio__checkov-4897 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CKV_AWS_325 check might not be accurate for PostgreSQL RDS instances
**Describe the issue**
The CKV_AWS_325 check expects the 'audit' log type to be enabled for RDS Cluster audit logging. However, this log type may not be applicable for PostgreSQL RDS instances, which use the 'postgresql' log type instead.
**Examples**
For a PostgreSQL RDS instance with the following Terraform configuration:
**main.tf**
```
resource "aws_rds_cluster" "main" {
# ... other configurations ...
enabled_cloudwatch_logs_exports = var.enabled_cloudwatch_logs_exports
# ... other configurations ...
}
```
**variables.tf**
```
variable "enabled_cloudwatch_logs_exports" {
default = [
"postgresql"
]
}
```
The CKV_AWS_325 check fails, even though the 'postgresql' log type is enabled for the RDS instance.
**Expected outcome**
The CKV_AWS_325 check should either not be applicable or should pass for PostgreSQL RDS instances with the 'postgresql' log type enabled.
**Version**
**Checkov Version:** Lastest
**Additional context**
This issue was discovered while analyzing a Terraform configuration for an AWS PostgreSQL RDS instance. The CKV_AWS_325 check might need to be updated to accommodate different log types for different RDS engines.
https://github.com/bridgecrewio/checkov/blob/main/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py
</issue>
<code>
[start of checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py]
1 from __future__ import annotations
2
3 from typing import Any
4
5 from checkov.common.models.enums import CheckCategories, CheckResult
6 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
7
8
9 class RDSClusterAuditLogging(BaseResourceCheck):
10 def __init__(self) -> None:
11 """
12 NIST.800-53.r5 AC-2(4), NIST.800-53.r5 AC-4(26), NIST.800-53.r5 AC-6(9), NIST.800-53.r5 AU-10,
13 NIST.800-53.r5 AU-12, NIST.800-53.r5 AU-2, NIST.800-53.r5 AU-3, NIST.800-53.r5 AU-6(3), NIST.800-53.r5 AU-6(4),
14 NIST.800-53.r5 CA-7, NIST.800-53.r5 SC-7(10), NIST.800-53.r5 SC-7(9), NIST.800-53.r5 SI-3(8),
15 NIST.800-53.r5 SI-4(20), NIST.800-53.r5 SI-7(8)
16 Database logging should be enabled
17 """
18 name = "Ensure that RDS Cluster audit logging is enabled"
19 id = "CKV_AWS_325"
20 supported_resources = ("aws_rds_cluster",)
21 categories = (CheckCategories.LOGGING,)
22 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
23
24 def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:
25 logs_exports = conf.get("enabled_cloudwatch_logs_exports")
26 if (
27 logs_exports
28 and isinstance(logs_exports, list)
29 and isinstance(logs_exports[0], list)
30 and "audit" in logs_exports[0]
31 ):
32 return CheckResult.PASSED
33
34 return CheckResult.FAILED
35
36
37 check = RDSClusterAuditLogging()
38
[end of checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py b/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py
--- a/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py
+++ b/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py
@@ -5,6 +5,12 @@
from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
+SUPPORTED_ENGINES = {
+ "aurora",
+ "aurora-mysql",
+ "mysql"
+}
+
class RDSClusterAuditLogging(BaseResourceCheck):
def __init__(self) -> None:
@@ -15,13 +21,18 @@
NIST.800-53.r5 SI-4(20), NIST.800-53.r5 SI-7(8)
Database logging should be enabled
"""
- name = "Ensure that RDS Cluster audit logging is enabled"
+ name = "Ensure that RDS Cluster audit logging is enabled for MySQL engine"
id = "CKV_AWS_325"
supported_resources = ("aws_rds_cluster",)
categories = (CheckCategories.LOGGING,)
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:
+ engine = conf.get("engine")
+ if engine and isinstance(engine, list) and engine[0] not in SUPPORTED_ENGINES:
+ # only MySQL cluster support easy audit logging export
+ return CheckResult.UNKNOWN
+
logs_exports = conf.get("enabled_cloudwatch_logs_exports")
if (
logs_exports
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py b/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py\n--- a/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py\n+++ b/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py\n@@ -5,6 +5,12 @@\n from checkov.common.models.enums import CheckCategories, CheckResult\n from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n \n+SUPPORTED_ENGINES = {\n+ \"aurora\",\n+ \"aurora-mysql\",\n+ \"mysql\"\n+}\n+\n \n class RDSClusterAuditLogging(BaseResourceCheck):\n def __init__(self) -> None:\n@@ -15,13 +21,18 @@\n NIST.800-53.r5 SI-4(20), NIST.800-53.r5 SI-7(8)\n Database logging should be enabled\n \"\"\"\n- name = \"Ensure that RDS Cluster audit logging is enabled\"\n+ name = \"Ensure that RDS Cluster audit logging is enabled for MySQL engine\"\n id = \"CKV_AWS_325\"\n supported_resources = (\"aws_rds_cluster\",)\n categories = (CheckCategories.LOGGING,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:\n+ engine = conf.get(\"engine\")\n+ if engine and isinstance(engine, list) and engine[0] not in SUPPORTED_ENGINES:\n+ # only MySQL cluster support easy audit logging export\n+ return CheckResult.UNKNOWN\n+\n logs_exports = conf.get(\"enabled_cloudwatch_logs_exports\")\n if (\n logs_exports\n", "issue": "CKV_AWS_325 check might not be accurate for PostgreSQL RDS instances\n**Describe the issue**\r\nThe CKV_AWS_325 check expects the 'audit' log type to be enabled for RDS Cluster audit logging. However, this log type may not be applicable for PostgreSQL RDS instances, which use the 'postgresql' log type instead.\r\n\r\n**Examples**\r\nFor a PostgreSQL RDS instance with the following Terraform configuration:\r\n\r\n**main.tf**\r\n\r\n```\r\nresource \"aws_rds_cluster\" \"main\" {\r\n # ... other configurations ...\r\n enabled_cloudwatch_logs_exports = var.enabled_cloudwatch_logs_exports\r\n # ... other configurations ...\r\n}\r\n```\r\n\r\n**variables.tf**\r\n\r\n```\r\nvariable \"enabled_cloudwatch_logs_exports\" {\r\n default = [\r\n \"postgresql\"\r\n ]\r\n}\r\n```\r\n\r\nThe CKV_AWS_325 check fails, even though the 'postgresql' log type is enabled for the RDS instance.\r\n\r\n**Expected outcome**\r\nThe CKV_AWS_325 check should either not be applicable or should pass for PostgreSQL RDS instances with the 'postgresql' log type enabled.\r\n\r\n**Version**\r\n\r\n**Checkov Version:** Lastest\r\n\r\n**Additional context**\r\nThis issue was discovered while analyzing a Terraform configuration for an AWS PostgreSQL RDS instance. 
The CKV_AWS_325 check might need to be updated to accommodate different log types for different RDS engines.\r\n\r\nhttps://github.com/bridgecrewio/checkov/blob/main/checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass RDSClusterAuditLogging(BaseResourceCheck):\n def __init__(self) -> None:\n \"\"\"\n NIST.800-53.r5 AC-2(4), NIST.800-53.r5 AC-4(26), NIST.800-53.r5 AC-6(9), NIST.800-53.r5 AU-10,\n NIST.800-53.r5 AU-12, NIST.800-53.r5 AU-2, NIST.800-53.r5 AU-3, NIST.800-53.r5 AU-6(3), NIST.800-53.r5 AU-6(4),\n NIST.800-53.r5 CA-7, NIST.800-53.r5 SC-7(10), NIST.800-53.r5 SC-7(9), NIST.800-53.r5 SI-3(8),\n NIST.800-53.r5 SI-4(20), NIST.800-53.r5 SI-7(8)\n Database logging should be enabled\n \"\"\"\n name = \"Ensure that RDS Cluster audit logging is enabled\"\n id = \"CKV_AWS_325\"\n supported_resources = (\"aws_rds_cluster\",)\n categories = (CheckCategories.LOGGING,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:\n logs_exports = conf.get(\"enabled_cloudwatch_logs_exports\")\n if (\n logs_exports\n and isinstance(logs_exports, list)\n and isinstance(logs_exports[0], list)\n and \"audit\" in logs_exports[0]\n ):\n return CheckResult.PASSED\n\n return CheckResult.FAILED\n\n\ncheck = RDSClusterAuditLogging()\n", "path": "checkov/terraform/checks/resource/aws/RDSClusterAuditLogging.py"}]} | 1,446 | 406 |
gh_patches_debug_9538 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-481 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Can't run in Travis CI xenial
I was testing the new Travis CI image:
https://blog.travis-ci.com/2018-11-08-xenial-release
https://docs.travis-ci.com/user/reference/xenial/
My install script ran `pip install --user --upgrade cfn-lint` and then `cfn-lint my.cfn.yaml`, which gave this error:
```
Traceback (most recent call last):
File "/home/travis/.local/bin/cfn-lint", line 7, in <module>
from cfnlint.__main__ import main
File "/home/travis/.local/lib/python2.7/site-packages/cfnlint/__main__.py", line 19, in <module>
import cfnlint.core
File "/home/travis/.local/lib/python2.7/site-packages/cfnlint/core.py", line 24, in <module>
import cfnlint.maintenance
File "/home/travis/.local/lib/python2.7/site-packages/cfnlint/maintenance.py", line 19, in <module>
import requests
File "/usr/lib/python2.7/dist-packages/requests/__init__.py", line 58, in <module>
from . import utils
File "/usr/lib/python2.7/dist-packages/requests/utils.py", line 25, in <module>
from . import certs
ImportError: cannot import name certs
```
I "fixed" this problem by just changing to Python3 in my build script:
```
pyenv global 3.7
pip3 install --user --upgrade cfn-lint
cfn-lint stack.cfn.yaml
```
But there might be some other lingering issue here.
</issue>
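
A Python 3 diagnostic sketch (not from the original report; nothing here is guaranteed about that environment) can show whether `requests` resolves from the Debian system path while `cfnlint` resolves from the pip `--user` site-packages — a mix like that would be consistent with the `cannot import name certs` traceback, since the system copy of `requests` can shadow the one the pip-installed tree expects:

```python
import importlib.util

# Print where each package actually resolves from; /usr/lib/.../dist-packages
# for requests alongside ~/.local/.../site-packages for cfnlint would mean the
# system copy is shadowing the pip-installed dependency tree.
for name in ("requests", "cfnlint"):
    spec = importlib.util.find_spec(name)
    print(name, "->", spec.origin if spec else "not found")
```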
<code>
[start of setup.py]
1 """
2 Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 import codecs
18 import re
19 from setuptools import find_packages
20 from setuptools import setup
21
22
23 def get_version(filename):
24 with codecs.open(filename, 'r', 'utf-8') as fp:
25 contents = fp.read()
26 return re.search(r"__version__ = ['\"]([^'\"]+)['\"]", contents).group(1)
27
28
29 version = get_version('src/cfnlint/version.py')
30
31
32 with open('README.md') as f:
33 readme = f.read()
34
35 setup(
36 name='cfn-lint',
37 version=version,
38 description=('checks cloudformation for practices and behaviour \
39 that could potentially be improved'),
40 long_description=readme,
41 long_description_content_type="text/markdown",
42 keywords='aws, lint',
43 author='kddejong',
44 author_email='[email protected]',
45 url='https://github.com/awslabs/cfn-python-lint',
46 package_dir={'': 'src'},
47 package_data={'cfnlint': [
48 'data/CloudSpecs/*.json',
49 'data/AdditionalSpecs/*.json',
50 'data/Serverless/*.json',
51 'data/CfnLintCli/config/schema.json'
52 ]},
53 packages=find_packages('src'),
54 zip_safe=False,
55 install_requires=['pyyaml', 'six', 'requests', 'aws-sam-translator>=1.6.0', 'jsonpatch', 'jsonschema~=2.6.0', 'pathlib2'],
56 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
57 entry_points={
58 'console_scripts': [
59 'cfn-lint = cfnlint.__main__:main'
60 ]
61 },
62 license='MIT no attribution',
63 test_suite="unittest",
64 classifiers=[
65 'Development Status :: 5 - Production/Stable',
66 'Intended Audience :: Developers',
67 'License :: OSI Approved :: MIT License',
68 'Natural Language :: English',
69 'Operating System :: OS Independent',
70 'Programming Language :: Python :: 2',
71 'Programming Language :: Python :: 2.7',
72 'Programming Language :: Python :: 3',
73 'Programming Language :: Python :: 3.4',
74 'Programming Language :: Python :: 3.5',
75 'Programming Language :: Python :: 3.6',
76 ],
77 )
78
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -52,7 +52,15 @@
]},
packages=find_packages('src'),
zip_safe=False,
- install_requires=['pyyaml', 'six', 'requests', 'aws-sam-translator>=1.6.0', 'jsonpatch', 'jsonschema~=2.6.0', 'pathlib2'],
+ install_requires=[
+ 'pyyaml',
+ 'six~=1.11',
+ 'requests>=2.15.0',
+ 'aws-sam-translator>=1.8.0',
+ 'jsonpatch',
+ 'jsonschema~=2.6',
+ 'pathlib2>=2.3.0;python_version<"3.4"'
+ ],
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
entry_points={
'console_scripts': [
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -52,7 +52,15 @@\n ]},\n packages=find_packages('src'),\n zip_safe=False,\n- install_requires=['pyyaml', 'six', 'requests', 'aws-sam-translator>=1.6.0', 'jsonpatch', 'jsonschema~=2.6.0', 'pathlib2'],\n+ install_requires=[\n+ 'pyyaml',\n+ 'six~=1.11',\n+ 'requests>=2.15.0',\n+ 'aws-sam-translator>=1.8.0',\n+ 'jsonpatch',\n+ 'jsonschema~=2.6',\n+ 'pathlib2>=2.3.0;python_version<\"3.4\"'\n+ ],\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n entry_points={\n 'console_scripts': [\n", "issue": "Can't run in Travis CI xenial\nI was testing the new Travis CI image:\r\n\r\nhttps://blog.travis-ci.com/2018-11-08-xenial-release\r\nhttps://docs.travis-ci.com/user/reference/xenial/\r\n\r\nMy install script ran `pip install --user --upgrade cfn-lint` and then `cfn-lint my.cfn.yaml` which gave this error:\r\n\r\n```Traceback (most recent call last):\r\n File \"/home/travis/.local/bin/cfn-lint\", line 7, in <module>\r\n from cfnlint.__main__ import main\r\n File \"/home/travis/.local/lib/python2.7/site-packages/cfnlint/__main__.py\", line 19, in <module>\r\n import cfnlint.core\r\n File \"/home/travis/.local/lib/python2.7/site-packages/cfnlint/core.py\", line 24, in <module>\r\n import cfnlint.maintenance\r\n File \"/home/travis/.local/lib/python2.7/site-packages/cfnlint/maintenance.py\", line 19, in <module>\r\n import requests\r\n File \"/usr/lib/python2.7/dist-packages/requests/__init__.py\", line 58, in <module>\r\n from . import utils\r\n File \"/usr/lib/python2.7/dist-packages/requests/utils.py\", line 25, in <module>\r\n from . import certs\r\nImportError: cannot import name certs\r\n```\r\n\r\nI \"fixed\" this problem by just changing to Python3 in my build script:\r\n\r\n```\r\npyenv global 3.7\r\npip3 install --user --upgrade cfn-lint\r\ncfn-lint stack.cfn.yaml\r\n```\r\n\r\nBut there might be some other lingering issue here\n", "before_files": [{"content": "\"\"\"\n Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport codecs\nimport re\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\ndef get_version(filename):\n with codecs.open(filename, 'r', 'utf-8') as fp:\n contents = fp.read()\n return re.search(r\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", contents).group(1)\n\n\nversion = get_version('src/cfnlint/version.py')\n\n\nwith open('README.md') as f:\n readme = f.read()\n\nsetup(\n name='cfn-lint',\n version=version,\n description=('checks cloudformation for practices and behaviour \\\n that could potentially be improved'),\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n keywords='aws, lint',\n author='kddejong',\n author_email='[email protected]',\n url='https://github.com/awslabs/cfn-python-lint',\n package_dir={'': 'src'},\n package_data={'cfnlint': [\n 'data/CloudSpecs/*.json',\n 'data/AdditionalSpecs/*.json',\n 'data/Serverless/*.json',\n 'data/CfnLintCli/config/schema.json'\n ]},\n packages=find_packages('src'),\n zip_safe=False,\n install_requires=['pyyaml', 'six', 'requests', 'aws-sam-translator>=1.6.0', 'jsonpatch', 'jsonschema~=2.6.0', 'pathlib2'],\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n entry_points={\n 'console_scripts': [\n 'cfn-lint = cfnlint.__main__:main'\n ]\n },\n license='MIT no attribution',\n test_suite=\"unittest\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n)\n", "path": "setup.py"}]} | 1,762 | 222 |
gh_patches_debug_62284 | rasdani/github-patches | git_diff | SciTools__cartopy-228 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
coastlines drawn under filled contours
When using matplotlib v1.3.x and cartopy v0.7.x (development master from GitHub), coastlines and other features are drawn underneath filled contours unless:
```
ax.coastlines(zorder=1)
```
is used to manually raise them above. I suspect this may be a matplotlib issue, but thought it would be best to raise it here for the cartopy experts to take a look at first.
</issue>
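
A self-contained sketch of the workaround mentioned above, assuming cartopy and matplotlib are installed (the grid and data are invented for illustration):

```python
import numpy as np
import matplotlib.pyplot as plt
import cartopy.crs as ccrs

lons, lats = np.meshgrid(np.linspace(-180, 180, 73), np.linspace(-90, 90, 37))
data = np.cos(np.radians(lats)) * np.sin(np.radians(2 * lons))

ax = plt.axes(projection=ccrs.PlateCarree())
ax.contourf(lons, lats, data, transform=ccrs.PlateCarree())
# Per the report, an explicit zorder keeps the coastlines above the filled
# contours; without it they are drawn underneath.
ax.coastlines(zorder=1)
plt.savefig("contours.png")
```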
<code>
[start of lib/cartopy/mpl/feature_artist.py]
1 # (C) British Crown Copyright 2011 - 2012, Met Office
2 #
3 # This file is part of cartopy.
4 #
5 # cartopy is free software: you can redistribute it and/or modify it under
6 # the terms of the GNU Lesser General Public License as published by the
7 # Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # cartopy is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU Lesser General Public License for more details.
14 #
15 # You should have received a copy of the GNU Lesser General Public License
16 # along with cartopy. If not, see <http://www.gnu.org/licenses/>.
17 """
18 This module defines the :class:`FeatureArtist` class, for drawing
19 :class:`Feature` instances with matplotlib.
20
21 """
22 import warnings
23 import weakref
24
25 import matplotlib.artist
26 import matplotlib.cbook
27 import matplotlib.collections
28 import matplotlib.lines
29
30 import cartopy.mpl.patch
31
32
33 class FeatureArtist(matplotlib.artist.Artist):
34 """
35 A subclass of :class:`~matplotlib.artist.Artist` capable of
36 drawing a :class:`cartopy.feature.Feature`.
37
38 """
39 _geometry_to_path_cache = weakref.WeakKeyDictionary()
40 """
41 A nested mapping from geometry and target projection to the
42 resulting transformed matplotlib paths::
43
44 {geom: {target_projection: list_of_paths}}
45
46 This provides a significant boost when producing multiple maps of the
47 same projection.
48
49 """
50 def __init__(self, feature, **kwargs):
51 """
52 Args:
53
54 * feature:
55 an instance of :class:`cartopy.feature.Feature` to draw.
56 * kwargs:
57 keyword arguments to be used when drawing the feature. These
58 will override those shared with the feature.
59
60 """
61 super(FeatureArtist, self).__init__()
62
63 if kwargs is None:
64 kwargs = {}
65 self._kwargs = dict(kwargs)
66
67 # Set default zorder so that features are drawn before
68 # lines e.g. contours but after images.
69 # Note that the zorder of Patch, PatchCollection and PathCollection
70 # are all 1 by default. Assuming equal zorder drawing takes place in
71 # the following order: collections, patches, lines (default zorder=2),
72 # text (default zorder=3), then other artists e.g. FeatureArtist.
73 if self._kwargs.get('zorder') is not None:
74 self.set_zorder(self._kwargs['zorder'])
75 elif feature.kwargs.get('zorder') is not None:
76 self.set_zorder(feature.kwargs['zorder'])
77 else:
78 self.set_zorder(matplotlib.collections.PathCollection.zorder)
79
80 self._feature = feature
81
82 @matplotlib.artist.allow_rasterization
83 def draw(self, renderer, *args, **kwargs):
84 """
85 Draws the geometries of the feature that intersect with the extent of
86 the :class:`cartopy.mpl.GeoAxes` instance to which this
87 object has been added.
88
89 """
90 if not self.get_visible():
91 return
92
93 ax = self.get_axes()
94 feature_crs = self._feature.crs
95
96 # Get geometries that we need to draw.
97 extent = None
98 try:
99 extent = ax.get_extent(feature_crs)
100 except ValueError:
101 warnings.warn('Unable to determine extent. Defaulting to global.')
102 geoms = self._feature.intersecting_geometries(extent)
103
104 # Project (if necessary) and convert geometries to matplotlib paths.
105 paths = []
106 key = ax.projection
107 for geom in geoms:
108 mapping = FeatureArtist._geometry_to_path_cache.setdefault(geom,
109 {})
110 geom_paths = mapping.get(key)
111 if geom_paths is None:
112 if ax.projection != feature_crs:
113 projected_geom = ax.projection.project_geometry(
114 geom, feature_crs)
115 else:
116 projected_geom = geom
117 geom_paths = cartopy.mpl.patch.geos_to_path(
118 projected_geom)
119 mapping[key] = geom_paths
120 paths.extend(geom_paths)
121
122 # Build path collection and draw it.
123 transform = ax.projection._as_mpl_transform(ax)
124 # Combine all the keyword args in priority order
125 final_kwargs = dict(self._feature.kwargs)
126 final_kwargs.update(self._kwargs)
127 final_kwargs.update(kwargs)
128 c = matplotlib.collections.PathCollection(paths,
129 transform=transform,
130 **final_kwargs)
131 c.set_clip_path(ax.patch)
132 return c.draw(renderer)
133
[end of lib/cartopy/mpl/feature_artist.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/cartopy/mpl/feature_artist.py b/lib/cartopy/mpl/feature_artist.py
--- a/lib/cartopy/mpl/feature_artist.py
+++ b/lib/cartopy/mpl/feature_artist.py
@@ -75,7 +75,10 @@
elif feature.kwargs.get('zorder') is not None:
self.set_zorder(feature.kwargs['zorder'])
else:
- self.set_zorder(matplotlib.collections.PathCollection.zorder)
+ # The class attribute matplotlib.collections.PathCollection.zorder
+ # was removed after mpl v1.2.0, so the hard-coded value of 1 is
+ # used instead.
+ self.set_zorder(1)
self._feature = feature
| {"golden_diff": "diff --git a/lib/cartopy/mpl/feature_artist.py b/lib/cartopy/mpl/feature_artist.py\n--- a/lib/cartopy/mpl/feature_artist.py\n+++ b/lib/cartopy/mpl/feature_artist.py\n@@ -75,7 +75,10 @@\n elif feature.kwargs.get('zorder') is not None:\n self.set_zorder(feature.kwargs['zorder'])\n else:\n- self.set_zorder(matplotlib.collections.PathCollection.zorder)\n+ # The class attribute matplotlib.collections.PathCollection.zorder\n+ # was removed after mpl v1.2.0, so the hard-coded value of 1 is\n+ # used instead.\n+ self.set_zorder(1)\n \n self._feature = feature\n", "issue": "coastlines drawn under filled contours\nWhen using matplotlib v1.3.x and cartopy v0.7.x (development master from github) coastlines and other features are drawn underneath filled contours unless:\n\n```\nax.coastlines(zorder=1)\n```\n\nis used to manually raise them above. I suspect this may be a matplotlib issue, but thought it would be best to raise it here for the cartopy experts to take a look at first.\n\n", "before_files": [{"content": "# (C) British Crown Copyright 2011 - 2012, Met Office\n#\n# This file is part of cartopy.\n#\n# cartopy is free software: you can redistribute it and/or modify it under\n# the terms of the GNU Lesser General Public License as published by the\n# Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# cartopy is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with cartopy. If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\nThis module defines the :class:`FeatureArtist` class, for drawing\n:class:`Feature` instances with matplotlib.\n\n\"\"\"\nimport warnings\nimport weakref\n\nimport matplotlib.artist\nimport matplotlib.cbook\nimport matplotlib.collections\nimport matplotlib.lines\n\nimport cartopy.mpl.patch\n\n\nclass FeatureArtist(matplotlib.artist.Artist):\n \"\"\"\n A subclass of :class:`~matplotlib.artist.Artist` capable of\n drawing a :class:`cartopy.feature.Feature`.\n\n \"\"\"\n _geometry_to_path_cache = weakref.WeakKeyDictionary()\n \"\"\"\n A nested mapping from geometry and target projection to the\n resulting transformed matplotlib paths::\n\n {geom: {target_projection: list_of_paths}}\n\n This provides a significant boost when producing multiple maps of the\n same projection.\n\n \"\"\"\n def __init__(self, feature, **kwargs):\n \"\"\"\n Args:\n\n * feature:\n an instance of :class:`cartopy.feature.Feature` to draw.\n * kwargs:\n keyword arguments to be used when drawing the feature. These\n will override those shared with the feature.\n\n \"\"\"\n super(FeatureArtist, self).__init__()\n\n if kwargs is None:\n kwargs = {}\n self._kwargs = dict(kwargs)\n\n # Set default zorder so that features are drawn before\n # lines e.g. contours but after images.\n # Note that the zorder of Patch, PatchCollection and PathCollection\n # are all 1 by default. Assuming equal zorder drawing takes place in\n # the following order: collections, patches, lines (default zorder=2),\n # text (default zorder=3), then other artists e.g. 
FeatureArtist.\n if self._kwargs.get('zorder') is not None:\n self.set_zorder(self._kwargs['zorder'])\n elif feature.kwargs.get('zorder') is not None:\n self.set_zorder(feature.kwargs['zorder'])\n else:\n self.set_zorder(matplotlib.collections.PathCollection.zorder)\n\n self._feature = feature\n\n @matplotlib.artist.allow_rasterization\n def draw(self, renderer, *args, **kwargs):\n \"\"\"\n Draws the geometries of the feature that intersect with the extent of\n the :class:`cartopy.mpl.GeoAxes` instance to which this\n object has been added.\n\n \"\"\"\n if not self.get_visible():\n return\n\n ax = self.get_axes()\n feature_crs = self._feature.crs\n\n # Get geometries that we need to draw.\n extent = None\n try:\n extent = ax.get_extent(feature_crs)\n except ValueError:\n warnings.warn('Unable to determine extent. Defaulting to global.')\n geoms = self._feature.intersecting_geometries(extent)\n\n # Project (if necessary) and convert geometries to matplotlib paths.\n paths = []\n key = ax.projection\n for geom in geoms:\n mapping = FeatureArtist._geometry_to_path_cache.setdefault(geom,\n {})\n geom_paths = mapping.get(key)\n if geom_paths is None:\n if ax.projection != feature_crs:\n projected_geom = ax.projection.project_geometry(\n geom, feature_crs)\n else:\n projected_geom = geom\n geom_paths = cartopy.mpl.patch.geos_to_path(\n projected_geom)\n mapping[key] = geom_paths\n paths.extend(geom_paths)\n\n # Build path collection and draw it.\n transform = ax.projection._as_mpl_transform(ax)\n # Combine all the keyword args in priority order\n final_kwargs = dict(self._feature.kwargs)\n final_kwargs.update(self._kwargs)\n final_kwargs.update(kwargs)\n c = matplotlib.collections.PathCollection(paths,\n transform=transform,\n **final_kwargs)\n c.set_clip_path(ax.patch)\n return c.draw(renderer)\n", "path": "lib/cartopy/mpl/feature_artist.py"}]} | 1,925 | 163 |
gh_patches_debug_9163 | rasdani/github-patches | git_diff | opsdroid__opsdroid-1540 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add 3.8 support to setup.py
We've been running CI against 3.8 for a while now, so we should update the metadata in `setup.py` to state explicitly that we support 3.8.
We should also update the [support table](https://github.com/opsdroid/opsdroid/blob/master/docs/maintaining/supported-python-versions.md) to say we support 3.8.
</issue>
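
To guard against typos in the classifier string, the third-party `trove-classifiers` package (an assumption here — it is not an opsdroid dependency) exposes the canonical list:

```python
from trove_classifiers import classifiers

# The exact string that would be added to setup.py's classifiers list.
wanted = "Programming Language :: Python :: 3.8"
assert wanted in classifiers, "not a recognised trove classifier"
```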
<code>
[start of setup.py]
1 #!/usr/bin/env python3
2 import os
3 from setuptools import setup, find_packages
4 from setuptools.command.build_py import build_py
5 from setuptools.command.sdist import sdist
6 from setuptools.command.develop import develop
7 import versioneer
8
9 PACKAGE_NAME = "opsdroid"
10 HERE = os.path.abspath(os.path.dirname(__file__))
11 README = open(os.path.join(HERE, "README.md"), encoding="utf8").read()
12
13 PACKAGES = find_packages(
14 exclude=["tests", "tests.*", "modules", "modules.*", "docs", "docs.*"]
15 )
16
17
18 # For now we simply define the install_requires based on the contents
19 # of requirements.txt. In the future, install_requires may become much
20 # looser than the (automatically) resolved requirements.txt.
21 with open(os.path.join(HERE, "requirements.txt"), "r") as fh:
22 REQUIRES = [line.strip() for line in fh]
23
24
25 class Develop(develop):
26 """Custom `develop` command to always build mo files on install -e."""
27
28 def run(self):
29 self.run_command("compile_catalog")
30 develop.run(self) # old style class
31
32
33 class BuildPy(build_py):
34 """Custom `build_py` command to always build mo files for wheels."""
35
36 def run(self):
37 self.run_command("compile_catalog")
38 build_py.run(self) # old style class
39
40
41 class Sdist(sdist):
42 """Custom `sdist` command to ensure that mo files are always created."""
43
44 def run(self):
45 self.run_command("compile_catalog")
46 sdist.run(self) # old style class
47
48
49 setup(
50 name=PACKAGE_NAME,
51 version=versioneer.get_version(),
52 license="Apache License 2.0",
53 url="https://opsdroid.github.io/",
54 download_url="https://github.com/opsdroid/opsdroid/releases",
55 author="Jacob Tomlinson",
56 author_email="[email protected]",
57 description="An open source ChatOps bot framework.",
58 long_description=README,
59 long_description_content_type="text/markdown",
60 packages=PACKAGES,
61 include_package_data=True,
62 zip_safe=False,
63 platforms="any",
64 classifiers=[
65 "Development Status :: 4 - Beta",
66 "Environment :: Console",
67 "Framework :: AsyncIO",
68 "Intended Audience :: Developers",
69 "Intended Audience :: System Administrators",
70 "Intended Audience :: Information Technology",
71 "License :: OSI Approved :: Apache Software License",
72 "Programming Language :: Python",
73 "Programming Language :: Python :: 3",
74 "Programming Language :: Python :: 3 :: Only",
75 "Programming Language :: Python :: 3.6",
76 "Programming Language :: Python :: 3.7",
77 "Topic :: Communications :: Chat",
78 "Topic :: Scientific/Engineering :: Artificial Intelligence",
79 "Topic :: Software Development :: Libraries :: Python Modules",
80 ],
81 install_requires=REQUIRES,
82 test_suite="tests",
83 keywords=[
84 "bot",
85 "bot-framework",
86 "opsdroid",
87 "botkit",
88 "python3",
89 "asyncio",
90 "chatops",
91 "devops",
92 "nlu",
93 ],
94 setup_requires=["Babel"],
95 cmdclass=versioneer.get_cmdclass(
96 {"sdist": Sdist, "build_py": BuildPy, "develop": Develop}
97 ),
98 entry_points={"console_scripts": ["opsdroid = opsdroid.cli:cli"]},
99 )
100
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -74,6 +74,7 @@
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
"Topic :: Communications :: Chat",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development :: Libraries :: Python Modules",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -74,6 +74,7 @@\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n+ \"Programming Language :: Python :: 3.8\",\n \"Topic :: Communications :: Chat\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n", "issue": "Add 3.8 support to setup.py\nWe've been running CI against 3.8 for a while now, we should update the metadata in `setup.py` to explicitly state we support 3.8.\r\n\r\nWe should also update the [support table](https://github.com/opsdroid/opsdroid/blob/master/docs/maintaining/supported-python-versions.md) to say we support 3.8.\n", "before_files": [{"content": "#!/usr/bin/env python3\nimport os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.build_py import build_py\nfrom setuptools.command.sdist import sdist\nfrom setuptools.command.develop import develop\nimport versioneer\n\nPACKAGE_NAME = \"opsdroid\"\nHERE = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(HERE, \"README.md\"), encoding=\"utf8\").read()\n\nPACKAGES = find_packages(\n exclude=[\"tests\", \"tests.*\", \"modules\", \"modules.*\", \"docs\", \"docs.*\"]\n)\n\n\n# For now we simply define the install_requires based on the contents\n# of requirements.txt. In the future, install_requires may become much\n# looser than the (automatically) resolved requirements.txt.\nwith open(os.path.join(HERE, \"requirements.txt\"), \"r\") as fh:\n REQUIRES = [line.strip() for line in fh]\n\n\nclass Develop(develop):\n \"\"\"Custom `develop` command to always build mo files on install -e.\"\"\"\n\n def run(self):\n self.run_command(\"compile_catalog\")\n develop.run(self) # old style class\n\n\nclass BuildPy(build_py):\n \"\"\"Custom `build_py` command to always build mo files for wheels.\"\"\"\n\n def run(self):\n self.run_command(\"compile_catalog\")\n build_py.run(self) # old style class\n\n\nclass Sdist(sdist):\n \"\"\"Custom `sdist` command to ensure that mo files are always created.\"\"\"\n\n def run(self):\n self.run_command(\"compile_catalog\")\n sdist.run(self) # old style class\n\n\nsetup(\n name=PACKAGE_NAME,\n version=versioneer.get_version(),\n license=\"Apache License 2.0\",\n url=\"https://opsdroid.github.io/\",\n download_url=\"https://github.com/opsdroid/opsdroid/releases\",\n author=\"Jacob Tomlinson\",\n author_email=\"[email protected]\",\n description=\"An open source ChatOps bot framework.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n packages=PACKAGES,\n include_package_data=True,\n zip_safe=False,\n platforms=\"any\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: Console\",\n \"Framework :: AsyncIO\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"Intended Audience :: Information Technology\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Communications :: Chat\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n install_requires=REQUIRES,\n test_suite=\"tests\",\n 
keywords=[\n \"bot\",\n \"bot-framework\",\n \"opsdroid\",\n \"botkit\",\n \"python3\",\n \"asyncio\",\n \"chatops\",\n \"devops\",\n \"nlu\",\n ],\n setup_requires=[\"Babel\"],\n cmdclass=versioneer.get_cmdclass(\n {\"sdist\": Sdist, \"build_py\": BuildPy, \"develop\": Develop}\n ),\n entry_points={\"console_scripts\": [\"opsdroid = opsdroid.cli:cli\"]},\n)\n", "path": "setup.py"}]} | 1,554 | 113 |
gh_patches_debug_63641 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-566 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Feature request - block quote formatting in reviews
I published [a review](https://bookwyrm.social/user/darius/review/14497) just now with a markdown-style block quote in it. The second-to-last paragraph is formatted, at least when I wrote it in the dialog box, like this:
```
> "I was expecting to go to prison for the rest of my life," Ellsberg wryly said later, "and Ithiel wanted to know whether I understood that I'd never get another dollar from the federal government."
```
But it renders as regular text. I'm guessing a lot of reviewers will want to excerpt their books, so block quotes seem like a good thing to include!
I would expect it to render more like it does here on Github:
> "I was expecting to go to prison for the rest of my life," Ellsberg wryly said later, "and Ithiel wanted to know whether I understood that I'd never get another dollar from the federal government."
</issue>
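
A minimal sketch of the behaviour, assuming the parser below is importable as `bookwyrm.sanitize_html` (the sample markup is invented): because `blockquote` is missing from `allowed_tags`, the tag is silently dropped while its inner content survives, which matches the "renders as regular text" symptom. Adding `'blockquote'` to the whitelist would preserve it.

```python
from bookwyrm.sanitize_html import InputHtmlParser

parser = InputHtmlParser()
parser.feed("<blockquote><p>an excerpted quote</p></blockquote>")
# Prints "<p>an excerpted quote</p>" -- the blockquote tags were stripped
# but the allowed <p> tags and the inner text were kept.
print(parser.get_output())
```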
<code>
[start of bookwyrm/sanitize_html.py]
1 ''' html parser to clean up incoming text from unknown sources '''
2 from html.parser import HTMLParser
3
4 class InputHtmlParser(HTMLParser):#pylint: disable=abstract-method
5 ''' Removes any html that isn't allowed_tagsed from a block '''
6
7 def __init__(self):
8 HTMLParser.__init__(self)
9 self.allowed_tags = [
10 'p', 'br',
11 'b', 'i', 'strong', 'em', 'pre',
12 'a', 'span', 'ul', 'ol', 'li'
13 ]
14 self.tag_stack = []
15 self.output = []
16 # if the html appears invalid, we just won't allow any at all
17 self.allow_html = True
18
19
20 def handle_starttag(self, tag, attrs):
21 ''' check if the tag is valid '''
22 if self.allow_html and tag in self.allowed_tags:
23 self.output.append(('tag', self.get_starttag_text()))
24 self.tag_stack.append(tag)
25 else:
26 self.output.append(('data', ''))
27
28
29 def handle_endtag(self, tag):
30 ''' keep the close tag '''
31 if not self.allow_html or tag not in self.allowed_tags:
32 self.output.append(('data', ''))
33 return
34
35 if not self.tag_stack or self.tag_stack[-1] != tag:
36 # the end tag doesn't match the most recent start tag
37 self.allow_html = False
38 self.output.append(('data', ''))
39 return
40
41 self.tag_stack = self.tag_stack[:-1]
42 self.output.append(('tag', '</%s>' % tag))
43
44
45 def handle_data(self, data):
46 ''' extract the answer, if we're in an answer tag '''
47 self.output.append(('data', data))
48
49
50 def get_output(self):
51 ''' convert the output from a list of tuples to a string '''
52 if self.tag_stack:
53 self.allow_html = False
54 if not self.allow_html:
55 return ''.join(v for (k, v) in self.output if k == 'data')
56 return ''.join(v for (k, v) in self.output)
57
[end of bookwyrm/sanitize_html.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bookwyrm/sanitize_html.py b/bookwyrm/sanitize_html.py
--- a/bookwyrm/sanitize_html.py
+++ b/bookwyrm/sanitize_html.py
@@ -7,7 +7,7 @@
def __init__(self):
HTMLParser.__init__(self)
self.allowed_tags = [
- 'p', 'br',
+ 'p', 'blockquote', 'br',
'b', 'i', 'strong', 'em', 'pre',
'a', 'span', 'ul', 'ol', 'li'
]
| {"golden_diff": "diff --git a/bookwyrm/sanitize_html.py b/bookwyrm/sanitize_html.py\n--- a/bookwyrm/sanitize_html.py\n+++ b/bookwyrm/sanitize_html.py\n@@ -7,7 +7,7 @@\n def __init__(self):\n HTMLParser.__init__(self)\n self.allowed_tags = [\n- 'p', 'br',\n+ 'p', 'blockquote', 'br',\n 'b', 'i', 'strong', 'em', 'pre',\n 'a', 'span', 'ul', 'ol', 'li'\n ]\n", "issue": "Feature request - block quote formatting in reviews\nI published [a review](https://bookwyrm.social/user/darius/review/14497) just now with a markdown-style block quote in it. The second-to-last paragraph is formatted, at least when I wrote it in the dialog box, like this:\r\n\r\n```\r\n> \"I was expecting to go to prison for the rest of my life,\" Ellsberg wryly said later, \"and Ithiel wanted to know whether I understood that I'd never get another dollar from the federal government.\"\r\n```\r\n\r\nBut it renders as regular text. I'm guessing a lot of reviewers will want to excerpt their books, so block quotes seem like a good thing to include!\r\n\r\nI would expect it to render more like it does here on Github:\r\n\r\n> \"I was expecting to go to prison for the rest of my life,\" Ellsberg wryly said later, \"and Ithiel wanted to know whether I understood that I'd never get another dollar from the federal government.\"\n", "before_files": [{"content": "''' html parser to clean up incoming text from unknown sources '''\nfrom html.parser import HTMLParser\n\nclass InputHtmlParser(HTMLParser):#pylint: disable=abstract-method\n ''' Removes any html that isn't allowed_tagsed from a block '''\n\n def __init__(self):\n HTMLParser.__init__(self)\n self.allowed_tags = [\n 'p', 'br',\n 'b', 'i', 'strong', 'em', 'pre',\n 'a', 'span', 'ul', 'ol', 'li'\n ]\n self.tag_stack = []\n self.output = []\n # if the html appears invalid, we just won't allow any at all\n self.allow_html = True\n\n\n def handle_starttag(self, tag, attrs):\n ''' check if the tag is valid '''\n if self.allow_html and tag in self.allowed_tags:\n self.output.append(('tag', self.get_starttag_text()))\n self.tag_stack.append(tag)\n else:\n self.output.append(('data', ''))\n\n\n def handle_endtag(self, tag):\n ''' keep the close tag '''\n if not self.allow_html or tag not in self.allowed_tags:\n self.output.append(('data', ''))\n return\n\n if not self.tag_stack or self.tag_stack[-1] != tag:\n # the end tag doesn't match the most recent start tag\n self.allow_html = False\n self.output.append(('data', ''))\n return\n\n self.tag_stack = self.tag_stack[:-1]\n self.output.append(('tag', '</%s>' % tag))\n\n\n def handle_data(self, data):\n ''' extract the answer, if we're in an answer tag '''\n self.output.append(('data', data))\n\n\n def get_output(self):\n ''' convert the output from a list of tuples to a string '''\n if self.tag_stack:\n self.allow_html = False\n if not self.allow_html:\n return ''.join(v for (k, v) in self.output if k == 'data')\n return ''.join(v for (k, v) in self.output)\n", "path": "bookwyrm/sanitize_html.py"}]} | 1,304 | 125 |
gh_patches_debug_21318 | rasdani/github-patches | git_diff | openai__gym-1400 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Why is MultiDiscrete dtype=int8?
The MultiDiscrete dtype is hardcoded as int8.
Is this a bug? What happens when we need more than 256 categories?
```
space = MultiDiscrete([7, 1024])
space.sample()
```
https://github.com/openai/gym/blob/e944885e3b31a10fb6973093b39ff7682ef3aa3d/gym/spaces/multi_discrete.py#L10
</issue>
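
A minimal sketch of the concern, assuming only numpy (this mirrors the sampling cast in the revision the issue links; the file reproduced below has since moved to `uint32`):

```python
import numpy as np

nvec = np.array([7, 1024])
# Casting to int8 wraps anything outside -128..127, so samples from the
# 1024-way component can silently come back negative or truncated.
sample = (np.random.random_sample(nvec.shape) * nvec).astype(np.int8)
print(sample)
```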
<code>
[start of gym/spaces/multi_discrete.py]
1 import gym
2 import numpy as np
3 from .space import Space
4
5
6 class MultiDiscrete(Space):
7 """
8 - The multi-discrete action space consists of a series of discrete action spaces with different number of actions in eachs
9 - It is useful to represent game controllers or keyboards where each key can be represented as a discrete action space
10 - It is parametrized by passing an array of positive integers specifying number of actions for each discrete action space
11
12 Note: A value of 0 always need to represent the NOOP action.
13
14 e.g. Nintendo Game Controller
15 - Can be conceptualized as 3 discrete action spaces:
16
17 1) Arrow Keys: Discrete 5 - NOOP[0], UP[1], RIGHT[2], DOWN[3], LEFT[4] - params: min: 0, max: 4
18 2) Button A: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1
19 3) Button B: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1
20
21 - Can be initialized as
22
23 MultiDiscrete([ 5, 2, 2 ])
24
25 """
26 def __init__(self, nvec):
27
28 """
29 nvec: vector of counts of each categorical variable
30 """
31 assert (np.array(nvec) > 0).all(), 'nvec (counts) have to be positive'
32 self.nvec = np.asarray(nvec, dtype=np.uint32)
33
34 super(MultiDiscrete, self).__init__(self.nvec.shape, np.uint32)
35 self.np_random = np.random.RandomState()
36
37 def seed(self, seed):
38 self.np_random.seed(seed)
39
40 def sample(self):
41 return (self.np_random.random_sample(self.nvec.shape) * self.nvec).astype(self.dtype)
42
43 def contains(self, x):
44 # if nvec is uint32 and space dtype is uint32, then 0 <= x < self.nvec guarantees that x
45 # is within correct bounds for space dtype (even though x does not have to be unsigned)
46 return (0 <= x).all() and (x < self.nvec).all()
47
48 def to_jsonable(self, sample_n):
49 return [sample.tolist() for sample in sample_n]
50
51 def from_jsonable(self, sample_n):
52 return np.array(sample_n)
53
54 def __repr__(self):
55 return "MultiDiscrete({})".format(self.nvec)
56
57 def __eq__(self, other):
58 return np.all(self.nvec == other.nvec)
59
[end of gym/spaces/multi_discrete.py]
[start of gym/spaces/space.py]
1 import numpy as np
2
3
4 class Space(object):
5 """Defines the observation and action spaces, so you can write generic
6 code that applies to any Env. For example, you can choose a random
7 action.
8 """
9 def __init__(self, shape=None, dtype=None):
10 import numpy as np # takes about 300-400ms to import, so we load lazily
11 self.shape = None if shape is None else tuple(shape)
12 self.dtype = None if dtype is None else np.dtype(dtype)
13
14 def sample(self):
15 """
16 Uniformly randomly sample a random element of this space
17 """
18 raise NotImplementedError
19
20 def seed(self, seed):
21 """Set the seed for this space's pseudo-random number generator. """
22 raise NotImplementedError
23
24 def contains(self, x):
25 """
26 Return boolean specifying if x is a valid
27 member of this space
28 """
29 raise NotImplementedError
30
31 def __contains__(self, x):
32 return self.contains(x)
33
34 def to_jsonable(self, sample_n):
35 """Convert a batch of samples from this space to a JSONable data type."""
36 # By default, assume identity is JSONable
37 return sample_n
38
39 def from_jsonable(self, sample_n):
40 """Convert a JSONable data type to a batch of samples from this space."""
41 # By default, assume identity is JSONable
42 return sample_n
43
[end of gym/spaces/space.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gym/spaces/multi_discrete.py b/gym/spaces/multi_discrete.py
--- a/gym/spaces/multi_discrete.py
+++ b/gym/spaces/multi_discrete.py
@@ -1,4 +1,3 @@
-import gym
import numpy as np
from .space import Space
@@ -29,9 +28,9 @@
nvec: vector of counts of each categorical variable
"""
assert (np.array(nvec) > 0).all(), 'nvec (counts) have to be positive'
- self.nvec = np.asarray(nvec, dtype=np.uint32)
+ self.nvec = np.asarray(nvec, dtype=np.int64)
- super(MultiDiscrete, self).__init__(self.nvec.shape, np.uint32)
+ super(MultiDiscrete, self).__init__(self.nvec.shape, np.int64)
self.np_random = np.random.RandomState()
def seed(self, seed):
diff --git a/gym/spaces/space.py b/gym/spaces/space.py
--- a/gym/spaces/space.py
+++ b/gym/spaces/space.py
@@ -1,6 +1,3 @@
-import numpy as np
-
-
class Space(object):
"""Defines the observation and action spaces, so you can write generic
code that applies to any Env. For example, you can choose a random
| {"golden_diff": "diff --git a/gym/spaces/multi_discrete.py b/gym/spaces/multi_discrete.py\n--- a/gym/spaces/multi_discrete.py\n+++ b/gym/spaces/multi_discrete.py\n@@ -1,4 +1,3 @@\n-import gym\n import numpy as np\n from .space import Space\n \n@@ -29,9 +28,9 @@\n nvec: vector of counts of each categorical variable\n \"\"\"\n assert (np.array(nvec) > 0).all(), 'nvec (counts) have to be positive'\n- self.nvec = np.asarray(nvec, dtype=np.uint32)\n+ self.nvec = np.asarray(nvec, dtype=np.int64)\n \n- super(MultiDiscrete, self).__init__(self.nvec.shape, np.uint32)\n+ super(MultiDiscrete, self).__init__(self.nvec.shape, np.int64)\n self.np_random = np.random.RandomState()\n \n def seed(self, seed):\ndiff --git a/gym/spaces/space.py b/gym/spaces/space.py\n--- a/gym/spaces/space.py\n+++ b/gym/spaces/space.py\n@@ -1,6 +1,3 @@\n-import numpy as np\n-\n-\n class Space(object):\n \"\"\"Defines the observation and action spaces, so you can write generic\n code that applies to any Env. For example, you can choose a random\n", "issue": "why MultiDiscrete dtype=int8 \nMultiDiscrete dtype is hardcoded as int8. \r\nIs this a bug ? What happens when we need more than 256 category \r\n```\r\nspace = MultiDiscrete([7, 1024])\r\nspace.sample()\r\n```\r\n\r\nhttps://github.com/openai/gym/blob/e944885e3b31a10fb6973093b39ff7682ef3aa3d/gym/spaces/multi_discrete.py#L10\n", "before_files": [{"content": "import gym\nimport numpy as np\nfrom .space import Space\n\n\nclass MultiDiscrete(Space):\n \"\"\"\n - The multi-discrete action space consists of a series of discrete action spaces with different number of actions in eachs\n - It is useful to represent game controllers or keyboards where each key can be represented as a discrete action space\n - It is parametrized by passing an array of positive integers specifying number of actions for each discrete action space\n\n Note: A value of 0 always need to represent the NOOP action.\n\n e.g. 
Nintendo Game Controller\n - Can be conceptualized as 3 discrete action spaces:\n\n 1) Arrow Keys: Discrete 5 - NOOP[0], UP[1], RIGHT[2], DOWN[3], LEFT[4] - params: min: 0, max: 4\n 2) Button A: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1\n 3) Button B: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1\n\n - Can be initialized as\n\n MultiDiscrete([ 5, 2, 2 ])\n\n \"\"\"\n def __init__(self, nvec):\n \n \"\"\"\n nvec: vector of counts of each categorical variable\n \"\"\"\n assert (np.array(nvec) > 0).all(), 'nvec (counts) have to be positive'\n self.nvec = np.asarray(nvec, dtype=np.uint32)\n\n super(MultiDiscrete, self).__init__(self.nvec.shape, np.uint32)\n self.np_random = np.random.RandomState()\n\n def seed(self, seed):\n self.np_random.seed(seed)\n\n def sample(self):\n return (self.np_random.random_sample(self.nvec.shape) * self.nvec).astype(self.dtype)\n\n def contains(self, x):\n # if nvec is uint32 and space dtype is uint32, then 0 <= x < self.nvec guarantees that x\n # is within correct bounds for space dtype (even though x does not have to be unsigned)\n return (0 <= x).all() and (x < self.nvec).all()\n\n def to_jsonable(self, sample_n):\n return [sample.tolist() for sample in sample_n]\n\n def from_jsonable(self, sample_n):\n return np.array(sample_n)\n\n def __repr__(self):\n return \"MultiDiscrete({})\".format(self.nvec)\n\n def __eq__(self, other):\n return np.all(self.nvec == other.nvec)\n", "path": "gym/spaces/multi_discrete.py"}, {"content": "import numpy as np\n\n\nclass Space(object):\n \"\"\"Defines the observation and action spaces, so you can write generic\n code that applies to any Env. For example, you can choose a random\n action.\n \"\"\"\n def __init__(self, shape=None, dtype=None):\n import numpy as np # takes about 300-400ms to import, so we load lazily\n self.shape = None if shape is None else tuple(shape)\n self.dtype = None if dtype is None else np.dtype(dtype)\n\n def sample(self):\n \"\"\"\n Uniformly randomly sample a random element of this space\n \"\"\"\n raise NotImplementedError\n\n def seed(self, seed):\n \"\"\"Set the seed for this space's pseudo-random number generator. \"\"\"\n raise NotImplementedError\n\n def contains(self, x):\n \"\"\"\n Return boolean specifying if x is a valid\n member of this space\n \"\"\"\n raise NotImplementedError\n\n def __contains__(self, x):\n return self.contains(x)\n\n def to_jsonable(self, sample_n):\n \"\"\"Convert a batch of samples from this space to a JSONable data type.\"\"\"\n # By default, assume identity is JSONable\n return sample_n\n\n def from_jsonable(self, sample_n):\n \"\"\"Convert a JSONable data type to a batch of samples from this space.\"\"\"\n # By default, assume identity is JSONable\n return sample_n\n", "path": "gym/spaces/space.py"}]} | 1,764 | 315 |
gh_patches_debug_56609 | rasdani/github-patches | git_diff | spacetelescope__jwql-678 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Upgrade Django to 3.0
Django 3.0 is out, and since it is a major release, we should consider upgrading.
</issue>
<code>
[start of setup.py]
1 import numpy as np
2 from setuptools import setup
3 from setuptools import find_packages
4
5 VERSION = '0.24.0'
6
7 AUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Mike Engesser, Mees Fix, Joe Filippazzo, Bryan Hilbert, '
8 AUTHORS += 'Graham Kanarek, Teagan King, Catherine Martlin, Maria Pena-Guerrero, Johannes Sahlmann, Ben Sunnquist'
9
10 DESCRIPTION = 'The James Webb Space Telescope Quicklook Project'
11
12 DEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/jwst_reffiles#egg=jwst_reffiles']
13
14 REQUIRES = [
15 'asdf>=2.3.3',
16 'astropy>=3.2.1',
17 'astroquery>=0.3.9',
18 'authlib',
19 'bokeh>=1.0,<1.4',
20 'codecov',
21 'crds',
22 'cryptography',
23 'django>=2.0,<3.0',
24 'flake8',
25 'inflection',
26 'ipython',
27 'jinja2',
28 'jsonschema',
29 'jwedb>=0.0.3',
30 'jwst',
31 'matplotlib',
32 'nodejs',
33 'numpy',
34 'numpydoc',
35 'pandas',
36 'psycopg2',
37 'pysiaf',
38 'pytest',
39 'pytest-cov',
40 'scipy',
41 'sphinx',
42 'sqlalchemy',
43 'stsci_rtd_theme',
44 'twine',
45 'wtforms'
46 ]
47
48 setup(
49 name='jwql',
50 version=VERSION,
51 description=DESCRIPTION,
52 url='https://github.com/spacetelescope/jwql.git',
53 author=AUTHORS,
54 author_email='[email protected]',
55 license='BSD',
56 keywords=['astronomy', 'python'],
57 classifiers=['Programming Language :: Python'],
58 packages=find_packages(),
59 install_requires=REQUIRES,
60 dependency_links=DEPENDENCY_LINKS,
61 include_package_data=True,
62 include_dirs=[np.get_include()],
63 )
64
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -20,7 +20,7 @@
'codecov',
'crds',
'cryptography',
- 'django>=2.0,<3.0',
+ 'django',
'flake8',
'inflection',
'ipython',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -20,7 +20,7 @@\n 'codecov',\n 'crds',\n 'cryptography',\n- 'django>=2.0,<3.0',\n+ 'django',\n 'flake8',\n 'inflection',\n 'ipython',\n", "issue": "Upgrade Django to 3.0\nDjango 3.0 is out, and since it is a major release, we should consider upgrading to this.\n", "before_files": [{"content": "import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.24.0'\n\nAUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Mike Engesser, Mees Fix, Joe Filippazzo, Bryan Hilbert, '\nAUTHORS += 'Graham Kanarek, Teagan King, Catherine Martlin, Maria Pena-Guerrero, Johannes Sahlmann, Ben Sunnquist'\n\nDESCRIPTION = 'The James Webb Space Telescope Quicklook Project'\n\nDEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/jwst_reffiles#egg=jwst_reffiles']\n\nREQUIRES = [\n 'asdf>=2.3.3',\n 'astropy>=3.2.1',\n 'astroquery>=0.3.9',\n 'authlib',\n 'bokeh>=1.0,<1.4',\n 'codecov',\n 'crds',\n 'cryptography',\n 'django>=2.0,<3.0',\n 'flake8',\n 'inflection',\n 'ipython',\n 'jinja2',\n 'jsonschema',\n 'jwedb>=0.0.3',\n 'jwst',\n 'matplotlib',\n 'nodejs',\n 'numpy',\n 'numpydoc',\n 'pandas',\n 'psycopg2',\n 'pysiaf',\n 'pytest',\n 'pytest-cov',\n 'scipy',\n 'sphinx',\n 'sqlalchemy',\n 'stsci_rtd_theme',\n 'twine',\n 'wtforms'\n]\n\nsetup(\n name='jwql',\n version=VERSION,\n description=DESCRIPTION,\n url='https://github.com/spacetelescope/jwql.git',\n author=AUTHORS,\n author_email='[email protected]',\n license='BSD',\n keywords=['astronomy', 'python'],\n classifiers=['Programming Language :: Python'],\n packages=find_packages(),\n install_requires=REQUIRES,\n dependency_links=DEPENDENCY_LINKS,\n include_package_data=True,\n include_dirs=[np.get_include()],\n)\n", "path": "setup.py"}]} | 1,142 | 81 |
gh_patches_debug_47926 | rasdani/github-patches | git_diff | uccser__cs-unplugged-717 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Work around Crowdin bug where integer YAML keys are not preserved
When downloading in-context localisation files, integer keys in YAML files are not preserved. This is only an issue in the file `topics/content/en/programming-challenges-structure-difficulties.yaml`, which uses the difficulty number as the key.
As a workaround, we can use the string value of the integer as the key, i.e. `"0"` instead of `0`.
</issue>
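
A small illustration of the proposed workaround, assuming PyYAML (which the loader below relies on via `load_yaml_file`): quoting the keys makes them round-trip as strings, and the loader can convert back with `int()` where a numeric `level` is needed.

```python
import yaml

as_ints = yaml.safe_load("0: Beginner\n1: Growing")      # keys load as 0, 1
as_strs = yaml.safe_load('"0": Beginner\n"1": Growing')  # keys load as "0", "1"
print(list(as_ints), list(as_strs))  # [0, 1] ['0', '1']
```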
<code>
[start of csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py]
1 """Custom loader for loading structure of programming challenges."""
2
3 import os
4 from django.db import transaction
5 from utils.errors.MissingRequiredFieldError import MissingRequiredFieldError
6 from utils.TranslatableModelLoader import TranslatableModelLoader
7 from topics.models import ProgrammingChallengeLanguage, ProgrammingChallengeDifficulty
8
9
10 class ProgrammingChallengesStructureLoader(TranslatableModelLoader):
11 """Custom loader for loading structure of programming challenges."""
12
13 @transaction.atomic
14 def load(self):
15 """Load the content for structure of programming challenges.
16
17 Raises:
18 MissingRequiredFieldError: when no object can be found with the matching
19 attribute.
20 """
21 structure = self.load_yaml_file(self.structure_file_path)
22
23 prog_languages = structure.get("languages", None)
24 difficulty_levels = structure.get("difficulties", None)
25 if None in [prog_languages, difficulty_levels]:
26 raise MissingRequiredFieldError(
27 self.structure_file_path,
28 ["lanugages", "difficulties"],
29 "Programming Challenge Structure"
30 )
31
32 # Add "-languages" to the structure filename
33 prog_languages_translation_filename = "{}-languages.yaml".format(
34 os.path.splitext(self.structure_filename)[0]
35 )
36 prog_languages_translations = self.get_yaml_translations(
37 prog_languages_translation_filename,
38 required_slugs=prog_languages.keys(),
39 required_fields=["name"]
40 )
41
42 for (prog_language, prog_language_data) in prog_languages.items():
43
44 if prog_language_data is None:
45 raise MissingRequiredFieldError(
46 self.structure_file_path,
47 ["number"],
48 "Programming Challenge Language"
49 )
50
51 # Check for required fields
52 prog_language_number = prog_language_data.get("number", None)
53 if prog_language_number is None:
54 raise MissingRequiredFieldError(
55 self.structure_file_path,
56 ["number"],
57 "Programming Challenge Language"
58 )
59
60 # Check if icon is given
61 if "icon" in prog_language_data:
62 prog_language_icon = prog_language_data["icon"]
63 else:
64 prog_language_icon = None
65
66 new_prog_language = ProgrammingChallengeLanguage(
67 slug=prog_language,
68 number=prog_language_number,
69 icon=prog_language_icon
70 )
71
72 translations = prog_languages_translations.get(prog_language, dict())
73 self.populate_translations(new_prog_language, translations)
74 self.mark_translation_availability(new_prog_language, required_fields=["name"])
75 new_prog_language.save()
76
77 self.log("Added programming language: {}".format(new_prog_language.__str__()))
78
79 # Add "-languages" to the structure filename
80 difficulties_translation_filename = "{}-difficulties.yaml".format(
81 os.path.splitext(self.structure_filename)[0]
82 )
83 difficulties_translations = self.get_yaml_translations(
84 difficulties_translation_filename,
85 required_slugs=difficulty_levels,
86 required_fields=["name"],
87 )
88
89 for difficulty in difficulty_levels:
90
91 new_difficulty = ProgrammingChallengeDifficulty(
92 level=difficulty,
93 )
94
95 translations = difficulties_translations.get(difficulty, dict())
96 self.populate_translations(new_difficulty, translations)
97 self.mark_translation_availability(new_difficulty, required_fields=["name"])
98 new_difficulty.save()
99
100 self.log("Added programming difficulty level: {}".format(new_difficulty.__str__()))
101
102 self.log("")
103
[end of csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py b/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py
--- a/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py
+++ b/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py
@@ -89,7 +89,7 @@
for difficulty in difficulty_levels:
new_difficulty = ProgrammingChallengeDifficulty(
- level=difficulty,
+ level=int(difficulty),
)
translations = difficulties_translations.get(difficulty, dict())
| {"golden_diff": "diff --git a/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py b/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py\n--- a/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py\n+++ b/csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py\n@@ -89,7 +89,7 @@\n for difficulty in difficulty_levels:\n \n new_difficulty = ProgrammingChallengeDifficulty(\n- level=difficulty,\n+ level=int(difficulty),\n )\n \n translations = difficulties_translations.get(difficulty, dict())\n", "issue": "Workaround Crowdin bug where integer yaml keys are not preserved\nWhen downloading in-context localisation files, integer keys in YAML files are not preserved. This is only an issue in the file `topics/content/en/programming-challenges-structure-difficulties.yaml`, which uses the difficulty number as the key.\r\n\r\nAs a work around, we can use the string value of the integer as the key, i.e. `\"0\"` instead of `0`\n", "before_files": [{"content": "\"\"\"Custom loader for loading structure of programming challenges.\"\"\"\n\nimport os\nfrom django.db import transaction\nfrom utils.errors.MissingRequiredFieldError import MissingRequiredFieldError\nfrom utils.TranslatableModelLoader import TranslatableModelLoader\nfrom topics.models import ProgrammingChallengeLanguage, ProgrammingChallengeDifficulty\n\n\nclass ProgrammingChallengesStructureLoader(TranslatableModelLoader):\n \"\"\"Custom loader for loading structure of programming challenges.\"\"\"\n\n @transaction.atomic\n def load(self):\n \"\"\"Load the content for structure of programming challenges.\n\n Raises:\n MissingRequiredFieldError: when no object can be found with the matching\n attribute.\n \"\"\"\n structure = self.load_yaml_file(self.structure_file_path)\n\n prog_languages = structure.get(\"languages\", None)\n difficulty_levels = structure.get(\"difficulties\", None)\n if None in [prog_languages, difficulty_levels]:\n raise MissingRequiredFieldError(\n self.structure_file_path,\n [\"lanugages\", \"difficulties\"],\n \"Programming Challenge Structure\"\n )\n\n # Add \"-languages\" to the structure filename\n prog_languages_translation_filename = \"{}-languages.yaml\".format(\n os.path.splitext(self.structure_filename)[0]\n )\n prog_languages_translations = self.get_yaml_translations(\n prog_languages_translation_filename,\n required_slugs=prog_languages.keys(),\n required_fields=[\"name\"]\n )\n\n for (prog_language, prog_language_data) in prog_languages.items():\n\n if prog_language_data is None:\n raise MissingRequiredFieldError(\n self.structure_file_path,\n [\"number\"],\n \"Programming Challenge Language\"\n )\n\n # Check for required fields\n prog_language_number = prog_language_data.get(\"number\", None)\n if prog_language_number is None:\n raise MissingRequiredFieldError(\n self.structure_file_path,\n [\"number\"],\n \"Programming Challenge Language\"\n )\n\n # Check if icon is given\n if \"icon\" in prog_language_data:\n prog_language_icon = prog_language_data[\"icon\"]\n else:\n prog_language_icon = None\n\n new_prog_language = ProgrammingChallengeLanguage(\n slug=prog_language,\n number=prog_language_number,\n icon=prog_language_icon\n )\n\n translations = prog_languages_translations.get(prog_language, dict())\n self.populate_translations(new_prog_language, translations)\n self.mark_translation_availability(new_prog_language, required_fields=[\"name\"])\n new_prog_language.save()\n\n self.log(\"Added programming 
language: {}\".format(new_prog_language.__str__()))\n\n # Add \"-languages\" to the structure filename\n difficulties_translation_filename = \"{}-difficulties.yaml\".format(\n os.path.splitext(self.structure_filename)[0]\n )\n difficulties_translations = self.get_yaml_translations(\n difficulties_translation_filename,\n required_slugs=difficulty_levels,\n required_fields=[\"name\"],\n )\n\n for difficulty in difficulty_levels:\n\n new_difficulty = ProgrammingChallengeDifficulty(\n level=difficulty,\n )\n\n translations = difficulties_translations.get(difficulty, dict())\n self.populate_translations(new_difficulty, translations)\n self.mark_translation_availability(new_difficulty, required_fields=[\"name\"])\n new_difficulty.save()\n\n self.log(\"Added programming difficulty level: {}\".format(new_difficulty.__str__()))\n\n self.log(\"\")\n", "path": "csunplugged/topics/management/commands/_ProgrammingChallengesStructureLoader.py"}]} | 1,538 | 134 |
gh_patches_debug_11571 | rasdani/github-patches | git_diff | microsoft__hi-ml-78 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make the package tag-line on PyPi more concrete
Javier pointed out that our tagline, _Microsoft Health Intelligence AzureML helpers_, on https://pypi.org/manage/project/hi-ml/releases/ is too generic.
</issue>
<code>
[start of setup.py]
1 # ------------------------------------------------------------------------------------------
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
4 # ------------------------------------------------------------------------------------------
5
6 """A setuptools based setup module.
7
8 See:
9 https://packaging.python.org/guides/distributing-packages-using-setuptools/
10 """
11
12 import os
13 from math import floor
14 import pathlib
15 from random import random
16 from setuptools import setup, find_packages # type: ignore
17
18
19 here = pathlib.Path(__file__).parent.resolve()
20
21 # Get the long description from the README file
22 long_description = (here / 'README.md').read_text(encoding='utf-8')
23
24 version = ''
25
26 # If running from a GitHub Action then a standard set of environment variables will be
27 # populated (https://docs.github.com/en/actions/reference/environment-variables#default-environment-variables).
28 # In particular, GITHUB_REF is the branch or tag ref that triggered the workflow.
29 # If this was triggered by a tagged commit then GITHUB_REF will be: 'ref/tags/new_tag'.
30 # Extract this tag and use it as a version string
31 # See also:
32 # https://packaging.python.org/guides/publishing-package-distribution-releases-using-github-actions-ci-cd-workflows/
33 # https://github.com/pypa/gh-action-pypi-publish
34 GITHUB_REF_TAG_COMMIT = 'refs/tags/'
35
36 github_ref = os.getenv('GITHUB_REF')
37 if github_ref and github_ref.startswith(GITHUB_REF_TAG_COMMIT):
38 version = github_ref[len(GITHUB_REF_TAG_COMMIT):]
39
40 # Otherwise, if running from a GitHub Action, but not a tagged commit then GITHUB_RUN_NUMBER will be populated.
41 # Use this as a post release number. For example if GITHUB_RUN_NUMBER = 124 then the version string will be
42 # '0.1.2.post124'. Although this is discouraged, see:
43 # https://www.python.org/dev/peps/pep-0440/#post-releases
44 # it is necessary here to avoid duplicate packages in Test.PyPI.
45 if not version:
46 # TODO: Replace this with more principled package version management for the package wheels built during local test
47 # runs, one which circumvents AzureML's apparent package caching:
48 build_number = os.getenv('GITHUB_RUN_NUMBER')
49 if build_number:
50 version = '0.1.0.post' + build_number
51 else:
52 default_random_version_number = floor(random() * 10_000_000_000)
53 version = f'0.1.0.post{str(default_random_version_number)}'
54
55 (here / 'latest_version.txt').write_text(version)
56
57 # Read run_requirements.txt to get install_requires
58 install_requires = (here / 'run_requirements.txt').read_text().split("\n")
59 # Remove any whitespace and blank lines
60 install_requires = [line.strip() for line in install_requires if line.strip()]
61
62 setup(
63 name='hi-ml',
64 version=version,
65 description='Microsoft Health Intelligence AzureML helpers',
66 long_description=long_description,
67 long_description_content_type='text/markdown',
68 url='https://github.com/microsoft/hi-ml',
69 author="Microsoft Research Cambridge InnerEye Team ",
70 author_email="[email protected]",
71 classifiers=[
72 'Development Status :: 3 - Alpha',
73 'Intended Audience :: Science/Research',
74 "Topic :: Scientific/Engineering :: Medical Science Apps.",
75 'License :: OSI Approved :: MIT License',
76 'Programming Language :: Python :: 3.7'
77 ],
78 keywords='InnerEye, HealthIntelligence, AzureML',
79 license='MIT License',
80 packages=find_packages(where="src"),
81 package_dir={"": "src"},
82 include_package_data=True,
83 install_requires=install_requires,
84 scripts=['src/health/azure/run_tensorboard.py']
85 )
86
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -59,10 +59,12 @@
# Remove any whitespace and blank lines
install_requires = [line.strip() for line in install_requires if line.strip()]
+description = 'Microsoft Health Intelligence package to elevate and monitor scripts to an AzureML workspace'
+
setup(
name='hi-ml',
version=version,
- description='Microsoft Health Intelligence AzureML helpers',
+ description=description,
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/microsoft/hi-ml',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -59,10 +59,12 @@\n # Remove any whitespace and blank lines\n install_requires = [line.strip() for line in install_requires if line.strip()]\n \n+description = 'Microsoft Health Intelligence package to elevate and monitor scripts to an AzureML workspace'\n+\n setup(\n name='hi-ml',\n version=version,\n- description='Microsoft Health Intelligence AzureML helpers',\n+ description=description,\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/microsoft/hi-ml',\n", "issue": "Make the package tag-line on PyPi more concrete\nJavier pointed out that our tagline, _Microsoft Health Intelligence AzureML helpers_, on https://pypi.org/manage/project/hi-ml/releases/ is too generic.\n", "before_files": [{"content": "# ------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.\n# ------------------------------------------------------------------------------------------\n\n\"\"\"A setuptools based setup module.\n\nSee:\nhttps://packaging.python.org/guides/distributing-packages-using-setuptools/\n\"\"\"\n\nimport os\nfrom math import floor\nimport pathlib\nfrom random import random\nfrom setuptools import setup, find_packages # type: ignore\n\n\nhere = pathlib.Path(__file__).parent.resolve()\n\n# Get the long description from the README file\nlong_description = (here / 'README.md').read_text(encoding='utf-8')\n\nversion = ''\n\n# If running from a GitHub Action then a standard set of environment variables will be\n# populated (https://docs.github.com/en/actions/reference/environment-variables#default-environment-variables).\n# In particular, GITHUB_REF is the branch or tag ref that triggered the workflow.\n# If this was triggered by a tagged commit then GITHUB_REF will be: 'ref/tags/new_tag'.\n# Extract this tag and use it as a version string\n# See also:\n# https://packaging.python.org/guides/publishing-package-distribution-releases-using-github-actions-ci-cd-workflows/\n# https://github.com/pypa/gh-action-pypi-publish\nGITHUB_REF_TAG_COMMIT = 'refs/tags/'\n\ngithub_ref = os.getenv('GITHUB_REF')\nif github_ref and github_ref.startswith(GITHUB_REF_TAG_COMMIT):\n version = github_ref[len(GITHUB_REF_TAG_COMMIT):]\n\n# Otherwise, if running from a GitHub Action, but not a tagged commit then GITHUB_RUN_NUMBER will be populated.\n# Use this as a post release number. For example if GITHUB_RUN_NUMBER = 124 then the version string will be\n# '0.1.2.post124'. 
Although this is discouraged, see:\n# https://www.python.org/dev/peps/pep-0440/#post-releases\n# it is necessary here to avoid duplicate packages in Test.PyPI.\nif not version:\n # TODO: Replace this with more principled package version management for the package wheels built during local test\n # runs, one which circumvents AzureML's apparent package caching:\n build_number = os.getenv('GITHUB_RUN_NUMBER')\n if build_number:\n version = '0.1.0.post' + build_number\n else:\n default_random_version_number = floor(random() * 10_000_000_000)\n version = f'0.1.0.post{str(default_random_version_number)}'\n\n(here / 'latest_version.txt').write_text(version)\n\n# Read run_requirements.txt to get install_requires\ninstall_requires = (here / 'run_requirements.txt').read_text().split(\"\\n\")\n# Remove any whitespace and blank lines\ninstall_requires = [line.strip() for line in install_requires if line.strip()]\n\nsetup(\n name='hi-ml',\n version=version,\n description='Microsoft Health Intelligence AzureML helpers',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/microsoft/hi-ml',\n author=\"Microsoft Research Cambridge InnerEye Team \",\n author_email=\"[email protected]\",\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n \"Topic :: Scientific/Engineering :: Medical Science Apps.\",\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.7'\n ],\n keywords='InnerEye, HealthIntelligence, AzureML',\n license='MIT License',\n packages=find_packages(where=\"src\"),\n package_dir={\"\": \"src\"},\n include_package_data=True,\n install_requires=install_requires,\n scripts=['src/health/azure/run_tensorboard.py']\n)\n", "path": "setup.py"}]} | 1,566 | 142 |
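For context, the `description` argument in the diff above is exactly the tagline PyPI displays; once the wheel is installed it can be read back through the standard library (Python 3.8+, assuming the package is installed):

```python
from importlib.metadata import metadata

# 'Summary' is the core-metadata field populated from setup()'s description.
print(metadata("hi-ml")["Summary"])
```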
gh_patches_debug_21805 | rasdani/github-patches | git_diff | psychopy__psychopy-4622 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
404 errors on some of the pages in the HELP menus
Version used
- psychopy: 2021.2.3
These links from the HELP menus return 404 errors:
- https://www.psychopy.org/builder/builder.html
- https://www.psychopy.org/api/api.html
</issue>
<code>
[start of psychopy/app/urls.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """A central location to store information about urls
5 """
6 urls = dict()
7
8 # links based on string names
9 urls['builder'] = "https://www.psychopy.org/builder/builder.html"
10 urls['builder.loops'] = "https://www.psychopy.org/builder/flow.html#loops"
11 # NB. builder components get their urls defined by the component code
12 # (so a custom component can have a url)
13
14 urls['downloads'] = "https://github.com/psychopy/psychopy/releases"
15 urls['changelog'] = "https://www.psychopy.org/changelog.html"
16
17 general = "https://www.psychopy.org/general/"
18 urls['prefs'] = general + "prefs.html"
19 urls['prefs.general'] = general + "prefs.html#general-settings"
20 urls['prefs.app'] = general + "prefs.html#application-settings"
21 urls['prefs.coder'] = general + "prefs.html#coder-settings"
22 urls['prefs.builder'] = general + "prefs.html#builder-settings"
23 urls['prefs.connections'] = general + "prefs.html#connection-settings"
24
25 # links keyed by wxIDs (e.g. menu item IDs)
26 urls['psychopyHome'] = "https://www.psychopy.org/"
27 urls['psychopyReference'] = "https://www.psychopy.org/api/api.html"
28 urls['coderTutorial'] = "https://www.psychopy.org/coder/tutorial1.html"
29 urls['builderHelp'] = urls['builder']
30 urls['builderDemos'] = "http://code.google.com/p/psychopy/downloads/list?can=2&q=demos"
31 urls['projsAbout'] = "https://www.psychopy.org/general/projects.html"
32
[end of psychopy/app/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/psychopy/app/urls.py b/psychopy/app/urls.py
--- a/psychopy/app/urls.py
+++ b/psychopy/app/urls.py
@@ -6,7 +6,7 @@
urls = dict()
# links based on string names
-urls['builder'] = "https://www.psychopy.org/builder/builder.html"
+urls['builder'] = "https://www.psychopy.org/builder"
urls['builder.loops'] = "https://www.psychopy.org/builder/flow.html#loops"
# NB. builder components get their urls defined by the component code
# (so a custom component can have a url)
@@ -24,7 +24,7 @@
# links keyed by wxIDs (e.g. menu item IDs)
urls['psychopyHome'] = "https://www.psychopy.org/"
-urls['psychopyReference'] = "https://www.psychopy.org/api/api.html"
+urls['psychopyReference'] = "https://www.psychopy.org/api"
urls['coderTutorial'] = "https://www.psychopy.org/coder/tutorial1.html"
urls['builderHelp'] = urls['builder']
urls['builderDemos'] = "http://code.google.com/p/psychopy/downloads/list?can=2&q=demos"
| {"golden_diff": "diff --git a/psychopy/app/urls.py b/psychopy/app/urls.py\n--- a/psychopy/app/urls.py\n+++ b/psychopy/app/urls.py\n@@ -6,7 +6,7 @@\n urls = dict()\n \n # links based on string names\n-urls['builder'] = \"https://www.psychopy.org/builder/builder.html\"\n+urls['builder'] = \"https://www.psychopy.org/builder\"\n urls['builder.loops'] = \"https://www.psychopy.org/builder/flow.html#loops\"\n # NB. builder components get their urls defined by the component code\n # (so a custom component can have a url)\n@@ -24,7 +24,7 @@\n \n # links keyed by wxIDs (e.g. menu item IDs)\n urls['psychopyHome'] = \"https://www.psychopy.org/\"\n-urls['psychopyReference'] = \"https://www.psychopy.org/api/api.html\"\n+urls['psychopyReference'] = \"https://www.psychopy.org/api\"\n urls['coderTutorial'] = \"https://www.psychopy.org/coder/tutorial1.html\"\n urls['builderHelp'] = urls['builder']\n urls['builderDemos'] = \"http://code.google.com/p/psychopy/downloads/list?can=2&q=demos\"\n", "issue": "404 errors on some of the page in the HELP menus\nVersion used\r\n- psychopy: 2021.2.3\r\n\r\nhttps://www.psychopy.org/builder/builder.html\r\nhttps://www.psychopy.org/api/api.html\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"A central location to store information about urls\n\"\"\"\nurls = dict()\n\n# links based on string names\nurls['builder'] = \"https://www.psychopy.org/builder/builder.html\"\nurls['builder.loops'] = \"https://www.psychopy.org/builder/flow.html#loops\"\n# NB. builder components get their urls defined by the component code\n# (so a custom component can have a url)\n\nurls['downloads'] = \"https://github.com/psychopy/psychopy/releases\"\nurls['changelog'] = \"https://www.psychopy.org/changelog.html\"\n\ngeneral = \"https://www.psychopy.org/general/\"\nurls['prefs'] = general + \"prefs.html\"\nurls['prefs.general'] = general + \"prefs.html#general-settings\"\nurls['prefs.app'] = general + \"prefs.html#application-settings\"\nurls['prefs.coder'] = general + \"prefs.html#coder-settings\"\nurls['prefs.builder'] = general + \"prefs.html#builder-settings\"\nurls['prefs.connections'] = general + \"prefs.html#connection-settings\"\n\n# links keyed by wxIDs (e.g. menu item IDs)\nurls['psychopyHome'] = \"https://www.psychopy.org/\"\nurls['psychopyReference'] = \"https://www.psychopy.org/api/api.html\"\nurls['coderTutorial'] = \"https://www.psychopy.org/coder/tutorial1.html\"\nurls['builderHelp'] = urls['builder']\nurls['builderDemos'] = \"http://code.google.com/p/psychopy/downloads/list?can=2&q=demos\"\nurls['projsAbout'] = \"https://www.psychopy.org/general/projects.html\"\n", "path": "psychopy/app/urls.py"}]} | 1,017 | 286 |
gh_patches_debug_30758 | rasdani/github-patches | git_diff | opendatacube__datacube-core-1061 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Example PickleWriterDriver does not support URIs
### Expected behaviour
Using the example `pickles` driver should allow integration tests such as `integration_tests/test_end_to_end` to run successfully. This assumes the example dcio driver has been installed first.
### Actual behaviour
The test fails because a URI gets passed to `PickleWriterDriver.write_dataset_to_storage()`, which still expects a filepath string. Also, this URI may contain intermediate directories that need creating.
### Steps to reproduce the behaviour
- Set `pickles` as driver name in, e.g., `datacube-core/docs/config_samples/ingester/ls5_nbar_albers.yaml(106)`
- Run `pytest integration_tests/test_end_to_end.py`
- First error: `AttributeError: 'PickleWriterDriver' object has no attribute 'mk_uri'` but subsequent errors happen due to missing intermediate directories.
### Environment information
- `Open Data Cube core, version 1.8.4.dev52+g07bc51a5.d20210222`
- Docker image: `opendatacube/datacube-tests:latest`
</issue>
<code>
[start of examples/io_plugin/dcio_example/pickles.py]
1 # This file is part of the Open Data Cube, see https://opendatacube.org for more information
2 #
3 # Copyright (c) 2015-2020 ODC Contributors
4 # SPDX-License-Identifier: Apache-2.0
5 """ Example reader plugin
6 """
7 from contextlib import contextmanager
8 import pickle
9
10
11 PROTOCOL = 'file'
12 FORMAT = 'pickle'
13
14
15 def uri_split(uri):
16 loc = uri.find('://')
17 if loc < 0:
18 return uri, PROTOCOL
19 return uri[loc+3:], uri[:loc]
20
21
22 class PickleDataSource(object):
23 class BandDataSource(object):
24 def __init__(self, da):
25 self._da = da
26 self.nodata = da.nodata
27
28 @property
29 def crs(self):
30 return self._da.crs
31
32 @property
33 def transform(self):
34 return self._da.affine
35
36 @property
37 def dtype(self):
38 return self._da.dtype
39
40 @property
41 def shape(self):
42 return self._da.shape
43
44 def read(self, window=None, out_shape=None):
45 if window is None:
46 data = self._da.values
47 else:
48 rows, cols = [slice(*w) for w in window]
49 data = self._da.values[rows, cols]
50
51 if out_shape is None or out_shape == data.shape:
52 return data
53
54 raise NotImplementedError('Native reading not supported for this data source')
55
56 def __init__(self, band):
57 self._band = band
58 uri = band.uri
59 self._filename, protocol = uri_split(uri)
60
61 if protocol not in [PROTOCOL, 'pickle']:
62 raise ValueError('Expected file:// or pickle:// url')
63
64 @contextmanager
65 def open(self):
66 with open(self._filename, 'rb') as f:
67 ds = pickle.load(f)
68
69 yield PickleDataSource.BandDataSource(ds[self._band.name].isel(time=0))
70
71
72 class PickleReaderDriver(object):
73 def __init__(self):
74 self.name = 'PickleReader'
75 self.protocols = [PROTOCOL, 'pickle']
76 self.formats = [FORMAT]
77
78 def supports(self, protocol, fmt):
79 return (protocol in self.protocols and
80 fmt in self.formats)
81
82 def new_datasource(self, band):
83 return PickleDataSource(band)
84
85
86 def rdr_driver_init():
87 return PickleReaderDriver()
88
89
90 class PickleWriterDriver(object):
91 def __init__(self):
92 pass
93
94 @property
95 def aliases(self):
96 return ['pickles']
97
98 @property
99 def format(self):
100 return FORMAT
101
102 @property
103 def uri_scheme(self):
104 return PROTOCOL
105
106 def write_dataset_to_storage(self, dataset, filename,
107 global_attributes=None,
108 variable_params=None,
109 storage_config=None,
110 **kwargs):
111 with open(filename, 'wb') as f:
112 pickle.dump(dataset, f)
113 return {}
114
115
116 def writer_driver_init():
117 return PickleWriterDriver()
118
[end of examples/io_plugin/dcio_example/pickles.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/io_plugin/dcio_example/pickles.py b/examples/io_plugin/dcio_example/pickles.py
--- a/examples/io_plugin/dcio_example/pickles.py
+++ b/examples/io_plugin/dcio_example/pickles.py
@@ -4,9 +4,12 @@
# SPDX-License-Identifier: Apache-2.0
""" Example reader plugin
"""
-from contextlib import contextmanager
import pickle
+from contextlib import contextmanager
+from pathlib import Path
+from urllib.parse import urlsplit
+from datacube.utils.uris import normalise_path
PROTOCOL = 'file'
FORMAT = 'pickle'
@@ -103,12 +106,33 @@
def uri_scheme(self):
return PROTOCOL
- def write_dataset_to_storage(self, dataset, filename,
+ def mk_uri(self, file_path, storage_config):
+ """
+ Constructs a URI from the file_path and storage config.
+
+ A typical implementation should return f'{scheme}://{file_path}'
+
+ Example:
+ file_path = '/path/to/my_file.pickled'
+ storage_config = {'driver': 'pickles'}
+
+ mk_uri(file_path, storage_config) should return 'file:///path/to/my_file.pickled'
+
+ :param Path file_path: The file path of the file to be converted into a URI.
+ :param dict storage_config: The dict holding the storage config found in the ingest definition.
+ :return: file_path as a URI that the Driver understands.
+ :rtype: str
+ """
+ return normalise_path(file_path).as_uri()
+
+ def write_dataset_to_storage(self, dataset, file_uri,
global_attributes=None,
variable_params=None,
storage_config=None,
**kwargs):
- with open(filename, 'wb') as f:
+ filepath = Path(urlsplit(file_uri).path)
+ filepath.parent.mkdir(parents=True, exist_ok=True)
+ with filepath.open('wb') as f:
pickle.dump(dataset, f)
return {}
| {"golden_diff": "diff --git a/examples/io_plugin/dcio_example/pickles.py b/examples/io_plugin/dcio_example/pickles.py\n--- a/examples/io_plugin/dcio_example/pickles.py\n+++ b/examples/io_plugin/dcio_example/pickles.py\n@@ -4,9 +4,12 @@\n # SPDX-License-Identifier: Apache-2.0\n \"\"\" Example reader plugin\n \"\"\"\n-from contextlib import contextmanager\n import pickle\n+from contextlib import contextmanager\n+from pathlib import Path\n+from urllib.parse import urlsplit\n \n+from datacube.utils.uris import normalise_path\n \n PROTOCOL = 'file'\n FORMAT = 'pickle'\n@@ -103,12 +106,33 @@\n def uri_scheme(self):\n return PROTOCOL\n \n- def write_dataset_to_storage(self, dataset, filename,\n+ def mk_uri(self, file_path, storage_config):\n+ \"\"\"\n+ Constructs a URI from the file_path and storage config.\n+\n+ A typical implementation should return f'{scheme}://{file_path}'\n+\n+ Example:\n+ file_path = '/path/to/my_file.pickled'\n+ storage_config = {'driver': 'pickles'}\n+\n+ mk_uri(file_path, storage_config) should return 'file:///path/to/my_file.pickled'\n+\n+ :param Path file_path: The file path of the file to be converted into a URI.\n+ :param dict storage_config: The dict holding the storage config found in the ingest definition.\n+ :return: file_path as a URI that the Driver understands.\n+ :rtype: str\n+ \"\"\"\n+ return normalise_path(file_path).as_uri()\n+\n+ def write_dataset_to_storage(self, dataset, file_uri,\n global_attributes=None,\n variable_params=None,\n storage_config=None,\n **kwargs):\n- with open(filename, 'wb') as f:\n+ filepath = Path(urlsplit(file_uri).path)\n+ filepath.parent.mkdir(parents=True, exist_ok=True)\n+ with filepath.open('wb') as f:\n pickle.dump(dataset, f)\n return {}\n", "issue": "Example PickleWriterDriver does not support URIs\n### Expected behaviour\r\nUsing the example `pickles` driver should allow to successfully run, e.g., `integration_tests/test_end_to_end`. This is assuming the example dcio driver has been installed first.\r\n\r\n### Actual behaviour\r\nThe test fails because a URI gets passed to `PickleWriterDriver.write_dataset_to_storage()` which still expects a filepath string. 
Also, this URI may contain intermediate directories that need creating.\r\n\r\n### Steps to reproduce the behaviour\r\n- Set `pickles` as driver name in, e.g., `datacube-core/docs/config_samples/ingester/ls5_nbar_albers.yaml(106)`\r\n- Run `pytest integration_tests/test_end_to_end.py`\r\n- First error: `AttributeError: 'PickleWriterDriver' object has no attribute 'mk_uri'` but subsequent errors happen due to missing intermediate directories.\r\n\r\n### Environment information\r\n- `Open Data Cube core, version 1.8.4.dev52+g07bc51a5.d20210222`\r\n- Docker image: `opendatacube/datacube-tests:latest`\n", "before_files": [{"content": "# This file is part of the Open Data Cube, see https://opendatacube.org for more information\n#\n# Copyright (c) 2015-2020 ODC Contributors\n# SPDX-License-Identifier: Apache-2.0\n\"\"\" Example reader plugin\n\"\"\"\nfrom contextlib import contextmanager\nimport pickle\n\n\nPROTOCOL = 'file'\nFORMAT = 'pickle'\n\n\ndef uri_split(uri):\n loc = uri.find('://')\n if loc < 0:\n return uri, PROTOCOL\n return uri[loc+3:], uri[:loc]\n\n\nclass PickleDataSource(object):\n class BandDataSource(object):\n def __init__(self, da):\n self._da = da\n self.nodata = da.nodata\n\n @property\n def crs(self):\n return self._da.crs\n\n @property\n def transform(self):\n return self._da.affine\n\n @property\n def dtype(self):\n return self._da.dtype\n\n @property\n def shape(self):\n return self._da.shape\n\n def read(self, window=None, out_shape=None):\n if window is None:\n data = self._da.values\n else:\n rows, cols = [slice(*w) for w in window]\n data = self._da.values[rows, cols]\n\n if out_shape is None or out_shape == data.shape:\n return data\n\n raise NotImplementedError('Native reading not supported for this data source')\n\n def __init__(self, band):\n self._band = band\n uri = band.uri\n self._filename, protocol = uri_split(uri)\n\n if protocol not in [PROTOCOL, 'pickle']:\n raise ValueError('Expected file:// or pickle:// url')\n\n @contextmanager\n def open(self):\n with open(self._filename, 'rb') as f:\n ds = pickle.load(f)\n\n yield PickleDataSource.BandDataSource(ds[self._band.name].isel(time=0))\n\n\nclass PickleReaderDriver(object):\n def __init__(self):\n self.name = 'PickleReader'\n self.protocols = [PROTOCOL, 'pickle']\n self.formats = [FORMAT]\n\n def supports(self, protocol, fmt):\n return (protocol in self.protocols and\n fmt in self.formats)\n\n def new_datasource(self, band):\n return PickleDataSource(band)\n\n\ndef rdr_driver_init():\n return PickleReaderDriver()\n\n\nclass PickleWriterDriver(object):\n def __init__(self):\n pass\n\n @property\n def aliases(self):\n return ['pickles']\n\n @property\n def format(self):\n return FORMAT\n\n @property\n def uri_scheme(self):\n return PROTOCOL\n\n def write_dataset_to_storage(self, dataset, filename,\n global_attributes=None,\n variable_params=None,\n storage_config=None,\n **kwargs):\n with open(filename, 'wb') as f:\n pickle.dump(dataset, f)\n return {}\n\n\ndef writer_driver_init():\n return PickleWriterDriver()\n", "path": "examples/io_plugin/dcio_example/pickles.py"}]} | 1,701 | 445 |
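The essence of the fix recorded above is a path-to-URI round-trip plus directory creation; a minimal standard-library sketch (the file name is illustrative):

```python
from pathlib import Path
from urllib.parse import urlsplit

# Driver side: the ingestor now hands the writer a URI built by mk_uri().
file_path = Path("/tmp/odc_demo/ls5/2021/tile.pickled").resolve()
file_uri = file_path.as_uri()  # 'file:///tmp/odc_demo/ls5/2021/tile.pickled'

# Writer side: recover the filesystem path and create any missing
# intermediate directories before writing, as the patched driver does.
filepath = Path(urlsplit(file_uri).path)
filepath.parent.mkdir(parents=True, exist_ok=True)
with filepath.open("wb") as f:
    f.write(b"payload")  # the real driver calls pickle.dump(dataset, f) here
```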
gh_patches_debug_16915 | rasdani/github-patches | git_diff | fossasia__open-event-server-5322 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add unit test for Event helpers
**Is your feature request related to a problem? Please describe.**
<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->
We should add unit tests for event helpers.
**Additional context**
<!-- Add any other context or screenshots about the feature request here. -->
Parent: #5320
</issue>
<code>
[start of app/api/helpers/errors.py]
1 import json
2
3 from flask import make_response
4 from flask_rest_jsonapi.errors import jsonapi_errors
5
6
7 class ErrorResponse:
8 """
9 Parent ErrorResponse class for handling json-api compliant errors.
10 Inspired by the JsonApiException class of `flask-rest-jsonapi` itself
11 """
12 title = 'Unknown error'
13 status = 500
14 headers = {'Content-Type': 'application/vnd.api+json'}
15
16 def __init__(self, source, detail, title=None, status=None):
17 """Initialize a jsonapi ErrorResponse Object
18
19 :param dict source: the source of the error
20 :param str detail: the detail of the error
21 """
22 self.source = source
23 self.detail = detail
24 if title is not None:
25 self.title = title
26 if status is not None:
27 self.status = status
28
29 def respond(self):
30 """
31 :return: a jsonapi compliant response object
32 """
33 dict_ = self.to_dict()
34 return make_response(json.dumps(jsonapi_errors([dict_])), self.status, self.headers)
35
36 def to_dict(self):
37 """
38 :return: Dict from details of the object
39 """
40 return {'status': self.status,
41 'source': self.source,
42 'title': self.title,
43 'detail': self.detail}
44
45
46 class ForbiddenError(ErrorResponse):
47 """
48 Default class for 403 Error
49 """
50 title = 'Access Forbidden'
51 status = 403
52
53
54 class NotFoundError(ErrorResponse):
55 """
56 Default class for 404 Error
57 """
58 title = 'Not Found'
59 status = 404
60
61
62 class ServerError(ErrorResponse):
63 status = 500
64 title = 'Internal Server Error'
65
66
67 class UnprocessableEntityError(ErrorResponse):
68 status = 422
69 title = 'Unprocessable Entity'
70
71
72 class BadRequestError(ErrorResponse):
73 status = 400
74 title = 'Bad Request'
75
[end of app/api/helpers/errors.py]
[start of app/api/helpers/exceptions.py]
1 from flask_rest_jsonapi.exceptions import JsonApiException
2
3
4 class UnprocessableEntity(JsonApiException):
5 title = "Unprocessable Entity"
6 status = 422
7
8
9 class ConflictException(JsonApiException):
10 title = "Conflict"
11 status = 409
12
13
14 class ForbiddenException(JsonApiException):
15 """
16 Default class for 403 Error
17 """
18 title = 'Access Forbidden'
19 status = 403
20
21
22 class MethodNotAllowed(JsonApiException):
23 """
24 Default Class to throw HTTP 405 Exception
25 """
26 title = "Method Not Allowed"
27 status = 405
28
[end of app/api/helpers/exceptions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/api/helpers/errors.py b/app/api/helpers/errors.py
--- a/app/api/helpers/errors.py
+++ b/app/api/helpers/errors.py
@@ -65,10 +65,16 @@
class UnprocessableEntityError(ErrorResponse):
+ """
+ Default class for 422 Error
+ """
status = 422
title = 'Unprocessable Entity'
class BadRequestError(ErrorResponse):
+ """
+ Default class for 400 Error
+ """
status = 400
title = 'Bad Request'
diff --git a/app/api/helpers/exceptions.py b/app/api/helpers/exceptions.py
--- a/app/api/helpers/exceptions.py
+++ b/app/api/helpers/exceptions.py
@@ -2,11 +2,17 @@
class UnprocessableEntity(JsonApiException):
+ """
+ Default class for 422 Error
+ """
title = "Unprocessable Entity"
status = 422
class ConflictException(JsonApiException):
+ """
+ Default class for 409 Error
+ """
title = "Conflict"
status = 409
| {"golden_diff": "diff --git a/app/api/helpers/errors.py b/app/api/helpers/errors.py\n--- a/app/api/helpers/errors.py\n+++ b/app/api/helpers/errors.py\n@@ -65,10 +65,16 @@\n \n \n class UnprocessableEntityError(ErrorResponse):\n+ \"\"\"\n+ Default class for 422 Error\n+ \"\"\"\n status = 422\n title = 'Unprocessable Entity'\n \n \n class BadRequestError(ErrorResponse):\n+ \"\"\"\n+ Default class for 400 Error\n+ \"\"\"\n status = 400\n title = 'Bad Request'\ndiff --git a/app/api/helpers/exceptions.py b/app/api/helpers/exceptions.py\n--- a/app/api/helpers/exceptions.py\n+++ b/app/api/helpers/exceptions.py\n@@ -2,11 +2,17 @@\n \n \n class UnprocessableEntity(JsonApiException):\n+ \"\"\"\n+ Default class for 422 Error\n+ \"\"\"\n title = \"Unprocessable Entity\"\n status = 422\n \n \n class ConflictException(JsonApiException):\n+ \"\"\"\n+ Default class for 409 Error\n+ \"\"\"\n title = \"Conflict\"\n status = 409\n", "issue": "Add unit test for Event helpers\n**Is your feature request related to a problem? Please describe.**\r\n<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->\r\nWe should add unit tests for event helpers.\r\n\r\n**Additional context**\r\n<!-- Add any other context or screenshots about the feature request here. -->\r\nParent: #5320 \n", "before_files": [{"content": "import json\n\nfrom flask import make_response\nfrom flask_rest_jsonapi.errors import jsonapi_errors\n\n\nclass ErrorResponse:\n \"\"\"\n Parent ErrorResponse class for handling json-api compliant errors.\n Inspired by the JsonApiException class of `flask-rest-jsonapi` itself\n \"\"\"\n title = 'Unknown error'\n status = 500\n headers = {'Content-Type': 'application/vnd.api+json'}\n\n def __init__(self, source, detail, title=None, status=None):\n \"\"\"Initialize a jsonapi ErrorResponse Object\n\n :param dict source: the source of the error\n :param str detail: the detail of the error\n \"\"\"\n self.source = source\n self.detail = detail\n if title is not None:\n self.title = title\n if status is not None:\n self.status = status\n\n def respond(self):\n \"\"\"\n :return: a jsonapi compliant response object\n \"\"\"\n dict_ = self.to_dict()\n return make_response(json.dumps(jsonapi_errors([dict_])), self.status, self.headers)\n\n def to_dict(self):\n \"\"\"\n :return: Dict from details of the object\n \"\"\"\n return {'status': self.status,\n 'source': self.source,\n 'title': self.title,\n 'detail': self.detail}\n\n\nclass ForbiddenError(ErrorResponse):\n \"\"\"\n Default class for 403 Error\n \"\"\"\n title = 'Access Forbidden'\n status = 403\n\n\nclass NotFoundError(ErrorResponse):\n \"\"\"\n Default class for 404 Error\n \"\"\"\n title = 'Not Found'\n status = 404\n\n\nclass ServerError(ErrorResponse):\n status = 500\n title = 'Internal Server Error'\n\n\nclass UnprocessableEntityError(ErrorResponse):\n status = 422\n title = 'Unprocessable Entity'\n\n\nclass BadRequestError(ErrorResponse):\n status = 400\n title = 'Bad Request'\n", "path": "app/api/helpers/errors.py"}, {"content": "from flask_rest_jsonapi.exceptions import JsonApiException\n\n\nclass UnprocessableEntity(JsonApiException):\n title = \"Unprocessable Entity\"\n status = 422\n\n\nclass ConflictException(JsonApiException):\n title = \"Conflict\"\n status = 409\n\n\nclass ForbiddenException(JsonApiException):\n \"\"\"\n Default class for 403 Error\n \"\"\"\n title = 'Access Forbidden'\n status = 403\n\n\nclass MethodNotAllowed(JsonApiException):\n \"\"\"\n Default Class to throw HTTP 405 
Exception\n \"\"\"\n title = \"Method Not Allowed\"\n status = 405\n", "path": "app/api/helpers/exceptions.py"}]} | 1,385 | 257 |
gh_patches_debug_15280 | rasdani/github-patches | git_diff | PlasmaPy__PlasmaPy-891 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Change name of chat room from Matrix/Riot to Element
The name of the chat platform we are using has [changed from Matrix/Riot to Element](https://element.io/blog/the-world-is-changing/). We should change the name of our chat room accordingly, including in our README file and possibly some places in our documentation.
</issue>
<code>
[start of plasmapy/particles/isotopes.py]
1 """
2 Module for loading isotope data from :file:`plasmapy/particles/data/isotopes.json`.
3
4 .. attention::
5 This module only contains non-public functionality. To learn more about the
6 package functionality, then examine the code itself.
7 """
8 __all__ = []
9
10 import astropy.units as u
11 import json
12 import pkgutil
13
14 # this code was used to create the JSON file as per vn-ki on Riot:
15 # https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/
16 # $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com
17 #
18 # def _isotope_default(obj):
19 # if isinstance(obj, u.Quantity):
20 # return {
21 # "unit": obj.unit.name,
22 # "value": obj.value,
23 # }
24 # with open("isotopes.json", "w") as f:
25 # json.dump(_Isotopes, f, default=plasma_default, indent=2)
26
27
28 def _isotope_obj_hook(obj):
29 """An `object_hook` designed for `json.load` and `json.loads`."""
30 if "unit" in obj:
31 return obj["value"] * u.Unit(obj["unit"])
32 return obj
33
34
35 #: Dictionary of isotope data.
36 _Isotopes = json.loads(
37 pkgutil.get_data("plasmapy", "particles/data/isotopes.json"),
38 object_hook=_isotope_obj_hook,
39 )
40
[end of plasmapy/particles/isotopes.py]
[start of plasmapy/particles/elements.py]
1 """
2 Module for loading atomic data for elements from
3 :file:`plasmapy/particles/data/elements.json`.
4
5 The periodic tabla data is from: http://periodic.lanl.gov/index.shtml
6
7 .. attention::
8 This module only contains non-public functionality. To learn more about the
9 package functionality, examine the code itself.
10 """
11 __all__ = []
12
13 import astropy.units as u
14 import collections
15 import json
16 import pkgutil
17
18 _PeriodicTable = collections.namedtuple(
19 "periodic_table", ["group", "category", "block", "period"]
20 )
21
22
23 def _element_obj_hook(obj):
24 if "unit" in obj:
25 return obj["value"] * u.Unit(obj["unit"])
26 return obj
27
28
29 # this code was used to create the JSON file as per vn-ki on Riot:
30 # https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/
31 # $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com
32 #
33 # def plasma_default(obj):
34 # if isinstance(obj, u.Quantity):
35 # return {
36 # "unit": obj.unit.name,
37 # "value": obj.value,
38 # }
39 #
40 # with open("elements.json", "w") as f:
41 # json.dump(_Elements, f, default=plasma_default, indent=2)
42
43
44 _Elements = json.loads(
45 pkgutil.get_data("plasmapy", "particles/data/elements.json"),
46 object_hook=_element_obj_hook,
47 )
48
49
50 _atomic_numbers_to_symbols = {
51 elemdict["atomic number"]: symb for (symb, elemdict) in _Elements.items()
52 }
53
54 _element_names_to_symbols = {
55 elemdict["element name"]: symb for (symb, elemdict) in _Elements.items()
56 }
57
[end of plasmapy/particles/elements.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plasmapy/particles/elements.py b/plasmapy/particles/elements.py
--- a/plasmapy/particles/elements.py
+++ b/plasmapy/particles/elements.py
@@ -26,7 +26,7 @@
return obj
-# this code was used to create the JSON file as per vn-ki on Riot:
+# this code was used to create the JSON file as per vn-ki on Matrix:
# https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/
# $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com
#
diff --git a/plasmapy/particles/isotopes.py b/plasmapy/particles/isotopes.py
--- a/plasmapy/particles/isotopes.py
+++ b/plasmapy/particles/isotopes.py
@@ -11,7 +11,7 @@
import json
import pkgutil
-# this code was used to create the JSON file as per vn-ki on Riot:
+# this code was used to create the JSON file as per vn-ki on Matrix:
# https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/
# $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com
#
| {"golden_diff": "diff --git a/plasmapy/particles/elements.py b/plasmapy/particles/elements.py\n--- a/plasmapy/particles/elements.py\n+++ b/plasmapy/particles/elements.py\n@@ -26,7 +26,7 @@\n return obj\n \n \n-# this code was used to create the JSON file as per vn-ki on Riot:\n+# this code was used to create the JSON file as per vn-ki on Matrix:\n # https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/\n # $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com\n #\ndiff --git a/plasmapy/particles/isotopes.py b/plasmapy/particles/isotopes.py\n--- a/plasmapy/particles/isotopes.py\n+++ b/plasmapy/particles/isotopes.py\n@@ -11,7 +11,7 @@\n import json\n import pkgutil\n \n-# this code was used to create the JSON file as per vn-ki on Riot:\n+# this code was used to create the JSON file as per vn-ki on Matrix:\n # https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/\n # $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com\n #\n", "issue": "Change name of chat room from Matrix/Riot to Element\nThe name of the chat platform we are using has [changed from Matrix/Riot to Element](https://element.io/blog/the-world-is-changing/). We should change the name of our chat room accordingly, including in our README file and possibly some places in our documentation.\n", "before_files": [{"content": "\"\"\"\nModule for loading isotope data from :file:`plasmapy/particles/data/isotopes.json`.\n\n.. attention::\n This module only contains non-public functionality. To learn more about the\n package functionality, then examine the code itself.\n\"\"\"\n__all__ = []\n\nimport astropy.units as u\nimport json\nimport pkgutil\n\n# this code was used to create the JSON file as per vn-ki on Riot:\n# https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/\n# $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com\n#\n# def _isotope_default(obj):\n# if isinstance(obj, u.Quantity):\n# return {\n# \"unit\": obj.unit.name,\n# \"value\": obj.value,\n# }\n# with open(\"isotopes.json\", \"w\") as f:\n# json.dump(_Isotopes, f, default=plasma_default, indent=2)\n\n\ndef _isotope_obj_hook(obj):\n \"\"\"An `object_hook` designed for `json.load` and `json.loads`.\"\"\"\n if \"unit\" in obj:\n return obj[\"value\"] * u.Unit(obj[\"unit\"])\n return obj\n\n\n#: Dictionary of isotope data.\n_Isotopes = json.loads(\n pkgutil.get_data(\"plasmapy\", \"particles/data/isotopes.json\"),\n object_hook=_isotope_obj_hook,\n)\n", "path": "plasmapy/particles/isotopes.py"}, {"content": "\"\"\"\nModule for loading atomic data for elements from\n:file:`plasmapy/particles/data/elements.json`.\n\nThe periodic tabla data is from: http://periodic.lanl.gov/index.shtml\n\n.. attention::\n This module only contains non-public functionality. 
To learn more about the\n package functionality, examine the code itself.\n\"\"\"\n__all__ = []\n\nimport astropy.units as u\nimport collections\nimport json\nimport pkgutil\n\n_PeriodicTable = collections.namedtuple(\n \"periodic_table\", [\"group\", \"category\", \"block\", \"period\"]\n)\n\n\ndef _element_obj_hook(obj):\n if \"unit\" in obj:\n return obj[\"value\"] * u.Unit(obj[\"unit\"])\n return obj\n\n\n# this code was used to create the JSON file as per vn-ki on Riot:\n# https://matrix.to/#/!hkWCiyhQyxiYJlUtKF:matrix.org/\n# $1554667515670438wIKlP:matrix.org?via=matrix.org&via=cadair.com\n#\n# def plasma_default(obj):\n# if isinstance(obj, u.Quantity):\n# return {\n# \"unit\": obj.unit.name,\n# \"value\": obj.value,\n# }\n#\n# with open(\"elements.json\", \"w\") as f:\n# json.dump(_Elements, f, default=plasma_default, indent=2)\n\n\n_Elements = json.loads(\n pkgutil.get_data(\"plasmapy\", \"particles/data/elements.json\"),\n object_hook=_element_obj_hook,\n)\n\n\n_atomic_numbers_to_symbols = {\n elemdict[\"atomic number\"]: symb for (symb, elemdict) in _Elements.items()\n}\n\n_element_names_to_symbols = {\n elemdict[\"element name\"]: symb for (symb, elemdict) in _Elements.items()\n}\n", "path": "plasmapy/particles/elements.py"}]} | 1,561 | 337 |
gh_patches_debug_2500 | rasdani/github-patches | git_diff | rlworkgroup__garage-692 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add Intel-optimized version of the package
</issue>
<code>
[start of setup.py]
1 from setuptools import find_packages
2 from setuptools import setup
3
4 # Required dependencies
5 required = [
6 # Please keep alphabetized
7 'akro',
8 'boto3',
9 'cached_property',
10 'click',
11 'cloudpickle',
12 'cma==1.1.06',
13 # dm_control throws an error during install about not being able to
14 # find a build dependency (absl-py). Later pip executes the `install`
15 # command again and the install succeeds because absl-py has been
16 # installed. This is stupid, but harmless.
17 'dm_control @ https://api.github.com/repos/deepmind/dm_control/tarball/92f9913013face0468442cd0964d5973ea2089ea', # noqa: E501
18 'dowel==0.0.2',
19 'gym[all]==0.10.11',
20 'joblib<0.13,>=0.12',
21 'matplotlib',
22 'mujoco-py<1.50.2,>=1.50.1',
23 'numpy==1.14.5',
24 'psutil',
25 'pyprind',
26 'python-dateutil',
27 'scikit-image',
28 'scipy',
29 'tensorflow<1.13,>=1.12.0',
30 'tensorflow-probability<0.6.0,>=0.5.0', # for tensorflow 1.12
31 ]
32
33 # Dependencies for optional features
34 extras = {}
35 extras['all'] = list(set(sum(extras.values(), [])))
36
37 # Development dependencies (*not* included in "all")
38 extras['dev'] = [
39 # Please keep alphabetized
40 'baselines @ https://api.github.com/repos/openai/baselines/tarball/f2729693253c0ef4d4086231d36e0a4307ec1cb3', # noqa: E501
41 'coverage',
42 'flake8',
43 'flake8-docstrings==1.3.0',
44 'flake8-import-order',
45 'nose2',
46 'pandas',
47 'pep8-naming==0.7.0',
48 'pre-commit',
49 'pylint==1.9.2',
50 'sphinx',
51 'sphinx_rtd_theme',
52 'yapf',
53 ]
54
55 with open('README.md') as f:
56 readme = f.read()
57
58 # Get the package version dynamically
59 with open('VERSION') as v:
60 version = v.read().strip()
61
62 setup(
63 name='garage',
64 version=version,
65 author='Reinforcement Learning Working Group',
66 description='A framework for reproducible reinforcement learning research',
67 url='https://github.com/rlworkgroup/garage',
68 packages=find_packages(where='src'),
69 package_dir={'': 'src'},
70 scripts=['scripts/garage'],
71 install_requires=required,
72 extras_require=extras,
73 license='MIT',
74 long_description=readme,
75 long_description_content_type='text/markdown',
76 classifiers=[
77 'Development Status :: 4 - Beta',
78 'Intended Audience :: Developers',
79 'Intended Audience :: Education',
80 'Intended Audience :: Science/Research',
81 'License :: OSI Approved :: MIT License',
82 'Programming Language :: Python :: 3.5',
83 'Programming Language :: Python :: 3.6',
84 'Programming Language :: Python :: 3.7',
85 'Programming Language :: Python :: 3 :: Only',
86 'Topic :: Scientific/Engineering :: Artificial Intelligence',
87 'Topic :: Scientific/Engineering :: Mathematics',
88 'Topic :: Software Development :: Libraries',
89 ],
90 )
91
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -34,6 +34,9 @@
extras = {}
extras['all'] = list(set(sum(extras.values(), [])))
+# Intel dependencies not included in all
+extras['intel'] = ['intel-tensorflow<1.13,>=1.12.0']
+
# Development dependencies (*not* included in "all")
extras['dev'] = [
# Please keep alphabetized
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -34,6 +34,9 @@\n extras = {}\n extras['all'] = list(set(sum(extras.values(), [])))\n \n+# Intel dependencies not included in all\n+extras['intel'] = ['intel-tensorflow<1.13,>=1.12.0']\n+\n # Development dependencies (*not* included in \"all\")\n extras['dev'] = [\n # Please keep alphabetized\n", "issue": "Add Intel-optimized version of the package\n\n", "before_files": [{"content": "from setuptools import find_packages\nfrom setuptools import setup\n\n# Required dependencies\nrequired = [\n # Please keep alphabetized\n 'akro',\n 'boto3',\n 'cached_property',\n 'click',\n 'cloudpickle',\n 'cma==1.1.06',\n # dm_control throws an error during install about not being able to\n # find a build dependency (absl-py). Later pip executes the `install`\n # command again and the install succeeds because absl-py has been\n # installed. This is stupid, but harmless.\n 'dm_control @ https://api.github.com/repos/deepmind/dm_control/tarball/92f9913013face0468442cd0964d5973ea2089ea', # noqa: E501\n 'dowel==0.0.2',\n 'gym[all]==0.10.11',\n 'joblib<0.13,>=0.12',\n 'matplotlib',\n 'mujoco-py<1.50.2,>=1.50.1',\n 'numpy==1.14.5',\n 'psutil',\n 'pyprind',\n 'python-dateutil',\n 'scikit-image',\n 'scipy',\n 'tensorflow<1.13,>=1.12.0',\n 'tensorflow-probability<0.6.0,>=0.5.0', # for tensorflow 1.12\n]\n\n# Dependencies for optional features\nextras = {}\nextras['all'] = list(set(sum(extras.values(), [])))\n\n# Development dependencies (*not* included in \"all\")\nextras['dev'] = [\n # Please keep alphabetized\n 'baselines @ https://api.github.com/repos/openai/baselines/tarball/f2729693253c0ef4d4086231d36e0a4307ec1cb3', # noqa: E501\n 'coverage',\n 'flake8',\n 'flake8-docstrings==1.3.0',\n 'flake8-import-order',\n 'nose2',\n 'pandas',\n 'pep8-naming==0.7.0',\n 'pre-commit',\n 'pylint==1.9.2',\n 'sphinx',\n 'sphinx_rtd_theme',\n 'yapf',\n]\n\nwith open('README.md') as f:\n readme = f.read()\n\n# Get the package version dynamically\nwith open('VERSION') as v:\n version = v.read().strip()\n\nsetup(\n name='garage',\n version=version,\n author='Reinforcement Learning Working Group',\n description='A framework for reproducible reinforcement learning research',\n url='https://github.com/rlworkgroup/garage',\n packages=find_packages(where='src'),\n package_dir={'': 'src'},\n scripts=['scripts/garage'],\n install_requires=required,\n extras_require=extras,\n license='MIT',\n long_description=readme,\n long_description_content_type='text/markdown',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries',\n ],\n)\n", "path": "setup.py"}]} | 1,539 | 109 |
gh_patches_debug_16985 | rasdani/github-patches | git_diff | searxng__searxng-2369 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Double bangs without query redirect to search URL instead of main page
<!-- PLEASE FILL THESE FIELDS, IT REALLY HELPS THE MAINTAINERS OF SearXNG -->
**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**
`2023.4.21+98387e29`
**How did you install SearXNG?**
`searxng-docker`
**What happened?**
Double bangs without query redirect to search URL instead of main page
**How To Reproduce**
Use external bang without any search items e.g. `!!gh` and we land on `https://github.com/search?utf8=%E2%9C%93&q=`
**Expected behavior**
`!!gh` should redirect to `https://github.com` if no search items
</issue>
<code>
[start of searx/external_bang.py]
1 # SPDX-License-Identifier: AGPL-3.0-or-later
2
3 from urllib.parse import quote_plus
4 from searx.data import EXTERNAL_BANGS
5
6 LEAF_KEY = chr(16)
7
8
9 def get_node(external_bangs_db, bang):
10 node = external_bangs_db['trie']
11 after = ''
12 before = ''
13 for bang_letter in bang:
14 after += bang_letter
15 if after in node and isinstance(node, dict):
16 node = node[after]
17 before += after
18 after = ''
19 return node, before, after
20
21
22 def get_bang_definition_and_ac(external_bangs_db, bang):
23 node, before, after = get_node(external_bangs_db, bang)
24
25 bang_definition = None
26 bang_ac_list = []
27 if after != '':
28 for k in node:
29 if k.startswith(after):
30 bang_ac_list.append(before + k)
31 elif isinstance(node, dict):
32 bang_definition = node.get(LEAF_KEY)
33 bang_ac_list = [before + k for k in node.keys() if k != LEAF_KEY]
34 elif isinstance(node, str):
35 bang_definition = node
36 bang_ac_list = []
37
38 return bang_definition, bang_ac_list
39
40
41 def resolve_bang_definition(bang_definition, query):
42 url, rank = bang_definition.split(chr(1))
43 url = url.replace(chr(2), quote_plus(query))
44 if url.startswith('//'):
45 url = 'https:' + url
46 rank = int(rank) if len(rank) > 0 else 0
47 return (url, rank)
48
49
50 def get_bang_definition_and_autocomplete(bang, external_bangs_db=None):
51 if external_bangs_db is None:
52 external_bangs_db = EXTERNAL_BANGS
53
54 bang_definition, bang_ac_list = get_bang_definition_and_ac(external_bangs_db, bang)
55
56 new_autocomplete = []
57 current = [*bang_ac_list]
58 done = set()
59 while len(current) > 0:
60 bang_ac = current.pop(0)
61 done.add(bang_ac)
62
63 current_bang_definition, current_bang_ac_list = get_bang_definition_and_ac(external_bangs_db, bang_ac)
64 if current_bang_definition:
65 _, order = resolve_bang_definition(current_bang_definition, '')
66 new_autocomplete.append((bang_ac, order))
67 for new_bang in current_bang_ac_list:
68 if new_bang not in done and new_bang not in current:
69 current.append(new_bang)
70
71 new_autocomplete.sort(key=lambda t: (-t[1], t[0]))
72 new_autocomplete = list(map(lambda t: t[0], new_autocomplete))
73
74 return bang_definition, new_autocomplete
75
76
77 def get_bang_url(search_query, external_bangs_db=None):
78 """
79 Redirects if the user supplied a correct bang search.
80 :param search_query: This is a search_query object which contains preferences and the submitted queries.
81 :return: None if the bang was invalid, else a string of the redirect url.
82 """
83 ret_val = None
84
85 if external_bangs_db is None:
86 external_bangs_db = EXTERNAL_BANGS
87
88 if search_query.external_bang:
89 bang_definition, _ = get_bang_definition_and_ac(external_bangs_db, search_query.external_bang)
90 if bang_definition and isinstance(bang_definition, str):
91 ret_val = resolve_bang_definition(bang_definition, search_query.query)[0]
92
93 return ret_val
94
[end of searx/external_bang.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/searx/external_bang.py b/searx/external_bang.py
--- a/searx/external_bang.py
+++ b/searx/external_bang.py
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-from urllib.parse import quote_plus
+from urllib.parse import quote_plus, urlparse
from searx.data import EXTERNAL_BANGS
LEAF_KEY = chr(16)
@@ -40,9 +40,15 @@
def resolve_bang_definition(bang_definition, query):
url, rank = bang_definition.split(chr(1))
- url = url.replace(chr(2), quote_plus(query))
if url.startswith('//'):
url = 'https:' + url
+ if query:
+ url = url.replace(chr(2), quote_plus(query))
+ else:
+ # go to main instead of search page
+ o = urlparse(url)
+ url = o.scheme + '://' + o.netloc
+
rank = int(rank) if len(rank) > 0 else 0
return (url, rank)
| {"golden_diff": "diff --git a/searx/external_bang.py b/searx/external_bang.py\n--- a/searx/external_bang.py\n+++ b/searx/external_bang.py\n@@ -1,6 +1,6 @@\n # SPDX-License-Identifier: AGPL-3.0-or-later\n \n-from urllib.parse import quote_plus\n+from urllib.parse import quote_plus, urlparse\n from searx.data import EXTERNAL_BANGS\n \n LEAF_KEY = chr(16)\n@@ -40,9 +40,15 @@\n \n def resolve_bang_definition(bang_definition, query):\n url, rank = bang_definition.split(chr(1))\n- url = url.replace(chr(2), quote_plus(query))\n if url.startswith('//'):\n url = 'https:' + url\n+ if query:\n+ url = url.replace(chr(2), quote_plus(query))\n+ else:\n+ # go to main instead of search page\n+ o = urlparse(url)\n+ url = o.scheme + '://' + o.netloc\n+\n rank = int(rank) if len(rank) > 0 else 0\n return (url, rank)\n", "issue": "Double bangs without query redirect to search URL instead of main page\n<!-- PLEASE FILL THESE FIELDS, IT REALLY HELPS THE MAINTAINERS OF SearXNG -->\r\n\r\n**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**\r\n`2023.4.21+98387e29`\r\n\r\n**How did you install SearXNG?**\r\n`searxng-docker`\r\n\r\n**What happened?**\r\nDouble bangs without query redirect to search URL instead of main page\r\n\r\n**How To Reproduce**\r\nUse external bang without any search items e.g. `!!gh` and we land on `https://github.com/search?utf8=%E2%9C%93&q=` \r\n\r\n**Expected behavior**\r\n`!!gh` should redirect to `https://github.com` if no search items\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n\nfrom urllib.parse import quote_plus\nfrom searx.data import EXTERNAL_BANGS\n\nLEAF_KEY = chr(16)\n\n\ndef get_node(external_bangs_db, bang):\n node = external_bangs_db['trie']\n after = ''\n before = ''\n for bang_letter in bang:\n after += bang_letter\n if after in node and isinstance(node, dict):\n node = node[after]\n before += after\n after = ''\n return node, before, after\n\n\ndef get_bang_definition_and_ac(external_bangs_db, bang):\n node, before, after = get_node(external_bangs_db, bang)\n\n bang_definition = None\n bang_ac_list = []\n if after != '':\n for k in node:\n if k.startswith(after):\n bang_ac_list.append(before + k)\n elif isinstance(node, dict):\n bang_definition = node.get(LEAF_KEY)\n bang_ac_list = [before + k for k in node.keys() if k != LEAF_KEY]\n elif isinstance(node, str):\n bang_definition = node\n bang_ac_list = []\n\n return bang_definition, bang_ac_list\n\n\ndef resolve_bang_definition(bang_definition, query):\n url, rank = bang_definition.split(chr(1))\n url = url.replace(chr(2), quote_plus(query))\n if url.startswith('//'):\n url = 'https:' + url\n rank = int(rank) if len(rank) > 0 else 0\n return (url, rank)\n\n\ndef get_bang_definition_and_autocomplete(bang, external_bangs_db=None):\n if external_bangs_db is None:\n external_bangs_db = EXTERNAL_BANGS\n\n bang_definition, bang_ac_list = get_bang_definition_and_ac(external_bangs_db, bang)\n\n new_autocomplete = []\n current = [*bang_ac_list]\n done = set()\n while len(current) > 0:\n bang_ac = current.pop(0)\n done.add(bang_ac)\n\n current_bang_definition, current_bang_ac_list = get_bang_definition_and_ac(external_bangs_db, bang_ac)\n if current_bang_definition:\n _, order = resolve_bang_definition(current_bang_definition, '')\n new_autocomplete.append((bang_ac, order))\n for new_bang in current_bang_ac_list:\n if new_bang not in done and new_bang not in current:\n current.append(new_bang)\n\n new_autocomplete.sort(key=lambda t: (-t[1], 
t[0]))\n new_autocomplete = list(map(lambda t: t[0], new_autocomplete))\n\n return bang_definition, new_autocomplete\n\n\ndef get_bang_url(search_query, external_bangs_db=None):\n \"\"\"\n Redirects if the user supplied a correct bang search.\n :param search_query: This is a search_query object which contains preferences and the submitted queries.\n :return: None if the bang was invalid, else a string of the redirect url.\n \"\"\"\n ret_val = None\n\n if external_bangs_db is None:\n external_bangs_db = EXTERNAL_BANGS\n\n if search_query.external_bang:\n bang_definition, _ = get_bang_definition_and_ac(external_bangs_db, search_query.external_bang)\n if bang_definition and isinstance(bang_definition, str):\n ret_val = resolve_bang_definition(bang_definition, search_query.query)[0]\n\n return ret_val\n", "path": "searx/external_bang.py"}]} | 1,675 | 254 |
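The patched `resolve_bang_definition` above handles the empty-query case by keeping only the bang target's origin. A standalone sketch of that branch, assuming the same `chr(2)` placeholder the bangs database uses for the query:

```python
from urllib.parse import quote_plus, urlparse


def resolve(url: str, query: str) -> str:
    if url.startswith("//"):
        url = "https:" + url
    if query:
        return url.replace(chr(2), quote_plus(query))
    # Empty query: drop the search path and land on the main page.
    o = urlparse(url)
    return f"{o.scheme}://{o.netloc}"


template = "//github.com/search?utf8=%E2%9C%93&q=" + chr(2)
print(resolve(template, "uvicorn"))  # https://github.com/search?utf8=%E2%9C%93&q=uvicorn
print(resolve(template, ""))         # https://github.com
```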
gh_patches_debug_2243 | rasdani/github-patches | git_diff | streamlink__streamlink-5023 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
plugins.vtvgo: '403 Client Error: Forbidden for url: ...'
### Checklist
- [X] This is a plugin issue and not a different kind of issue
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
Latest build from the master branch
### Description
Last month VtvGo added cookie requirements for the stream playlist, and now it seems that they added another security layer. The request to the website returns error 403.
### Debug log
```text
streamlink https://vtvgo.vn/xem-truc-tuyen-kenh-vtv3-3.html --loglevel=debug
[cli][debug] OS: Linux-5.15.0-53-generic-x86_64-with-glibc2.35
[cli][debug] Python: 3.10.6
[cli][debug] Streamlink: 5.1.2+4.g68dad105
[cli][debug] Dependencies:
[cli][debug] certifi: 2022.9.24
[cli][debug] isodate: 0.6.1
[cli][debug] lxml: 4.9.1
[cli][debug] pycountry: 22.3.5
[cli][debug] pycryptodome: 3.15.0
[cli][debug] PySocks: 1.7.1
[cli][debug] requests: 2.28.1
[cli][debug] urllib3: 1.26.12
[cli][debug] websocket-client: 1.4.1
[cli][debug] importlib-metadata: 4.6.4
[cli][debug] Arguments:
[cli][debug] url=https://vtvgo.vn/xem-truc-tuyen-kenh-vtv3-3.html
[cli][debug] --loglevel=debug
[cli][info] Found matching plugin vtvgo for URL https://vtvgo.vn/xem-truc-tuyen-kenh-vtv3-3.html
error: Unable to open URL: https://vtvgo.vn/ajax-get-stream (403 Client Error: Forbidden for url: https://vtvgo.vn/ajax-get-stream)
```
</issue>
<code>
[start of src/streamlink/plugins/vtvgo.py]
1 """
2 $description Live TV channels from VTV, a Vietnamese public, state-owned broadcaster.
3 $url vtvgo.vn
4 $type live
5 """
6
7 import logging
8 import re
9
10 from streamlink.plugin import Plugin, pluginmatcher
11 from streamlink.plugin.api import validate
12 from streamlink.stream.hls import HLSStream
13
14 log = logging.getLogger(__name__)
15
16
17 @pluginmatcher(re.compile(
18 r"https?://vtvgo\.vn/xem-truc-tuyen-kenh-"
19 ))
20 class VTVgo(Plugin):
21 AJAX_URL = "https://vtvgo.vn/ajax-get-stream"
22
23 def _get_streams(self):
24 # get cookies
25 self.session.http.get("https://vtvgo.vn/")
26
27 self.session.http.headers.update({
28 "Origin": "https://vtvgo.vn",
29 "Referer": self.url,
30 "X-Requested-With": "XMLHttpRequest",
31 })
32
33 params = self.session.http.get(self.url, schema=validate.Schema(
34 validate.parse_html(),
35 validate.xml_xpath_string(".//script[contains(text(),'setplayer(')][1]/text()"),
36 validate.none_or_all(
37 validate.regex(
38 re.compile(r"""var\s+(?P<key>(?:type_)?id|time|token)\s*=\s*["']?(?P<value>[^"']+)["']?;"""),
39 method="findall",
40 ),
41 [
42 ("id", int),
43 ("type_id", str),
44 ("time", str),
45 ("token", str),
46 ],
47 ),
48 ))
49 if not params:
50 return
51
52 log.trace(f"{params!r}")
53 hls_url = self.session.http.post(
54 self.AJAX_URL,
55 data=dict(params),
56 schema=validate.Schema(
57 validate.parse_json(),
58 {"stream_url": [validate.url()]},
59 validate.get(("stream_url", 0)),
60 ),
61 )
62
63 return HLSStream.parse_variant_playlist(self.session, hls_url)
64
65
66 __plugin__ = VTVgo
67
[end of src/streamlink/plugins/vtvgo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/plugins/vtvgo.py b/src/streamlink/plugins/vtvgo.py
--- a/src/streamlink/plugins/vtvgo.py
+++ b/src/streamlink/plugins/vtvgo.py
@@ -27,6 +27,7 @@
self.session.http.headers.update({
"Origin": "https://vtvgo.vn",
"Referer": self.url,
+ "Sec-Fetch-Site": "same-origin",
"X-Requested-With": "XMLHttpRequest",
})
| {"golden_diff": "diff --git a/src/streamlink/plugins/vtvgo.py b/src/streamlink/plugins/vtvgo.py\n--- a/src/streamlink/plugins/vtvgo.py\n+++ b/src/streamlink/plugins/vtvgo.py\n@@ -27,6 +27,7 @@\n self.session.http.headers.update({\n \"Origin\": \"https://vtvgo.vn\",\n \"Referer\": self.url,\n+ \"Sec-Fetch-Site\": \"same-origin\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n })\n", "issue": "plugins.vtvgo: '403 Client Error: Forbidden for url: ...'\n### Checklist\n\n- [X] This is a plugin issue and not a different kind of issue\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nLatest build from the master branch\n\n### Description\n\nLast month VtvGo added cookie requirements for the stream playlist, and now it seems that they added another security layer. The request to the website returns error 403.\n\n### Debug log\n\n```text\nstreamlink https://vtvgo.vn/xem-truc-tuyen-kenh-vtv3-3.html --loglevel=debug\r\n[cli][debug] OS: Linux-5.15.0-53-generic-x86_64-with-glibc2.35\r\n[cli][debug] Python: 3.10.6\r\n[cli][debug] Streamlink: 5.1.2+4.g68dad105\r\n[cli][debug] Dependencies:\r\n[cli][debug] certifi: 2022.9.24\r\n[cli][debug] isodate: 0.6.1\r\n[cli][debug] lxml: 4.9.1\r\n[cli][debug] pycountry: 22.3.5\r\n[cli][debug] pycryptodome: 3.15.0\r\n[cli][debug] PySocks: 1.7.1\r\n[cli][debug] requests: 2.28.1\r\n[cli][debug] urllib3: 1.26.12\r\n[cli][debug] websocket-client: 1.4.1\r\n[cli][debug] importlib-metadata: 4.6.4\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://vtvgo.vn/xem-truc-tuyen-kenh-vtv3-3.html\r\n[cli][debug] --loglevel=debug\r\n[cli][info] Found matching plugin vtvgo for URL https://vtvgo.vn/xem-truc-tuyen-kenh-vtv3-3.html\r\nerror: Unable to open URL: https://vtvgo.vn/ajax-get-stream (403 Client Error: Forbidden for url: https://vtvgo.vn/ajax-get-stream)\n```\n\n", "before_files": [{"content": "\"\"\"\n$description Live TV channels from VTV, a Vietnamese public, state-owned broadcaster.\n$url vtvgo.vn\n$type live\n\"\"\"\n\nimport logging\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://vtvgo\\.vn/xem-truc-tuyen-kenh-\"\n))\nclass VTVgo(Plugin):\n AJAX_URL = \"https://vtvgo.vn/ajax-get-stream\"\n\n def _get_streams(self):\n # get cookies\n self.session.http.get(\"https://vtvgo.vn/\")\n\n self.session.http.headers.update({\n \"Origin\": \"https://vtvgo.vn\",\n \"Referer\": self.url,\n \"X-Requested-With\": \"XMLHttpRequest\",\n })\n\n params = self.session.http.get(self.url, schema=validate.Schema(\n validate.parse_html(),\n validate.xml_xpath_string(\".//script[contains(text(),'setplayer(')][1]/text()\"),\n validate.none_or_all(\n validate.regex(\n re.compile(r\"\"\"var\\s+(?P<key>(?:type_)?id|time|token)\\s*=\\s*[\"']?(?P<value>[^\"']+)[\"']?;\"\"\"),\n method=\"findall\",\n ),\n [\n (\"id\", int),\n (\"type_id\", str),\n (\"time\", str),\n (\"token\", str),\n ],\n ),\n ))\n if not params:\n return\n\n log.trace(f\"{params!r}\")\n hls_url = self.session.http.post(\n self.AJAX_URL,\n 
data=dict(params),\n schema=validate.Schema(\n validate.parse_json(),\n {\"stream_url\": [validate.url()]},\n validate.get((\"stream_url\", 0)),\n ),\n )\n\n return HLSStream.parse_variant_playlist(self.session, hls_url)\n\n\n__plugin__ = VTVgo\n", "path": "src/streamlink/plugins/vtvgo.py"}]} | 1,709 | 109 |
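The one-line fix adds `Sec-Fetch-Site: same-origin`, which suggests the site started validating Fetch-Metadata headers that browsers send automatically but plain HTTP clients do not. A hedged reproduction of the patched request flow outside Streamlink; the endpoint and header set are copied from the plugin, while the form values are placeholders standing in for what the plugin scrapes from the channel page:

```python
import requests

session = requests.Session()
session.get("https://vtvgo.vn/")  # collect the cookies the site now requires

session.headers.update({
    "Origin": "https://vtvgo.vn",
    "Referer": "https://vtvgo.vn/xem-truc-tuyen-kenh-vtv3-3.html",
    "Sec-Fetch-Site": "same-origin",   # the header whose absence triggered the 403
    "X-Requested-With": "XMLHttpRequest",
})

payload = {"id": 3, "type_id": "channel", "time": "0", "token": "..."}  # placeholders
resp = session.post("https://vtvgo.vn/ajax-get-stream", data=payload)
print(resp.status_code)  # no longer 403 if the header check was the only gate
```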
gh_patches_debug_29824 | rasdani/github-patches | git_diff | microsoft__Qcodes-531 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Keithley2600 driver broken after commit 460c93c71506213102e56d3ea0c518723666d56a
### Steps to reproduce
1. Connect Keithley 2600
2. ask it for output status or mode
3. doh
### Expected behaviour
tell me what i asked for
### Actual behaviour
error because parsing fails
issue introduced in commit 460c93c71506213102e56d3ea0c518723666d56a (@alexcjohnson)
### System
**operating system**
W10
**qcodes branch**
master
**qcodes commit**
177f8201b3778c0071c60d4e4c658449284e1a6c
</issue>
<code>
[start of qcodes/instrument_drivers/tektronix/Keithley_2600.py]
1 from qcodes import VisaInstrument
2
3
4 class Keithley_2600(VisaInstrument):
5 """
6 channel: use channel 'a' or 'b'
7
8 This is the qcodes driver for the Keithley_2600 Source-Meter series,
9 tested with Keithley_2614B
10
11 Status: beta-version.
12 TODO:
13 - Add all parameters that are in the manual
14 - range and limit should be set according to mode
15 - add ramping and such stuff
16
17 """
18 def __init__(self, name, address, channel, **kwargs):
19 super().__init__(name, address, terminator='\n', **kwargs)
20 self._channel = channel
21
22 self.add_parameter('volt', get_cmd='measure.v()',
23 get_parser=float, set_cmd='source.levelv={:.12f}',
24 label='Voltage',
25 unit='V')
26 self.add_parameter('curr', get_cmd='measure.i()',
27 get_parser=float, set_cmd='source.leveli={:.12f}',
28 label='Current',
29 unit='A')
30 self.add_parameter('mode',
31 get_cmd='source.func',
32 set_cmd='source.func={:d}',
33 val_mapping={'current': 0, 'voltage': 1})
34 self.add_parameter('output',
35 get_cmd='source.output',
36 set_cmd='source.output={:d}',
37 val_mapping={'on': 1, 'off': 0})
38 # Source range
39 # needs get after set
40 self.add_parameter('rangev',
41 get_cmd='source.rangev',
42 get_parser=float,
43 set_cmd='source.rangev={:.4f}',
44 unit='V')
45 # Measure range
46 # needs get after set
47 self.add_parameter('rangei',
48 get_cmd='source.rangei',
49 get_parser=float,
50 set_cmd='source.rangei={:.4f}',
51 unit='A')
52 # Compliance limit
53 self.add_parameter('limitv',
54 get_cmd='source.limitv',
55 get_parser=float,
56 set_cmd='source.limitv={:.4f}',
57 unit='V')
58 # Compliance limit
59 self.add_parameter('limiti',
60 get_cmd='source.limiti',
61 get_parser=float,
62 set_cmd='source.limiti={:.4f}',
63 unit='A')
64
65 self.connect_message()
66
67 def get_idn(self):
68 IDN = self.ask_raw('*IDN?')
69 vendor, model, serial, firmware = map(str.strip, IDN.split(','))
70 model = model[6:]
71
72 IDN = {'vendor': vendor, 'model': model,
73 'serial': serial, 'firmware': firmware}
74 return IDN
75
76 def reset(self):
77 self.write('reset()')
78
79 def ask(self, cmd):
80 return super().ask('print(smu{:s}.{:s})'.format(self._channel, cmd))
81
82 def write(self, cmd):
83 super().write('smu{:s}.{:s}'.format(self._channel, cmd))
84
[end of qcodes/instrument_drivers/tektronix/Keithley_2600.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/qcodes/instrument_drivers/tektronix/Keithley_2600.py b/qcodes/instrument_drivers/tektronix/Keithley_2600.py
--- a/qcodes/instrument_drivers/tektronix/Keithley_2600.py
+++ b/qcodes/instrument_drivers/tektronix/Keithley_2600.py
@@ -19,20 +19,26 @@
super().__init__(name, address, terminator='\n', **kwargs)
self._channel = channel
- self.add_parameter('volt', get_cmd='measure.v()',
- get_parser=float, set_cmd='source.levelv={:.12f}',
+ self.add_parameter('volt',
+ get_cmd='measure.v()',
+ get_parser=float,
+ set_cmd='source.levelv={:.12f}',
label='Voltage',
unit='V')
- self.add_parameter('curr', get_cmd='measure.i()',
- get_parser=float, set_cmd='source.leveli={:.12f}',
+ self.add_parameter('curr',
+ get_cmd='measure.i()',
+ get_parser=float,
+ set_cmd='source.leveli={:.12f}',
label='Current',
unit='A')
self.add_parameter('mode',
get_cmd='source.func',
+ get_parser=float,
set_cmd='source.func={:d}',
val_mapping={'current': 0, 'voltage': 1})
self.add_parameter('output',
get_cmd='source.output',
+ get_parser=float,
set_cmd='source.output={:d}',
val_mapping={'on': 1, 'off': 0})
# Source range
| {"golden_diff": "diff --git a/qcodes/instrument_drivers/tektronix/Keithley_2600.py b/qcodes/instrument_drivers/tektronix/Keithley_2600.py\n--- a/qcodes/instrument_drivers/tektronix/Keithley_2600.py\n+++ b/qcodes/instrument_drivers/tektronix/Keithley_2600.py\n@@ -19,20 +19,26 @@\n super().__init__(name, address, terminator='\\n', **kwargs)\n self._channel = channel\n \n- self.add_parameter('volt', get_cmd='measure.v()',\n- get_parser=float, set_cmd='source.levelv={:.12f}',\n+ self.add_parameter('volt',\n+ get_cmd='measure.v()',\n+ get_parser=float,\n+ set_cmd='source.levelv={:.12f}',\n label='Voltage',\n unit='V')\n- self.add_parameter('curr', get_cmd='measure.i()',\n- get_parser=float, set_cmd='source.leveli={:.12f}',\n+ self.add_parameter('curr',\n+ get_cmd='measure.i()',\n+ get_parser=float,\n+ set_cmd='source.leveli={:.12f}',\n label='Current',\n unit='A')\n self.add_parameter('mode',\n get_cmd='source.func',\n+ get_parser=float,\n set_cmd='source.func={:d}',\n val_mapping={'current': 0, 'voltage': 1})\n self.add_parameter('output',\n get_cmd='source.output',\n+ get_parser=float,\n set_cmd='source.output={:d}',\n val_mapping={'on': 1, 'off': 0})\n # Source range\n", "issue": "Keithley2600 driver broken after commit 460c93c71506213102e56d3ea0c518723666d56a\n\r\n### Steps to reproduce\r\n1. Connect Keithley 2600\r\n2. ask it for output status or mode\r\n3. doh\r\n\r\n### Expected behaviour\r\ntell me what i asked for\r\n\r\n### Actual behaviour\r\nerror because parsing fails\r\nissue introduced in commit 460c93c71506213102e56d3ea0c518723666d56a (@alexcjohnson)\r\n\r\n\r\n### System\r\n**operating system**\r\nW10\r\n**qcodes branch**\r\nmaster\r\n**qcodes commit**\r\n177f8201b3778c0071c60d4e4c658449284e1a6c \n", "before_files": [{"content": "from qcodes import VisaInstrument\n\n\nclass Keithley_2600(VisaInstrument):\n \"\"\"\n channel: use channel 'a' or 'b'\n\n This is the qcodes driver for the Keithley_2600 Source-Meter series,\n tested with Keithley_2614B\n\n Status: beta-version.\n TODO:\n - Add all parameters that are in the manual\n - range and limit should be set according to mode\n - add ramping and such stuff\n\n \"\"\"\n def __init__(self, name, address, channel, **kwargs):\n super().__init__(name, address, terminator='\\n', **kwargs)\n self._channel = channel\n\n self.add_parameter('volt', get_cmd='measure.v()',\n get_parser=float, set_cmd='source.levelv={:.12f}',\n label='Voltage',\n unit='V')\n self.add_parameter('curr', get_cmd='measure.i()',\n get_parser=float, set_cmd='source.leveli={:.12f}',\n label='Current',\n unit='A')\n self.add_parameter('mode',\n get_cmd='source.func',\n set_cmd='source.func={:d}',\n val_mapping={'current': 0, 'voltage': 1})\n self.add_parameter('output',\n get_cmd='source.output',\n set_cmd='source.output={:d}',\n val_mapping={'on': 1, 'off': 0})\n # Source range\n # needs get after set\n self.add_parameter('rangev',\n get_cmd='source.rangev',\n get_parser=float,\n set_cmd='source.rangev={:.4f}',\n unit='V')\n # Measure range\n # needs get after set\n self.add_parameter('rangei',\n get_cmd='source.rangei',\n get_parser=float,\n set_cmd='source.rangei={:.4f}',\n unit='A')\n # Compliance limit\n self.add_parameter('limitv',\n get_cmd='source.limitv',\n get_parser=float,\n set_cmd='source.limitv={:.4f}',\n unit='V')\n # Compliance limit\n self.add_parameter('limiti',\n get_cmd='source.limiti',\n get_parser=float,\n set_cmd='source.limiti={:.4f}',\n unit='A')\n\n self.connect_message()\n\n def get_idn(self):\n IDN = self.ask_raw('*IDN?')\n vendor, 
model, serial, firmware = map(str.strip, IDN.split(','))\n model = model[6:]\n\n IDN = {'vendor': vendor, 'model': model,\n 'serial': serial, 'firmware': firmware}\n return IDN\n\n def reset(self):\n self.write('reset()')\n\n def ask(self, cmd):\n return super().ask('print(smu{:s}.{:s})'.format(self._channel, cmd))\n\n def write(self, cmd):\n super().write('smu{:s}.{:s}'.format(self._channel, cmd))\n", "path": "qcodes/instrument_drivers/tektronix/Keithley_2600.py"}]} | 1,600 | 383 |
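The fix is easy to miss: `val_mapping={'on': 1, 'off': 0}` gives an inverse lookup keyed by the ints `0` and `1`, while the instrument answers `print(...)` queries with a float string, so without `get_parser=float` the lookup has nothing to match. A toy illustration; the reply string is an assumed example of Keithley output, not a captured response:

```python
raw_reply = "1.00000e+00"        # shape of what print(smua.source.output) returns
inverse = {1: "on", 0: "off"}    # derived from val_mapping

# inverse[raw_reply] would raise KeyError: keys are ints, the reply is a str.
print(inverse[float(raw_reply)])  # 'on', since float("1.00000e+00") == 1.0 == 1
```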
gh_patches_debug_26054 | rasdani/github-patches | git_diff | encode__uvicorn-636 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
--proxy-headers with supervisor get client ip failed
#### uvicorn version
`Running uvicorn 0.11.3 with CPython 3.8.2 on Linux`
#### supervisor config
```
[fcgi-program:uvicorn]
socket=tcp://0.0.0.0:5000
directory=/var/www/webapp/
command=/usr/local/python3/bin/uvicorn --fd 0 --proxy-headers main:app
numprocs=2
process_name=uvicorn-%(process_num)d
stdout_logfile_maxbytes=0
stdout_logfile=/var/log/uvicorn.log
```
### fastapi
version 0.52.0
```
# cat /var/www/webapp/main.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from fastapi import FastAPI, Request
app = FastAPI(redoc_url=None)
@app.get('/')
def index(request: Request):
return 'hello {}!'.format(request.client.host)
```
### run on supervisor
supervisord version 4.1.0
```
supervisord -n
curl http://127.0.0.1:5000
```
why client.host is None?
</issue>
<code>
[start of uvicorn/protocols/utils.py]
1 import socket
2
3
4 def get_remote_addr(transport):
5 socket_info = transport.get_extra_info("socket")
6 if socket_info is not None:
7 try:
8 info = socket_info.getpeername()
9 except OSError:
10 # This case appears to inconsistently occur with uvloop
11 # bound to a unix domain socket.
12 family = None
13 info = None
14 else:
15 family = socket_info.family
16
17 if family in (socket.AF_INET, socket.AF_INET6):
18 return (str(info[0]), int(info[1]))
19 return None
20 info = transport.get_extra_info("peername")
21 if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:
22 return (str(info[0]), int(info[1]))
23 return None
24
25
26 def get_local_addr(transport):
27 socket_info = transport.get_extra_info("socket")
28 if socket_info is not None:
29 info = socket_info.getsockname()
30 family = socket_info.family
31 if family in (socket.AF_INET, socket.AF_INET6):
32 return (str(info[0]), int(info[1]))
33 return None
34 info = transport.get_extra_info("sockname")
35 if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:
36 return (str(info[0]), int(info[1]))
37 return None
38
39
40 def is_ssl(transport):
41 return bool(transport.get_extra_info("sslcontext"))
42
43
44 def get_client_addr(scope):
45 client = scope.get("client")
46 if not client:
47 return ""
48 return "%s:%d" % client
49
50
51 def get_path_with_query_string(scope):
52 path_with_query_string = scope.get("root_path", "") + scope["path"]
53 if scope["query_string"]:
54 path_with_query_string = "{}?{}".format(
55 path_with_query_string, scope["query_string"].decode("ascii")
56 )
57 return path_with_query_string
58
[end of uvicorn/protocols/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/uvicorn/protocols/utils.py b/uvicorn/protocols/utils.py
--- a/uvicorn/protocols/utils.py
+++ b/uvicorn/protocols/utils.py
@@ -1,5 +1,10 @@
import socket
+if hasattr(socket, "AF_UNIX"):
+ SUPPORTED_SOCKET_FAMILIES = (socket.AF_INET, socket.AF_INET6, socket.AF_UNIX)
+else:
+ SUPPORTED_SOCKET_FAMILIES = (socket.AF_INET, socket.AF_INET6)
+
def get_remote_addr(transport):
socket_info = transport.get_extra_info("socket")
@@ -14,8 +19,9 @@
else:
family = socket_info.family
- if family in (socket.AF_INET, socket.AF_INET6):
+ if family in SUPPORTED_SOCKET_FAMILIES:
return (str(info[0]), int(info[1]))
+
return None
info = transport.get_extra_info("peername")
if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:
@@ -28,7 +34,7 @@
if socket_info is not None:
info = socket_info.getsockname()
family = socket_info.family
- if family in (socket.AF_INET, socket.AF_INET6):
+ if family in SUPPORTED_SOCKET_FAMILIES:
return (str(info[0]), int(info[1]))
return None
info = transport.get_extra_info("sockname")
| {"golden_diff": "diff --git a/uvicorn/protocols/utils.py b/uvicorn/protocols/utils.py\n--- a/uvicorn/protocols/utils.py\n+++ b/uvicorn/protocols/utils.py\n@@ -1,5 +1,10 @@\n import socket\n \n+if hasattr(socket, \"AF_UNIX\"):\n+ SUPPORTED_SOCKET_FAMILIES = (socket.AF_INET, socket.AF_INET6, socket.AF_UNIX)\n+else:\n+ SUPPORTED_SOCKET_FAMILIES = (socket.AF_INET, socket.AF_INET6)\n+\n \n def get_remote_addr(transport):\n socket_info = transport.get_extra_info(\"socket\")\n@@ -14,8 +19,9 @@\n else:\n family = socket_info.family\n \n- if family in (socket.AF_INET, socket.AF_INET6):\n+ if family in SUPPORTED_SOCKET_FAMILIES:\n return (str(info[0]), int(info[1]))\n+\n return None\n info = transport.get_extra_info(\"peername\")\n if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:\n@@ -28,7 +34,7 @@\n if socket_info is not None:\n info = socket_info.getsockname()\n family = socket_info.family\n- if family in (socket.AF_INET, socket.AF_INET6):\n+ if family in SUPPORTED_SOCKET_FAMILIES:\n return (str(info[0]), int(info[1]))\n return None\n info = transport.get_extra_info(\"sockname\")\n", "issue": "--proxy-headers with supervisor get client ip failed\n#### uvicorn version\r\n`Running uvicorn 0.11.3 with CPython 3.8.2 on Linux` \r\n#### supervisor config\r\n```\r\n[fcgi-program:uvicorn]\r\nsocket=tcp://0.0.0.0:5000 \r\ndirectory=/var/www/webapp/ \r\ncommand=/usr/local/python3/bin/uvicorn --fd 0 --proxy-headers main:app \r\nnumprocs=2 \r\nprocess_name=uvicorn-%(process_num)d \r\nstdout_logfile_maxbytes=0 \r\nstdout_logfile=/var/log/uvicorn.log \r\n```\r\n\r\n### fastapi \r\nversion 0.52.0\r\n\r\n```\r\n# cat /var/www/webapp/main.py\r\n\r\n#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\nfrom fastapi import FastAPI, Request\r\n\r\napp = FastAPI(redoc_url=None)\r\n\r\n\r\[email protected]('/')\r\ndef index(request: Request):\r\n return 'hello {}!'.format(request.client.host)\r\n\r\n```\r\n\r\n### run on supervisor\r\nsupervisord version 4.1.0\r\n```\r\nsupervisord -n\r\ncurl http://127.0.0.1:5000\r\n\r\n```\r\nwhy client.host is None?\r\n\n", "before_files": [{"content": "import socket\n\n\ndef get_remote_addr(transport):\n socket_info = transport.get_extra_info(\"socket\")\n if socket_info is not None:\n try:\n info = socket_info.getpeername()\n except OSError:\n # This case appears to inconsistently occur with uvloop\n # bound to a unix domain socket.\n family = None\n info = None\n else:\n family = socket_info.family\n\n if family in (socket.AF_INET, socket.AF_INET6):\n return (str(info[0]), int(info[1]))\n return None\n info = transport.get_extra_info(\"peername\")\n if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:\n return (str(info[0]), int(info[1]))\n return None\n\n\ndef get_local_addr(transport):\n socket_info = transport.get_extra_info(\"socket\")\n if socket_info is not None:\n info = socket_info.getsockname()\n family = socket_info.family\n if family in (socket.AF_INET, socket.AF_INET6):\n return (str(info[0]), int(info[1]))\n return None\n info = transport.get_extra_info(\"sockname\")\n if info is not None and isinstance(info, (list, tuple)) and len(info) == 2:\n return (str(info[0]), int(info[1]))\n return None\n\n\ndef is_ssl(transport):\n return bool(transport.get_extra_info(\"sslcontext\"))\n\n\ndef get_client_addr(scope):\n client = scope.get(\"client\")\n if not client:\n return \"\"\n return \"%s:%d\" % client\n\n\ndef get_path_with_query_string(scope):\n path_with_query_string = scope.get(\"root_path\", \"\") + 
scope[\"path\"]\n if scope[\"query_string\"]:\n path_with_query_string = \"{}?{}\".format(\n path_with_query_string, scope[\"query_string\"].decode(\"ascii\")\n )\n return path_with_query_string\n", "path": "uvicorn/protocols/utils.py"}]} | 1,341 | 321 |
gh_patches_debug_3256 | rasdani/github-patches | git_diff | e-valuation__EvaP-1467 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
OpenID login button is broken in latest firefox
Behavior:
When clicking the "login" button at `https://evap.hpi.de` a request is sent to `https://evap.hpi.de/oidc/authenticate/`, which returns a 302 and redirects to `https://evap.hpi.de/?next=/oidc/authenticate/`, which has no visible difference with `https://evap.hpi.de`. After clicking the login button again I'm redirected to `https://evap.hpi.de/?next=/oidc/authenticate/%3Fnext%3D/oidc/authenticate/`
Expected Behavior:
Display the openID page and allow login
Steps to reproduce
Go to https://evap.hpi.de and click the "login" button. Browser is an up-to-date firefox on linux
</issue>
<code>
[start of evap/middleware.py]
1 from django.contrib.auth.views import redirect_to_login
2
3
4 class RequireLoginMiddleware:
5 def __init__(self, get_response):
6 self.get_response = get_response
7
8 def __call__(self, request):
9 return self.get_response(request)
10
11 @staticmethod
12 def process_view(request, view_func, _view_args, _view_kwargs):
13 # Returning None tells django to pass the request on
14 if request.user.is_authenticated:
15 return None
16
17 if "no_login_required" in view_func.__dict__ and view_func.no_login_required:
18 return None
19
20 return redirect_to_login(request.get_full_path())
21
22
23 def no_login_required(func):
24 func.no_login_required = True
25 return func
26
[end of evap/middleware.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/evap/middleware.py b/evap/middleware.py
--- a/evap/middleware.py
+++ b/evap/middleware.py
@@ -17,6 +17,9 @@
if "no_login_required" in view_func.__dict__ and view_func.no_login_required:
return None
+ if view_func.__name__ in ["OIDCAuthenticationRequestView", "OIDCAuthenticationCallbackView"]:
+ return None
+
return redirect_to_login(request.get_full_path())
| {"golden_diff": "diff --git a/evap/middleware.py b/evap/middleware.py\n--- a/evap/middleware.py\n+++ b/evap/middleware.py\n@@ -17,6 +17,9 @@\n if \"no_login_required\" in view_func.__dict__ and view_func.no_login_required:\n return None\n \n+ if view_func.__name__ in [\"OIDCAuthenticationRequestView\", \"OIDCAuthenticationCallbackView\"]:\n+ return None\n+\n return redirect_to_login(request.get_full_path())\n", "issue": "OpenID login button is broken in latest firefox\nBehavior:\r\nWhen clicking the \"login\" button at `https://evap.hpi.de` a request is sent to `https://evap.hpi.de/oidc/authenticate/`, which returns a 302 and redirects to `https://evap.hpi.de/?next=/oidc/authenticate/`, which has no visible difference with `https://evap.hpi.de`. After clicking the login button again I'm redirected to `https://evap.hpi.de/?next=/oidc/authenticate/%3Fnext%3D/oidc/authenticate/`\r\n\r\nExpected Behavior:\r\nDisplay the openID page and allow login\r\n\r\nSteps to reproduce\r\nGo to https://evap.hpi.de and click the \"login\" button. Browser is an up-to-date firefox on linux\n", "before_files": [{"content": "from django.contrib.auth.views import redirect_to_login\n\n\nclass RequireLoginMiddleware:\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n return self.get_response(request)\n\n @staticmethod\n def process_view(request, view_func, _view_args, _view_kwargs):\n # Returning None tells django to pass the request on\n if request.user.is_authenticated:\n return None\n\n if \"no_login_required\" in view_func.__dict__ and view_func.no_login_required:\n return None\n\n return redirect_to_login(request.get_full_path())\n\n\ndef no_login_required(func):\n func.no_login_required = True\n return func\n", "path": "evap/middleware.py"}]} | 906 | 114 |
gh_patches_debug_3683 | rasdani/github-patches | git_diff | bokeh__bokeh-5883 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Test bokehjs/examples and run them on travis ci
They are constantly broken due to not being continuously tested.
</issue>
<code>
[start of examples/plotting/file/airports_map.py]
1 from __future__ import print_function
2
3 from bokeh.layouts import row
4 from bokeh.models import (
5 Range1d, WMTSTileSource, ColumnDataSource, HoverTool,
6 )
7 from bokeh.plotting import figure, show, output_file
8 from bokeh.sampledata.airports import data as airports
9 from bokeh.tile_providers import CARTODBPOSITRON
10
11 points_source = ColumnDataSource(airports)
12 title = "US Airports: Field Elevation > 1500m"
13
14 def plot(tile_source, filename):
15 output_file(filename, title=title)
16
17 # set to roughly extent of points
18 x_range = Range1d(start=airports['x'].min() - 10000, end=airports['x'].max() + 10000, bounds=None)
19 y_range = Range1d(start=airports['y'].min() - 10000, end=airports['y'].max() + 10000, bounds=None)
20
21 # create plot and add tools
22 p = figure(tools='wheel_zoom,pan', x_range=x_range, y_range=y_range, title=title)
23 p.axis.visible = False
24 hover_tool = HoverTool(tooltips=[("Name", "@name"), ("Elevation", "@elevation (m)")])
25 p.add_tools(hover_tool)
26 p.add_tile(tile_source)
27
28 # create point glyphs
29 p.circle(x='x', y='y', size=9, fill_color="#60ACA1", line_color="#D2C4C1", line_width=1.5, source=points_source)
30 return p
31
32 # create a tile source
33 tile_options = {}
34 tile_options['url'] = 'http://otile2.mqcdn.com/tiles/1.0.0/sat/{Z}/{X}/{Y}.png'
35 mq_tile_source = WMTSTileSource(**tile_options)
36
37 carto = plot(CARTODBPOSITRON, 'airports_map_cartodb.html')
38 mq = plot(mq_tile_source, 'airports_map.html')
39
40 show(row([carto, mq]))
41
[end of examples/plotting/file/airports_map.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/plotting/file/airports_map.py b/examples/plotting/file/airports_map.py
--- a/examples/plotting/file/airports_map.py
+++ b/examples/plotting/file/airports_map.py
@@ -31,7 +31,7 @@
# create a tile source
tile_options = {}
-tile_options['url'] = 'http://otile2.mqcdn.com/tiles/1.0.0/sat/{Z}/{X}/{Y}.png'
+# TODO tile_options['url'] = 'http://otile2.mqcdn.com/tiles/1.0.0/sat/{Z}/{X}/{Y}.png'
mq_tile_source = WMTSTileSource(**tile_options)
carto = plot(CARTODBPOSITRON, 'airports_map_cartodb.html')
| {"golden_diff": "diff --git a/examples/plotting/file/airports_map.py b/examples/plotting/file/airports_map.py\n--- a/examples/plotting/file/airports_map.py\n+++ b/examples/plotting/file/airports_map.py\n@@ -31,7 +31,7 @@\n \n # create a tile source\n tile_options = {}\n-tile_options['url'] = 'http://otile2.mqcdn.com/tiles/1.0.0/sat/{Z}/{X}/{Y}.png'\n+# TODO tile_options['url'] = 'http://otile2.mqcdn.com/tiles/1.0.0/sat/{Z}/{X}/{Y}.png'\n mq_tile_source = WMTSTileSource(**tile_options)\n \n carto = plot(CARTODBPOSITRON, 'airports_map_cartodb.html')\n", "issue": "Test bokehjs/examples and run them on travis ci\nThey are constantly broken due to not being continuously tested.\n", "before_files": [{"content": "from __future__ import print_function\n\nfrom bokeh.layouts import row\nfrom bokeh.models import (\n Range1d, WMTSTileSource, ColumnDataSource, HoverTool,\n)\nfrom bokeh.plotting import figure, show, output_file\nfrom bokeh.sampledata.airports import data as airports\nfrom bokeh.tile_providers import CARTODBPOSITRON\n\npoints_source = ColumnDataSource(airports)\ntitle = \"US Airports: Field Elevation > 1500m\"\n\ndef plot(tile_source, filename):\n output_file(filename, title=title)\n\n # set to roughly extent of points\n x_range = Range1d(start=airports['x'].min() - 10000, end=airports['x'].max() + 10000, bounds=None)\n y_range = Range1d(start=airports['y'].min() - 10000, end=airports['y'].max() + 10000, bounds=None)\n\n # create plot and add tools\n p = figure(tools='wheel_zoom,pan', x_range=x_range, y_range=y_range, title=title)\n p.axis.visible = False\n hover_tool = HoverTool(tooltips=[(\"Name\", \"@name\"), (\"Elevation\", \"@elevation (m)\")])\n p.add_tools(hover_tool)\n p.add_tile(tile_source)\n\n # create point glyphs\n p.circle(x='x', y='y', size=9, fill_color=\"#60ACA1\", line_color=\"#D2C4C1\", line_width=1.5, source=points_source)\n return p\n\n# create a tile source\ntile_options = {}\ntile_options['url'] = 'http://otile2.mqcdn.com/tiles/1.0.0/sat/{Z}/{X}/{Y}.png'\nmq_tile_source = WMTSTileSource(**tile_options)\n\ncarto = plot(CARTODBPOSITRON, 'airports_map_cartodb.html')\nmq = plot(mq_tile_source, 'airports_map.html')\n\nshow(row([carto, mq]))\n", "path": "examples/plotting/file/airports_map.py"}]} | 1,102 | 181 |
gh_patches_debug_31548 | rasdani/github-patches | git_diff | archlinux__archinstall-1194 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`select_disk` throw an error because BlockDevice does not contain label
Running this line from a python script causes an error:
`archinstall.select_disk(archinstall.all_blockdevices())`
```
Traceback (most recent call last):
File "/root/test.py", line 3, in <module>
hdd = archinstall.select_disk(archinstall.all_blockdevices())
File "/usr/lib/python3.10/site-packages/archinstall/lib/user_interaction/disk_conf.py", line 68, in select_disk
f"{index}: {drive} ({dict_o_disks[drive]['size'], dict_o_disks[drive].device, dict_o_disks[drive]['label']})"
File "/usr/lib/python3.10/site-packages/archinstall/lib/disk/blockdevice.py", line 45, in __getitem__
raise KeyError(f'{self} does not contain information: "{key}"')
KeyError: 'BlockDevice(/dev/sr0, size=0.8GB, free_space=, bus_type=sata) does not contain information: "label"'
```
</issue>
<code>
[start of archinstall/lib/user_interaction/disk_conf.py]
1 from __future__ import annotations
2
3 from typing import Any, Dict, TYPE_CHECKING, Optional
4
5 from .partitioning_conf import manage_new_and_existing_partitions, get_default_partition_layout
6 from ..disk import BlockDevice
7 from ..exceptions import DiskError
8 from ..menu import Menu
9 from ..menu.menu import MenuSelectionType
10 from ..output import log
11
12 if TYPE_CHECKING:
13 _: Any
14
15
16 def ask_for_main_filesystem_format(advanced_options=False) -> str:
17 options = {'btrfs': 'btrfs', 'ext4': 'ext4', 'xfs': 'xfs', 'f2fs': 'f2fs'}
18
19 advanced = {'ntfs': 'ntfs'}
20
21 if advanced_options:
22 options.update(advanced)
23
24 prompt = _('Select which filesystem your main partition should use')
25 choice = Menu(prompt, options, skip=False).run()
26 return choice.value
27
28
29 def select_individual_blockdevice_usage(block_devices: list) -> Dict[str, Any]:
30 result = {}
31
32 for device in block_devices:
33 layout = manage_new_and_existing_partitions(device)
34 result[device.path] = layout
35
36 return result
37
38
39 def select_disk_layout(preset: Optional[Dict[str, Any]], block_devices: list, advanced_options=False) -> Optional[Dict[str, Any]]:
40 wipe_mode = str(_('Wipe all selected drives and use a best-effort default partition layout'))
41 custome_mode = str(_('Select what to do with each individual drive (followed by partition usage)'))
42 modes = [wipe_mode, custome_mode]
43
44 warning = str(_('Are you sure you want to reset this setting?'))
45
46 choice = Menu(
47 _('Select what you wish to do with the selected block devices'),
48 modes,
49 explode_on_interrupt=True,
50 explode_warning=warning
51 ).run()
52
53 match choice.type_:
54 case MenuSelectionType.Esc: return preset
55 case MenuSelectionType.Ctrl_c: return None
56 case MenuSelectionType.Selection:
57 if choice.value == wipe_mode:
58 return get_default_partition_layout(block_devices, advanced_options)
59 else:
60 return select_individual_blockdevice_usage(block_devices)
61
62
63 def select_disk(dict_o_disks: Dict[str, BlockDevice]) -> BlockDevice:
64 """
65 Asks the user to select a harddrive from the `dict_o_disks` selection.
66 Usually this is combined with :ref:`archinstall.list_drives`.
67
68 :param dict_o_disks: A `dict` where keys are the drive-name, value should be a dict containing drive information.
69 :type dict_o_disks: dict
70
71 :return: The name/path (the dictionary key) of the selected drive
72 :rtype: str
73 """
74 drives = sorted(list(dict_o_disks.keys()))
75 if len(drives) >= 1:
76 for index, drive in enumerate(drives):
77 print(
78 f"{index}: {drive} ({dict_o_disks[drive]['size'], dict_o_disks[drive].device, dict_o_disks[drive]['label']})"
79 )
80
81 log("You can skip selecting a drive and partitioning and use whatever drive-setup is mounted at /mnt (experimental)",
82 fg="yellow")
83
84 drive = Menu('Select one of the disks or skip and use "/mnt" as default"', drives).run()
85 if not drive:
86 return drive
87
88 drive = dict_o_disks[drive]
89 return drive
90
91 raise DiskError('select_disk() requires a non-empty dictionary of disks to select from.')
92
[end of archinstall/lib/user_interaction/disk_conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/archinstall/lib/user_interaction/disk_conf.py b/archinstall/lib/user_interaction/disk_conf.py
--- a/archinstall/lib/user_interaction/disk_conf.py
+++ b/archinstall/lib/user_interaction/disk_conf.py
@@ -7,7 +7,6 @@
from ..exceptions import DiskError
from ..menu import Menu
from ..menu.menu import MenuSelectionType
-from ..output import log
if TYPE_CHECKING:
_: Any
@@ -60,7 +59,7 @@
return select_individual_blockdevice_usage(block_devices)
-def select_disk(dict_o_disks: Dict[str, BlockDevice]) -> BlockDevice:
+def select_disk(dict_o_disks: Dict[str, BlockDevice]) -> Optional[BlockDevice]:
"""
Asks the user to select a harddrive from the `dict_o_disks` selection.
Usually this is combined with :ref:`archinstall.list_drives`.
@@ -73,19 +72,15 @@
"""
drives = sorted(list(dict_o_disks.keys()))
if len(drives) >= 1:
- for index, drive in enumerate(drives):
- print(
- f"{index}: {drive} ({dict_o_disks[drive]['size'], dict_o_disks[drive].device, dict_o_disks[drive]['label']})"
- )
+ title = str(_('You can skip selecting a drive and partitioning and use whatever drive-setup is mounted at /mnt (experimental)')) + '\n'
+ title += str(_('Select one of the disks or skip and use /mnt as default'))
- log("You can skip selecting a drive and partitioning and use whatever drive-setup is mounted at /mnt (experimental)",
- fg="yellow")
+ choice = Menu(title, drives).run()
- drive = Menu('Select one of the disks or skip and use "/mnt" as default"', drives).run()
- if not drive:
- return drive
+ if choice.type_ == MenuSelectionType.Esc:
+ return None
- drive = dict_o_disks[drive]
+ drive = dict_o_disks[choice.value]
return drive
raise DiskError('select_disk() requires a non-empty dictionary of disks to select from.')
| {"golden_diff": "diff --git a/archinstall/lib/user_interaction/disk_conf.py b/archinstall/lib/user_interaction/disk_conf.py\n--- a/archinstall/lib/user_interaction/disk_conf.py\n+++ b/archinstall/lib/user_interaction/disk_conf.py\n@@ -7,7 +7,6 @@\n from ..exceptions import DiskError\n from ..menu import Menu\n from ..menu.menu import MenuSelectionType\n-from ..output import log\n \n if TYPE_CHECKING:\n \t_: Any\n@@ -60,7 +59,7 @@\n \t\t\t\treturn select_individual_blockdevice_usage(block_devices)\n \n \n-def select_disk(dict_o_disks: Dict[str, BlockDevice]) -> BlockDevice:\n+def select_disk(dict_o_disks: Dict[str, BlockDevice]) -> Optional[BlockDevice]:\n \t\"\"\"\n \tAsks the user to select a harddrive from the `dict_o_disks` selection.\n \tUsually this is combined with :ref:`archinstall.list_drives`.\n@@ -73,19 +72,15 @@\n \t\"\"\"\n \tdrives = sorted(list(dict_o_disks.keys()))\n \tif len(drives) >= 1:\n-\t\tfor index, drive in enumerate(drives):\n-\t\t\tprint(\n-\t\t\t\tf\"{index}: {drive} ({dict_o_disks[drive]['size'], dict_o_disks[drive].device, dict_o_disks[drive]['label']})\"\n-\t\t\t)\n+\t\ttitle = str(_('You can skip selecting a drive and partitioning and use whatever drive-setup is mounted at /mnt (experimental)')) + '\\n'\n+\t\ttitle += str(_('Select one of the disks or skip and use /mnt as default'))\n \n-\t\tlog(\"You can skip selecting a drive and partitioning and use whatever drive-setup is mounted at /mnt (experimental)\",\n-\t\t\tfg=\"yellow\")\n+\t\tchoice = Menu(title, drives).run()\n \n-\t\tdrive = Menu('Select one of the disks or skip and use \"/mnt\" as default\"', drives).run()\n-\t\tif not drive:\n-\t\t\treturn drive\n+\t\tif choice.type_ == MenuSelectionType.Esc:\n+\t\t\treturn None\n \n-\t\tdrive = dict_o_disks[drive]\n+\t\tdrive = dict_o_disks[choice.value]\n \t\treturn drive\n \n \traise DiskError('select_disk() requires a non-empty dictionary of disks to select from.')\n", "issue": "`select_disk` throw an error because BlockDevice does not contain label\nRunning this line from a python script causes an error: \r\n\r\n`archinstall.select_disk(archinstall.all_blockdevices())`\r\n\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/root/test.py\", line 3, in <module>\r\n hdd = archinstall.select_disk(archinstall.all_blockdevices())\r\n File \"/usr/lib/python3.10/site-packages/archinstall/lib/user_interaction/disk_conf.py\", line 68, in select_disk\r\n f\"{index}: {drive} ({dict_o_disks[drive]['size'], dict_o_disks[drive].device, dict_o_disks[drive]['label']})\"\r\n File \"/usr/lib/python3.10/site-packages/archinstall/lib/disk/blockdevice.py\", line 45, in __getitem__\r\n raise KeyError(f'{self} does not contain information: \"{key}\"')\r\nKeyError: 'BlockDevice(/dev/sr0, size=0.8GB, free_space=, bus_type=sata) does not contain information: \"label\"'\r\n```\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any, Dict, TYPE_CHECKING, Optional\n\nfrom .partitioning_conf import manage_new_and_existing_partitions, get_default_partition_layout\nfrom ..disk import BlockDevice\nfrom ..exceptions import DiskError\nfrom ..menu import Menu\nfrom ..menu.menu import MenuSelectionType\nfrom ..output import log\n\nif TYPE_CHECKING:\n\t_: Any\n\n\ndef ask_for_main_filesystem_format(advanced_options=False) -> str:\n\toptions = {'btrfs': 'btrfs', 'ext4': 'ext4', 'xfs': 'xfs', 'f2fs': 'f2fs'}\n\n\tadvanced = {'ntfs': 'ntfs'}\n\n\tif advanced_options:\n\t\toptions.update(advanced)\n\n\tprompt = _('Select which filesystem your main 
partition should use')\n\tchoice = Menu(prompt, options, skip=False).run()\n\treturn choice.value\n\n\ndef select_individual_blockdevice_usage(block_devices: list) -> Dict[str, Any]:\n\tresult = {}\n\n\tfor device in block_devices:\n\t\tlayout = manage_new_and_existing_partitions(device)\n\t\tresult[device.path] = layout\n\n\treturn result\n\n\ndef select_disk_layout(preset: Optional[Dict[str, Any]], block_devices: list, advanced_options=False) -> Optional[Dict[str, Any]]:\n\twipe_mode = str(_('Wipe all selected drives and use a best-effort default partition layout'))\n\tcustome_mode = str(_('Select what to do with each individual drive (followed by partition usage)'))\n\tmodes = [wipe_mode, custome_mode]\n\n\twarning = str(_('Are you sure you want to reset this setting?'))\n\n\tchoice = Menu(\n\t\t_('Select what you wish to do with the selected block devices'),\n\t\tmodes,\n\t\texplode_on_interrupt=True,\n\t\texplode_warning=warning\n\t).run()\n\n\tmatch choice.type_:\n\t\tcase MenuSelectionType.Esc: return preset\n\t\tcase MenuSelectionType.Ctrl_c: return None\n\t\tcase MenuSelectionType.Selection:\n\t\t\tif choice.value == wipe_mode:\n\t\t\t\treturn get_default_partition_layout(block_devices, advanced_options)\n\t\t\telse:\n\t\t\t\treturn select_individual_blockdevice_usage(block_devices)\n\n\ndef select_disk(dict_o_disks: Dict[str, BlockDevice]) -> BlockDevice:\n\t\"\"\"\n\tAsks the user to select a harddrive from the `dict_o_disks` selection.\n\tUsually this is combined with :ref:`archinstall.list_drives`.\n\n\t:param dict_o_disks: A `dict` where keys are the drive-name, value should be a dict containing drive information.\n\t:type dict_o_disks: dict\n\n\t:return: The name/path (the dictionary key) of the selected drive\n\t:rtype: str\n\t\"\"\"\n\tdrives = sorted(list(dict_o_disks.keys()))\n\tif len(drives) >= 1:\n\t\tfor index, drive in enumerate(drives):\n\t\t\tprint(\n\t\t\t\tf\"{index}: {drive} ({dict_o_disks[drive]['size'], dict_o_disks[drive].device, dict_o_disks[drive]['label']})\"\n\t\t\t)\n\n\t\tlog(\"You can skip selecting a drive and partitioning and use whatever drive-setup is mounted at /mnt (experimental)\",\n\t\t\tfg=\"yellow\")\n\n\t\tdrive = Menu('Select one of the disks or skip and use \"/mnt\" as default\"', drives).run()\n\t\tif not drive:\n\t\t\treturn drive\n\n\t\tdrive = dict_o_disks[drive]\n\t\treturn drive\n\n\traise DiskError('select_disk() requires a non-empty dictionary of disks to select from.')\n", "path": "archinstall/lib/user_interaction/disk_conf.py"}]} | 1,737 | 499 |
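
The heart of the fix above is the switch from truthiness checks on a raw string to an explicit `MenuSelectionType.Esc` branch plus an `Optional` return. A self-contained sketch of that control flow, using stand-in classes invented here for illustration (they are not archinstall's real types):

```python
from enum import Enum, auto
from typing import Dict, Optional


class MenuSelectionType(Enum):  # mirrors the enum the patched module imports
    Selection = auto()
    Esc = auto()


class Choice:  # hypothetical stand-in for the menu's result object
    def __init__(self, type_, value=None):
        self.type_ = type_
        self.value = value


def select(dict_o_disks: Dict[str, str], choice: Choice) -> Optional[str]:
    # Same flow as the patched select_disk: Esc returns None explicitly,
    # and a selection is resolved via choice.value, not the raw string.
    if choice.type_ == MenuSelectionType.Esc:
        return None
    return dict_o_disks[choice.value]


disks = {"/dev/sda": "BlockDevice(/dev/sda)"}
print(select(disks, Choice(MenuSelectionType.Esc)))                    # None
print(select(disks, Choice(MenuSelectionType.Selection, "/dev/sda")))  # BlockDevice(/dev/sda)
```

Callers therefore have to handle a `None` result, which is exactly what the widened `Optional[BlockDevice]` annotation advertises.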
gh_patches_debug_19023 | rasdani/github-patches | git_diff | vyperlang__vyper-874 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add uint256 decimal support for convert
### What's your issue about?
Convert should allow conversion of uint256 -> decimal.
### How can it be fixed?
convert.py:57
#### Cute Animal Picture
^.^
</issue>
<code>
[start of vyper/types/convert.py]
1 from vyper.functions.signature import (
2 signature
3 )
4 from vyper.parser.parser_utils import (
5 LLLnode,
6 getpos,
7 byte_array_to_num
8 )
9 from vyper.exceptions import (
10 InvalidLiteralException,
11 TypeMismatchException,
12 )
13 from vyper.types import (
14 BaseType,
15 )
16 from vyper.types import (
17 get_type,
18 )
19 from vyper.utils import (
20 DECIMAL_DIVISOR,
21 MemoryPositions,
22 SizeLimits
23 )
24
25
26 @signature(('int128', 'uint256', 'bytes32', 'bytes'), 'str_literal')
27 def to_int128(expr, args, kwargs, context):
28 in_node = args[0]
29 typ, len = get_type(in_node)
30 if typ in ('int128', 'uint256', 'bytes32'):
31 if in_node.typ.is_literal and not SizeLimits.in_bounds('int128', in_node.value):
32 raise InvalidLiteralException("Number out of range: {}".format(in_node.value), expr)
33 return LLLnode.from_list(
34 ['clamp', ['mload', MemoryPositions.MINNUM], in_node,
35 ['mload', MemoryPositions.MAXNUM]], typ=BaseType('int128', in_node.typ.unit), pos=getpos(expr)
36 )
37 else:
38 return byte_array_to_num(in_node, expr, 'int128')
39
40
41 @signature(('num_literal', 'int128', 'bytes32'), 'str_literal')
42 def to_uint256(expr, args, kwargs, context):
43 in_node = args[0]
44 typ, len = get_type(in_node)
45 if isinstance(in_node, int):
46
47 if not SizeLimits.in_bounds('uint256', in_node):
48 raise InvalidLiteralException("Number out of range: {}".format(in_node))
49 _unit = in_node.typ.unit if typ == 'int128' else None
50 return LLLnode.from_list(in_node, typ=BaseType('uint256', _unit), pos=getpos(expr))
51 elif isinstance(in_node, LLLnode) and typ in ('int128', 'num_literal'):
52 _unit = in_node.typ.unit if typ == 'int128' else None
53 return LLLnode.from_list(['clampge', in_node, 0], typ=BaseType('uint256', _unit), pos=getpos(expr))
54 elif isinstance(in_node, LLLnode) and typ in ('bytes32'):
55 return LLLnode(value=in_node.value, args=in_node.args, typ=BaseType('uint256'), pos=getpos(expr))
56 else:
57 raise InvalidLiteralException("Invalid input for uint256: %r" % in_node, expr)
58
59
60 @signature('int128', 'str_literal')
61 def to_decimal(expr, args, kwargs, context):
62 input = args[0]
63 return LLLnode.from_list(['mul', input, DECIMAL_DIVISOR], typ=BaseType('decimal', input.typ.unit, input.typ.positional),
64 pos=getpos(expr))
65
66
67 @signature(('int128', 'uint256', 'address', 'bytes'), 'str_literal')
68 def to_bytes32(expr, args, kwargs, context):
69 input = args[0]
70 typ, len = get_type(input)
71 if typ == 'bytes':
72 if len != 32:
73 raise TypeMismatchException("Unable to convert bytes[{}] to bytes32".format(len))
74 if input.location == "memory":
75 return LLLnode.from_list(
76 ['mload', ['add', input, 32]], typ=BaseType('bytes32')
77 )
78 elif input.location == "storage":
79 return LLLnode.from_list(
80 ['sload', ['add', ['sha3_32', input], 1]], typ=BaseType('bytes32')
81 )
82 else:
83 return LLLnode(value=input.value, args=input.args, typ=BaseType('bytes32'), pos=getpos(expr))
84
85
86 def convert(expr, context):
87 output_type = expr.args[1].s
88 if output_type in conversion_table:
89 return conversion_table[output_type](expr, context)
90 else:
91 raise Exception("Conversion to {} is invalid.".format(output_type))
92
93
94 conversion_table = {
95 'int128': to_int128,
96 'uint256': to_uint256,
97 'decimal': to_decimal,
98 'bytes32': to_bytes32,
99 }
100
[end of vyper/types/convert.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/vyper/types/convert.py b/vyper/types/convert.py
--- a/vyper/types/convert.py
+++ b/vyper/types/convert.py
@@ -57,11 +57,20 @@
raise InvalidLiteralException("Invalid input for uint256: %r" % in_node, expr)
-@signature('int128', 'str_literal')
+@signature(('int128', 'uint256'), 'str_literal')
def to_decimal(expr, args, kwargs, context):
input = args[0]
- return LLLnode.from_list(['mul', input, DECIMAL_DIVISOR], typ=BaseType('decimal', input.typ.unit, input.typ.positional),
- pos=getpos(expr))
+ if input.typ.typ == 'uint256':
+ return LLLnode.from_list(
+ ['uclample', ['mul', input, DECIMAL_DIVISOR], ['mload', MemoryPositions.MAXDECIMAL]],
+ typ=BaseType('decimal', input.typ.unit, input.typ.positional), pos=getpos(expr)
+ )
+ else:
+ return LLLnode.from_list(
+ ['mul', input, DECIMAL_DIVISOR],
+ typ=BaseType('decimal', input.typ.unit, input.typ.positional),
+ pos=getpos(expr)
+ )
@signature(('int128', 'uint256', 'address', 'bytes'), 'str_literal')
| {"golden_diff": "diff --git a/vyper/types/convert.py b/vyper/types/convert.py\n--- a/vyper/types/convert.py\n+++ b/vyper/types/convert.py\n@@ -57,11 +57,20 @@\n raise InvalidLiteralException(\"Invalid input for uint256: %r\" % in_node, expr)\n \n \n-@signature('int128', 'str_literal')\n+@signature(('int128', 'uint256'), 'str_literal')\n def to_decimal(expr, args, kwargs, context):\n input = args[0]\n- return LLLnode.from_list(['mul', input, DECIMAL_DIVISOR], typ=BaseType('decimal', input.typ.unit, input.typ.positional),\n- pos=getpos(expr))\n+ if input.typ.typ == 'uint256':\n+ return LLLnode.from_list(\n+ ['uclample', ['mul', input, DECIMAL_DIVISOR], ['mload', MemoryPositions.MAXDECIMAL]],\n+ typ=BaseType('decimal', input.typ.unit, input.typ.positional), pos=getpos(expr)\n+ )\n+ else:\n+ return LLLnode.from_list(\n+ ['mul', input, DECIMAL_DIVISOR],\n+ typ=BaseType('decimal', input.typ.unit, input.typ.positional),\n+ pos=getpos(expr)\n+ )\n \n \n @signature(('int128', 'uint256', 'address', 'bytes'), 'str_literal')\n", "issue": "Add uint256 decimal support for convert\n### What's your issue about?\r\n\r\nConvert should allow conversion of uint256 -> decimal.\r\n\r\n### How can it be fixed?\r\n\r\nconvert.py:57\r\n\r\n#### Cute Animal Picture\r\n^.^\n", "before_files": [{"content": "from vyper.functions.signature import (\n signature\n)\nfrom vyper.parser.parser_utils import (\n LLLnode,\n getpos,\n byte_array_to_num\n)\nfrom vyper.exceptions import (\n InvalidLiteralException,\n TypeMismatchException,\n)\nfrom vyper.types import (\n BaseType,\n)\nfrom vyper.types import (\n get_type,\n)\nfrom vyper.utils import (\n DECIMAL_DIVISOR,\n MemoryPositions,\n SizeLimits\n)\n\n\n@signature(('int128', 'uint256', 'bytes32', 'bytes'), 'str_literal')\ndef to_int128(expr, args, kwargs, context):\n in_node = args[0]\n typ, len = get_type(in_node)\n if typ in ('int128', 'uint256', 'bytes32'):\n if in_node.typ.is_literal and not SizeLimits.in_bounds('int128', in_node.value):\n raise InvalidLiteralException(\"Number out of range: {}\".format(in_node.value), expr)\n return LLLnode.from_list(\n ['clamp', ['mload', MemoryPositions.MINNUM], in_node,\n ['mload', MemoryPositions.MAXNUM]], typ=BaseType('int128', in_node.typ.unit), pos=getpos(expr)\n )\n else:\n return byte_array_to_num(in_node, expr, 'int128')\n\n\n@signature(('num_literal', 'int128', 'bytes32'), 'str_literal')\ndef to_uint256(expr, args, kwargs, context):\n in_node = args[0]\n typ, len = get_type(in_node)\n if isinstance(in_node, int):\n\n if not SizeLimits.in_bounds('uint256', in_node):\n raise InvalidLiteralException(\"Number out of range: {}\".format(in_node))\n _unit = in_node.typ.unit if typ == 'int128' else None\n return LLLnode.from_list(in_node, typ=BaseType('uint256', _unit), pos=getpos(expr))\n elif isinstance(in_node, LLLnode) and typ in ('int128', 'num_literal'):\n _unit = in_node.typ.unit if typ == 'int128' else None\n return LLLnode.from_list(['clampge', in_node, 0], typ=BaseType('uint256', _unit), pos=getpos(expr))\n elif isinstance(in_node, LLLnode) and typ in ('bytes32'):\n return LLLnode(value=in_node.value, args=in_node.args, typ=BaseType('uint256'), pos=getpos(expr))\n else:\n raise InvalidLiteralException(\"Invalid input for uint256: %r\" % in_node, expr)\n\n\n@signature('int128', 'str_literal')\ndef to_decimal(expr, args, kwargs, context):\n input = args[0]\n return LLLnode.from_list(['mul', input, DECIMAL_DIVISOR], typ=BaseType('decimal', input.typ.unit, input.typ.positional),\n pos=getpos(expr))\n\n\n@signature(('int128', 
'uint256', 'address', 'bytes'), 'str_literal')\ndef to_bytes32(expr, args, kwargs, context):\n input = args[0]\n typ, len = get_type(input)\n if typ == 'bytes':\n if len != 32:\n raise TypeMismatchException(\"Unable to convert bytes[{}] to bytes32\".format(len))\n if input.location == \"memory\":\n return LLLnode.from_list(\n ['mload', ['add', input, 32]], typ=BaseType('bytes32')\n )\n elif input.location == \"storage\":\n return LLLnode.from_list(\n ['sload', ['add', ['sha3_32', input], 1]], typ=BaseType('bytes32')\n )\n else:\n return LLLnode(value=input.value, args=input.args, typ=BaseType('bytes32'), pos=getpos(expr))\n\n\ndef convert(expr, context):\n output_type = expr.args[1].s\n if output_type in conversion_table:\n return conversion_table[output_type](expr, context)\n else:\n raise Exception(\"Conversion to {} is invalid.\".format(output_type))\n\n\nconversion_table = {\n 'int128': to_int128,\n 'uint256': to_uint256,\n 'decimal': to_decimal,\n 'bytes32': to_bytes32,\n}\n", "path": "vyper/types/convert.py"}]} | 1,759 | 322 |
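
The `uclample` against `MemoryPositions.MAXDECIMAL` is what keeps a large uint256 from silently overflowing the decimal range once it is scaled by `DECIMAL_DIVISOR`. A rough Python model of that guard, with constants that are assumptions for illustration rather than values read from `vyper.utils`:

```python
DECIMAL_DIVISOR = 10 ** 10      # assumption: Vyper's fixed-point scale factor
MAX_RAW_DECIMAL = 2 ** 127 - 1  # assumption: ceiling of the int128-backed decimal


def to_decimal_checked(value: int) -> int:
    """Mirror the patched LLL: scale first, then clamp instead of wrapping."""
    raw = value * DECIMAL_DIVISOR
    if raw > MAX_RAW_DECIMAL:   # 'uclample' reverts on-chain; we raise here
        raise OverflowError("uint256 value too large to represent as decimal")
    return raw


print(to_decimal_checked(1))    # 10000000000
try:
    to_decimal_checked(2 ** 200)
except OverflowError as exc:
    print(exc)
```

Note that the patch keeps the unchecked multiply for the int128 path, preserving the old behaviour for that branch.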
gh_patches_debug_25447 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-270 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add aliases for cookiecutters
Remembering the locations of cookiecutters can be a bit of a pain, even when the location is just a local path.
It would be useful to have an extra section in `cookiecutterrc` for aliases to cookiecutter locations. Something like:
```
aliases:
python: https://github.com/audreyr/cookiecutter-pypackage
django: https://github.com/marcofucci/cookiecutter-simple-django
docs: ~/samples/templates/documentation
```
The code would simply need to translate an alias into a path before doing any of the existing processing.
</issue>
<code>
[start of cookiecutter/main.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """
5 cookiecutter.main
6 -----------------
7
8 Main entry point for the `cookiecutter` command.
9
10 The code in this module is also a good example of how to use Cookiecutter as a
11 library rather than a script.
12 """
13
14 from __future__ import unicode_literals
15 import argparse
16 import logging
17 import os
18 import sys
19
20 from . import __version__
21 from .config import get_user_config
22 from .prompt import prompt_for_config
23 from .generate import generate_context, generate_files
24 from .vcs import clone
25
26 logger = logging.getLogger(__name__)
27
28
29 def cookiecutter(input_dir, checkout=None, no_input=False):
30 """
31 API equivalent to using Cookiecutter at the command line.
32
33 :param input_dir: A directory containing a project template dir,
34 or a URL to git repo.
35 :param checkout: The branch, tag or commit ID to checkout after clone
36 """
37
38 # Get user config from ~/.cookiecutterrc or equivalent
39 # If no config file, sensible defaults from config.DEFAULT_CONFIG are used
40 config_dict = get_user_config()
41
42 # TODO: find a better way to tell if it's a repo URL
43 if "git@" in input_dir or "https://" in input_dir:
44 repo_dir = clone(
45 repo_url=input_dir,
46 checkout=checkout,
47 clone_to_dir=config_dict['cookiecutters_dir']
48 )
49 else:
50 # If it's a local repo, no need to clone or copy to your cookiecutters_dir
51 repo_dir = input_dir
52
53 context_file = os.path.join(repo_dir, 'cookiecutter.json')
54 logging.debug('context_file is {0}'.format(context_file))
55
56 context = generate_context(
57 context_file=context_file,
58 default_context=config_dict['default_context']
59 )
60
61 # prompt the user to manually configure at the command line.
62 # except when 'no-input' flag is set
63 if not no_input:
64 cookiecutter_dict = prompt_for_config(context)
65 context['cookiecutter'] = cookiecutter_dict
66
67 # Create project from local context and project template.
68 generate_files(
69 repo_dir=repo_dir,
70 context=context
71 )
72
73
74 def _get_parser():
75 parser = argparse.ArgumentParser(
76 description='Create a project from a Cookiecutter project template.'
77 )
78 parser.add_argument(
79 '--no-input',
80 action="store_true",
81 help='Do not prompt for parameters and only use cookiecutter.json '
82 'file content')
83 parser.add_argument(
84 'input_dir',
85 help='Cookiecutter project dir, e.g. cookiecutter-pypackage/'
86 )
87 parser.add_argument(
88 '-c', '--checkout',
89 help='branch, tag or commit to checkout after git clone'
90 )
91 cookiecutter_pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
92 parser.add_argument(
93 '-V', '--version',
94 help="Show version information and exit.",
95 action='version',
96 version='Cookiecutter %s from %s (Python %s)' % (
97 __version__,
98 cookiecutter_pkg_dir,
99 sys.version[:3]
100 )
101 )
102 parser.add_argument(
103 '-v', '--verbose',
104 help='Print debug information',
105 action='store_true', default=False
106 )
107
108 return parser
109
110 def parse_cookiecutter_args(args):
111 """ Parse the command-line arguments to Cookiecutter. """
112 parser = _get_parser()
113 return parser.parse_args(args)
114
115
116 def main():
117 """ Entry point for the package, as defined in setup.py. """
118
119 args = parse_cookiecutter_args(sys.argv[1:])
120
121 if args.verbose:
122 logging.basicConfig(format='%(levelname)s %(filename)s: %(message)s', level=logging.DEBUG)
123 else:
124 # Log info and above to console
125 logging.basicConfig(
126 format='%(levelname)s: %(message)s',
127 level=logging.INFO
128 )
129
130 cookiecutter(args.input_dir, args.checkout, args.no_input)
131
132
133 if __name__ == '__main__':
134 main()
135
[end of cookiecutter/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cookiecutter/main.py b/cookiecutter/main.py
--- a/cookiecutter/main.py
+++ b/cookiecutter/main.py
@@ -25,6 +25,34 @@
logger = logging.getLogger(__name__)
+builtin_abbreviations = {
+ 'gh': 'https://github.com/{0}.git',
+ 'bb': 'https://bitbucket.org/{0}',
+}
+
+def expand_abbreviations(input_dir, config_dict):
+ """
+ Expand abbreviations in a template name.
+
+ :param input_dir: The project template name.
+ :param config_dict: The user config, which will contain abbreviation
+ definitions.
+ """
+
+ abbreviations = builtin_abbreviations.copy()
+ abbreviations.update(config_dict.get('abbreviations', {}))
+
+ if input_dir in abbreviations:
+ return abbreviations[input_dir]
+
+ # Split on colon. If there is no colon, rest will be empty
+ # and prefix will be the whole input_dir
+ prefix, sep, rest = input_dir.partition(':')
+ if prefix in abbreviations:
+ return abbreviations[prefix].format(rest)
+
+ return input_dir
+
def cookiecutter(input_dir, checkout=None, no_input=False):
"""
@@ -39,6 +67,8 @@
# If no config file, sensible defaults from config.DEFAULT_CONFIG are used
config_dict = get_user_config()
+ input_dir = expand_abbreviations(input_dir, config_dict)
+
# TODO: find a better way to tell if it's a repo URL
if "git@" in input_dir or "https://" in input_dir:
repo_dir = clone(
| {"golden_diff": "diff --git a/cookiecutter/main.py b/cookiecutter/main.py\n--- a/cookiecutter/main.py\n+++ b/cookiecutter/main.py\n@@ -25,6 +25,34 @@\n \n logger = logging.getLogger(__name__)\n \n+builtin_abbreviations = {\n+ 'gh': 'https://github.com/{0}.git',\n+ 'bb': 'https://bitbucket.org/{0}',\n+}\n+\n+def expand_abbreviations(input_dir, config_dict):\n+ \"\"\"\n+ Expand abbreviations in a template name.\n+\n+ :param input_dir: The project template name.\n+ :param config_dict: The user config, which will contain abbreviation\n+ definitions.\n+ \"\"\"\n+\n+ abbreviations = builtin_abbreviations.copy()\n+ abbreviations.update(config_dict.get('abbreviations', {}))\n+\n+ if input_dir in abbreviations:\n+ return abbreviations[input_dir]\n+\n+ # Split on colon. If there is no colon, rest will be empty\n+ # and prefix will be the whole input_dir\n+ prefix, sep, rest = input_dir.partition(':')\n+ if prefix in abbreviations:\n+ return abbreviations[prefix].format(rest)\n+\n+ return input_dir\n+\n \n def cookiecutter(input_dir, checkout=None, no_input=False):\n \"\"\"\n@@ -39,6 +67,8 @@\n # If no config file, sensible defaults from config.DEFAULT_CONFIG are used\n config_dict = get_user_config()\n \n+ input_dir = expand_abbreviations(input_dir, config_dict)\n+\n # TODO: find a better way to tell if it's a repo URL\n if \"git@\" in input_dir or \"https://\" in input_dir:\n repo_dir = clone(\n", "issue": "Add aliases for cookiecutters\nRemembering the locations of cookiecutters can be a bit of a pain - even if it's just including the local path.\n\nIt would be useful to have an extra section in `cookiecutterrc` for aliases to cookiecutter locations. Something like:\n\n```\naliases:\n python: https://github.com/audreyr/cookiecutter-pypackage\n django: https://github.com/marcofucci/cookiecutter-simple-django\n docs: ~/samples/templates/documentation\n```\n\nThe code would simply need to translate an alias into a path before doing any of the existing processing.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.main\n-----------------\n\nMain entry point for the `cookiecutter` command.\n\nThe code in this module is also a good example of how to use Cookiecutter as a\nlibrary rather than a script.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport argparse\nimport logging\nimport os\nimport sys\n\nfrom . 
import __version__\nfrom .config import get_user_config\nfrom .prompt import prompt_for_config\nfrom .generate import generate_context, generate_files\nfrom .vcs import clone\n\nlogger = logging.getLogger(__name__)\n\n\ndef cookiecutter(input_dir, checkout=None, no_input=False):\n \"\"\"\n API equivalent to using Cookiecutter at the command line.\n\n :param input_dir: A directory containing a project template dir,\n or a URL to git repo.\n :param checkout: The branch, tag or commit ID to checkout after clone\n \"\"\"\n\n # Get user config from ~/.cookiecutterrc or equivalent\n # If no config file, sensible defaults from config.DEFAULT_CONFIG are used\n config_dict = get_user_config()\n\n # TODO: find a better way to tell if it's a repo URL\n if \"git@\" in input_dir or \"https://\" in input_dir:\n repo_dir = clone(\n repo_url=input_dir,\n checkout=checkout,\n clone_to_dir=config_dict['cookiecutters_dir']\n )\n else:\n # If it's a local repo, no need to clone or copy to your cookiecutters_dir\n repo_dir = input_dir\n\n context_file = os.path.join(repo_dir, 'cookiecutter.json')\n logging.debug('context_file is {0}'.format(context_file))\n\n context = generate_context(\n context_file=context_file,\n default_context=config_dict['default_context']\n )\n\n # prompt the user to manually configure at the command line.\n # except when 'no-input' flag is set\n if not no_input:\n cookiecutter_dict = prompt_for_config(context)\n context['cookiecutter'] = cookiecutter_dict\n\n # Create project from local context and project template.\n generate_files(\n repo_dir=repo_dir,\n context=context\n )\n\n\ndef _get_parser():\n parser = argparse.ArgumentParser(\n description='Create a project from a Cookiecutter project template.'\n )\n parser.add_argument(\n '--no-input',\n action=\"store_true\",\n help='Do not prompt for parameters and only use cookiecutter.json '\n 'file content')\n parser.add_argument(\n 'input_dir',\n help='Cookiecutter project dir, e.g. cookiecutter-pypackage/'\n )\n parser.add_argument(\n '-c', '--checkout',\n help='branch, tag or commit to checkout after git clone'\n )\n cookiecutter_pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n parser.add_argument(\n '-V', '--version',\n help=\"Show version information and exit.\",\n action='version',\n version='Cookiecutter %s from %s (Python %s)' % (\n __version__,\n cookiecutter_pkg_dir,\n sys.version[:3]\n )\n )\n parser.add_argument(\n '-v', '--verbose',\n help='Print debug information',\n action='store_true', default=False\n )\n\n return parser\n\ndef parse_cookiecutter_args(args):\n \"\"\" Parse the command-line arguments to Cookiecutter. \"\"\"\n parser = _get_parser()\n return parser.parse_args(args)\n\n\ndef main():\n \"\"\" Entry point for the package, as defined in setup.py. \"\"\"\n\n args = parse_cookiecutter_args(sys.argv[1:])\n\n if args.verbose:\n logging.basicConfig(format='%(levelname)s %(filename)s: %(message)s', level=logging.DEBUG)\n else:\n # Log info and above to console\n logging.basicConfig(\n format='%(levelname)s: %(message)s',\n level=logging.INFO\n )\n\n cookiecutter(args.input_dir, args.checkout, args.no_input)\n\n\nif __name__ == '__main__':\n main()\n", "path": "cookiecutter/main.py"}]} | 1,854 | 383 |
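
Assuming `expand_abbreviations` behaves exactly as written in the patch, a short usage sketch (the calls and config dict here are hypothetical):

```python
builtin_abbreviations = {
    'gh': 'https://github.com/{0}.git',
    'bb': 'https://bitbucket.org/{0}',
}


def expand_abbreviations(input_dir, config_dict):
    # logic copied from the patch above
    abbreviations = builtin_abbreviations.copy()
    abbreviations.update(config_dict.get('abbreviations', {}))
    if input_dir in abbreviations:
        return abbreviations[input_dir]
    prefix, sep, rest = input_dir.partition(':')
    if prefix in abbreviations:
        return abbreviations[prefix].format(rest)
    return input_dir


config = {'abbreviations': {'docs': '~/samples/templates/documentation'}}
print(expand_abbreviations('gh:audreyr/cookiecutter-pypackage', config))
# -> https://github.com/audreyr/cookiecutter-pypackage.git
print(expand_abbreviations('docs', config))
# -> ~/samples/templates/documentation (user-defined alias returned verbatim)
print(expand_abbreviations('./local-template', config))
# -> ./local-template (no alias and no known prefix: returned unchanged)
```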
gh_patches_debug_38603 | rasdani/github-patches | git_diff | docker__docker-py-347 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SSLAdapter init_poolmanager error because request.packages.urllib3.__version__ invalid
In ssladapter.py,
``` python
try:
import requests.packages.urllib3 as urllib3
except ImportError:
import urllib3
...
if urllib3 and urllib_ver == 'dev' and \
StrictVersion(urllib_ver) > StrictVersion('1.5'):
kwargs['ssl_version'] = self.ssl_version
self.poolmanager = PoolManager(**kwargs)
```
`requests.packages.urllib3` is the module that gets imported, its `__version__` is currently the string 'dev', and `StrictVersion(urllib_ver)` therefore raises a ValueError: "invalid version number 'dev'"
</issue>
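
The failure is easy to reproduce in isolation, because `StrictVersion` only accepts dotted numeric version strings; a minimal repro, assuming `distutils` is available as it was when this issue was filed:

```python
from distutils.version import StrictVersion

print(StrictVersion('1.9.1'))   # parses fine: dotted and numeric
try:
    StrictVersion('dev')        # what urllib3 reports for development builds
except ValueError as exc:
    print(exc)                  # invalid version number 'dev'
```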
<code>
[start of docker/tls.py]
1 import os
2
3 from . import errors
4 from .ssladapter import ssladapter
5
6
7 class TLSConfig(object):
8 cert = None
9 verify = None
10 ssl_version = None
11
12 def __init__(self, client_cert=None, ca_cert=None, verify=None,
13 ssl_version=None):
14 # Argument compatibility/mapping with
15 # http://docs.docker.com/examples/https/
16 # This diverges from the Docker CLI in that users can specify 'tls'
17 # here, but also disable any public/default CA pool verification by
18 # leaving tls_verify=False
19
20 # urllib3 sets a default ssl_version if ssl_version is None
21 # http://tinyurl.com/kxga8hb
22 self.ssl_version = ssl_version
23
24 # "tls" and "tls_verify" must have both or neither cert/key files
25 # In either case, Alert the user when both are expected, but any are
26 # missing.
27
28 if client_cert:
29 try:
30 tls_cert, tls_key = client_cert
31 except ValueError:
32 raise errors.TLSParameterError(
33 'client_config must be a tuple of'
34 ' (client certificate, key file)'
35 )
36
37 if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or
38 not os.path.isfile(tls_key)):
39 raise errors.TLSParameterError(
40 'Path to a certificate and key files must be provided'
41 ' through the client_config param'
42 )
43 self.cert = (tls_cert, tls_key)
44
45 # Either set verify to True (public/default CA checks) or to the
46 # path of a CA Cert file.
47 if verify is not None:
48 if not ca_cert:
49 self.verify = verify
50 elif os.path.isfile(ca_cert):
51 if not verify:
52 raise errors.TLSParameterError(
53 'verify can not be False when a CA cert is'
54 ' provided.'
55 )
56 self.verify = ca_cert
57 else:
58 raise errors.TLSParameterError(
59 'Invalid CA certificate provided for `tls_ca_cert`.'
60 )
61
62 def configure_client(self, client):
63 client.ssl_version = self.ssl_version
64 if self.verify is not None:
65 client.verify = self.verify
66 if self.cert:
67 client.cert = self.cert
68 client.mount('https://', ssladapter.SSLAdapter(self.ssl_version))
69
[end of docker/tls.py]
[start of docker/ssladapter/ssladapter.py]
1 """ Resolves OpenSSL issues in some servers:
2 https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/
3 https://github.com/kennethreitz/requests/pull/799
4 """
5 from distutils.version import StrictVersion
6 from requests.adapters import HTTPAdapter
7 try:
8 import requests.packages.urllib3 as urllib3
9 except ImportError:
10 import urllib3
11
12
13 PoolManager = urllib3.poolmanager.PoolManager
14
15
16 class SSLAdapter(HTTPAdapter):
17 '''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''
18 def __init__(self, ssl_version=None, **kwargs):
19 self.ssl_version = ssl_version
20 super(SSLAdapter, self).__init__(**kwargs)
21
22 def init_poolmanager(self, connections, maxsize, block=False):
23 urllib_ver = urllib3.__version__.split('-')[0]
24 kwargs = {
25 'num_pools': connections,
26 'maxsize': maxsize,
27 'block': block
28 }
29 if urllib3 and urllib_ver == 'dev' and \
30 StrictVersion(urllib_ver) > StrictVersion('1.5'):
31 kwargs['ssl_version'] = self.ssl_version
32
33 self.poolmanager = PoolManager(**kwargs)
34
[end of docker/ssladapter/ssladapter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docker/ssladapter/ssladapter.py b/docker/ssladapter/ssladapter.py
--- a/docker/ssladapter/ssladapter.py
+++ b/docker/ssladapter/ssladapter.py
@@ -15,19 +15,27 @@
class SSLAdapter(HTTPAdapter):
'''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''
- def __init__(self, ssl_version=None, **kwargs):
+ def __init__(self, ssl_version=None, assert_hostname=None, **kwargs):
self.ssl_version = ssl_version
+ self.assert_hostname = assert_hostname
super(SSLAdapter, self).__init__(**kwargs)
def init_poolmanager(self, connections, maxsize, block=False):
- urllib_ver = urllib3.__version__.split('-')[0]
kwargs = {
'num_pools': connections,
'maxsize': maxsize,
- 'block': block
+ 'block': block,
+ 'assert_hostname': self.assert_hostname,
}
- if urllib3 and urllib_ver == 'dev' and \
- StrictVersion(urllib_ver) > StrictVersion('1.5'):
+ if self.can_override_ssl_version():
kwargs['ssl_version'] = self.ssl_version
self.poolmanager = PoolManager(**kwargs)
+
+ def can_override_ssl_version(self):
+ urllib_ver = urllib3.__version__.split('-')[0]
+ if urllib_ver is None:
+ return False
+ if urllib_ver == 'dev':
+ return True
+ return StrictVersion(urllib_ver) > StrictVersion('1.5')
diff --git a/docker/tls.py b/docker/tls.py
--- a/docker/tls.py
+++ b/docker/tls.py
@@ -10,7 +10,7 @@
ssl_version = None
def __init__(self, client_cert=None, ca_cert=None, verify=None,
- ssl_version=None):
+ ssl_version=None, assert_hostname=None):
# Argument compatibility/mapping with
# http://docs.docker.com/examples/https/
# This diverges from the Docker CLI in that users can specify 'tls'
@@ -20,6 +20,7 @@
# urllib3 sets a default ssl_version if ssl_version is None
# http://tinyurl.com/kxga8hb
self.ssl_version = ssl_version
+ self.assert_hostname = assert_hostname
# "tls" and "tls_verify" must have both or neither cert/key files
# In either case, Alert the user when both are expected, but any are
@@ -65,4 +66,7 @@
client.verify = self.verify
if self.cert:
client.cert = self.cert
- client.mount('https://', ssladapter.SSLAdapter(self.ssl_version))
+ client.mount('https://', ssladapter.SSLAdapter(
+ ssl_version=self.ssl_version,
+ assert_hostname=self.assert_hostname,
+ ))
| {"golden_diff": "diff --git a/docker/ssladapter/ssladapter.py b/docker/ssladapter/ssladapter.py\n--- a/docker/ssladapter/ssladapter.py\n+++ b/docker/ssladapter/ssladapter.py\n@@ -15,19 +15,27 @@\n \n class SSLAdapter(HTTPAdapter):\n '''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''\n- def __init__(self, ssl_version=None, **kwargs):\n+ def __init__(self, ssl_version=None, assert_hostname=None, **kwargs):\n self.ssl_version = ssl_version\n+ self.assert_hostname = assert_hostname\n super(SSLAdapter, self).__init__(**kwargs)\n \n def init_poolmanager(self, connections, maxsize, block=False):\n- urllib_ver = urllib3.__version__.split('-')[0]\n kwargs = {\n 'num_pools': connections,\n 'maxsize': maxsize,\n- 'block': block\n+ 'block': block,\n+ 'assert_hostname': self.assert_hostname,\n }\n- if urllib3 and urllib_ver == 'dev' and \\\n- StrictVersion(urllib_ver) > StrictVersion('1.5'):\n+ if self.can_override_ssl_version():\n kwargs['ssl_version'] = self.ssl_version\n \n self.poolmanager = PoolManager(**kwargs)\n+\n+ def can_override_ssl_version(self):\n+ urllib_ver = urllib3.__version__.split('-')[0]\n+ if urllib_ver is None:\n+ return False\n+ if urllib_ver == 'dev':\n+ return True\n+ return StrictVersion(urllib_ver) > StrictVersion('1.5')\ndiff --git a/docker/tls.py b/docker/tls.py\n--- a/docker/tls.py\n+++ b/docker/tls.py\n@@ -10,7 +10,7 @@\n ssl_version = None\n \n def __init__(self, client_cert=None, ca_cert=None, verify=None,\n- ssl_version=None):\n+ ssl_version=None, assert_hostname=None):\n # Argument compatibility/mapping with\n # http://docs.docker.com/examples/https/\n # This diverges from the Docker CLI in that users can specify 'tls'\n@@ -20,6 +20,7 @@\n # urllib3 sets a default ssl_version if ssl_version is None\n # http://tinyurl.com/kxga8hb\n self.ssl_version = ssl_version\n+ self.assert_hostname = assert_hostname\n \n # \"tls\" and \"tls_verify\" must have both or neither cert/key files\n # In either case, Alert the user when both are expected, but any are\n@@ -65,4 +66,7 @@\n client.verify = self.verify\n if self.cert:\n client.cert = self.cert\n- client.mount('https://', ssladapter.SSLAdapter(self.ssl_version))\n+ client.mount('https://', ssladapter.SSLAdapter(\n+ ssl_version=self.ssl_version,\n+ assert_hostname=self.assert_hostname,\n+ ))\n", "issue": "SSLAdapter init_poolmanager error because request.packages.urllib3.__version__ invalid\nIn ssladapter.py, \n\n``` python\ntry:\n import requests.packages.urllib3 as urllib3\nexcept ImportError:\n import urllib3\n...\n\n if urllib3 and urllib_ver == 'dev' and \\\n StrictVersion(urllib_ver) > StrictVersion('1.5'):\n kwargs['ssl_version'] = self.ssl_version\n\n self.poolmanager = PoolManager(**kwargs)\n```\n\n`requests.packages.urllib3` will be imported, and the `requests.packages.urllib3.__version__` is 'dev' at present, and the `StrictVersion(urllib_ver)` will raise a ValueError: \"invalid version number 'dev'\"\n\n", "before_files": [{"content": "import os\n\nfrom . 
import errors\nfrom .ssladapter import ssladapter\n\n\nclass TLSConfig(object):\n cert = None\n verify = None\n ssl_version = None\n\n def __init__(self, client_cert=None, ca_cert=None, verify=None,\n ssl_version=None):\n # Argument compatibility/mapping with\n # http://docs.docker.com/examples/https/\n # This diverges from the Docker CLI in that users can specify 'tls'\n # here, but also disable any public/default CA pool verification by\n # leaving tls_verify=False\n\n # urllib3 sets a default ssl_version if ssl_version is None\n # http://tinyurl.com/kxga8hb\n self.ssl_version = ssl_version\n\n # \"tls\" and \"tls_verify\" must have both or neither cert/key files\n # In either case, Alert the user when both are expected, but any are\n # missing.\n\n if client_cert:\n try:\n tls_cert, tls_key = client_cert\n except ValueError:\n raise errors.TLSParameterError(\n 'client_config must be a tuple of'\n ' (client certificate, key file)'\n )\n\n if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or\n not os.path.isfile(tls_key)):\n raise errors.TLSParameterError(\n 'Path to a certificate and key files must be provided'\n ' through the client_config param'\n )\n self.cert = (tls_cert, tls_key)\n\n # Either set verify to True (public/default CA checks) or to the\n # path of a CA Cert file.\n if verify is not None:\n if not ca_cert:\n self.verify = verify\n elif os.path.isfile(ca_cert):\n if not verify:\n raise errors.TLSParameterError(\n 'verify can not be False when a CA cert is'\n ' provided.'\n )\n self.verify = ca_cert\n else:\n raise errors.TLSParameterError(\n 'Invalid CA certificate provided for `tls_ca_cert`.'\n )\n\n def configure_client(self, client):\n client.ssl_version = self.ssl_version\n if self.verify is not None:\n client.verify = self.verify\n if self.cert:\n client.cert = self.cert\n client.mount('https://', ssladapter.SSLAdapter(self.ssl_version))\n", "path": "docker/tls.py"}, {"content": "\"\"\" Resolves OpenSSL issues in some servers:\n https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/\n https://github.com/kennethreitz/requests/pull/799\n\"\"\"\nfrom distutils.version import StrictVersion\nfrom requests.adapters import HTTPAdapter\ntry:\n import requests.packages.urllib3 as urllib3\nexcept ImportError:\n import urllib3\n\n\nPoolManager = urllib3.poolmanager.PoolManager\n\n\nclass SSLAdapter(HTTPAdapter):\n '''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''\n def __init__(self, ssl_version=None, **kwargs):\n self.ssl_version = ssl_version\n super(SSLAdapter, self).__init__(**kwargs)\n\n def init_poolmanager(self, connections, maxsize, block=False):\n urllib_ver = urllib3.__version__.split('-')[0]\n kwargs = {\n 'num_pools': connections,\n 'maxsize': maxsize,\n 'block': block\n }\n if urllib3 and urllib_ver == 'dev' and \\\n StrictVersion(urllib_ver) > StrictVersion('1.5'):\n kwargs['ssl_version'] = self.ssl_version\n\n self.poolmanager = PoolManager(**kwargs)\n", "path": "docker/ssladapter/ssladapter.py"}]} | 1,674 | 640 |
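
The new guard can be exercised on its own. Below is an illustrative restatement of `can_override_ssl_version` that takes the version string as a parameter instead of reading `urllib3.__version__`; the test values are hypothetical:

```python
from distutils.version import StrictVersion


def can_override_ssl_version(version):
    urllib_ver = version.split('-')[0]
    if not urllib_ver:
        return False
    if urllib_ver == 'dev':  # dev builds are simply treated as new enough
        return True
    return StrictVersion(urllib_ver) > StrictVersion('1.5')


assert can_override_ssl_version('dev') is True          # no ValueError any more
assert can_override_ssl_version('1.9.1-local') is True  # suffix is split off first
assert can_override_ssl_version('1.4') is False
```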
gh_patches_debug_19397 | rasdani/github-patches | git_diff | elastic__apm-agent-python-766 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
'UnixDomainSocketConnection' object has no attribute 'port'
AttributeError: 'UnixDomainSocketConnection' object has no attribute 'port'
elastic-apm==5.5.1
```
File "django_redis/cache.py", line 32, in _decorator
return method(self, *args, **kwargs)
File "django_redis/cache.py", line 80, in get
return self.client.get(key, default=default, version=version,
File "django_redis/client/default.py", line 203, in get
value = client.get(key)
File "redis/client.py", line 976, in get
return self.execute_command('GET', name)
File "elasticapm/instrumentation/packages/base.py", line 210, in call_if_sampling
return self.call(module, method, wrapped, instance, args, kwargs)
File "elasticapm/instrumentation/packages/redis.py", line 66, in call
return wrapped(*args, **kwargs)
File "redis/client.py", line 667, in execute_command
connection.send_command(*args)
File "redis/connection.py", line 610, in send_command
self.send_packed_command(self.pack_command(*args))
File "elasticapm/instrumentation/packages/base.py", line 210, in call_if_sampling
return self.call(module, method, wrapped, instance, args, kwargs)
File "elasticapm/instrumentation/packages/redis.py", line 90, in call
port = int(instance.port) if instance.port else None
```
</issue>
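
The traceback comes down to redis-py's TCP and unix-socket connection classes exposing different attributes; a sketch with stub classes (not the real redis-py objects) shows the shape mismatch:

```python
class TCPConnection:         # stub: the shape the instrumentation assumed
    host = 'localhost'
    port = 6379


class UnixSocketConnection:  # stub: the shape django-redis actually passes
    path = '/var/run/redis/redis.sock'


for conn in (TCPConnection(), UnixSocketConnection()):
    try:
        print(conn.port)
    except AttributeError as exc:
        print(exc)           # ... object has no attribute 'port'
```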
<code>
[start of elasticapm/instrumentation/packages/redis.py]
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2019, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 from __future__ import absolute_import
32
33 from elasticapm.instrumentation.packages.base import AbstractInstrumentedModule
34 from elasticapm.traces import capture_span, execution_context
35
36
37 class Redis3CheckMixin(object):
38 instrument_list_3 = []
39 instrument_list = []
40
41 def get_instrument_list(self):
42 try:
43 from redis import VERSION
44
45 if VERSION[0] >= 3:
46 return self.instrument_list_3
47 return self.instrument_list
48 except ImportError:
49 return self.instrument_list
50
51
52 class RedisInstrumentation(Redis3CheckMixin, AbstractInstrumentedModule):
53 name = "redis"
54
55 # no need to instrument StrictRedis in redis-py >= 3.0
56 instrument_list_3 = [("redis.client", "Redis.execute_command")]
57 instrument_list = [("redis.client", "Redis.execute_command"), ("redis.client", "StrictRedis.execute_command")]
58
59 def call(self, module, method, wrapped, instance, args, kwargs):
60 if len(args) > 0:
61 wrapped_name = str(args[0])
62 else:
63 wrapped_name = self.get_wrapped_name(wrapped, instance, method)
64
65 with capture_span(wrapped_name, span_type="db", span_subtype="redis", span_action="query", leaf=True):
66 return wrapped(*args, **kwargs)
67
68
69 class RedisPipelineInstrumentation(Redis3CheckMixin, AbstractInstrumentedModule):
70 name = "redis"
71
72 # BasePipeline has been renamed to Pipeline in redis-py 3
73 instrument_list_3 = [("redis.client", "Pipeline.execute")]
74 instrument_list = [("redis.client", "BasePipeline.execute")]
75
76 def call(self, module, method, wrapped, instance, args, kwargs):
77 wrapped_name = self.get_wrapped_name(wrapped, instance, method)
78 with capture_span(wrapped_name, span_type="db", span_subtype="redis", span_action="query", leaf=True):
79 return wrapped(*args, **kwargs)
80
81
82 class RedisConnectionInstrumentation(AbstractInstrumentedModule):
83 name = "redis"
84
85 instrument_list = (("redis.connection", "Connection.send_packed_command"),)
86
87 def call(self, module, method, wrapped, instance, args, kwargs):
88 span = execution_context.get_span()
89 if span and span.subtype == "redis":
90 port = int(instance.port) if instance.port else None
91 destination_info = {
92 "address": instance.host,
93 "port": port,
94 "service": {"name": "redis", "resource": "redis", "type": "db"},
95 }
96 span.context["destination"] = destination_info
97 return wrapped(*args, **kwargs)
98
[end of elasticapm/instrumentation/packages/redis.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticapm/instrumentation/packages/redis.py b/elasticapm/instrumentation/packages/redis.py
--- a/elasticapm/instrumentation/packages/redis.py
+++ b/elasticapm/instrumentation/packages/redis.py
@@ -87,11 +87,16 @@
def call(self, module, method, wrapped, instance, args, kwargs):
span = execution_context.get_span()
if span and span.subtype == "redis":
- port = int(instance.port) if instance.port else None
- destination_info = {
- "address": instance.host,
- "port": port,
- "service": {"name": "redis", "resource": "redis", "type": "db"},
- }
- span.context["destination"] = destination_info
+ span.context["destination"] = get_destination_info(instance)
return wrapped(*args, **kwargs)
+
+
+def get_destination_info(connection):
+ destination_info = {"service": {"name": "redis", "resource": "redis", "type": "db"}}
+ if hasattr(connection, "port"):
+ destination_info["port"] = connection.port
+ destination_info["address"] = connection.host
+ elif hasattr(connection, "path"):
+ destination_info["port"] = None
+ destination_info["address"] = "unix://" + connection.path
+ return destination_info
| {"golden_diff": "diff --git a/elasticapm/instrumentation/packages/redis.py b/elasticapm/instrumentation/packages/redis.py\n--- a/elasticapm/instrumentation/packages/redis.py\n+++ b/elasticapm/instrumentation/packages/redis.py\n@@ -87,11 +87,16 @@\n def call(self, module, method, wrapped, instance, args, kwargs):\n span = execution_context.get_span()\n if span and span.subtype == \"redis\":\n- port = int(instance.port) if instance.port else None\n- destination_info = {\n- \"address\": instance.host,\n- \"port\": port,\n- \"service\": {\"name\": \"redis\", \"resource\": \"redis\", \"type\": \"db\"},\n- }\n- span.context[\"destination\"] = destination_info\n+ span.context[\"destination\"] = get_destination_info(instance)\n return wrapped(*args, **kwargs)\n+\n+\n+def get_destination_info(connection):\n+ destination_info = {\"service\": {\"name\": \"redis\", \"resource\": \"redis\", \"type\": \"db\"}}\n+ if hasattr(connection, \"port\"):\n+ destination_info[\"port\"] = connection.port\n+ destination_info[\"address\"] = connection.host\n+ elif hasattr(connection, \"path\"):\n+ destination_info[\"port\"] = None\n+ destination_info[\"address\"] = \"unix://\" + connection.path\n+ return destination_info\n", "issue": "'UnixDomainSocketConnection' object has no attribute 'port'\nAttributeError: 'UnixDomainSocketConnection' object has no attribute 'port'\r\nelastic-apm==5.5.1\r\n\r\n```\r\n File \"django_redis/cache.py\", line 32, in _decorator\r\n return method(self, *args, **kwargs)\r\n File \"django_redis/cache.py\", line 80, in get\r\n return self.client.get(key, default=default, version=version,\r\n File \"django_redis/client/default.py\", line 203, in get\r\n value = client.get(key)\r\n File \"redis/client.py\", line 976, in get\r\n return self.execute_command('GET', name)\r\n File \"elasticapm/instrumentation/packages/base.py\", line 210, in call_if_sampling\r\n return self.call(module, method, wrapped, instance, args, kwargs)\r\n File \"elasticapm/instrumentation/packages/redis.py\", line 66, in call\r\n return wrapped(*args, **kwargs)\r\n File \"redis/client.py\", line 667, in execute_command\r\n connection.send_command(*args)\r\n File \"redis/connection.py\", line 610, in send_command\r\n self.send_packed_command(self.pack_command(*args))\r\n File \"elasticapm/instrumentation/packages/base.py\", line 210, in call_if_sampling\r\n return self.call(module, method, wrapped, instance, args, kwargs)\r\n File \"elasticapm/instrumentation/packages/redis.py\", line 90, in call\r\n port = int(instance.port) if instance.port else None\r\n```\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom __future__ import absolute_import\n\nfrom elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\nfrom elasticapm.traces import capture_span, execution_context\n\n\nclass Redis3CheckMixin(object):\n instrument_list_3 = []\n instrument_list = []\n\n def get_instrument_list(self):\n try:\n from redis import VERSION\n\n if VERSION[0] >= 3:\n return self.instrument_list_3\n return self.instrument_list\n except ImportError:\n return self.instrument_list\n\n\nclass RedisInstrumentation(Redis3CheckMixin, AbstractInstrumentedModule):\n name = \"redis\"\n\n # no need to instrument StrictRedis in redis-py >= 3.0\n instrument_list_3 = [(\"redis.client\", \"Redis.execute_command\")]\n instrument_list = [(\"redis.client\", \"Redis.execute_command\"), (\"redis.client\", \"StrictRedis.execute_command\")]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n if len(args) > 0:\n wrapped_name = str(args[0])\n else:\n wrapped_name = self.get_wrapped_name(wrapped, instance, method)\n\n with capture_span(wrapped_name, span_type=\"db\", span_subtype=\"redis\", span_action=\"query\", leaf=True):\n return wrapped(*args, **kwargs)\n\n\nclass RedisPipelineInstrumentation(Redis3CheckMixin, AbstractInstrumentedModule):\n name = \"redis\"\n\n # BasePipeline has been renamed to Pipeline in redis-py 3\n instrument_list_3 = [(\"redis.client\", \"Pipeline.execute\")]\n instrument_list = [(\"redis.client\", \"BasePipeline.execute\")]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n wrapped_name = self.get_wrapped_name(wrapped, instance, method)\n with capture_span(wrapped_name, span_type=\"db\", span_subtype=\"redis\", span_action=\"query\", leaf=True):\n return wrapped(*args, **kwargs)\n\n\nclass RedisConnectionInstrumentation(AbstractInstrumentedModule):\n name = \"redis\"\n\n instrument_list = ((\"redis.connection\", \"Connection.send_packed_command\"),)\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n span = execution_context.get_span()\n if span and span.subtype == \"redis\":\n port = int(instance.port) if instance.port else None\n destination_info = {\n \"address\": instance.host,\n \"port\": port,\n \"service\": {\"name\": \"redis\", \"resource\": \"redis\", \"type\": \"db\"},\n }\n span.context[\"destination\"] = destination_info\n return wrapped(*args, **kwargs)\n", "path": "elasticapm/instrumentation/packages/redis.py"}]} | 1,989 | 307 |
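
Assuming `get_destination_info` exactly as the patch defines it, the two connection shapes now map to distinct destination dicts. Stub connection objects are used below purely for illustration:

```python
def get_destination_info(connection):
    # logic copied from the patch above
    destination_info = {"service": {"name": "redis", "resource": "redis", "type": "db"}}
    if hasattr(connection, "port"):
        destination_info["port"] = connection.port
        destination_info["address"] = connection.host
    elif hasattr(connection, "path"):
        destination_info["port"] = None
        destination_info["address"] = "unix://" + connection.path
    return destination_info


class TCPConn:      # illustration-only stubs, not redis-py classes
    host = "localhost"
    port = 6379


class SocketConn:
    path = "/var/run/redis/redis.sock"


print(get_destination_info(TCPConn()))     # port=6379, address='localhost'
print(get_destination_info(SocketConn()))  # port=None, address='unix:///var/run/redis/redis.sock'
```

Checking `hasattr(connection, "port")` first preserves the old behaviour for TCP connections while falling back to the socket path otherwise.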
gh_patches_debug_56 | rasdani/github-patches | git_diff | Anselmoo__spectrafit-660 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Docs]: Update release drafter
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Missing Information in the Docs
- Link to the complete changes of the latest release.
- Exclude auto commits in the contributor list
### Anything else?
_No response_
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct
</issue>
<code>
[start of spectrafit/__init__.py]
1 """SpectraFit, fast command line tool for fitting data."""
2 __version__ = "1.0.0a0"
3
[end of spectrafit/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/spectrafit/__init__.py b/spectrafit/__init__.py
--- a/spectrafit/__init__.py
+++ b/spectrafit/__init__.py
@@ -1,2 +1,2 @@
"""SpectraFit, fast command line tool for fitting data."""
-__version__ = "1.0.0a0"
+__version__ = "1.0.0a1"
| {"golden_diff": "diff --git a/spectrafit/__init__.py b/spectrafit/__init__.py\n--- a/spectrafit/__init__.py\n+++ b/spectrafit/__init__.py\n@@ -1,2 +1,2 @@\n \"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n-__version__ = \"1.0.0a0\"\n+__version__ = \"1.0.0a1\"\n", "issue": "[Docs]: Update release drafter\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues\n\n### Current Missing Information in the Docs\n\n- Link to the complete changes of the latest release.\r\n- Exclude auto commits in the contributor list\n\n### Anything else?\n\n_No response_\n\n### Code of Conduct\n\n- [X] I agree to follow this project's Code of Conduct\n", "before_files": [{"content": "\"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n__version__ = \"1.0.0a0\"\n", "path": "spectrafit/__init__.py"}]} | 651 | 97 |
gh_patches_debug_62442 | rasdani/github-patches | git_diff | comic__grand-challenge.org-1162 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Reader study completed message is visible when study is not completed

</issue>
<code>
[start of app/grandchallenge/reader_studies/templatetags/get_ground_truth.py]
1 from django import template
2
3 register = template.Library()
4
5
6 @register.simple_tag
7 def get_ground_truth(obj, image, question):
8 """Get the auth token for the user."""
9 ground_truths = obj.statistics["ground_truths"]
10 return ground_truths[image][question]
11
[end of app/grandchallenge/reader_studies/templatetags/get_ground_truth.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/grandchallenge/reader_studies/templatetags/get_ground_truth.py b/app/grandchallenge/reader_studies/templatetags/get_ground_truth.py
--- a/app/grandchallenge/reader_studies/templatetags/get_ground_truth.py
+++ b/app/grandchallenge/reader_studies/templatetags/get_ground_truth.py
@@ -5,6 +5,7 @@
@register.simple_tag
def get_ground_truth(obj, image, question):
- """Get the auth token for the user."""
+ """Get the ground truth value for the image/question combination in reader
+ study obj."""
ground_truths = obj.statistics["ground_truths"]
return ground_truths[image][question]
| {"golden_diff": "diff --git a/app/grandchallenge/reader_studies/templatetags/get_ground_truth.py b/app/grandchallenge/reader_studies/templatetags/get_ground_truth.py\n--- a/app/grandchallenge/reader_studies/templatetags/get_ground_truth.py\n+++ b/app/grandchallenge/reader_studies/templatetags/get_ground_truth.py\n@@ -5,6 +5,7 @@\n \n @register.simple_tag\n def get_ground_truth(obj, image, question):\n- \"\"\"Get the auth token for the user.\"\"\"\n+ \"\"\"Get the ground truth value for the image/question combination in reader\n+ study obj.\"\"\"\n ground_truths = obj.statistics[\"ground_truths\"]\n return ground_truths[image][question]\n", "issue": "Reader study completed message is visible when study is not completed\n\r\n\n", "before_files": [{"content": "from django import template\n\nregister = template.Library()\n\n\[email protected]_tag\ndef get_ground_truth(obj, image, question):\n \"\"\"Get the auth token for the user.\"\"\"\n ground_truths = obj.statistics[\"ground_truths\"]\n return ground_truths[image][question]\n", "path": "app/grandchallenge/reader_studies/templatetags/get_ground_truth.py"}]} | 727 | 160 |
gh_patches_debug_25949 | rasdani/github-patches | git_diff | larq__larq-319 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Refactor layer test to use pytest parameterized test
Currently we use a mix of unittests written in `pytest` style and tests using `tf.test.TestCase` in larq. In #313 I added some simple `pytest` fixtures that allow us to run tests in both eager and graph mode.
The only part of the test suite that uses `tf.test.TestCase` is the [layer tests](https://github.com/larq/larq/blob/f6f9277a006f6cb07b1e5f9bc591a087f2261152/larq/layers_test.py#L73-L187), which rely on [`keras_parameterized.run_all_keras_modes`](https://github.com/tensorflow/tensorflow/blob/669080135924d204a1c67dd556223d794efbf664/tensorflow/python/keras/keras_parameterized.py#L176-L303) and a forked/copied version of [`testing_utils.layer_test`](https://github.com/tensorflow/tensorflow/blob/669080135924d204a1c67dd556223d794efbf664/tensorflow/python/keras/testing_utils.py#L75-L261).
I think it would be a lot cleaner to write a simple [`pytest` fixture](https://pytest.org/en/latest/fixture.html#fixture) similar to #313 that allows us to run a normal [`pytest` parameterized test](https://pytest.org/en/latest/parametrize.html#pytest-mark-parametrize) in all Keras modes and adapt the layer tests to use it. That way we can get rid of `absl.testing` and some requirements on TensorFlow internals.
</issue>
<code>
[start of setup.py]
1 from setuptools import setup, find_packages
2
3
4 def readme():
5 with open("README.md", "r") as f:
6 return f.read()
7
8
9 setup(
10 name="larq",
11 version="0.7.4",
12 python_requires=">=3.6",
13 author="Plumerai",
14 author_email="[email protected]",
15 description="An Open Source Machine Learning Library for Training Binarized Neural Networks",
16 long_description=readme(),
17 long_description_content_type="text/markdown",
18 url="https://larq.dev/",
19 packages=find_packages(exclude=["larq.snapshots"]),
20 license="Apache 2.0",
21 install_requires=[
22 "numpy >= 1.15.4, < 2.0",
23 "terminaltables>=3.1.0",
24 "dataclasses ; python_version<'3.7'",
25 ],
26 extras_require={
27 "tensorflow": ["tensorflow>=1.14.0"],
28 "tensorflow_gpu": ["tensorflow-gpu>=1.14.0"],
29 "test": [
30 "absl-py==0.8.1",
31 "pytest==5.2.2",
32 "pytest-cov==2.8.1",
33 "pytest-xdist==1.30.0",
34 "snapshottest==0.5.1",
35 ],
36 "docs": [
37 "mkdocs==1.0.4",
38 "mkdocs-material==4.4.3",
39 "pymdown-extensions==6.1",
40 "mknotebooks==0.1.7",
41 "mkdocs-minify-plugin==0.2.1",
42 "larq-zoo==0.4.2",
43 "altair==3.2.0",
44 "pandas==0.25.3",
45 ],
46 },
47 classifiers=[
48 "Development Status :: 4 - Beta",
49 "Intended Audience :: Developers",
50 "Intended Audience :: Education",
51 "Intended Audience :: Science/Research",
52 "License :: OSI Approved :: Apache Software License",
53 "Programming Language :: Python :: 3",
54 "Programming Language :: Python :: 3 :: Only",
55 "Programming Language :: Python :: 3.6",
56 "Programming Language :: Python :: 3.7",
57 "Topic :: Scientific/Engineering",
58 "Topic :: Scientific/Engineering :: Mathematics",
59 "Topic :: Scientific/Engineering :: Artificial Intelligence",
60 "Topic :: Software Development",
61 "Topic :: Software Development :: Libraries",
62 "Topic :: Software Development :: Libraries :: Python Modules",
63 ],
64 )
65
[end of setup.py]
[start of larq/conftest.py]
1 import pytest
2 from tensorflow.python.eager import context
3
4
5 @pytest.fixture
6 def eager_mode():
7 """pytest fixture for running test in eager mode"""
8 with context.eager_mode():
9 yield
10
11
12 @pytest.fixture
13 def graph_mode():
14 """pytest fixture for running test in graph mode"""
15 with context.graph_mode():
16 yield
17
18
19 @pytest.fixture(params=["eager", "graph"])
20 def eager_and_graph_mode(request):
21 """pytest fixture for running test in eager and graph mode"""
22 with getattr(context, f"{request.param}_mode")():
23 yield request.param
24
[end of larq/conftest.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/larq/conftest.py b/larq/conftest.py
--- a/larq/conftest.py
+++ b/larq/conftest.py
@@ -1,4 +1,5 @@
import pytest
+import tensorflow as tf
from tensorflow.python.eager import context
@@ -21,3 +22,27 @@
"""pytest fixture for running test in eager and graph mode"""
with getattr(context, f"{request.param}_mode")():
yield request.param
+
+
[email protected](params=["graph", "tf_eager", "tf_keras_eager"])
+def keras_should_run_eagerly(request):
+ """Fixture to run in graph and two eager modes.
+
+ The modes are:
+ - Graph mode
+ - TensorFlow eager and Keras eager
+ - TensorFlow eager and Keras not eager
+
+ The `tf.context` sets graph/eager mode for TensorFlow. The yield is True if Keras
+ should run eagerly.
+ """
+
+ if request.param == "graph":
+ if int(tf.__version__[0]) >= 2:
+ pytest.skip("Skipping graph mode for TensorFlow 2+.")
+
+ with context.graph_mode():
+ yield
+ else:
+ with context.eager_mode():
+ yield request.param == "tf_keras_eager"
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -27,7 +27,6 @@
"tensorflow": ["tensorflow>=1.14.0"],
"tensorflow_gpu": ["tensorflow-gpu>=1.14.0"],
"test": [
- "absl-py==0.8.1",
"pytest==5.2.2",
"pytest-cov==2.8.1",
"pytest-xdist==1.30.0",
| {"golden_diff": "diff --git a/larq/conftest.py b/larq/conftest.py\n--- a/larq/conftest.py\n+++ b/larq/conftest.py\n@@ -1,4 +1,5 @@\n import pytest\n+import tensorflow as tf\n from tensorflow.python.eager import context\n \n \n@@ -21,3 +22,27 @@\n \"\"\"pytest fixture for running test in eager and graph mode\"\"\"\n with getattr(context, f\"{request.param}_mode\")():\n yield request.param\n+\n+\[email protected](params=[\"graph\", \"tf_eager\", \"tf_keras_eager\"])\n+def keras_should_run_eagerly(request):\n+ \"\"\"Fixture to run in graph and two eager modes.\n+\n+ The modes are:\n+ - Graph mode\n+ - TensorFlow eager and Keras eager\n+ - TensorFlow eager and Keras not eager\n+\n+ The `tf.context` sets graph/eager mode for TensorFlow. The yield is True if Keras\n+ should run eagerly.\n+ \"\"\"\n+\n+ if request.param == \"graph\":\n+ if int(tf.__version__[0]) >= 2:\n+ pytest.skip(\"Skipping graph mode for TensorFlow 2+.\")\n+\n+ with context.graph_mode():\n+ yield\n+ else:\n+ with context.eager_mode():\n+ yield request.param == \"tf_keras_eager\"\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -27,7 +27,6 @@\n \"tensorflow\": [\"tensorflow>=1.14.0\"],\n \"tensorflow_gpu\": [\"tensorflow-gpu>=1.14.0\"],\n \"test\": [\n- \"absl-py==0.8.1\",\n \"pytest==5.2.2\",\n \"pytest-cov==2.8.1\",\n \"pytest-xdist==1.30.0\",\n", "issue": "Refactor layer test to use pytest parameterized test\nCurrently we use a mix of unittests written in `pytest` style and tests using `tf.test.TestCase` in larq. In #313 I added some simple `pytest` fixtures that allow to run tests in both eager and graph mode.\r\n\r\nThe only part of the test suite that uses `tf.test.TestCase` are the [layer tests](https://github.com/larq/larq/blob/f6f9277a006f6cb07b1e5f9bc591a087f2261152/larq/layers_test.py#L73-L187) which rely on [`keras_parameterized.run_all_keras_modes`](https://github.com/tensorflow/tensorflow/blob/669080135924d204a1c67dd556223d794efbf664/tensorflow/python/keras/keras_parameterized.py#L176-L303) and a forked/copied version of [`testing_utils. layer_test`](https://github.com/tensorflow/tensorflow/blob/669080135924d204a1c67dd556223d794efbf664/tensorflow/python/keras/testing_utils.py#L75-L261).\r\n\r\nI think it would be a lot cleaner to write a simple [`pytest` fixture](https://pytest.org/en/latest/fixture.html#fixture) similar to #313 that allows us to run a normal [`pytest` parameterized test](https://pytest.org/en/latest/parametrize.html#pytest-mark-parametrize) in all Keras modes and adapt the layer tests to use it. 
That way we can get rid of `absl.testing` and some requirements on TensorFlow internals.\n", "before_files": [{"content": "from setuptools import setup, find_packages\n\n\ndef readme():\n with open(\"README.md\", \"r\") as f:\n return f.read()\n\n\nsetup(\n name=\"larq\",\n version=\"0.7.4\",\n python_requires=\">=3.6\",\n author=\"Plumerai\",\n author_email=\"[email protected]\",\n description=\"An Open Source Machine Learning Library for Training Binarized Neural Networks\",\n long_description=readme(),\n long_description_content_type=\"text/markdown\",\n url=\"https://larq.dev/\",\n packages=find_packages(exclude=[\"larq.snapshots\"]),\n license=\"Apache 2.0\",\n install_requires=[\n \"numpy >= 1.15.4, < 2.0\",\n \"terminaltables>=3.1.0\",\n \"dataclasses ; python_version<'3.7'\",\n ],\n extras_require={\n \"tensorflow\": [\"tensorflow>=1.14.0\"],\n \"tensorflow_gpu\": [\"tensorflow-gpu>=1.14.0\"],\n \"test\": [\n \"absl-py==0.8.1\",\n \"pytest==5.2.2\",\n \"pytest-cov==2.8.1\",\n \"pytest-xdist==1.30.0\",\n \"snapshottest==0.5.1\",\n ],\n \"docs\": [\n \"mkdocs==1.0.4\",\n \"mkdocs-material==4.4.3\",\n \"pymdown-extensions==6.1\",\n \"mknotebooks==0.1.7\",\n \"mkdocs-minify-plugin==0.2.1\",\n \"larq-zoo==0.4.2\",\n \"altair==3.2.0\",\n \"pandas==0.25.3\",\n ],\n },\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Libraries\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n)\n", "path": "setup.py"}, {"content": "import pytest\nfrom tensorflow.python.eager import context\n\n\[email protected]\ndef eager_mode():\n \"\"\"pytest fixture for running test in eager mode\"\"\"\n with context.eager_mode():\n yield\n\n\[email protected]\ndef graph_mode():\n \"\"\"pytest fixture for running test in graph mode\"\"\"\n with context.graph_mode():\n yield\n\n\[email protected](params=[\"eager\", \"graph\"])\ndef eager_and_graph_mode(request):\n \"\"\"pytest fixture for running test in eager and graph mode\"\"\"\n with getattr(context, f\"{request.param}_mode\")():\n yield request.param\n", "path": "larq/conftest.py"}]} | 1,803 | 417 |
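One way a test might consume the new `keras_should_run_eagerly` fixture, as a plain parameterized pytest function instead of a `tf.test.TestCase`. This is a sketch under the assumption that the fixture's yield is threaded into `compile(run_eagerly=...)`, and a stock `Dense` layer stands in for a larq quantized layer.

```python
import numpy as np
import pytest
import tensorflow as tf


@pytest.mark.parametrize("units", [8, 32])
def test_dense_layer(units, keras_should_run_eagerly):
    # Runs once per Keras mode thanks to the fixture's params.
    model = tf.keras.models.Sequential(
        [tf.keras.layers.Dense(units, input_shape=(4,))]
    )
    model.compile(optimizer="sgd", loss="mse",
                  run_eagerly=keras_should_run_eagerly)
    model.fit(np.ones((2, 4)), np.ones((2, units)), epochs=1, verbose=0)
```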
gh_patches_debug_7229 | rasdani/github-patches | git_diff | docker__docker-py-1150 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Client.build crashes when trying to pull a new image if HttpHeaders are set in config file
``` python
import docker
c = docker.Client()
c.build('https://github.com/docker/compose.git')
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-3-d78c607c9627> in <module>()
----> 1 c.build('https://github.com/docker/compose.git')
/home/joffrey/.envs/pydocker/local/lib/python2.7/site-packages/docker/api/build.pyc in build(self, path, tag, quiet, fileobj, nocache, rm, stream, timeout, custom_context, encoding, pull, forcerm, dockerfile, container_limits, decode, buildargs, gzip)
102 headers=headers,
103 stream=stream,
--> 104 timeout=timeout,
105 )
106
/home/joffrey/.envs/pydocker/local/lib/python2.7/site-packages/docker/utils/decorators.pyc in inner(self, *args, **kwargs)
44 kwargs['headers'] = self._auth_configs['HttpHeaders']
45 else:
---> 46 kwargs['headers'].update(self._auth_configs['HttpHeaders'])
47 return f(self, *args, **kwargs)
48 return inner
AttributeError: 'NoneType' object has no attribute 'update'
```
</issue>
<code>
[start of docker/utils/decorators.py]
1 import functools
2
3 from .. import errors
4 from . import utils
5
6
7 def check_resource(f):
8 @functools.wraps(f)
9 def wrapped(self, resource_id=None, *args, **kwargs):
10 if resource_id is None:
11 if kwargs.get('container'):
12 resource_id = kwargs.pop('container')
13 elif kwargs.get('image'):
14 resource_id = kwargs.pop('image')
15 if isinstance(resource_id, dict):
16 resource_id = resource_id.get('Id')
17 if not resource_id:
18 raise errors.NullResource(
19 'image or container param is undefined'
20 )
21 return f(self, resource_id, *args, **kwargs)
22 return wrapped
23
24
25 def minimum_version(version):
26 def decorator(f):
27 @functools.wraps(f)
28 def wrapper(self, *args, **kwargs):
29 if utils.version_lt(self._version, version):
30 raise errors.InvalidVersion(
31 '{0} is not available for version < {1}'.format(
32 f.__name__, version
33 )
34 )
35 return f(self, *args, **kwargs)
36 return wrapper
37 return decorator
38
39
40 def update_headers(f):
41 def inner(self, *args, **kwargs):
42 if 'HttpHeaders' in self._auth_configs:
43 if 'headers' not in kwargs:
44 kwargs['headers'] = self._auth_configs['HttpHeaders']
45 else:
46 kwargs['headers'].update(self._auth_configs['HttpHeaders'])
47 return f(self, *args, **kwargs)
48 return inner
49
[end of docker/utils/decorators.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docker/utils/decorators.py b/docker/utils/decorators.py
--- a/docker/utils/decorators.py
+++ b/docker/utils/decorators.py
@@ -40,7 +40,7 @@
def update_headers(f):
def inner(self, *args, **kwargs):
if 'HttpHeaders' in self._auth_configs:
- if 'headers' not in kwargs:
+ if not kwargs.get('headers'):
kwargs['headers'] = self._auth_configs['HttpHeaders']
else:
kwargs['headers'].update(self._auth_configs['HttpHeaders'])
| {"golden_diff": "diff --git a/docker/utils/decorators.py b/docker/utils/decorators.py\n--- a/docker/utils/decorators.py\n+++ b/docker/utils/decorators.py\n@@ -40,7 +40,7 @@\n def update_headers(f):\n def inner(self, *args, **kwargs):\n if 'HttpHeaders' in self._auth_configs:\n- if 'headers' not in kwargs:\n+ if not kwargs.get('headers'):\n kwargs['headers'] = self._auth_configs['HttpHeaders']\n else:\n kwargs['headers'].update(self._auth_configs['HttpHeaders'])\n", "issue": "Client.build crashes when trying to pull a new image if HttpHeaders are set in config file\n``` python\n\nimport docker\n\nc = docker.Client()\nc.build('https://github.com/docker/compose.git')\n---------------------------------------------------------------------------\nAttributeError Traceback (most recent call last)\n<ipython-input-3-d78c607c9627> in <module>()\n----> 1 c.build('https://github.com/docker/compose.git')\n\n/home/joffrey/.envs/pydocker/local/lib/python2.7/site-packages/docker/api/build.pyc in build(self, path, tag, quiet, fileobj, nocache, rm, stream, timeout, custom_context, encoding, pull, forcerm, dockerfile, container_limits, decode, buildargs, gzip)\n 102 headers=headers,\n 103 stream=stream,\n--> 104 timeout=timeout,\n 105 )\n 106 \n\n/home/joffrey/.envs/pydocker/local/lib/python2.7/site-packages/docker/utils/decorators.pyc in inner(self, *args, **kwargs)\n 44 kwargs['headers'] = self._auth_configs['HttpHeaders']\n 45 else:\n---> 46 kwargs['headers'].update(self._auth_configs['HttpHeaders'])\n 47 return f(self, *args, **kwargs)\n 48 return inner\n\nAttributeError: 'NoneType' object has no attribute 'update'\n```\n\n", "before_files": [{"content": "import functools\n\nfrom .. import errors\nfrom . import utils\n\n\ndef check_resource(f):\n @functools.wraps(f)\n def wrapped(self, resource_id=None, *args, **kwargs):\n if resource_id is None:\n if kwargs.get('container'):\n resource_id = kwargs.pop('container')\n elif kwargs.get('image'):\n resource_id = kwargs.pop('image')\n if isinstance(resource_id, dict):\n resource_id = resource_id.get('Id')\n if not resource_id:\n raise errors.NullResource(\n 'image or container param is undefined'\n )\n return f(self, resource_id, *args, **kwargs)\n return wrapped\n\n\ndef minimum_version(version):\n def decorator(f):\n @functools.wraps(f)\n def wrapper(self, *args, **kwargs):\n if utils.version_lt(self._version, version):\n raise errors.InvalidVersion(\n '{0} is not available for version < {1}'.format(\n f.__name__, version\n )\n )\n return f(self, *args, **kwargs)\n return wrapper\n return decorator\n\n\ndef update_headers(f):\n def inner(self, *args, **kwargs):\n if 'HttpHeaders' in self._auth_configs:\n if 'headers' not in kwargs:\n kwargs['headers'] = self._auth_configs['HttpHeaders']\n else:\n kwargs['headers'].update(self._auth_configs['HttpHeaders'])\n return f(self, *args, **kwargs)\n return inner\n", "path": "docker/utils/decorators.py"}]} | 1,269 | 127 |
gh_patches_debug_13544 | rasdani/github-patches | git_diff | Azure__azure-cli-extensions-93 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Change help for az webapp new
### Extension name (the extension in question)
webapp
### Description of issue (in as much detail as possible)
Currently, the help information for the command returns the
Command
az webapp new: Create and deploy a node web app.
Requested changes:
- add the callout for .net core apps
- add explicit mention that the command is in preview
-----
</issue>
<code>
[start of src/webapp/setup.py]
1 #!/usr/bin/env python
2
3 # --------------------------------------------------------------------------------------------
4 # Copyright (c) Microsoft Corporation. All rights reserved.
5 # Licensed under the MIT License. See License.txt in the project root for license information.
6 # --------------------------------------------------------------------------------------------
7
8 from codecs import open
9 from setuptools import setup, find_packages
10
11 VERSION = "0.1.0"
12
13 CLASSIFIERS = [
14 'Development Status :: 4 - Beta',
15 'Intended Audience :: Developers',
16 'Intended Audience :: System Administrators',
17 'Programming Language :: Python',
18 'Programming Language :: Python :: 2',
19 'Programming Language :: Python :: 2.7',
20 'Programming Language :: Python :: 3',
21 'Programming Language :: Python :: 3.4',
22 'Programming Language :: Python :: 3.5',
23 'Programming Language :: Python :: 3.6',
24 'License :: OSI Approved :: MIT License',
25 ]
26
27 DEPENDENCIES = []
28
29 setup(
30 name='webapp',
31 version=VERSION,
32 description='An Azure CLI Extension to manage appservice resources',
33 long_description='An Azure CLI Extension to manage appservice resources',
34 license='MIT',
35 author='Sisira Panchagnula',
36 author_email='[email protected]',
37 url='https://github.com/Azure/azure-cli-extensions',
38 classifiers=CLASSIFIERS,
39 packages=find_packages(exclude=["tests"]),
40 install_requires=DEPENDENCIES
41 )
42
[end of src/webapp/setup.py]
[start of src/webapp/azext_webapp/_help.py]
1 # --------------------------------------------------------------------------------------------
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the MIT License. See License.txt in the project root for license information.
4 # --------------------------------------------------------------------------------------------
5
6 from knack.help_files import helps
7
8
9 helps['webapp new'] = """
10 type: command
11 short-summary: Create and deploy a node web app
12 examples:
13 - name: Create a web app with the default configuration.
14 text: >
15 az webapp new -n MyUniqueAppName --dryrun \n
16 az webapp new -n MyUniqueAppName -l locationName
17 """
18
[end of src/webapp/azext_webapp/_help.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/webapp/azext_webapp/_help.py b/src/webapp/azext_webapp/_help.py
--- a/src/webapp/azext_webapp/_help.py
+++ b/src/webapp/azext_webapp/_help.py
@@ -8,7 +8,8 @@
helps['webapp new'] = """
type: command
- short-summary: Create and deploy a node web app
+ short-summary: Experimental command to create and deploy a web app.
+ Current supports Node on Linux & .NET Core on Windows.
examples:
- name: Create a web app with the default configuration.
text: >
diff --git a/src/webapp/setup.py b/src/webapp/setup.py
--- a/src/webapp/setup.py
+++ b/src/webapp/setup.py
@@ -8,7 +8,7 @@
from codecs import open
from setuptools import setup, find_packages
-VERSION = "0.1.0"
+VERSION = "0.1.1"
CLASSIFIERS = [
'Development Status :: 4 - Beta',
| {"golden_diff": "diff --git a/src/webapp/azext_webapp/_help.py b/src/webapp/azext_webapp/_help.py\n--- a/src/webapp/azext_webapp/_help.py\n+++ b/src/webapp/azext_webapp/_help.py\n@@ -8,7 +8,8 @@\n \n helps['webapp new'] = \"\"\"\n type: command\n- short-summary: Create and deploy a node web app\n+ short-summary: Experimental command to create and deploy a web app.\n+ Current supports Node on Linux & .NET Core on Windows.\n examples:\n - name: Create a web app with the default configuration.\n text: >\ndiff --git a/src/webapp/setup.py b/src/webapp/setup.py\n--- a/src/webapp/setup.py\n+++ b/src/webapp/setup.py\n@@ -8,7 +8,7 @@\n from codecs import open\n from setuptools import setup, find_packages\n \n-VERSION = \"0.1.0\"\n+VERSION = \"0.1.1\"\n \n CLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n", "issue": "Change help for az webapp new\n### Extension name (the extension in question)\r\nwebapp\r\n\r\n### Description of issue (in as much detail as possible)\r\ncurrently the help information for the command return the \r\n\r\nCommand\r\n az webapp new: Create and deploy a node web app.\r\n\r\nRequested changes:\r\n\r\n- add the callout for .net core apps\r\n- add explicit mention that the command is in preview\r\n\r\n-----\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom codecs import open\nfrom setuptools import setup, find_packages\n\nVERSION = \"0.1.0\"\n\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'License :: OSI Approved :: MIT License',\n]\n\nDEPENDENCIES = []\n\nsetup(\n name='webapp',\n version=VERSION,\n description='An Azure CLI Extension to manage appservice resources',\n long_description='An Azure CLI Extension to manage appservice resources',\n license='MIT',\n author='Sisira Panchagnula',\n author_email='[email protected]',\n url='https://github.com/Azure/azure-cli-extensions',\n classifiers=CLASSIFIERS,\n packages=find_packages(exclude=[\"tests\"]),\n install_requires=DEPENDENCIES\n)\n", "path": "src/webapp/setup.py"}, {"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom knack.help_files import helps\n\n\nhelps['webapp new'] = \"\"\"\n type: command\n short-summary: Create and deploy a node web app\n examples:\n - name: Create a web app with the default configuration.\n text: >\n az webapp new -n MyUniqueAppName --dryrun \\n\n az webapp new -n MyUniqueAppName -l locationName\n\"\"\"\n", "path": "src/webapp/azext_webapp/_help.py"}]} | 1,182 | 234 |
gh_patches_debug_23257 | rasdani/github-patches | git_diff | getredash__redash-2799 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add front-end extension capability
This is similar in concept to https://github.com/getredash/redash/pull/2354/files
</issue>
<code>
[start of redash/extensions.py]
1 from pkg_resources import iter_entry_points
2
3
4 def init_extensions(app):
5 """
6 Load the Redash extensions for the given Redash Flask app.
7 """
8 if not hasattr(app, 'redash_extensions'):
9 app.redash_extensions = {}
10
11 for entry_point in iter_entry_points('redash.extensions'):
12 app.logger.info('Loading Redash extension %s.', entry_point.name)
13 extension = entry_point.load()
14 app.redash_extensions[entry_point.name] = extension(app)
15
[end of redash/extensions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/redash/extensions.py b/redash/extensions.py
--- a/redash/extensions.py
+++ b/redash/extensions.py
@@ -1,4 +1,5 @@
-from pkg_resources import iter_entry_points
+import os
+from pkg_resources import iter_entry_points, resource_isdir, resource_listdir
def init_extensions(app):
@@ -10,5 +11,20 @@
for entry_point in iter_entry_points('redash.extensions'):
app.logger.info('Loading Redash extension %s.', entry_point.name)
- extension = entry_point.load()
- app.redash_extensions[entry_point.name] = extension(app)
+ try:
+ extension = entry_point.load()
+ app.redash_extensions[entry_point.name] = {
+ "entry_function": extension(app),
+ "resources_list": []
+ }
+ except ImportError:
+ app.logger.info('%s does not have a callable and will not be loaded.', entry_point.name)
+ (root_module, _) = os.path.splitext(entry_point.module_name)
+ content_folder_relative = os.path.join(entry_point.name, 'bundle')
+
+ # If it's a frontend extension only, store a list of files in the bundle directory.
+ if resource_isdir(root_module, content_folder_relative):
+ app.redash_extensions[entry_point.name] = {
+ "entry_function": None,
+ "resources_list": resource_listdir(root_module, content_folder_relative)
+ }
| {"golden_diff": "diff --git a/redash/extensions.py b/redash/extensions.py\n--- a/redash/extensions.py\n+++ b/redash/extensions.py\n@@ -1,4 +1,5 @@\n-from pkg_resources import iter_entry_points\n+import os\n+from pkg_resources import iter_entry_points, resource_isdir, resource_listdir\n \n \n def init_extensions(app):\n@@ -10,5 +11,20 @@\n \n for entry_point in iter_entry_points('redash.extensions'):\n app.logger.info('Loading Redash extension %s.', entry_point.name)\n- extension = entry_point.load()\n- app.redash_extensions[entry_point.name] = extension(app)\n+ try:\n+ extension = entry_point.load()\n+ app.redash_extensions[entry_point.name] = {\n+ \"entry_function\": extension(app),\n+ \"resources_list\": []\n+ }\n+ except ImportError:\n+ app.logger.info('%s does not have a callable and will not be loaded.', entry_point.name)\n+ (root_module, _) = os.path.splitext(entry_point.module_name)\n+ content_folder_relative = os.path.join(entry_point.name, 'bundle')\n+\n+ # If it's a frontend extension only, store a list of files in the bundle directory.\n+ if resource_isdir(root_module, content_folder_relative):\n+ app.redash_extensions[entry_point.name] = {\n+ \"entry_function\": None,\n+ \"resources_list\": resource_listdir(root_module, content_folder_relative)\n+ }\n", "issue": "Add front-end extension capability\nThis is similar in concept to https://github.com/getredash/redash/pull/2354/files\n", "before_files": [{"content": "from pkg_resources import iter_entry_points\n\n\ndef init_extensions(app):\n \"\"\"\n Load the Redash extensions for the given Redash Flask app.\n \"\"\"\n if not hasattr(app, 'redash_extensions'):\n app.redash_extensions = {}\n\n for entry_point in iter_entry_points('redash.extensions'):\n app.logger.info('Loading Redash extension %s.', entry_point.name)\n extension = entry_point.load()\n app.redash_extensions[entry_point.name] = extension(app)\n", "path": "redash/extensions.py"}]} | 687 | 318 |
gh_patches_debug_17786 | rasdani/github-patches | git_diff | lmfit__lmfit-py-152 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot deploy to pypi repo dues to tuples in the `setup.py` attributes
Due to a Python bug (http://bugs.python.org/issue19610) I cannot install and deploy lmfit with `python setup.py install`
I discovered this issue while trying to fix #149
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # from distutils.core import setup
3 from setuptools import setup
4
5 import lmfit as lmfit
6 import numpy, scipy
7
8 long_desc = """A library for least-squares minimization and data fitting in
9 Python. Built on top of scipy.optimize, lmfit provides a Parameter object
10 which can be set as fixed or free, can have upper and/or lower bounds, or
11 can be written in terms of algebraic constraints of other Parameters. The
12 user writes a function to be minimized as a function of these Parameters,
13 and the scipy.optimize methods are used to find the optimal values for the
14 Parameters. The Levenberg-Marquardt (leastsq) is the default minimization
15 algorithm, and provides estimated standard errors and correlations between
16 varied Parameters. Other minimization methods, including Nelder-Mead's
17 downhill simplex, Powell's method, BFGS, Sequential Least Squares, and
18 others are also supported. Bounds and contraints can be placed on
19 Parameters for all of these methods.
20
21 In addition, methods for explicitly calculating confidence intervals are
22 provided for exploring minmization problems where the approximation of
23 estimating Parameter uncertainties from the covariance matrix is
24 questionable. """
25
26
27 setup(name = 'lmfit',
28 version = lmfit.__version__,
29 author = 'LMFit Development Team',
30 author_email = '[email protected]',
31 url = 'http://lmfit.github.io/lmfit-py/',
32 download_url = 'http://lmfit.github.io//lmfit-py/',
33 requires = ('numpy', 'scipy'),
34 license = 'BSD',
35 description = "Least-Squares Minimization with Bounds and Constraints",
36 long_description = long_desc,
37 platforms = ('Windows', 'Linux', 'Mac OS X'),
38 classifiers=['Intended Audience :: Science/Research',
39 'Operating System :: OS Independent',
40 'Programming Language :: Python',
41 'Topic :: Scientific/Engineering',
42 ],
43 # test_suite='nose.collector',
44 # test_requires=['Nose'],
45 package_dir = {'lmfit': 'lmfit'},
46 packages = ['lmfit', 'lmfit.ui', 'lmfit.uncertainties'],
47 )
48
49
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -30,11 +30,11 @@
author_email = '[email protected]',
url = 'http://lmfit.github.io/lmfit-py/',
download_url = 'http://lmfit.github.io//lmfit-py/',
- requires = ('numpy', 'scipy'),
+ requires = ['numpy', 'scipy'],
license = 'BSD',
description = "Least-Squares Minimization with Bounds and Constraints",
long_description = long_desc,
- platforms = ('Windows', 'Linux', 'Mac OS X'),
+ platforms = ['Windows', 'Linux', 'Mac OS X'],
classifiers=['Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Programming Language :: Python',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -30,11 +30,11 @@\n author_email = '[email protected]',\n url = 'http://lmfit.github.io/lmfit-py/',\n download_url = 'http://lmfit.github.io//lmfit-py/',\n- requires = ('numpy', 'scipy'),\n+ requires = ['numpy', 'scipy'],\n license = 'BSD',\n description = \"Least-Squares Minimization with Bounds and Constraints\",\n long_description = long_desc,\n- platforms = ('Windows', 'Linux', 'Mac OS X'),\n+ platforms = ['Windows', 'Linux', 'Mac OS X'],\n classifiers=['Intended Audience :: Science/Research',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n", "issue": "Cannot deploy to pypi repo dues to tuples in the `setup.py` attributes\nDue to a python-bug (http://bugs.python.org/issue19610) i cannot install and deploy lmfit with `python setup install`\n\nI discovered this issue while trying to fix #149 \n\n", "before_files": [{"content": "#!/usr/bin/env python\n# from distutils.core import setup\nfrom setuptools import setup\n\nimport lmfit as lmfit\nimport numpy, scipy\n\nlong_desc = \"\"\"A library for least-squares minimization and data fitting in\nPython. Built on top of scipy.optimize, lmfit provides a Parameter object\nwhich can be set as fixed or free, can have upper and/or lower bounds, or\ncan be written in terms of algebraic constraints of other Parameters. The\nuser writes a function to be minimized as a function of these Parameters,\nand the scipy.optimize methods are used to find the optimal values for the\nParameters. The Levenberg-Marquardt (leastsq) is the default minimization\nalgorithm, and provides estimated standard errors and correlations between\nvaried Parameters. Other minimization methods, including Nelder-Mead's\ndownhill simplex, Powell's method, BFGS, Sequential Least Squares, and\nothers are also supported. Bounds and contraints can be placed on\nParameters for all of these methods.\n\nIn addition, methods for explicitly calculating confidence intervals are\nprovided for exploring minmization problems where the approximation of\nestimating Parameter uncertainties from the covariance matrix is\nquestionable. \"\"\"\n\n\nsetup(name = 'lmfit',\n version = lmfit.__version__,\n author = 'LMFit Development Team',\n author_email = '[email protected]',\n url = 'http://lmfit.github.io/lmfit-py/',\n download_url = 'http://lmfit.github.io//lmfit-py/',\n requires = ('numpy', 'scipy'),\n license = 'BSD',\n description = \"Least-Squares Minimization with Bounds and Constraints\",\n long_description = long_desc,\n platforms = ('Windows', 'Linux', 'Mac OS X'),\n classifiers=['Intended Audience :: Science/Research',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Scientific/Engineering',\n ],\n # test_suite='nose.collector',\n # test_requires=['Nose'],\n package_dir = {'lmfit': 'lmfit'},\n packages = ['lmfit', 'lmfit.ui', 'lmfit.uncertainties'],\n )\n\n", "path": "setup.py"}]} | 1,152 | 186 |
gh_patches_debug_8538 | rasdani/github-patches | git_diff | qtile__qtile-1768 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
widget.CheckUpdates not hidden when no updates are available
<!--
Please do not ask general questions here! There are [community
contact](https://github.com/qtile/qtile#community) options for that.
-->
# Issue description
<!--
A brief discussion of what failed and how it failed. A description of
what you tried is helpful, i.e. "When I use lazy.kill() on a window I get
the following stack trace" instead of "Closing windows doesn't work".
-->
The documentation for [`CheckUpdates`](http://docs.qtile.org/en/latest/manual/ref/widgets.html#checkupdates) `display_format` mentions that the given format-specifier will only be used when there are updates available - giving rise to the assumption that the widget would be invisible when no updates are available (which seems very reasonable).
This doesn't seem to be the case for me, though. I am permanently seeing a `0`. So either the documentation or the implementation might need a little tweak.
# Qtile version
<!--
Please include the exact commit hash of the version of Qtile that failed.
-->
0.15.1
</issue>
<code>
[start of libqtile/widget/check_updates.py]
1 # Copyright (c) 2015 Ali Mousavi
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a copy
4 # of this software and associated documentation files (the "Software"), to deal
5 # in the Software without restriction, including without limitation the rights
6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 # copies of the Software, and to permit persons to whom the Software is
8 # furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19 # SOFTWARE.
20
21 import os
22 from subprocess import CalledProcessError, Popen
23
24 from libqtile.log_utils import logger
25 from libqtile.widget import base
26
27
28 class CheckUpdates(base.ThreadedPollText):
29 """Shows number of pending updates in different unix systems"""
30 orientations = base.ORIENTATION_HORIZONTAL
31 defaults = [
32 ("distro", "Arch", "Name of your distribution"),
33 ("custom_command", None, "Custom shell command for checking updates (counts the lines of the output)"),
34 ("update_interval", 60, "Update interval in seconds."),
35 ('execute', None, 'Command to execute on click'),
36 ("display_format", "Updates: {updates}", "Display format if updates available"),
37 ("colour_no_updates", "ffffff", "Colour when there's no updates."),
38 ("colour_have_updates", "ffffff", "Colour when there are updates."),
39 ("restart_indicator", "", "Indicator to represent reboot is required. (Ubuntu only)")
40 ]
41
42 def __init__(self, **config):
43 base.ThreadedPollText.__init__(self, **config)
44 self.add_defaults(CheckUpdates.defaults)
45
46 # format: "Distro": ("cmd", "number of lines to subtract from output")
47 self.cmd_dict = {"Arch": ("pacman -Qu", 0),
48 "Arch_checkupdates": ("checkupdates", 0),
49 "Arch_Sup": ("pacman -Sup", 1),
50 "Arch_yay": ("yay -Qu", 0),
51 "Debian": ("apt-show-versions -u -b", 0),
52 "Ubuntu": ("aptitude search ~U", 0),
53 "Fedora": ("dnf list updates", 3),
54 "FreeBSD": ("pkg_version -I -l '<'", 0),
55 "Mandriva": ("urpmq --auto-select", 0)
56 }
57
58 # Check if distro name is valid.
59 try:
60 self.cmd = self.cmd_dict[self.distro][0].split()
61 self.subtr = self.cmd_dict[self.distro][1]
62 except KeyError:
63 distros = sorted(self.cmd_dict.keys())
64 logger.error(self.distro + ' is not a valid distro name. ' +
65 'Use one of the list: ' + str(distros) + '.')
66 self.cmd = None
67
68 def _check_updates(self):
69 # type: () -> str
70 try:
71 if self.custom_command is None:
72 updates = self.call_process(self.cmd)
73 else:
74 updates = self.call_process(self.custom_command, shell=True)
75 self.subtr = 0
76 except CalledProcessError:
77 updates = ""
78 num_updates = str(len(updates.splitlines()) - self.subtr)
79
80 if self.restart_indicator and os.path.exists('/var/run/reboot-required'):
81 num_updates += self.restart_indicator
82
83 self._set_colour(num_updates)
84 return self.display_format.format(**{"updates": num_updates})
85
86 def _set_colour(self, num_updates):
87 # type: (str) -> None
88 if not num_updates.startswith("0"):
89 self.layout.colour = self.colour_have_updates
90 else:
91 self.layout.colour = self.colour_no_updates
92
93 def poll(self):
94 # type: () -> str
95 if not self.cmd:
96 return "N/A"
97 return self._check_updates()
98
99 def button_press(self, x, y, button):
100 # type: (int, int, int) -> None
101 base.ThreadedPollText.button_press(self, x, y, button)
102 if button == 1 and self.execute is not None:
103 Popen(self.execute, shell=True)
104
[end of libqtile/widget/check_updates.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libqtile/widget/check_updates.py b/libqtile/widget/check_updates.py
--- a/libqtile/widget/check_updates.py
+++ b/libqtile/widget/check_updates.py
@@ -75,7 +75,11 @@
self.subtr = 0
except CalledProcessError:
updates = ""
- num_updates = str(len(updates.splitlines()) - self.subtr)
+ num_updates = len(updates.splitlines()) - self.subtr
+
+ if num_updates == 0:
+ return ""
+ num_updates = str(num_updates)
if self.restart_indicator and os.path.exists('/var/run/reboot-required'):
num_updates += self.restart_indicator
| {"golden_diff": "diff --git a/libqtile/widget/check_updates.py b/libqtile/widget/check_updates.py\n--- a/libqtile/widget/check_updates.py\n+++ b/libqtile/widget/check_updates.py\n@@ -75,7 +75,11 @@\n self.subtr = 0\n except CalledProcessError:\n updates = \"\"\n- num_updates = str(len(updates.splitlines()) - self.subtr)\n+ num_updates = len(updates.splitlines()) - self.subtr\n+\n+ if num_updates == 0:\n+ return \"\"\n+ num_updates = str(num_updates)\n \n if self.restart_indicator and os.path.exists('/var/run/reboot-required'):\n num_updates += self.restart_indicator\n", "issue": "widget.CheckUpdates not hidden when no updates are available\n<!--\r\nPlease do not ask general questions here! There are [community\r\ncontact](https://github.com/qtile/qtile#community) options for that.\r\n-->\r\n\r\n# Issue description\r\n\r\n<!--\r\nA brief discussion of what failed and how it failed. A description of\r\nwhat you tried is helpful, i.e. \"When I use lazy.kill() on a window I get\r\nthe following stack trace\" instead of \"Closing windows doesn't work\".\r\n-->\r\n\r\nThe documentation for [`CheckUpdates`](http://docs.qtile.org/en/latest/manual/ref/widgets.html#checkupdates) `display_format` mentions that the given format-specifier will only be used when there are updates available - giving rise to the assumption that the widget would be invisible when no updates are available (which seems very reasonable).\r\n\r\nThis doesn't seem to be the case for me, though. I am permanently seeing a `0`. So either the documentation or the implementation might need a little tweak.\r\n\r\n# Qtile version\r\n\r\n<!--\r\nPlease include the exact commit hash of the version of Qtile that failed.\r\n-->\r\n\r\n0.15.1\n", "before_files": [{"content": "# Copyright (c) 2015 Ali Mousavi\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport os\nfrom subprocess import CalledProcessError, Popen\n\nfrom libqtile.log_utils import logger\nfrom libqtile.widget import base\n\n\nclass CheckUpdates(base.ThreadedPollText):\n \"\"\"Shows number of pending updates in different unix systems\"\"\"\n orientations = base.ORIENTATION_HORIZONTAL\n defaults = [\n (\"distro\", \"Arch\", \"Name of your distribution\"),\n (\"custom_command\", None, \"Custom shell command for checking updates (counts the lines of the output)\"),\n (\"update_interval\", 60, \"Update interval in seconds.\"),\n ('execute', None, 'Command to execute on click'),\n (\"display_format\", \"Updates: {updates}\", \"Display format if updates available\"),\n (\"colour_no_updates\", \"ffffff\", \"Colour when there's no updates.\"),\n (\"colour_have_updates\", \"ffffff\", \"Colour when there are updates.\"),\n (\"restart_indicator\", \"\", \"Indicator to represent reboot is required. (Ubuntu only)\")\n ]\n\n def __init__(self, **config):\n base.ThreadedPollText.__init__(self, **config)\n self.add_defaults(CheckUpdates.defaults)\n\n # format: \"Distro\": (\"cmd\", \"number of lines to subtract from output\")\n self.cmd_dict = {\"Arch\": (\"pacman -Qu\", 0),\n \"Arch_checkupdates\": (\"checkupdates\", 0),\n \"Arch_Sup\": (\"pacman -Sup\", 1),\n \"Arch_yay\": (\"yay -Qu\", 0),\n \"Debian\": (\"apt-show-versions -u -b\", 0),\n \"Ubuntu\": (\"aptitude search ~U\", 0),\n \"Fedora\": (\"dnf list updates\", 3),\n \"FreeBSD\": (\"pkg_version -I -l '<'\", 0),\n \"Mandriva\": (\"urpmq --auto-select\", 0)\n }\n\n # Check if distro name is valid.\n try:\n self.cmd = self.cmd_dict[self.distro][0].split()\n self.subtr = self.cmd_dict[self.distro][1]\n except KeyError:\n distros = sorted(self.cmd_dict.keys())\n logger.error(self.distro + ' is not a valid distro name. ' +\n 'Use one of the list: ' + str(distros) + '.')\n self.cmd = None\n\n def _check_updates(self):\n # type: () -> str\n try:\n if self.custom_command is None:\n updates = self.call_process(self.cmd)\n else:\n updates = self.call_process(self.custom_command, shell=True)\n self.subtr = 0\n except CalledProcessError:\n updates = \"\"\n num_updates = str(len(updates.splitlines()) - self.subtr)\n\n if self.restart_indicator and os.path.exists('/var/run/reboot-required'):\n num_updates += self.restart_indicator\n\n self._set_colour(num_updates)\n return self.display_format.format(**{\"updates\": num_updates})\n\n def _set_colour(self, num_updates):\n # type: (str) -> None\n if not num_updates.startswith(\"0\"):\n self.layout.colour = self.colour_have_updates\n else:\n self.layout.colour = self.colour_no_updates\n\n def poll(self):\n # type: () -> str\n if not self.cmd:\n return \"N/A\"\n return self._check_updates()\n\n def button_press(self, x, y, button):\n # type: (int, int, int) -> None\n base.ThreadedPollText.button_press(self, x, y, button)\n if button == 1 and self.execute is not None:\n Popen(self.execute, shell=True)\n", "path": "libqtile/widget/check_updates.py"}]} | 2,002 | 153 |
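The counting logic of the golden diff, extracted into a standalone helper so the behavior change is easy to check: returning an empty string for zero updates is what effectively hides the widget, since the bar renders no text for it. The helper and format string are simplified stand-ins for the widget's internals.

```python
def check_updates_text(raw_output, subtr=0, display_format="Updates: {updates}"):
    """Sketch of the fixed poll logic from the golden diff above."""
    num_updates = len(raw_output.splitlines()) - subtr
    if num_updates == 0:
        return ""  # empty text -> nothing rendered for this widget
    return display_format.format(updates=num_updates)


assert check_updates_text("") == ""
assert check_updates_text("pkg1 1.0 -> 1.1\npkg2 2.0 -> 2.1") == "Updates: 2"
```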
gh_patches_debug_2136 | rasdani/github-patches | git_diff | Kinto__kinto-1752 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Deleting a collection doesn't delete access_control_entries for its children
`buckets.py` has an event listener that ensures that when a bucket is deleted, everything underneath it is recursively deleted. `collections.py` has one too but it doesn't appear to be as robust -- it doesn't have a wildcard to match objects more than one level below it (which might be OK, since only records are below collections now), and `delete_object_permissions` is only called on the collection rather than its children.
</issue>
<code>
[start of kinto/views/collections.py]
1 import colander
2 from pyramid.events import subscriber
3
4 from kinto.core import resource, utils
5 from kinto.core.events import ResourceChanged, ACTIONS
6 from kinto.schema_validation import validate_from_bucket_schema_or_400, JSONSchemaMapping
7
8
9 class CollectionSchema(resource.ResourceSchema):
10 schema = JSONSchemaMapping(missing=colander.drop)
11 cache_expires = colander.SchemaNode(colander.Int(), missing=colander.drop)
12
13
14 @resource.register(name='collection',
15 collection_path='/buckets/{{bucket_id}}/collections',
16 record_path='/buckets/{{bucket_id}}/collections/{{id}}')
17 class Collection(resource.ShareableResource):
18 schema = CollectionSchema
19 permissions = ('read', 'write', 'record:create')
20
21 def get_parent_id(self, request):
22 bucket_id = request.matchdict['bucket_id']
23 parent_id = utils.instance_uri(request, 'bucket', id=bucket_id)
24 return parent_id
25
26 def process_record(self, new, old=None):
27 """Additional collection schema validation from bucket, if any."""
28 new = super().process_record(new, old)
29
30 # Remove internal and auto-assigned fields.
31 internal_fields = (self.model.id_field,
32 self.model.modified_field,
33 self.model.permissions_field)
34 validate_from_bucket_schema_or_400(new, resource_name="collection", request=self.request,
35 ignore_fields=internal_fields)
36 return new
37
38
39 @subscriber(ResourceChanged,
40 for_resources=('collection',),
41 for_actions=(ACTIONS.DELETE,))
42 def on_collections_deleted(event):
43 """Some collections were deleted, delete records.
44 """
45 storage = event.request.registry.storage
46 permission = event.request.registry.permission
47
48 for change in event.impacted_records:
49 collection = change['old']
50 bucket_id = event.payload['bucket_id']
51 parent_id = utils.instance_uri(event.request, 'collection',
52 bucket_id=bucket_id,
53 id=collection['id'])
54 storage.delete_all(collection_id=None,
55 parent_id=parent_id,
56 with_deleted=False)
57 storage.purge_deleted(collection_id=None,
58 parent_id=parent_id)
59 permission.delete_object_permissions(parent_id)
60
[end of kinto/views/collections.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kinto/views/collections.py b/kinto/views/collections.py
--- a/kinto/views/collections.py
+++ b/kinto/views/collections.py
@@ -56,4 +56,4 @@
with_deleted=False)
storage.purge_deleted(collection_id=None,
parent_id=parent_id)
- permission.delete_object_permissions(parent_id)
+ permission.delete_object_permissions(parent_id + '/*')
| {"golden_diff": "diff --git a/kinto/views/collections.py b/kinto/views/collections.py\n--- a/kinto/views/collections.py\n+++ b/kinto/views/collections.py\n@@ -56,4 +56,4 @@\n with_deleted=False)\n storage.purge_deleted(collection_id=None,\n parent_id=parent_id)\n- permission.delete_object_permissions(parent_id)\n+ permission.delete_object_permissions(parent_id + '/*')\n", "issue": "Deleting a collection doesn't delete access_control_entries for its children\n`buckets.py` has an event listener that ensures that when a bucket is deleted, everything underneath it is recursively deleted. `collections.py` has one too but it doesn't appear to be as robust -- it doesn't have a wildcard to match objects more than one level below it (which might be OK, since only records are below collections now), and `delete_object_permissions` is only called on the collection rather than its children.\n", "before_files": [{"content": "import colander\nfrom pyramid.events import subscriber\n\nfrom kinto.core import resource, utils\nfrom kinto.core.events import ResourceChanged, ACTIONS\nfrom kinto.schema_validation import validate_from_bucket_schema_or_400, JSONSchemaMapping\n\n\nclass CollectionSchema(resource.ResourceSchema):\n schema = JSONSchemaMapping(missing=colander.drop)\n cache_expires = colander.SchemaNode(colander.Int(), missing=colander.drop)\n\n\[email protected](name='collection',\n collection_path='/buckets/{{bucket_id}}/collections',\n record_path='/buckets/{{bucket_id}}/collections/{{id}}')\nclass Collection(resource.ShareableResource):\n schema = CollectionSchema\n permissions = ('read', 'write', 'record:create')\n\n def get_parent_id(self, request):\n bucket_id = request.matchdict['bucket_id']\n parent_id = utils.instance_uri(request, 'bucket', id=bucket_id)\n return parent_id\n\n def process_record(self, new, old=None):\n \"\"\"Additional collection schema validation from bucket, if any.\"\"\"\n new = super().process_record(new, old)\n\n # Remove internal and auto-assigned fields.\n internal_fields = (self.model.id_field,\n self.model.modified_field,\n self.model.permissions_field)\n validate_from_bucket_schema_or_400(new, resource_name=\"collection\", request=self.request,\n ignore_fields=internal_fields)\n return new\n\n\n@subscriber(ResourceChanged,\n for_resources=('collection',),\n for_actions=(ACTIONS.DELETE,))\ndef on_collections_deleted(event):\n \"\"\"Some collections were deleted, delete records.\n \"\"\"\n storage = event.request.registry.storage\n permission = event.request.registry.permission\n\n for change in event.impacted_records:\n collection = change['old']\n bucket_id = event.payload['bucket_id']\n parent_id = utils.instance_uri(event.request, 'collection',\n bucket_id=bucket_id,\n id=collection['id'])\n storage.delete_all(collection_id=None,\n parent_id=parent_id,\n with_deleted=False)\n storage.purge_deleted(collection_id=None,\n parent_id=parent_id)\n permission.delete_object_permissions(parent_id)\n", "path": "kinto/views/collections.py"}]} | 1,201 | 91 |
gh_patches_debug_1396 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-5366 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
General: v0.33.0 pip install fails
In a fresh Python v2.7.12 virtualenv on linux:
```
pip install google-cloud
```
Results in:
```
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/tmp/pip-install-3_n60m/google-cloud/setup.py", line 22, in <module>
with open(os.path.join(PACKAGE_ROOT, 'setup-README.rst')) as file_obj:
IOError: [Errno 2] No such file or directory: '/tmp/pip-install-3_n60m/google-cloud/setup-README.rst'
```
Note:
```
pip install google-cloud==0.32.0
```
works fine.
I believe it has to do with recent changes: https://github.com/GoogleCloudPlatform/google-cloud-python/commit/71e5d4bf94745580834b86c3e92ac4186c3115c0
</issue>
<code>
[start of legacy/google-cloud/setup.py]
1 # Copyright 2016 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import logging
17
18 from setuptools import setup
19
20 PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))
21
22 with open(os.path.join(PACKAGE_ROOT, 'setup-README.rst')) as file_obj:
23 README = file_obj.read()
24
25 # NOTE: This is duplicated throughout and we should try to
26 # consolidate.
27 SETUP_BASE = {
28 'author': 'Google Cloud Platform',
29 'author_email': '[email protected]',
30 'scripts': [],
31 'url': 'https://github.com/GoogleCloudPlatform/google-cloud-python',
32 'license': 'Apache 2.0',
33 'platforms': 'Posix; MacOS X; Windows',
34 'include_package_data': True,
35 'zip_safe': False,
36 'classifiers': [
37 'Development Status :: 7 - Inactive',
38 'Intended Audience :: Developers',
39 'License :: OSI Approved :: Apache Software License',
40 'Operating System :: OS Independent',
41 'Programming Language :: Python :: 2',
42 'Programming Language :: Python :: 2.7',
43 'Programming Language :: Python :: 3',
44 'Programming Language :: Python :: 3.4',
45 'Programming Language :: Python :: 3.5',
46 'Programming Language :: Python :: 3.6',
47 'Topic :: Internet',
48 ],
49 }
50
51 REQUIREMENTS = [
52 'google-api-core >= 0.1.2, < 0.2.0dev',
53 'google-cloud-bigquery >= 0.28.0, < 0.29dev',
54 'google-cloud-bigquery-datatransfer >= 0.1.0, < 0.2dev',
55 'google-cloud-bigtable >= 0.28.1, < 0.29dev',
56 'google-cloud-container >= 0.1.0, < 0.2dev',
57 'google-cloud-core >= 0.28.0, < 0.29dev',
58 'google-cloud-datastore >= 1.4.0, < 1.5dev',
59 'google-cloud-dns >= 0.28.0, < 0.29dev',
60 'google-cloud-error-reporting >= 0.28.0, < 0.29dev',
61 'google-cloud-firestore >= 0.28.0, < 0.29dev',
62 'google-cloud-language >= 1.0.0, < 1.1dev',
63 'google-cloud-logging >= 1.4.0, < 1.5dev',
64 'google-cloud-monitoring >= 0.28.0, < 0.29dev',
65 'google-cloud-pubsub >= 0.30.0, < 0.31dev',
66 'google-cloud-resource-manager >= 0.28.0, < 0.29dev',
67 'google-cloud-runtimeconfig >= 0.28.0, < 0.29dev',
68 'google-cloud-spanner >= 0.29.0, < 0.30dev',
69 'google-cloud-speech >= 0.30.0, < 0.31dev',
70 'google-cloud-storage >= 1.6.0, < 1.7dev',
71 'google-cloud-trace >= 0.17.0, < 0.18dev',
72 'google-cloud-translate >= 1.3.0, < 1.4dev',
73 'google-cloud-videointelligence >= 1.0.0, < 1.1dev',
74 'google-cloud-vision >= 0.29.0, < 0.30dev',
75 ]
76
77 setup(
78 name='google-cloud',
79 version='0.33.0',
80 description='API Client library for Google Cloud',
81 long_description=README,
82 install_requires=REQUIREMENTS,
83 **SETUP_BASE
84 )
85
86 warning = "WARNING: The google-cloud Python package is deprecated. On " \
87 "June 18, 2018, this package will no longer install any other " \
88 "packages. Please install the product-specific google-cloud-* " \
89 "packages needed for your application. See " \
90 "https://github.com/GoogleCloudPlatform/google-cloud-python."
91
92 logging.warn(warning)
93
[end of legacy/google-cloud/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/legacy/google-cloud/setup.py b/legacy/google-cloud/setup.py
--- a/legacy/google-cloud/setup.py
+++ b/legacy/google-cloud/setup.py
@@ -76,7 +76,7 @@
setup(
name='google-cloud',
- version='0.33.0',
+ version='0.33.1',
description='API Client library for Google Cloud',
long_description=README,
install_requires=REQUIREMENTS,
| {"golden_diff": "diff --git a/legacy/google-cloud/setup.py b/legacy/google-cloud/setup.py\n--- a/legacy/google-cloud/setup.py\n+++ b/legacy/google-cloud/setup.py\n@@ -76,7 +76,7 @@\n \n setup(\n name='google-cloud',\n- version='0.33.0',\n+ version='0.33.1',\n description='API Client library for Google Cloud',\n long_description=README,\n install_requires=REQUIREMENTS,\n", "issue": "General: v0.33.0 pip install fails\nIn a fresh Python v2.7.12 virtualenv on linux:\r\n```\r\npip install google-cloud\r\n```\r\n\r\nResults in:\r\n```\r\n Traceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"/tmp/pip-install-3_n60m/google-cloud/setup.py\", line 22, in <module>\r\n with open(os.path.join(PACKAGE_ROOT, 'setup-README.rst')) as file_obj:\r\n IOError: [Errno 2] No such file or directory: '/tmp/pip-install-3_n60m/google-cloud/setup-README.rst'\r\n```\r\n\r\nNote:\r\n```\r\npip install google-cloud==0.32.0\r\n```\r\nworks fine.\r\n\r\nI believe it has to do with recent changes: https://github.com/GoogleCloudPlatform/google-cloud-python/commit/71e5d4bf94745580834b86c3e92ac4186c3115c0\n", "before_files": [{"content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nAWS SAM Serverless Application Model\n\"\"\"\nimport io\nimport os\nimport re\n\nfrom setuptools import setup, find_packages\n\n\ndef read(*filenames, **kwargs):\n encoding = kwargs.get('encoding', 'utf-8')\n sep = kwargs.get('sep', os.linesep)\n buf = []\n for filename in filenames:\n with io.open(filename, encoding=encoding) as f:\n buf.append(f.read())\n return sep.join(buf)\n\n\ndef read_version():\n content = read(os.path.join(\n os.path.dirname(__file__), 'samtranslator', '__init__.py'))\n return re.search(r\"__version__ = '([^']+)'\", content).group(1)\n\n\ndef read_requirements(req='base.txt'):\n content = read(os.path.join('requirements', req))\n return [line for line in content.split(os.linesep)\n if not line.strip().startswith('#')]\n\n\nsetup(\n name='aws-sam-translator',\n version=read_version(),\n description='AWS SAM Translator is a library that transform SAM templates into AWS CloudFormation templates',\n long_description=read('README.md'),\n author='Amazon Web Services',\n author_email='[email protected]',\n url='https://github.com/awslabs/serverless-application-model',\n license='Apache License 2.0',\n # Exclude all but the code folders\n packages=find_packages(exclude=('tests', 'docs', 'examples', 'versions')),\n install_requires=read_requirements('base.txt'),\n include_package_data=True,\n extras_require={\n 'dev': read_requirements('dev.txt')\n },\n keywords=\"AWS SAM Serverless Application Model\",\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Internet',\n 'Topic :: Software Development :: Build Tools',\n 'Topic :: Utilities'\n ]\n)\n", "path": "setup.py"}]} | 1,380 | 112
gh_patches_debug_30505 | rasdani/github-patches | git_diff | keras-team__autokeras-459 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
improve code quality using Codacy
<!---
**If you are reporting a bug:**
* Verify that your issue is not being currently addressed by other issues or pull requests.
* Please note that Auto-Keras is only compatible with **Python 3.6**.
* Tag the issue with the `bug report` tag.
-->
### Bug Description
<!---
A clear and concise description of what the bug is.
-->
We are now using Codacy and CodeClimate to improve our code quality.
Please try to solve anything reported by Codacy by following this [link](https://app.codacy.com/manual/jhfjhfj1/autokeras/issues/index).
On the page, you can see a list of **issues**.
You can click to see the details and suggestions to fix the issue.
Thanks.
</issue>
<code>
[start of autokeras/pretrained/voice_generator/deepvoice3_pytorch/conv.py]
1 # coding: utf-8
2 from torch import nn
3 from torch.nn import functional as F
4
5
6 class Conv1d(nn.Conv1d):
7 """Extended nn.Conv1d for incremental dilated convolutions
8 """
9
10 def __init__(self, *args, **kwargs):
11 super().__init__(*args, **kwargs)
12 self.clear_buffer()
13 self._linearized_weight = None
14
15 def incremental_forward(self, input):
16
17 # reshape weight
18 weight = self._get_linearized_weight()
19 kw = self.kernel_size[0]
20 dilation = self.dilation[0]
21
22 bsz = input.size(0) # input: bsz x len x dim
23 if kw > 1:
24 input = input.data
25 if self.input_buffer is None:
26 self.input_buffer = input.new(bsz, kw + (kw - 1) * (dilation - 1), input.size(2))
27 self.input_buffer.zero_()
28 else:
29 # shift buffer
30 self.input_buffer[:, :-1, :] = self.input_buffer[:, 1:, :].clone()
31 # append next input
32 self.input_buffer[:, -1, :] = input[:, -1, :]
33 input = self.input_buffer
34 if dilation > 1:
35 input = input[:, 0::dilation, :].contiguous()
36 output = F.linear(input.view(bsz, -1), weight, self.bias)
37 return output.view(bsz, 1, -1)
38
39 def clear_buffer(self):
40 self.input_buffer = None
41
42 def _get_linearized_weight(self):
43 if self._linearized_weight is None:
44 kw = self.kernel_size[0]
45 # nn.Conv1d
46 weight = self.weight.transpose(1, 2).contiguous()
47
48 assert weight.size() == (self.out_channels, kw, self.in_channels)
49 self._linearized_weight = weight.view(self.out_channels, -1)
50 return self._linearized_weight
51
[end of autokeras/pretrained/voice_generator/deepvoice3_pytorch/conv.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/autokeras/pretrained/voice_generator/deepvoice3_pytorch/conv.py b/autokeras/pretrained/voice_generator/deepvoice3_pytorch/conv.py
--- a/autokeras/pretrained/voice_generator/deepvoice3_pytorch/conv.py
+++ b/autokeras/pretrained/voice_generator/deepvoice3_pytorch/conv.py
@@ -12,29 +12,29 @@
self.clear_buffer()
self._linearized_weight = None
- def incremental_forward(self, input):
+ def incremental_forward(self, input_data):
# reshape weight
weight = self._get_linearized_weight()
kw = self.kernel_size[0]
dilation = self.dilation[0]
- bsz = input.size(0) # input: bsz x len x dim
+ bsz = input_data.size(0) # conv_input: bsz x len x dim
if kw > 1:
- input = input.data
+ input_data = input_data.data
if self.input_buffer is None:
- self.input_buffer = input.new(bsz, kw + (kw - 1) * (dilation - 1), input.size(2))
+ self.input_buffer = input_data.new(bsz, kw + (kw - 1) * (dilation - 1), input_data.size(2))
self.input_buffer.zero_()
else:
# shift buffer
self.input_buffer[:, :-1, :] = self.input_buffer[:, 1:, :].clone()
# append next input
- self.input_buffer[:, -1, :] = input[:, -1, :]
- input = self.input_buffer
+ self.input_buffer[:, -1, :] = input_data[:, -1, :]
+ input_data = self.input_buffer
if dilation > 1:
- input = input[:, 0::dilation, :].contiguous()
- output = F.linear(input.view(bsz, -1), weight, self.bias)
- return output.view(bsz, 1, -1)
+ input_data = input_data[:, 0::dilation, :].contiguous()
+ input_data = F.linear(input_data.view(bsz, -1), weight, self.bias)
+ return input_data.view(bsz, 1, -1)
def clear_buffer(self):
self.input_buffer = None
| {"golden_diff": "diff --git a/autokeras/pretrained/voice_generator/deepvoice3_pytorch/conv.py b/autokeras/pretrained/voice_generator/deepvoice3_pytorch/conv.py\n--- a/autokeras/pretrained/voice_generator/deepvoice3_pytorch/conv.py\n+++ b/autokeras/pretrained/voice_generator/deepvoice3_pytorch/conv.py\n@@ -12,29 +12,29 @@\n self.clear_buffer()\n self._linearized_weight = None\n \n- def incremental_forward(self, input):\n+ def incremental_forward(self, input_data):\n \n # reshape weight\n weight = self._get_linearized_weight()\n kw = self.kernel_size[0]\n dilation = self.dilation[0]\n \n- bsz = input.size(0) # input: bsz x len x dim\n+ bsz = input_data.size(0) # conv_input: bsz x len x dim\n if kw > 1:\n- input = input.data\n+ input_data = input_data.data\n if self.input_buffer is None:\n- self.input_buffer = input.new(bsz, kw + (kw - 1) * (dilation - 1), input.size(2))\n+ self.input_buffer = input_data.new(bsz, kw + (kw - 1) * (dilation - 1), input_data.size(2))\n self.input_buffer.zero_()\n else:\n # shift buffer\n self.input_buffer[:, :-1, :] = self.input_buffer[:, 1:, :].clone()\n # append next input\n- self.input_buffer[:, -1, :] = input[:, -1, :]\n- input = self.input_buffer\n+ self.input_buffer[:, -1, :] = input_data[:, -1, :]\n+ input_data = self.input_buffer\n if dilation > 1:\n- input = input[:, 0::dilation, :].contiguous()\n- output = F.linear(input.view(bsz, -1), weight, self.bias)\n- return output.view(bsz, 1, -1)\n+ input_data = input_data[:, 0::dilation, :].contiguous()\n+ input_data = F.linear(input_data.view(bsz, -1), weight, self.bias)\n+ return input_data.view(bsz, 1, -1)\n \n def clear_buffer(self):\n self.input_buffer = None\n", "issue": "improve code quality using Codacy\n<!---\r\n**If you are reporting a bug:**\r\n* Verify that your issue is not being currently addressed by other issues or pull requests.\r\n* Please note that Auto-Keras is only compatible with **Python 3.6**.\r\n* Tag the issue with the `bug report` tag.\r\n-->\r\n\r\n### Bug Description\r\n<!---\r\nA clear and concise description of what the bug is.\r\n-->\r\nWe are now using Codacy and CodeClimate to improve our code quality.\r\n\r\nPlease try to solve anything reported by Codacy by following this [link](https://app.codacy.com/manual/jhfjhfj1/autokeras/issues/index).\r\nOn the page, you can see a list of **issues**.\r\nYou can click to see the details and suggestions to fix the issue.\r\n\r\nThanks.\n", "before_files": [{"content": "# coding: utf-8\nfrom torch import nn\nfrom torch.nn import functional as F\n\n\nclass Conv1d(nn.Conv1d):\n \"\"\"Extended nn.Conv1d for incremental dilated convolutions\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.clear_buffer()\n self._linearized_weight = None\n\n def incremental_forward(self, input):\n\n # reshape weight\n weight = self._get_linearized_weight()\n kw = self.kernel_size[0]\n dilation = self.dilation[0]\n\n bsz = input.size(0) # input: bsz x len x dim\n if kw > 1:\n input = input.data\n if self.input_buffer is None:\n self.input_buffer = input.new(bsz, kw + (kw - 1) * (dilation - 1), input.size(2))\n self.input_buffer.zero_()\n else:\n # shift buffer\n self.input_buffer[:, :-1, :] = self.input_buffer[:, 1:, :].clone()\n # append next input\n self.input_buffer[:, -1, :] = input[:, -1, :]\n input = self.input_buffer\n if dilation > 1:\n input = input[:, 0::dilation, :].contiguous()\n output = F.linear(input.view(bsz, -1), weight, self.bias)\n return output.view(bsz, 1, -1)\n\n def clear_buffer(self):\n 
self.input_buffer = None\n\n def _get_linearized_weight(self):\n if self._linearized_weight is None:\n kw = self.kernel_size[0]\n # nn.Conv1d\n weight = self.weight.transpose(1, 2).contiguous()\n\n assert weight.size() == (self.out_channels, kw, self.in_channels)\n self._linearized_weight = weight.view(self.out_channels, -1)\n return self._linearized_weight\n", "path": "autokeras/pretrained/voice_generator/deepvoice3_pytorch/conv.py"}]} | 1,249 | 527 |
gh_patches_debug_8653 | rasdani/github-patches | git_diff | Health-Informatics-UoN__Carrot-Mapper-732 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Scan Report Edit Table
</issue>
<code>
[start of app/api/proxy/urls.py]
1 from config import settings
2 from django.urls import re_path
3 from revproxy.views import ProxyView
4
5 # A set of urls that will override any root paths requested, and proxy them to the Next.js app.
6 urlpatterns = [
7 # /scanreports/ and escape any further paths
8 re_path(
9 r"^scanreports/(?P<path>(?!create))$",
10 ProxyView.as_view(upstream=f"{settings.NEXTJS_URL}/scanreports"),
11 name="scan-report-list",
12 ),
13 re_path(
14 r"^scanreports/(?P<path>\d+)/?$",
15 ProxyView.as_view(upstream=f"{settings.NEXTJS_URL}/scanreports/"),
16 name="scan-report-tables",
17 ),
18 re_path(
19 r"^scanreports/(?P<path>\d+/tables/\d+)/$",
20 ProxyView.as_view(upstream=f"{settings.NEXTJS_URL}/scanreports/"),
21 name="scan-report-fields",
22 ),
23 re_path(
24 r"^scanreports/(?P<path>\d+/tables/\d+/fields/\d+)/$",
25 ProxyView.as_view(upstream=f"{settings.NEXTJS_URL}/scanreports/"),
26 name="scan-report-values",
27 ),
28 re_path(
29 r"^datasets/(?P<path>(?![\d/]).*)$",
30 ProxyView.as_view(upstream=f"{settings.NEXTJS_URL}/datasets"),
31 name="datasets-list",
32 ),
33 re_path(
34 r"^datasets/(?P<path>\d+)/?$",
35 ProxyView.as_view(upstream=f"{settings.NEXTJS_URL}/datasets"),
36 name="datasets-scanreports-list",
37 ),
38 re_path(
39 "_next/(?P<path>.*)$",
40 ProxyView.as_view(upstream=f"{settings.NEXTJS_URL}/_next"),
41 ),
42 ]
43
[end of app/api/proxy/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/api/proxy/urls.py b/app/api/proxy/urls.py
--- a/app/api/proxy/urls.py
+++ b/app/api/proxy/urls.py
@@ -25,6 +25,11 @@
ProxyView.as_view(upstream=f"{settings.NEXTJS_URL}/scanreports/"),
name="scan-report-values",
),
+ re_path(
+ r"^scanreports/(?P<path>\d+/tables/\d+/update)/$",
+ ProxyView.as_view(upstream=f"{settings.NEXTJS_URL}/scanreports/"),
+ name="scan-report-edit-table",
+ ),
re_path(
r"^datasets/(?P<path>(?![\d/]).*)$",
ProxyView.as_view(upstream=f"{settings.NEXTJS_URL}/datasets"),
| {"golden_diff": "diff --git a/app/api/proxy/urls.py b/app/api/proxy/urls.py\n--- a/app/api/proxy/urls.py\n+++ b/app/api/proxy/urls.py\n@@ -25,6 +25,11 @@\n ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/scanreports/\"),\n name=\"scan-report-values\",\n ),\n+ re_path(\n+ r\"^scanreports/(?P<path>\\d+/tables/\\d+/update)/$\",\n+ ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/scanreports/\"),\n+ name=\"scan-report-edit-table\",\n+ ),\n re_path(\n r\"^datasets/(?P<path>(?![\\d/]).*)$\",\n ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/datasets\"),\n", "issue": "Scan Report Edit Table\n\n", "before_files": [{"content": "from config import settings\nfrom django.urls import re_path\nfrom revproxy.views import ProxyView\n\n# A set of urls that will override any root paths requested, and proxy them to the Next.js app.\nurlpatterns = [\n # /scanreports/ and escape any further paths\n re_path(\n r\"^scanreports/(?P<path>(?!create))$\",\n ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/scanreports\"),\n name=\"scan-report-list\",\n ),\n re_path(\n r\"^scanreports/(?P<path>\\d+)/?$\",\n ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/scanreports/\"),\n name=\"scan-report-tables\",\n ),\n re_path(\n r\"^scanreports/(?P<path>\\d+/tables/\\d+)/$\",\n ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/scanreports/\"),\n name=\"scan-report-fields\",\n ),\n re_path(\n r\"^scanreports/(?P<path>\\d+/tables/\\d+/fields/\\d+)/$\",\n ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/scanreports/\"),\n name=\"scan-report-values\",\n ),\n re_path(\n r\"^datasets/(?P<path>(?![\\d/]).*)$\",\n ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/datasets\"),\n name=\"datasets-list\",\n ),\n re_path(\n r\"^datasets/(?P<path>\\d+)/?$\",\n ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/datasets\"),\n name=\"datasets-scanreports-list\",\n ),\n re_path(\n \"_next/(?P<path>.*)$\",\n ProxyView.as_view(upstream=f\"{settings.NEXTJS_URL}/_next\"),\n ),\n]\n", "path": "app/api/proxy/urls.py"}]} | 998 | 177 |
gh_patches_debug_8969 | rasdani/github-patches | git_diff | aws__serverless-application-model-610 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Rendering on PyPI is broken
**Description:**
Rendering of the README on PyPI is broken because it expects RST by default and Markdown is being uploaded.
**Steps to reproduce the issue:**
1. Go to https://pypi.org/project/aws-sam-translator/
**Observed result:**
Raw markdown
**Expected result:**
Links and images working
</issue>
<code>
[start of setup.py]
1 # -*- coding: utf-8 -*-
2 #
3 # setup.py
4 #
5 # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
6 #
7 # Licensed under the Apache License, Version 2.0 (the "License");
8 # you may not use this file except in compliance with the License.
9 # You may obtain a copy of the License at
10 #
11 # http://www.apache.org/licenses/LICENSE-2.0
12 #
13 # Unless required by applicable law or agreed to in writing, software
14 # distributed under the License is distributed on an "AS IS" BASIS,
15 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 # See the License for the specific language governing permissions and
17 # limitations under the License.
18 #
19
20 """
21 AWS SAM Serverless Application Model
22 """
23 import io
24 import os
25 import re
26
27 from setuptools import setup, find_packages
28
29
30 def read(*filenames, **kwargs):
31 encoding = kwargs.get('encoding', 'utf-8')
32 sep = kwargs.get('sep', os.linesep)
33 buf = []
34 for filename in filenames:
35 with io.open(filename, encoding=encoding) as f:
36 buf.append(f.read())
37 return sep.join(buf)
38
39
40 def read_version():
41 content = read(os.path.join(
42 os.path.dirname(__file__), 'samtranslator', '__init__.py'))
43 return re.search(r"__version__ = '([^']+)'", content).group(1)
44
45
46 def read_requirements(req='base.txt'):
47 content = read(os.path.join('requirements', req))
48 return [line for line in content.split(os.linesep)
49 if not line.strip().startswith('#')]
50
51
52 setup(
53 name='aws-sam-translator',
54 version=read_version(),
55 description='AWS SAM Translator is a library that transform SAM templates into AWS CloudFormation templates',
56 long_description=read('README.md'),
57 author='Amazon Web Services',
58 author_email='[email protected]',
59 url='https://github.com/awslabs/serverless-application-model',
60 license='Apache License 2.0',
61 # Exclude all but the code folders
62 packages=find_packages(exclude=('tests', 'docs', 'examples', 'versions')),
63 install_requires=read_requirements('base.txt'),
64 include_package_data=True,
65 extras_require={
66 'dev': read_requirements('dev.txt')
67 },
68 keywords="AWS SAM Serverless Application Model",
69 classifiers=[
70 'Development Status :: 4 - Beta',
71 'Environment :: Console',
72 'Environment :: Other Environment',
73 'Intended Audience :: Developers',
74 'Intended Audience :: Information Technology',
75 'License :: OSI Approved :: Apache Software License',
76 'Operating System :: OS Independent',
77 'Programming Language :: Python',
78 'Programming Language :: Python :: 2.7',
79 'Topic :: Internet',
80 'Topic :: Software Development :: Build Tools',
81 'Topic :: Utilities'
82 ]
83 )
84
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -54,6 +54,7 @@
version=read_version(),
description='AWS SAM Translator is a library that transform SAM templates into AWS CloudFormation templates',
long_description=read('README.md'),
+ long_description_content_type='text/markdown',
author='Amazon Web Services',
author_email='[email protected]',
url='https://github.com/awslabs/serverless-application-model',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -54,6 +54,7 @@\n version=read_version(),\n description='AWS SAM Translator is a library that transform SAM templates into AWS CloudFormation templates',\n long_description=read('README.md'),\n+ long_description_content_type='text/markdown',\n author='Amazon Web Services',\n author_email='[email protected]',\n url='https://github.com/awslabs/serverless-application-model',\n", "issue": "Rendering on PyPI is broken\n**Description:**\r\nRendering of the README on PyPI is broken because it expects RST by default and Markdown is being uploaded.\r\n\r\n**Steps to reproduce the issue:**\r\n\r\n1. Go to https://pypi.org/project/aws-sam-translator/\r\n\r\n**Observed result:**\r\n\r\nRaw markdown\r\n\r\n**Expected result:**\r\n\r\nLinks and images working\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# setup.py\n#\n# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nAWS SAM Serverless Application Model\n\"\"\"\nimport io\nimport os\nimport re\n\nfrom setuptools import setup, find_packages\n\n\ndef read(*filenames, **kwargs):\n encoding = kwargs.get('encoding', 'utf-8')\n sep = kwargs.get('sep', os.linesep)\n buf = []\n for filename in filenames:\n with io.open(filename, encoding=encoding) as f:\n buf.append(f.read())\n return sep.join(buf)\n\n\ndef read_version():\n content = read(os.path.join(\n os.path.dirname(__file__), 'samtranslator', '__init__.py'))\n return re.search(r\"__version__ = '([^']+)'\", content).group(1)\n\n\ndef read_requirements(req='base.txt'):\n content = read(os.path.join('requirements', req))\n return [line for line in content.split(os.linesep)\n if not line.strip().startswith('#')]\n\n\nsetup(\n name='aws-sam-translator',\n version=read_version(),\n description='AWS SAM Translator is a library that transform SAM templates into AWS CloudFormation templates',\n long_description=read('README.md'),\n author='Amazon Web Services',\n author_email='[email protected]',\n url='https://github.com/awslabs/serverless-application-model',\n license='Apache License 2.0',\n # Exclude all but the code folders\n packages=find_packages(exclude=('tests', 'docs', 'examples', 'versions')),\n install_requires=read_requirements('base.txt'),\n include_package_data=True,\n extras_require={\n 'dev': read_requirements('dev.txt')\n },\n keywords=\"AWS SAM Serverless Application Model\",\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Internet',\n 'Topic :: Software Development :: Build Tools',\n 'Topic :: Utilities'\n ]\n)\n", "path": "setup.py"}]} | 1,380 | 112 |
gh_patches_debug_18843 | rasdani/github-patches | git_diff | iterative__dvc-2366 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
update `url` argument description in `get` and `import` help output
to `URL of Git repository with DVC project to download from.`
per https://github.com/iterative/dvc.org/pull/464#pullrequestreview-259776737
</issue>
<code>
[start of dvc/command/imp.py]
1 from __future__ import unicode_literals
2
3 import argparse
4 import logging
5
6 from dvc.exceptions import DvcException
7 from dvc.command.base import CmdBase, append_doc_link
8
9
10 logger = logging.getLogger(__name__)
11
12
13 class CmdImport(CmdBase):
14 def run(self):
15 try:
16 self.repo.imp(
17 self.args.url,
18 self.args.path,
19 out=self.args.out,
20 rev=self.args.rev,
21 )
22 except DvcException:
23 logger.exception(
24 "failed to import '{}' from '{}'.".format(
25 self.args.path, self.args.url
26 )
27 )
28 return 1
29 return 0
30
31
32 def add_parser(subparsers, parent_parser):
33 IMPORT_HELP = (
34 "Download data from DVC repository and take it under DVC control."
35 )
36
37 import_parser = subparsers.add_parser(
38 "import",
39 parents=[parent_parser],
40 description=append_doc_link(IMPORT_HELP, "import"),
41 help=IMPORT_HELP,
42 formatter_class=argparse.RawTextHelpFormatter,
43 )
44 import_parser.add_argument("url", help="DVC repository URL.")
45 import_parser.add_argument(
46 "path", help="Path to data within DVC repository."
47 )
48 import_parser.add_argument(
49 "-o", "--out", nargs="?", help="Destination path to put data to."
50 )
51 import_parser.add_argument(
52 "--rev", nargs="?", help="DVC repository git revision."
53 )
54 import_parser.set_defaults(func=CmdImport)
55
[end of dvc/command/imp.py]
[start of dvc/command/get.py]
1 from __future__ import unicode_literals
2
3 import argparse
4 import logging
5
6 from dvc.repo import Repo
7 from dvc.exceptions import DvcException
8 from .base import CmdBaseNoRepo, append_doc_link
9
10
11 logger = logging.getLogger(__name__)
12
13
14 class CmdGet(CmdBaseNoRepo):
15 def run(self):
16 try:
17 Repo.get(
18 self.args.url,
19 path=self.args.path,
20 out=self.args.out,
21 rev=self.args.rev,
22 )
23 return 0
24 except DvcException:
25 logger.exception(
26 "failed to get '{}' from '{}'".format(
27 self.args.path, self.args.url
28 )
29 )
30 return 1
31
32
33 def add_parser(subparsers, parent_parser):
34 GET_HELP = "Download data from DVC repository."
35 get_parser = subparsers.add_parser(
36 "get",
37 parents=[parent_parser],
38 description=append_doc_link(GET_HELP, "get"),
39 help=GET_HELP,
40 formatter_class=argparse.RawDescriptionHelpFormatter,
41 )
42 get_parser.add_argument(
43 "url", help="DVC repository URL to download data from."
44 )
45 get_parser.add_argument("path", help="Path to data within DVC repository.")
46 get_parser.add_argument(
47 "-o", "--out", nargs="?", help="Destination path to put data to."
48 )
49 get_parser.add_argument(
50 "--rev", nargs="?", help="DVC repository git revision."
51 )
52 get_parser.set_defaults(func=CmdGet)
53
[end of dvc/command/get.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dvc/command/get.py b/dvc/command/get.py
--- a/dvc/command/get.py
+++ b/dvc/command/get.py
@@ -40,7 +40,7 @@
formatter_class=argparse.RawDescriptionHelpFormatter,
)
get_parser.add_argument(
- "url", help="DVC repository URL to download data from."
+ "url", help="URL of Git repository with DVC project to download from."
)
get_parser.add_argument("path", help="Path to data within DVC repository.")
get_parser.add_argument(
diff --git a/dvc/command/imp.py b/dvc/command/imp.py
--- a/dvc/command/imp.py
+++ b/dvc/command/imp.py
@@ -41,7 +41,9 @@
help=IMPORT_HELP,
formatter_class=argparse.RawTextHelpFormatter,
)
- import_parser.add_argument("url", help="DVC repository URL.")
+ import_parser.add_argument(
+ "url", help="URL of Git repository with DVC project to download from."
+ )
import_parser.add_argument(
"path", help="Path to data within DVC repository."
)
| {"golden_diff": "diff --git a/dvc/command/get.py b/dvc/command/get.py\n--- a/dvc/command/get.py\n+++ b/dvc/command/get.py\n@@ -40,7 +40,7 @@\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n get_parser.add_argument(\n- \"url\", help=\"DVC repository URL to download data from.\"\n+ \"url\", help=\"URL of Git repository with DVC project to download from.\"\n )\n get_parser.add_argument(\"path\", help=\"Path to data within DVC repository.\")\n get_parser.add_argument(\ndiff --git a/dvc/command/imp.py b/dvc/command/imp.py\n--- a/dvc/command/imp.py\n+++ b/dvc/command/imp.py\n@@ -41,7 +41,9 @@\n help=IMPORT_HELP,\n formatter_class=argparse.RawTextHelpFormatter,\n )\n- import_parser.add_argument(\"url\", help=\"DVC repository URL.\")\n+ import_parser.add_argument(\n+ \"url\", help=\"URL of Git repository with DVC project to download from.\"\n+ )\n import_parser.add_argument(\n \"path\", help=\"Path to data within DVC repository.\"\n )\n", "issue": "update `url` argument description in `get` and `import` help output\nto `URL of Git repository with DVC project to download from.`\r\n\r\nper https://github.com/iterative/dvc.org/pull/464#pullrequestreview-259776737\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport argparse\nimport logging\n\nfrom dvc.exceptions import DvcException\nfrom dvc.command.base import CmdBase, append_doc_link\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CmdImport(CmdBase):\n def run(self):\n try:\n self.repo.imp(\n self.args.url,\n self.args.path,\n out=self.args.out,\n rev=self.args.rev,\n )\n except DvcException:\n logger.exception(\n \"failed to import '{}' from '{}'.\".format(\n self.args.path, self.args.url\n )\n )\n return 1\n return 0\n\n\ndef add_parser(subparsers, parent_parser):\n IMPORT_HELP = (\n \"Download data from DVC repository and take it under DVC control.\"\n )\n\n import_parser = subparsers.add_parser(\n \"import\",\n parents=[parent_parser],\n description=append_doc_link(IMPORT_HELP, \"import\"),\n help=IMPORT_HELP,\n formatter_class=argparse.RawTextHelpFormatter,\n )\n import_parser.add_argument(\"url\", help=\"DVC repository URL.\")\n import_parser.add_argument(\n \"path\", help=\"Path to data within DVC repository.\"\n )\n import_parser.add_argument(\n \"-o\", \"--out\", nargs=\"?\", help=\"Destination path to put data to.\"\n )\n import_parser.add_argument(\n \"--rev\", nargs=\"?\", help=\"DVC repository git revision.\"\n )\n import_parser.set_defaults(func=CmdImport)\n", "path": "dvc/command/imp.py"}, {"content": "from __future__ import unicode_literals\n\nimport argparse\nimport logging\n\nfrom dvc.repo import Repo\nfrom dvc.exceptions import DvcException\nfrom .base import CmdBaseNoRepo, append_doc_link\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CmdGet(CmdBaseNoRepo):\n def run(self):\n try:\n Repo.get(\n self.args.url,\n path=self.args.path,\n out=self.args.out,\n rev=self.args.rev,\n )\n return 0\n except DvcException:\n logger.exception(\n \"failed to get '{}' from '{}'\".format(\n self.args.path, self.args.url\n )\n )\n return 1\n\n\ndef add_parser(subparsers, parent_parser):\n GET_HELP = \"Download data from DVC repository.\"\n get_parser = subparsers.add_parser(\n \"get\",\n parents=[parent_parser],\n description=append_doc_link(GET_HELP, \"get\"),\n help=GET_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n get_parser.add_argument(\n \"url\", help=\"DVC repository URL to download data from.\"\n )\n get_parser.add_argument(\"path\", help=\"Path to data within 
DVC repository.\")\n get_parser.add_argument(\n \"-o\", \"--out\", nargs=\"?\", help=\"Destination path to put data to.\"\n )\n get_parser.add_argument(\n \"--rev\", nargs=\"?\", help=\"DVC repository git revision.\"\n )\n get_parser.set_defaults(func=CmdGet)\n", "path": "dvc/command/get.py"}]} | 1,457 | 259 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.