| problem_id | source | task_type | in_source_id | prompt | golden_diff | verification_info | num_tokens_prompt | num_tokens_diff |
|---|---|---|---|---|---|---|---|---|
| stringlengths 18-22 | stringclasses 1 value | stringclasses 1 value | stringlengths 13-58 | stringlengths 1.71k-9.01k | stringlengths 151-4.94k | stringlengths 465-11.3k | int64 557-2.05k | int64 48-1.02k |
gh_patches_debug_32843 | rasdani/github-patches | git_diff | nextcloud__appstore-693 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Password reset sends outdated reset token
When requesting a password reset link from apps.nextcloud.com, the token in the link of the reset e-mail does not work; the website reports:
> Bad API Token
>
> The password reset link was invalid, possibly because it has already been used. Please request a new password reset.
When requesting a new password reset, an e-mail with the very same token is sent. In consequence it is impossible to change the password.
## Details
* Firefox 72.0.2
## Steps to reproduce
1. "Forget" your password
2. Request password reset link
3. Receive e-mail and open reset link
4. Observe error, follow suggestion and request new link
5. Receive e-mail with exactly the same token
</issue>
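A note on the bug above before the code: the reset e-mail is minted with Django's `default_token_generator`, while django-allauth's confirmation view checks the key with its own `EmailAwarePasswordResetTokenGenerator`, so the two never agree; and because Django's generator is deterministic for unchanged user state, every re-request reproduces the identical rejected token. The golden diff at the end of this row swaps the generator. A minimal sketch of the corrected minting step, assuming django-allauth is installed and configured (the helper name is illustrative):

```python
# Mint the key with the same generator that allauth's
# reset-confirmation view later uses to validate it.
from allauth.account.forms import EmailAwarePasswordResetTokenGenerator

token_generator = EmailAwarePasswordResetTokenGenerator()

def make_reset_key(user):  # illustrative helper, not part of the patch
    return token_generator.make_token(user)
```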
<code>
[start of nextcloudappstore/user/forms.py]
1 from allauth.account.utils import filter_users_by_email, user_username, \
2 user_pk_to_url_str
3 from django import forms
4 from django.contrib.auth import get_user_model
5 from django.forms import EmailField, CharField, PasswordInput
6 from django.utils.translation import ugettext_lazy as _
7 from snowpenguin.django.recaptcha2.fields import ReCaptchaField
8 from snowpenguin.django.recaptcha2.widgets import ReCaptchaWidget
9
10
11 class SignupFormRecaptcha(forms.Form):
12 """integrate a recaptcha field."""
13 recaptcha = ReCaptchaField(widget=ReCaptchaWidget())
14 first_name = CharField(max_length=30, label=_('First name'))
15 last_name = CharField(max_length=30, label=_('Last name'))
16
17 def signup(self, request, user):
18 user.first_name = self.cleaned_data['first_name']
19 user.last_name = self.cleaned_data['last_name']
20 user.save()
21
22
23 class DeleteAccountForm(forms.Form):
24 email = EmailField(required=True, label=_('Your e-mail address'))
25
26 def __init__(self, *args, **kwargs):
27 self.user = kwargs.pop('user', None)
28 super().__init__(*args, **kwargs)
29
30 def clean_email(self):
31 email = self.cleaned_data.get('email')
32 if self.user and self.user.email == email:
33 return email
34 else:
35 raise forms.ValidationError(_(
36 'The given e-mail address does not match your e-mail address'))
37
38
39 class AccountForm(forms.ModelForm):
40 passwd = CharField(widget=PasswordInput(), label=_('Confirm password'),
41 help_text=_('Password is required to prevent '
42 'unauthorized users from changing your '
43 'email address and resetting your '
44 'password. This field does not update your '
45 'password!'))
46
47 class Meta:
48 model = get_user_model()
49 fields = ('first_name', 'last_name', 'email')
50
51 def clean_email(self):
52 value = self.cleaned_data['email']
53 users = filter_users_by_email(value)
54 if [u for u in users if u.pk != self.instance.pk]:
55 msg = _(
56 'This e-mail address is already associated with another '
57 'account.')
58 raise forms.ValidationError(msg)
59 return value
60
61 def clean_passwd(self):
62 value = self.cleaned_data['passwd']
63 if self.instance.check_password(value):
64 return value
65 else:
66 raise forms.ValidationError(_('Invalid password'))
67
68
69 class CustomResetPasswordForm(forms.Form):
70 # remove this class once issue #1307 is resolved django-allauth
71 email = forms.EmailField(
72 label=_("E-mail"),
73 required=True,
74 widget=forms.TextInput(attrs={
75 "type": "email",
76 "size": "30",
77 "placeholder": _("E-mail address"),
78 })
79 )
80
81 def clean_email(self):
82 email = self.cleaned_data["email"]
83 from allauth.account.adapter import get_adapter
84 email = get_adapter().clean_email(email)
85 self.users = filter_users_by_email(email)
86
87 return self.cleaned_data["email"]
88
89 def save(self, request, **kwargs):
90 from django.contrib.sites.shortcuts import get_current_site
91 current_site = get_current_site(request)
92 email = self.cleaned_data["email"]
93 from django.contrib.auth.tokens import default_token_generator
94 token_generator = kwargs.get("token_generator",
95 default_token_generator)
96
97 for user in self.users:
98 temp_key = token_generator.make_token(user)
99
100 # save it to the password reset model
101 # password_reset = PasswordReset(user=user, temp_key=temp_key)
102 # password_reset.save()
103
104 # send the password reset email
105 from django.urls import reverse
106 path = reverse("account_reset_password_from_key",
107 kwargs=dict(uidb36=user_pk_to_url_str(user),
108 key=temp_key))
109 from allauth.utils import build_absolute_uri
110 url = build_absolute_uri(
111 request, path)
112
113 context = {"current_site": current_site,
114 "user": user,
115 "password_reset_url": url,
116 "request": request}
117
118 from allauth.account import app_settings
119
120 if app_settings.AUTHENTICATION_METHOD \
121 != app_settings.AuthenticationMethod.EMAIL:
122 context['username'] = user_username(user)
123 from allauth.account.adapter import get_adapter
124 get_adapter(request).send_mail(
125 'account/email/password_reset_key',
126 email,
127 context)
128 return self.cleaned_data["email"]
129
[end of nextcloudappstore/user/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nextcloudappstore/user/forms.py b/nextcloudappstore/user/forms.py
--- a/nextcloudappstore/user/forms.py
+++ b/nextcloudappstore/user/forms.py
@@ -1,3 +1,4 @@
+from allauth.account.forms import EmailAwarePasswordResetTokenGenerator
from allauth.account.utils import filter_users_by_email, user_username, \
user_pk_to_url_str
from django import forms
@@ -82,7 +83,7 @@
email = self.cleaned_data["email"]
from allauth.account.adapter import get_adapter
email = get_adapter().clean_email(email)
- self.users = filter_users_by_email(email)
+ self.users = filter_users_by_email(email, is_active=True)
return self.cleaned_data["email"]
@@ -90,9 +91,7 @@
from django.contrib.sites.shortcuts import get_current_site
current_site = get_current_site(request)
email = self.cleaned_data["email"]
- from django.contrib.auth.tokens import default_token_generator
- token_generator = kwargs.get("token_generator",
- default_token_generator)
+ token_generator = EmailAwarePasswordResetTokenGenerator()
for user in self.users:
temp_key = token_generator.make_token(user)
@@ -118,7 +117,7 @@
from allauth.account import app_settings
if app_settings.AUTHENTICATION_METHOD \
- != app_settings.AuthenticationMethod.EMAIL:
+ != app_settings.AuthenticationMethod.EMAIL:
context['username'] = user_username(user)
from allauth.account.adapter import get_adapter
get_adapter(request).send_mail(
| {"golden_diff": "diff --git a/nextcloudappstore/user/forms.py b/nextcloudappstore/user/forms.py\n--- a/nextcloudappstore/user/forms.py\n+++ b/nextcloudappstore/user/forms.py\n@@ -1,3 +1,4 @@\n+from allauth.account.forms import EmailAwarePasswordResetTokenGenerator\n from allauth.account.utils import filter_users_by_email, user_username, \\\n user_pk_to_url_str\n from django import forms\n@@ -82,7 +83,7 @@\n email = self.cleaned_data[\"email\"]\n from allauth.account.adapter import get_adapter\n email = get_adapter().clean_email(email)\n- self.users = filter_users_by_email(email)\n+ self.users = filter_users_by_email(email, is_active=True)\n \n return self.cleaned_data[\"email\"]\n \n@@ -90,9 +91,7 @@\n from django.contrib.sites.shortcuts import get_current_site\n current_site = get_current_site(request)\n email = self.cleaned_data[\"email\"]\n- from django.contrib.auth.tokens import default_token_generator\n- token_generator = kwargs.get(\"token_generator\",\n- default_token_generator)\n+ token_generator = EmailAwarePasswordResetTokenGenerator()\n \n for user in self.users:\n temp_key = token_generator.make_token(user)\n@@ -118,7 +117,7 @@\n from allauth.account import app_settings\n \n if app_settings.AUTHENTICATION_METHOD \\\n- != app_settings.AuthenticationMethod.EMAIL:\n+ != app_settings.AuthenticationMethod.EMAIL:\n context['username'] = user_username(user)\n from allauth.account.adapter import get_adapter\n get_adapter(request).send_mail(\n", "issue": "Password reset sends outdated reset token\nWhen requesting a password reset link from apps.nextcloud.com, the token in the link of the reset e-mail does not work, the website reports:\r\n\r\n> Bad API Token\r\n> \r\n> The password reset link was invalid, possibly because it has already been used. Please request a new password reset.\r\n\r\nWhen requesting a new password reset, an e-mail with the very same token is sent. In consequence it is impossible to change the password.\r\n\r\n## Details\r\n\r\n* Firefox 72.0.2\r\n\r\n## Steps to reproduce\r\n\r\n1. \"Forget\" your password\r\n2. Request password reset link\r\n3. Receive e-mail and open reset link\r\n4. Observe error, follow suggestion and request new link\r\n5. 
Receive e-mail with exactly the same token\r\n\n", "before_files": [{"content": "from allauth.account.utils import filter_users_by_email, user_username, \\\n user_pk_to_url_str\nfrom django import forms\nfrom django.contrib.auth import get_user_model\nfrom django.forms import EmailField, CharField, PasswordInput\nfrom django.utils.translation import ugettext_lazy as _\nfrom snowpenguin.django.recaptcha2.fields import ReCaptchaField\nfrom snowpenguin.django.recaptcha2.widgets import ReCaptchaWidget\n\n\nclass SignupFormRecaptcha(forms.Form):\n \"\"\"integrate a recaptcha field.\"\"\"\n recaptcha = ReCaptchaField(widget=ReCaptchaWidget())\n first_name = CharField(max_length=30, label=_('First name'))\n last_name = CharField(max_length=30, label=_('Last name'))\n\n def signup(self, request, user):\n user.first_name = self.cleaned_data['first_name']\n user.last_name = self.cleaned_data['last_name']\n user.save()\n\n\nclass DeleteAccountForm(forms.Form):\n email = EmailField(required=True, label=_('Your e-mail address'))\n\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user', None)\n super().__init__(*args, **kwargs)\n\n def clean_email(self):\n email = self.cleaned_data.get('email')\n if self.user and self.user.email == email:\n return email\n else:\n raise forms.ValidationError(_(\n 'The given e-mail address does not match your e-mail address'))\n\n\nclass AccountForm(forms.ModelForm):\n passwd = CharField(widget=PasswordInput(), label=_('Confirm password'),\n help_text=_('Password is required to prevent '\n 'unauthorized users from changing your '\n 'email address and resetting your '\n 'password. This field does not update your '\n 'password!'))\n\n class Meta:\n model = get_user_model()\n fields = ('first_name', 'last_name', 'email')\n\n def clean_email(self):\n value = self.cleaned_data['email']\n users = filter_users_by_email(value)\n if [u for u in users if u.pk != self.instance.pk]:\n msg = _(\n 'This e-mail address is already associated with another '\n 'account.')\n raise forms.ValidationError(msg)\n return value\n\n def clean_passwd(self):\n value = self.cleaned_data['passwd']\n if self.instance.check_password(value):\n return value\n else:\n raise forms.ValidationError(_('Invalid password'))\n\n\nclass CustomResetPasswordForm(forms.Form):\n # remove this class once issue #1307 is resolved django-allauth\n email = forms.EmailField(\n label=_(\"E-mail\"),\n required=True,\n widget=forms.TextInput(attrs={\n \"type\": \"email\",\n \"size\": \"30\",\n \"placeholder\": _(\"E-mail address\"),\n })\n )\n\n def clean_email(self):\n email = self.cleaned_data[\"email\"]\n from allauth.account.adapter import get_adapter\n email = get_adapter().clean_email(email)\n self.users = filter_users_by_email(email)\n\n return self.cleaned_data[\"email\"]\n\n def save(self, request, **kwargs):\n from django.contrib.sites.shortcuts import get_current_site\n current_site = get_current_site(request)\n email = self.cleaned_data[\"email\"]\n from django.contrib.auth.tokens import default_token_generator\n token_generator = kwargs.get(\"token_generator\",\n default_token_generator)\n\n for user in self.users:\n temp_key = token_generator.make_token(user)\n\n # save it to the password reset model\n # password_reset = PasswordReset(user=user, temp_key=temp_key)\n # password_reset.save()\n\n # send the password reset email\n from django.urls import reverse\n path = reverse(\"account_reset_password_from_key\",\n kwargs=dict(uidb36=user_pk_to_url_str(user),\n key=temp_key))\n from allauth.utils 
import build_absolute_uri\n url = build_absolute_uri(\n request, path)\n\n context = {\"current_site\": current_site,\n \"user\": user,\n \"password_reset_url\": url,\n \"request\": request}\n\n from allauth.account import app_settings\n\n if app_settings.AUTHENTICATION_METHOD \\\n != app_settings.AuthenticationMethod.EMAIL:\n context['username'] = user_username(user)\n from allauth.account.adapter import get_adapter\n get_adapter(request).send_mail(\n 'account/email/password_reset_key',\n email,\n context)\n return self.cleaned_data[\"email\"]\n", "path": "nextcloudappstore/user/forms.py"}]} | 1,925 | 351 |
gh_patches_debug_35444 | rasdani/github-patches | git_diff | InstaPy__InstaPy-831 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot focus element error
I'm running the script on Mac and after some time I get this error:
Message: unknown error: cannot focus element
(Session info: chrome=61.0.3163.100)
(Driver info: chromedriver=2.32.498537 (cb2f855cbc7b82e20387eaf9a43f6b99b6105061),platform=Mac OS X 10.12.3 x86_64)
Now I'm trying to update chromedriver with all of the packages to check whether they are the reason, but does anybody else get this error?
</issue>
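Before the code, a note on how the fix handles this: the golden diff at the end of this row opens the comment section with a JavaScript click (which sidesteps Selenium's focus handling), guards it with `WebDriverException`, and re-locates the input after every interaction instead of reusing a stale handle. A condensed sketch of that guarded click, reusing the selectors from the diff (Selenium 3 API, as in this codebase):

```python
from selenium.common.exceptions import WebDriverException

def open_comment_section(browser):
    warning = ('--> Warning: Comment Button Not Found:'
               ' May cause issues with browser windows of smaller widths')
    comment_elem = browser.find_elements_by_xpath(
        "//a[@role='button']/span[text()='Comment']/..")
    if not comment_elem:
        print(warning)
        return
    try:
        # Clicking through JavaScript avoids "cannot focus element".
        browser.execute_script("arguments[0].click();", comment_elem[0])
    except WebDriverException:
        print(warning)
```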
<code>
[start of instapy/comment_util.py]
1 # -*- coding: utf-8 -*-
2 """Module which handles the commenting features"""
3 from random import choice
4 from .time_util import sleep
5 import emoji
6
7
8 def comment_image(browser, comments):
9 """Checks if it should comment on the image"""
10 rand_comment = (choice(comments))
11 rand_comment = emoji.demojize(rand_comment)
12 rand_comment = emoji.emojize(rand_comment, use_aliases=True)
13
14 comment_input = browser.find_elements_by_xpath(
15 '//textarea[@placeholder = "Add a comment…"]')
16 if len(comment_input) <= 0:
17 comment_input = browser.find_elements_by_xpath(
18 '//input[@placeholder = "Add a comment…"]')
19
20 if len(comment_input) > 0:
21 browser.execute_script(
22 "arguments[0].value = '" + rand_comment + " ';", comment_input[0])
23 # An extra space is added here and then deleted.
24 # This forces the input box to update the reactJS core
25 comment_input[0].send_keys("\b")
26 comment_input[0].submit()
27 else:
28 print('--> Warning: Comment Action Likely Failed:'
29 ' Comment Element not found')
30
31 print("--> Commented: {}".format(rand_comment.encode('utf-8')))
32 sleep(2)
33
34 return 1
35
[end of instapy/comment_util.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/instapy/comment_util.py b/instapy/comment_util.py
--- a/instapy/comment_util.py
+++ b/instapy/comment_util.py
@@ -2,8 +2,31 @@
"""Module which handles the commenting features"""
from random import choice
from .time_util import sleep
+from selenium.common.exceptions import WebDriverException
import emoji
+def get_comment_input(browser):
+ comment_input = browser.find_elements_by_xpath(
+ '//textarea[@placeholder = "Add a comment…"]')
+ if len(comment_input) <= 0:
+ comment_input = browser.find_elements_by_xpath(
+ '//input[@placeholder = "Add a comment…"]')
+ return comment_input
+
+def open_comment_section(browser):
+ missing_comment_elem_warning = (
+ '--> Warning: Comment Button Not Found:'
+ ' May cause issues with browser windows of smaller widths')
+ comment_elem = browser.find_elements_by_xpath(
+ "//a[@role='button']/span[text()='Comment']/..")
+ if len(comment_elem) > 0:
+ try:
+ browser.execute_script(
+ "arguments[0].click();", comment_elem[0])
+ except WebDriverException:
+ print(missing_comment_elem_warning)
+ else:
+ print(missing_comment_elem_warning)
def comment_image(browser, comments):
"""Checks if it should comment on the image"""
@@ -11,18 +34,19 @@
rand_comment = emoji.demojize(rand_comment)
rand_comment = emoji.emojize(rand_comment, use_aliases=True)
- comment_input = browser.find_elements_by_xpath(
- '//textarea[@placeholder = "Add a comment…"]')
- if len(comment_input) <= 0:
- comment_input = browser.find_elements_by_xpath(
- '//input[@placeholder = "Add a comment…"]')
+ open_comment_section(browser)
+ comment_input = get_comment_input(browser)
if len(comment_input) > 0:
+ comment_input[0].clear()
+ comment_input = get_comment_input(browser)
+
browser.execute_script(
"arguments[0].value = '" + rand_comment + " ';", comment_input[0])
# An extra space is added here and then deleted.
# This forces the input box to update the reactJS core
comment_input[0].send_keys("\b")
+ comment_input = get_comment_input(browser)
comment_input[0].submit()
else:
print('--> Warning: Comment Action Likely Failed:'
| {"golden_diff": "diff --git a/instapy/comment_util.py b/instapy/comment_util.py\n--- a/instapy/comment_util.py\n+++ b/instapy/comment_util.py\n@@ -2,8 +2,31 @@\n \"\"\"Module which handles the commenting features\"\"\"\n from random import choice\n from .time_util import sleep\n+from selenium.common.exceptions import WebDriverException\n import emoji\n \n+def get_comment_input(browser):\n+ comment_input = browser.find_elements_by_xpath(\n+ '//textarea[@placeholder = \"Add a comment\u2026\"]')\n+ if len(comment_input) <= 0:\n+ comment_input = browser.find_elements_by_xpath(\n+ '//input[@placeholder = \"Add a comment\u2026\"]')\n+ return comment_input\n+\n+def open_comment_section(browser):\n+ missing_comment_elem_warning = (\n+ '--> Warning: Comment Button Not Found:'\n+ ' May cause issues with browser windows of smaller widths')\n+ comment_elem = browser.find_elements_by_xpath(\n+ \"//a[@role='button']/span[text()='Comment']/..\")\n+ if len(comment_elem) > 0:\n+ try:\n+ browser.execute_script(\n+ \"arguments[0].click();\", comment_elem[0])\n+ except WebDriverException:\n+ print(missing_comment_elem_warning)\n+ else:\n+ print(missing_comment_elem_warning)\n \n def comment_image(browser, comments):\n \"\"\"Checks if it should comment on the image\"\"\"\n@@ -11,18 +34,19 @@\n rand_comment = emoji.demojize(rand_comment)\n rand_comment = emoji.emojize(rand_comment, use_aliases=True)\n \n- comment_input = browser.find_elements_by_xpath(\n- '//textarea[@placeholder = \"Add a comment\u2026\"]')\n- if len(comment_input) <= 0:\n- comment_input = browser.find_elements_by_xpath(\n- '//input[@placeholder = \"Add a comment\u2026\"]')\n+ open_comment_section(browser)\n+ comment_input = get_comment_input(browser)\n \n if len(comment_input) > 0:\n+ comment_input[0].clear()\n+ comment_input = get_comment_input(browser)\n+\n browser.execute_script(\n \"arguments[0].value = '\" + rand_comment + \" ';\", comment_input[0])\n # An extra space is added here and then deleted.\n # This forces the input box to update the reactJS core\n comment_input[0].send_keys(\"\\b\")\n+ comment_input = get_comment_input(browser)\n comment_input[0].submit()\n else:\n print('--> Warning: Comment Action Likely Failed:'\n", "issue": "Cannot focus element error\nI'm running the script on mac and after some time i get this error:\r\n\r\nMessage: unknown error: cannot focus element\r\n (Session info: chrome=61.0.3163.100)\r\n (Driver info: chromedriver=2.32.498537 (cb2f855cbc7b82e20387eaf9a43f6b99b6105061),platform=Mac OS X 10.12.3 x86_64)\r\n\r\nNow I'm trying to update chromedriver with all of the packages to check whether they are the reason, but does anybody else get this error?\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Module which handles the commenting features\"\"\"\nfrom random import choice\nfrom .time_util import sleep\nimport emoji\n\n\ndef comment_image(browser, comments):\n \"\"\"Checks if it should comment on the image\"\"\"\n rand_comment = (choice(comments))\n rand_comment = emoji.demojize(rand_comment)\n rand_comment = emoji.emojize(rand_comment, use_aliases=True)\n\n comment_input = browser.find_elements_by_xpath(\n '//textarea[@placeholder = \"Add a comment\u2026\"]')\n if len(comment_input) <= 0:\n comment_input = browser.find_elements_by_xpath(\n '//input[@placeholder = \"Add a comment\u2026\"]')\n\n if len(comment_input) > 0:\n browser.execute_script(\n \"arguments[0].value = '\" + rand_comment + \" ';\", comment_input[0])\n # An extra space is added here and then deleted.\n # This forces 
the input box to update the reactJS core\n comment_input[0].send_keys(\"\\b\")\n comment_input[0].submit()\n else:\n print('--> Warning: Comment Action Likely Failed:'\n ' Comment Element not found')\n\n print(\"--> Commented: {}\".format(rand_comment.encode('utf-8')))\n sleep(2)\n\n return 1\n", "path": "instapy/comment_util.py"}]} | 1,030 | 550 |
gh_patches_debug_24835 | rasdani/github-patches | git_diff | napari__napari-589 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
novel gene discovery in Napari (a.k.a. imperfect layer name incrementing)
## 🐛 Bug
napari does a smart thing and adds an integer to the name of a layer if the name is already in use in the viewer. It then increments that number when another layer is added that fits the pattern. This is great until you have layer names that end in numbers
[screenshot of the napari layer list with auto-incremented names omitted]
which in this case results in different names that are sometimes real genes and sometimes not.
## To Reproduce
Steps to reproduce the behavior:
```
v = napari.Viewer()
for i in range(3):
v.add_points(1000*np.random.rand(10,2), name = "no_problem")
for i in range(3):
v.add_points(1000*np.random.rand(10,2), name = "problem:GAD1")
```
## Expected behavior
consistent use of a space or other character to separate the auto-increment integer from the original string
</issue>
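A note before the code: the pattern introduced by the golden diff at the end of this row scopes the counter to a bracketed `[n]` suffix (preceded by a space, or standing alone), so `problem:GAD1` becomes `problem:GAD1 [1]` instead of mutating into the unrelated gene name `problem:GAD2`. A small self-check of that behaviour; `_inc` condenses the patched `_inc_name_count_sub`:

```python
import re

# From the golden diff: match an integer inside a trailing "[...]",
# or the empty string at the very end if no counter exists yet.
numbered_patt = re.compile(r'((?<=\A\[)|(?<=\s\[))(?:\d+|)(?=\]$)|$')

def _inc(match):
    count = match.group(0)
    try:
        return f'{int(count) + 1}'
    except ValueError:  # no counter yet, append " [1]"
        return ' [1]'

assert numbered_patt.sub(_inc, 'problem:GAD1', count=1) == 'problem:GAD1 [1]'
assert numbered_patt.sub(_inc, 'problem:GAD1 [1]', count=1) == 'problem:GAD1 [2]'
```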
<code>
[start of napari/util/naming.py]
1 """Automatically generate names.
2 """
3 import re
4 from .misc import formatdoc
5
6
7 sep = ' '
8 start = 1
9
10 numbered_patt = re.compile(r'(?<!\d)(?:\d+|)$')
11
12
13 def _inc_name_count_sub(match):
14 count = match.group(0)
15
16 try:
17 count = int(count)
18 except ValueError: # not an int
19 count = f'{sep}{start}'
20 else:
21 count = f'{count + 1}'
22
23 return count
24
25
26 @formatdoc
27 def inc_name_count(name):
28 """Increase a name's count matching `{numbered_patt}` by ``1``.
29
30 If the name is not already numbered, append '{sep}{start}'.
31
32 Parameters
33 ----------
34 name : str
35 Original name.
36
37 Returns
38 -------
39 incremented_name : str
40 Numbered name incremented by ``1``.
41 """
42 return numbered_patt.sub(_inc_name_count_sub, name)
43
[end of napari/util/naming.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/napari/util/naming.py b/napari/util/naming.py
--- a/napari/util/naming.py
+++ b/napari/util/naming.py
@@ -7,7 +7,9 @@
sep = ' '
start = 1
-numbered_patt = re.compile(r'(?<!\d)(?:\d+|)$')
+# Match integer between square brackets at end of string if after space
+# or at beginning of string or just match end of string
+numbered_patt = re.compile(r'((?<=\A\[)|(?<=\s\[))(?:\d+|)(?=\]$)|$')
def _inc_name_count_sub(match):
@@ -16,7 +18,7 @@
try:
count = int(count)
except ValueError: # not an int
- count = f'{sep}{start}'
+ count = f'{sep}[{start}]'
else:
count = f'{count + 1}'
@@ -27,7 +29,7 @@
def inc_name_count(name):
"""Increase a name's count matching `{numbered_patt}` by ``1``.
- If the name is not already numbered, append '{sep}{start}'.
+ If the name is not already numbered, append '{sep}[{start}]'.
Parameters
----------
@@ -39,4 +41,4 @@
incremented_name : str
Numbered name incremented by ``1``.
"""
- return numbered_patt.sub(_inc_name_count_sub, name)
+ return numbered_patt.sub(_inc_name_count_sub, name, count=1)
| {"golden_diff": "diff --git a/napari/util/naming.py b/napari/util/naming.py\n--- a/napari/util/naming.py\n+++ b/napari/util/naming.py\n@@ -7,7 +7,9 @@\n sep = ' '\n start = 1\n \n-numbered_patt = re.compile(r'(?<!\\d)(?:\\d+|)$')\n+# Match integer between square brackets at end of string if after space\n+# or at beginning of string or just match end of string\n+numbered_patt = re.compile(r'((?<=\\A\\[)|(?<=\\s\\[))(?:\\d+|)(?=\\]$)|$')\n \n \n def _inc_name_count_sub(match):\n@@ -16,7 +18,7 @@\n try:\n count = int(count)\n except ValueError: # not an int\n- count = f'{sep}{start}'\n+ count = f'{sep}[{start}]'\n else:\n count = f'{count + 1}'\n \n@@ -27,7 +29,7 @@\n def inc_name_count(name):\n \"\"\"Increase a name's count matching `{numbered_patt}` by ``1``.\n \n- If the name is not already numbered, append '{sep}{start}'.\n+ If the name is not already numbered, append '{sep}[{start}]'.\n \n Parameters\n ----------\n@@ -39,4 +41,4 @@\n incremented_name : str\n Numbered name incremented by ``1``.\n \"\"\"\n- return numbered_patt.sub(_inc_name_count_sub, name)\n+ return numbered_patt.sub(_inc_name_count_sub, name, count=1)\n", "issue": "novel gene discovery in Napari (a.k.a. imperfect layer name incrementing)\n## \ud83d\udc1b Bug\r\nnapari does a smart thing and adds an integer to the name of a layer if the name is already in use in the viewer. It then increments that number when another layer is added that fits the pattern. This is great until you have layer names that end in numbers \r\n\r\nwhich in this case results in different names that are sometimes real genes and sometimes not.\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n```\r\nv = napari.Viewer()\r\nfor i in range(3):\r\n v.add_points(1000*np.random.rand(10,2), name = \"no_problem\")\r\nfor i in range(3):\r\n v.add_points(1000*np.random.rand(10,2), name = \"problem:GAD1\")\r\n```\r\n\r\n## Expected behavior\r\nconsistent use of a space or other character to separate the auto-increment integer from the original string\r\n\n", "before_files": [{"content": "\"\"\"Automatically generate names.\n\"\"\"\nimport re\nfrom .misc import formatdoc\n\n\nsep = ' '\nstart = 1\n\nnumbered_patt = re.compile(r'(?<!\\d)(?:\\d+|)$')\n\n\ndef _inc_name_count_sub(match):\n count = match.group(0)\n\n try:\n count = int(count)\n except ValueError: # not an int\n count = f'{sep}{start}'\n else:\n count = f'{count + 1}'\n\n return count\n\n\n@formatdoc\ndef inc_name_count(name):\n \"\"\"Increase a name's count matching `{numbered_patt}` by ``1``.\n\n If the name is not already numbered, append '{sep}{start}'.\n\n Parameters\n ----------\n name : str\n Original name.\n\n Returns\n -------\n incremented_name : str\n Numbered name incremented by ``1``.\n \"\"\"\n return numbered_patt.sub(_inc_name_count_sub, name)\n", "path": "napari/util/naming.py"}]} | 1,121 | 362 |
gh_patches_debug_13137 | rasdani/github-patches | git_diff | genialis__resolwe-313 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
404 returned when deleting entity with `delete_content` set to `true`
Probably because the sample gets deleted when the last data object is deleted.
</issue>
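That cascade is exactly what the golden diff at the end of this row guards against: deleting the entity's last Data object removes the entity itself, so the parent `destroy()` re-fetches a missing object and returns 404. A condensed sketch of the patched method (imports mirror the file below; the surrounding class is trimmed for brevity):

```python
from distutils.util import strtobool

from rest_framework import status
from rest_framework.response import Response

from resolwe.flow.models import Entity

def destroy(self, request, *args, **kwargs):  # method body, shown trimmed
    obj = self.get_object()
    if strtobool(request.query_params.get('delete_content', 'false')):
        for data in obj.data.all():
            if request.user.has_perm('edit_data', data):
                data.delete()
    # If the cascade already removed the entity, answer 204 instead of
    # letting the parent viewset re-fetch it and 404.
    if not Entity.objects.filter(pk=obj.pk).exists():
        return Response(status=status.HTTP_204_NO_CONTENT)
    return super().destroy(request, *args, **kwargs)  # real code skips CollectionViewSet's override
```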
<code>
[start of resolwe/flow/views/entity.py]
1 """Entity viewset."""
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 from distutils.util import strtobool # pylint: disable=import-error,no-name-in-module
5
6 from django.db.models import Max
7 from django.db.models.query import Prefetch
8
9 from rest_framework import exceptions, status
10 from rest_framework.decorators import detail_route
11 from rest_framework.response import Response
12
13 from resolwe.flow.filters import EntityFilter
14 from resolwe.flow.models import Collection, Data, Entity
15 from resolwe.flow.serializers import EntitySerializer
16 from resolwe.permissions.utils import remove_permission, update_permission
17
18 from .collection import CollectionViewSet
19
20
21 class EntityViewSet(CollectionViewSet):
22 """API view for entities."""
23
24 filter_class = EntityFilter
25 serializer_class = EntitySerializer
26
27 queryset = Entity.objects.prefetch_related(
28 Prefetch('data', queryset=Data.objects.all().order_by('id')),
29 'descriptor_schema',
30 'contributor'
31 ).annotate(
32 latest_date=Max('data__modified')
33 ).order_by('-latest_date')
34
35 def _check_collection_permissions(self, collection_id, user):
36 """Check that collection exists and user has `add` permission."""
37 collection_query = Collection.objects.filter(pk=collection_id)
38 if not collection_query.exists():
39 raise exceptions.ValidationError('Collection id does not exist')
40
41 collection = collection_query.first()
42 if not user.has_perm('add_collection', obj=collection):
43 if user.is_authenticated():
44 raise exceptions.PermissionDenied()
45 else:
46 raise exceptions.NotFound()
47
48 def set_content_permissions(self, user, obj, payload):
49 """Apply permissions to data objects in ``Entity``."""
50 # Data doesn't have "ADD" permission, so it has to be removed
51 payload = remove_permission(payload, 'add')
52
53 for data in obj.data.all():
54 if user.has_perm('share_data', data):
55 update_permission(data, payload)
56
57 def destroy(self, request, *args, **kwargs):
58 """Destroy a model instance.
59
60 If ``delete_content`` flag is set in query parameters, also all
61 Data objects contained in entity will be deleted.
62 """
63 obj = self.get_object()
64 user = request.user
65
66 if strtobool(request.query_params.get('delete_content', 'false')):
67 for data in obj.data.all():
68 if user.has_perm('edit_data', data):
69 data.delete()
70
71 # NOTE: Collection's ``destroy`` method should be skiped, so we
72 # intentionaly call it's parent.
73 return super(CollectionViewSet, self).destroy( # pylint: disable=no-member,bad-super-call
74 request, *args, **kwargs
75 )
76
77 @detail_route(methods=[u'post'])
78 def add_to_collection(self, request, pk=None):
79 """Add Entity to a collection."""
80 entity = self.get_object()
81
82 if 'ids' not in request.data:
83 return Response({"error": "`ids` parameter is required"}, status=status.HTTP_400_BAD_REQUEST)
84
85 for collection_id in request.data['ids']:
86 self._check_collection_permissions(collection_id, request.user)
87
88 for collection_id in request.data['ids']:
89 entity.collections.add(collection_id)
90
91 collection = Collection.objects.get(pk=collection_id)
92 for data in entity.data.all():
93 collection.data.add(data)
94
95 return Response()
96
97 @detail_route(methods=[u'post'])
98 def remove_from_collection(self, request, pk=None):
99 """Remove Entity from a collection."""
100 entity = self.get_object()
101
102 if 'ids' not in request.data:
103 return Response({"error": "`ids` parameter is required"}, status=status.HTTP_400_BAD_REQUEST)
104
105 for collection_id in request.data['ids']:
106 self._check_collection_permissions(collection_id, request.user)
107
108 for collection_id in request.data['ids']:
109 entity.collections.remove(collection_id)
110
111 collection = Collection.objects.get(pk=collection_id)
112 for data in entity.data.all():
113 collection.data.remove(data)
114
115 return Response()
116
117 @detail_route(methods=[u'post'])
118 def add_data(self, request, pk=None):
119 """Add data to Entity and it's collection."""
120 # add data to entity
121 resp = super(EntityViewSet, self).add_data(request, pk)
122
123 # add data to collections in which entity is
124 entity = self.get_object()
125 for collection in entity.collections.all():
126 collection.data.add(*request.data['ids'])
127
128 return resp
129
[end of resolwe/flow/views/entity.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/resolwe/flow/views/entity.py b/resolwe/flow/views/entity.py
--- a/resolwe/flow/views/entity.py
+++ b/resolwe/flow/views/entity.py
@@ -68,6 +68,11 @@
if user.has_perm('edit_data', data):
data.delete()
+ # If all data objects in an entity are removed, the entity may
+ # have already been removed, so there is no need to call destroy.
+ if not Entity.objects.filter(pk=obj.pk).exists():
+ return Response(status=status.HTTP_204_NO_CONTENT)
+
# NOTE: Collection's ``destroy`` method should be skiped, so we
# intentionaly call it's parent.
return super(CollectionViewSet, self).destroy( # pylint: disable=no-member,bad-super-call
| {"golden_diff": "diff --git a/resolwe/flow/views/entity.py b/resolwe/flow/views/entity.py\n--- a/resolwe/flow/views/entity.py\n+++ b/resolwe/flow/views/entity.py\n@@ -68,6 +68,11 @@\n if user.has_perm('edit_data', data):\n data.delete()\n \n+ # If all data objects in an entity are removed, the entity may\n+ # have already been removed, so there is no need to call destroy.\n+ if not Entity.objects.filter(pk=obj.pk).exists():\n+ return Response(status=status.HTTP_204_NO_CONTENT)\n+\n # NOTE: Collection's ``destroy`` method should be skiped, so we\n # intentionaly call it's parent.\n return super(CollectionViewSet, self).destroy( # pylint: disable=no-member,bad-super-call\n", "issue": "404 returned when deleting entity with `delete_content` set to `true`\nProbably because sample gets deleted when the last data object is deleted.\n", "before_files": [{"content": "\"\"\"Entity viewset.\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom distutils.util import strtobool # pylint: disable=import-error,no-name-in-module\n\nfrom django.db.models import Max\nfrom django.db.models.query import Prefetch\n\nfrom rest_framework import exceptions, status\nfrom rest_framework.decorators import detail_route\nfrom rest_framework.response import Response\n\nfrom resolwe.flow.filters import EntityFilter\nfrom resolwe.flow.models import Collection, Data, Entity\nfrom resolwe.flow.serializers import EntitySerializer\nfrom resolwe.permissions.utils import remove_permission, update_permission\n\nfrom .collection import CollectionViewSet\n\n\nclass EntityViewSet(CollectionViewSet):\n \"\"\"API view for entities.\"\"\"\n\n filter_class = EntityFilter\n serializer_class = EntitySerializer\n\n queryset = Entity.objects.prefetch_related(\n Prefetch('data', queryset=Data.objects.all().order_by('id')),\n 'descriptor_schema',\n 'contributor'\n ).annotate(\n latest_date=Max('data__modified')\n ).order_by('-latest_date')\n\n def _check_collection_permissions(self, collection_id, user):\n \"\"\"Check that collection exists and user has `add` permission.\"\"\"\n collection_query = Collection.objects.filter(pk=collection_id)\n if not collection_query.exists():\n raise exceptions.ValidationError('Collection id does not exist')\n\n collection = collection_query.first()\n if not user.has_perm('add_collection', obj=collection):\n if user.is_authenticated():\n raise exceptions.PermissionDenied()\n else:\n raise exceptions.NotFound()\n\n def set_content_permissions(self, user, obj, payload):\n \"\"\"Apply permissions to data objects in ``Entity``.\"\"\"\n # Data doesn't have \"ADD\" permission, so it has to be removed\n payload = remove_permission(payload, 'add')\n\n for data in obj.data.all():\n if user.has_perm('share_data', data):\n update_permission(data, payload)\n\n def destroy(self, request, *args, **kwargs):\n \"\"\"Destroy a model instance.\n\n If ``delete_content`` flag is set in query parameters, also all\n Data objects contained in entity will be deleted.\n \"\"\"\n obj = self.get_object()\n user = request.user\n\n if strtobool(request.query_params.get('delete_content', 'false')):\n for data in obj.data.all():\n if user.has_perm('edit_data', data):\n data.delete()\n\n # NOTE: Collection's ``destroy`` method should be skiped, so we\n # intentionaly call it's parent.\n return super(CollectionViewSet, self).destroy( # pylint: disable=no-member,bad-super-call\n request, *args, **kwargs\n )\n\n @detail_route(methods=[u'post'])\n def add_to_collection(self, request, pk=None):\n 
\"\"\"Add Entity to a collection.\"\"\"\n entity = self.get_object()\n\n if 'ids' not in request.data:\n return Response({\"error\": \"`ids` parameter is required\"}, status=status.HTTP_400_BAD_REQUEST)\n\n for collection_id in request.data['ids']:\n self._check_collection_permissions(collection_id, request.user)\n\n for collection_id in request.data['ids']:\n entity.collections.add(collection_id)\n\n collection = Collection.objects.get(pk=collection_id)\n for data in entity.data.all():\n collection.data.add(data)\n\n return Response()\n\n @detail_route(methods=[u'post'])\n def remove_from_collection(self, request, pk=None):\n \"\"\"Remove Entity from a collection.\"\"\"\n entity = self.get_object()\n\n if 'ids' not in request.data:\n return Response({\"error\": \"`ids` parameter is required\"}, status=status.HTTP_400_BAD_REQUEST)\n\n for collection_id in request.data['ids']:\n self._check_collection_permissions(collection_id, request.user)\n\n for collection_id in request.data['ids']:\n entity.collections.remove(collection_id)\n\n collection = Collection.objects.get(pk=collection_id)\n for data in entity.data.all():\n collection.data.remove(data)\n\n return Response()\n\n @detail_route(methods=[u'post'])\n def add_data(self, request, pk=None):\n \"\"\"Add data to Entity and it's collection.\"\"\"\n # add data to entity\n resp = super(EntityViewSet, self).add_data(request, pk)\n\n # add data to collections in which entity is\n entity = self.get_object()\n for collection in entity.collections.all():\n collection.data.add(*request.data['ids'])\n\n return resp\n", "path": "resolwe/flow/views/entity.py"}]} | 1,811 | 186 |
gh_patches_debug_6684 | rasdani/github-patches | git_diff | netbox-community__netbox-11404 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Scheduling a job in the past raises an exception
### NetBox version
v3.4.2
### Python version
3.10
### Steps to Reproduce
1. Create a script
2. Schedule it in the past
### Expected Behavior
Form validation error message
### Observed Behavior
```
Traceback (most recent call last):
File "/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/core/handlers/exception.py", line 55, in inner
response = get_response(request)
File "/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/core/handlers/base.py", line 197, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/views/generic/base.py", line 103, in view
return self.dispatch(request, *args, **kwargs)
File "/home/main/devel/repos/netbox/netbox/utilities/views.py", line 53, in dispatch
return super().dispatch(request, *args, **kwargs)
File "/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/views/generic/base.py", line 142, in dispatch
return handler(request, *args, **kwargs)
File "/home/main/devel/repos/netbox/netbox/extras/views.py", line 815, in post
elif form.is_valid():
File "/home/main/devel/repos/netbox/netbox/utilities/forms/forms.py", line 69, in is_valid
is_valid = super().is_valid()
File "/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/forms/forms.py", line 205, in is_valid
return self.is_bound and not self.errors
File "/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/forms/forms.py", line 200, in errors
self.full_clean()
File "/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/forms/forms.py", line 437, in full_clean
self._clean_fields()
File "/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/forms/forms.py", line 455, in _clean_fields
self.add_error(name, e)
File "/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/forms/forms.py", line 392, in add_error
raise TypeError(
TypeError: The argument `field` must be `None` when the `error` argument contains errors for multiple fields.
```
</issue>
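A note on what the traceback is saying, before the code: inside a field-level `clean_<field>()` method Django already binds the error to that field, so `add_error()` rejects a dict-style `ValidationError` with the `TypeError` above. The golden diff at the end of this row therefore raises a plain message. A stripped-down illustration with a stand-in form (not the NetBox class):

```python
from django import forms
from django.utils import timezone

class ScheduleForm(forms.Form):  # stand-in form for illustration only
    _schedule_at = forms.DateTimeField(required=False)

    def clean__schedule_at(self):
        scheduled_time = self.cleaned_data['_schedule_at']
        if scheduled_time and scheduled_time < timezone.now():
            # A dict such as {'_schedule_at': ...} here is what made
            # add_error() raise the TypeError in the traceback.
            raise forms.ValidationError('Scheduled time must be in the future.')
        return scheduled_time
```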
<code>
[start of netbox/extras/forms/scripts.py]
1 from django import forms
2 from django.utils import timezone
3 from django.utils.translation import gettext as _
4
5 from utilities.forms import BootstrapMixin, DateTimePicker, SelectDurationWidget
6
7 __all__ = (
8 'ScriptForm',
9 )
10
11
12 class ScriptForm(BootstrapMixin, forms.Form):
13 _commit = forms.BooleanField(
14 required=False,
15 initial=True,
16 label=_("Commit changes"),
17 help_text=_("Commit changes to the database (uncheck for a dry-run)")
18 )
19 _schedule_at = forms.DateTimeField(
20 required=False,
21 widget=DateTimePicker(),
22 label=_("Schedule at"),
23 help_text=_("Schedule execution of script to a set time"),
24 )
25 _interval = forms.IntegerField(
26 required=False,
27 min_value=1,
28 label=_("Recurs every"),
29 widget=SelectDurationWidget(),
30 help_text=_("Interval at which this script is re-run (in minutes)")
31 )
32
33 def __init__(self, *args, **kwargs):
34 super().__init__(*args, **kwargs)
35
36 # Annotate the current system time for reference
37 now = timezone.now().strftime('%Y-%m-%d %H:%M:%S')
38 self.fields['_schedule_at'].help_text += f' (current time: <strong>{now}</strong>)'
39
40 # Move _commit and _schedule_at to the end of the form
41 schedule_at = self.fields.pop('_schedule_at')
42 interval = self.fields.pop('_interval')
43 commit = self.fields.pop('_commit')
44 self.fields['_schedule_at'] = schedule_at
45 self.fields['_interval'] = interval
46 self.fields['_commit'] = commit
47
48 def clean__schedule_at(self):
49 scheduled_time = self.cleaned_data['_schedule_at']
50 if scheduled_time and scheduled_time < timezone.now():
51 raise forms.ValidationError({
52 '_schedule_at': _('Scheduled time must be in the future.')
53 })
54
55 return scheduled_time
56
57 @property
58 def requires_input(self):
59 """
60 A boolean indicating whether the form requires user input (ignore the built-in fields).
61 """
62 return bool(len(self.fields) > 3)
63
[end of netbox/extras/forms/scripts.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/netbox/extras/forms/scripts.py b/netbox/extras/forms/scripts.py
--- a/netbox/extras/forms/scripts.py
+++ b/netbox/extras/forms/scripts.py
@@ -48,9 +48,7 @@
def clean__schedule_at(self):
scheduled_time = self.cleaned_data['_schedule_at']
if scheduled_time and scheduled_time < timezone.now():
- raise forms.ValidationError({
- '_schedule_at': _('Scheduled time must be in the future.')
- })
+ raise forms.ValidationError(_('Scheduled time must be in the future.'))
return scheduled_time
| {"golden_diff": "diff --git a/netbox/extras/forms/scripts.py b/netbox/extras/forms/scripts.py\n--- a/netbox/extras/forms/scripts.py\n+++ b/netbox/extras/forms/scripts.py\n@@ -48,9 +48,7 @@\n def clean__schedule_at(self):\n scheduled_time = self.cleaned_data['_schedule_at']\n if scheduled_time and scheduled_time < timezone.now():\n- raise forms.ValidationError({\n- '_schedule_at': _('Scheduled time must be in the future.')\n- })\n+ raise forms.ValidationError(_('Scheduled time must be in the future.'))\n \n return scheduled_time\n", "issue": "Scheduling a job in the past raises an exception\n### NetBox version\n\nv3.4.2\n\n### Python version\n\n3.10\n\n### Steps to Reproduce\n\n1. Create a script\r\n2. Schedule it in the past\n\n### Expected Behavior\n\nForm validation error message\n\n### Observed Behavior\n\n```\r\nTraceback (most recent call last):\r\n File \"/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/core/handlers/exception.py\", line 55, in inner\r\n response = get_response(request)\r\n File \"/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/core/handlers/base.py\", line 197, in _get_response\r\n response = wrapped_callback(request, *callback_args, **callback_kwargs)\r\n File \"/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/views/generic/base.py\", line 103, in view\r\n return self.dispatch(request, *args, **kwargs)\r\n File \"/home/main/devel/repos/netbox/netbox/utilities/views.py\", line 53, in dispatch\r\n return super().dispatch(request, *args, **kwargs)\r\n File \"/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/views/generic/base.py\", line 142, in dispatch\r\n return handler(request, *args, **kwargs)\r\n File \"/home/main/devel/repos/netbox/netbox/extras/views.py\", line 815, in post\r\n elif form.is_valid():\r\n File \"/home/main/devel/repos/netbox/netbox/utilities/forms/forms.py\", line 69, in is_valid\r\n is_valid = super().is_valid()\r\n File \"/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/forms/forms.py\", line 205, in is_valid\r\n return self.is_bound and not self.errors\r\n File \"/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/forms/forms.py\", line 200, in errors\r\n self.full_clean()\r\n File \"/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/forms/forms.py\", line 437, in full_clean\r\n self._clean_fields()\r\n File \"/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/forms/forms.py\", line 455, in _clean_fields\r\n self.add_error(name, e)\r\n File \"/home/main/devel/repos/netbox/venv/lib/python3.10/site-packages/django/forms/forms.py\", line 392, in add_error\r\n raise TypeError(\r\nTypeError: The argument `field` must be `None` when the `error` argument contains errors for multiple fields.\r\n```\n", "before_files": [{"content": "from django import forms\nfrom django.utils import timezone\nfrom django.utils.translation import gettext as _\n\nfrom utilities.forms import BootstrapMixin, DateTimePicker, SelectDurationWidget\n\n__all__ = (\n 'ScriptForm',\n)\n\n\nclass ScriptForm(BootstrapMixin, forms.Form):\n _commit = forms.BooleanField(\n required=False,\n initial=True,\n label=_(\"Commit changes\"),\n help_text=_(\"Commit changes to the database (uncheck for a dry-run)\")\n )\n _schedule_at = forms.DateTimeField(\n required=False,\n widget=DateTimePicker(),\n label=_(\"Schedule at\"),\n help_text=_(\"Schedule execution of script to a set time\"),\n )\n _interval = 
forms.IntegerField(\n required=False,\n min_value=1,\n label=_(\"Recurs every\"),\n widget=SelectDurationWidget(),\n help_text=_(\"Interval at which this script is re-run (in minutes)\")\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # Annotate the current system time for reference\n now = timezone.now().strftime('%Y-%m-%d %H:%M:%S')\n self.fields['_schedule_at'].help_text += f' (current time: <strong>{now}</strong>)'\n\n # Move _commit and _schedule_at to the end of the form\n schedule_at = self.fields.pop('_schedule_at')\n interval = self.fields.pop('_interval')\n commit = self.fields.pop('_commit')\n self.fields['_schedule_at'] = schedule_at\n self.fields['_interval'] = interval\n self.fields['_commit'] = commit\n\n def clean__schedule_at(self):\n scheduled_time = self.cleaned_data['_schedule_at']\n if scheduled_time and scheduled_time < timezone.now():\n raise forms.ValidationError({\n '_schedule_at': _('Scheduled time must be in the future.')\n })\n\n return scheduled_time\n\n @property\n def requires_input(self):\n \"\"\"\n A boolean indicating whether the form requires user input (ignore the built-in fields).\n \"\"\"\n return bool(len(self.fields) > 3)\n", "path": "netbox/extras/forms/scripts.py"}]} | 1,727 | 128 |
gh_patches_debug_42729 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-1227 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[PORT] Replace UseState() with UseBotState()
> Port this change from botbuilder-dotnet/master branch:
https://github.com/microsoft/botbuilder-dotnet/pull/3862
Fixes #3859
and use untyped params so that order and type are not fixed.
Tweak RegisterClassMiddleware so you can provide the key for the turn state.
# Changed projects
* Microsoft.Bot.Builder.Dialogs.Adaptive.Testing
* Microsoft.Bot.Builder
* Microsoft.Bot.Builder.AI.QnA.Tests
* Microsoft.Bot.Builder.Dialogs.Adaptive.Templates.Tests
* Microsoft.Bot.Builder.Dialogs.Adaptive.Tests
* Microsoft.Bot.Builder.Dialogs.Declarative.Tests
* Microsoft.Bot.Builder.Dialogs.Tests
* Microsoft.Bot.Builder.TestBot.Json
*
</issue>
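A sketch of the untyped-params idea before the code, mirroring the `use_bot_state()` helper that the golden diff at the end of this row adds (the standalone-function form here is illustrative; the real change also lets `RegisterClassMiddleware` take an explicit turn-state key, which this sketch assumes):

```python
from botbuilder.core import AutoSaveStateMiddleware, RegisterClassMiddleware

def fullname(obj):
    module = obj.__class__.__module__
    if module is None or module == str.__class__.__module__:
        return obj.__class__.__name__  # avoid reporting __builtin__
    return module + "." + obj.__class__.__name__

def use_bot_state(bot_adapter, *bot_states, auto: bool = True):
    """Register any number of BotState objects, in any order."""
    if not bot_states:
        raise TypeError("At least one BotState is required")
    for bot_state in bot_states:
        # Each state lands in turn_state under its fully-qualified class
        # name, e.g. "botbuilder.core.UserState".
        bot_adapter.use(RegisterClassMiddleware(bot_state, fullname(bot_state)))
    if auto:
        bot_adapter.use(AutoSaveStateMiddleware(bot_states))
    return bot_adapter
```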
<code>
[start of libraries/botbuilder-core/botbuilder/core/register_class_middleware.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3 from typing import Callable, Awaitable
4
5 from botbuilder.core import Middleware, TurnContext
6
7
8 class RegisterClassMiddleware(Middleware):
9 """
10 Middleware for adding an object to or registering a service with the current turn context.
11 """
12
13 def __init__(self, service):
14 self.service = service
15
16 async def on_turn(
17 self, context: TurnContext, logic: Callable[[TurnContext], Awaitable]
18 ):
19 # C# has TurnStateCollection with has overrides for adding items
20 # to TurnState. Python does not. In C#'s case, there is an 'Add'
21 # to handle adding object, and that uses the fully qualified class name.
22 context.turn_state[self.fullname(self.service)] = self.service
23 await logic()
24
25 @staticmethod
26 def fullname(obj):
27 module = obj.__class__.__module__
28 if module is None or module == str.__class__.__module__:
29 return obj.__class__.__name__ # Avoid reporting __builtin__
30 return module + "." + obj.__class__.__name__
31
[end of libraries/botbuilder-core/botbuilder/core/register_class_middleware.py]
[start of libraries/botbuilder-core/botbuilder/core/adapter_extensions.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3 from botbuilder.core import (
4 BotAdapter,
5 Storage,
6 RegisterClassMiddleware,
7 UserState,
8 ConversationState,
9 AutoSaveStateMiddleware,
10 )
11
12
13 class AdapterExtensions:
14 @staticmethod
15 def use_storage(adapter: BotAdapter, storage: Storage) -> BotAdapter:
16 """
17 Registers a storage layer with the adapter. The storage object will be available via the turn context's
18 `turn_state` property.
19
20 :param adapter: The BotAdapter on which to register the storage object.
21 :param storage: The Storage object to register.
22 :return: The BotAdapter
23 """
24 return adapter.use(RegisterClassMiddleware(storage))
25
26 @staticmethod
27 def use_state(
28 adapter: BotAdapter,
29 user_state: UserState,
30 conversation_state: ConversationState,
31 auto: bool = True,
32 ) -> BotAdapter:
33 """
34 Registers user and conversation state objects with the adapter. These objects will be available via
35 the turn context's `turn_state` property.
36
37 :param adapter: The BotAdapter on which to register the state objects.
38 :param user_state: The UserState object to register.
39 :param conversation_state: The ConversationState object to register.
40 :param auto: True to automatically persist state each turn.
41 :return: The BotAdapter
42 """
43 if not adapter:
44 raise TypeError("BotAdapter is required")
45
46 if not user_state:
47 raise TypeError("UserState is required")
48
49 if not conversation_state:
50 raise TypeError("ConversationState is required")
51
52 adapter.use(RegisterClassMiddleware(user_state))
53 adapter.use(RegisterClassMiddleware(conversation_state))
54
55 if auto:
56 adapter.use(AutoSaveStateMiddleware([user_state, conversation_state]))
57
58 return adapter
59
[end of libraries/botbuilder-core/botbuilder/core/adapter_extensions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/libraries/botbuilder-core/botbuilder/core/adapter_extensions.py b/libraries/botbuilder-core/botbuilder/core/adapter_extensions.py
--- a/libraries/botbuilder-core/botbuilder/core/adapter_extensions.py
+++ b/libraries/botbuilder-core/botbuilder/core/adapter_extensions.py
@@ -1,7 +1,10 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
+from warnings import warn
+
from botbuilder.core import (
BotAdapter,
+ BotState,
Storage,
RegisterClassMiddleware,
UserState,
@@ -23,6 +26,39 @@
"""
return adapter.use(RegisterClassMiddleware(storage))
+ @staticmethod
+ def use_bot_state(
+ bot_adapter: BotAdapter, *bot_states: BotState, auto: bool = True
+ ) -> BotAdapter:
+ """
+ Registers bot state object into the TurnContext. The botstate will be available via the turn context.
+
+ :param bot_adapter: The BotAdapter on which to register the state objects.
+ :param bot_states: One or more BotState objects to register.
+ :return: The updated adapter.
+ """
+ if not bot_states:
+ raise TypeError("At least one BotAdapter is required")
+
+ for bot_state in bot_states:
+ bot_adapter.use(
+ RegisterClassMiddleware(
+ bot_state, AdapterExtensions.fullname(bot_state)
+ )
+ )
+
+ if auto:
+ bot_adapter.use(AutoSaveStateMiddleware(bot_states))
+
+ return bot_adapter
+
+ @staticmethod
+ def fullname(obj):
+ module = obj.__class__.__module__
+ if module is None or module == str.__class__.__module__:
+ return obj.__class__.__name__ # Avoid reporting __builtin__
+ return module + "." + obj.__class__.__name__
+
@staticmethod
def use_state(
adapter: BotAdapter,
@@ -31,7 +67,7 @@
auto: bool = True,
) -> BotAdapter:
"""
- Registers user and conversation state objects with the adapter. These objects will be available via
+ [DEPRECATED] Registers user and conversation state objects with the adapter. These objects will be available via
the turn context's `turn_state` property.
:param adapter: The BotAdapter on which to register the state objects.
@@ -40,6 +76,11 @@
:param auto: True to automatically persist state each turn.
:return: The BotAdapter
"""
+ warn(
+ "This method is deprecated in 4.9. You should use the method .use_bot_state() instead.",
+ DeprecationWarning,
+ )
+
if not adapter:
raise TypeError("BotAdapter is required")
diff --git a/libraries/botbuilder-core/botbuilder/core/register_class_middleware.py b/libraries/botbuilder-core/botbuilder/core/register_class_middleware.py
--- a/libraries/botbuilder-core/botbuilder/core/register_class_middleware.py
+++ b/libraries/botbuilder-core/botbuilder/core/register_class_middleware.py
@@ -10,8 +10,9 @@
Middleware for adding an object to or registering a service with the current turn context.
"""
- def __init__(self, service):
+ def __init__(self, service, key: str = None):
self.service = service
+ self._key = key
async def on_turn(
self, context: TurnContext, logic: Callable[[TurnContext], Awaitable]
@@ -19,7 +20,8 @@
# C# has TurnStateCollection with has overrides for adding items
# to TurnState. Python does not. In C#'s case, there is an 'Add'
# to handle adding object, and that uses the fully qualified class name.
- context.turn_state[self.fullname(self.service)] = self.service
+ key = self._key or self.fullname(self.service)
+ context.turn_state[key] = self.service
await logic()
@staticmethod
| {"golden_diff": "diff --git a/libraries/botbuilder-core/botbuilder/core/adapter_extensions.py b/libraries/botbuilder-core/botbuilder/core/adapter_extensions.py\n--- a/libraries/botbuilder-core/botbuilder/core/adapter_extensions.py\n+++ b/libraries/botbuilder-core/botbuilder/core/adapter_extensions.py\n@@ -1,7 +1,10 @@\n # Copyright (c) Microsoft Corporation. All rights reserved.\n # Licensed under the MIT License.\n+from warnings import warn\n+\n from botbuilder.core import (\n BotAdapter,\n+ BotState,\n Storage,\n RegisterClassMiddleware,\n UserState,\n@@ -23,6 +26,39 @@\n \"\"\"\n return adapter.use(RegisterClassMiddleware(storage))\n \n+ @staticmethod\n+ def use_bot_state(\n+ bot_adapter: BotAdapter, *bot_states: BotState, auto: bool = True\n+ ) -> BotAdapter:\n+ \"\"\"\n+ Registers bot state object into the TurnContext. The botstate will be available via the turn context.\n+\n+ :param bot_adapter: The BotAdapter on which to register the state objects.\n+ :param bot_states: One or more BotState objects to register.\n+ :return: The updated adapter.\n+ \"\"\"\n+ if not bot_states:\n+ raise TypeError(\"At least one BotAdapter is required\")\n+\n+ for bot_state in bot_states:\n+ bot_adapter.use(\n+ RegisterClassMiddleware(\n+ bot_state, AdapterExtensions.fullname(bot_state)\n+ )\n+ )\n+\n+ if auto:\n+ bot_adapter.use(AutoSaveStateMiddleware(bot_states))\n+\n+ return bot_adapter\n+\n+ @staticmethod\n+ def fullname(obj):\n+ module = obj.__class__.__module__\n+ if module is None or module == str.__class__.__module__:\n+ return obj.__class__.__name__ # Avoid reporting __builtin__\n+ return module + \".\" + obj.__class__.__name__\n+\n @staticmethod\n def use_state(\n adapter: BotAdapter,\n@@ -31,7 +67,7 @@\n auto: bool = True,\n ) -> BotAdapter:\n \"\"\"\n- Registers user and conversation state objects with the adapter. These objects will be available via\n+ [DEPRECATED] Registers user and conversation state objects with the adapter. These objects will be available via\n the turn context's `turn_state` property.\n \n :param adapter: The BotAdapter on which to register the state objects.\n@@ -40,6 +76,11 @@\n :param auto: True to automatically persist state each turn.\n :return: The BotAdapter\n \"\"\"\n+ warn(\n+ \"This method is deprecated in 4.9. You should use the method .use_bot_state() instead.\",\n+ DeprecationWarning,\n+ )\n+\n if not adapter:\n raise TypeError(\"BotAdapter is required\")\n \ndiff --git a/libraries/botbuilder-core/botbuilder/core/register_class_middleware.py b/libraries/botbuilder-core/botbuilder/core/register_class_middleware.py\n--- a/libraries/botbuilder-core/botbuilder/core/register_class_middleware.py\n+++ b/libraries/botbuilder-core/botbuilder/core/register_class_middleware.py\n@@ -10,8 +10,9 @@\n Middleware for adding an object to or registering a service with the current turn context.\n \"\"\"\n \n- def __init__(self, service):\n+ def __init__(self, service, key: str = None):\n self.service = service\n+ self._key = key\n \n async def on_turn(\n self, context: TurnContext, logic: Callable[[TurnContext], Awaitable]\n@@ -19,7 +20,8 @@\n # C# has TurnStateCollection with has overrides for adding items\n # to TurnState. Python does not. 
In C#'s case, there is an 'Add'\n # to handle adding object, and that uses the fully qualified class name.\n- context.turn_state[self.fullname(self.service)] = self.service\n+ key = self._key or self.fullname(self.service)\n+ context.turn_state[key] = self.service\n await logic()\n \n @staticmethod\n", "issue": "[PORT] Replace UseState() with UseBotState() \n> Port this change from botbuilder-dotnet/master branch:\nhttps://github.com/microsoft/botbuilder-dotnet/pull/3862\n\nFixes #3859 \r\nand use untyped params so that order and type are not fixed.\r\nTweak RegisterMiddlewareClass so you can provide the key for the turnstate.\n\n\r\n# Changed projects\r\n* Microsoft.Bot.Builder.Dialogs.Adaptive.Testing\r\n* Microsoft.Bot.Builder\r\n* Microsoft.Bot.Builder.AI.QnA.Tests\r\n* Microsoft.Bot.Builder.Dialogs.Adaptive.Templates.Tests\r\n* Microsoft.Bot.Builder.Dialogs.Adaptive.Tests\r\n* Microsoft.Bot.Builder.Dialogs.Declarative.Tests\r\n* Microsoft.Bot.Builder.Dialogs.Tests\r\n* Microsoft.Bot.Builder.TestBot.Json\r\n* \r\n\r\n\r\n\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\nfrom typing import Callable, Awaitable\n\nfrom botbuilder.core import Middleware, TurnContext\n\n\nclass RegisterClassMiddleware(Middleware):\n \"\"\"\n Middleware for adding an object to or registering a service with the current turn context.\n \"\"\"\n\n def __init__(self, service):\n self.service = service\n\n async def on_turn(\n self, context: TurnContext, logic: Callable[[TurnContext], Awaitable]\n ):\n # C# has TurnStateCollection with has overrides for adding items\n # to TurnState. Python does not. In C#'s case, there is an 'Add'\n # to handle adding object, and that uses the fully qualified class name.\n context.turn_state[self.fullname(self.service)] = self.service\n await logic()\n\n @staticmethod\n def fullname(obj):\n module = obj.__class__.__module__\n if module is None or module == str.__class__.__module__:\n return obj.__class__.__name__ # Avoid reporting __builtin__\n return module + \".\" + obj.__class__.__name__\n", "path": "libraries/botbuilder-core/botbuilder/core/register_class_middleware.py"}, {"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\nfrom botbuilder.core import (\n BotAdapter,\n Storage,\n RegisterClassMiddleware,\n UserState,\n ConversationState,\n AutoSaveStateMiddleware,\n)\n\n\nclass AdapterExtensions:\n @staticmethod\n def use_storage(adapter: BotAdapter, storage: Storage) -> BotAdapter:\n \"\"\"\n Registers a storage layer with the adapter. The storage object will be available via the turn context's\n `turn_state` property.\n\n :param adapter: The BotAdapter on which to register the storage object.\n :param storage: The Storage object to register.\n :return: The BotAdapter\n \"\"\"\n return adapter.use(RegisterClassMiddleware(storage))\n\n @staticmethod\n def use_state(\n adapter: BotAdapter,\n user_state: UserState,\n conversation_state: ConversationState,\n auto: bool = True,\n ) -> BotAdapter:\n \"\"\"\n Registers user and conversation state objects with the adapter. 
These objects will be available via\n the turn context's `turn_state` property.\n\n :param adapter: The BotAdapter on which to register the state objects.\n :param user_state: The UserState object to register.\n :param conversation_state: The ConversationState object to register.\n :param auto: True to automatically persist state each turn.\n :return: The BotAdapter\n \"\"\"\n if not adapter:\n raise TypeError(\"BotAdapter is required\")\n\n if not user_state:\n raise TypeError(\"UserState is required\")\n\n if not conversation_state:\n raise TypeError(\"ConversationState is required\")\n\n adapter.use(RegisterClassMiddleware(user_state))\n adapter.use(RegisterClassMiddleware(conversation_state))\n\n if auto:\n adapter.use(AutoSaveStateMiddleware([user_state, conversation_state]))\n\n return adapter\n", "path": "libraries/botbuilder-core/botbuilder/core/adapter_extensions.py"}]} | 1,547 | 911 |
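The core of this fix is letting the middleware register a service under an explicit key instead of always deriving one from the class name. Below is a minimal standalone sketch of that pattern; `KeyedRegistration` and the `fullname` helper are illustrative stand-ins for the real botbuilder classes, and the turn state is modeled as a plain dict:

```python
def fullname(obj) -> str:
    """Fully qualified class name, mirroring the middleware's fallback key."""
    module = obj.__class__.__module__
    if module is None or module == str.__class__.__module__:
        return obj.__class__.__name__  # avoid reporting __builtin__
    return module + "." + obj.__class__.__name__


class KeyedRegistration:
    """Stores a service in a turn-state dict under an explicit or derived key."""

    def __init__(self, service, key: str = None):
        self.service = service
        self._key = key

    def register(self, turn_state: dict) -> None:
        # Fall back to the fully qualified class name when no key was given.
        turn_state[self._key or fullname(self.service)] = self.service


class UserState:
    pass


turn_state = {}
KeyedRegistration(UserState()).register(turn_state)              # derived key
KeyedRegistration(UserState(), "my_state").register(turn_state)  # explicit key
print(sorted(turn_state))  # ['__main__.UserState', 'my_state']
```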
gh_patches_debug_2897 | rasdani/github-patches | git_diff | dbt-labs__dbt-core-8922
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[CT-3210] [Bug] Error using `dbt list --select` when there is a cross-project model that is `version=0` in the parent project
### Is this a new bug in dbt-core?
- [X] I believe this is a new bug in dbt-core
- [X] I have searched the existing issues, and I could not find an existing issue for this bug
### Current Behavior
When you attempt to reference a model with version 0, you get a stack trace error.
### Expected Behavior
We should allow you to set the model version to 0.
### Steps To Reproduce
1. On parent/hub project, add a versioned model with `v: 0`
2. On the child/spoke project, attempt to reference that versioned model in a model:
`select * from {{ ref('example_hub', 'my_second_dbt_model', v=0) }}`
3. run `dbt list --select anything`
Outstanding question - is this only affecting cross-project refs? Or all refs to a model with `v: 0`?
### Relevant log output
_No response_
### Environment
```markdown
- OS:
- Python:
- dbt:
```
### Which database adapter are you using with dbt?
_No response_
### Additional Context
_No response_
</issue>
<code>
[start of core/dbt/contracts/graph/node_args.py]
1 from dataclasses import dataclass, field
2 from datetime import datetime
3 from typing import Optional, List
4
5 from dbt.contracts.graph.unparsed import NodeVersion
6 from dbt.node_types import NodeType, AccessType
7
8
9 @dataclass
10 class ModelNodeArgs:
11 name: str
12 package_name: str
13 identifier: str
14 schema: str
15 database: Optional[str] = None
16 relation_name: Optional[str] = None
17 version: Optional[NodeVersion] = None
18 latest_version: Optional[NodeVersion] = None
19 deprecation_date: Optional[datetime] = None
20 access: Optional[str] = AccessType.Protected.value
21 generated_at: datetime = field(default_factory=datetime.utcnow)
22 depends_on_nodes: List[str] = field(default_factory=list)
23 enabled: bool = True
24
25 @property
26 def unique_id(self) -> str:
27 unique_id = f"{NodeType.Model}.{self.package_name}.{self.name}"
28 if self.version:
29 unique_id = f"{unique_id}.v{self.version}"
30
31 return unique_id
32
33 @property
34 def fqn(self) -> List[str]:
35 fqn = [self.package_name, self.name]
36 if self.version:
37 fqn.append(f"v{self.version}")
38
39 return fqn
40
[end of core/dbt/contracts/graph/node_args.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/core/dbt/contracts/graph/node_args.py b/core/dbt/contracts/graph/node_args.py
--- a/core/dbt/contracts/graph/node_args.py
+++ b/core/dbt/contracts/graph/node_args.py
@@ -33,7 +33,8 @@
@property
def fqn(self) -> List[str]:
fqn = [self.package_name, self.name]
- if self.version:
+ # Test for None explicitly because version can be 0
+ if self.version is not None:
fqn.append(f"v{self.version}")
return fqn
| {"golden_diff": "diff --git a/core/dbt/contracts/graph/node_args.py b/core/dbt/contracts/graph/node_args.py\n--- a/core/dbt/contracts/graph/node_args.py\n+++ b/core/dbt/contracts/graph/node_args.py\n@@ -33,7 +33,8 @@\n @property\n def fqn(self) -> List[str]:\n fqn = [self.package_name, self.name]\n- if self.version:\n+ # Test for None explicitly because version can be 0\n+ if self.version is not None:\n fqn.append(f\"v{self.version}\")\n \n return fqn\n", "issue": "[CT-3210] [Bug] Error using `dbt list --select` when there is a cross-project model that is `version=0` in the parent project\n### Is this a new bug in dbt-core?\r\n\r\n- [X] I believe this is a new bug in dbt-core\r\n- [X] I have searched the existing issues, and I could not find an existing issue for this bug\r\n\r\n### Current Behavior\r\n\r\nWhen you attempt to reference a model version 0, you get a stack trace error.\r\n\r\n\r\n\r\n### Expected Behavior\r\n\r\nWe should allow you to set model version to be 0.\r\n\r\n### Steps To Reproduce\r\n\r\n1. On parent/hub project, add a versioned model with `v: 0`\r\n2. On the child/spoke project, attempt to reference that versioned model in a model:\r\n `select * from {{ ref('example_hub', 'my_second_dbt_model', v=0) }}`\r\n3. run `dbt list --select anything`\r\n\r\nOutstanding question - is this only affecting cross-project refs? Or all refs to a model with `v: 0`?\r\n\r\n### Relevant log output\r\n\r\n_No response_\r\n\r\n### Environment\r\n\r\n```markdown\r\n- OS:\r\n- Python:\r\n- dbt:\r\n```\r\n\r\n\r\n### Which database adapter are you using with dbt?\r\n\r\n_No response_\r\n\r\n### Additional Context\r\n\r\n_No response_\n", "before_files": [{"content": "from dataclasses import dataclass, field\nfrom datetime import datetime\nfrom typing import Optional, List\n\nfrom dbt.contracts.graph.unparsed import NodeVersion\nfrom dbt.node_types import NodeType, AccessType\n\n\n@dataclass\nclass ModelNodeArgs:\n name: str\n package_name: str\n identifier: str\n schema: str\n database: Optional[str] = None\n relation_name: Optional[str] = None\n version: Optional[NodeVersion] = None\n latest_version: Optional[NodeVersion] = None\n deprecation_date: Optional[datetime] = None\n access: Optional[str] = AccessType.Protected.value\n generated_at: datetime = field(default_factory=datetime.utcnow)\n depends_on_nodes: List[str] = field(default_factory=list)\n enabled: bool = True\n\n @property\n def unique_id(self) -> str:\n unique_id = f\"{NodeType.Model}.{self.package_name}.{self.name}\"\n if self.version:\n unique_id = f\"{unique_id}.v{self.version}\"\n\n return unique_id\n\n @property\n def fqn(self) -> List[str]:\n fqn = [self.package_name, self.name]\n if self.version:\n fqn.append(f\"v{self.version}\")\n\n return fqn\n", "path": "core/dbt/contracts/graph/node_args.py"}]} | 1,183 | 133 |
gh_patches_debug_2955 | rasdani/github-patches | git_diff | facebookresearch__hydra-2729
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CI failing: `./tools/configen/configen/utils.py:4:1: F401 'typing.Tuple' imported but unused`
```
./tools/configen/configen/utils.py:4:1: F401 'typing.Tuple' imported but unused
nox > [2023-07-24 22:16:52,631] Command flake8 --config .flake8 failed with exit code 1
nox > [2023-07-24 22:16:52,632] Session lint-3.10 failed.
```
</issue>
<code>
[start of tools/configen/configen/utils.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 import sys
3 from enum import Enum
4 from typing import Any, Dict, Iterable, List, Optional, Set, Tuple
5
6 from omegaconf._utils import (
7 _resolve_optional,
8 get_dict_key_value_types,
9 get_list_element_type,
10 is_dict_annotation,
11 is_list_annotation,
12 is_primitive_type_annotation,
13 )
14
15
16 # borrowed from OmegaConf
17 def type_str(t: Any) -> str:
18 is_optional, t = _resolve_optional(t)
19 if t is None:
20 return type(t).__name__
21 if t is Any:
22 return "Any"
23 if t is ...:
24 return "..."
25
26 if sys.version_info < (3, 7, 0): # pragma: no cover
27 # Python 3.6
28 if hasattr(t, "__name__"):
29 name = str(t.__name__)
30 else:
31 if t.__origin__ is not None:
32 name = type_str(t.__origin__)
33 else:
34 name = str(t)
35 if name.startswith("typing."):
36 name = name[len("typing.") :]
37 else: # pragma: no cover
38 # Python >= 3.7
39 if hasattr(t, "__name__"):
40 name = str(t.__name__)
41 else:
42 if t._name is None:
43 if t.__origin__ is not None:
44 name = type_str(t.__origin__)
45 else:
46 name = str(t._name)
47
48 args = getattr(t, "__args__", None)
49 if args is not None:
50 args = ", ".join(type_str(t) for t in t.__args__)
51 ret = f"{name}[{args}]"
52 else:
53 ret = name
54 if is_optional:
55 return f"Optional[{ret}]"
56 else:
57 return ret
58
59
60 def is_tuple_annotation(type_: Any) -> bool:
61 origin = getattr(type_, "__origin__", None)
62 return origin is tuple
63
64
65 def convert_imports(imports: Set[Any], string_imports: Iterable[str]) -> List[str]:
66 tmp = set()
67 for imp in string_imports:
68 tmp.add(imp)
69 for t in imports:
70 s = None
71 origin = getattr(t, "__origin__", None)
72 if t is Any:
73 classname = "Any"
74 elif t is Optional:
75 classname = "Optional"
76 else:
77 if origin is list:
78 classname = "List"
79 elif origin is tuple:
80 classname = "Tuple"
81 elif origin is dict:
82 classname = "Dict"
83 else:
84 classname = t.__name__
85
86 if not is_primitive_type_annotation(t) or issubclass(t, Enum):
87 s = f"from {t.__module__} import {classname}"
88
89 if s is not None:
90 tmp.add(s)
91 return sorted(list(tmp))
92
93
94 def collect_imports(imports: Set[Any], type_: Any) -> None:
95 if is_list_annotation(type_):
96 collect_imports(imports, get_list_element_type(type_))
97 type_ = List
98 elif is_dict_annotation(type_):
99 kvt = get_dict_key_value_types(type_)
100 collect_imports(imports, kvt[0])
101 collect_imports(imports, kvt[1])
102 type_ = Dict
103 else:
104 is_optional = _resolve_optional(type_)[0]
105 if is_optional and type_ is not Any:
106 type_ = Optional
107 imports.add(type_)
108
[end of tools/configen/configen/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/tools/configen/configen/utils.py b/tools/configen/configen/utils.py
--- a/tools/configen/configen/utils.py
+++ b/tools/configen/configen/utils.py
@@ -1,7 +1,7 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import sys
from enum import Enum
-from typing import Any, Dict, Iterable, List, Optional, Set, Tuple
+from typing import Any, Dict, Iterable, List, Optional, Set
from omegaconf._utils import (
_resolve_optional,
| {"golden_diff": "diff --git a/tools/configen/configen/utils.py b/tools/configen/configen/utils.py\n--- a/tools/configen/configen/utils.py\n+++ b/tools/configen/configen/utils.py\n@@ -1,7 +1,7 @@\n # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n import sys\n from enum import Enum\n-from typing import Any, Dict, Iterable, List, Optional, Set, Tuple\n+from typing import Any, Dict, Iterable, List, Optional, Set\n \n from omegaconf._utils import (\n _resolve_optional,\n", "issue": "CI failing: `./tools/configen/configen/utils.py:4:1: F401 'typing.Tuple' imported but unused`\n```\r\n./tools/configen/configen/utils.py:4:1: F401 'typing.Tuple' imported but unused\r\nnox > [2023-07-24 22:16:52,631] Command flake8 --config .flake8 failed with exit code 1\r\nnox > [2023-07-24 22:16:52,632] Session lint-3.10 failed.\r\n```\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport sys\nfrom enum import Enum\nfrom typing import Any, Dict, Iterable, List, Optional, Set, Tuple\n\nfrom omegaconf._utils import (\n _resolve_optional,\n get_dict_key_value_types,\n get_list_element_type,\n is_dict_annotation,\n is_list_annotation,\n is_primitive_type_annotation,\n)\n\n\n# borrowed from OmegaConf\ndef type_str(t: Any) -> str:\n is_optional, t = _resolve_optional(t)\n if t is None:\n return type(t).__name__\n if t is Any:\n return \"Any\"\n if t is ...:\n return \"...\"\n\n if sys.version_info < (3, 7, 0): # pragma: no cover\n # Python 3.6\n if hasattr(t, \"__name__\"):\n name = str(t.__name__)\n else:\n if t.__origin__ is not None:\n name = type_str(t.__origin__)\n else:\n name = str(t)\n if name.startswith(\"typing.\"):\n name = name[len(\"typing.\") :]\n else: # pragma: no cover\n # Python >= 3.7\n if hasattr(t, \"__name__\"):\n name = str(t.__name__)\n else:\n if t._name is None:\n if t.__origin__ is not None:\n name = type_str(t.__origin__)\n else:\n name = str(t._name)\n\n args = getattr(t, \"__args__\", None)\n if args is not None:\n args = \", \".join(type_str(t) for t in t.__args__)\n ret = f\"{name}[{args}]\"\n else:\n ret = name\n if is_optional:\n return f\"Optional[{ret}]\"\n else:\n return ret\n\n\ndef is_tuple_annotation(type_: Any) -> bool:\n origin = getattr(type_, \"__origin__\", None)\n return origin is tuple\n\n\ndef convert_imports(imports: Set[Any], string_imports: Iterable[str]) -> List[str]:\n tmp = set()\n for imp in string_imports:\n tmp.add(imp)\n for t in imports:\n s = None\n origin = getattr(t, \"__origin__\", None)\n if t is Any:\n classname = \"Any\"\n elif t is Optional:\n classname = \"Optional\"\n else:\n if origin is list:\n classname = \"List\"\n elif origin is tuple:\n classname = \"Tuple\"\n elif origin is dict:\n classname = \"Dict\"\n else:\n classname = t.__name__\n\n if not is_primitive_type_annotation(t) or issubclass(t, Enum):\n s = f\"from {t.__module__} import {classname}\"\n\n if s is not None:\n tmp.add(s)\n return sorted(list(tmp))\n\n\ndef collect_imports(imports: Set[Any], type_: Any) -> None:\n if is_list_annotation(type_):\n collect_imports(imports, get_list_element_type(type_))\n type_ = List\n elif is_dict_annotation(type_):\n kvt = get_dict_key_value_types(type_)\n collect_imports(imports, kvt[0])\n collect_imports(imports, kvt[1])\n type_ = Dict\n else:\n is_optional = _resolve_optional(type_)[0]\n if is_optional and type_ is not Any:\n type_ = Optional\n imports.add(type_)\n", "path": "tools/configen/configen/utils.py"}]} | 1,647 | 121 |
gh_patches_debug_2926 | rasdani/github-patches | git_diff | Mailu__Mailu-2116
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error 404 not found when opening admin after upgrade 1.8 to master
## Before you open your issue
- [X] Check if no issue or pull-request for this already exists.
- [X] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- [X] You understand `Mailu` is made by volunteers in their **free time** — be concise, civil and accept that delays can occur.
- [X] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
## Environment & Versions
### Environment
- [X] docker-compose
### Versions
Before upgrade: Docker 1.8 images.
After upgrade: Docker master images (pulled 30 December 2021).
## Description
**Mailu 1.8** image redirects `/admin` to `/admin/ui`.
**Mailu master** image no longer redirects `/admin/ui` as the `ui` part in the URL has been removed according to [towncrier newsfragment 1929.enhancement](https://github.com/Mailu/Mailu/blob/master/towncrier/newsfragments/1929.enhancement):
> Removed the /admin/ prefix to reduce complexity of routing with Mailu. Admin is accessible directly via /admin instead of /admin/ui
After the upgrade from `1.8` to `master`, visiting the admin page makes the browser reuse the cached URL `/admin/ui`, which results in a 404 not found.
## Replication Steps
1. Create 1.8 production environment on AMD64 platform using `mailu 1.8 Docker images`.
2. Make sure the Admin page works.
3. Remove docker containers (`docker-compose down`).
4. Recreate **all** containers at the same time using `mailu master Docker images`.
5. Open root mail domain. The browser uses the cached URL `admin/ui` and shows Error 404 not found.
Note: Tested with `TLS_FLAVOR=letsencrypt`, admin and roundcube and Firefox.
## Expected behaviour
Backwards compatibility after Mailu 1.8 upgrade without the need of removing browser caches.
## Front log
```
front_1 | <IP> - - [30/Dec/2021:10:14:35 +0000] "GET /admin/ui/ HTTP/2.0" 404 198 "https://mail.mydomain.nl/sso/login" "Mozilla/5.0 (X11; Linux x86_64; rv:95.0) Gecko/20100101 Firefox/95.0"
```
## Bugfix
The proposal is to always redirect `/admin/ui` to `/admin` to prevent browser caching problems after the upgrade.
</issue>
<code>
[start of core/admin/mailu/ui/views/base.py]
1 from mailu import models, utils
2 from mailu.ui import ui, forms, access
3
4 from flask import current_app as app
5 import flask
6 import flask_login
7
8
9 @ui.route('/', methods=["GET"])
10 @access.authenticated
11 def index():
12 return flask.redirect(flask.url_for('.user_settings'))
13
14 @ui.route('/announcement', methods=['GET', 'POST'])
15 @access.global_admin
16 def announcement():
17 form = forms.AnnouncementForm()
18 if form.validate_on_submit():
19 for user in models.User.query.all():
20 user.sendmail(form.announcement_subject.data,
21 form.announcement_body.data)
22 # Force-empty the form
23 form.announcement_subject.data = ''
24 form.announcement_body.data = ''
25 flask.flash('Your announcement was sent', 'success')
26 return flask.render_template('announcement.html', form=form)
27
28 @ui.route('/webmail', methods=['GET'])
29 def webmail():
30 return flask.redirect(app.config['WEB_WEBMAIL'])
31
32 @ui.route('/client', methods=['GET'])
33 def client():
34 return flask.render_template('client.html')
35
36 @ui.route('/webui_antispam', methods=['GET'])
37 def antispam():
38 return flask.render_template('antispam.html')
39
[end of core/admin/mailu/ui/views/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/core/admin/mailu/ui/views/base.py b/core/admin/mailu/ui/views/base.py
--- a/core/admin/mailu/ui/views/base.py
+++ b/core/admin/mailu/ui/views/base.py
@@ -11,6 +11,10 @@
def index():
return flask.redirect(flask.url_for('.user_settings'))
[email protected]('/ui/')
+def redirect_old_path():
+ return flask.redirect(flask.url_for('.index'), code=301)
+
@ui.route('/announcement', methods=['GET', 'POST'])
@access.global_admin
def announcement():
| {"golden_diff": "diff --git a/core/admin/mailu/ui/views/base.py b/core/admin/mailu/ui/views/base.py\n--- a/core/admin/mailu/ui/views/base.py\n+++ b/core/admin/mailu/ui/views/base.py\n@@ -11,6 +11,10 @@\n def index():\n return flask.redirect(flask.url_for('.user_settings'))\n \[email protected]('/ui/')\n+def redirect_old_path():\n+ return flask.redirect(flask.url_for('.index'), code=301)\n+\n @ui.route('/announcement', methods=['GET', 'POST'])\n @access.global_admin\n def announcement():\n", "issue": "Error 404 not found when opening admin after upgrade 1.8 to master\n## Before you open your issue\r\n- [X] Check if no issue or pull-request for this already exists.\r\n- [X] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)\r\n- [X] You understand `Mailu` is made by volunteers in their **free time** \u2014 be conscise, civil and accept that delays can occur.\r\n- [X] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.\r\n\r\n## Environment & Versions\r\n### Environment\r\n - [X] docker-compose\r\n\r\n### Versions\r\n\r\nBefore upgrade: Docker 1.8 images.\r\nAfter upgrade: Docker master images (pulled 30 December 2021).\r\n\r\n## Description\r\n\r\n**Mailu 1.8** image redirects `/admin` to `/admin/ui`.\r\n\r\n**Mailu master** image no longer redirects `/admin/ui` as the `ui` part in the URL has been removed according to [Tomcat 1929.enhacement](https://github.com/Mailu/Mailu/blob/master/towncrier/newsfragments/1929.enhancement):\r\n\r\n> Removed the /admin/ prefix to reduce complexity of routing with Mailu. Admin is accessible directly via /admin instead of /admin/ui\r\n\r\nAfter the upgrade from `1.8` to `master` and visiting the admin page, the browser still uses the cached URL `/admin/ui` and results in 404 not found.\r\n\r\n\r\n## Replication Steps\r\n\r\n1. Create 1.8 production environment on AMD64 platform using `mailu 1.8 Docker images`.\r\n2. Make sure the Admin page works.\r\n3. Remove docker containers (`docker-compose down`).\r\n4. Recreate **all** containers at the same time using `mailu master Docker images`.\r\n5. Open root mail domain. 
The browser uses the cached URL `admin/ui` and shows Error 404 not found.\r\n\r\nNote: Tested with `TLS_FLAVOR=letsencrypt`, admin and roundcube and Firefox.\r\n\r\n\r\n## Expected behaviour\r\n\r\nBackwards compatibility after Mailu 1.8 upgrade without the need of removing browser caches.\r\n\r\n## Front log\r\n\r\n```\r\nfront_1 | <IP> - - [30/Dec/2021:10:14:35 +0000] \"GET /admin/ui/ HTTP/2.0\" 404 198 \"https://mail.mydomain.nl/sso/login\" \"Mozilla/5.0 (X11; Linux x86_64; rv:95.0) Gecko/20100101 Firefox/95.0\"\r\n```\r\n\r\n## Bugfix\r\n\r\nProposal is to redirect `/admin/ui` always to `/admin` to prevent browser caching problems after the upgrade.\n", "before_files": [{"content": "from mailu import models, utils\nfrom mailu.ui import ui, forms, access\n\nfrom flask import current_app as app\nimport flask\nimport flask_login\n\n\[email protected]('/', methods=[\"GET\"])\[email protected]\ndef index():\n return flask.redirect(flask.url_for('.user_settings'))\n\[email protected]('/announcement', methods=['GET', 'POST'])\[email protected]_admin\ndef announcement():\n form = forms.AnnouncementForm()\n if form.validate_on_submit():\n for user in models.User.query.all():\n user.sendmail(form.announcement_subject.data,\n form.announcement_body.data)\n # Force-empty the form\n form.announcement_subject.data = ''\n form.announcement_body.data = ''\n flask.flash('Your announcement was sent', 'success')\n return flask.render_template('announcement.html', form=form)\n\[email protected]('/webmail', methods=['GET'])\ndef webmail():\n return flask.redirect(app.config['WEB_WEBMAIL'])\n\[email protected]('/client', methods=['GET'])\ndef client():\n return flask.render_template('client.html')\n\[email protected]('/webui_antispam', methods=['GET'])\ndef antispam():\n return flask.render_template('antispam.html')\n", "path": "core/admin/mailu/ui/views/base.py"}]} | 1,510 | 127 |
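The fix above adds the compatibility route inside the admin blueprint. Here is a minimal self-contained Flask sketch of the same idea; the route names are illustrative stand-ins, not Mailu's actual blueprint layout:

```python
from flask import Flask, redirect, url_for

app = Flask(__name__)


@app.route("/admin")
def admin_index():
    return "admin"


@app.route("/admin/ui/")
def redirect_old_path():
    # 301 tells browsers to update their cached URL for the old location.
    return redirect(url_for("admin_index"), code=301)


if __name__ == "__main__":
    with app.test_client() as client:
        resp = client.get("/admin/ui/")
        print(resp.status_code, resp.headers["Location"])  # e.g. 301 /admin
```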
gh_patches_debug_8696 | rasdani/github-patches | git_diff | easybuilders__easybuild-framework-757
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
sanity_check_commands doesn't work for ipython
</issue>
<code>
[start of easybuild/framework/extensioneasyblock.py]
1 ##
2 # Copyright 2013 Ghent University
3 #
4 # This file is part of EasyBuild,
5 # originally created by the HPC team of the University of Ghent (http://ugent.be/hpc).
6 #
7 # http://github.com/hpcugent/easybuild
8 #
9 # EasyBuild is free software: you can redistribute it and/or modify
10 # it under the terms of the GNU General Public License as published by
11 # the Free Software Foundation v2.
12 #
13 # EasyBuild is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU General Public License for more details.
17 #
18 # You should have received a copy of the GNU General Public License
19 # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
20 ##
21 """
22 EasyBuild support for building and installing extensions as actual extensions or as stand-alone modules,
23 implemented as an easyblock
24
25 @author: Kenneth Hoste (Ghent University)
26 """
27 import copy
28 import os
29
30 from easybuild.framework.easyblock import EasyBlock
31 from easybuild.framework.easyconfig import CUSTOM
32 from easybuild.framework.extension import Extension
33 from easybuild.tools.filetools import apply_patch, extract_file
34 from easybuild.tools.utilities import remove_unwanted_chars
35
36
37 class ExtensionEasyBlock(EasyBlock, Extension):
38 """
39 Install an extension as a separate module, or as an extension.
40
41 Deriving classes should implement the following functions:
42 * required EasyBlock functions:
43 - configure_step
44 - build_step
45 - install_step
46 * required Extension functions
47 - run
48 """
49
50 @staticmethod
51 def extra_options(extra_vars=None):
52 """Extra easyconfig parameters specific to ExtensionEasyBlock."""
53
54 # using [] as default value is a bad idea, so we handle it this way
55 if extra_vars is None:
56 extra_vars = []
57
58 extra_vars.extend([
59 ('options', [{}, "Dictionary with extension options.", CUSTOM]),
60 ])
61 return EasyBlock.extra_options(extra_vars)
62
63 def __init__(self, *args, **kwargs):
64 """Initialize either as EasyBlock or as Extension."""
65
66 self.is_extension = False
67
68 if isinstance(args[0], EasyBlock):
69 Extension.__init__(self, *args, **kwargs)
70 # name and version properties of EasyBlock are used, so make sure name and version are correct
71 self.cfg['name'] = self.ext.get('name', None)
72 self.cfg['version'] = self.ext.get('version', None)
73 self.builddir = self.master.builddir
74 self.installdir = self.master.installdir
75 self.is_extension = True
76 self.unpack_options = None
77 else:
78 EasyBlock.__init__(self, *args, **kwargs)
79 self.options = copy.deepcopy(self.cfg.get('options', {})) # we need this for Extension.sanity_check_step
80
81 self.ext_dir = None # dir where extension source was unpacked
82
83 def run(self, unpack_src=False):
84 """Common operations for extensions: unpacking sources, patching, ..."""
85
86 # unpack file if desired
87 if unpack_src:
88 targetdir = os.path.join(self.master.builddir, remove_unwanted_chars(self.name))
89 self.ext_dir = extract_file("%s" % self.src, targetdir, extra_options=self.unpack_options)
90
91 # patch if needed
92 if self.patches:
93 for patchfile in self.patches:
94 if not apply_patch(patchfile, self.ext_dir):
95 self.log.error("Applying patch %s failed" % patchfile)
96
97 def sanity_check_step(self, exts_filter=None, custom_paths=None, custom_commands=None):
98 """
99 Custom sanity check for extensions, whether installed as stand-alone module or not
100 """
101 if not self.cfg['exts_filter']:
102 self.cfg['exts_filter'] = exts_filter
103 self.log.debug("starting sanity check for extension with filter %s", self.cfg['exts_filter'])
104
105 if not self.is_extension:
106 # load fake module
107 fake_mod_data = self.load_fake_module(purge=True)
108
109 # perform sanity check
110 sanity_check_ok = Extension.sanity_check_step(self)
111
112 if not self.is_extension:
113 # unload fake module and clean up
114 self.clean_up_fake_module(fake_mod_data)
115
116 if custom_paths or custom_commands:
117 EasyBlock.sanity_check_step(self, custom_paths=custom_paths, custom_commands=custom_commands,
118 extension=self.is_extension)
119
120 # pass or fail sanity check
121 if not sanity_check_ok:
122 msg = "Sanity check for %s failed: %s" % (self.name, '; '.join(self.sanity_check_fail_msgs))
123 if self.is_extension:
124 self.log.warning(msg)
125 else:
126 self.log.error(msg)
127 return False
128 else:
129 self.log.info("Sanity check for %s successful!" % self.name)
130 return True
131
132 def make_module_extra(self, extra=None):
133 """Add custom entries to module."""
134
135 txt = EasyBlock.make_module_extra(self)
136 if not extra is None:
137 txt += extra
138 return txt
139
[end of easybuild/framework/extensioneasyblock.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/easybuild/framework/extensioneasyblock.py b/easybuild/framework/extensioneasyblock.py
--- a/easybuild/framework/extensioneasyblock.py
+++ b/easybuild/framework/extensioneasyblock.py
@@ -113,7 +113,7 @@
# unload fake module and clean up
self.clean_up_fake_module(fake_mod_data)
- if custom_paths or custom_commands:
+ if custom_paths or self.cfg['sanity_check_paths'] or custom_commands or self.cfg['sanity_check_commands']:
EasyBlock.sanity_check_step(self, custom_paths=custom_paths, custom_commands=custom_commands,
extension=self.is_extension)
| {"golden_diff": "diff --git a/easybuild/framework/extensioneasyblock.py b/easybuild/framework/extensioneasyblock.py\n--- a/easybuild/framework/extensioneasyblock.py\n+++ b/easybuild/framework/extensioneasyblock.py\n@@ -113,7 +113,7 @@\n # unload fake module and clean up\n self.clean_up_fake_module(fake_mod_data)\n \n- if custom_paths or custom_commands:\n+ if custom_paths or self.cfg['sanity_check_paths'] or custom_commands or self.cfg['sanity_check_commands']:\n EasyBlock.sanity_check_step(self, custom_paths=custom_paths, custom_commands=custom_commands,\n extension=self.is_extension)\n", "issue": "santiy_check_commands doesn't work for ipython\n\n", "before_files": [{"content": "##\n# Copyright 2013 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of the University of Ghent (http://ugent.be/hpc).\n#\n# http://github.com/hpcugent/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nEasyBuild support for building and installing extensions as actual extensions or as stand-alone modules,\nimplemented as an easyblock\n\n@author: Kenneth Hoste (Ghent University)\n\"\"\"\nimport copy\nimport os\n\nfrom easybuild.framework.easyblock import EasyBlock\nfrom easybuild.framework.easyconfig import CUSTOM\nfrom easybuild.framework.extension import Extension\nfrom easybuild.tools.filetools import apply_patch, extract_file\nfrom easybuild.tools.utilities import remove_unwanted_chars\n\n\nclass ExtensionEasyBlock(EasyBlock, Extension):\n \"\"\"\n Install an extension as a separate module, or as an extension.\n\n Deriving classes should implement the following functions:\n * required EasyBlock functions:\n - configure_step\n - build_step\n - install_step\n * required Extension functions\n - run\n \"\"\"\n\n @staticmethod\n def extra_options(extra_vars=None):\n \"\"\"Extra easyconfig parameters specific to ExtensionEasyBlock.\"\"\"\n\n # using [] as default value is a bad idea, so we handle it this way\n if extra_vars is None:\n extra_vars = []\n\n extra_vars.extend([\n ('options', [{}, \"Dictionary with extension options.\", CUSTOM]),\n ])\n return EasyBlock.extra_options(extra_vars)\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize either as EasyBlock or as Extension.\"\"\"\n\n self.is_extension = False\n\n if isinstance(args[0], EasyBlock):\n Extension.__init__(self, *args, **kwargs)\n # name and version properties of EasyBlock are used, so make sure name and version are correct\n self.cfg['name'] = self.ext.get('name', None)\n self.cfg['version'] = self.ext.get('version', None)\n self.builddir = self.master.builddir\n self.installdir = self.master.installdir\n self.is_extension = True\n self.unpack_options = None\n else:\n EasyBlock.__init__(self, *args, **kwargs)\n self.options = copy.deepcopy(self.cfg.get('options', {})) # we need this for Extension.sanity_check_step\n\n self.ext_dir = None # dir where extension source was unpacked\n\n def run(self, unpack_src=False):\n \"\"\"Common operations for extensions: unpacking 
sources, patching, ...\"\"\"\n\n # unpack file if desired\n if unpack_src:\n targetdir = os.path.join(self.master.builddir, remove_unwanted_chars(self.name))\n self.ext_dir = extract_file(\"%s\" % self.src, targetdir, extra_options=self.unpack_options)\n\n # patch if needed\n if self.patches:\n for patchfile in self.patches:\n if not apply_patch(patchfile, self.ext_dir):\n self.log.error(\"Applying patch %s failed\" % patchfile)\n\n def sanity_check_step(self, exts_filter=None, custom_paths=None, custom_commands=None):\n \"\"\"\n Custom sanity check for extensions, whether installed as stand-alone module or not\n \"\"\"\n if not self.cfg['exts_filter']:\n self.cfg['exts_filter'] = exts_filter\n self.log.debug(\"starting sanity check for extension with filter %s\", self.cfg['exts_filter'])\n\n if not self.is_extension:\n # load fake module\n fake_mod_data = self.load_fake_module(purge=True)\n\n # perform sanity check\n sanity_check_ok = Extension.sanity_check_step(self)\n\n if not self.is_extension:\n # unload fake module and clean up\n self.clean_up_fake_module(fake_mod_data)\n\n if custom_paths or custom_commands:\n EasyBlock.sanity_check_step(self, custom_paths=custom_paths, custom_commands=custom_commands,\n extension=self.is_extension)\n\n # pass or fail sanity check\n if not sanity_check_ok:\n msg = \"Sanity check for %s failed: %s\" % (self.name, '; '.join(self.sanity_check_fail_msgs))\n if self.is_extension:\n self.log.warning(msg)\n else:\n self.log.error(msg)\n return False\n else:\n self.log.info(\"Sanity check for %s successful!\" % self.name)\n return True\n\n def make_module_extra(self, extra=None):\n \"\"\"Add custom entries to module.\"\"\"\n\n txt = EasyBlock.make_module_extra(self)\n if not extra is None:\n txt += extra\n return txt\n", "path": "easybuild/framework/extensioneasyblock.py"}]} | 1,982 | 145 |
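Before the fix, the guard only looked at the `custom_paths`/`custom_commands` arguments passed in by the easyblock, so a `sanity_check_commands` entry defined in an easyconfig (e.g. for IPython) never triggered the EasyBlock-level check. A standalone sketch of the widened condition; `cfg` is a plain dict standing in for the easyconfig, not the real EasyBuild API:

```python
def should_run_easyblock_check(cfg, custom_paths=None, custom_commands=None):
    # Old guard: bool(custom_paths or custom_commands) -- easyconfig values ignored.
    return bool(
        custom_paths
        or cfg.get("sanity_check_paths")
        or custom_commands
        or cfg.get("sanity_check_commands")
    )


cfg = {"sanity_check_commands": [("ipython", "--version")]}
print(should_run_easyblock_check(cfg))  # True with the widened guard
print(bool(None or None))               # False: what the old guard computed
```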
gh_patches_debug_3275 | rasdani/github-patches | git_diff | apache__tvm-6502
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[TOPI] Typo in operator key
https://github.com/apache/incubator-tvm/blob/bdfefbb03f5aab96ee677ee28a166dd6ab5dbf3f/python/tvm/topi/bifrost/dense.py#L26
"biforst" should be "bifrost". This bug makes the op totally unavailable in Relay.
I can fix this bug if expected, but I don't know how to add a proper test.
</issue>
<code>
[start of python/tvm/topi/bifrost/dense.py]
1 # Licensed to the Apache Software Foundation (ASF) under one
2 # or more contributor license agreements. See the NOTICE file
3 # distributed with this work for additional information
4 # regarding copyright ownership. The ASF licenses this file
5 # to you under the Apache License, Version 2.0 (the
6 # "License"); you may not use this file except in compliance
7 # with the License. You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing,
12 # software distributed under the License is distributed on an
13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 # KIND, either express or implied. See the License for the
15 # specific language governing permissions and limitations
16 # under the License.
17 # pylint: disable=invalid-name,unused-variable
18 """dense schedule on ARM Mali Biforst GPU"""
19 from tvm import te
20 from tvm import autotvm
21
22 from .. import nn
23 from ..util import traverse_inline
24
25
26 @autotvm.register_topi_compute("dense.biforst")
27 def dense(_, data, weight, bias=None, out_dtype=None):
28 """Dense operator on Biforst"""
29 return nn.dense(data, weight, bias, out_dtype)
30
31
32 @autotvm.register_topi_schedule("dense.bifrost")
33 def schedule_dense(cfg, outs):
34 """Schedule for dense operator.
35
36 Parameters
37 ----------
38 cfg: ConfigEntity
39 The config entity for this template
40 outs: Array of Tensor
41 The computation graph description of dense
42 in the format of an array of tensors.
43
44 Returns
45 -------
46 s: Schedule
47 The computation schedule for dense.
48 """
49 outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
50 s = te.create_schedule([x.op for x in outs])
51
52 def _callback(op):
53 if op.tag == "dense":
54 vec_size = [1, 2, 4, 8, 16]
55 max_unroll = 32
56
57 dense_out = op.output(0)
58 output = outs[0]
59
60 y, x = s[output].op.axis
61 c = s[dense_out].op.reduce_axis[0]
62
63 ##### space definition begin #####
64 cfg.define_split("tile_y", y, num_outputs=3)
65 cfg.define_split("tile_x", x, num_outputs=3)
66 cfg.define_split("c_unroll", c, num_outputs=2, max_factor=64)
67
68 # fallback support
69 if cfg.is_fallback:
70 ref_log = autotvm.tophub.load_reference_log("mali", "rk3399", "dense.bifrost")
71 cfg.fallback_with_reference_log(ref_log)
72 ##### space definition end #####
73
74 if dense_out.op in s.outputs:
75 dense_out = s.cache_write(output, "local")
76
77 by, ty, yi = cfg["tile_y"].apply(s, output, y)
78 bx, tx, xi = cfg["tile_x"].apply(s, output, x)
79
80 s[output].bind(by, te.thread_axis("blockIdx.y"))
81 s[output].bind(bx, te.thread_axis("blockIdx.x"))
82 s[output].bind(ty, te.thread_axis("threadIdx.y"))
83 s[output].bind(tx, te.thread_axis("threadIdx.x"))
84
85 if cfg["tile_y"].size[-1] < max_unroll:
86 s[output].unroll(yi)
87 if cfg["tile_x"].size[-1] in vec_size:
88 s[output].vectorize(xi)
89 s[dense_out].compute_at(s[output], tx)
90
91 k = s[dense_out].op.reduce_axis[0]
92 y, x = s[dense_out].op.axis
93 k, k_unroll = cfg["c_unroll"].apply(s, dense_out, k)
94 s[dense_out].reorder(k, k_unroll, y, x)
95 s[dense_out].unroll(k_unroll)
96 if cfg["tile_y"].size[-1] < max_unroll:
97 s[dense_out].unroll(y)
98 if cfg["tile_x"].size[-1] in vec_size:
99 s[dense_out].vectorize(x)
100
101 traverse_inline(s, outs[0].op, _callback)
102 return s
103
104
105 def fuse_and_bind(s, tensor, axis=None, num_thread=None):
106 """ fuse all the axis and bind to GPU threads """
107 axis = axis or s[tensor].op.axis
108 fused = s[tensor].fuse(*axis)
109 bx, tx = s[tensor].split(fused, num_thread)
110 s[tensor].bind(bx, te.thread_axis("blockIdx.x"))
111 s[tensor].bind(tx, te.thread_axis("threadIdx.x"))
112 return bx, tx
113
[end of python/tvm/topi/bifrost/dense.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/python/tvm/topi/bifrost/dense.py b/python/tvm/topi/bifrost/dense.py
--- a/python/tvm/topi/bifrost/dense.py
+++ b/python/tvm/topi/bifrost/dense.py
@@ -23,7 +23,7 @@
from ..util import traverse_inline
[email protected]_topi_compute("dense.biforst")
[email protected]_topi_compute("dense.bifrost")
def dense(_, data, weight, bias=None, out_dtype=None):
"""Dense operator on Biforst"""
return nn.dense(data, weight, bias, out_dtype)
| {"golden_diff": "diff --git a/python/tvm/topi/bifrost/dense.py b/python/tvm/topi/bifrost/dense.py\n--- a/python/tvm/topi/bifrost/dense.py\n+++ b/python/tvm/topi/bifrost/dense.py\n@@ -23,7 +23,7 @@\n from ..util import traverse_inline\n \n \[email protected]_topi_compute(\"dense.biforst\")\[email protected]_topi_compute(\"dense.bifrost\")\n def dense(_, data, weight, bias=None, out_dtype=None):\n \"\"\"Dense operator on Biforst\"\"\"\n return nn.dense(data, weight, bias, out_dtype)\n", "issue": "[TOPI] Typo in operator key\nhttps://github.com/apache/incubator-tvm/blob/bdfefbb03f5aab96ee677ee28a166dd6ab5dbf3f/python/tvm/topi/bifrost/dense.py#L26\r\n\r\n\"biforst\" should be \"bifrost\". This bug makes the op totally unavailable in Relay.\r\n\r\nI can fix this bug if expected, but I don't know how to add a proper test.\n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=invalid-name,unused-variable\n\"\"\"dense schedule on ARM Mali Biforst GPU\"\"\"\nfrom tvm import te\nfrom tvm import autotvm\n\nfrom .. 
import nn\nfrom ..util import traverse_inline\n\n\[email protected]_topi_compute(\"dense.biforst\")\ndef dense(_, data, weight, bias=None, out_dtype=None):\n \"\"\"Dense operator on Biforst\"\"\"\n return nn.dense(data, weight, bias, out_dtype)\n\n\[email protected]_topi_schedule(\"dense.bifrost\")\ndef schedule_dense(cfg, outs):\n \"\"\"Schedule for dense operator.\n\n Parameters\n ----------\n cfg: ConfigEntity\n The config entity for this template\n outs: Array of Tensor\n The computation graph description of dense\n in the format of an array of tensors.\n\n Returns\n -------\n s: Schedule\n The computation schedule for dense.\n \"\"\"\n outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs\n s = te.create_schedule([x.op for x in outs])\n\n def _callback(op):\n if op.tag == \"dense\":\n vec_size = [1, 2, 4, 8, 16]\n max_unroll = 32\n\n dense_out = op.output(0)\n output = outs[0]\n\n y, x = s[output].op.axis\n c = s[dense_out].op.reduce_axis[0]\n\n ##### space definition begin #####\n cfg.define_split(\"tile_y\", y, num_outputs=3)\n cfg.define_split(\"tile_x\", x, num_outputs=3)\n cfg.define_split(\"c_unroll\", c, num_outputs=2, max_factor=64)\n\n # fallback support\n if cfg.is_fallback:\n ref_log = autotvm.tophub.load_reference_log(\"mali\", \"rk3399\", \"dense.bifrost\")\n cfg.fallback_with_reference_log(ref_log)\n ##### space definition end #####\n\n if dense_out.op in s.outputs:\n dense_out = s.cache_write(output, \"local\")\n\n by, ty, yi = cfg[\"tile_y\"].apply(s, output, y)\n bx, tx, xi = cfg[\"tile_x\"].apply(s, output, x)\n\n s[output].bind(by, te.thread_axis(\"blockIdx.y\"))\n s[output].bind(bx, te.thread_axis(\"blockIdx.x\"))\n s[output].bind(ty, te.thread_axis(\"threadIdx.y\"))\n s[output].bind(tx, te.thread_axis(\"threadIdx.x\"))\n\n if cfg[\"tile_y\"].size[-1] < max_unroll:\n s[output].unroll(yi)\n if cfg[\"tile_x\"].size[-1] in vec_size:\n s[output].vectorize(xi)\n s[dense_out].compute_at(s[output], tx)\n\n k = s[dense_out].op.reduce_axis[0]\n y, x = s[dense_out].op.axis\n k, k_unroll = cfg[\"c_unroll\"].apply(s, dense_out, k)\n s[dense_out].reorder(k, k_unroll, y, x)\n s[dense_out].unroll(k_unroll)\n if cfg[\"tile_y\"].size[-1] < max_unroll:\n s[dense_out].unroll(y)\n if cfg[\"tile_x\"].size[-1] in vec_size:\n s[dense_out].vectorize(x)\n\n traverse_inline(s, outs[0].op, _callback)\n return s\n\n\ndef fuse_and_bind(s, tensor, axis=None, num_thread=None):\n \"\"\" fuse all the axis and bind to GPU threads \"\"\"\n axis = axis or s[tensor].op.axis\n fused = s[tensor].fuse(*axis)\n bx, tx = s[tensor].split(fused, num_thread)\n s[tensor].bind(bx, te.thread_axis(\"blockIdx.x\"))\n s[tensor].bind(tx, te.thread_axis(\"threadIdx.x\"))\n return bx, tx\n", "path": "python/tvm/topi/bifrost/dense.py"}]} | 1,948 | 144 |
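The failure mode is a plain key mismatch in a string-keyed registry: the compute function was registered under the misspelled key while lookups use the correct spelling. A toy registry making that concrete; this is an illustrative sketch, not the real autotvm machinery:

```python
_TOPI_REGISTRY = {}


def register_topi_compute(key):
    def wrap(fn):
        _TOPI_REGISTRY[key] = fn
        return fn
    return wrap


@register_topi_compute("dense.biforst")  # the misspelled key from the issue
def dense_compute():
    return "dense"


try:
    _TOPI_REGISTRY["dense.bifrost"]  # what the schedule/Relay side asks for
except KeyError as exc:
    print("lookup failed:", exc)  # lookup failed: 'dense.bifrost'
```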
gh_patches_debug_20922 | rasdani/github-patches | git_diff | pystiche__pystiche-228 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
MD5 hash error
Hi, I get this error when running the script given in the beginner example.
`FileExistsError: bird1.jpg with a different MD5 hash already exists in /root/.cache/pystiche. If you want to overwrite it, set overwrite=True.`
</issue>
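For context: the registry in `demo.py` below pins each demo image to an MD5 digest, so the error above means the cached file no longer matches the pinned hash — typically because the upstream image was replaced. A minimal sketch of how one might recompute the digest to compare against the registered value (the cache path is taken from the error message and may differ on your system):

```python
import hashlib
from pathlib import Path

def file_md5(path, chunk_size=1 << 16):
    """Return the hex MD5 digest of a file, read in chunks."""
    digest = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Assumed cache location, based on the error message above.
cached = Path.home() / ".cache" / "pystiche" / "bird1.jpg"
if cached.exists():
    print(file_md5(cached))  # compare with the md5= value in demo.py
```

If the recomputed digest differs from the hard-coded one, the upstream file changed and the pinned hashes need updating — which is what the patch at the end of this entry does for `bird1.jpg` and `bird2.jpg`.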
<code>
[start of pystiche/demo.py]
1 import logging
2 import sys
3
4 from pystiche.data import (
5 DownloadableImage,
6 DownloadableImageCollection,
7 PixabayLicense,
8 PublicDomainLicense,
9 )
10 from pystiche.optim import OptimLogger
11
12 __all__ = ["demo_images", "demo_logger"]
13
14
15 def demo_images():
16 return DownloadableImageCollection(
17 {
18 "dancing": DownloadableImage(
19 "https://pytorch.org/tutorials/_static/img/neural-style/dancing.jpg",
20 md5="0a2df538901452d639170a2ed89815a4",
21 ),
22 "picasso": DownloadableImage(
23 "https://pytorch.org/tutorials/_static/img/neural-style/picasso.jpg",
24 md5="d1d60fc3f9d0b22d2d826c47934a37ea",
25 ),
26 "bird1": DownloadableImage(
27 "https://cdn.pixabay.com/photo/2016/01/14/11/26/bird-1139734_960_720.jpg",
28 file="bird1.jpg",
29 author="gholmz0",
30 date="09.03.2013",
31 license=PixabayLicense(),
32 md5="d42444d3cd0afa47f07066cd083d6cea",
33 ),
34 "paint": DownloadableImage(
35 "https://cdn.pixabay.com/photo/2017/07/03/20/17/abstract-2468874_960_720.jpg",
36 file="paint.jpg",
37 author="garageband",
38 date="03.07.2017",
39 license=PixabayLicense(),
40 md5="a991e222806ef49d34b172a67cf97d91",
41 ),
42 "bird2": DownloadableImage(
43 "https://cdn.pixabay.com/photo/2013/03/12/17/53/bird-92956_960_720.jpg",
44 file="bird2.jpg",
45 author="12019",
46 date="09.04.2012",
47 license=PixabayLicense(),
48 md5="dda3e1d0f93f783de823b4f91129d44e",
49 ),
50 "mosaic": DownloadableImage(
51 "https://upload.wikimedia.org/wikipedia/commons/2/23/Mosaic_ducks_Massimo.jpg",
52 file="mosaic.jpg",
53 author="Marie-Lan Nguyen",
54 date="2006",
55 license=PublicDomainLicense(),
56 md5="5b60cd1724395f7a0c21dc6dd006f8ae",
57 ),
58 }
59 )
60
61
62 def demo_logger():
63 logger = logging.getLogger("demo_logger")
64 logger.setLevel(logging.INFO)
65
66 sh = logging.StreamHandler(sys.stdout)
67 sh.setLevel(logging.INFO)
68 logger.addHandler(sh)
69
70 return OptimLogger(logger)
71
[end of pystiche/demo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pystiche/demo.py b/pystiche/demo.py
--- a/pystiche/demo.py
+++ b/pystiche/demo.py
@@ -29,7 +29,7 @@
author="gholmz0",
date="09.03.2013",
license=PixabayLicense(),
- md5="d42444d3cd0afa47f07066cd083d6cea",
+ md5="36e5fef725943a5d1d22b5048095da86",
),
"paint": DownloadableImage(
"https://cdn.pixabay.com/photo/2017/07/03/20/17/abstract-2468874_960_720.jpg",
@@ -45,7 +45,7 @@
author="12019",
date="09.04.2012",
license=PixabayLicense(),
- md5="dda3e1d0f93f783de823b4f91129d44e",
+ md5="8c5b608bd579d931e2cfe7229840fe9b",
),
"mosaic": DownloadableImage(
"https://upload.wikimedia.org/wikipedia/commons/2/23/Mosaic_ducks_Massimo.jpg",
| {"golden_diff": "diff --git a/pystiche/demo.py b/pystiche/demo.py\n--- a/pystiche/demo.py\n+++ b/pystiche/demo.py\n@@ -29,7 +29,7 @@\n author=\"gholmz0\",\n date=\"09.03.2013\",\n license=PixabayLicense(),\n- md5=\"d42444d3cd0afa47f07066cd083d6cea\",\n+ md5=\"36e5fef725943a5d1d22b5048095da86\",\n ),\n \"paint\": DownloadableImage(\n \"https://cdn.pixabay.com/photo/2017/07/03/20/17/abstract-2468874_960_720.jpg\",\n@@ -45,7 +45,7 @@\n author=\"12019\",\n date=\"09.04.2012\",\n license=PixabayLicense(),\n- md5=\"dda3e1d0f93f783de823b4f91129d44e\",\n+ md5=\"8c5b608bd579d931e2cfe7229840fe9b\",\n ),\n \"mosaic\": DownloadableImage(\n \"https://upload.wikimedia.org/wikipedia/commons/2/23/Mosaic_ducks_Massimo.jpg\",\n", "issue": "MD5 hash error\nHi, I get this error when running the script given in the example for beginner.\r\n\r\n`FileExistsError: bird1.jpg with a different MD5 hash already exists in /root/.cache/pystiche. If you want to overwrite it, set overwrite=True.`\n", "before_files": [{"content": "import logging\nimport sys\n\nfrom pystiche.data import (\n DownloadableImage,\n DownloadableImageCollection,\n PixabayLicense,\n PublicDomainLicense,\n)\nfrom pystiche.optim import OptimLogger\n\n__all__ = [\"demo_images\", \"demo_logger\"]\n\n\ndef demo_images():\n return DownloadableImageCollection(\n {\n \"dancing\": DownloadableImage(\n \"https://pytorch.org/tutorials/_static/img/neural-style/dancing.jpg\",\n md5=\"0a2df538901452d639170a2ed89815a4\",\n ),\n \"picasso\": DownloadableImage(\n \"https://pytorch.org/tutorials/_static/img/neural-style/picasso.jpg\",\n md5=\"d1d60fc3f9d0b22d2d826c47934a37ea\",\n ),\n \"bird1\": DownloadableImage(\n \"https://cdn.pixabay.com/photo/2016/01/14/11/26/bird-1139734_960_720.jpg\",\n file=\"bird1.jpg\",\n author=\"gholmz0\",\n date=\"09.03.2013\",\n license=PixabayLicense(),\n md5=\"d42444d3cd0afa47f07066cd083d6cea\",\n ),\n \"paint\": DownloadableImage(\n \"https://cdn.pixabay.com/photo/2017/07/03/20/17/abstract-2468874_960_720.jpg\",\n file=\"paint.jpg\",\n author=\"garageband\",\n date=\"03.07.2017\",\n license=PixabayLicense(),\n md5=\"a991e222806ef49d34b172a67cf97d91\",\n ),\n \"bird2\": DownloadableImage(\n \"https://cdn.pixabay.com/photo/2013/03/12/17/53/bird-92956_960_720.jpg\",\n file=\"bird2.jpg\",\n author=\"12019\",\n date=\"09.04.2012\",\n license=PixabayLicense(),\n md5=\"dda3e1d0f93f783de823b4f91129d44e\",\n ),\n \"mosaic\": DownloadableImage(\n \"https://upload.wikimedia.org/wikipedia/commons/2/23/Mosaic_ducks_Massimo.jpg\",\n file=\"mosaic.jpg\",\n author=\"Marie-Lan Nguyen\",\n date=\"2006\",\n license=PublicDomainLicense(),\n md5=\"5b60cd1724395f7a0c21dc6dd006f8ae\",\n ),\n }\n )\n\n\ndef demo_logger():\n logger = logging.getLogger(\"demo_logger\")\n logger.setLevel(logging.INFO)\n\n sh = logging.StreamHandler(sys.stdout)\n sh.setLevel(logging.INFO)\n logger.addHandler(sh)\n\n return OptimLogger(logger)\n", "path": "pystiche/demo.py"}]} | 1,461 | 346 |
gh_patches_debug_28865 | rasdani/github-patches | git_diff | bokeh__bokeh-2790 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Create example of using Hover tool to display custom images
It would be nice to show how someone can use the hover tool to display custom images from a URL/URI when hovering over a region of interest. It would allow users to embed an additional dimension into their plots.
</issue>
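The building block here is that `HoverTool.tooltips` accepts an arbitrary HTML template, and `@column` references are substituted per glyph — so a per-point image URL stored in the data source can be rendered with an `<img>` tag. A minimal sketch (the column name and URLs are illustrative, not from the issue):

```python
from bokeh.models import HoverTool
from bokeh.plotting import ColumnDataSource, figure, output_file, show

source = ColumnDataSource(data=dict(
    x=[1, 2],
    y=[3, 4],
    # One image URL per point; @imgs below is substituted per glyph.
    imgs=['http://example.com/a.png', 'http://example.com/b.png'],
))

hover = HoverTool(tooltips="""
    <div>
        <img src="@imgs" height="42" width="42" border="2"></img>
        <span style="font-size: 15px;">($x, $y)</span>
    </div>
""")

output_file("hover_images.html")
p = figure(plot_width=300, plot_height=300, tools=[hover])
p.circle('x', 'y', size=20, source=source)
show(p)
```

The documentation patch at the end of this entry does essentially this, with real image URLs hosted on bokeh.pydata.org.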
<code>
[start of sphinx/source/docs/user_guide/source_examples/tools_hover_custom_tooltip.py]
1 from bokeh.plotting import figure, output_file, show, ColumnDataSource
2 from bokeh.models import HoverTool
3
4 output_file("toolbar.html")
5
6 source = ColumnDataSource(
7 data=dict(
8 x=[1, 2, 3, 4, 5],
9 y=[2, 5, 8, 2, 7],
10 desc=['A', 'b', 'C', 'd', 'E'],
11 )
12 )
13
14 hover = HoverTool(
15 tooltips="""
16 <div>
17 <span style="font-size: 17px; font-weight: bold;">@desc</span>
18 <span style="font-size: 15px; color: #966;">[$index]</span>
19 </div>
20 <div>
21 <span style="font-size: 15px;">Location</span>
22 <span style="font-size: 10px; color: #696;">($x, $y)</span>
23 </div>
24 """
25 )
26
27 p = figure(plot_width=400, plot_height=400, tools=[hover],
28 title="Mouse over the dots")
29
30 p.circle('x', 'y', size=20, source=source)
31
32 show(p)
33
34
35
[end of sphinx/source/docs/user_guide/source_examples/tools_hover_custom_tooltip.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sphinx/source/docs/user_guide/source_examples/tools_hover_custom_tooltip.py b/sphinx/source/docs/user_guide/source_examples/tools_hover_custom_tooltip.py
--- a/sphinx/source/docs/user_guide/source_examples/tools_hover_custom_tooltip.py
+++ b/sphinx/source/docs/user_guide/source_examples/tools_hover_custom_tooltip.py
@@ -8,18 +8,34 @@
x=[1, 2, 3, 4, 5],
y=[2, 5, 8, 2, 7],
desc=['A', 'b', 'C', 'd', 'E'],
+ imgs = [
+ 'http://bokeh.pydata.org/static/snake.jpg',
+ 'http://bokeh.pydata.org/static/snake2.png',
+ 'http://bokeh.pydata.org/static/snake3D.png',
+ 'http://bokeh.pydata.org/static/snake4_TheRevenge.png',
+ 'http://bokeh.pydata.org/static/snakebite.jpg'
+ ]
)
)
hover = HoverTool(
tooltips="""
<div>
- <span style="font-size: 17px; font-weight: bold;">@desc</span>
- <span style="font-size: 15px; color: #966;">[$index]</span>
- </div>
- <div>
- <span style="font-size: 15px;">Location</span>
- <span style="font-size: 10px; color: #696;">($x, $y)</span>
+ <div>
+ <img
+ src="@imgs" height="42" alt="@imgs" width="42"
+ style="float: left; margin: 0px 15px 15px 0px;"
+ border="2"
+ ></img>
+ </div>
+ <div>
+ <span style="font-size: 17px; font-weight: bold;">@desc</span>
+ <span style="font-size: 15px; color: #966;">[$index]</span>
+ </div>
+ <div>
+ <span style="font-size: 15px;">Location</span>
+ <span style="font-size: 10px; color: #696;">($x, $y)</span>
+ </div>
</div>
"""
)
@@ -30,5 +46,3 @@
p.circle('x', 'y', size=20, source=source)
show(p)
-
-
| {"golden_diff": "diff --git a/sphinx/source/docs/user_guide/source_examples/tools_hover_custom_tooltip.py b/sphinx/source/docs/user_guide/source_examples/tools_hover_custom_tooltip.py\n--- a/sphinx/source/docs/user_guide/source_examples/tools_hover_custom_tooltip.py\n+++ b/sphinx/source/docs/user_guide/source_examples/tools_hover_custom_tooltip.py\n@@ -8,18 +8,34 @@\n x=[1, 2, 3, 4, 5],\n y=[2, 5, 8, 2, 7],\n desc=['A', 'b', 'C', 'd', 'E'],\n+ imgs = [\n+ 'http://bokeh.pydata.org/static/snake.jpg',\n+ 'http://bokeh.pydata.org/static/snake2.png',\n+ 'http://bokeh.pydata.org/static/snake3D.png',\n+ 'http://bokeh.pydata.org/static/snake4_TheRevenge.png',\n+ 'http://bokeh.pydata.org/static/snakebite.jpg'\n+ ]\n )\n )\n \n hover = HoverTool(\n tooltips=\"\"\"\n <div>\n- <span style=\"font-size: 17px; font-weight: bold;\">@desc</span>\n- <span style=\"font-size: 15px; color: #966;\">[$index]</span>\n- </div>\n- <div>\n- <span style=\"font-size: 15px;\">Location</span>\n- <span style=\"font-size: 10px; color: #696;\">($x, $y)</span>\n+ <div>\n+ <img\n+ src=\"@imgs\" height=\"42\" alt=\"@imgs\" width=\"42\"\n+ style=\"float: left; margin: 0px 15px 15px 0px;\"\n+ border=\"2\"\n+ ></img>\n+ </div>\n+ <div>\n+ <span style=\"font-size: 17px; font-weight: bold;\">@desc</span>\n+ <span style=\"font-size: 15px; color: #966;\">[$index]</span>\n+ </div>\n+ <div>\n+ <span style=\"font-size: 15px;\">Location</span>\n+ <span style=\"font-size: 10px; color: #696;\">($x, $y)</span>\n+ </div>\n </div>\n \"\"\"\n )\n@@ -30,5 +46,3 @@\n p.circle('x', 'y', size=20, source=source)\n \n show(p)\n-\n-\n", "issue": "Create example of using Hover tool to display custom images\nIt would be nice to show how someone can use the hovertool to display custom images using URL/URI upon hovering over a region of interest. It would allow users to embed an additional dimension into plots.\n\n", "before_files": [{"content": "from bokeh.plotting import figure, output_file, show, ColumnDataSource\nfrom bokeh.models import HoverTool\n\noutput_file(\"toolbar.html\")\n\nsource = ColumnDataSource(\n data=dict(\n x=[1, 2, 3, 4, 5],\n y=[2, 5, 8, 2, 7],\n desc=['A', 'b', 'C', 'd', 'E'],\n )\n )\n\nhover = HoverTool(\n tooltips=\"\"\"\n <div>\n <span style=\"font-size: 17px; font-weight: bold;\">@desc</span>\n <span style=\"font-size: 15px; color: #966;\">[$index]</span>\n </div>\n <div>\n <span style=\"font-size: 15px;\">Location</span>\n <span style=\"font-size: 10px; color: #696;\">($x, $y)</span>\n </div>\n \"\"\"\n )\n\np = figure(plot_width=400, plot_height=400, tools=[hover],\n title=\"Mouse over the dots\")\n\np.circle('x', 'y', size=20, source=source)\n\nshow(p)\n\n \n", "path": "sphinx/source/docs/user_guide/source_examples/tools_hover_custom_tooltip.py"}]} | 939 | 576 |
gh_patches_debug_11629 | rasdani/github-patches | git_diff | beeware__toga-193 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ProgressBar doesn't appear in a Box [Core] [Cocoa]
Example code: https://gist.github.com/Dayof/528f9dc38f4178dbc25db6bab553e19a
When a progress bar is added inside a box (below the label is the progress bar):

</issue>
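A plausible reading of the symptom — consistent with the fix at the end of this entry, which adds a `rehint()` call reporting the widget's `fittingSize()` to the style engine — is that the Cocoa backend never told the layout system how big the progress bar wants to be, so the box allocates it zero height. A toy sketch of that mechanism; this illustrates hint-driven box layout in general, not toga's actual layout code:

```python
# Toy model: a vertical box stacks children using their hinted heights.
children = [
    {"name": "label", "hint_height": 17},
    {"name": "progressbar", "hint_height": 0},  # no rehint() -> no size hint
]

y = 0
for child in children:
    height = child["hint_height"]
    print(f"{child['name']}: y={y}, height={height}")
    y += height
# The progress bar ends up with height 0, i.e. it never appears.
```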
<code>
[start of src/cocoa/toga_cocoa/widgets/progressbar.py]
1 from toga.interface import ProgressBar as ProgressBarInterface
2
3 from ..libs import *
4 from .base import WidgetMixin
5
6
7 class ProgressBar(ProgressBarInterface, WidgetMixin):
8 def __init__(self, id=None, style=None, max=None, value=None):
9 super().__init__(id=id, style=style, max=max, value=value)
10 self._create()
11
12 def create(self):
13 self._impl = NSProgressIndicator.new()
14 self._impl.setStyle_(NSProgressIndicatorBarStyle)
15 self._impl.setDisplayedWhenStopped_(True)
16
17 # Add the layout constraints
18 self._add_constraints()
19
20 def _set_value(self, value):
21 if value is not None:
22 self._impl.setDoubleValue_(value)
23
24 def start(self):
25 if self._impl and not self._running:
26 self._impl.startAnimation_(self._impl)
27 self._running = True
28
29 def stop(self):
30 if self._impl and self._running:
31 self._impl.stopAnimation_(self._impl)
32 self._running = False
33
34 def _set_max(self, value):
35 if value:
36 self._impl.setIndeterminate_(False)
37 self._impl.setMaxValue_(value)
38 else:
39 self._impl.setIndeterminate_(True)
40
[end of src/cocoa/toga_cocoa/widgets/progressbar.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cocoa/toga_cocoa/widgets/progressbar.py b/src/cocoa/toga_cocoa/widgets/progressbar.py
--- a/src/cocoa/toga_cocoa/widgets/progressbar.py
+++ b/src/cocoa/toga_cocoa/widgets/progressbar.py
@@ -16,6 +16,7 @@
# Add the layout constraints
self._add_constraints()
+ self.rehint()
def _set_value(self, value):
if value is not None:
@@ -37,3 +38,9 @@
self._impl.setMaxValue_(value)
else:
self._impl.setIndeterminate_(True)
+
+ def rehint(self):
+ self.style.hint(
+ height=self._impl.fittingSize().height,
+ width=self._impl.fittingSize().width
+ )
| {"golden_diff": "diff --git a/src/cocoa/toga_cocoa/widgets/progressbar.py b/src/cocoa/toga_cocoa/widgets/progressbar.py\n--- a/src/cocoa/toga_cocoa/widgets/progressbar.py\n+++ b/src/cocoa/toga_cocoa/widgets/progressbar.py\n@@ -16,6 +16,7 @@\n \n # Add the layout constraints\n self._add_constraints()\n+ self.rehint()\n \n def _set_value(self, value):\n if value is not None:\n@@ -37,3 +38,9 @@\n self._impl.setMaxValue_(value)\n else:\n self._impl.setIndeterminate_(True)\n+\n+ def rehint(self):\n+ self.style.hint(\n+ height=self._impl.fittingSize().height,\n+ width=self._impl.fittingSize().width\n+ )\n", "issue": "ProgressBar doesn't appears in a Box [Core] [Cocoa]\nExample code: https://gist.github.com/Dayof/528f9dc38f4178dbc25db6bab553e19a\r\n\r\nWhen a progress bar is add inside of a box (bellow the label is the progress bar):\r\n\r\n\r\n\n", "before_files": [{"content": "from toga.interface import ProgressBar as ProgressBarInterface\n\nfrom ..libs import *\nfrom .base import WidgetMixin\n\n\nclass ProgressBar(ProgressBarInterface, WidgetMixin):\n def __init__(self, id=None, style=None, max=None, value=None):\n super().__init__(id=id, style=style, max=max, value=value)\n self._create()\n\n def create(self):\n self._impl = NSProgressIndicator.new()\n self._impl.setStyle_(NSProgressIndicatorBarStyle)\n self._impl.setDisplayedWhenStopped_(True)\n\n # Add the layout constraints\n self._add_constraints()\n\n def _set_value(self, value):\n if value is not None:\n self._impl.setDoubleValue_(value)\n\n def start(self):\n if self._impl and not self._running:\n self._impl.startAnimation_(self._impl)\n self._running = True\n\n def stop(self):\n if self._impl and self._running:\n self._impl.stopAnimation_(self._impl)\n self._running = False\n\n def _set_max(self, value):\n if value:\n self._impl.setIndeterminate_(False)\n self._impl.setMaxValue_(value)\n else:\n self._impl.setIndeterminate_(True)\n", "path": "src/cocoa/toga_cocoa/widgets/progressbar.py"}]} | 1,051 | 182 |
gh_patches_debug_1094 | rasdani/github-patches | git_diff | ESMCI__cime-4035 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
cheyenne needs a module load python
Now that we require Python 3.5+, we need to do a `module load python` on cheyenne.
The lack of this module load is responsible for a failure in `J_TestCreateNewcase.test_f_createnewcase_with_user_compset` if you run the whole `J_TestCreateNewcase` suite, and may cause other problems as well.
I'll get a fix in shortly.
</issue>
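For context, `check_minimum_python_version` (called in the script below) is the kind of guard that compares `sys.version_info` against a floor and aborts with a readable message. A generic sketch of such a check — not CIME's actual implementation:

```python
import sys

def check_minimum_python_version(major, minor):
    """Exit with a readable message if the interpreter is too old."""
    if sys.version_info[:2] < (major, minor):
        sys.exit(
            "ERROR: Python >= {}.{} is required, found {}.{}".format(
                major, minor, sys.version_info.major, sys.version_info.minor
            )
        )

check_minimum_python_version(3, 6)
```

On a machine whose default `python` is too old, the fix is two-fold: bump this floor (the patch at the end of this entry moves it from (2, 7) to (3, 6)) and make the machine configuration `module load` a python that satisfies it.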
<code>
[start of scripts/Tools/standard_script_setup.py]
1 """
2 Encapsulate the importing of python utils and logging setup, things
3 that every script should do.
4 """
5 # pylint: disable=unused-import
6
7 import sys, os
8 import __main__ as main
9 _CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..")
10 _LIB_DIR = os.path.join(_CIMEROOT, "scripts", "lib")
11 sys.path.append(_LIB_DIR)
12
13 # Important: Allows external tools to link up with CIME
14 os.environ["CIMEROOT"] = _CIMEROOT
15
16 import CIME.utils
17 CIME.utils.check_minimum_python_version(2, 7)
18 CIME.utils.stop_buffering_output()
19 import logging, argparse
20
[end of scripts/Tools/standard_script_setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/Tools/standard_script_setup.py b/scripts/Tools/standard_script_setup.py
--- a/scripts/Tools/standard_script_setup.py
+++ b/scripts/Tools/standard_script_setup.py
@@ -14,6 +14,6 @@
os.environ["CIMEROOT"] = _CIMEROOT
import CIME.utils
-CIME.utils.check_minimum_python_version(2, 7)
+CIME.utils.check_minimum_python_version(3, 6)
CIME.utils.stop_buffering_output()
import logging, argparse
| {"golden_diff": "diff --git a/scripts/Tools/standard_script_setup.py b/scripts/Tools/standard_script_setup.py\n--- a/scripts/Tools/standard_script_setup.py\n+++ b/scripts/Tools/standard_script_setup.py\n@@ -14,6 +14,6 @@\n os.environ[\"CIMEROOT\"] = _CIMEROOT\n \n import CIME.utils\n-CIME.utils.check_minimum_python_version(2, 7)\n+CIME.utils.check_minimum_python_version(3, 6)\n CIME.utils.stop_buffering_output()\n import logging, argparse\n", "issue": "cheyenne needs a module load python\nNow that we require python 3.5+, we need to do a module load python on cheyenne.\r\n\r\nThe lack of this module load is responsible for a failure in `J_TestCreateNewcase.test_f_createnewcase_with_user_compset` if you run the whole `J_TestCreateNewcase` suite, and may cause other problems as well.\r\n\r\nI'll get a fix in shortly.\n", "before_files": [{"content": "\"\"\"\nEncapsulate the importing of python utils and logging setup, things\nthat every script should do.\n\"\"\"\n# pylint: disable=unused-import\n\nimport sys, os\nimport __main__ as main\n_CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\",\"..\")\n_LIB_DIR = os.path.join(_CIMEROOT, \"scripts\", \"lib\")\nsys.path.append(_LIB_DIR)\n\n# Important: Allows external tools to link up with CIME\nos.environ[\"CIMEROOT\"] = _CIMEROOT\n\nimport CIME.utils\nCIME.utils.check_minimum_python_version(2, 7)\nCIME.utils.stop_buffering_output()\nimport logging, argparse\n", "path": "scripts/Tools/standard_script_setup.py"}]} | 818 | 116 |
gh_patches_debug_33119 | rasdani/github-patches | git_diff | nilearn__nilearn-4334 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Globbing should be advertised in doc/examples.
</issue>
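The feature in question: most nilearn image functions accept a glob-style pattern (or a list of filenames) wherever a single filename is accepted, and return a 4D image with one volume per match. A minimal sketch of the behavior the tutorial patch below advertises (the path is illustrative):

```python
from nilearn import image

# A glob pattern expands to every matching file on disk; the result is
# a single 4D image with one volume per matched 3D file.
smoothed = image.smooth_img("/data/haxby2001/subj*/anat*", fwhm=5)
print(smoothed.shape)  # (x, y, z, n_matches)
```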
<code>
[start of examples/00_tutorials/plot_nilearn_101.py]
1 """
2 Basic nilearn example: manipulating and looking at data
3 =======================================================
4
5 A simple example showing how to load an existing Nifti file and use
6 basic nilearn functionalities.
7 """
8
9 # Let us use a Nifti file that is shipped with nilearn
10 from nilearn.datasets import MNI152_FILE_PATH
11
12 # Note that the variable MNI152_FILE_PATH is just a path to a Nifti file
13 print(f"Path to MNI152 template: {MNI152_FILE_PATH!r}")
14
15 # %%
16 # A first step: looking at our data
17 # ----------------------------------
18 #
19 # Let's quickly plot this file:
20 from nilearn import plotting
21
22 plotting.plot_img(MNI152_FILE_PATH)
23
24 # %%
25 # This is not a very pretty plot. We just used the simplest possible
26 # code. There is a whole :ref:`section of the documentation <plotting>`
27 # on making prettier code.
28 #
29 # **Exercise**: Try plotting one of your own files. In the above,
30 # MNI152_FILE_PATH is nothing more than a string with a path pointing to
31 # a nifti image. You can replace it with a string pointing to a file on
32 # your disk. Note that it should be a 3D volume, and not a 4D volume.
33
34 # %%
35 # Simple image manipulation: smoothing
36 # ------------------------------------
37 #
38 # Let's use an image-smoothing function from nilearn:
39 # :func:`nilearn.image.smooth_img`
40 #
41 # Functions containing 'img' can take either a filename or an image as input.
42 #
43 # Here we give as inputs the image filename and the smoothing value in mm
44 from nilearn import image
45
46 smooth_anat_img = image.smooth_img(MNI152_FILE_PATH, fwhm=3)
47
48 # While we are giving a file name as input, the function returns
49 # an in-memory object:
50 smooth_anat_img
51
52 # %%
53 # This is an in-memory object. We can pass it to nilearn function, for
54 # instance to look at it
55 plotting.plot_img(smooth_anat_img)
56
57 # %%
58 # We could also pass it to the smoothing function
59 more_smooth_anat_img = image.smooth_img(smooth_anat_img, fwhm=3)
60 plotting.plot_img(more_smooth_anat_img)
61
62 # %%
63 # Saving results to a file
64 # -------------------------
65 #
66 # We can save any in-memory object as follows:
67 from pathlib import Path
68
69 output_dir = Path.cwd() / "results" / "plot_nilearn_101"
70 output_dir.mkdir(exist_ok=True, parents=True)
71 print(f"Output will be saved to: {output_dir}")
72 more_smooth_anat_img.to_filename(output_dir / "more_smooth_anat_img.nii.gz")
73
74 # %%
75 # Finally, calling plotting.show() is necessary to display the figure
76 # when running as a script outside IPython
77 plotting.show()
78
79 # %%
80 # |
81 #
82 # ______
83 #
84 # To recap, all the nilearn tools can take data as filenames or in-memory
85 # objects, and return brain volumes as in-memory objects. These can be
86 # passed on to other nilearn tools, or saved to disk.
87
88 # sphinx_gallery_dummy_images=1
89
[end of examples/00_tutorials/plot_nilearn_101.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/00_tutorials/plot_nilearn_101.py b/examples/00_tutorials/plot_nilearn_101.py
--- a/examples/00_tutorials/plot_nilearn_101.py
+++ b/examples/00_tutorials/plot_nilearn_101.py
@@ -59,17 +59,49 @@
more_smooth_anat_img = image.smooth_img(smooth_anat_img, fwhm=3)
plotting.plot_img(more_smooth_anat_img)
+
+# %%
+# Globbing over multiple 3D volumes
+# ---------------------------------
+# Nilearn also supports reading multiple volumes at once,
+# using glob-style patterns.
+# For instance, we can smooth volumes from many subjects
+# at once and get a 4D image as output.
+
+# %%
+# First let's fetch Haxby dataset for subject 1 and 2
+from nilearn import datasets
+
+haxby = datasets.fetch_haxby(subjects=[1, 2])
+
+# %%
+# Now we can find the anatomical images from both
+# subjects using the `*` wildcard
+from pathlib import Path
+
+anats_all_subjects = (
+ Path(datasets.get_data_dirs()[0]) / "haxby2001" / "subj*" / "anat*"
+)
+
+# %%
+# Now we can smooth all the anatomical images at once
+anats_all_subjects_smooth = image.smooth_img(anats_all_subjects, fwhm=5)
+
+# %%
+# This is a 4D image containing one volume per subject
+print(anats_all_subjects_smooth.shape)
+
# %%
# Saving results to a file
# -------------------------
#
# We can save any in-memory object as follows:
-from pathlib import Path
-
output_dir = Path.cwd() / "results" / "plot_nilearn_101"
output_dir.mkdir(exist_ok=True, parents=True)
print(f"Output will be saved to: {output_dir}")
-more_smooth_anat_img.to_filename(output_dir / "more_smooth_anat_img.nii.gz")
+anats_all_subjects_smooth.to_filename(
+ output_dir / "anats_all_subjects_smooth.nii.gz"
+)
# %%
# Finally, calling plotting.show() is necessary to display the figure
@@ -81,8 +113,9 @@
#
# ______
#
-# To recap, all the nilearn tools can take data as filenames or in-memory
-# objects, and return brain volumes as in-memory objects. These can be
+# To recap, all the nilearn tools can take data as filenames or
+# glob-style patterns or in-memory objects, and return brain
+# volumes as in-memory objects. These can be
# passed on to other nilearn tools, or saved to disk.
# sphinx_gallery_dummy_images=1
| {"golden_diff": "diff --git a/examples/00_tutorials/plot_nilearn_101.py b/examples/00_tutorials/plot_nilearn_101.py\n--- a/examples/00_tutorials/plot_nilearn_101.py\n+++ b/examples/00_tutorials/plot_nilearn_101.py\n@@ -59,17 +59,49 @@\n more_smooth_anat_img = image.smooth_img(smooth_anat_img, fwhm=3)\n plotting.plot_img(more_smooth_anat_img)\n \n+\n+# %%\n+# Globbing over multiple 3D volumes\n+# ---------------------------------\n+# Nilearn also supports reading multiple volumes at once,\n+# using glob-style patterns.\n+# For instance, we can smooth volumes from many subjects\n+# at once and get a 4D image as output.\n+\n+# %%\n+# First let's fetch Haxby dataset for subject 1 and 2\n+from nilearn import datasets\n+\n+haxby = datasets.fetch_haxby(subjects=[1, 2])\n+\n+# %%\n+# Now we can find the anatomical images from both\n+# subjects using the `*` wildcard\n+from pathlib import Path\n+\n+anats_all_subjects = (\n+ Path(datasets.get_data_dirs()[0]) / \"haxby2001\" / \"subj*\" / \"anat*\"\n+)\n+\n+# %%\n+# Now we can smooth all the anatomical images at once\n+anats_all_subjects_smooth = image.smooth_img(anats_all_subjects, fwhm=5)\n+\n+# %%\n+# This is a 4D image containing one volume per subject\n+print(anats_all_subjects_smooth.shape)\n+\n # %%\n # Saving results to a file\n # -------------------------\n #\n # We can save any in-memory object as follows:\n-from pathlib import Path\n-\n output_dir = Path.cwd() / \"results\" / \"plot_nilearn_101\"\n output_dir.mkdir(exist_ok=True, parents=True)\n print(f\"Output will be saved to: {output_dir}\")\n-more_smooth_anat_img.to_filename(output_dir / \"more_smooth_anat_img.nii.gz\")\n+anats_all_subjects_smooth.to_filename(\n+ output_dir / \"anats_all_subjects_smooth.nii.gz\"\n+)\n \n # %%\n # Finally, calling plotting.show() is necessary to display the figure\n@@ -81,8 +113,9 @@\n #\n # ______\n #\n-# To recap, all the nilearn tools can take data as filenames or in-memory\n-# objects, and return brain volumes as in-memory objects. These can be\n+# To recap, all the nilearn tools can take data as filenames or\n+# glob-style patterns or in-memory objects, and return brain\n+# volumes as in-memory objects. These can be\n # passed on to other nilearn tools, or saved to disk.\n \n # sphinx_gallery_dummy_images=1\n", "issue": "Globbing should be advertised in doc/examples.\n\n", "before_files": [{"content": "\"\"\"\nBasic nilearn example: manipulating and looking at data\n=======================================================\n\nA simple example showing how to load an existing Nifti file and use\nbasic nilearn functionalities.\n\"\"\"\n\n# Let us use a Nifti file that is shipped with nilearn\nfrom nilearn.datasets import MNI152_FILE_PATH\n\n# Note that the variable MNI152_FILE_PATH is just a path to a Nifti file\nprint(f\"Path to MNI152 template: {MNI152_FILE_PATH!r}\")\n\n# %%\n# A first step: looking at our data\n# ----------------------------------\n#\n# Let's quickly plot this file:\nfrom nilearn import plotting\n\nplotting.plot_img(MNI152_FILE_PATH)\n\n# %%\n# This is not a very pretty plot. We just used the simplest possible\n# code. There is a whole :ref:`section of the documentation <plotting>`\n# on making prettier code.\n#\n# **Exercise**: Try plotting one of your own files. In the above,\n# MNI152_FILE_PATH is nothing more than a string with a path pointing to\n# a nifti image. You can replace it with a string pointing to a file on\n# your disk. 
Note that it should be a 3D volume, and not a 4D volume.\n\n# %%\n# Simple image manipulation: smoothing\n# ------------------------------------\n#\n# Let's use an image-smoothing function from nilearn:\n# :func:`nilearn.image.smooth_img`\n#\n# Functions containing 'img' can take either a filename or an image as input.\n#\n# Here we give as inputs the image filename and the smoothing value in mm\nfrom nilearn import image\n\nsmooth_anat_img = image.smooth_img(MNI152_FILE_PATH, fwhm=3)\n\n# While we are giving a file name as input, the function returns\n# an in-memory object:\nsmooth_anat_img\n\n# %%\n# This is an in-memory object. We can pass it to nilearn function, for\n# instance to look at it\nplotting.plot_img(smooth_anat_img)\n\n# %%\n# We could also pass it to the smoothing function\nmore_smooth_anat_img = image.smooth_img(smooth_anat_img, fwhm=3)\nplotting.plot_img(more_smooth_anat_img)\n\n# %%\n# Saving results to a file\n# -------------------------\n#\n# We can save any in-memory object as follows:\nfrom pathlib import Path\n\noutput_dir = Path.cwd() / \"results\" / \"plot_nilearn_101\"\noutput_dir.mkdir(exist_ok=True, parents=True)\nprint(f\"Output will be saved to: {output_dir}\")\nmore_smooth_anat_img.to_filename(output_dir / \"more_smooth_anat_img.nii.gz\")\n\n# %%\n# Finally, calling plotting.show() is necessary to display the figure\n# when running as a script outside IPython\nplotting.show()\n\n# %%\n# |\n#\n# ______\n#\n# To recap, all the nilearn tools can take data as filenames or in-memory\n# objects, and return brain volumes as in-memory objects. These can be\n# passed on to other nilearn tools, or saved to disk.\n\n# sphinx_gallery_dummy_images=1\n", "path": "examples/00_tutorials/plot_nilearn_101.py"}]} | 1,436 | 627 |
gh_patches_debug_16643 | rasdani/github-patches | git_diff | sktime__sktime-5330 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] `temporal_train_test_split` does not work on panel datatypes with unequal length series.
**Describe the bug**
<!--
A clear and concise description of what the bug is.
-->
Also relates to #4968
`temporal_train_test_split` wrongly splits panel datatypes (splitting per unique unequal-length time series). It could be that the split function does not support this type yet; if so, it should throw an error message saying that it does not currently support these datatypes.
**To Reproduce**
<!--
Add a Minimal, Complete, and Verifiable example (for more details, see e.g. https://stackoverflow.com/help/mcve
If the code is too long, feel free to put it in a public gist and link it in the issue: https://gist.github.com
-->
```python
from sktime.forecasting.model_selection import temporal_train_test_split
from sktime.utils._testing.panel import _make_panel
y = _make_panel(n_instances=2, n_timepoints=6)
# make first instance series shorter than the second
y.iloc[4:6] = None
y.dropna(inplace=True)
train_size, test_size = temporal_train_test_split(y, test_size=2)
# show shapes
print(
f"""
{y.shape=}
{train_size.shape=}, {test_size.shape=} #train size should be (6,1) 2+4=6
"""
)
# also has the same issue as #4968; below is the minimal example
train_fh, test_fh = temporal_train_test_split(y, fh=[1,2])
# show shapes
print(
f"""
{y.shape=}
{train_fh.shape=}, {test_fh.shape=} #train size should be (6,1) and test (4,1)
"""
)
```
output
```
y.shape=(10, 1)
train_size.shape=(4, 1), test_size.shape=(4, 1) #train size should be 6 (2+4)
y.shape=(10, 1)
train_fh.shape=(8, 1), test_fh.shape=(2, 1) #train size should be (6,1) and test (4,1)
```
**Versions**
<details>
<!--
Please run the following code snippet and paste the output here:
from sktime import show_versions; show_versions()
-->
main at [3cf69ed](https://github.com/sktime/sktime/commit/3cf69eddba315d6130b661ca5fe8e132e236aa47)
</details>
<!-- Thanks for contributing! -->
</issue>
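For comparison, the split behaves as expected on a single (non-panel) series — the failure above is specific to panel data with unequal-length instances. A quick sketch, assuming the public import path exported by `sktime/split/__init__.py` below:

```python
import pandas as pd

from sktime.split import temporal_train_test_split

y = pd.Series(range(10))
y_train, y_test = temporal_train_test_split(y, test_size=2)
print(len(y_train), len(y_test))  # 8 2 -- the last two observations form the test set
```

Note that the patch at the end of this entry also exports the class-based `TemporalTrainTestSplitter` alongside the function.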
<code>
[start of sktime/split/__init__.py]
1 """Module for splitters."""
2
3 __all__ = [
4 "CutoffSplitter",
5 "ExpandingGreedySplitter",
6 "ExpandingWindowSplitter",
7 "SameLocSplitter",
8 "SingleWindowSplitter",
9 "SlidingWindowSplitter",
10 "TestPlusTrainSplitter",
11 "temporal_train_test_split",
12 ]
13
14 from sktime.split.cutoff import CutoffSplitter
15 from sktime.split.expandinggreedy import ExpandingGreedySplitter
16 from sktime.split.expandingwindow import ExpandingWindowSplitter
17 from sktime.split.sameloc import SameLocSplitter
18 from sktime.split.singlewindow import SingleWindowSplitter
19 from sktime.split.slidingwindow import SlidingWindowSplitter
20 from sktime.split.temporal_train_test_split import temporal_train_test_split
21 from sktime.split.testplustrain import TestPlusTrainSplitter
22
[end of sktime/split/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sktime/split/__init__.py b/sktime/split/__init__.py
--- a/sktime/split/__init__.py
+++ b/sktime/split/__init__.py
@@ -7,6 +7,7 @@
"SameLocSplitter",
"SingleWindowSplitter",
"SlidingWindowSplitter",
+ "TemporalTrainTestSplitter",
"TestPlusTrainSplitter",
"temporal_train_test_split",
]
@@ -17,5 +18,8 @@
from sktime.split.sameloc import SameLocSplitter
from sktime.split.singlewindow import SingleWindowSplitter
from sktime.split.slidingwindow import SlidingWindowSplitter
-from sktime.split.temporal_train_test_split import temporal_train_test_split
+from sktime.split.temporal_train_test_split import (
+ TemporalTrainTestSplitter,
+ temporal_train_test_split,
+)
from sktime.split.testplustrain import TestPlusTrainSplitter
| {"golden_diff": "diff --git a/sktime/split/__init__.py b/sktime/split/__init__.py\n--- a/sktime/split/__init__.py\n+++ b/sktime/split/__init__.py\n@@ -7,6 +7,7 @@\n \"SameLocSplitter\",\n \"SingleWindowSplitter\",\n \"SlidingWindowSplitter\",\n+ \"TemporalTrainTestSplitter\",\n \"TestPlusTrainSplitter\",\n \"temporal_train_test_split\",\n ]\n@@ -17,5 +18,8 @@\n from sktime.split.sameloc import SameLocSplitter\n from sktime.split.singlewindow import SingleWindowSplitter\n from sktime.split.slidingwindow import SlidingWindowSplitter\n-from sktime.split.temporal_train_test_split import temporal_train_test_split\n+from sktime.split.temporal_train_test_split import (\n+ TemporalTrainTestSplitter,\n+ temporal_train_test_split,\n+)\n from sktime.split.testplustrain import TestPlusTrainSplitter\n", "issue": "[BUG] `temporal_train_test_split` does not work on panel datatypes with unequal length series.\n**Describe the bug**\r\n<!--\r\nA clear and concise description of what the bug is.\r\n-->\r\nAlso relates to #4968 \r\n\r\n`temporal_train_test_split` wrongly split panel datatypes (splitting per unique unequal time series). It could be that the split function does not support this type yet, If so, it should throw an error msg telling that it does not currently supported the datatypes.\r\n\r\n**To Reproduce**\r\n<!--\r\nAdd a Minimal, Complete, and Verifiable example (for more details, see e.g. https://stackoverflow.com/help/mcve\r\n\r\nIf the code is too long, feel free to put it in a public gist and link it in the issue: https://gist.github.com\r\n-->\r\n\r\n```python\r\nfrom sktime.forecasting.model_selection import temporal_train_test_split\r\nfrom sktime.utils._testing.panel import _make_panel\r\n\r\ny = _make_panel(n_instances=2, n_timepoints=6)\r\n# make first instance series shorter than the second\r\ny.iloc[4:6] =None\r\ny.dropna(inplace=True)\r\n\r\ntrain_size, test_size = temporal_train_test_split(y, test_size=2) \r\n# show shapes\r\nprint(\r\n f\"\"\"\r\n {y.shape=} \r\n {train_size.shape=}, {test_size.shape=} #train size should be (6,1) 2+4=6\r\n \"\"\"\r\n)\r\n\r\n# has also the same issue as #4968 below is the minimal example\r\ntrain_fh, test_fh = temporal_train_test_split(y, fh=[1,2]) \r\n# show shapes\r\nprint(\r\n f\"\"\"\r\n {y.shape=} \r\n {train_fh.shape=}, {test_fh.shape=} #train size should be (6,1) and test (4,1)\r\n \"\"\"\r\n)\r\n```\r\n\r\noutput\r\n```\r\ny.shape=(10, 1) \r\ntrain_size.shape=(4, 1), test_size.shape=(4, 1) #train size should be 6 (2+4)\r\n\r\ny.shape=(10, 1) \r\ntrain_fh.shape=(8, 1), test_fh.shape=(2, 1) #train size should be (6,1) and test (4,1)\r\n```\r\n\r\n**Versions**\r\n<details>\r\n\r\n<!--\r\nPlease run the following code snippet and paste the output here:\r\n\r\nfrom sktime import show_versions; show_versions()\r\n-->\r\n main at [3cf69ed](https://github.com/sktime/sktime/commit/3cf69eddba315d6130b661ca5fe8e132e236aa47)\r\n\r\n</details>\r\n\r\n<!-- Thanks for contributing! 
-->\r\n\n", "before_files": [{"content": "\"\"\"Module for splitters.\"\"\"\n\n__all__ = [\n \"CutoffSplitter\",\n \"ExpandingGreedySplitter\",\n \"ExpandingWindowSplitter\",\n \"SameLocSplitter\",\n \"SingleWindowSplitter\",\n \"SlidingWindowSplitter\",\n \"TestPlusTrainSplitter\",\n \"temporal_train_test_split\",\n]\n\nfrom sktime.split.cutoff import CutoffSplitter\nfrom sktime.split.expandinggreedy import ExpandingGreedySplitter\nfrom sktime.split.expandingwindow import ExpandingWindowSplitter\nfrom sktime.split.sameloc import SameLocSplitter\nfrom sktime.split.singlewindow import SingleWindowSplitter\nfrom sktime.split.slidingwindow import SlidingWindowSplitter\nfrom sktime.split.temporal_train_test_split import temporal_train_test_split\nfrom sktime.split.testplustrain import TestPlusTrainSplitter\n", "path": "sktime/split/__init__.py"}]} | 1,348 | 218 |
gh_patches_debug_47400 | rasdani/github-patches | git_diff | python-telegram-bot__python-telegram-bot-2132 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add support for Py 3.9
With Py 3.9 out, we should add it to the tests at some point. Maybe that can wait until 3.9.x, x>0, is out, though.
Need to check if all the job thingies work out, as APS doesn't support py3.9 yet and there has been a [report](https://t.me/pythontelegrambotgroup/382731) that it doesn't work (with PTB).
On a related note: APS seems to be [preparing for v4.0](https://github.com/agronholm/apscheduler/issues/465), which will break some stuff, but also supports py3.9 and even uses the new ZoneInfo (with a backport for py3.6+), lifting the restriction of having to use `pytz` timezones. I already subscribed to releases. I guess updating APS in PTB should be done only when 4.x, x>0, is out and we're doing breaking things anyway …
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 """The setup and build script for the python-telegram-bot library."""
3
4 import codecs
5 import os
6 import sys
7
8 from setuptools import setup, find_packages
9
10
11 def requirements():
12 """Build the requirements list for this project"""
13 requirements_list = []
14
15 with open('requirements.txt') as requirements:
16 for install in requirements:
17 requirements_list.append(install.strip())
18
19 return requirements_list
20
21
22 packages = find_packages(exclude=['tests*'])
23 requirements = requirements()
24
25 # Allow for a package install to not use the vendored urllib3
26 UPSTREAM_URLLIB3_FLAG = '--with-upstream-urllib3'
27 if UPSTREAM_URLLIB3_FLAG in sys.argv:
28 sys.argv.remove(UPSTREAM_URLLIB3_FLAG)
29 requirements.append('urllib3 >= 1.19.1')
30 packages = [x for x in packages if not x.startswith('telegram.vendor.ptb_urllib3')]
31
32 with codecs.open('README.rst', 'r', 'utf-8') as fd:
33 fn = os.path.join('telegram', 'version.py')
34 with open(fn) as fh:
35 code = compile(fh.read(), fn, 'exec')
36 exec(code)
37
38 setup(name='python-telegram-bot',
39 version=__version__,
40 author='Leandro Toledo',
41 author_email='[email protected]',
42 license='LGPLv3',
43 url='https://python-telegram-bot.org/',
44 keywords='python telegram bot api wrapper',
45 description="We have made you a wrapper you can't refuse",
46 long_description=fd.read(),
47 packages=packages,
48 install_requires=requirements,
49 extras_require={
50 'json': 'ujson',
51 'socks': 'PySocks'
52 },
53 include_package_data=True,
54 classifiers=[
55 'Development Status :: 5 - Production/Stable',
56 'Intended Audience :: Developers',
57 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
58 'Operating System :: OS Independent',
59 'Topic :: Software Development :: Libraries :: Python Modules',
60 'Topic :: Communications :: Chat',
61 'Topic :: Internet',
62 'Programming Language :: Python',
63 'Programming Language :: Python :: 3',
64 'Programming Language :: Python :: 3.6',
65 'Programming Language :: Python :: 3.7',
66 'Programming Language :: Python :: 3.8',
67 ],)
68
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -64,4 +64,5 @@
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
+ 'Programming Language :: Python :: 3.9',
],)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -64,4 +64,5 @@\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n+ 'Programming Language :: Python :: 3.9',\n ],)\n", "issue": "Add support for Py 3.9\nWith Py 3.9 out, we should add it to the tests at some point. Maybe that can wait, until 3.9.x, x>0 is out, though\r\n\r\nNeed to check, if all the job thingies work out, as APS doesn't support py3.9 yet and there has been a [report](https://t.me/pythontelegrambotgroup/382731) that it doesn't work (with PTB).\r\n\r\nOn a related note: APS seems to be [preparing for v4.0](https://github.com/agronholm/apscheduler/issues/465), which will break some stuff, but also supports py3.9 and even uses the new ZoneInfo (also backporting to py3.6+), lifting the restriction to use `pytz` timezones. I already subscribed to releases. I guess updating APS in PTB should be done only when 4.x, x>0 is out and we're doing breaking things anyway \u2026\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"The setup and build script for the python-telegram-bot library.\"\"\"\n\nimport codecs\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\ndef requirements():\n \"\"\"Build the requirements list for this project\"\"\"\n requirements_list = []\n\n with open('requirements.txt') as requirements:\n for install in requirements:\n requirements_list.append(install.strip())\n\n return requirements_list\n\n\npackages = find_packages(exclude=['tests*'])\nrequirements = requirements()\n\n# Allow for a package install to not use the vendored urllib3\nUPSTREAM_URLLIB3_FLAG = '--with-upstream-urllib3'\nif UPSTREAM_URLLIB3_FLAG in sys.argv:\n sys.argv.remove(UPSTREAM_URLLIB3_FLAG)\n requirements.append('urllib3 >= 1.19.1')\n packages = [x for x in packages if not x.startswith('telegram.vendor.ptb_urllib3')]\n\nwith codecs.open('README.rst', 'r', 'utf-8') as fd:\n fn = os.path.join('telegram', 'version.py')\n with open(fn) as fh:\n code = compile(fh.read(), fn, 'exec')\n exec(code)\n\n setup(name='python-telegram-bot',\n version=__version__,\n author='Leandro Toledo',\n author_email='[email protected]',\n license='LGPLv3',\n url='https://python-telegram-bot.org/',\n keywords='python telegram bot api wrapper',\n description=\"We have made you a wrapper you can't refuse\",\n long_description=fd.read(),\n packages=packages,\n install_requires=requirements,\n extras_require={\n 'json': 'ujson',\n 'socks': 'PySocks'\n },\n include_package_data=True,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',\n 'Operating System :: OS Independent',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Communications :: Chat',\n 'Topic :: Internet',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],)\n", "path": "setup.py"}]} | 1,394 | 85 |
gh_patches_debug_20456 | rasdani/github-patches | git_diff | translate__pootle-5699 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Lost timeSince data for suggestions
</issue>
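The title is terse, but the fix at the end of this entry points at what was meant: suggestion timestamps lost their relative "time since" rendering, and the patch restores it via a new `relative_datetime_format` template filter. That filter boils down to converting the datetime to a Unix timestamp and handing it to pootle's `timesince` helper; a sketch of the same call, assuming a pootle checkout on the path:

```python
import calendar
from datetime import datetime, timedelta

from pootle.local.dates import timesince

# Mirror what the new filter does: datetime -> Unix timestamp -> "time since".
value = datetime.utcnow() - timedelta(hours=2)
print(timesince(calendar.timegm(value.timetuple())))  # e.g. "2 hours ago"
```

In a template the filter would then be applied as `{{ suggestion.creation_time|relative_datetime_format }}` (the variable name is illustrative).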
<code>
[start of pootle/apps/pootle_misc/templatetags/locale.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 from django import template
10 from django.utils.formats import get_format
11 from django.utils.translation import trans_real
12
13 from pootle.core.utils import dateformat
14
15
16 register = template.Library()
17
18
19 @register.simple_tag
20 def locale_dir():
21 """Returns current locale's direction."""
22 return trans_real.get_language_bidi() and "rtl" or "ltr"
23
24
25 @register.filter(name='dateformat')
26 def do_dateformat(value, use_format='c'):
27 """Formats a `value` date using `format`.
28
29 :param value: a datetime object.
30 :param use_format: a format string accepted by
31 :func:`django.utils.formats.get_format` or
32 :func:`django.utils.dateformat.format`. If none is set, the current
33 locale's default format will be used.
34 """
35 try:
36 use_format = get_format(use_format)
37 except AttributeError:
38 pass
39
40 return dateformat.format(value, use_format)
41
42
43 @register.simple_tag
44 def locale_align():
45 """Returns current locale's default alignment."""
46 return trans_real.get_language_bidi() and "right" or "left"
47
[end of pootle/apps/pootle_misc/templatetags/locale.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/apps/pootle_misc/templatetags/locale.py b/pootle/apps/pootle_misc/templatetags/locale.py
--- a/pootle/apps/pootle_misc/templatetags/locale.py
+++ b/pootle/apps/pootle_misc/templatetags/locale.py
@@ -6,11 +6,14 @@
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
+import calendar
+
from django import template
from django.utils.formats import get_format
from django.utils.translation import trans_real
from pootle.core.utils import dateformat
+from pootle.local.dates import timesince
register = template.Library()
@@ -40,6 +43,11 @@
return dateformat.format(value, use_format)
[email protected](name='relative_datetime_format')
+def do_relative_datetime_format(value):
+ return timesince(calendar.timegm(value.timetuple()))
+
+
@register.simple_tag
def locale_align():
"""Returns current locale's default alignment."""
| {"golden_diff": "diff --git a/pootle/apps/pootle_misc/templatetags/locale.py b/pootle/apps/pootle_misc/templatetags/locale.py\n--- a/pootle/apps/pootle_misc/templatetags/locale.py\n+++ b/pootle/apps/pootle_misc/templatetags/locale.py\n@@ -6,11 +6,14 @@\n # or later license. See the LICENSE file for a copy of the license and the\n # AUTHORS file for copyright and authorship information.\n \n+import calendar\n+\n from django import template\n from django.utils.formats import get_format\n from django.utils.translation import trans_real\n \n from pootle.core.utils import dateformat\n+from pootle.local.dates import timesince\n \n \n register = template.Library()\n@@ -40,6 +43,11 @@\n return dateformat.format(value, use_format)\n \n \[email protected](name='relative_datetime_format')\n+def do_relative_datetime_format(value):\n+ return timesince(calendar.timegm(value.timetuple()))\n+\n+\n @register.simple_tag\n def locale_align():\n \"\"\"Returns current locale's default alignment.\"\"\"\n", "issue": "Lost timeSince data for suggestions\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django import template\nfrom django.utils.formats import get_format\nfrom django.utils.translation import trans_real\n\nfrom pootle.core.utils import dateformat\n\n\nregister = template.Library()\n\n\[email protected]_tag\ndef locale_dir():\n \"\"\"Returns current locale's direction.\"\"\"\n return trans_real.get_language_bidi() and \"rtl\" or \"ltr\"\n\n\[email protected](name='dateformat')\ndef do_dateformat(value, use_format='c'):\n \"\"\"Formats a `value` date using `format`.\n\n :param value: a datetime object.\n :param use_format: a format string accepted by\n :func:`django.utils.formats.get_format` or\n :func:`django.utils.dateformat.format`. If none is set, the current\n locale's default format will be used.\n \"\"\"\n try:\n use_format = get_format(use_format)\n except AttributeError:\n pass\n\n return dateformat.format(value, use_format)\n\n\[email protected]_tag\ndef locale_align():\n \"\"\"Returns current locale's default alignment.\"\"\"\n return trans_real.get_language_bidi() and \"right\" or \"left\"\n", "path": "pootle/apps/pootle_misc/templatetags/locale.py"}]} | 958 | 244 |
gh_patches_debug_1597 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-915 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix a few issues in the Django example
The Django example has a few issues; fix them.
</issue>
<code>
[start of docs/examples/django/pages/views.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from django.http import HttpResponse
15
16 from opentelemetry import trace
17 from opentelemetry.sdk.trace import TracerProvider
18 from opentelemetry.sdk.trace.export import (
19 ConsoleSpanExporter,
20 SimpleExportSpanProcessor,
21 )
22
23 trace.set_tracer_provider(TracerProvider())
24 tracer = trace.get_tracer_provider().get_tracer(__name__)
25
26 trace.get_tracer_provider().add_span_processor(
27 SimpleExportSpanProcessor(ConsoleSpanExporter())
28 )
29
30
31 def home_page_view(request):
32 return HttpResponse("Hello, world")
33
[end of docs/examples/django/pages/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/examples/django/pages/views.py b/docs/examples/django/pages/views.py
--- a/docs/examples/django/pages/views.py
+++ b/docs/examples/django/pages/views.py
@@ -21,7 +21,6 @@
)
trace.set_tracer_provider(TracerProvider())
-tracer = trace.get_tracer_provider().get_tracer(__name__)
trace.get_tracer_provider().add_span_processor(
SimpleExportSpanProcessor(ConsoleSpanExporter())
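The patch simply drops the module-level `tracer`, which the example never used. If a view later needs manual spans, a tracer can be acquired at the point of use instead; the following is a sketch of that pattern, not part of the golden patch:

```python
from django.http import HttpResponse

from opentelemetry import trace


def home_page_view(request):
    # Acquire the tracer where it is actually needed.
    tracer = trace.get_tracer(__name__)
    with tracer.start_as_current_span("home_page_view"):
        return HttpResponse("Hello, world")
```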
| {"golden_diff": "diff --git a/docs/examples/django/pages/views.py b/docs/examples/django/pages/views.py\n--- a/docs/examples/django/pages/views.py\n+++ b/docs/examples/django/pages/views.py\n@@ -21,7 +21,6 @@\n )\n \n trace.set_tracer_provider(TracerProvider())\n-tracer = trace.get_tracer_provider().get_tracer(__name__)\n \n trace.get_tracer_provider().add_span_processor(\n SimpleExportSpanProcessor(ConsoleSpanExporter())\n", "issue": "Fix a few issues in Django example\nThe Django example has a few issues, fix them.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom django.http import HttpResponse\n\nfrom opentelemetry import trace\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import (\n ConsoleSpanExporter,\n SimpleExportSpanProcessor,\n)\n\ntrace.set_tracer_provider(TracerProvider())\ntracer = trace.get_tracer_provider().get_tracer(__name__)\n\ntrace.get_tracer_provider().add_span_processor(\n SimpleExportSpanProcessor(ConsoleSpanExporter())\n)\n\n\ndef home_page_view(request):\n return HttpResponse(\"Hello, world\")\n", "path": "docs/examples/django/pages/views.py"}]} | 852 | 100 |
gh_patches_debug_21112 | rasdani/github-patches | git_diff | bridgecrewio__checkov-5171 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Checkov v2.3.261 fails with CKV_AWS_356 for KMS actions which must specify 'all resources'
**Describe the issue**
Checkov v2.3.261's CKV_AWS_356 check highlights IAM policies that are overly permissive, but it incorrectly flags actions in KMS policies that need to apply to all resources, potentially scoped with conditional access, per https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-services.html
This is similar to https://github.com/bridgecrewio/checkov/issues/5134, where certain actions like 'list' require all resources.
**Examples**
```
data "aws_iam_policy_document" "myKmsKey" {
actions = [
"kms:GenerateDataKey",
"kms:Decrypt"
]
resources = [
"*"
]
condition {
test = "ArnEquals"
variable = "aws:SourceArn"
values = [
<SOME OTHER RESOURCE>.arn
]
}
}
}
```
**Version (please complete the following information):**
- Checkov Version 2.3.261
</issue>
<code>
[start of checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py]
1 from typing import Dict, List, Any
2
3 from checkov.common.util.data_structures_utils import pickle_deepcopy
4
5
6 def convert_terraform_conf_to_iam_policy(conf: Dict[str, List[Dict[str, Any]]]) -> Dict[str, List[Dict[str, Any]]]:
7 """
8 converts terraform parsed configuration to iam policy document
9 """
10 result = pickle_deepcopy(conf)
11 if "statement" in result.keys():
12 result["Statement"] = result.pop("statement")
13 for statement in result["Statement"]:
14 if "actions" in statement:
15 statement["Action"] = statement.pop("actions")[0]
16 if "resources" in statement:
17 statement["Resource"] = statement.pop("resources")[0]
18 if "not_actions" in statement:
19 statement["NotAction"] = statement.pop("not_actions")[0]
20 if "not_resources" in statement:
21 statement["NotResource"] = statement.pop("not_resources")[0]
22 if "effect" in statement:
23 statement["Effect"] = statement.pop("effect")[0]
24 if "effect" not in statement and "Effect" not in statement:
25 statement["Effect"] = "Allow"
26 return result
27
[end of checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py b/checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py
--- a/checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py
+++ b/checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
from typing import Dict, List, Any
from checkov.common.util.data_structures_utils import pickle_deepcopy
@@ -23,4 +25,13 @@
statement["Effect"] = statement.pop("effect")[0]
if "effect" not in statement and "Effect" not in statement:
statement["Effect"] = "Allow"
+ if "condition" in statement:
+ conditions = statement.pop("condition")
+ if conditions and isinstance(conditions, list):
+ statement["Condition"] = {}
+ for condition in conditions:
+ cond_operator = condition["test"][0]
+ cond_key = condition["variable"][0]
+ cond_value = condition["values"][0]
+ statement["Condition"].setdefault(cond_operator, {})[cond_key] = cond_value
return result
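To make the new condition handling concrete, here is a rough sketch of the converter's input and output; the input dict follows the list-wrapped shape the converter expects from checkov's HCL parser, and the SNS ARN is invented:

```python
from checkov.terraform.checks.utils.iam_terraform_document_to_policy_converter import (
    convert_terraform_conf_to_iam_policy,
)

conf = {
    "statement": [
        {
            "actions": [["kms:GenerateDataKey", "kms:Decrypt"]],
            "resources": [["*"]],
            "condition": [
                {
                    "test": ["ArnEquals"],
                    "variable": ["aws:SourceArn"],
                    "values": [["arn:aws:sns:us-east-1:123456789012:example"]],
                }
            ],
        }
    ]
}

policy = convert_terraform_conf_to_iam_policy(conf)
# policy["Statement"][0]["Condition"] is now
# {"ArnEquals": {"aws:SourceArn": ["arn:aws:sns:us-east-1:123456789012:example"]}}
# so downstream checks can treat the "*" resource as conditionally scoped.
```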
| {"golden_diff": "diff --git a/checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py b/checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py\n--- a/checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py\n+++ b/checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py\n@@ -1,3 +1,5 @@\n+from __future__ import annotations\n+\n from typing import Dict, List, Any\n \n from checkov.common.util.data_structures_utils import pickle_deepcopy\n@@ -23,4 +25,13 @@\n statement[\"Effect\"] = statement.pop(\"effect\")[0]\n if \"effect\" not in statement and \"Effect\" not in statement:\n statement[\"Effect\"] = \"Allow\"\n+ if \"condition\" in statement:\n+ conditions = statement.pop(\"condition\")\n+ if conditions and isinstance(conditions, list):\n+ statement[\"Condition\"] = {}\n+ for condition in conditions:\n+ cond_operator = condition[\"test\"][0]\n+ cond_key = condition[\"variable\"][0]\n+ cond_value = condition[\"values\"][0]\n+ statement[\"Condition\"].setdefault(cond_operator, {})[cond_key] = cond_value\n return result\n", "issue": "Checkov v2.3.261 fails with CKV_AWS_356 for KMS actions which must specify 'all resources'\n**Describe the issue**\r\nCheckov v2.3.261 fails with CKV_AWS_356 highlights IAM policies which are overly permissive but is incorrectly identifying actions for KMS policies which need to be for all resources potentially scoped with conditional access per https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-services.html\r\n\r\nSimilar issue for https://github.com/bridgecrewio/checkov/issues/5134 where certain actions like 'list' require all resources.\r\n\r\n**Examples**\r\n```\r\ndata \"aws_iam_policy_document\" \"myKmsKey\" {\r\n actions = [\r\n \"kms:GenerateDataKey\",\r\n \"kms:Decrypt\"\r\n ]\r\n resources = [\r\n \"*\"\r\n ]\r\n\r\n condition {\r\n test = \"ArnEquals\"\r\n variable = \"aws:SourceArn\"\r\n values = [\r\n <SOME OTHER RESOURCE>.arn\r\n ]\r\n }\r\n }\r\n}\r\n```\r\n**Version (please complete the following information):**\r\n - Checkov Version 2.3.261\r\n\n", "before_files": [{"content": "from typing import Dict, List, Any\n\nfrom checkov.common.util.data_structures_utils import pickle_deepcopy\n\n\ndef convert_terraform_conf_to_iam_policy(conf: Dict[str, List[Dict[str, Any]]]) -> Dict[str, List[Dict[str, Any]]]:\n \"\"\"\n converts terraform parsed configuration to iam policy document\n \"\"\"\n result = pickle_deepcopy(conf)\n if \"statement\" in result.keys():\n result[\"Statement\"] = result.pop(\"statement\")\n for statement in result[\"Statement\"]:\n if \"actions\" in statement:\n statement[\"Action\"] = statement.pop(\"actions\")[0]\n if \"resources\" in statement:\n statement[\"Resource\"] = statement.pop(\"resources\")[0]\n if \"not_actions\" in statement:\n statement[\"NotAction\"] = statement.pop(\"not_actions\")[0]\n if \"not_resources\" in statement:\n statement[\"NotResource\"] = statement.pop(\"not_resources\")[0]\n if \"effect\" in statement:\n statement[\"Effect\"] = statement.pop(\"effect\")[0]\n if \"effect\" not in statement and \"Effect\" not in statement:\n statement[\"Effect\"] = \"Allow\"\n return result\n", "path": "checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py"}]} | 1,106 | 271 |
gh_patches_debug_35693 | rasdani/github-patches | git_diff | falconry__falcon-1987 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update asgi look examples to use aioredis 2
A new major version of aioredis was released, and it has some API changes.
This is the changelog https://github.com/aio-libs/aioredis-py/blob/master/CHANGELOG.md
</issue>
<code>
[start of examples/asgilook/asgilook/config.py]
1 import os
2 import pathlib
3 import uuid
4
5 import aioredis
6
7
8 class Config:
9 DEFAULT_CONFIG_PATH = '/tmp/asgilook'
10 DEFAULT_MIN_THUMB_SIZE = 64
11 DEFAULT_REDIS_HOST = 'redis://localhost'
12 DEFAULT_REDIS_POOL = aioredis.create_redis_pool
13 DEFAULT_UUID_GENERATOR = uuid.uuid4
14
15 def __init__(self):
16 self.storage_path = pathlib.Path(
17 os.environ.get('ASGI_LOOK_STORAGE_PATH', self.DEFAULT_CONFIG_PATH)
18 )
19 self.storage_path.mkdir(parents=True, exist_ok=True)
20
21 self.create_redis_pool = Config.DEFAULT_REDIS_POOL
22 self.min_thumb_size = self.DEFAULT_MIN_THUMB_SIZE
23 self.redis_host = self.DEFAULT_REDIS_HOST
24 self.uuid_generator = Config.DEFAULT_UUID_GENERATOR
25
[end of examples/asgilook/asgilook/config.py]
[start of examples/asgilook/asgilook/cache.py]
1 import msgpack
2
3
4 class RedisCache:
5 PREFIX = 'asgilook:'
6 INVALIDATE_ON = frozenset({'DELETE', 'POST', 'PUT'})
7 CACHE_HEADER = 'X-ASGILook-Cache'
8 TTL = 3600
9
10 def __init__(self, config):
11 self._config = config
12
13 # NOTE(vytas): To be initialized upon application startup (see the
14 # method below).
15 self._redis = None
16
17 async def _serialize_response(self, resp):
18 data = await resp.render_body()
19 return msgpack.packb([resp.content_type, data], use_bin_type=True)
20
21 def _deserialize_response(self, resp, data):
22 resp.content_type, resp.data = msgpack.unpackb(data, raw=False)
23 resp.complete = True
24 resp.context.cached = True
25
26 async def process_startup(self, scope, event):
27 if self._redis is None:
28 self._redis = await self._config.create_redis_pool(self._config.redis_host)
29
30 async def process_request(self, req, resp):
31 resp.context.cached = False
32
33 if req.method in self.INVALIDATE_ON:
34 return
35
36 key = f'{self.PREFIX}/{req.path}'
37 data = await self._redis.get(key)
38 if data is not None:
39 self._deserialize_response(resp, data)
40 resp.set_header(self.CACHE_HEADER, 'Hit')
41 else:
42 resp.set_header(self.CACHE_HEADER, 'Miss')
43
44 async def process_response(self, req, resp, resource, req_succeeded):
45 if not req_succeeded:
46 return
47
48 key = f'{self.PREFIX}/{req.path}'
49
50 if req.method in self.INVALIDATE_ON:
51 await self._redis.delete(key)
52 elif not resp.context.cached:
53 data = await self._serialize_response(resp)
54 await self._redis.set(key, data, expire=self.TTL)
55
[end of examples/asgilook/asgilook/cache.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/asgilook/asgilook/cache.py b/examples/asgilook/asgilook/cache.py
--- a/examples/asgilook/asgilook/cache.py
+++ b/examples/asgilook/asgilook/cache.py
@@ -9,10 +9,7 @@
def __init__(self, config):
self._config = config
-
- # NOTE(vytas): To be initialized upon application startup (see the
- # method below).
- self._redis = None
+ self._redis = self._config.redis_from_url(self._config.redis_host)
async def _serialize_response(self, resp):
data = await resp.render_body()
@@ -24,8 +21,10 @@
resp.context.cached = True
async def process_startup(self, scope, event):
- if self._redis is None:
- self._redis = await self._config.create_redis_pool(self._config.redis_host)
+ await self._redis.ping()
+
+ async def process_shutdown(self, scope, event):
+ await self._redis.close()
async def process_request(self, req, resp):
resp.context.cached = False
@@ -51,4 +50,4 @@
await self._redis.delete(key)
elif not resp.context.cached:
data = await self._serialize_response(resp)
- await self._redis.set(key, data, expire=self.TTL)
+ await self._redis.set(key, data, ex=self.TTL)
diff --git a/examples/asgilook/asgilook/config.py b/examples/asgilook/asgilook/config.py
--- a/examples/asgilook/asgilook/config.py
+++ b/examples/asgilook/asgilook/config.py
@@ -9,7 +9,7 @@
DEFAULT_CONFIG_PATH = '/tmp/asgilook'
DEFAULT_MIN_THUMB_SIZE = 64
DEFAULT_REDIS_HOST = 'redis://localhost'
- DEFAULT_REDIS_POOL = aioredis.create_redis_pool
+ DEFAULT_REDIS_FROM_URL = aioredis.from_url
DEFAULT_UUID_GENERATOR = uuid.uuid4
def __init__(self):
@@ -18,7 +18,7 @@
)
self.storage_path.mkdir(parents=True, exist_ok=True)
- self.create_redis_pool = Config.DEFAULT_REDIS_POOL
+ self.redis_from_url = Config.DEFAULT_REDIS_FROM_URL
self.min_thumb_size = self.DEFAULT_MIN_THUMB_SIZE
self.redis_host = self.DEFAULT_REDIS_HOST
self.uuid_generator = Config.DEFAULT_UUID_GENERATOR
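As a side-by-side reminder of the API changes this migration covers, here is a small sketch; the URL and key are placeholders:

```python
import aioredis


async def demo():
    # aioredis 1.x:
    #     redis = await aioredis.create_redis_pool('redis://localhost')
    #     await redis.set('key', b'value', expire=3600)
    # aioredis 2.x: from_url() returns a client without awaiting, and
    # expirations use the redis-py style `ex` keyword.
    redis = aioredis.from_url('redis://localhost')
    await redis.ping()
    await redis.set('key', b'value', ex=3600)
    await redis.close()
```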
| {"golden_diff": "diff --git a/examples/asgilook/asgilook/cache.py b/examples/asgilook/asgilook/cache.py\n--- a/examples/asgilook/asgilook/cache.py\n+++ b/examples/asgilook/asgilook/cache.py\n@@ -9,10 +9,7 @@\n \n def __init__(self, config):\n self._config = config\n-\n- # NOTE(vytas): To be initialized upon application startup (see the\n- # method below).\n- self._redis = None\n+ self._redis = self._config.redis_from_url(self._config.redis_host)\n \n async def _serialize_response(self, resp):\n data = await resp.render_body()\n@@ -24,8 +21,10 @@\n resp.context.cached = True\n \n async def process_startup(self, scope, event):\n- if self._redis is None:\n- self._redis = await self._config.create_redis_pool(self._config.redis_host)\n+ await self._redis.ping()\n+\n+ async def process_shutdown(self, scope, event):\n+ await self._redis.close()\n \n async def process_request(self, req, resp):\n resp.context.cached = False\n@@ -51,4 +50,4 @@\n await self._redis.delete(key)\n elif not resp.context.cached:\n data = await self._serialize_response(resp)\n- await self._redis.set(key, data, expire=self.TTL)\n+ await self._redis.set(key, data, ex=self.TTL)\ndiff --git a/examples/asgilook/asgilook/config.py b/examples/asgilook/asgilook/config.py\n--- a/examples/asgilook/asgilook/config.py\n+++ b/examples/asgilook/asgilook/config.py\n@@ -9,7 +9,7 @@\n DEFAULT_CONFIG_PATH = '/tmp/asgilook'\n DEFAULT_MIN_THUMB_SIZE = 64\n DEFAULT_REDIS_HOST = 'redis://localhost'\n- DEFAULT_REDIS_POOL = aioredis.create_redis_pool\n+ DEFAULT_REDIS_FROM_URL = aioredis.from_url\n DEFAULT_UUID_GENERATOR = uuid.uuid4\n \n def __init__(self):\n@@ -18,7 +18,7 @@\n )\n self.storage_path.mkdir(parents=True, exist_ok=True)\n \n- self.create_redis_pool = Config.DEFAULT_REDIS_POOL\n+ self.redis_from_url = Config.DEFAULT_REDIS_FROM_URL\n self.min_thumb_size = self.DEFAULT_MIN_THUMB_SIZE\n self.redis_host = self.DEFAULT_REDIS_HOST\n self.uuid_generator = Config.DEFAULT_UUID_GENERATOR\n", "issue": "Update asgi look examples to use aioredis 2\nA new major vesion of aioredis was released, and it has some api changes.\r\nThis is the changelog https://github.com/aio-libs/aioredis-py/blob/master/CHANGELOG.md\n", "before_files": [{"content": "import os\nimport pathlib\nimport uuid\n\nimport aioredis\n\n\nclass Config:\n DEFAULT_CONFIG_PATH = '/tmp/asgilook'\n DEFAULT_MIN_THUMB_SIZE = 64\n DEFAULT_REDIS_HOST = 'redis://localhost'\n DEFAULT_REDIS_POOL = aioredis.create_redis_pool\n DEFAULT_UUID_GENERATOR = uuid.uuid4\n\n def __init__(self):\n self.storage_path = pathlib.Path(\n os.environ.get('ASGI_LOOK_STORAGE_PATH', self.DEFAULT_CONFIG_PATH)\n )\n self.storage_path.mkdir(parents=True, exist_ok=True)\n\n self.create_redis_pool = Config.DEFAULT_REDIS_POOL\n self.min_thumb_size = self.DEFAULT_MIN_THUMB_SIZE\n self.redis_host = self.DEFAULT_REDIS_HOST\n self.uuid_generator = Config.DEFAULT_UUID_GENERATOR\n", "path": "examples/asgilook/asgilook/config.py"}, {"content": "import msgpack\n\n\nclass RedisCache:\n PREFIX = 'asgilook:'\n INVALIDATE_ON = frozenset({'DELETE', 'POST', 'PUT'})\n CACHE_HEADER = 'X-ASGILook-Cache'\n TTL = 3600\n\n def __init__(self, config):\n self._config = config\n\n # NOTE(vytas): To be initialized upon application startup (see the\n # method below).\n self._redis = None\n\n async def _serialize_response(self, resp):\n data = await resp.render_body()\n return msgpack.packb([resp.content_type, data], use_bin_type=True)\n\n def _deserialize_response(self, resp, data):\n resp.content_type, resp.data = msgpack.unpackb(data, 
raw=False)\n resp.complete = True\n resp.context.cached = True\n\n async def process_startup(self, scope, event):\n if self._redis is None:\n self._redis = await self._config.create_redis_pool(self._config.redis_host)\n\n async def process_request(self, req, resp):\n resp.context.cached = False\n\n if req.method in self.INVALIDATE_ON:\n return\n\n key = f'{self.PREFIX}/{req.path}'\n data = await self._redis.get(key)\n if data is not None:\n self._deserialize_response(resp, data)\n resp.set_header(self.CACHE_HEADER, 'Hit')\n else:\n resp.set_header(self.CACHE_HEADER, 'Miss')\n\n async def process_response(self, req, resp, resource, req_succeeded):\n if not req_succeeded:\n return\n\n key = f'{self.PREFIX}/{req.path}'\n\n if req.method in self.INVALIDATE_ON:\n await self._redis.delete(key)\n elif not resp.context.cached:\n data = await self._serialize_response(resp)\n await self._redis.set(key, data, expire=self.TTL)\n", "path": "examples/asgilook/asgilook/cache.py"}]} | 1,357 | 553 |
gh_patches_debug_8028 | rasdani/github-patches | git_diff | e-valuation__EvaP-848 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Username case sensitivity
Usernames are case sensitive. The importer makes all usernames lowercase, but accounts created automatically when logging in with Kerberos authentication can have uppercase letters.
This can lead to two users having the same username, and then the system crashes on login.
Automatically created accounts should also get lowercase usernames, even if the user enters the name differently.
</issue>
<code>
[start of evap/evaluation/forms.py]
1 from django import forms
2 from django.contrib.auth import authenticate
3 from django.utils.translation import ugettext_lazy as _
4 from django.views.decorators.debug import sensitive_variables
5
6 from evap.evaluation.models import UserProfile
7
8
9 class LoginUsernameForm(forms.Form):
10 """Form encapsulating the login with username and password, for example from an Active Directory.
11 """
12
13 username = forms.CharField(label=_("Username"), max_length=254)
14 password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
15
16 def __init__(self, request=None, *args, **kwargs):
17 """
18 If request is passed in, the form will validate that cookies are
19 enabled. Note that the request (a HttpRequest object) must have set a
20 cookie with the key TEST_COOKIE_NAME and value TEST_COOKIE_VALUE before
21 running this validation.
22 """
23 self.request = request
24 self.user_cache = None
25 super().__init__(*args, **kwargs)
26
27 @sensitive_variables('password')
28 def clean_password(self):
29 username = self.cleaned_data.get('username')
30 password = self.cleaned_data.get('password')
31
32 if username and password:
33 self.user_cache = authenticate(username=username, password=password)
34 if self.user_cache is None:
35 raise forms.ValidationError(_("Please enter a correct username and password."))
36 self.check_for_test_cookie()
37 return password
38
39 def check_for_test_cookie(self):
40 if self.request and not self.request.session.test_cookie_worked():
41 raise forms.ValidationError(_("Your Web browser doesn't appear to have cookies enabled. Cookies are required for logging in."))
42
43 def get_user_id(self):
44 if self.user_cache:
45 return self.user_cache.id
46 return None
47
48 def get_user(self):
49 return self.user_cache
50
51
52 class NewKeyForm(forms.Form):
53 email = forms.EmailField(label=_("Email address"))
54
55 def __init__(self, *args, **kwargs):
56 self.user_cache = None
57
58 super().__init__(*args, **kwargs)
59
60 def clean_email(self):
61 email = self.cleaned_data.get('email')
62
63 if not UserProfile.email_needs_login_key(email):
64 raise forms.ValidationError(_("HPI users cannot request login keys. Please login using your domain credentials."))
65
66 try:
67 user = UserProfile.objects.get(email__iexact=email)
68 self.user_cache = user
69 except UserProfile.DoesNotExist:
70 raise forms.ValidationError(_("No user with this email address was found. Please make sure to enter the email address already known to the university office."))
71
72 return email
73
74 def get_user(self):
75 return self.user_cache
76
[end of evap/evaluation/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/evap/evaluation/forms.py b/evap/evaluation/forms.py
--- a/evap/evaluation/forms.py
+++ b/evap/evaluation/forms.py
@@ -29,6 +29,9 @@
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
+ # django-auth-kerberos might create a new userprofile. make sure it gets a lowercase username.
+ username = username.lower()
+
if username and password:
self.user_cache = authenticate(username=username, password=password)
if self.user_cache is None:
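A tiny sketch of the invariant the added line enforces; the usernames here are made up:

```python
def normalize_username(username: str) -> str:
    # Mirrors the patched clean_password(): lowercase before authenticate(),
    # so django-auth-kerberos never auto-creates a mixed-case duplicate of an
    # existing all-lowercase account.
    return username.lower()


assert normalize_username("JDoe") == normalize_username("jdoe") == "jdoe"
```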
| {"golden_diff": "diff --git a/evap/evaluation/forms.py b/evap/evaluation/forms.py\n--- a/evap/evaluation/forms.py\n+++ b/evap/evaluation/forms.py\n@@ -29,6 +29,9 @@\n username = self.cleaned_data.get('username')\n password = self.cleaned_data.get('password')\n \n+ # django-auth-kerberos might create a new userprofile. make sure it gets a lowercase username.\n+ username = username.lower()\n+\n if username and password:\n self.user_cache = authenticate(username=username, password=password)\n if self.user_cache is None:\n", "issue": "Username case sensitivity\nUsernames are case sensitive. The importer makes all usernames lowercase, but automatically created accounts when logging in with Kerberos authentification can have uppercase letters.\nThis can lead to two users having the same username and then the system crashed on login.\n\nAutomatically created accounts should also get lowercase usernames, even if the user enters the name differently.\n\n", "before_files": [{"content": "from django import forms\nfrom django.contrib.auth import authenticate\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.decorators.debug import sensitive_variables\n\nfrom evap.evaluation.models import UserProfile\n\n\nclass LoginUsernameForm(forms.Form):\n \"\"\"Form encapsulating the login with username and password, for example from an Active Directory.\n \"\"\"\n\n username = forms.CharField(label=_(\"Username\"), max_length=254)\n password = forms.CharField(label=_(\"Password\"), widget=forms.PasswordInput)\n\n def __init__(self, request=None, *args, **kwargs):\n \"\"\"\n If request is passed in, the form will validate that cookies are\n enabled. Note that the request (a HttpRequest object) must have set a\n cookie with the key TEST_COOKIE_NAME and value TEST_COOKIE_VALUE before\n running this validation.\n \"\"\"\n self.request = request\n self.user_cache = None\n super().__init__(*args, **kwargs)\n\n @sensitive_variables('password')\n def clean_password(self):\n username = self.cleaned_data.get('username')\n password = self.cleaned_data.get('password')\n\n if username and password:\n self.user_cache = authenticate(username=username, password=password)\n if self.user_cache is None:\n raise forms.ValidationError(_(\"Please enter a correct username and password.\"))\n self.check_for_test_cookie()\n return password\n\n def check_for_test_cookie(self):\n if self.request and not self.request.session.test_cookie_worked():\n raise forms.ValidationError(_(\"Your Web browser doesn't appear to have cookies enabled. Cookies are required for logging in.\"))\n\n def get_user_id(self):\n if self.user_cache:\n return self.user_cache.id\n return None\n\n def get_user(self):\n return self.user_cache\n\n\nclass NewKeyForm(forms.Form):\n email = forms.EmailField(label=_(\"Email address\"))\n\n def __init__(self, *args, **kwargs):\n self.user_cache = None\n\n super().__init__(*args, **kwargs)\n\n def clean_email(self):\n email = self.cleaned_data.get('email')\n\n if not UserProfile.email_needs_login_key(email):\n raise forms.ValidationError(_(\"HPI users cannot request login keys. Please login using your domain credentials.\"))\n\n try:\n user = UserProfile.objects.get(email__iexact=email)\n self.user_cache = user\n except UserProfile.DoesNotExist:\n raise forms.ValidationError(_(\"No user with this email address was found. 
Please make sure to enter the email address already known to the university office.\"))\n\n return email\n\n def get_user(self):\n return self.user_cache\n", "path": "evap/evaluation/forms.py"}]} | 1,301 | 131 |
gh_patches_debug_6806 | rasdani/github-patches | git_diff | Pylons__pyramid-2674 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
reify docstring doesn't render properly
The testsetup section is omitted from the rendered docs at http://pyramid.readthedocs.io/en/latest/api/decorator.html#pyramid.decorator.reify
Unfortunately this section is essential for understanding the example.
</issue>
<code>
[start of pyramid/decorator.py]
1 from functools import update_wrapper
2
3
4 class reify(object):
5 """ Use as a class method decorator. It operates almost exactly like the
6 Python ``@property`` decorator, but it puts the result of the method it
7 decorates into the instance dict after the first call, effectively
8 replacing the function it decorates with an instance variable. It is, in
9 Python parlance, a non-data descriptor. An example:
10
11 .. testsetup::
12
13 from pyramid.decorator import reify
14
15 class Foo(object):
16 @reify
17 def jammy(self):
18 print('jammy called')
19 return 1
20
21 And usage of Foo:
22
23 .. doctest::
24
25 >>> f = Foo()
26 >>> v = f.jammy
27 jammy called
28 >>> print(v)
29 1
30 >>> f.jammy
31 1
32 >>> # jammy func not called the second time; it replaced itself with 1
33 >>> # Note: reassignment is possible
34 >>> f.jammy = 2
35 >>> f.jammy
36 2
37 """
38 def __init__(self, wrapped):
39 self.wrapped = wrapped
40 update_wrapper(self, wrapped)
41
42 def __get__(self, inst, objtype=None):
43 if inst is None:
44 return self
45 val = self.wrapped(inst)
46 setattr(inst, self.wrapped.__name__, val)
47 return val
48
49
[end of pyramid/decorator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyramid/decorator.py b/pyramid/decorator.py
--- a/pyramid/decorator.py
+++ b/pyramid/decorator.py
@@ -8,6 +8,16 @@
replacing the function it decorates with an instance variable. It is, in
Python parlance, a non-data descriptor. An example:
+ .. code-block:: python
+
+ from pyramid.decorator import reify
+
+ class Foo(object):
+ @reify
+ def jammy(self):
+ print('jammy called')
+ return 1
+
.. testsetup::
from pyramid.decorator import reify
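Only the `.. testsetup::` block was invisible in the rendered HTML (Sphinx executes it when running doctests but does not display it), so duplicating the class in a visible `.. code-block:: python` fixes the page without touching the doctest. The documented behavior itself can be checked directly; a runnable sketch:

```python
from pyramid.decorator import reify


class Foo:
    @reify
    def jammy(self):
        print('jammy called')
        return 1


f = Foo()
assert f.jammy == 1           # prints "jammy called" exactly once
assert 'jammy' in f.__dict__  # the value now lives in the instance dict
f.jammy = 2                   # reassignment works: reify is a non-data descriptor
assert f.jammy == 2
```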
| {"golden_diff": "diff --git a/pyramid/decorator.py b/pyramid/decorator.py\n--- a/pyramid/decorator.py\n+++ b/pyramid/decorator.py\n@@ -8,6 +8,16 @@\n replacing the function it decorates with an instance variable. It is, in\n Python parlance, a non-data descriptor. An example:\n \n+ .. code-block:: python\n+\n+ from pyramid.decorator import reify\n+\n+ class Foo(object):\n+ @reify\n+ def jammy(self):\n+ print('jammy called')\n+ return 1\n+\n .. testsetup::\n \n from pyramid.decorator import reify\n", "issue": "reify docstring doesn't render properly\nThe testsetup section is omitted from the rendered docs at http://pyramid.readthedocs.io/en/latest/api/decorator.html#pyramid.decorator.reify\n\nUnfortunately this section is essential for understanding the example.\n\n", "before_files": [{"content": "from functools import update_wrapper\n\n\nclass reify(object):\n \"\"\" Use as a class method decorator. It operates almost exactly like the\n Python ``@property`` decorator, but it puts the result of the method it\n decorates into the instance dict after the first call, effectively\n replacing the function it decorates with an instance variable. It is, in\n Python parlance, a non-data descriptor. An example:\n\n .. testsetup::\n\n from pyramid.decorator import reify\n\n class Foo(object):\n @reify\n def jammy(self):\n print('jammy called')\n return 1\n\n And usage of Foo:\n\n .. doctest::\n\n >>> f = Foo()\n >>> v = f.jammy\n jammy called\n >>> print(v)\n 1\n >>> f.jammy\n 1\n >>> # jammy func not called the second time; it replaced itself with 1\n >>> # Note: reassignment is possible\n >>> f.jammy = 2\n >>> f.jammy\n 2\n \"\"\"\n def __init__(self, wrapped):\n self.wrapped = wrapped\n update_wrapper(self, wrapped)\n\n def __get__(self, inst, objtype=None):\n if inst is None:\n return self\n val = self.wrapped(inst)\n setattr(inst, self.wrapped.__name__, val)\n return val\n\n", "path": "pyramid/decorator.py"}]} | 995 | 150 |
gh_patches_debug_27882 | rasdani/github-patches | git_diff | cornellius-gp__gpytorch-644 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Where is `fast_pred_var` moving?
I received the following warning when using `fast_pred_var`:
```
/cluster/nhunt/anaconda/envs/bayes_opt/lib/python3.7/site-packages/gpytorch/beta_features.py:17:
DeprecationWarning: `gpytorch.settings.fast_pred_var` has moved to `gpytorch.settings.fast_pred_var`.
```
It seems that I'm being warned that `fast_pred_var` has moved to its current location. Was there a typo in the warning about how we should be using this setting now?
```bash
$ pip list | grep gpytorch
gpytorch 0.2.1
```
</issue>
<code>
[start of gpytorch/__init__.py]
1 #!/usr/bin/env python3
2 from .module import Module
3 from . import (
4 beta_features,
5 distributions,
6 kernels,
7 lazy,
8 likelihoods,
9 means,
10 mlls,
11 models,
12 priors,
13 settings,
14 utils,
15 variational,
16 )
17 from .functions import (
18 add_diag,
19 add_jitter,
20 dsmm,
21 inv_matmul,
22 inv_quad,
23 inv_quad_logdet,
24 logdet,
25 log_normal_cdf,
26 matmul,
27 normal_cdf,
28 root_decomposition,
29 root_inv_decomposition,
30 # Deprecated
31 inv_quad_log_det,
32 log_det,
33 )
34 from .mlls import ExactMarginalLogLikelihood, VariationalMarginalLogLikelihood
35 from .lazy import lazify, delazify
36
37
38 __version__ = "0.2.1"
39
40 # Old deprecated stuff
41 fast_pred_var = beta_features._moved_beta_feature(settings.fast_pred_var, "gpytorch.settings.fast_pred_var")
42
43 __all__ = [
44 # Submodules
45 "distributions",
46 "kernels",
47 "lazy",
48 "likelihoods",
49 "means",
50 "mlls",
51 "models",
52 "priors",
53 "utils",
54 "variational",
55 # Classes
56 "Module",
57 "ExactMarginalLogLikelihood",
58 "VariationalMarginalLogLikelihood",
59 # Functions
60 "add_diag",
61 "add_jitter",
62 "delazify",
63 "dsmm",
64 "inv_matmul",
65 "inv_quad",
66 "inv_quad_logdet",
67 "lazify",
68 "logdet",
69 "log_normal_cdf",
70 "matmul",
71 "normal_cdf",
72 "root_decomposition",
73 "root_inv_decomposition",
74 # Context managers
75 "beta_features",
76 "settings",
77 # Other
78 "__version__",
79 # Deprecated
80 "fast_pred_var",
81 "inv_quad_log_det",
82 "log_det",
83 ]
84
[end of gpytorch/__init__.py]
[start of gpytorch/beta_features.py]
1 #!/usr/bin/env python3
2
3 import warnings
4 from .settings import _feature_flag, _value_context
5 from .settings import fast_pred_var as _fast_pred_var
6 from .settings import fast_pred_samples as _fast_pred_samples
7
8
9 class _moved_beta_feature(object):
10 def __init__(self, new_cls, orig_name=None):
11 self.new_cls = new_cls
12 self.orig_name = orig_name if orig_name is not None else "gpytorch.settings.{}".format(new_cls.__name__)
13
14 def __call__(self, *args, **kwargs):
15 warnings.warn(
16 "`{}` has moved to `gpytorch.settings.{}`.".format(self.orig_name, self.new_cls.__name__),
17 DeprecationWarning
18 )
19 return self.new_cls(*args, **kwargs)
20
21 def __getattr__(self, name):
22 return getattr(self.new_cls, name)
23
24
25 fast_pred_var = _moved_beta_feature(_fast_pred_var)
26 fast_pred_samples = _moved_beta_feature(_fast_pred_samples)
27
28
29 class checkpoint_kernel(_value_context):
30 """
31 Should the kernel be computed in chunks with checkpointing or not? (Default, no)
32
33 If `split_size = 0`:
34 The kernel is computed explicitly. During training, the kernel matrix is
35 kept in memory for the backward pass. This is the fastest option but the
36 most memory intensive.
37 If `split_size > 0`:
38 The kernel is never fully computed or stored. Instead, the kernel is only
39 accessed through matrix multiplication. The matrix multiplication is
40 computed in `segments` chunks. This is slower, but requires significantly less memory.
41
42 Default: 0
43 """
44
45 _global_value = 0
46
47
48 class diagonal_correction(_feature_flag):
49 """
50 Add a diagonal correction to scalable inducing point methods
51 """
52
53 _state = True
54
55
56 class default_preconditioner(_feature_flag):
57 """
58 Add a diagonal correction to scalable inducing point methods
59 """
60
61 pass
62
63
64 __all__ = ["fast_pred_var", "fast_pred_samples", "diagonal_correction", "default_preconditioner"]
65
[end of gpytorch/beta_features.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gpytorch/__init__.py b/gpytorch/__init__.py
--- a/gpytorch/__init__.py
+++ b/gpytorch/__init__.py
@@ -37,9 +37,6 @@
__version__ = "0.2.1"
-# Old deprecated stuff
-fast_pred_var = beta_features._moved_beta_feature(settings.fast_pred_var, "gpytorch.settings.fast_pred_var")
-
__all__ = [
# Submodules
"distributions",
@@ -77,7 +74,6 @@
# Other
"__version__",
# Deprecated
- "fast_pred_var",
"inv_quad_log_det",
"log_det",
]
diff --git a/gpytorch/beta_features.py b/gpytorch/beta_features.py
--- a/gpytorch/beta_features.py
+++ b/gpytorch/beta_features.py
@@ -2,8 +2,6 @@
import warnings
from .settings import _feature_flag, _value_context
-from .settings import fast_pred_var as _fast_pred_var
-from .settings import fast_pred_samples as _fast_pred_samples
class _moved_beta_feature(object):
@@ -22,10 +20,6 @@
return getattr(self.new_cls, name)
-fast_pred_var = _moved_beta_feature(_fast_pred_var)
-fast_pred_samples = _moved_beta_feature(_fast_pred_samples)
-
-
class checkpoint_kernel(_value_context):
"""
Should the kernel be computed in chunks with checkpointing or not? (Default, no)
@@ -61,4 +55,4 @@
pass
-__all__ = ["fast_pred_var", "fast_pred_samples", "diagonal_correction", "default_preconditioner"]
+__all__ = ["checkpoint_kernel", "diagonal_correction", "default_preconditioner"]
| {"golden_diff": "diff --git a/gpytorch/__init__.py b/gpytorch/__init__.py\n--- a/gpytorch/__init__.py\n+++ b/gpytorch/__init__.py\n@@ -37,9 +37,6 @@\n \n __version__ = \"0.2.1\"\n \n-# Old deprecated stuff\n-fast_pred_var = beta_features._moved_beta_feature(settings.fast_pred_var, \"gpytorch.settings.fast_pred_var\")\n-\n __all__ = [\n # Submodules\n \"distributions\",\n@@ -77,7 +74,6 @@\n # Other\n \"__version__\",\n # Deprecated\n- \"fast_pred_var\",\n \"inv_quad_log_det\",\n \"log_det\",\n ]\ndiff --git a/gpytorch/beta_features.py b/gpytorch/beta_features.py\n--- a/gpytorch/beta_features.py\n+++ b/gpytorch/beta_features.py\n@@ -2,8 +2,6 @@\n \n import warnings\n from .settings import _feature_flag, _value_context\n-from .settings import fast_pred_var as _fast_pred_var\n-from .settings import fast_pred_samples as _fast_pred_samples\n \n \n class _moved_beta_feature(object):\n@@ -22,10 +20,6 @@\n return getattr(self.new_cls, name)\n \n \n-fast_pred_var = _moved_beta_feature(_fast_pred_var)\n-fast_pred_samples = _moved_beta_feature(_fast_pred_samples)\n-\n-\n class checkpoint_kernel(_value_context):\n \"\"\"\n Should the kernel be computed in chunks with checkpointing or not? (Default, no)\n@@ -61,4 +55,4 @@\n pass\n \n \n-__all__ = [\"fast_pred_var\", \"fast_pred_samples\", \"diagonal_correction\", \"default_preconditioner\"]\n+__all__ = [\"checkpoint_kernel\", \"diagonal_correction\", \"default_preconditioner\"]\n", "issue": "Where is `fast_pred_var` moving?\nI received the following warning when using `fast_pred_var`:\r\n\r\n```\r\n/cluster/nhunt/anaconda/envs/bayes_opt/lib/python3.7/site-packages/gpytorch/beta_features.py:17:\r\nDeprecationWarning: `gpytorch.settings.fast_pred_var` has moved to `gpytorch.settings.fast_pred_var`.\r\n```\r\n\r\nIt seems that I'm being warned that `fast_pred_var` has moved to its current location. Was there a typo in the warning about how we should be using this setting now?\r\n\r\n```bash\r\n$ pip list | grep gpytorch\r\ngpytorch 0.2.1\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python3\nfrom .module import Module\nfrom . 
import (\n beta_features,\n distributions,\n kernels,\n lazy,\n likelihoods,\n means,\n mlls,\n models,\n priors,\n settings,\n utils,\n variational,\n)\nfrom .functions import (\n add_diag,\n add_jitter,\n dsmm,\n inv_matmul,\n inv_quad,\n inv_quad_logdet,\n logdet,\n log_normal_cdf,\n matmul,\n normal_cdf,\n root_decomposition,\n root_inv_decomposition,\n # Deprecated\n inv_quad_log_det,\n log_det,\n)\nfrom .mlls import ExactMarginalLogLikelihood, VariationalMarginalLogLikelihood\nfrom .lazy import lazify, delazify\n\n\n__version__ = \"0.2.1\"\n\n# Old deprecated stuff\nfast_pred_var = beta_features._moved_beta_feature(settings.fast_pred_var, \"gpytorch.settings.fast_pred_var\")\n\n__all__ = [\n # Submodules\n \"distributions\",\n \"kernels\",\n \"lazy\",\n \"likelihoods\",\n \"means\",\n \"mlls\",\n \"models\",\n \"priors\",\n \"utils\",\n \"variational\",\n # Classes\n \"Module\",\n \"ExactMarginalLogLikelihood\",\n \"VariationalMarginalLogLikelihood\",\n # Functions\n \"add_diag\",\n \"add_jitter\",\n \"delazify\",\n \"dsmm\",\n \"inv_matmul\",\n \"inv_quad\",\n \"inv_quad_logdet\",\n \"lazify\",\n \"logdet\",\n \"log_normal_cdf\",\n \"matmul\",\n \"normal_cdf\",\n \"root_decomposition\",\n \"root_inv_decomposition\",\n # Context managers\n \"beta_features\",\n \"settings\",\n # Other\n \"__version__\",\n # Deprecated\n \"fast_pred_var\",\n \"inv_quad_log_det\",\n \"log_det\",\n]\n", "path": "gpytorch/__init__.py"}, {"content": "#!/usr/bin/env python3\n\nimport warnings\nfrom .settings import _feature_flag, _value_context\nfrom .settings import fast_pred_var as _fast_pred_var\nfrom .settings import fast_pred_samples as _fast_pred_samples\n\n\nclass _moved_beta_feature(object):\n def __init__(self, new_cls, orig_name=None):\n self.new_cls = new_cls\n self.orig_name = orig_name if orig_name is not None else \"gpytorch.settings.{}\".format(new_cls.__name__)\n\n def __call__(self, *args, **kwargs):\n warnings.warn(\n \"`{}` has moved to `gpytorch.settings.{}`.\".format(self.orig_name, self.new_cls.__name__),\n DeprecationWarning\n )\n return self.new_cls(*args, **kwargs)\n\n def __getattr__(self, name):\n return getattr(self.new_cls, name)\n\n\nfast_pred_var = _moved_beta_feature(_fast_pred_var)\nfast_pred_samples = _moved_beta_feature(_fast_pred_samples)\n\n\nclass checkpoint_kernel(_value_context):\n \"\"\"\n Should the kernel be computed in chunks with checkpointing or not? (Default, no)\n\n If `split_size = 0`:\n The kernel is computed explicitly. During training, the kernel matrix is\n kept in memory for the backward pass. This is the fastest option but the\n most memory intensive.\n If `split_size > 0`:\n The kernel is never fully computed or stored. Instead, the kernel is only\n accessed through matrix multiplication. The matrix multiplication is\n computed in `segments` chunks. This is slower, but requires significantly less memory.\n\n Default: 0\n \"\"\"\n\n _global_value = 0\n\n\nclass diagonal_correction(_feature_flag):\n \"\"\"\n Add a diagonal correction to scalable inducing point methods\n \"\"\"\n\n _state = True\n\n\nclass default_preconditioner(_feature_flag):\n \"\"\"\n Add a diagonal correction to scalable inducing point methods\n \"\"\"\n\n pass\n\n\n__all__ = [\"fast_pred_var\", \"fast_pred_samples\", \"diagonal_correction\", \"default_preconditioner\"]\n", "path": "gpytorch/beta_features.py"}]} | 1,880 | 398 |
gh_patches_debug_652 | rasdani/github-patches | git_diff | pex-tool__pex-2086 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.127
On the docket:
+ [x] Pex fails to subset a "foo @ file:///bar" URL lock. #2083
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.126"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.126"
+__version__ = "2.1.127"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.126\"\n+__version__ = \"2.1.127\"\n", "issue": "Release 2.1.127\nOn the docket:\r\n+ [x] Pex fails to subset a \"foo @ file:///bar\" URL lock. #2083\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.126\"\n", "path": "pex/version.py"}]} | 625 | 99 |
gh_patches_debug_27127 | rasdani/github-patches | git_diff | mindsdb__mindsdb-1011 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Endpoint to disable telemetry
Fairly self-explanatory: add an endpoint to the HTTP API that can be used to disable/enable the telemetry.
</issue>
<code>
[start of mindsdb/api/http/namespaces/util.py]
1 from flask import request
2 from flask_restx import Resource, abort
3 from flask import current_app as ca
4
5 from mindsdb.api.http.namespaces.configs.util import ns_conf
6 from mindsdb import __about__
7
8 @ns_conf.route('/ping')
9 class Ping(Resource):
10 @ns_conf.doc('get_ping')
11 def get(self):
12 '''Checks server avaliable'''
13 return {'status': 'ok'}
14
15 @ns_conf.route('/report_uuid')
16 class ReportUUID(Resource):
17 @ns_conf.doc('get_report_uuid')
18 def get(self):
19 metamodel_name = '___monitroing_metamodel___'
20 predictor = ca.mindsdb_native.create(metamodel_name)
21 return {
22 'report_uuid': predictor.report_uuid
23 }
24
[end of mindsdb/api/http/namespaces/util.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mindsdb/api/http/namespaces/util.py b/mindsdb/api/http/namespaces/util.py
--- a/mindsdb/api/http/namespaces/util.py
+++ b/mindsdb/api/http/namespaces/util.py
@@ -1,3 +1,4 @@
+import os
from flask import request
from flask_restx import Resource, abort
from flask import current_app as ca
@@ -5,6 +6,8 @@
from mindsdb.api.http.namespaces.configs.util import ns_conf
from mindsdb import __about__
+TELEMETRY_FILE = 'telemetry.lock'
+
@ns_conf.route('/ping')
class Ping(Resource):
@ns_conf.doc('get_ping')
@@ -21,3 +24,34 @@
return {
'report_uuid': predictor.report_uuid
}
+
+@ns_conf.route('/telemetry')
+class Telemetry(Resource):
+ @ns_conf.doc('get_telemetry_status')
+ def get(self):
+ status = "enabled" if is_telemetry_active() else "disabled"
+ return {"status": status}
+
+ @ns_conf.doc('set_telemetry')
+ def post(self):
+ data = request.json
+ action = data['action']
+ if str(action).lower() in ["true", "enable", "on"]:
+ enable_telemetry()
+ else:
+ disable_telemetry()
+
+
+def enable_telemetry():
+ path = os.path.join(ca.config_obj['storage_dir'], TELEMETRY_FILE)
+ if os.path.exists(path):
+ os.remove(path)
+
+def disable_telemetry():
+ path = os.path.join(ca.config_obj['storage_dir'], TELEMETRY_FILE)
+ with open(path, 'w') as _:
+ pass
+
+def is_telemetry_active():
+ path = os.path.join(ca.config_obj['storage_dir'], TELEMETRY_FILE)
+ return not os.path.exists(path)
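A hedged sketch of exercising the new endpoints; the host, the default port 47334 and the `/api/util` mount point are assumptions about the local HTTP API configuration:

```python
import requests

base = 'http://127.0.0.1:47334/api/util'

print(requests.get(f'{base}/telemetry').json())   # {'status': 'enabled'}
requests.post(f'{base}/telemetry', json={'action': 'disable'})
print(requests.get(f'{base}/telemetry').json())   # {'status': 'disabled'}
```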
| {"golden_diff": "diff --git a/mindsdb/api/http/namespaces/util.py b/mindsdb/api/http/namespaces/util.py\n--- a/mindsdb/api/http/namespaces/util.py\n+++ b/mindsdb/api/http/namespaces/util.py\n@@ -1,3 +1,4 @@\n+import os\n from flask import request\n from flask_restx import Resource, abort\n from flask import current_app as ca\n@@ -5,6 +6,8 @@\n from mindsdb.api.http.namespaces.configs.util import ns_conf\n from mindsdb import __about__\n \n+TELEMETRY_FILE = 'telemetry.lock'\n+\n @ns_conf.route('/ping')\n class Ping(Resource):\n @ns_conf.doc('get_ping')\n@@ -21,3 +24,34 @@\n return {\n 'report_uuid': predictor.report_uuid\n }\n+\n+@ns_conf.route('/telemetry')\n+class Telemetry(Resource):\n+ @ns_conf.doc('get_telemetry_status')\n+ def get(self):\n+ status = \"enabled\" if is_telemetry_active() else \"disabled\"\n+ return {\"status\": status}\n+\n+ @ns_conf.doc('set_telemetry')\n+ def post(self):\n+ data = request.json\n+ action = data['action']\n+ if str(action).lower() in [\"true\", \"enable\", \"on\"]:\n+ enable_telemetry()\n+ else:\n+ disable_telemetry()\n+\n+\n+def enable_telemetry():\n+ path = os.path.join(ca.config_obj['storage_dir'], TELEMETRY_FILE)\n+ if os.path.exists(path):\n+ os.remove(path)\n+\n+def disable_telemetry():\n+ path = os.path.join(ca.config_obj['storage_dir'], TELEMETRY_FILE)\n+ with open(path, 'w') as _:\n+ pass\n+\n+def is_telemetry_active():\n+ path = os.path.join(ca.config_obj['storage_dir'], TELEMETRY_FILE)\n+ return not os.path.exists(path)\n", "issue": "Endpoint to disable telemtry\nfairly self explainatory, add and an endpoint to the HTTP API that can be used to disable/enable the telemtry.\r\n\nEndpoint to disable telemtry\nfairly self explainatory, add and an endpoint to the HTTP API that can be used to disable/enable the telemtry.\r\n\n", "before_files": [{"content": "from flask import request\nfrom flask_restx import Resource, abort\nfrom flask import current_app as ca\n\nfrom mindsdb.api.http.namespaces.configs.util import ns_conf\nfrom mindsdb import __about__\n\n@ns_conf.route('/ping')\nclass Ping(Resource):\n @ns_conf.doc('get_ping')\n def get(self):\n '''Checks server avaliable'''\n return {'status': 'ok'}\n\n@ns_conf.route('/report_uuid')\nclass ReportUUID(Resource):\n @ns_conf.doc('get_report_uuid')\n def get(self):\n metamodel_name = '___monitroing_metamodel___'\n predictor = ca.mindsdb_native.create(metamodel_name)\n return {\n 'report_uuid': predictor.report_uuid\n }\n", "path": "mindsdb/api/http/namespaces/util.py"}]} | 811 | 434 |
gh_patches_debug_67113 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-5120 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
403 error after a search in the library
> Hello,
>
> I hit a 403 error, "Vous n'avez pas les droits suffisants pour accéder à cette page." ("You do not have sufficient rights to access this page."), after searching the courses in the library.
> Here is how it appeared:
>
> - I arrived on the site logged out
> - I went to "Développement web" from the banner at the top of the site
> - From there I logged into my account (same tab, the regular button), which brought me back to the page
> - Then I searched for "PHP" in the search bar, which took me to [this link](https://zestedesavoir.com/rechercher/?q=PHP&models=content&from_library=on&category=informatique&subcategory=site-web)
>
> The 403 error occurs when I tick 1 to 3 of the boxes (below the search bar) to filter the results and then click the "search" button in the bar again.
>
> Here is [an example link](https://zestedesavoir.com/rechercher/?q=PHP&category=informatique&subcategory=site-web&from_library=on&models=) that triggers a 403 on my side.
>
> Bye
Topic: https://zestedesavoir.com/forums/sujet/11609/erreur-403-apres-recherche-dans-la-bibliotheque/
*Sent from Zeste de Savoir*
</issue>
<code>
[start of zds/searchv2/forms.py]
1 import os
2 import random
3
4 from django import forms
5 from django.conf import settings
6 from django.utils.translation import ugettext_lazy as _
7
8 from crispy_forms.bootstrap import StrictButton
9 from crispy_forms.helper import FormHelper
10 from crispy_forms.layout import Layout, Field
11 from django.core.urlresolvers import reverse
12
13
14 class SearchForm(forms.Form):
15 q = forms.CharField(
16 label=_('Recherche'),
17 max_length=150,
18 required=False,
19 widget=forms.TextInput(
20 attrs={
21 'type': 'search',
22 'required': 'required'
23 }
24 )
25 )
26
27 choices = sorted(
28 [(k, v[0]) for k, v in settings.ZDS_APP['search']['search_groups'].items()],
29 key=lambda pair: pair[1]
30 )
31
32 models = forms.MultipleChoiceField(
33 label='',
34 widget=forms.CheckboxSelectMultiple,
35 required=False,
36 choices=choices
37 )
38
39 category = forms.CharField(widget=forms.HiddenInput, required=False)
40 subcategory = forms.CharField(widget=forms.HiddenInput, required=False)
41 from_library = forms.CharField(widget=forms.HiddenInput, required=False)
42
43 def __init__(self, *args, **kwargs):
44
45 super(SearchForm, self).__init__(*args, **kwargs)
46
47 self.helper = FormHelper()
48 self.helper.form_id = 'search-form'
49 self.helper.form_class = 'clearfix'
50 self.helper.form_method = 'get'
51 self.helper.form_action = reverse('search:query')
52
53 try:
54 with open(os.path.join(settings.BASE_DIR, 'suggestions.txt'), 'r') as suggestions_file:
55 suggestions = ', '.join(random.sample(suggestions_file.readlines(), 5)) + '…'
56 except OSError:
57 suggestions = _('Mathématiques, Droit, UDK, Langues, Python…')
58
59 self.fields['q'].widget.attrs['placeholder'] = suggestions
60
61 self.helper.layout = Layout(
62 Field('q'),
63 StrictButton('', type='submit', css_class='ico-after ico-search', title=_('Rechercher')),
64 Field('category'),
65 Field('subcategory'),
66 Field('from_library')
67 )
68
[end of zds/searchv2/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zds/searchv2/forms.py b/zds/searchv2/forms.py
--- a/zds/searchv2/forms.py
+++ b/zds/searchv2/forms.py
@@ -31,7 +31,12 @@
models = forms.MultipleChoiceField(
label='',
- widget=forms.CheckboxSelectMultiple,
+ widget=forms.CheckboxSelectMultiple(
+ attrs={
+ 'class': 'search-filters',
+ 'form': 'search-form'
+ }
+ ),
required=False,
choices=choices
)
| {"golden_diff": "diff --git a/zds/searchv2/forms.py b/zds/searchv2/forms.py\n--- a/zds/searchv2/forms.py\n+++ b/zds/searchv2/forms.py\n@@ -31,7 +31,12 @@\n \n models = forms.MultipleChoiceField(\n label='',\n- widget=forms.CheckboxSelectMultiple,\n+ widget=forms.CheckboxSelectMultiple(\n+ attrs={\n+ 'class': 'search-filters',\n+ 'form': 'search-form'\n+ }\n+ ),\n required=False,\n choices=choices\n )\n", "issue": "Erreur 403 apr\u00e8s recherche dans la biblioth\u00e8que\n> Bonjour,\r\n> \r\n> Je tombe sur une erreur 403 \"Vous n\u2019avez pas les droits suffisants pour acc\u00e9der \u00e0 cette page.\" apr\u00e8s une recherche dans les cours pr\u00e9sents dans la biblioth\u00e8que.\r\nVoil\u00e0 comment elle est apparue :\r\n> \r\n> - Arriv\u00e9 sur le site d\u00e9connect\u00e9\r\n> - Je suis all\u00e9 dans \"D\u00e9veloppement web\" depuis le bandeau en haut du site\r\n> - A partir de l\u00e0 je me suis connect\u00e9 \u00e0 mon compte (m\u00eame onglet, bouton classique), ce qui m'a ramen\u00e9 sur la page\r\n> - Puis j'ai fait une recherche \"PHP\" dans la barre de rechercher, ce qui m'a amen\u00e9 sur [ce lien](https://zestedesavoir.com/rechercher/?q=PHP&models=content&from_library=on&category=informatique&subcategory=site-web)\r\n> \r\n> L'erreur 403 se produit lorsque je coche 1 \u00e0 3 cases (sous la barre de recherche), pour filtrer les r\u00e9sultats, et que je clique \u00e0 nouveau sur le bouton \"rechercher\" dans la barre.\r\n> \r\n> Voil\u00e0 [un exemple de lien](https://zestedesavoir.com/rechercher/?q=PHP&category=informatique&subcategory=site-web&from_library=on&models=) provoquant une erreur 403 de mon c\u00f4t\u00e9.\r\n> \r\n> Bye\r\n\r\nSujet : https://zestedesavoir.com/forums/sujet/11609/erreur-403-apres-recherche-dans-la-bibliotheque/\r\n*Envoy\u00e9 depuis Zeste de Savoir*\n", "before_files": [{"content": "import os\nimport random\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom crispy_forms.bootstrap import StrictButton\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Field\nfrom django.core.urlresolvers import reverse\n\n\nclass SearchForm(forms.Form):\n q = forms.CharField(\n label=_('Recherche'),\n max_length=150,\n required=False,\n widget=forms.TextInput(\n attrs={\n 'type': 'search',\n 'required': 'required'\n }\n )\n )\n\n choices = sorted(\n [(k, v[0]) for k, v in settings.ZDS_APP['search']['search_groups'].items()],\n key=lambda pair: pair[1]\n )\n\n models = forms.MultipleChoiceField(\n label='',\n widget=forms.CheckboxSelectMultiple,\n required=False,\n choices=choices\n )\n\n category = forms.CharField(widget=forms.HiddenInput, required=False)\n subcategory = forms.CharField(widget=forms.HiddenInput, required=False)\n from_library = forms.CharField(widget=forms.HiddenInput, required=False)\n\n def __init__(self, *args, **kwargs):\n\n super(SearchForm, self).__init__(*args, **kwargs)\n\n self.helper = FormHelper()\n self.helper.form_id = 'search-form'\n self.helper.form_class = 'clearfix'\n self.helper.form_method = 'get'\n self.helper.form_action = reverse('search:query')\n\n try:\n with open(os.path.join(settings.BASE_DIR, 'suggestions.txt'), 'r') as suggestions_file:\n suggestions = ', '.join(random.sample(suggestions_file.readlines(), 5)) + '\u2026'\n except OSError:\n suggestions = _('Math\u00e9matiques, Droit, UDK, Langues, Python\u2026')\n\n self.fields['q'].widget.attrs['placeholder'] = suggestions\n\n self.helper.layout = 
Layout(\n Field('q'),\n StrictButton('', type='submit', css_class='ico-after ico-search', title=_('Rechercher')),\n Field('category'),\n Field('subcategory'),\n Field('from_library')\n )\n", "path": "zds/searchv2/forms.py"}]} | 1,508 | 125 |
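The fix above relies on the HTML5 `form` attribute: an `<input>` rendered outside a `<form>` element still submits with it when the attribute names the form's `id`, and Django copies widget `attrs` onto each rendered checkbox. A reduced sketch of the pattern (field and choice names below are made up, not the zds form):

```python
from django import forms

class FilterForm(forms.Form):
    models = forms.MultipleChoiceField(
        required=False,
        choices=[('content', 'Contents'), ('topic', 'Topics')],
        widget=forms.CheckboxSelectMultiple(
            attrs={'form': 'search-form'}  # each <input> gets form="search-form"
        ),
    )
```

Since the recipe's helper sets `form_id = 'search-form'`, the checkbox values now travel with the main search form, which is consistent with the failing URLs in the report, where `models=` arrived empty.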
gh_patches_debug_2761 | rasdani/github-patches | git_diff | napari__napari-1088 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ListModel.append does not check type
## 🐛 Bug
in working on layer groups, I found a strange lack of type checking when appending to a `ListModel` (which inherits from `TypedList`). [`ListModel.append`](https://github.com/napari/napari/blob/59ed366e9d492a2389c451468fd8b9f96508b4e2/napari/utils/list/_model.py#L59) jumps right over `TypedList.append`
https://github.com/napari/napari/blob/59ed366e9d492a2389c451468fd8b9f96508b4e2/napari/utils/list/_model.py#L58-L60
... and if you try to append something that is not a `Layer` to a `LayerList`, it works fine up until it throws an error (unrelated to typing) in `components.layerlist._add`. Is that supposed to be `TypedList.append(self, obj)`, or was that intentional?
</issue>
<code>
[start of napari/utils/list/_model.py]
1 from ...utils.event import EmitterGroup
2
3 from ._multi import MultiIndexList
4 from ._typed import TypedList
5
6
7 class ListModel(MultiIndexList, TypedList):
8 """List with events, tuple-indexing, typing, and filtering.
9
10 Parameters
11 ----------
12 basetype : type
13 Type of the elements in the list.
14 iterable : iterable, optional
15 Elements to initialize the list with.
16 lookup : dict of type : function(object, ``basetype``) -> bool
17 Functions that determine if an object is a reference to an
18 element of the list.
19
20 Attributes
21 ----------
22 events : vispy.util.event.EmitterGroup
23 Group of events for adding, removing, and reordering elements
24 within the list.
25 """
26
27 def __init__(self, basetype, iterable=(), lookup=None):
28 super().__init__(basetype, iterable, lookup)
29 self.events = EmitterGroup(
30 source=self,
31 auto_connect=True,
32 added=None,
33 removed=None,
34 reordered=None,
35 changed=None,
36 )
37 self.events.added.connect(self.events.changed)
38 self.events.removed.connect(self.events.changed)
39 self.events.reordered.connect(self.events.changed)
40
41 def __setitem__(self, query, values):
42 indices = tuple(self.__prsitem__(query))
43 new_indices = tuple(values)
44
45 if sorted(indices) != sorted(self.index(v) for v in new_indices):
46 raise TypeError(
47 'must be a reordering of indices; '
48 'setting of list items not allowed'
49 )
50
51 super().__setitem__(indices, new_indices)
52 self.events.reordered()
53
54 def insert(self, index, obj):
55 super().insert(index, obj)
56 self.events.added(item=obj, index=self.__locitem__(index))
57
58 def append(self, obj):
59 super(TypedList, self).append(obj)
60 self.events.added(item=obj, index=len(self) - 1)
61
62 def pop(self, key):
63 obj = super().pop(key)
64 self.events.removed(item=obj, index=key)
65 return obj
66
[end of napari/utils/list/_model.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/napari/utils/list/_model.py b/napari/utils/list/_model.py
--- a/napari/utils/list/_model.py
+++ b/napari/utils/list/_model.py
@@ -56,7 +56,7 @@
self.events.added(item=obj, index=self.__locitem__(index))
def append(self, obj):
- super(TypedList, self).append(obj)
+ TypedList.append(self, obj)
self.events.added(item=obj, index=len(self) - 1)
def pop(self, key):
| {"golden_diff": "diff --git a/napari/utils/list/_model.py b/napari/utils/list/_model.py\n--- a/napari/utils/list/_model.py\n+++ b/napari/utils/list/_model.py\n@@ -56,7 +56,7 @@\n self.events.added(item=obj, index=self.__locitem__(index))\n \n def append(self, obj):\n- super(TypedList, self).append(obj)\n+ TypedList.append(self, obj)\n self.events.added(item=obj, index=len(self) - 1)\n \n def pop(self, key):\n", "issue": "ListModel.append does not check type\n## \ud83d\udc1b Bug\r\nin working on layer groups, I found a strange lack of type checking when appending to a `ListModel` (which inherits from `TypedList`). [`ListModel.append`](https://github.com/napari/napari/blob/59ed366e9d492a2389c451468fd8b9f96508b4e2/napari/utils/list/_model.py#L59) jumps right over `TypedList.append`\r\nhttps://github.com/napari/napari/blob/59ed366e9d492a2389c451468fd8b9f96508b4e2/napari/utils/list/_model.py#L58-L60\r\n\r\n... and if you try to something that is not a `Layer` to a `LayerList`, it works fine up until throwing an error (unrelated to typing) in `components.layerlist._add`. Is that supposed to be `TypedList.append(self, obj)`? or was that intentional?\n", "before_files": [{"content": "from ...utils.event import EmitterGroup\n\nfrom ._multi import MultiIndexList\nfrom ._typed import TypedList\n\n\nclass ListModel(MultiIndexList, TypedList):\n \"\"\"List with events, tuple-indexing, typing, and filtering.\n\n Parameters\n ----------\n basetype : type\n Type of the elements in the list.\n iterable : iterable, optional\n Elements to initialize the list with.\n lookup : dict of type : function(object, ``basetype``) -> bool\n Functions that determine if an object is a reference to an\n element of the list.\n\n Attributes\n ----------\n events : vispy.util.event.EmitterGroup\n Group of events for adding, removing, and reordering elements\n within the list.\n \"\"\"\n\n def __init__(self, basetype, iterable=(), lookup=None):\n super().__init__(basetype, iterable, lookup)\n self.events = EmitterGroup(\n source=self,\n auto_connect=True,\n added=None,\n removed=None,\n reordered=None,\n changed=None,\n )\n self.events.added.connect(self.events.changed)\n self.events.removed.connect(self.events.changed)\n self.events.reordered.connect(self.events.changed)\n\n def __setitem__(self, query, values):\n indices = tuple(self.__prsitem__(query))\n new_indices = tuple(values)\n\n if sorted(indices) != sorted(self.index(v) for v in new_indices):\n raise TypeError(\n 'must be a reordering of indices; '\n 'setting of list items not allowed'\n )\n\n super().__setitem__(indices, new_indices)\n self.events.reordered()\n\n def insert(self, index, obj):\n super().insert(index, obj)\n self.events.added(item=obj, index=self.__locitem__(index))\n\n def append(self, obj):\n super(TypedList, self).append(obj)\n self.events.added(item=obj, index=len(self) - 1)\n\n def pop(self, key):\n obj = super().pop(key)\n self.events.removed(item=obj, index=key)\n return obj\n", "path": "napari/utils/list/_model.py"}]} | 1,364 | 126 |
gh_patches_debug_17308 | rasdani/github-patches | git_diff | pre-commit__pre-commit-315 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
:args seems to break with {} in list.
I am working on a repo with some hooks for my company: https://github.com/marick/pre-commit-hooks
There is a hook that works fine with this `.pre-commit-config.yaml`:
``` yaml
- repo: /Users/marick/src/pre-commit-hooks
sha: d6dee96f56bf9290f7ebb852c4252c50b8f6215d
stages: [commit, push]
hooks:
- id: prohibit-suspicious-patterns
args: ["AKIA[[:alnum]]", --]
```
However, if I change the first arg by adding `{1}`:
``` yaml
args: ["AKIA[[:alnum]]{1}", --]
```
... I get this:
```
prohibit suspicious patterns..................................................................
An unexpected error has occurred: IndexError: tuple index out of range
Check the log at ~/.pre-commit/pre-commit.log
```
The contents of `pre-commit.log`:
```
An unexpected error has occurred: IndexError: tuple index out of range
Traceback (most recent call last):
File "/usr/local/lib/python2.7/site-packages/pre_commit/error_handler.py", line 36, in error_handler
yield
File "/usr/local/lib/python2.7/site-packages/pre_commit/main.py", line 150, in main
return run(runner, args)
File "/usr/local/lib/python2.7/site-packages/pre_commit/commands/run.py", line 212, in run
return _run_hooks(repo_hooks, args, write, environ)
File "/usr/local/lib/python2.7/site-packages/pre_commit/commands/run.py", line 136, in _run_hooks
retval |= _run_single_hook(hook, repo, args, write, skips)
File "/usr/local/lib/python2.7/site-packages/pre_commit/commands/run.py", line 89, in _run_single_hook
retcode, stdout, stderr = repo.run_hook(hook, filenames)
File "/usr/local/lib/python2.7/site-packages/pre_commit/repository.py", line 145, in run_hook
self.cmd_runner, hook, file_args,
File "/usr/local/lib/python2.7/site-packages/pre_commit/languages/script.py", line 23, in run_hook
encoding=None,
File "/usr/local/lib/python2.7/site-packages/pre_commit/prefixed_command_runner.py", line 40, in run
replaced_cmd = _replace_cmd(cmd, prefix=self.prefix_dir)
File "/usr/local/lib/python2.7/site-packages/pre_commit/prefixed_command_runner.py", line 11, in _replace_cmd
return [part.format(**kwargs) for part in cmd]
IndexError: tuple index out of range
```
</issue>
<code>
[start of pre_commit/prefixed_command_runner.py]
1 from __future__ import unicode_literals
2
3 import os
4 import os.path
5 import subprocess
6
7 from pre_commit.util import cmd_output
8
9
10 def _replace_cmd(cmd, **kwargs):
11 return [part.format(**kwargs) for part in cmd]
12
13
14 class PrefixedCommandRunner(object):
15 """A PrefixedCommandRunner allows you to run subprocess commands with
16 comand substitution.
17
18 For instance:
19 PrefixedCommandRunner('/tmp/foo').run(['{prefix}foo.sh', 'bar', 'baz'])
20
21 will run ['/tmp/foo/foo.sh', 'bar', 'baz']
22 """
23
24 def __init__(
25 self,
26 prefix_dir,
27 popen=subprocess.Popen,
28 makedirs=os.makedirs
29 ):
30 self.prefix_dir = prefix_dir.rstrip(os.sep) + os.sep
31 self.__popen = popen
32 self.__makedirs = makedirs
33
34 def _create_path_if_not_exists(self):
35 if not os.path.exists(self.prefix_dir):
36 self.__makedirs(self.prefix_dir)
37
38 def run(self, cmd, **kwargs):
39 self._create_path_if_not_exists()
40 replaced_cmd = _replace_cmd(cmd, prefix=self.prefix_dir)
41 return cmd_output(*replaced_cmd, __popen=self.__popen, **kwargs)
42
43 def path(self, *parts):
44 path = os.path.join(self.prefix_dir, *parts)
45 return os.path.normpath(path)
46
47 def exists(self, *parts):
48 return os.path.exists(self.path(*parts))
49
50 @classmethod
51 def from_command_runner(cls, command_runner, path_end):
52 """Constructs a new command runner from an existing one by appending
53 `path_end` to the command runner's prefix directory.
54 """
55 return cls(
56 command_runner.path(path_end),
57 popen=command_runner.__popen,
58 makedirs=command_runner.__makedirs,
59 )
60
[end of pre_commit/prefixed_command_runner.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/prefixed_command_runner.py b/pre_commit/prefixed_command_runner.py
--- a/pre_commit/prefixed_command_runner.py
+++ b/pre_commit/prefixed_command_runner.py
@@ -7,10 +7,6 @@
from pre_commit.util import cmd_output
-def _replace_cmd(cmd, **kwargs):
- return [part.format(**kwargs) for part in cmd]
-
-
class PrefixedCommandRunner(object):
"""A PrefixedCommandRunner allows you to run subprocess commands with
comand substitution.
@@ -37,7 +33,9 @@
def run(self, cmd, **kwargs):
self._create_path_if_not_exists()
- replaced_cmd = _replace_cmd(cmd, prefix=self.prefix_dir)
+ replaced_cmd = [
+ part.replace('{prefix}', self.prefix_dir) for part in cmd
+ ]
return cmd_output(*replaced_cmd, __popen=self.__popen, **kwargs)
def path(self, *parts):
| {"golden_diff": "diff --git a/pre_commit/prefixed_command_runner.py b/pre_commit/prefixed_command_runner.py\n--- a/pre_commit/prefixed_command_runner.py\n+++ b/pre_commit/prefixed_command_runner.py\n@@ -7,10 +7,6 @@\n from pre_commit.util import cmd_output\n \n \n-def _replace_cmd(cmd, **kwargs):\n- return [part.format(**kwargs) for part in cmd]\n-\n-\n class PrefixedCommandRunner(object):\n \"\"\"A PrefixedCommandRunner allows you to run subprocess commands with\n comand substitution.\n@@ -37,7 +33,9 @@\n \n def run(self, cmd, **kwargs):\n self._create_path_if_not_exists()\n- replaced_cmd = _replace_cmd(cmd, prefix=self.prefix_dir)\n+ replaced_cmd = [\n+ part.replace('{prefix}', self.prefix_dir) for part in cmd\n+ ]\n return cmd_output(*replaced_cmd, __popen=self.__popen, **kwargs)\n \n def path(self, *parts):\n", "issue": ":args seems to break with {} in list.\nI am working on a repo with some hooks for my company: https://github.com/marick/pre-commit-hooks\n\nThere is a hook that works fine with this `.pre-commit-config.yaml`:\n\n``` yaml\n- repo: /Users/marick/src/pre-commit-hooks\n sha: d6dee96f56bf9290f7ebb852c4252c50b8f6215d\n stages: [commit, push]\n hooks:\n - id: prohibit-suspicious-patterns\n args: [\"AKIA[[:alnum]]\", --]\n```\n\nHowever, it I change the first arg by adding `{1}`:\n\n``` yaml\n args: [\"AKIA[[:alnum]]{1}\", --]\n```\n\n... I get this:\n\n```\nprohibit suspicious patterns..................................................................\nAn unexpected error has occurred: IndexError: tuple index out of range\nCheck the log at ~/.pre-commit/pre-commit.log\n```\n\nThe contents of `pre-commit.log`:\n\n```\nAn unexpected error has occurred: IndexError: tuple index out of range\nTraceback (most recent call last):\n File \"/usr/local/lib/python2.7/site-packages/pre_commit/error_handler.py\", line 36, in error_handler\n yield\n File \"/usr/local/lib/python2.7/site-packages/pre_commit/main.py\", line 150, in main\n return run(runner, args)\n File \"/usr/local/lib/python2.7/site-packages/pre_commit/commands/run.py\", line 212, in run\n return _run_hooks(repo_hooks, args, write, environ)\n File \"/usr/local/lib/python2.7/site-packages/pre_commit/commands/run.py\", line 136, in _run_hooks\n retval |= _run_single_hook(hook, repo, args, write, skips)\n File \"/usr/local/lib/python2.7/site-packages/pre_commit/commands/run.py\", line 89, in _run_single_hook\n retcode, stdout, stderr = repo.run_hook(hook, filenames)\n File \"/usr/local/lib/python2.7/site-packages/pre_commit/repository.py\", line 145, in run_hook\n self.cmd_runner, hook, file_args,\n File \"/usr/local/lib/python2.7/site-packages/pre_commit/languages/script.py\", line 23, in run_hook\n encoding=None,\n File \"/usr/local/lib/python2.7/site-packages/pre_commit/prefixed_command_runner.py\", line 40, in run\n replaced_cmd = _replace_cmd(cmd, prefix=self.prefix_dir)\n File \"/usr/local/lib/python2.7/site-packages/pre_commit/prefixed_command_runner.py\", line 11, in _replace_cmd\n return [part.format(**kwargs) for part in cmd]\nIndexError: tuple index out of range\n\n```\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport os\nimport os.path\nimport subprocess\n\nfrom pre_commit.util import cmd_output\n\n\ndef _replace_cmd(cmd, **kwargs):\n return [part.format(**kwargs) for part in cmd]\n\n\nclass PrefixedCommandRunner(object):\n \"\"\"A PrefixedCommandRunner allows you to run subprocess commands with\n comand substitution.\n\n For instance:\n 
PrefixedCommandRunner('/tmp/foo').run(['{prefix}foo.sh', 'bar', 'baz'])\n\n will run ['/tmp/foo/foo.sh', 'bar', 'baz']\n \"\"\"\n\n def __init__(\n self,\n prefix_dir,\n popen=subprocess.Popen,\n makedirs=os.makedirs\n ):\n self.prefix_dir = prefix_dir.rstrip(os.sep) + os.sep\n self.__popen = popen\n self.__makedirs = makedirs\n\n def _create_path_if_not_exists(self):\n if not os.path.exists(self.prefix_dir):\n self.__makedirs(self.prefix_dir)\n\n def run(self, cmd, **kwargs):\n self._create_path_if_not_exists()\n replaced_cmd = _replace_cmd(cmd, prefix=self.prefix_dir)\n return cmd_output(*replaced_cmd, __popen=self.__popen, **kwargs)\n\n def path(self, *parts):\n path = os.path.join(self.prefix_dir, *parts)\n return os.path.normpath(path)\n\n def exists(self, *parts):\n return os.path.exists(self.path(*parts))\n\n @classmethod\n def from_command_runner(cls, command_runner, path_end):\n \"\"\"Constructs a new command runner from an existing one by appending\n `path_end` to the command runner's prefix directory.\n \"\"\"\n return cls(\n command_runner.path(path_end),\n popen=command_runner.__popen,\n makedirs=command_runner.__makedirs,\n )\n", "path": "pre_commit/prefixed_command_runner.py"}]} | 1,672 | 218 |
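The root cause in the record above is that `str.format` parses every brace pair in a hook argument as a replacement field, so a regex quantifier like `{1}` becomes positional field 1 and raises `IndexError`; plain `str.replace` only touches the literal `{prefix}` token. A small demonstration (the paths are illustrative):

```python
cmd = ['{prefix}hook.sh', 'AKIA[[:alnum]]{1}', '--']
prefix = '/tmp/hooks/'

# Old behaviour: '{1}' is parsed as positional field 1 -> IndexError,
# since no positional arguments are supplied:
# [part.format(prefix=prefix) for part in cmd]

# Fixed behaviour: only the exact '{prefix}' token is substituted.
print([part.replace('{prefix}', prefix) for part in cmd])
# ['/tmp/hooks/hook.sh', 'AKIA[[:alnum]]{1}', '--']
```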
gh_patches_debug_31061 | rasdani/github-patches | git_diff | conan-io__conan-center-index-2686 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[package] libunwind/1.3.1 library dependency order is wrong
### Package and Environment Details (include every applicable attribute)
* Package Name/Version: **libunwind/1.3.1**
* Operating System+version: **Linux Ubuntu 18.04**
* Compiler+version: **GCC 7**
* Conan version: **conan 1.21.1**
* Python version: **Python 2.7.17, 3.6.9**
### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)
```
[settings]
os=Linux
os_build=Linux
arch=x86_64
arch_build=x86_64
compiler=gcc
compiler.version=7
compiler.libcxx=libstdc++11
[options]
[build_requires]
[env]
```
But an **error occurs when static linking**, that is, with `shared=False`.
### Steps to reproduce (Include if Applicable)
Soon I will write an example to reproduce.
But there is [an example about libunwind library dependency](https://github.com/daniel-thompson/libunwind-examples/blob/master/Makefile)
* current order : `unwind` `unwind-generic` `unwind-ptrace`
* right order : `unwind-ptrace` `unwind-generic` `unwind`
### Logs (Include/Attach if Applicable)
<details><summary>Click to expand log</summary>
```
Put your log output here
```
</details>
</issue>
<code>
[start of recipes/libunwind/all/conanfile.py]
1 from conans import ConanFile, AutoToolsBuildEnvironment, tools
2 from conans.errors import ConanInvalidConfiguration
3 import os
4 import glob
5
6
7 class LiunwindConan(ConanFile):
8 name = "libunwind"
9 description = "Manipulate the preserved state of each call-frame and resume the execution at any point."
10 topics = ("conan", "libunwind", "unwind", "debuggers", "exception-handling", "introspection", "setjmp")
11 url = "https://github.com/conan-io/conan-center-index"
12 homepage = "https://github.com/libunwind/libunwind"
13 license = "MIT"
14 settings = "os", "arch", "compiler", "build_type"
15 options = {"shared": [True, False], "fPIC": [True, False], "coredump": [True, False], "ptrace": [True, False], "setjmp": [True, False]}
16 default_options = {"shared": False, "fPIC": True, "coredump": True, "ptrace": True, "setjmp": True}
17 requires = "xz_utils/5.2.4"
18 _autotools = None
19
20 @property
21 def _source_subfolder(self):
22 return "source_subfolder"
23
24 def configure(self):
25 if self.settings.os not in ["Linux", "FreeBSD"]:
26 raise ConanInvalidConfiguration("libunwind is only supported on Linux and FreeBSD")
27 del self.settings.compiler.libcxx
28 del self.settings.compiler.cppstd
29
30 def source(self):
31 tools.get(**self.conan_data["sources"][self.version])
32 extracted_dir = self.name + "-" + self.version
33 os.rename(extracted_dir, self._source_subfolder)
34
35 def _configure_autotools(self):
36 if not self._autotools:
37 self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
38 args = [
39 "--enable-shared={}".format("yes" if self.options.shared else "no"),
40 "--enable-static={}".format("no" if self.options.shared else "yes"),
41 "--enable-coredump={}".format("yes" if self.options.coredump else "no"),
42 "--enable-ptrace={}".format("yes" if self.options.ptrace else "no"),
43 "--enable-setjmp={}".format("yes" if self.options.setjmp else "no"),
44 "--disable-tests",
45 "--disable-documentation"
46 ]
47 self._autotools.configure(configure_dir=self._source_subfolder, args=args)
48 return self._autotools
49
50 def build(self):
51 autotools = self._configure_autotools()
52 autotools.make()
53
54 def package(self):
55 self.copy(pattern="COPYING", dst="licenses", src=self._source_subfolder)
56 autotools = self._configure_autotools()
57 autotools.install()
58 tools.rmdir(os.path.join(self.package_folder, 'lib', 'pkgconfig'))
59 with tools.chdir(os.path.join(self.package_folder, "lib")):
60 for filename in glob.glob("*.la"):
61 os.unlink(filename)
62
63 def package_info(self):
64 self.cpp_info.libs = tools.collect_libs(self)
65 if self.settings.os == "Linux":
66 self.cpp_info.system_libs.append("pthread")
67
[end of recipes/libunwind/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/libunwind/all/conanfile.py b/recipes/libunwind/all/conanfile.py
--- a/recipes/libunwind/all/conanfile.py
+++ b/recipes/libunwind/all/conanfile.py
@@ -24,6 +24,8 @@
def configure(self):
if self.settings.os not in ["Linux", "FreeBSD"]:
raise ConanInvalidConfiguration("libunwind is only supported on Linux and FreeBSD")
+ if self.options.shared:
+ del self.options.fPIC
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd
@@ -61,6 +63,23 @@
os.unlink(filename)
def package_info(self):
- self.cpp_info.libs = tools.collect_libs(self)
+ self.cpp_info.components["unwind"].names["pkg_config"] = "libunwind"
+ self.cpp_info.components["unwind"].libs = ["unwind"]
+ self.cpp_info.components["unwind"].requires = ["xz_utils::xz_utils"]
if self.settings.os == "Linux":
- self.cpp_info.system_libs.append("pthread")
+ self.cpp_info.components["unwind"].system_libs.append("pthread")
+ self.cpp_info.components["generic"].names["pkg_config"] = "libunwind-generic"
+ self.cpp_info.components["generic"].libs = ["unwind-generic"]
+ self.cpp_info.components["generic"].requires = ["unwind"]
+ if self.options.ptrace:
+ self.cpp_info.components["ptrace"].names["pkg_config"] = "libunwind-ptrace"
+ self.cpp_info.components["ptrace"].libs = ["unwind-ptrace"]
+ self.cpp_info.components["ptrace"].requires = ["generic", "unwind"]
+ if self.options.setjmp:
+ self.cpp_info.components["setjmp"].names["pkg_config"] = "libunwind-setjmp"
+ self.cpp_info.components["setjmp"].libs = ["unwind-setjmp"]
+ self.cpp_info.components["setjmp"].requires = ["unwind"]
+ if self.options.coredump:
+ self.cpp_info.components["coredump"].names["pkg_config"] = "libunwind-coredump"
+ self.cpp_info.components["coredump"].libs = ["unwind-coredump"]
+ self.cpp_info.components["coredump"].requires = ["generic", "unwind"]
| {"golden_diff": "diff --git a/recipes/libunwind/all/conanfile.py b/recipes/libunwind/all/conanfile.py\n--- a/recipes/libunwind/all/conanfile.py\n+++ b/recipes/libunwind/all/conanfile.py\n@@ -24,6 +24,8 @@\n def configure(self):\n if self.settings.os not in [\"Linux\", \"FreeBSD\"]:\n raise ConanInvalidConfiguration(\"libunwind is only supported on Linux and FreeBSD\")\n+ if self.options.shared:\n+ del self.options.fPIC\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n \n@@ -61,6 +63,23 @@\n os.unlink(filename)\n \n def package_info(self):\n- self.cpp_info.libs = tools.collect_libs(self)\n+ self.cpp_info.components[\"unwind\"].names[\"pkg_config\"] = \"libunwind\"\n+ self.cpp_info.components[\"unwind\"].libs = [\"unwind\"]\n+ self.cpp_info.components[\"unwind\"].requires = [\"xz_utils::xz_utils\"]\n if self.settings.os == \"Linux\":\n- self.cpp_info.system_libs.append(\"pthread\")\n+ self.cpp_info.components[\"unwind\"].system_libs.append(\"pthread\")\n+ self.cpp_info.components[\"generic\"].names[\"pkg_config\"] = \"libunwind-generic\"\n+ self.cpp_info.components[\"generic\"].libs = [\"unwind-generic\"]\n+ self.cpp_info.components[\"generic\"].requires = [\"unwind\"]\n+ if self.options.ptrace:\n+ self.cpp_info.components[\"ptrace\"].names[\"pkg_config\"] = \"libunwind-ptrace\"\n+ self.cpp_info.components[\"ptrace\"].libs = [\"unwind-ptrace\"]\n+ self.cpp_info.components[\"ptrace\"].requires = [\"generic\", \"unwind\"]\n+ if self.options.setjmp:\n+ self.cpp_info.components[\"setjmp\"].names[\"pkg_config\"] = \"libunwind-setjmp\"\n+ self.cpp_info.components[\"setjmp\"].libs = [\"unwind-setjmp\"]\n+ self.cpp_info.components[\"setjmp\"].requires = [\"unwind\"]\n+ if self.options.coredump:\n+ self.cpp_info.components[\"coredump\"].names[\"pkg_config\"] = \"libunwind-coredump\"\n+ self.cpp_info.components[\"coredump\"].libs = [\"unwind-coredump\"]\n+ self.cpp_info.components[\"coredump\"].requires = [\"generic\", \"unwind\"]\n", "issue": "[package] libunwind/1.3.1 library dependency order is wrong\n### Package and Environment Details (include every applicable attribute)\r\n * Package Name/Version: **libunwind/1.3.1**\r\n * Operating System+version: **Linux Ubuntu 18.04**\r\n * Compiler+version: **GCC 7**\r\n * Conan version: **conan 1.21.1**\r\n * Python version: **Python 2.7.17, 3.6.9**\r\n\r\n\r\n### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)\r\n```\r\n[settings]\r\nos=Linux\r\nos_build=Linux\r\narch=x86_64\r\narch_build=x86_64\r\ncompiler=gcc\r\ncompiler.version=7\r\ncompiler.libcxx=libstdc++1+\r\n[options]\r\n[build_requires]\r\n[env]\r\n```\r\n\r\nBut **error occurs when static linking**, that is shared=False.\r\n\r\n### Steps to reproduce (Include if Applicable)\r\n\r\nSoon I will write an example to reproduce.\r\n\r\nBut there is [an example about libunwind library dependency](https://github.com/daniel-thompson/libunwind-examples/blob/master/Makefile)\r\n\r\n* current order : `unwind` `unwind-generic` `unwind-ptrace`\r\n* right order : `unwind-ptrace` `unwind-generic` `unwind`\r\n\r\n### Logs (Include/Attach if Applicable)\r\n<details><summary>Click to expand log</summary>\r\n\r\n```\r\nPut your log output here\r\n```\r\n\r\n</details>\r\n\n", "before_files": [{"content": "from conans import ConanFile, AutoToolsBuildEnvironment, tools\nfrom conans.errors import ConanInvalidConfiguration\nimport os\nimport glob\n\n\nclass LiunwindConan(ConanFile):\n name = \"libunwind\"\n description = 
\"Manipulate the preserved state of each call-frame and resume the execution at any point.\"\n topics = (\"conan\", \"libunwind\", \"unwind\", \"debuggers\", \"exception-handling\", \"introspection\", \"setjmp\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/libunwind/libunwind\"\n license = \"MIT\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\"shared\": [True, False], \"fPIC\": [True, False], \"coredump\": [True, False], \"ptrace\": [True, False], \"setjmp\": [True, False]}\n default_options = {\"shared\": False, \"fPIC\": True, \"coredump\": True, \"ptrace\": True, \"setjmp\": True}\n requires = \"xz_utils/5.2.4\"\n _autotools = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def configure(self):\n if self.settings.os not in [\"Linux\", \"FreeBSD\"]:\n raise ConanInvalidConfiguration(\"libunwind is only supported on Linux and FreeBSD\")\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = self.name + \"-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def _configure_autotools(self):\n if not self._autotools:\n self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)\n args = [\n \"--enable-shared={}\".format(\"yes\" if self.options.shared else \"no\"),\n \"--enable-static={}\".format(\"no\" if self.options.shared else \"yes\"),\n \"--enable-coredump={}\".format(\"yes\" if self.options.coredump else \"no\"),\n \"--enable-ptrace={}\".format(\"yes\" if self.options.ptrace else \"no\"),\n \"--enable-setjmp={}\".format(\"yes\" if self.options.setjmp else \"no\"),\n \"--disable-tests\",\n \"--disable-documentation\"\n ]\n self._autotools.configure(configure_dir=self._source_subfolder, args=args)\n return self._autotools\n\n def build(self):\n autotools = self._configure_autotools()\n autotools.make()\n\n def package(self):\n self.copy(pattern=\"COPYING\", dst=\"licenses\", src=self._source_subfolder)\n autotools = self._configure_autotools()\n autotools.install()\n tools.rmdir(os.path.join(self.package_folder, 'lib', 'pkgconfig'))\n with tools.chdir(os.path.join(self.package_folder, \"lib\")):\n for filename in glob.glob(\"*.la\"):\n os.unlink(filename)\n\n def package_info(self):\n self.cpp_info.libs = tools.collect_libs(self)\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs.append(\"pthread\")\n", "path": "recipes/libunwind/all/conanfile.py"}]} | 1,729 | 520 |
gh_patches_debug_5977 | rasdani/github-patches | git_diff | bokeh__bokeh-4754 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Typo in categorical example plot title
File examples/plotting/file/categorical.py contains a typo in plot name.
</issue>
<code>
[start of examples/plotting/file/categorical.py]
1 from bokeh.layouts import row
2 from bokeh.plotting import figure, show, output_file
3
4 factors = ["a", "b", "c", "d", "e", "f", "g", "h"]
5 x = [50, 40, 65, 10, 25, 37, 80, 60]
6
7 dot = figure(title="Catgorical Dot Plot", tools="", toolbar_location=None,
8 y_range=factors, x_range=[0,100])
9
10 dot.segment(0, factors, x, factors, line_width=2, line_color="green", )
11 dot.circle(x, factors, size=15, fill_color="orange", line_color="green", line_width=3, )
12
13 factors = ["foo", "bar", "baz"]
14 x = ["foo", "foo", "foo", "bar", "bar", "bar", "baz", "baz", "baz"]
15 y = ["foo", "bar", "baz", "foo", "bar", "baz", "foo", "bar", "baz"]
16 colors = [
17 "#0B486B", "#79BD9A", "#CFF09E",
18 "#79BD9A", "#0B486B", "#79BD9A",
19 "#CFF09E", "#79BD9A", "#0B486B"
20 ]
21
22 hm = figure(title="Categorical Heatmap", tools="hover", toolbar_location=None,
23 x_range=factors, y_range=factors)
24
25 hm.rect(x, y, color=colors, width=1, height=1)
26
27 output_file("categorical.html", title="categorical.py example")
28
29 show(row(hm, dot, sizing_mode="scale_width")) # open a browser
30
[end of examples/plotting/file/categorical.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/plotting/file/categorical.py b/examples/plotting/file/categorical.py
--- a/examples/plotting/file/categorical.py
+++ b/examples/plotting/file/categorical.py
@@ -4,7 +4,7 @@
factors = ["a", "b", "c", "d", "e", "f", "g", "h"]
x = [50, 40, 65, 10, 25, 37, 80, 60]
-dot = figure(title="Catgorical Dot Plot", tools="", toolbar_location=None,
+dot = figure(title="Categorical Dot Plot", tools="", toolbar_location=None,
y_range=factors, x_range=[0,100])
dot.segment(0, factors, x, factors, line_width=2, line_color="green", )
| {"golden_diff": "diff --git a/examples/plotting/file/categorical.py b/examples/plotting/file/categorical.py\n--- a/examples/plotting/file/categorical.py\n+++ b/examples/plotting/file/categorical.py\n@@ -4,7 +4,7 @@\n factors = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\"]\n x = [50, 40, 65, 10, 25, 37, 80, 60]\n \n-dot = figure(title=\"Catgorical Dot Plot\", tools=\"\", toolbar_location=None,\n+dot = figure(title=\"Categorical Dot Plot\", tools=\"\", toolbar_location=None,\n y_range=factors, x_range=[0,100])\n \n dot.segment(0, factors, x, factors, line_width=2, line_color=\"green\", )\n", "issue": "Typo in categorical example plot title\nFile examples/plotting/file/categorical.py contains a typo in plot name.\n\n", "before_files": [{"content": "from bokeh.layouts import row\nfrom bokeh.plotting import figure, show, output_file\n\nfactors = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\"]\nx = [50, 40, 65, 10, 25, 37, 80, 60]\n\ndot = figure(title=\"Catgorical Dot Plot\", tools=\"\", toolbar_location=None,\n y_range=factors, x_range=[0,100])\n\ndot.segment(0, factors, x, factors, line_width=2, line_color=\"green\", )\ndot.circle(x, factors, size=15, fill_color=\"orange\", line_color=\"green\", line_width=3, )\n\nfactors = [\"foo\", \"bar\", \"baz\"]\nx = [\"foo\", \"foo\", \"foo\", \"bar\", \"bar\", \"bar\", \"baz\", \"baz\", \"baz\"]\ny = [\"foo\", \"bar\", \"baz\", \"foo\", \"bar\", \"baz\", \"foo\", \"bar\", \"baz\"]\ncolors = [\n \"#0B486B\", \"#79BD9A\", \"#CFF09E\",\n \"#79BD9A\", \"#0B486B\", \"#79BD9A\",\n \"#CFF09E\", \"#79BD9A\", \"#0B486B\"\n]\n\nhm = figure(title=\"Categorical Heatmap\", tools=\"hover\", toolbar_location=None,\n x_range=factors, y_range=factors)\n\nhm.rect(x, y, color=colors, width=1, height=1)\n\noutput_file(\"categorical.html\", title=\"categorical.py example\")\n\nshow(row(hm, dot, sizing_mode=\"scale_width\")) # open a browser\n", "path": "examples/plotting/file/categorical.py"}]} | 1,009 | 189 |
gh_patches_debug_26203 | rasdani/github-patches | git_diff | pyg-team__pytorch_geometric-6546 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bipartite graph support for utils.dense_to_sparse
### 🚀 The feature, motivation and pitch
I have a nearly-dense bipartite graph (that is, most features in node set A are connected to most features in node set B), and so it is easiest for me to define the edge adjacency matrix as a dense, non-square matrix. However, the message passing class expects a sparse edge adjacency layout. The dense_to_sparse utility would seem to be ideal for this purpose, but it can only take square matrices (thus, is unhelpful for bipartite graphs).
### Alternatives
A way to implicitly request propagate to pass messages from every node in A to every node in B would be even better (storing fully connected graphs is very memory inefficient), but I know that pyg is meant for sparser graph constructions so this would likely be a feature that wasn't used very much by other people.
### Additional context
_No response_
</issue>
<code>
[start of torch_geometric/utils/sparse.py]
1 from typing import Any, Optional, Tuple, Union
2
3 import torch
4 from torch import Tensor
5
6 from torch_geometric.typing import SparseTensor
7
8
9 def dense_to_sparse(adj: Tensor) -> Tuple[Tensor, Tensor]:
10 r"""Converts a dense adjacency matrix to a sparse adjacency matrix defined
11 by edge indices and edge attributes.
12
13 Args:
14 adj (Tensor): The dense adjacency matrix.
15 :rtype: (:class:`LongTensor`, :class:`Tensor`)
16
17 Examples:
18
19 >>> # Forr a single adjacency matrix
20 >>> adj = torch.tensor([[3, 1],
21 ... [2, 0]])
22 >>> dense_to_sparse(adj)
23 (tensor([[0, 0, 1],
24 [0, 1, 0]]),
25 tensor([3, 1, 2]))
26
27 >>> # For two adjacency matrixes
28 >>> adj = torch.tensor([[[3, 1],
29 ... [2, 0]],
30 ... [[0, 1],
31 ... [0, 2]]])
32 >>> dense_to_sparse(adj)
33 (tensor([[0, 0, 1, 2, 3],
34 [0, 1, 0, 3, 3]]),
35 tensor([3, 1, 2, 1, 2]))
36 """
37 assert adj.dim() >= 2 and adj.dim() <= 3
38 assert adj.size(-1) == adj.size(-2)
39
40 edge_index = adj.nonzero().t()
41
42 if edge_index.size(0) == 2:
43 edge_attr = adj[edge_index[0], edge_index[1]]
44 return edge_index, edge_attr
45 else:
46 edge_attr = adj[edge_index[0], edge_index[1], edge_index[2]]
47 batch = edge_index[0] * adj.size(-1)
48 row = batch + edge_index[1]
49 col = batch + edge_index[2]
50 return torch.stack([row, col], dim=0), edge_attr
51
52
53 def is_torch_sparse_tensor(src: Any) -> bool:
54 """Returns :obj:`True` if the input :obj:`src` is a
55 :class:`torch.sparse.Tensor` (in any sparse layout).
56
57 Args:
58 src (Any): The input object to be checked.
59 """
60 return isinstance(src, Tensor) and src.is_sparse
61
62
63 def is_sparse(src: Any) -> bool:
64 """Returns :obj:`True` if the input :obj:`src` is of type
65 :class:`torch.sparse.Tensor` (in any sparse layout) or of type
66 :class:`torch_sparse.SparseTensor`.
67
68 Args:
69 src (Any): The input object to be checked.
70 """
71 return is_torch_sparse_tensor(src) or isinstance(src, SparseTensor)
72
73
74 def to_torch_coo_tensor(
75 edge_index: Tensor,
76 edge_attr: Optional[Tensor] = None,
77 size: Optional[Union[int, Tuple[int, int]]] = None,
78 ) -> Tensor:
79 """Converts a sparse adjacency matrix defined by edge indices and edge
80 attributes to a :class:`torch.sparse.Tensor`.
81
82 Args:
83 edge_index (LongTensor): The edge indices.
84 edge_attr (Tensor, optional): The edge attributes.
85 (default: :obj:`None`)
86 size (int or (int, int), optional): The size of the sparse matrix.
87 If given as an integer, will create a quadratic sparse matrix.
88 If set to :obj:`None`, will infer a quadratic sparse matrix based
89 on :obj:`edge_index.max() + 1`. (default: :obj:`None`)
90
91 :rtype: :class:`torch.sparse.FloatTensor`
92
93 Example:
94
95 >>> edge_index = torch.tensor([[0, 1, 1, 2, 2, 3],
96 ... [1, 0, 2, 1, 3, 2]])
97 >>> to_torch_coo_tensor(edge_index)
98 tensor(indices=tensor([[0, 1, 1, 2, 2, 3],
99 [1, 0, 2, 1, 3, 2]]),
100 values=tensor([1., 1., 1., 1., 1., 1.]),
101 size=(4, 4), nnz=6, layout=torch.sparse_coo)
102
103 """
104 if size is None:
105 size = int(edge_index.max()) + 1
106 if not isinstance(size, (tuple, list)):
107 size = (size, size)
108
109 if edge_attr is None:
110 edge_attr = torch.ones(edge_index.size(1), device=edge_index.device)
111
112 size = tuple(size) + edge_attr.size()[1:]
113 out = torch.sparse_coo_tensor(edge_index, edge_attr, size,
114 device=edge_index.device)
115 out = out.coalesce()
116 return out
117
[end of torch_geometric/utils/sparse.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torch_geometric/utils/sparse.py b/torch_geometric/utils/sparse.py
--- a/torch_geometric/utils/sparse.py
+++ b/torch_geometric/utils/sparse.py
@@ -11,7 +11,10 @@
by edge indices and edge attributes.
Args:
- adj (Tensor): The dense adjacency matrix.
+ adj (Tensor): The dense adjacency matrix of shape
+ :obj:`[num_nodes, num_nodes]` or
+ :obj:`[batch_size, num_nodes, num_nodes]`.
+
:rtype: (:class:`LongTensor`, :class:`Tensor`)
Examples:
@@ -34,8 +37,9 @@
[0, 1, 0, 3, 3]]),
tensor([3, 1, 2, 1, 2]))
"""
- assert adj.dim() >= 2 and adj.dim() <= 3
- assert adj.size(-1) == adj.size(-2)
+ if adj.dim() < 2 or adj.dim() > 3:
+ raise ValueError(f"Dense adjacency matrix 'adj' must be 2- or "
+ f"3-dimensional (got {adj.dim()} dimensions)")
edge_index = adj.nonzero().t()
@@ -44,9 +48,8 @@
return edge_index, edge_attr
else:
edge_attr = adj[edge_index[0], edge_index[1], edge_index[2]]
- batch = edge_index[0] * adj.size(-1)
- row = batch + edge_index[1]
- col = batch + edge_index[2]
+ row = edge_index[1] + adj.size(-2) * edge_index[0]
+ col = edge_index[2] + adj.size(-1) * edge_index[0]
return torch.stack([row, col], dim=0), edge_attr
| {"golden_diff": "diff --git a/torch_geometric/utils/sparse.py b/torch_geometric/utils/sparse.py\n--- a/torch_geometric/utils/sparse.py\n+++ b/torch_geometric/utils/sparse.py\n@@ -11,7 +11,10 @@\n by edge indices and edge attributes.\n \n Args:\n- adj (Tensor): The dense adjacency matrix.\n+ adj (Tensor): The dense adjacency matrix of shape\n+ :obj:`[num_nodes, num_nodes]` or\n+ :obj:`[batch_size, num_nodes, num_nodes]`.\n+\n :rtype: (:class:`LongTensor`, :class:`Tensor`)\n \n Examples:\n@@ -34,8 +37,9 @@\n [0, 1, 0, 3, 3]]),\n tensor([3, 1, 2, 1, 2]))\n \"\"\"\n- assert adj.dim() >= 2 and adj.dim() <= 3\n- assert adj.size(-1) == adj.size(-2)\n+ if adj.dim() < 2 or adj.dim() > 3:\n+ raise ValueError(f\"Dense adjacency matrix 'adj' must be 2- or \"\n+ f\"3-dimensional (got {adj.dim()} dimensions)\")\n \n edge_index = adj.nonzero().t()\n \n@@ -44,9 +48,8 @@\n return edge_index, edge_attr\n else:\n edge_attr = adj[edge_index[0], edge_index[1], edge_index[2]]\n- batch = edge_index[0] * adj.size(-1)\n- row = batch + edge_index[1]\n- col = batch + edge_index[2]\n+ row = edge_index[1] + adj.size(-2) * edge_index[0]\n+ col = edge_index[2] + adj.size(-1) * edge_index[0]\n return torch.stack([row, col], dim=0), edge_attr\n", "issue": "Bipartite graph support for utils.dense_to_sparse\n### \ud83d\ude80 The feature, motivation and pitch\n\nI have a nearly-dense bipartite graph (that is, most features in node set A are connected to most features in node set B), and so it is easiest for me to define the edge adjacency matrix as a dense, non-square matrix. However, the message passing class expects a sparse edge adjacency layout. The dense_to_sparse utility would seem to be ideal for this purpose, but it can only take square matrices (thus, is unhelpful for bipartite graphs).\n\n### Alternatives\n\nA way to implicitly request propagate to pass messages from every node in A to every node in B would be even better (storing fully connected graphs is very memory inefficient), but I know that pyg is meant for sparser graph constructions so this would likely be a feature that wasn't used very much by other people.\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "from typing import Any, Optional, Tuple, Union\n\nimport torch\nfrom torch import Tensor\n\nfrom torch_geometric.typing import SparseTensor\n\n\ndef dense_to_sparse(adj: Tensor) -> Tuple[Tensor, Tensor]:\n r\"\"\"Converts a dense adjacency matrix to a sparse adjacency matrix defined\n by edge indices and edge attributes.\n\n Args:\n adj (Tensor): The dense adjacency matrix.\n :rtype: (:class:`LongTensor`, :class:`Tensor`)\n\n Examples:\n\n >>> # Forr a single adjacency matrix\n >>> adj = torch.tensor([[3, 1],\n ... [2, 0]])\n >>> dense_to_sparse(adj)\n (tensor([[0, 0, 1],\n [0, 1, 0]]),\n tensor([3, 1, 2]))\n\n >>> # For two adjacency matrixes\n >>> adj = torch.tensor([[[3, 1],\n ... [2, 0]],\n ... [[0, 1],\n ... 
[0, 2]]])\n >>> dense_to_sparse(adj)\n (tensor([[0, 0, 1, 2, 3],\n [0, 1, 0, 3, 3]]),\n tensor([3, 1, 2, 1, 2]))\n \"\"\"\n assert adj.dim() >= 2 and adj.dim() <= 3\n assert adj.size(-1) == adj.size(-2)\n\n edge_index = adj.nonzero().t()\n\n if edge_index.size(0) == 2:\n edge_attr = adj[edge_index[0], edge_index[1]]\n return edge_index, edge_attr\n else:\n edge_attr = adj[edge_index[0], edge_index[1], edge_index[2]]\n batch = edge_index[0] * adj.size(-1)\n row = batch + edge_index[1]\n col = batch + edge_index[2]\n return torch.stack([row, col], dim=0), edge_attr\n\n\ndef is_torch_sparse_tensor(src: Any) -> bool:\n \"\"\"Returns :obj:`True` if the input :obj:`src` is a\n :class:`torch.sparse.Tensor` (in any sparse layout).\n\n Args:\n src (Any): The input object to be checked.\n \"\"\"\n return isinstance(src, Tensor) and src.is_sparse\n\n\ndef is_sparse(src: Any) -> bool:\n \"\"\"Returns :obj:`True` if the input :obj:`src` is of type\n :class:`torch.sparse.Tensor` (in any sparse layout) or of type\n :class:`torch_sparse.SparseTensor`.\n\n Args:\n src (Any): The input object to be checked.\n \"\"\"\n return is_torch_sparse_tensor(src) or isinstance(src, SparseTensor)\n\n\ndef to_torch_coo_tensor(\n edge_index: Tensor,\n edge_attr: Optional[Tensor] = None,\n size: Optional[Union[int, Tuple[int, int]]] = None,\n) -> Tensor:\n \"\"\"Converts a sparse adjacency matrix defined by edge indices and edge\n attributes to a :class:`torch.sparse.Tensor`.\n\n Args:\n edge_index (LongTensor): The edge indices.\n edge_attr (Tensor, optional): The edge attributes.\n (default: :obj:`None`)\n size (int or (int, int), optional): The size of the sparse matrix.\n If given as an integer, will create a quadratic sparse matrix.\n If set to :obj:`None`, will infer a quadratic sparse matrix based\n on :obj:`edge_index.max() + 1`. (default: :obj:`None`)\n\n :rtype: :class:`torch.sparse.FloatTensor`\n\n Example:\n\n >>> edge_index = torch.tensor([[0, 1, 1, 2, 2, 3],\n ... [1, 0, 2, 1, 3, 2]])\n >>> to_torch_coo_tensor(edge_index)\n tensor(indices=tensor([[0, 1, 1, 2, 2, 3],\n [1, 0, 2, 1, 3, 2]]),\n values=tensor([1., 1., 1., 1., 1., 1.]),\n size=(4, 4), nnz=6, layout=torch.sparse_coo)\n\n \"\"\"\n if size is None:\n size = int(edge_index.max()) + 1\n if not isinstance(size, (tuple, list)):\n size = (size, size)\n\n if edge_attr is None:\n edge_attr = torch.ones(edge_index.size(1), device=edge_index.device)\n\n size = tuple(size) + edge_attr.size()[1:]\n out = torch.sparse_coo_tensor(edge_index, edge_attr, size,\n device=edge_index.device)\n out = out.coalesce()\n return out\n", "path": "torch_geometric/utils/sparse.py"}]} | 2,042 | 424 |
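The essential change in the diff above is in the batched offsets: row indices are shifted by `adj.size(-2)` and column indices by `adj.size(-1)`, which agree for square matrices but differ for bipartite blocks. A quick hand-checked run of the fixed arithmetic on a 2x3 example:

```python
import torch

adj = torch.tensor([[[1., 0., 1.],
                     [0., 1., 0.]]])     # batch of one 2x3 bipartite block

idx = adj.nonzero().t()                  # (batch, row, col) triples
row = idx[1] + adj.size(-2) * idx[0]     # rows offset by the row count
col = idx[2] + adj.size(-1) * idx[0]     # cols offset by the column count
print(torch.stack([row, col], dim=0))
# tensor([[0, 0, 1],
#         [0, 2, 1]])
```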
gh_patches_debug_2242 | rasdani/github-patches | git_diff | python-poetry__poetry-1577 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
poetry v1.0.0b4 breaks on zip packages
<!-- Checked checkbox should look like this: [x] -->
- [x] I am on the [latest](https://github.com/sdispater/poetry/releases/latest) Poetry version.
- [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.
- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).
- **OS version and name**: Windows 10 with a virtual environment for Python v3.7.4
- **Poetry version**: 1.0.0b4
- **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: (empty project)
## Issue Summary
The newly refactored distribution loading mechanism from [PR 1549](https://github.com/sdispater/poetry/pull/1549/) fails when a zip-based package is present. The presenting problem is that the `zipp.Path` class is not compatible with the `__fspath__` protocol.
## Issue Details
After updating to Poetry v1.0.0b4, I get this traceback
```
% poetry update -v
Using virtualenv: C:\Users\garyd\devel\video-storage\venv-new
[TypeError]
expected str, bytes or os.PathLike object, not Path
Traceback (most recent call last):
File "C:\Users\garyd\.poetry\lib\poetry\_vendor\py3.7\clikit\console_application.py", line 131, in run
status_code = command.handle(parsed_args, io)
File "C:\Users\garyd\.poetry\lib\poetry\_vendor\py3.7\clikit\api\command\command.py", line 120, in handle
status_code = self._do_handle(args, io)
File "C:\Users\garyd\.poetry\lib\poetry\_vendor\py3.7\clikit\api\command\command.py", line 171, in _do_handle
return getattr(handler, handler_method)(args, io, self)
File "C:\Users\garyd\.poetry\lib\poetry\_vendor\py3.7\cleo\commands\command.py", line 92, in wrap_handle
return self.handle()
File "C:\Users\garyd\.poetry\lib\poetry\console\commands\update.py", line 36, in handle
self.io, self.env, self.poetry.package, self.poetry.locker, self.poetry.pool
File "C:\Users\garyd\.poetry\lib\poetry\installation\installer.py", line 55, in __init__
installed = self._get_installed()
File "C:\Users\garyd\.poetry\lib\poetry\installation\installer.py", line 507, in _get_installed
return InstalledRepository.load(self._env)
File "C:\Users\garyd\.poetry\lib\poetry\repositories\installed_repository.py", line 30, in load
path = Path(distribution._path)
File "C:\Users\garyd\AppData\Local\Programs\Python\Python37\lib\pathlib.py", line 1010, in __new__
self = cls._from_parts(args, init=False)
```
When I run the broken part of the code in my console, I find that the broken distribution is:
* type == <class 'importlib_metadata.PathDistribution'>
* type(dist._path) == <class 'zipp.Path'>
* dist._path == C:\Users\garyd\devel\video-storage\venv\lib\site-packages\setuptools-40.8.0-py3.7.egg/EGG-INFO/
</issue>
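A minimal reproduction of the incompatibility described above, plus the `str()` coercion the patch below relies on (hypothetical archive name; assumes the `zipp` package, whose `Path` on the affected versions has `__str__` but no `__fspath__`):

```python
import zipfile
from pathlib import Path

import zipp  # the backport used by importlib_metadata

zipfile.ZipFile("demo.egg", "w").close()    # hypothetical zip-based package
zp = zipp.Path("demo.egg", "EGG-INFO/")

try:
    Path(zp)          # what installed_repository.py did
except TypeError as exc:
    print(exc)        # expected str, bytes or os.PathLike object, not Path

print(Path(str(zp)))  # the one-line workaround the golden diff applies
```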
<code>
[start of poetry/repositories/installed_repository.py]
1 from importlib_metadata import distributions
2 from poetry.packages import Package
3 from poetry.utils._compat import Path
4 from poetry.utils.env import Env
5
6 from .repository import Repository
7
8
9 class InstalledRepository(Repository):
10 @classmethod
11 def load(cls, env): # type: (Env) -> InstalledRepository
12 """
13 Load installed packages.
14
15 For now, it uses the pip "freeze" command.
16 """
17 repo = cls()
18
19 for distribution in sorted(
20 distributions(path=env.sys_path), key=lambda d: str(d._path),
21 ):
22 metadata = distribution.metadata
23 name = metadata["name"]
24 version = metadata["version"]
25 package = Package(name, version, version)
26 package.description = metadata.get("summary", "")
27
28 repo.add_package(package)
29
30 path = Path(distribution._path)
31 is_standard_package = True
32 try:
33 path.relative_to(env.site_packages)
34 except ValueError:
35 is_standard_package = False
36
37 if is_standard_package:
38 continue
39
40 src_path = env.path / "src"
41
42 # A VCS dependency should have been installed
43 # in the src directory. If not, it's a path dependency
44 try:
45 path.relative_to(src_path)
46
47 from poetry.vcs.git import Git
48
49 git = Git()
50 revision = git.rev_parse("HEAD", src_path / package.name).strip()
51 url = git.remote_url(src_path / package.name)
52
53 package.source_type = "git"
54 package.source_url = url
55 package.source_reference = revision
56 except ValueError:
57 package.source_type = "directory"
58 package.source_url = str(path.parent)
59
60 return repo
61
[end of poetry/repositories/installed_repository.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/poetry/repositories/installed_repository.py b/poetry/repositories/installed_repository.py
--- a/poetry/repositories/installed_repository.py
+++ b/poetry/repositories/installed_repository.py
@@ -27,7 +27,7 @@
repo.add_package(package)
- path = Path(distribution._path)
+ path = Path(str(distribution._path))
is_standard_package = True
try:
path.relative_to(env.site_packages)
| {"golden_diff": "diff --git a/poetry/repositories/installed_repository.py b/poetry/repositories/installed_repository.py\n--- a/poetry/repositories/installed_repository.py\n+++ b/poetry/repositories/installed_repository.py\n@@ -27,7 +27,7 @@\n \n repo.add_package(package)\n \n- path = Path(distribution._path)\n+ path = Path(str(distribution._path))\n is_standard_package = True\n try:\n path.relative_to(env.site_packages)\n", "issue": "poetry v1.0.0b4 breaks on zip packages\n<!-- Checked checkbox should look like this: [x] -->\r\n- [x] I am on the [latest](https://github.com/sdispater/poetry/releases/latest) Poetry version.\r\n- [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).\r\n\r\n- **OS version and name**: Windows 10 with a virtual environment for Python v3.7.4\r\n- **Poetry version**: 1.0.0b4\r\n- **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: (empty project)\r\n\r\n## Issue Summary\r\nThe newly refactored distribution loading mechanism from [PR 1549](https://github.com/sdispater/poetry/pull/1549/) fails when a zip-based package is present. The presenting problem is that the `zipp.Path` class is not compatible with the `__fspath__` protocol.\r\n\r\n## Issue Details\r\nAfter updating to Poetry v1.0.0b4, I get this traceback\r\n\r\n```\r\n% poetry update -v\r\nUsing virtualenv: C:\\Users\\garyd\\devel\\video-storage\\venv-new\r\n[TypeError]\r\nexpected str, bytes or os.PathLike object, not Path\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\garyd\\.poetry\\lib\\poetry\\_vendor\\py3.7\\clikit\\console_application.py\", line 131, in run\r\n status_code = command.handle(parsed_args, io)\r\n File \"C:\\Users\\garyd\\.poetry\\lib\\poetry\\_vendor\\py3.7\\clikit\\api\\command\\command.py\", line 120, in handle\r\n status_code = self._do_handle(args, io)\r\n File \"C:\\Users\\garyd\\.poetry\\lib\\poetry\\_vendor\\py3.7\\clikit\\api\\command\\command.py\", line 171, in _do_handle\r\n return getattr(handler, handler_method)(args, io, self)\r\n File \"C:\\Users\\garyd\\.poetry\\lib\\poetry\\_vendor\\py3.7\\cleo\\commands\\command.py\", line 92, in wrap_handle\r\n return self.handle()\r\n File \"C:\\Users\\garyd\\.poetry\\lib\\poetry\\console\\commands\\update.py\", line 36, in handle\r\n self.io, self.env, self.poetry.package, self.poetry.locker, self.poetry.pool\r\n File \"C:\\Users\\garyd\\.poetry\\lib\\poetry\\installation\\installer.py\", line 55, in __init__\r\n installed = self._get_installed()\r\n File \"C:\\Users\\garyd\\.poetry\\lib\\poetry\\installation\\installer.py\", line 507, in _get_installed\r\n return InstalledRepository.load(self._env)\r\n File \"C:\\Users\\garyd\\.poetry\\lib\\poetry\\repositories\\installed_repository.py\", line 30, in load\r\n path = Path(distribution._path)\r\n File \"C:\\Users\\garyd\\AppData\\Local\\Programs\\Python\\Python37\\lib\\pathlib.py\", line 1010, in __new__\r\n self = cls._from_parts(args, init=False)\r\n```\r\n\r\nWhen I run the broken part of the code in my console, I find that the broken distribution is:\r\n\r\n* type == <class 'importlib_metadata.PathDistribution'>\r\n* type(dist._path) == <class 'zipp.Path'>\r\n* dist._path == C:\\Users\\garyd\\devel\\video-storage\\venv\\lib\\site-packages\\setuptools-40.8.0-py3.7.egg/EGG-INFO/\r\n\r\n\n", "before_files": [{"content": "from importlib_metadata 
import distributions\nfrom poetry.packages import Package\nfrom poetry.utils._compat import Path\nfrom poetry.utils.env import Env\n\nfrom .repository import Repository\n\n\nclass InstalledRepository(Repository):\n @classmethod\n def load(cls, env): # type: (Env) -> InstalledRepository\n \"\"\"\n Load installed packages.\n\n For now, it uses the pip \"freeze\" command.\n \"\"\"\n repo = cls()\n\n for distribution in sorted(\n distributions(path=env.sys_path), key=lambda d: str(d._path),\n ):\n metadata = distribution.metadata\n name = metadata[\"name\"]\n version = metadata[\"version\"]\n package = Package(name, version, version)\n package.description = metadata.get(\"summary\", \"\")\n\n repo.add_package(package)\n\n path = Path(distribution._path)\n is_standard_package = True\n try:\n path.relative_to(env.site_packages)\n except ValueError:\n is_standard_package = False\n\n if is_standard_package:\n continue\n\n src_path = env.path / \"src\"\n\n # A VCS dependency should have been installed\n # in the src directory. If not, it's a path dependency\n try:\n path.relative_to(src_path)\n\n from poetry.vcs.git import Git\n\n git = Git()\n revision = git.rev_parse(\"HEAD\", src_path / package.name).strip()\n url = git.remote_url(src_path / package.name)\n\n package.source_type = \"git\"\n package.source_url = url\n package.source_reference = revision\n except ValueError:\n package.source_type = \"directory\"\n package.source_url = str(path.parent)\n\n return repo\n", "path": "poetry/repositories/installed_repository.py"}]} | 1,891 | 108 |
gh_patches_debug_21740 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-503 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
New CSV imports use autogenerated column names.
## Description
<!-- A clear and concise description of what the bug is. -->
According to #459, newly imported CSVs are supposed to use the first row as headers by default. However, newly uploaded CSVs are showing autogenerated column names.
## Expected behavior
<!-- A clear and concise description of what you expected to happen. -->
The first row of the CSV should be used as header names by default.
## To Reproduce
<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->
Upload a CSV from the Mathesar UI. Column names will be of the form `column_0`, etc.
</issue>
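A hedged sketch of the serializer-level behaviour the fix needs — with an explicit `default=True`, an upload payload that omits `header` validates to `True`, so the first CSV row becomes the header. The standalone serializer is illustrative only (assumes Django REST Framework is importable); the real field lives on `DataFileSerializer` below:

```python
from rest_framework import serializers

class UploadParams(serializers.Serializer):
    # Omitting the field no longer means "let the model decide":
    # DRF substitutes the declared default during validation.
    header = serializers.BooleanField(default=True)

params = UploadParams(data={})        # client sent no "header" key
params.is_valid(raise_exception=True)
print(params.validated_data)          # {'header': True}
```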
<code>
[start of mathesar/serializers.py]
1 from django.urls import reverse
2 from rest_framework import serializers
3
4 from mathesar.models import Table, Schema, DataFile, Database
5
6
7 class NestedTableSerializer(serializers.HyperlinkedModelSerializer):
8 url = serializers.SerializerMethodField()
9
10 class Meta:
11 model = Table
12 fields = ['id', 'name', 'url']
13
14 def get_url(self, obj):
15 request = self.context['request']
16 return request.build_absolute_uri(reverse('table-detail', kwargs={'pk': obj.pk}))
17
18
19 class ModelNameField(serializers.CharField):
20 """
21 De-serializes the request field as a string, but serializes the response field as
22 `model.name`. Required to support passing and returing a model name from the
23 endpoint, while also storing the model as a related field.
24 """
25 def to_representation(self, value):
26 return value.name
27
28
29 class SchemaSerializer(serializers.HyperlinkedModelSerializer):
30 tables = NestedTableSerializer(many=True, read_only=True)
31 name = serializers.CharField()
32 database = ModelNameField(max_length=128)
33
34 class Meta:
35 model = Schema
36 fields = ['id', 'name', 'tables', 'database', 'has_dependencies']
37
38
39 class SimpleColumnSerializer(serializers.Serializer):
40 name = serializers.CharField()
41 type = serializers.CharField()
42
43
44 class ColumnSerializer(SimpleColumnSerializer):
45 index = serializers.IntegerField(source='column_index', read_only=True)
46 nullable = serializers.BooleanField(default=True)
47 primary_key = serializers.BooleanField(default=False)
48 valid_target_types = serializers.ListField(read_only=True)
49
50
51 class TableSerializer(serializers.ModelSerializer):
52 columns = SimpleColumnSerializer(many=True, read_only=True, source='sa_columns')
53 records = serializers.SerializerMethodField()
54 name = serializers.CharField()
55 data_files = serializers.PrimaryKeyRelatedField(required=False, many=True, queryset=DataFile.objects.all())
56
57 class Meta:
58 model = Table
59 fields = ['id', 'name', 'schema', 'created_at', 'updated_at',
60 'columns', 'records', 'data_files', 'has_dependencies']
61
62 def get_records(self, obj):
63 if isinstance(obj, Table):
64 # Only get records if we are serializing an existing table
65 request = self.context['request']
66 return request.build_absolute_uri(reverse('table-record-list', kwargs={'table_pk': obj.pk}))
67 else:
68 return None
69
70
71 class RecordSerializer(serializers.BaseSerializer):
72 def to_representation(self, instance):
73 return instance._asdict()
74
75
76 class RecordListParameterSerializer(serializers.Serializer):
77 filters = serializers.JSONField(required=False, default=[])
78 order_by = serializers.JSONField(required=False, default=[])
79 group_count_by = serializers.JSONField(required=False, default=[])
80
81
82 class DatabaseSerializer(serializers.ModelSerializer):
83 supported_types = serializers.ListField(child=serializers.CharField())
84
85 class Meta:
86 model = Database
87 fields = ['id', 'name', 'deleted', 'supported_types']
88 read_only_fields = ['id', 'name', 'deleted', 'supported_types']
89
90
91 class DataFileSerializer(serializers.ModelSerializer):
92 user = serializers.PrimaryKeyRelatedField(
93 default=serializers.CurrentUserDefault(), read_only=True
94 )
95
96 class Meta:
97 model = DataFile
98 fields = [
99 'id', 'file', 'table_imported_to', 'user', 'header', 'delimiter', 'escapechar', 'quotechar'
100 ]
101 extra_kwargs = {'delimiter': {'trim_whitespace': False},
102 'escapechar': {'trim_whitespace': False},
103 'quotechar': {'trim_whitespace': False},
104 'header': {'required': True}}
105 # We only currently support importing to a new table, so setting a table via API is invalid.
106 # User should be set automatically, not submitted via the API.
107 read_only_fields = ['table_imported_to']
108
109 def save(self, **kwargs):
110 """
111 Set user to current user while saving the data file.
112 """
113 current_user = self.fields['user'].get_default()
114 if current_user.is_authenticated:
115 kwargs['user'] = current_user
116 return super().save(**kwargs)
117
[end of mathesar/serializers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mathesar/serializers.py b/mathesar/serializers.py
--- a/mathesar/serializers.py
+++ b/mathesar/serializers.py
@@ -92,6 +92,7 @@
user = serializers.PrimaryKeyRelatedField(
default=serializers.CurrentUserDefault(), read_only=True
)
+ header = serializers.BooleanField(default=True)
class Meta:
model = DataFile
@@ -100,8 +101,8 @@
]
extra_kwargs = {'delimiter': {'trim_whitespace': False},
'escapechar': {'trim_whitespace': False},
- 'quotechar': {'trim_whitespace': False},
- 'header': {'required': True}}
+ 'quotechar': {'trim_whitespace': False}
+ }
# We only currently support importing to a new table, so setting a table via API is invalid.
# User should be set automatically, not submitted via the API.
read_only_fields = ['table_imported_to']
| {"golden_diff": "diff --git a/mathesar/serializers.py b/mathesar/serializers.py\n--- a/mathesar/serializers.py\n+++ b/mathesar/serializers.py\n@@ -92,6 +92,7 @@\n user = serializers.PrimaryKeyRelatedField(\n default=serializers.CurrentUserDefault(), read_only=True\n )\n+ header = serializers.BooleanField(default=True)\n \n class Meta:\n model = DataFile\n@@ -100,8 +101,8 @@\n ]\n extra_kwargs = {'delimiter': {'trim_whitespace': False},\n 'escapechar': {'trim_whitespace': False},\n- 'quotechar': {'trim_whitespace': False},\n- 'header': {'required': True}}\n+ 'quotechar': {'trim_whitespace': False}\n+ }\n # We only currently support importing to a new table, so setting a table via API is invalid.\n # User should be set automatically, not submitted via the API.\n read_only_fields = ['table_imported_to']\n", "issue": "New CSV imports use autogenerated column names.\n## Description\r\n<!-- A clear and concise description of what the bug is. -->\r\nAccording to #459, newly imported CSVs are supposed to use the first row as headers by default. However, newly uploaded CSVs are showing autogenerated column names.\r\n\r\n## Expected behavior\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nThe first row of the CSV should be used as header names by default.\r\n\r\n## To Reproduce\r\n<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->\r\nUpload a CSV from the Mathesar UI. Column names will be of the form `column_0`, etc.\n", "before_files": [{"content": "from django.urls import reverse\nfrom rest_framework import serializers\n\nfrom mathesar.models import Table, Schema, DataFile, Database\n\n\nclass NestedTableSerializer(serializers.HyperlinkedModelSerializer):\n url = serializers.SerializerMethodField()\n\n class Meta:\n model = Table\n fields = ['id', 'name', 'url']\n\n def get_url(self, obj):\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-detail', kwargs={'pk': obj.pk}))\n\n\nclass ModelNameField(serializers.CharField):\n \"\"\"\n De-serializes the request field as a string, but serializes the response field as\n `model.name`. 
Required to support passing and returing a model name from the\n endpoint, while also storing the model as a related field.\n \"\"\"\n def to_representation(self, value):\n return value.name\n\n\nclass SchemaSerializer(serializers.HyperlinkedModelSerializer):\n tables = NestedTableSerializer(many=True, read_only=True)\n name = serializers.CharField()\n database = ModelNameField(max_length=128)\n\n class Meta:\n model = Schema\n fields = ['id', 'name', 'tables', 'database', 'has_dependencies']\n\n\nclass SimpleColumnSerializer(serializers.Serializer):\n name = serializers.CharField()\n type = serializers.CharField()\n\n\nclass ColumnSerializer(SimpleColumnSerializer):\n index = serializers.IntegerField(source='column_index', read_only=True)\n nullable = serializers.BooleanField(default=True)\n primary_key = serializers.BooleanField(default=False)\n valid_target_types = serializers.ListField(read_only=True)\n\n\nclass TableSerializer(serializers.ModelSerializer):\n columns = SimpleColumnSerializer(many=True, read_only=True, source='sa_columns')\n records = serializers.SerializerMethodField()\n name = serializers.CharField()\n data_files = serializers.PrimaryKeyRelatedField(required=False, many=True, queryset=DataFile.objects.all())\n\n class Meta:\n model = Table\n fields = ['id', 'name', 'schema', 'created_at', 'updated_at',\n 'columns', 'records', 'data_files', 'has_dependencies']\n\n def get_records(self, obj):\n if isinstance(obj, Table):\n # Only get records if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-record-list', kwargs={'table_pk': obj.pk}))\n else:\n return None\n\n\nclass RecordSerializer(serializers.BaseSerializer):\n def to_representation(self, instance):\n return instance._asdict()\n\n\nclass RecordListParameterSerializer(serializers.Serializer):\n filters = serializers.JSONField(required=False, default=[])\n order_by = serializers.JSONField(required=False, default=[])\n group_count_by = serializers.JSONField(required=False, default=[])\n\n\nclass DatabaseSerializer(serializers.ModelSerializer):\n supported_types = serializers.ListField(child=serializers.CharField())\n\n class Meta:\n model = Database\n fields = ['id', 'name', 'deleted', 'supported_types']\n read_only_fields = ['id', 'name', 'deleted', 'supported_types']\n\n\nclass DataFileSerializer(serializers.ModelSerializer):\n user = serializers.PrimaryKeyRelatedField(\n default=serializers.CurrentUserDefault(), read_only=True\n )\n\n class Meta:\n model = DataFile\n fields = [\n 'id', 'file', 'table_imported_to', 'user', 'header', 'delimiter', 'escapechar', 'quotechar'\n ]\n extra_kwargs = {'delimiter': {'trim_whitespace': False},\n 'escapechar': {'trim_whitespace': False},\n 'quotechar': {'trim_whitespace': False},\n 'header': {'required': True}}\n # We only currently support importing to a new table, so setting a table via API is invalid.\n # User should be set automatically, not submitted via the API.\n read_only_fields = ['table_imported_to']\n\n def save(self, **kwargs):\n \"\"\"\n Set user to current user while saving the data file.\n \"\"\"\n current_user = self.fields['user'].get_default()\n if current_user.is_authenticated:\n kwargs['user'] = current_user\n return super().save(**kwargs)\n", "path": "mathesar/serializers.py"}]} | 1,807 | 215 |
gh_patches_debug_47851 | rasdani/github-patches | git_diff | searx__searx-2256 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make secret_key default one that will fail if not set to a custom value
Currently, the `secret_key` default value is `ultrasecretkey`, which is a valid value. Would it not be better to let the default value of this setting be one that will make searx fail to start? This will force the user to consciously change this setting to a secure value instead of accidentally forgetting to set this to something random and secure.
</issue>
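One way to get the requested fail-fast behaviour, sketched with a hypothetical helper around the settings dict the module below builds (the golden diff inlines the same check at import time and skips it in debug mode):

```python
import sys

def require_changed_secret(settings, debug):
    """Refuse to start while the shipped placeholder secret is in use."""
    if not debug and settings["server"]["secret_key"] == "ultrasecretkey":
        sys.stderr.write(
            "server.secret_key is not changed. Please use something else "
            "instead of ultrasecretkey.\n")
        sys.exit(1)

require_changed_secret({"server": {"secret_key": "ultrasecretkey"}},
                       debug=False)   # exits with status 1
```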
<code>
[start of searx/__init__.py]
1 '''
2 searx is free software: you can redistribute it and/or modify
3 it under the terms of the GNU Affero General Public License as published by
4 the Free Software Foundation, either version 3 of the License, or
5 (at your option) any later version.
6
7 searx is distributed in the hope that it will be useful,
8 but WITHOUT ANY WARRANTY; without even the implied warranty of
9 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 GNU Affero General Public License for more details.
11
12 You should have received a copy of the GNU Affero General Public License
13 along with searx. If not, see < http://www.gnu.org/licenses/ >.
14
15 (C) 2013- by Adam Tauber, <[email protected]>
16 '''
17
18 import logging
19 from os import environ
20 from os.path import realpath, dirname, join, abspath, isfile
21 from io import open
22 from yaml import safe_load
23
24
25 searx_dir = abspath(dirname(__file__))
26 engine_dir = dirname(realpath(__file__))
27 static_path = abspath(join(dirname(__file__), 'static'))
28
29
30 def check_settings_yml(file_name):
31 if isfile(file_name):
32 return file_name
33 else:
34 return None
35
36
37 # find location of settings.yml
38 if 'SEARX_SETTINGS_PATH' in environ:
39 # if possible set path to settings using the
40 # enviroment variable SEARX_SETTINGS_PATH
41 settings_path = check_settings_yml(environ['SEARX_SETTINGS_PATH'])
42 else:
43 # if not, get it from searx code base or last solution from /etc/searx
44 settings_path = check_settings_yml(join(searx_dir, 'settings.yml')) or check_settings_yml('/etc/searx/settings.yml')
45
46 if not settings_path:
47 raise Exception('settings.yml not found')
48
49 # load settings
50 with open(settings_path, 'r', encoding='utf-8') as settings_yaml:
51 settings = safe_load(settings_yaml)
52
53 if settings['ui']['static_path']:
54 static_path = settings['ui']['static_path']
55
56 '''
57 enable debug if
58 the environnement variable SEARX_DEBUG is 1 or true
59 (whatever the value in settings.yml)
60 or general.debug=True in settings.yml
61
62 disable debug if
63 the environnement variable SEARX_DEBUG is 0 or false
64 (whatever the value in settings.yml)
65 or general.debug=False in settings.yml
66 '''
67 searx_debug_env = environ.get('SEARX_DEBUG', '').lower()
68 if searx_debug_env == 'true' or searx_debug_env == '1':
69 searx_debug = True
70 elif searx_debug_env == 'false' or searx_debug_env == '0':
71 searx_debug = False
72 else:
73 searx_debug = settings.get('general', {}).get('debug')
74
75 if searx_debug:
76 logging.basicConfig(level=logging.DEBUG)
77 else:
78 logging.basicConfig(level=logging.WARNING)
79
80 logger = logging.getLogger('searx')
81 logger.debug('read configuration from %s', settings_path)
82 logger.info('Initialisation done')
83
84 if 'SEARX_SECRET' in environ:
85 settings['server']['secret_key'] = environ['SEARX_SECRET']
86 if 'SEARX_BIND_ADDRESS' in environ:
87 settings['server']['bind_address'] = environ['SEARX_BIND_ADDRESS']
88
[end of searx/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/searx/__init__.py b/searx/__init__.py
--- a/searx/__init__.py
+++ b/searx/__init__.py
@@ -85,3 +85,7 @@
settings['server']['secret_key'] = environ['SEARX_SECRET']
if 'SEARX_BIND_ADDRESS' in environ:
settings['server']['bind_address'] = environ['SEARX_BIND_ADDRESS']
+
+if not searx_debug and settings['server']['secret_key'] == 'ultrasecretkey':
+ logger.error('server.secret_key is not changed. Please use something else instead of ultrasecretkey.')
+ exit(1)
| {"golden_diff": "diff --git a/searx/__init__.py b/searx/__init__.py\n--- a/searx/__init__.py\n+++ b/searx/__init__.py\n@@ -85,3 +85,7 @@\n settings['server']['secret_key'] = environ['SEARX_SECRET']\n if 'SEARX_BIND_ADDRESS' in environ:\n settings['server']['bind_address'] = environ['SEARX_BIND_ADDRESS']\n+\n+if not searx_debug and settings['server']['secret_key'] == 'ultrasecretkey':\n+ logger.error('server.secret_key is not changed. Please use something else instead of ultrasecretkey.')\n+ exit(1)\n", "issue": "Make secret_key default one that will fail if not set to a custom value\nCurrently, the `secret_key` default value is `ultrasecretkey` which is a valid value. Would it not be better to let the default value of this setting be one that will make searx fail to start? This will force the user to conciously change this setting to a secure value instead of accidentally forgetting to set this to something random and secure.\n", "before_files": [{"content": "'''\nsearx is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nsearx is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with searx. If not, see < http://www.gnu.org/licenses/ >.\n\n(C) 2013- by Adam Tauber, <[email protected]>\n'''\n\nimport logging\nfrom os import environ\nfrom os.path import realpath, dirname, join, abspath, isfile\nfrom io import open\nfrom yaml import safe_load\n\n\nsearx_dir = abspath(dirname(__file__))\nengine_dir = dirname(realpath(__file__))\nstatic_path = abspath(join(dirname(__file__), 'static'))\n\n\ndef check_settings_yml(file_name):\n if isfile(file_name):\n return file_name\n else:\n return None\n\n\n# find location of settings.yml\nif 'SEARX_SETTINGS_PATH' in environ:\n # if possible set path to settings using the\n # enviroment variable SEARX_SETTINGS_PATH\n settings_path = check_settings_yml(environ['SEARX_SETTINGS_PATH'])\nelse:\n # if not, get it from searx code base or last solution from /etc/searx\n settings_path = check_settings_yml(join(searx_dir, 'settings.yml')) or check_settings_yml('/etc/searx/settings.yml')\n\nif not settings_path:\n raise Exception('settings.yml not found')\n\n# load settings\nwith open(settings_path, 'r', encoding='utf-8') as settings_yaml:\n settings = safe_load(settings_yaml)\n\nif settings['ui']['static_path']:\n static_path = settings['ui']['static_path']\n\n'''\nenable debug if\nthe environnement variable SEARX_DEBUG is 1 or true\n(whatever the value in settings.yml)\nor general.debug=True in settings.yml\n\ndisable debug if\nthe environnement variable SEARX_DEBUG is 0 or false\n(whatever the value in settings.yml)\nor general.debug=False in settings.yml\n'''\nsearx_debug_env = environ.get('SEARX_DEBUG', '').lower()\nif searx_debug_env == 'true' or searx_debug_env == '1':\n searx_debug = True\nelif searx_debug_env == 'false' or searx_debug_env == '0':\n searx_debug = False\nelse:\n searx_debug = settings.get('general', {}).get('debug')\n\nif searx_debug:\n logging.basicConfig(level=logging.DEBUG)\nelse:\n logging.basicConfig(level=logging.WARNING)\n\nlogger = logging.getLogger('searx')\nlogger.debug('read configuration from 
%s', settings_path)\nlogger.info('Initialisation done')\n\nif 'SEARX_SECRET' in environ:\n settings['server']['secret_key'] = environ['SEARX_SECRET']\nif 'SEARX_BIND_ADDRESS' in environ:\n settings['server']['bind_address'] = environ['SEARX_BIND_ADDRESS']\n", "path": "searx/__init__.py"}]} | 1,513 | 150 |
gh_patches_debug_20566 | rasdani/github-patches | git_diff | vispy__vispy-930 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Importing gloo should not automatically load the OpenGL library
I'm trying to run vispy on a headless server with docker, to use the ipynb webgl backend exclusively. I cannot `import vispy.gloo`:
```
File "/opt/conda/lib/python3.4/site-packages/vispy/gloo/__init__.py", line 47, in <module>
from . import gl # noqa
File "/opt/conda/lib/python3.4/site-packages/vispy/gloo/gl/__init__.py", line 213, in <module>
from . import gl2 as default_backend # noqa
File "/opt/conda/lib/python3.4/site-packages/vispy/gloo/gl/gl2.py", line 46, in <module>
raise RuntimeError('Could not load OpenGL library.')
RuntimeError: Could not load OpenGL library.
```
I should not need to have the OpenGL library on a headless server when using a remote backend.
</issue>
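A minimal sketch of the deferred-failure pattern that resolves this: swallow the missing library at import time, and raise only when a GL symbol is actually requested. Names track `gl2.py` below, but this is an illustration rather than vispy's exact code:

```python
import ctypes
import ctypes.util

_fname = ctypes.util.find_library("GL")
if not _fname:
    _lib = None                      # headless box: defer, don't raise
else:
    _lib = ctypes.cdll.LoadLibrary(_fname)

def _get_gl_func(name, restype, argtypes):
    if _lib is None:                 # only a real GL call needs the library
        raise RuntimeError("Could not load OpenGL library, gl cannot be used")
    func = getattr(_lib, name)
    func.restype = restype
    func.argtypes = argtypes
    return func
```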
<code>
[start of vispy/gloo/gl/gl2.py]
1 # -*- coding: utf-8 -*-
2 # Copyright (c) 2015, Vispy Development Team.
3 # Distributed under the (new) BSD License. See LICENSE.txt for more info.
4
5 """ GL ES 2.0 API implemented via desktop GL (i.e subset of normal OpenGL).
6 """
7
8 import os
9 import sys
10 import ctypes.util
11
12 from . import _copy_gl_functions
13 from ._constants import * # noqa
14
15 # Ctypes stuff
16
17
18 # Load the OpenGL library. We more or less follow the same approach
19 # as PyOpenGL does internally
20
21 _have_get_proc_address = False
22 _lib = os.getenv('VISPY_GL_LIB', '')
23 if _lib != '':
24 if sys.platform.startswith('win'):
25 _lib = ctypes.windll.LoadLibrary(_lib)
26 else:
27 _lib = ctypes.cdll.LoadLibrary(_lib)
28 elif sys.platform.startswith('win'):
29 # Windows
30 _lib = ctypes.windll.opengl32
31 try:
32 wglGetProcAddress = _lib.wglGetProcAddress
33 wglGetProcAddress.restype = ctypes.CFUNCTYPE(
34 ctypes.POINTER(ctypes.c_int))
35 wglGetProcAddress.argtypes = [ctypes.c_char_p]
36 _have_get_proc_address = True
37 except AttributeError:
38 pass
39 else:
40 # Unix-ish
41 if sys.platform.startswith('darwin'):
42 _fname = ctypes.util.find_library('OpenGL')
43 else:
44 _fname = ctypes.util.find_library('GL')
45 if not _fname:
46 raise RuntimeError('Could not load OpenGL library.')
47 # Load lib
48 _lib = ctypes.cdll.LoadLibrary(_fname)
49
50
51 def _have_context():
52 return _lib.glGetError() != 1282 # GL_INVALID_OPERATION
53
54
55 def _get_gl_version(_lib):
56 """Helper to get the GL version string"""
57 try:
58 return _lib.glGetString(7938).decode('utf-8')
59 except Exception:
60 return 'unknown'
61
62
63 def _get_gl_func(name, restype, argtypes):
64 # Based on a function in Pyglet
65 try:
66 # Try using normal ctypes stuff
67 func = getattr(_lib, name)
68 func.restype = restype
69 func.argtypes = argtypes
70 return func
71 except AttributeError:
72 if sys.platform.startswith('win'):
73 # Ask for a pointer to the function, this is the approach
74 # for OpenGL extensions on Windows
75 fargs = (restype,) + argtypes
76 ftype = ctypes.WINFUNCTYPE(*fargs)
77 if not _have_get_proc_address:
78 raise RuntimeError('Function %s not available '
79 '(OpenGL version is %s).'
80 % (name, _get_gl_version(_lib)))
81 if not _have_context():
82 raise RuntimeError('Using %s with no OpenGL context.' % name)
83 address = wglGetProcAddress(name.encode('utf-8'))
84 if address:
85 return ctypes.cast(address, ftype)
86 # If not Windows or if we did not return function object on Windows:
87 raise RuntimeError('Function %s not present in context '
88 '(OpenGL version is %s).'
89 % (name, _get_gl_version(_lib)))
90
91
92 # Inject
93
94 from . import _gl2 # noqa
95 _copy_gl_functions(_gl2, globals())
96
[end of vispy/gloo/gl/gl2.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/vispy/gloo/gl/gl2.py b/vispy/gloo/gl/gl2.py
--- a/vispy/gloo/gl/gl2.py
+++ b/vispy/gloo/gl/gl2.py
@@ -11,6 +11,7 @@
from . import _copy_gl_functions
from ._constants import * # noqa
+from ...util import logger
# Ctypes stuff
@@ -43,9 +44,11 @@
else:
_fname = ctypes.util.find_library('GL')
if not _fname:
- raise RuntimeError('Could not load OpenGL library.')
- # Load lib
- _lib = ctypes.cdll.LoadLibrary(_fname)
+ logger.warning('Could not load OpenGL library.')
+ _lib = None
+ else:
+ # Load lib
+ _lib = ctypes.cdll.LoadLibrary(_fname)
def _have_context():
@@ -62,6 +65,8 @@
def _get_gl_func(name, restype, argtypes):
# Based on a function in Pyglet
+ if _lib is None:
+ raise RuntimeError('Could not load OpenGL library, gl cannot be used')
try:
# Try using normal ctypes stuff
func = getattr(_lib, name)
| {"golden_diff": "diff --git a/vispy/gloo/gl/gl2.py b/vispy/gloo/gl/gl2.py\n--- a/vispy/gloo/gl/gl2.py\n+++ b/vispy/gloo/gl/gl2.py\n@@ -11,6 +11,7 @@\n \n from . import _copy_gl_functions\n from ._constants import * # noqa\n+from ...util import logger\n \n # Ctypes stuff\n \n@@ -43,9 +44,11 @@\n else:\n _fname = ctypes.util.find_library('GL')\n if not _fname:\n- raise RuntimeError('Could not load OpenGL library.')\n- # Load lib\n- _lib = ctypes.cdll.LoadLibrary(_fname)\n+ logger.warning('Could not load OpenGL library.')\n+ _lib = None\n+ else:\n+ # Load lib\n+ _lib = ctypes.cdll.LoadLibrary(_fname)\n \n \n def _have_context():\n@@ -62,6 +65,8 @@\n \n def _get_gl_func(name, restype, argtypes):\n # Based on a function in Pyglet\n+ if _lib is None:\n+ raise RuntimeError('Could not load OpenGL library, gl cannot be used')\n try:\n # Try using normal ctypes stuff\n func = getattr(_lib, name)\n", "issue": "Importing gloo should not automatically load the OpenGL library\nI'm trying to run vispy on a headless server with docker, to use the ipynb webgl backend exclusively. I cannot `import vispy.gloo`:\n\n```\n File \"/opt/conda/lib/python3.4/site-packages/vispy/gloo/__init__.py\", line 47, in <module>\n from . import gl # noqa\n File \"/opt/conda/lib/python3.4/site-packages/vispy/gloo/gl/__init__.py\", line 213, in <module>\n from . import gl2 as default_backend # noqa\n File \"/opt/conda/lib/python3.4/site-packages/vispy/gloo/gl/gl2.py\", line 46, in <module>\n raise RuntimeError('Could not load OpenGL library.')\nRuntimeError: Could not load OpenGL library.\n```\n\nI should not need to have the OpenGL library on a headless server when using a remote backend.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2015, Vispy Development Team.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\n\"\"\" GL ES 2.0 API implemented via desktop GL (i.e subset of normal OpenGL).\n\"\"\"\n\nimport os\nimport sys\nimport ctypes.util\n\nfrom . import _copy_gl_functions\nfrom ._constants import * # noqa\n\n# Ctypes stuff\n\n\n# Load the OpenGL library. 
We more or less follow the same approach\n# as PyOpenGL does internally\n\n_have_get_proc_address = False\n_lib = os.getenv('VISPY_GL_LIB', '')\nif _lib != '':\n if sys.platform.startswith('win'):\n _lib = ctypes.windll.LoadLibrary(_lib)\n else:\n _lib = ctypes.cdll.LoadLibrary(_lib)\nelif sys.platform.startswith('win'):\n # Windows\n _lib = ctypes.windll.opengl32\n try:\n wglGetProcAddress = _lib.wglGetProcAddress\n wglGetProcAddress.restype = ctypes.CFUNCTYPE(\n ctypes.POINTER(ctypes.c_int))\n wglGetProcAddress.argtypes = [ctypes.c_char_p]\n _have_get_proc_address = True\n except AttributeError:\n pass\nelse:\n # Unix-ish\n if sys.platform.startswith('darwin'):\n _fname = ctypes.util.find_library('OpenGL')\n else:\n _fname = ctypes.util.find_library('GL')\n if not _fname:\n raise RuntimeError('Could not load OpenGL library.')\n # Load lib\n _lib = ctypes.cdll.LoadLibrary(_fname)\n\n\ndef _have_context():\n return _lib.glGetError() != 1282 # GL_INVALID_OPERATION\n\n\ndef _get_gl_version(_lib):\n \"\"\"Helper to get the GL version string\"\"\"\n try:\n return _lib.glGetString(7938).decode('utf-8')\n except Exception:\n return 'unknown'\n\n\ndef _get_gl_func(name, restype, argtypes):\n # Based on a function in Pyglet\n try:\n # Try using normal ctypes stuff\n func = getattr(_lib, name)\n func.restype = restype\n func.argtypes = argtypes\n return func\n except AttributeError:\n if sys.platform.startswith('win'):\n # Ask for a pointer to the function, this is the approach\n # for OpenGL extensions on Windows\n fargs = (restype,) + argtypes\n ftype = ctypes.WINFUNCTYPE(*fargs)\n if not _have_get_proc_address:\n raise RuntimeError('Function %s not available '\n '(OpenGL version is %s).'\n % (name, _get_gl_version(_lib)))\n if not _have_context():\n raise RuntimeError('Using %s with no OpenGL context.' % name)\n address = wglGetProcAddress(name.encode('utf-8'))\n if address:\n return ctypes.cast(address, ftype)\n # If not Windows or if we did not return function object on Windows:\n raise RuntimeError('Function %s not present in context '\n '(OpenGL version is %s).'\n % (name, _get_gl_version(_lib)))\n\n\n# Inject\n\nfrom . import _gl2 # noqa\n_copy_gl_functions(_gl2, globals())\n", "path": "vispy/gloo/gl/gl2.py"}]} | 1,645 | 282 |
gh_patches_debug_29995 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-1360 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Petsmart Canada
https://www.petsmart.ca/store-locator/all/
</issue>
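The issue is terse, but the change it implies — serving both TLDs from one spider — comes down to widening `allowed_domains`, adding the `.ca` start URL, and tagging each store with a country derived from the response URL. A hedged sketch of that last step (the helper name is ours; the actual patch below inlines the same check):

```python
def country_from_url(url):
    # Hypothetical helper mirroring the inline check in the fix.
    if "petsmart.ca" in url:
        return "CA"
    if "petsmart.com" in url:
        return "US"
    return None

assert country_from_url("https://www.petsmart.ca/store-locator/all/") == "CA"
assert country_from_url("https://www.petsmart.com/store-locator/all/") == "US"
```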
<code>
[start of locations/spiders/petsmart.py]
1 import datetime
2 import re
3 import scrapy
4 from locations.items import GeojsonPointItem
5 from locations.hours import OpeningHours
6
7 day_mapping = {'MON': 'Mo','TUE': 'Tu','WED': 'We','THU': 'Th',
8 'FRI': 'Fr','SAT': 'Sa','SUN': 'Su'}
9
10 def convert_24hour(time):
11 """
12 Takes 12 hour time as a string and converts it to 24 hour time.
13 """
14
15 if len(time[:-2].split(':')) < 2:
16 hour = time[:-2]
17 minute = '00'
18 else:
19 hour, minute = time[:-2].split(':')
20
21 if time[-2:] == 'AM':
22 time_formatted = hour + ':' + minute
23 elif time[-2:] == 'PM':
24 time_formatted = str(int(hour)+ 12) + ':' + minute
25
26 if time_formatted in ['24:00','0:00','00:00']:
27 time_formatted = '23:59'
28
29 return time_formatted
30
31 class PetSmartSpider(scrapy.Spider):
32 download_delay = 0.2
33 name = "petsmart"
34 item_attributes = { 'brand': "Petsmart" }
35 allowed_domains = ["petsmart.com"]
36 start_urls = (
37 'https://www.petsmart.com/store-locator/all/',
38 )
39
40 def parse(self, response):
41 state_urls = response.xpath('//li[@class="col-sm-12 col-md-4"]/a/@href').extract()
42 is_store_details_urls = response.xpath('//a[@class="store-details-link"]/@href').extract()
43
44 if not state_urls and is_store_details_urls:
45 for url in is_store_details_urls:
46 yield scrapy.Request(response.urljoin(url), callback=self.parse_store)
47 else:
48 for url in state_urls:
49 yield scrapy.Request(response.urljoin(url))
50
51 def parse_store(self, response):
52 ref = re.search(r'.+/?\?(.+)', response.url).group(1)
53
54 properties = {
55 'name': response.xpath('//span[@itemprop="name"]/text()').extract_first().strip(),
56 'addr_full': response.xpath('//div[@itemprop="streetAddress"]/text()').extract_first(),
57 'city': response.xpath('//span[@itemprop="addressLocality"][1]/text()').extract_first().title(),
58 'state': response.xpath('//span[@itemprop="addressLocality"][2]/text()').extract_first(),
59 'postcode': response.xpath('//span[@itemprop="postalCode"]/text()').extract_first(),
60 'lat': float(response.xpath('//input[@name="storeLatitudeVal"]/@value').extract_first()),
61 'lon': float(response.xpath('//input[@name="storeLongitudeVal"]/@value').extract_first()),
62 'phone': response.xpath('//a[@class="store-contact-info"]/text()').extract_first(),
63 'ref': ref,
64 'website': response.url
65 }
66
67 hours = self.parse_hours(response.xpath('//div[@class="store-detail-address"]'))
68
69 if hours:
70 properties['opening_hours'] = hours
71
72 yield GeojsonPointItem(**properties)
73
74 def parse_hours(self, elements):
75 opening_hours = OpeningHours()
76
77 days = elements.xpath('//span[@itemprop="dayOfWeek"]/text()').extract()
78 today = (set(day_mapping) - set(days)).pop()
79 days.remove('TODAY')
80 days.insert(0,today)
81 open_hours = elements.xpath('//div[@class="store-hours"]/time[@itemprop="opens"]/@content').extract()
82 close_hours = elements.xpath('//div[@class="store-hours"]/time[@itemprop="closes"]/@content').extract()
83
84 store_hours = dict((z[0],list(z[1:])) for z in zip(days, open_hours, close_hours))
85
86 for day, hours in store_hours.items():
87 if 'CLOSED' in hours:
88 continue
89 opening_hours.add_range(day=day_mapping[day],
90 open_time=convert_24hour(hours[0]),
91 close_time=convert_24hour(hours[1]))
92 return opening_hours.as_opening_hours()
93
[end of locations/spiders/petsmart.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/petsmart.py b/locations/spiders/petsmart.py
--- a/locations/spiders/petsmart.py
+++ b/locations/spiders/petsmart.py
@@ -32,9 +32,10 @@
download_delay = 0.2
name = "petsmart"
item_attributes = { 'brand': "Petsmart" }
- allowed_domains = ["petsmart.com"]
+ allowed_domains = ["petsmart.com", "petsmart.ca"]
start_urls = (
'https://www.petsmart.com/store-locator/all/',
+ 'https://www.petsmart.ca/store-locator/all/'
)
def parse(self, response):
@@ -50,6 +51,10 @@
def parse_store(self, response):
ref = re.search(r'.+/?\?(.+)', response.url).group(1)
+ if 'petsmart.ca' in response.url:
+ country = 'CA'
+ elif 'petsmart.com' in response.url:
+ country = 'US'
properties = {
'name': response.xpath('//span[@itemprop="name"]/text()').extract_first().strip(),
@@ -60,6 +65,7 @@
'lat': float(response.xpath('//input[@name="storeLatitudeVal"]/@value').extract_first()),
'lon': float(response.xpath('//input[@name="storeLongitudeVal"]/@value').extract_first()),
'phone': response.xpath('//a[@class="store-contact-info"]/text()').extract_first(),
+ 'country': country,
'ref': ref,
'website': response.url
}
| {"golden_diff": "diff --git a/locations/spiders/petsmart.py b/locations/spiders/petsmart.py\n--- a/locations/spiders/petsmart.py\n+++ b/locations/spiders/petsmart.py\n@@ -32,9 +32,10 @@\n download_delay = 0.2\n name = \"petsmart\"\n item_attributes = { 'brand': \"Petsmart\" }\n- allowed_domains = [\"petsmart.com\"]\n+ allowed_domains = [\"petsmart.com\", \"petsmart.ca\"]\n start_urls = (\n 'https://www.petsmart.com/store-locator/all/',\n+ 'https://www.petsmart.ca/store-locator/all/'\n )\n \n def parse(self, response):\n@@ -50,6 +51,10 @@\n \n def parse_store(self, response):\n ref = re.search(r'.+/?\\?(.+)', response.url).group(1)\n+ if 'petsmart.ca' in response.url:\n+ country = 'CA'\n+ elif 'petsmart.com' in response.url:\n+ country = 'US'\n \n properties = {\n 'name': response.xpath('//span[@itemprop=\"name\"]/text()').extract_first().strip(),\n@@ -60,6 +65,7 @@\n 'lat': float(response.xpath('//input[@name=\"storeLatitudeVal\"]/@value').extract_first()),\n 'lon': float(response.xpath('//input[@name=\"storeLongitudeVal\"]/@value').extract_first()),\n 'phone': response.xpath('//a[@class=\"store-contact-info\"]/text()').extract_first(),\n+ 'country': country,\n 'ref': ref,\n 'website': response.url\n }\n", "issue": "Petsmart Canada\nhttps://www.petsmart.ca/store-locator/all/\n", "before_files": [{"content": "import datetime\nimport re\nimport scrapy\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\nday_mapping = {'MON': 'Mo','TUE': 'Tu','WED': 'We','THU': 'Th',\n 'FRI': 'Fr','SAT': 'Sa','SUN': 'Su'}\n\ndef convert_24hour(time):\n \"\"\"\n Takes 12 hour time as a string and converts it to 24 hour time.\n \"\"\"\n\n if len(time[:-2].split(':')) < 2:\n hour = time[:-2]\n minute = '00'\n else:\n hour, minute = time[:-2].split(':')\n\n if time[-2:] == 'AM':\n time_formatted = hour + ':' + minute\n elif time[-2:] == 'PM':\n time_formatted = str(int(hour)+ 12) + ':' + minute\n\n if time_formatted in ['24:00','0:00','00:00']:\n time_formatted = '23:59'\n\n return time_formatted\n\nclass PetSmartSpider(scrapy.Spider):\n download_delay = 0.2\n name = \"petsmart\"\n item_attributes = { 'brand': \"Petsmart\" }\n allowed_domains = [\"petsmart.com\"]\n start_urls = (\n 'https://www.petsmart.com/store-locator/all/',\n )\n\n def parse(self, response):\n state_urls = response.xpath('//li[@class=\"col-sm-12 col-md-4\"]/a/@href').extract()\n is_store_details_urls = response.xpath('//a[@class=\"store-details-link\"]/@href').extract()\n\n if not state_urls and is_store_details_urls:\n for url in is_store_details_urls:\n yield scrapy.Request(response.urljoin(url), callback=self.parse_store)\n else:\n for url in state_urls:\n yield scrapy.Request(response.urljoin(url))\n\n def parse_store(self, response):\n ref = re.search(r'.+/?\\?(.+)', response.url).group(1)\n\n properties = {\n 'name': response.xpath('//span[@itemprop=\"name\"]/text()').extract_first().strip(),\n 'addr_full': response.xpath('//div[@itemprop=\"streetAddress\"]/text()').extract_first(),\n 'city': response.xpath('//span[@itemprop=\"addressLocality\"][1]/text()').extract_first().title(),\n 'state': response.xpath('//span[@itemprop=\"addressLocality\"][2]/text()').extract_first(),\n 'postcode': response.xpath('//span[@itemprop=\"postalCode\"]/text()').extract_first(),\n 'lat': float(response.xpath('//input[@name=\"storeLatitudeVal\"]/@value').extract_first()),\n 'lon': float(response.xpath('//input[@name=\"storeLongitudeVal\"]/@value').extract_first()),\n 'phone': 
response.xpath('//a[@class=\"store-contact-info\"]/text()').extract_first(),\n 'ref': ref,\n 'website': response.url\n }\n\n hours = self.parse_hours(response.xpath('//div[@class=\"store-detail-address\"]'))\n\n if hours:\n properties['opening_hours'] = hours\n\n yield GeojsonPointItem(**properties)\n\n def parse_hours(self, elements):\n opening_hours = OpeningHours()\n\n days = elements.xpath('//span[@itemprop=\"dayOfWeek\"]/text()').extract()\n today = (set(day_mapping) - set(days)).pop()\n days.remove('TODAY')\n days.insert(0,today)\n open_hours = elements.xpath('//div[@class=\"store-hours\"]/time[@itemprop=\"opens\"]/@content').extract()\n close_hours = elements.xpath('//div[@class=\"store-hours\"]/time[@itemprop=\"closes\"]/@content').extract()\n\n store_hours = dict((z[0],list(z[1:])) for z in zip(days, open_hours, close_hours))\n\n for day, hours in store_hours.items():\n if 'CLOSED' in hours:\n continue\n opening_hours.add_range(day=day_mapping[day],\n open_time=convert_24hour(hours[0]),\n close_time=convert_24hour(hours[1]))\n return opening_hours.as_opening_hours()\n", "path": "locations/spiders/petsmart.py"}]} | 1,641 | 362 |
gh_patches_debug_36927 | rasdani/github-patches | git_diff | mdn__kuma-7800 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
🧹 Remove homepage related frontend code
Once https://github.com/mdn/yari/issues/2438 lands, we need to go into the Kuma forest and find all things related to the homepage frontend code, chop it down, and clear it out.
</issue>
<code>
[start of kuma/landing/views.py]
1 from django.conf import settings
2 from django.http import HttpResponse
3 from django.shortcuts import redirect, render
4 from django.views import static
5 from django.views.decorators.cache import never_cache
6 from django.views.generic import RedirectView
7
8 from kuma.core.decorators import ensure_wiki_domain, shared_cache_control
9 from kuma.core.utils import is_wiki
10 from kuma.feeder.models import Bundle
11 from kuma.feeder.sections import SECTION_HACKS
12 from kuma.search.models import Filter
13
14 from .utils import favicon_url
15
16
17 @shared_cache_control
18 def contribute_json(request):
19 return static.serve(request, "contribute.json", document_root=settings.ROOT)
20
21
22 @shared_cache_control
23 def home(request):
24 """Home page."""
25 context = {}
26 # Need for both wiki and react homepage
27 context["updates"] = list(Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])
28
29 # The default template name
30 template_name = "landing/react_homepage.html"
31 if is_wiki(request):
32 template_name = "landing/homepage.html"
33 context["default_filters"] = Filter.objects.default_filters()
34 return render(request, template_name, context)
35
36
37 @ensure_wiki_domain
38 @never_cache
39 def maintenance_mode(request):
40 if settings.MAINTENANCE_MODE:
41 return render(request, "landing/maintenance-mode.html")
42 else:
43 return redirect("home")
44
45
46 @ensure_wiki_domain
47 @shared_cache_control
48 def promote_buttons(request):
49 """Bug 646192: MDN affiliate buttons"""
50 return render(request, "landing/promote_buttons.html")
51
52
53 ROBOTS_ALL_ALLOWED_TXT = """\
54 User-agent: *
55 Sitemap: https://wiki.developer.mozilla.org/sitemap.xml
56
57 Disallow:
58 """
59
60 ROBOTS_ALLOWED_TXT = """\
61 User-agent: *
62 Sitemap: https://developer.mozilla.org/sitemap.xml
63
64 Disallow: /api/
65 Disallow: /*docs/get-documents
66 Disallow: /*docs/Experiment:*
67 Disallow: /*$children
68 Disallow: /*docs.json
69 Disallow: /*/files/
70 Disallow: /media
71 Disallow: /*profiles*/edit
72 """ + "\n".join(
73 "Disallow: /{locale}/search".format(locale=locale)
74 for locale in settings.ENABLED_LOCALES
75 )
76
77 ROBOTS_GO_AWAY_TXT = """\
78 User-Agent: *
79 Disallow: /
80 """
81
82
83 @shared_cache_control
84 def robots_txt(request):
85 """Serve robots.txt that allows or forbids robots."""
86 host = request.get_host()
87 if host in settings.ALLOW_ROBOTS_DOMAINS:
88 robots = ""
89 elif host in settings.ALLOW_ROBOTS_WEB_DOMAINS:
90 if host == settings.WIKI_HOST:
91 robots = ROBOTS_ALL_ALLOWED_TXT
92 else:
93 robots = ROBOTS_ALLOWED_TXT
94 else:
95 robots = ROBOTS_GO_AWAY_TXT
96 return HttpResponse(robots, content_type="text/plain")
97
98
99 class FaviconRedirect(RedirectView):
100 """Redirect to the favicon in the static img folder (bug 1402497)"""
101
102 def get_redirect_url(self, *args, **kwargs):
103 return favicon_url()
104
[end of kuma/landing/views.py]
[start of kuma/landing/urls.py]
1 from django.urls import re_path
2
3 from kuma.core.decorators import shared_cache_control
4
5 from . import views
6
7
8 MONTH = 60 * 60 * 24 * 30
9
10
11 lang_urlpatterns = [
12 re_path(r"^$", views.home, name="home"),
13 re_path(r"^maintenance-mode/?$", views.maintenance_mode, name="maintenance_mode"),
14 re_path(r"^promote/?$", views.promote_buttons, name="promote"),
15 re_path(r"^promote/buttons/?$", views.promote_buttons, name="promote_buttons"),
16 ]
17
18 urlpatterns = [
19 re_path(r"^contribute\.json$", views.contribute_json, name="contribute_json"),
20 re_path(r"^robots.txt$", views.robots_txt, name="robots_txt"),
21 re_path(
22 r"^favicon.ico$",
23 shared_cache_control(views.FaviconRedirect.as_view(), s_maxage=MONTH),
24 name="favicon_ico",
25 ),
26 ]
27
[end of kuma/landing/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kuma/landing/urls.py b/kuma/landing/urls.py
--- a/kuma/landing/urls.py
+++ b/kuma/landing/urls.py
@@ -11,12 +11,9 @@
lang_urlpatterns = [
re_path(r"^$", views.home, name="home"),
re_path(r"^maintenance-mode/?$", views.maintenance_mode, name="maintenance_mode"),
- re_path(r"^promote/?$", views.promote_buttons, name="promote"),
- re_path(r"^promote/buttons/?$", views.promote_buttons, name="promote_buttons"),
]
urlpatterns = [
- re_path(r"^contribute\.json$", views.contribute_json, name="contribute_json"),
re_path(r"^robots.txt$", views.robots_txt, name="robots_txt"),
re_path(
r"^favicon.ico$",
diff --git a/kuma/landing/views.py b/kuma/landing/views.py
--- a/kuma/landing/views.py
+++ b/kuma/landing/views.py
@@ -1,37 +1,25 @@
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import redirect, render
-from django.views import static
from django.views.decorators.cache import never_cache
from django.views.generic import RedirectView
from kuma.core.decorators import ensure_wiki_domain, shared_cache_control
-from kuma.core.utils import is_wiki
-from kuma.feeder.models import Bundle
-from kuma.feeder.sections import SECTION_HACKS
-from kuma.search.models import Filter
from .utils import favicon_url
-@shared_cache_control
-def contribute_json(request):
- return static.serve(request, "contribute.json", document_root=settings.ROOT)
-
-
-@shared_cache_control
def home(request):
"""Home page."""
- context = {}
- # Need for both wiki and react homepage
- context["updates"] = list(Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])
-
- # The default template name
- template_name = "landing/react_homepage.html"
- if is_wiki(request):
- template_name = "landing/homepage.html"
- context["default_filters"] = Filter.objects.default_filters()
- return render(request, template_name, context)
+ return HttpResponse(
+ """
+ <html>
+ End of an era. Kuma's no longer rendering a home page.<br>
+ See project Yari.
+ </html>
+ """,
+ content_type="text/html",
+ )
@ensure_wiki_domain
@@ -43,13 +31,6 @@
return redirect("home")
-@ensure_wiki_domain
-@shared_cache_control
-def promote_buttons(request):
- """Bug 646192: MDN affiliate buttons"""
- return render(request, "landing/promote_buttons.html")
-
-
ROBOTS_ALL_ALLOWED_TXT = """\
User-agent: *
Sitemap: https://wiki.developer.mozilla.org/sitemap.xml
| {"golden_diff": "diff --git a/kuma/landing/urls.py b/kuma/landing/urls.py\n--- a/kuma/landing/urls.py\n+++ b/kuma/landing/urls.py\n@@ -11,12 +11,9 @@\n lang_urlpatterns = [\n re_path(r\"^$\", views.home, name=\"home\"),\n re_path(r\"^maintenance-mode/?$\", views.maintenance_mode, name=\"maintenance_mode\"),\n- re_path(r\"^promote/?$\", views.promote_buttons, name=\"promote\"),\n- re_path(r\"^promote/buttons/?$\", views.promote_buttons, name=\"promote_buttons\"),\n ]\n \n urlpatterns = [\n- re_path(r\"^contribute\\.json$\", views.contribute_json, name=\"contribute_json\"),\n re_path(r\"^robots.txt$\", views.robots_txt, name=\"robots_txt\"),\n re_path(\n r\"^favicon.ico$\",\ndiff --git a/kuma/landing/views.py b/kuma/landing/views.py\n--- a/kuma/landing/views.py\n+++ b/kuma/landing/views.py\n@@ -1,37 +1,25 @@\n from django.conf import settings\n from django.http import HttpResponse\n from django.shortcuts import redirect, render\n-from django.views import static\n from django.views.decorators.cache import never_cache\n from django.views.generic import RedirectView\n \n from kuma.core.decorators import ensure_wiki_domain, shared_cache_control\n-from kuma.core.utils import is_wiki\n-from kuma.feeder.models import Bundle\n-from kuma.feeder.sections import SECTION_HACKS\n-from kuma.search.models import Filter\n \n from .utils import favicon_url\n \n \n-@shared_cache_control\n-def contribute_json(request):\n- return static.serve(request, \"contribute.json\", document_root=settings.ROOT)\n-\n-\n-@shared_cache_control\n def home(request):\n \"\"\"Home page.\"\"\"\n- context = {}\n- # Need for both wiki and react homepage\n- context[\"updates\"] = list(Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])\n-\n- # The default template name\n- template_name = \"landing/react_homepage.html\"\n- if is_wiki(request):\n- template_name = \"landing/homepage.html\"\n- context[\"default_filters\"] = Filter.objects.default_filters()\n- return render(request, template_name, context)\n+ return HttpResponse(\n+ \"\"\"\n+ <html>\n+ End of an era. 
Kuma's no longer rendering a home page.<br>\n+ See project Yari.\n+ </html>\n+ \"\"\",\n+ content_type=\"text/html\",\n+ )\n \n \n @ensure_wiki_domain\n@@ -43,13 +31,6 @@\n return redirect(\"home\")\n \n \n-@ensure_wiki_domain\n-@shared_cache_control\n-def promote_buttons(request):\n- \"\"\"Bug 646192: MDN affiliate buttons\"\"\"\n- return render(request, \"landing/promote_buttons.html\")\n-\n-\n ROBOTS_ALL_ALLOWED_TXT = \"\"\"\\\n User-agent: *\n Sitemap: https://wiki.developer.mozilla.org/sitemap.xml\n", "issue": "\ud83e\uddf9 Remove hompage related frontend code\nOnce https://github.com/mdn/yari/issues/2438 lands, we need to go into the Kuma forest and find all things related to the homepage frontend code, chop it down, and clear it out.\n", "before_files": [{"content": "from django.conf import settings\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect, render\nfrom django.views import static\nfrom django.views.decorators.cache import never_cache\nfrom django.views.generic import RedirectView\n\nfrom kuma.core.decorators import ensure_wiki_domain, shared_cache_control\nfrom kuma.core.utils import is_wiki\nfrom kuma.feeder.models import Bundle\nfrom kuma.feeder.sections import SECTION_HACKS\nfrom kuma.search.models import Filter\n\nfrom .utils import favicon_url\n\n\n@shared_cache_control\ndef contribute_json(request):\n return static.serve(request, \"contribute.json\", document_root=settings.ROOT)\n\n\n@shared_cache_control\ndef home(request):\n \"\"\"Home page.\"\"\"\n context = {}\n # Need for both wiki and react homepage\n context[\"updates\"] = list(Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])\n\n # The default template name\n template_name = \"landing/react_homepage.html\"\n if is_wiki(request):\n template_name = \"landing/homepage.html\"\n context[\"default_filters\"] = Filter.objects.default_filters()\n return render(request, template_name, context)\n\n\n@ensure_wiki_domain\n@never_cache\ndef maintenance_mode(request):\n if settings.MAINTENANCE_MODE:\n return render(request, \"landing/maintenance-mode.html\")\n else:\n return redirect(\"home\")\n\n\n@ensure_wiki_domain\n@shared_cache_control\ndef promote_buttons(request):\n \"\"\"Bug 646192: MDN affiliate buttons\"\"\"\n return render(request, \"landing/promote_buttons.html\")\n\n\nROBOTS_ALL_ALLOWED_TXT = \"\"\"\\\nUser-agent: *\nSitemap: https://wiki.developer.mozilla.org/sitemap.xml\n\nDisallow:\n\"\"\"\n\nROBOTS_ALLOWED_TXT = \"\"\"\\\nUser-agent: *\nSitemap: https://developer.mozilla.org/sitemap.xml\n\nDisallow: /api/\nDisallow: /*docs/get-documents\nDisallow: /*docs/Experiment:*\nDisallow: /*$children\nDisallow: /*docs.json\nDisallow: /*/files/\nDisallow: /media\nDisallow: /*profiles*/edit\n\"\"\" + \"\\n\".join(\n \"Disallow: /{locale}/search\".format(locale=locale)\n for locale in settings.ENABLED_LOCALES\n)\n\nROBOTS_GO_AWAY_TXT = \"\"\"\\\nUser-Agent: *\nDisallow: /\n\"\"\"\n\n\n@shared_cache_control\ndef robots_txt(request):\n \"\"\"Serve robots.txt that allows or forbids robots.\"\"\"\n host = request.get_host()\n if host in settings.ALLOW_ROBOTS_DOMAINS:\n robots = \"\"\n elif host in settings.ALLOW_ROBOTS_WEB_DOMAINS:\n if host == settings.WIKI_HOST:\n robots = ROBOTS_ALL_ALLOWED_TXT\n else:\n robots = ROBOTS_ALLOWED_TXT\n else:\n robots = ROBOTS_GO_AWAY_TXT\n return HttpResponse(robots, content_type=\"text/plain\")\n\n\nclass FaviconRedirect(RedirectView):\n \"\"\"Redirect to the favicon in the static img folder (bug 1402497)\"\"\"\n\n def get_redirect_url(self, *args, 
**kwargs):\n return favicon_url()\n", "path": "kuma/landing/views.py"}, {"content": "from django.urls import re_path\n\nfrom kuma.core.decorators import shared_cache_control\n\nfrom . import views\n\n\nMONTH = 60 * 60 * 24 * 30\n\n\nlang_urlpatterns = [\n re_path(r\"^$\", views.home, name=\"home\"),\n re_path(r\"^maintenance-mode/?$\", views.maintenance_mode, name=\"maintenance_mode\"),\n re_path(r\"^promote/?$\", views.promote_buttons, name=\"promote\"),\n re_path(r\"^promote/buttons/?$\", views.promote_buttons, name=\"promote_buttons\"),\n]\n\nurlpatterns = [\n re_path(r\"^contribute\\.json$\", views.contribute_json, name=\"contribute_json\"),\n re_path(r\"^robots.txt$\", views.robots_txt, name=\"robots_txt\"),\n re_path(\n r\"^favicon.ico$\",\n shared_cache_control(views.FaviconRedirect.as_view(), s_maxage=MONTH),\n name=\"favicon_ico\",\n ),\n]\n", "path": "kuma/landing/urls.py"}]} | 1,755 | 655 |
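
The core of the kuma fix above is swapping a template `render()` for a hard-coded `HttpResponse`. A minimal standalone sketch of that pattern follows; the view name and HTML are illustrative, not Kuma's actual code.

```python
# Minimal sketch of serving a static farewell page straight from a Django
# view, skipping template rendering entirely (illustrative names only).
from django.http import HttpResponse


def retired_home(request):
    # Hard-coding the body avoids template lookups for a page that will
    # never change again; the content type is set explicitly for clarity.
    return HttpResponse(
        "<html>This page has been retired.</html>",
        content_type="text/html",
    )
```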
gh_patches_debug_11333 | rasdani/github-patches | git_diff | searx__searx-3479 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Using single apostrophe causes a "search error"
<!-- PLEASE FILL THESE FIELDS, IT REALLY HELPS THE MAINTAINERS OF SEARX -->
**Version of Searx, commit number if you are using on master branch and stipulate if you forked Searx**
1.1.0-27-bf0a583f
<!-- If you are running on master branch using git execute this command
in order to fetch the latest commit ID:
```
git log -1
```
If you are using searx-docker then look at the bottom of the Searx page
and check for the version after "Powered by searx"
Please also stipulate if you are using a forked version of Searx and
include a link to the fork source code.
-->
**How did you install Searx?**
searx-docker. Specifically using docker-compose and running ``docker-compose up``.
Compose file:
```yaml
version: '3.3'
services:
searx:
volumes:
- /mnt/storage1/configs/searx:/etc/searx
ports:
- 9999:8080
environment:
- BASE_URL=http://localhost:9999/
image: searx/searx
restart: unless-stopped
```
<!-- Did you install Searx using the official wiki or using searx-docker
or manually by executing the searx/webapp.py file? -->
**What happened?**
Using a single apostrophe in a word (for example, "What's") causes a search error
<!-- A clear and concise description of what the bug is. -->
**How To Reproduce**
Use any single apostrophe in a search query
<!-- How can we reproduce this issue? (as minimally and as precisely as possible) -->
**Expected behavior**
Should return results, instead of an error.
<!-- A clear and concise description of what you expected to happen. -->
**Screenshots & Logs**
https://imgur.com/a/E2b4mNX
https://paste.ee/p/e5gS8
**Additional context**
It's also somewhat infuriating that it returns from a result page to the home page when this happens, meaning I have to retype my query (though that's another issue entirely).
</issue>
<code>
[start of searx/plugins/search_operators.py]
1 import shlex
2 import string
3
4 from flask_babel import gettext
5
6 name = gettext("Search operators")
7 description = gettext("""Filter results using hyphen, site: and -site:.
8 Please note that you might get less results with the additional filtering.""")
9 default_on = False
10
11
12 def on_result(request, search, result):
13 q = search.search_query.query
14 qs = shlex.split(q)
15 spitems = [x.lower() for x in qs if ' ' in x]
16 mitems = [x.lower() for x in qs if x.startswith('-')]
17 siteitems = [x.lower() for x in qs if x.startswith('site:')]
18 msiteitems = [x.lower() for x in qs if x.startswith('-site:')]
19 url, title, content = (
20 result["url"].lower(),
21 result["title"].lower(),
22 (result.get("content").lower() if result.get("content") else '')
23 )
24 if all((x not in title or x not in content) for x in spitems):
25 return False
26 if all((x in title or x in content) for x in mitems):
27 return False
28 if all(x not in url for x in siteitems):
29 return False
30 if all(x in url for x in msiteitems):
31 return False
32 return True
33
[end of searx/plugins/search_operators.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/searx/plugins/search_operators.py b/searx/plugins/search_operators.py
--- a/searx/plugins/search_operators.py
+++ b/searx/plugins/search_operators.py
@@ -11,7 +11,11 @@
def on_result(request, search, result):
q = search.search_query.query
- qs = shlex.split(q)
+ # WARN: shlex.quote is designed only for Unix shells and may be vulnerable
+ # to command injection on non-POSIX compliant shells (Windows)
+ # https://docs.python.org/3/library/shlex.html#shlex.quote
+ squote = shlex.quote(q)
+ qs = shlex.split(squote)
spitems = [x.lower() for x in qs if ' ' in x]
mitems = [x.lower() for x in qs if x.startswith('-')]
siteitems = [x.lower() for x in qs if x.startswith('site:')]
| {"golden_diff": "diff --git a/searx/plugins/search_operators.py b/searx/plugins/search_operators.py\n--- a/searx/plugins/search_operators.py\n+++ b/searx/plugins/search_operators.py\n@@ -11,7 +11,11 @@\n \n def on_result(request, search, result):\n q = search.search_query.query\n- qs = shlex.split(q)\n+ # WARN: shlex.quote is designed only for Unix shells and may be vulnerable\n+ # to command injection on non-POSIX compliant shells (Windows)\n+ # https://docs.python.org/3/library/shlex.html#shlex.quote\n+ squote = shlex.quote(q)\n+ qs = shlex.split(squote)\n spitems = [x.lower() for x in qs if ' ' in x]\n mitems = [x.lower() for x in qs if x.startswith('-')]\n siteitems = [x.lower() for x in qs if x.startswith('site:')]\n", "issue": "Using single apostrophe causes a \"search error\"\n<!-- PLEASE FILL THESE FIELDS, IT REALLY HELPS THE MAINTAINERS OF SEARX -->\r\n\r\n**Version of Searx, commit number if you are using on master branch and stipulate if you forked Searx**\r\n1.1.0-27-bf0a583f\r\n<!-- If you are running on master branch using git execute this command\r\nin order to fetch the latest commit ID:\r\n```\r\ngit log -1\r\n``` \r\nIf you are using searx-docker then look at the bottom of the Searx page\r\nand check for the version after \"Powered by searx\"\r\n\r\nPlease also stipulate if you are using a forked version of Searx and\r\ninclude a link to the fork source code.\r\n-->\r\n**How did you install Searx?**\r\nsearx-docker. Specifically using docker-compose and running ``docker-compose up``.\r\nCompose file:\r\n```yaml\r\nversion: '3.3'\r\nservices:\r\n searx:\r\n volumes:\r\n - /mnt/storage1/configs/searx:/etc/searx\r\n ports:\r\n - 9999:8080\r\n environment:\r\n - BASE_URL=http://localhost:9999/\r\n image: searx/searx\r\n restart: unless-stopped\r\n```\r\n<!-- Did you install Searx using the official wiki or using searx-docker\r\nor manually by executing the searx/webapp.py file? -->\r\n**What happened?**\r\nUsing a single apostrophe in a word (for example, \"What's\") causes a search error\r\n<!-- A clear and concise description of what the bug is. -->\r\n**How To Reproduce**\r\nUse any single apostrophe in a search query\r\n<!-- How can we reproduce this issue? (as minimally and as precisely as possible) -->\r\n**Expected behavior**\r\nShould return results, instead of an error.\r\n<!-- A clear and concise description of what you expected to happen. 
-->\r\n**Screenshots & Logs**\r\nhttps://imgur.com/a/E2b4mNX\r\nhttps://paste.ee/p/e5gS8\r\n\r\n**Additional context**\r\nIt's also somewhat infuriating that it also returns from a result page to the home page when this happens, meaning I have to retype my query (though that's another issue entirely).\n", "before_files": [{"content": "import shlex\nimport string\n\nfrom flask_babel import gettext\n\nname = gettext(\"Search operators\")\ndescription = gettext(\"\"\"Filter results using hyphen, site: and -site:.\nPlease note that you might get less results with the additional filtering.\"\"\")\ndefault_on = False\n\n\ndef on_result(request, search, result):\n q = search.search_query.query\n qs = shlex.split(q)\n spitems = [x.lower() for x in qs if ' ' in x]\n mitems = [x.lower() for x in qs if x.startswith('-')]\n siteitems = [x.lower() for x in qs if x.startswith('site:')]\n msiteitems = [x.lower() for x in qs if x.startswith('-site:')]\n url, title, content = (\n result[\"url\"].lower(),\n result[\"title\"].lower(),\n (result.get(\"content\").lower() if result.get(\"content\") else '')\n )\n if all((x not in title or x not in content) for x in spitems):\n return False\n if all((x in title or x in content) for x in mitems):\n return False\n if all(x not in url for x in siteitems):\n return False\n if all(x in url for x in msiteitems):\n return False\n return True\n", "path": "searx/plugins/search_operators.py"}]} | 1,380 | 213 |
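
The searx fix above works because `shlex.split` raises on an unmatched apostrophe. Below is a small self-contained sketch of both the failure and the quote-then-split workaround; the query string is an arbitrary example.

```python
# Demonstrates why a lone apostrophe breaks shlex.split and how quoting
# the whole query first, as in the patch above, avoids the ValueError.
import shlex

query = "What's the weather"  # any query with an unpaired apostrophe

try:
    shlex.split(query)
except ValueError as exc:
    print("unpatched:", exc)  # "No closing quotation"

# Patched behaviour: shlex.quote wraps the string so the apostrophe is
# inert; the quoted query then splits cleanly.
print("patched:", shlex.split(shlex.quote(query)))
```

One side effect worth noting: the quoted query splits back into a single token, so the plugin's per-word filters then see the whole query as one item.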
gh_patches_debug_426 | rasdani/github-patches | git_diff | nautobot__nautobot-3981 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
2.0: Tag string representation is incorrect
### Environment
* Nautobot version (Docker tag too if applicable): `next`, e13883b7b8a4f44bca0c40d8074dcf8f82e544e6
### Steps to Reproduce
1. Create a Tag and associate it to any model
2. View the legacy-UI list view or detail view of that model
<!-- What did you expect to happen? -->
### Expected Behavior
Tag name to be displayed
<!-- What happened instead? -->
### Observed Behavior
`Tag object (<uuid>)` is displayed.

Appears to be a regression due to #3914.
</issue>
<code>
[start of nautobot/extras/models/tags.py]
1 from django.contrib.contenttypes.models import ContentType
2 from django.db import models
3 from taggit.models import GenericUUIDTaggedItemBase
4
5 from nautobot.core.choices import ColorChoices
6 from nautobot.core.models import BaseManager, BaseModel
7 from nautobot.core.models.fields import ColorField
8 from nautobot.core.models.querysets import RestrictedQuerySet
9 from nautobot.extras.models import ChangeLoggedModel, CustomFieldModel
10 from nautobot.extras.models.mixins import NotesMixin
11 from nautobot.extras.models.relationships import RelationshipModel
12 from nautobot.extras.utils import extras_features, TaggableClassesQuery
13
14
15 #
16 # Tags
17 #
18
19
20 class TagQuerySet(RestrictedQuerySet):
21 """Queryset for `Tags` objects."""
22
23 def get_for_model(self, model):
24 """
25 Return all `Tags` assigned to the given model.
26 """
27 return self.filter(content_types__model=model._meta.model_name, content_types__app_label=model._meta.app_label)
28
29
30 # Tag *should* be a `NameColorContentTypesModel` but that way lies circular import purgatory. Sigh.
31 @extras_features(
32 "custom_validators",
33 )
34 class Tag(BaseModel, ChangeLoggedModel, CustomFieldModel, RelationshipModel, NotesMixin):
35 name = models.CharField(max_length=100, unique=True)
36 content_types = models.ManyToManyField(
37 to=ContentType,
38 related_name="tags",
39 limit_choices_to=TaggableClassesQuery(),
40 )
41 color = ColorField(default=ColorChoices.COLOR_GREY)
42 description = models.CharField(
43 max_length=200,
44 blank=True,
45 )
46
47 objects = BaseManager.from_queryset(TagQuerySet)()
48
49 class Meta:
50 ordering = ["name"]
51
52 def validate_content_types_removal(self, content_types_id):
53 """Validate content_types to be removed are not tagged to a model"""
54 errors = {}
55
56 removed_content_types = self.content_types.exclude(id__in=content_types_id)
57
58 # check if tag is assigned to any of the removed content_types
59 for content_type in removed_content_types:
60 model = content_type.model_class()
61 if model.objects.filter(tags=self).exists():
62 errors.setdefault("content_types", []).append(
63 f"Unable to remove {model._meta.label_lower}. Dependent objects were found."
64 )
65
66 return errors
67
68
69 class TaggedItem(BaseModel, GenericUUIDTaggedItemBase):
70 tag = models.ForeignKey(to=Tag, related_name="%(app_label)s_%(class)s_items", on_delete=models.CASCADE)
71
72 class Meta:
73 index_together = ("content_type", "object_id")
74 unique_together = [["content_type", "object_id", "tag"]]
75
[end of nautobot/extras/models/tags.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nautobot/extras/models/tags.py b/nautobot/extras/models/tags.py
--- a/nautobot/extras/models/tags.py
+++ b/nautobot/extras/models/tags.py
@@ -46,6 +46,9 @@
objects = BaseManager.from_queryset(TagQuerySet)()
+ def __str__(self):
+ return self.name
+
class Meta:
ordering = ["name"]
| {"golden_diff": "diff --git a/nautobot/extras/models/tags.py b/nautobot/extras/models/tags.py\n--- a/nautobot/extras/models/tags.py\n+++ b/nautobot/extras/models/tags.py\n@@ -46,6 +46,9 @@\n \n objects = BaseManager.from_queryset(TagQuerySet)()\n \n+ def __str__(self):\n+ return self.name\n+\n class Meta:\n ordering = [\"name\"]\n", "issue": "2.0: Tag string representation is incorrect\n### Environment\r\n* Nautobot version (Docker tag too if applicable): `next`, e13883b7b8a4f44bca0c40d8074dcf8f82e544e6\r\n\r\n### Steps to Reproduce\r\n1. Create a Tag and associate it to any model\r\n2. View the legacy-UI list view or detail view of that model\r\n\r\n<!-- What did you expect to happen? -->\r\n### Expected Behavior\r\n\r\nTag name to be displayed\r\n\r\n<!-- What happened instead? -->\r\n### Observed Behavior\r\n\r\n`Tag object (<uuid>)` is displayed.\r\n\r\n\r\n\r\nAppears to be a regression due to #3914.\r\n\n", "before_files": [{"content": "from django.contrib.contenttypes.models import ContentType\nfrom django.db import models\nfrom taggit.models import GenericUUIDTaggedItemBase\n\nfrom nautobot.core.choices import ColorChoices\nfrom nautobot.core.models import BaseManager, BaseModel\nfrom nautobot.core.models.fields import ColorField\nfrom nautobot.core.models.querysets import RestrictedQuerySet\nfrom nautobot.extras.models import ChangeLoggedModel, CustomFieldModel\nfrom nautobot.extras.models.mixins import NotesMixin\nfrom nautobot.extras.models.relationships import RelationshipModel\nfrom nautobot.extras.utils import extras_features, TaggableClassesQuery\n\n\n#\n# Tags\n#\n\n\nclass TagQuerySet(RestrictedQuerySet):\n \"\"\"Queryset for `Tags` objects.\"\"\"\n\n def get_for_model(self, model):\n \"\"\"\n Return all `Tags` assigned to the given model.\n \"\"\"\n return self.filter(content_types__model=model._meta.model_name, content_types__app_label=model._meta.app_label)\n\n\n# Tag *should* be a `NameColorContentTypesModel` but that way lies circular import purgatory. Sigh.\n@extras_features(\n \"custom_validators\",\n)\nclass Tag(BaseModel, ChangeLoggedModel, CustomFieldModel, RelationshipModel, NotesMixin):\n name = models.CharField(max_length=100, unique=True)\n content_types = models.ManyToManyField(\n to=ContentType,\n related_name=\"tags\",\n limit_choices_to=TaggableClassesQuery(),\n )\n color = ColorField(default=ColorChoices.COLOR_GREY)\n description = models.CharField(\n max_length=200,\n blank=True,\n )\n\n objects = BaseManager.from_queryset(TagQuerySet)()\n\n class Meta:\n ordering = [\"name\"]\n\n def validate_content_types_removal(self, content_types_id):\n \"\"\"Validate content_types to be removed are not tagged to a model\"\"\"\n errors = {}\n\n removed_content_types = self.content_types.exclude(id__in=content_types_id)\n\n # check if tag is assigned to any of the removed content_types\n for content_type in removed_content_types:\n model = content_type.model_class()\n if model.objects.filter(tags=self).exists():\n errors.setdefault(\"content_types\", []).append(\n f\"Unable to remove {model._meta.label_lower}. Dependent objects were found.\"\n )\n\n return errors\n\n\nclass TaggedItem(BaseModel, GenericUUIDTaggedItemBase):\n tag = models.ForeignKey(to=Tag, related_name=\"%(app_label)s_%(class)s_items\", on_delete=models.CASCADE)\n\n class Meta:\n index_together = (\"content_type\", \"object_id\")\n unique_together = [[\"content_type\", \"object_id\", \"tag\"]]\n", "path": "nautobot/extras/models/tags.py"}]} | 1,475 | 96 |
gh_patches_debug_30621 | rasdani/github-patches | git_diff | pyodide__pyodide-3483 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
On first call, `pyodide config get emscripten_version` returns `Downloading xbuild environment Installing xbuild environment 3.1.27` instead of `3.1.27`
## 🐛 Bug
In [the docs for out-of-tree builds](https://pyodide.org/en/stable/development/building-and-testing-packages.html#building-and-testing-packages-out-of-tree) it gives this code snippet:
```bash
pip install pyodide-build
git clone https://github.com/emscripten-core/emsdk.git
cd emsdk
PYODIDE_EMSCRIPTEN_VERSION=$(pyodide config get emscripten_version)
./emsdk install ${PYODIDE_EMSCRIPTEN_VERSION}
./emsdk activate ${PYODIDE_EMSCRIPTEN_VERSION}
source emsdk_env.sh
```
But this doesn't work because on the first call, `pyodide config get emscripten_version` outputs this:
```
Downloading xbuild environment
Installing xbuild environment
3.1.27
```
On subsequent calls it returns `3.1.27`.
### To Reproduce
See above.
### Expected behavior
Calls to `pyodide config get emscripten_version` should only ever output the version string such that this command can be reliably used in build automation.
### Environment
- Pyodide Version<!-- (e.g. 1.8.1) -->: Pyodide CLI Version: 0.2.2
### Additional context
As a workaround for build scripts, `pyodide config get emscripten_version` can be called once before actually using it.
</issue>
<code>
[start of pyodide-build/pyodide_build/cli/config.py]
1 import typer
2
3 from ..common import get_make_environment_vars
4 from ..out_of_tree.utils import initialize_pyodide_root
5
6 app = typer.Typer(help="Manage config variables used in pyodide")
7
8
9 # A dictionary of config variables {key: env_var_in_makefile}
10 PYODIDE_CONFIGS = {
11 "emscripten_version": "PYODIDE_EMSCRIPTEN_VERSION",
12 "python_version": "PYVERSION",
13 }
14
15
16 @app.callback(no_args_is_help=True) # type: ignore[misc]
17 def callback() -> None:
18 return
19
20
21 def _get_configs() -> dict[str, str]:
22 initialize_pyodide_root()
23
24 configs: dict[str, str] = get_make_environment_vars()
25
26 configs_filtered = {k: configs[v] for k, v in PYODIDE_CONFIGS.items()}
27 return configs_filtered
28
29
30 @app.command("list")
31 def list_config():
32 """
33 List config variables used in pyodide
34 """
35 configs = _get_configs()
36
37 for k, v in configs.items():
38 typer.echo(f"{k}={v}")
39
40
41 @app.command("get") # type: ignore[misc]
42 def get_config(
43 config_var: str = typer.Argument(
44 ..., help="A config variable to get. Use `list` to see all possible values."
45 ),
46 ) -> None:
47 """
48 Get a value of a single config variable used in pyodide
49 """
50 configs = _get_configs()
51
52 if config_var not in configs:
53 typer.echo(f"Config variable {config_var} not found.")
54 typer.Exit(1)
55
56 typer.echo(configs[config_var])
57
[end of pyodide-build/pyodide_build/cli/config.py]
[start of pyodide-build/pyodide_build/out_of_tree/utils.py]
1 import os
2 from pathlib import Path
3
4 from ..common import search_pyodide_root
5
6
7 def ensure_env_installed(env: Path) -> None:
8 if env.exists():
9 return
10 from .. import __version__
11 from ..install_xbuildenv import download_xbuildenv, install_xbuildenv
12
13 if "dev" in __version__:
14 raise RuntimeError(
15 "To use out of tree builds with development Pyodide, you must explicitly set PYODIDE_ROOT"
16 )
17
18 download_xbuildenv(__version__, env)
19 install_xbuildenv(__version__, env)
20
21
22 def initialize_pyodide_root() -> None:
23 if "PYODIDE_ROOT" in os.environ:
24 return
25 try:
26 os.environ["PYODIDE_ROOT"] = str(search_pyodide_root(__file__))
27 return
28 except FileNotFoundError:
29 pass
30 env = Path(".pyodide-xbuildenv")
31 os.environ["PYODIDE_ROOT"] = str(env / "xbuildenv/pyodide-root")
32 ensure_env_installed(env)
33
[end of pyodide-build/pyodide_build/out_of_tree/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyodide-build/pyodide_build/cli/config.py b/pyodide-build/pyodide_build/cli/config.py
--- a/pyodide-build/pyodide_build/cli/config.py
+++ b/pyodide-build/pyodide_build/cli/config.py
@@ -19,7 +19,7 @@
def _get_configs() -> dict[str, str]:
- initialize_pyodide_root()
+ initialize_pyodide_root(quiet=True)
configs: dict[str, str] = get_make_environment_vars()
diff --git a/pyodide-build/pyodide_build/out_of_tree/utils.py b/pyodide-build/pyodide_build/out_of_tree/utils.py
--- a/pyodide-build/pyodide_build/out_of_tree/utils.py
+++ b/pyodide-build/pyodide_build/out_of_tree/utils.py
@@ -1,10 +1,12 @@
import os
+from contextlib import ExitStack, redirect_stdout
+from io import StringIO
from pathlib import Path
from ..common import search_pyodide_root
-def ensure_env_installed(env: Path) -> None:
+def ensure_env_installed(env: Path, *, quiet: bool = False) -> None:
if env.exists():
return
from .. import __version__
@@ -15,11 +17,16 @@
"To use out of tree builds with development Pyodide, you must explicitly set PYODIDE_ROOT"
)
- download_xbuildenv(__version__, env)
- install_xbuildenv(__version__, env)
+ with ExitStack() as stack:
+ if quiet:
+ # Prevent writes to stdout
+ stack.enter_context(redirect_stdout(StringIO()))
+ download_xbuildenv(__version__, env)
+ install_xbuildenv(__version__, env)
-def initialize_pyodide_root() -> None:
+
+def initialize_pyodide_root(*, quiet: bool = False) -> None:
if "PYODIDE_ROOT" in os.environ:
return
try:
@@ -29,4 +36,4 @@
pass
env = Path(".pyodide-xbuildenv")
os.environ["PYODIDE_ROOT"] = str(env / "xbuildenv/pyodide-root")
- ensure_env_installed(env)
+ ensure_env_installed(env, quiet=quiet)
| {"golden_diff": "diff --git a/pyodide-build/pyodide_build/cli/config.py b/pyodide-build/pyodide_build/cli/config.py\n--- a/pyodide-build/pyodide_build/cli/config.py\n+++ b/pyodide-build/pyodide_build/cli/config.py\n@@ -19,7 +19,7 @@\n \n \n def _get_configs() -> dict[str, str]:\n- initialize_pyodide_root()\n+ initialize_pyodide_root(quiet=True)\n \n configs: dict[str, str] = get_make_environment_vars()\n \ndiff --git a/pyodide-build/pyodide_build/out_of_tree/utils.py b/pyodide-build/pyodide_build/out_of_tree/utils.py\n--- a/pyodide-build/pyodide_build/out_of_tree/utils.py\n+++ b/pyodide-build/pyodide_build/out_of_tree/utils.py\n@@ -1,10 +1,12 @@\n import os\n+from contextlib import ExitStack, redirect_stdout\n+from io import StringIO\n from pathlib import Path\n \n from ..common import search_pyodide_root\n \n \n-def ensure_env_installed(env: Path) -> None:\n+def ensure_env_installed(env: Path, *, quiet: bool = False) -> None:\n if env.exists():\n return\n from .. import __version__\n@@ -15,11 +17,16 @@\n \"To use out of tree builds with development Pyodide, you must explicitly set PYODIDE_ROOT\"\n )\n \n- download_xbuildenv(__version__, env)\n- install_xbuildenv(__version__, env)\n+ with ExitStack() as stack:\n+ if quiet:\n+ # Prevent writes to stdout\n+ stack.enter_context(redirect_stdout(StringIO()))\n \n+ download_xbuildenv(__version__, env)\n+ install_xbuildenv(__version__, env)\n \n-def initialize_pyodide_root() -> None:\n+\n+def initialize_pyodide_root(*, quiet: bool = False) -> None:\n if \"PYODIDE_ROOT\" in os.environ:\n return\n try:\n@@ -29,4 +36,4 @@\n pass\n env = Path(\".pyodide-xbuildenv\")\n os.environ[\"PYODIDE_ROOT\"] = str(env / \"xbuildenv/pyodide-root\")\n- ensure_env_installed(env)\n+ ensure_env_installed(env, quiet=quiet)\n", "issue": "On first call, `pyodide config get emscripten_version` returns `Downloading xbuild environment Installing xbuild environment 3.1.27` instead of `3.1.27`\n## \ud83d\udc1b Bug\r\n\r\nIn [the docs for out-of-tree builds](https://pyodide.org/en/stable/development/building-and-testing-packages.html#building-and-testing-packages-out-of-tree) it gives this code snippet:\r\n\r\n```bash\r\npip install pyodide-build\r\n\r\ngit clone https://github.com/emscripten-core/emsdk.git\r\ncd emsdk\r\n\r\nPYODIDE_EMSCRIPTEN_VERSION=$(pyodide config get emscripten_version)\r\n./emsdk install ${PYODIDE_EMSCRIPTEN_VERSION}\r\n./emsdk activate ${PYODIDE_EMSCRIPTEN_VERSION}\r\nsource emsdk_env.sh\r\n```\r\nBut this doesn't work because on the first call, `pyodide config get emscripten_version` outputs this:\r\n```\r\nDownloading xbuild environment\r\nInstalling xbuild environment\r\n3.1.27\r\n```\r\nOn subsequent calls it returns `3.1.27`.\r\n\r\n### To Reproduce\r\n\r\nSee above.\r\n\r\n### Expected behavior\r\n\r\nCalls to `pyodide config get emscripten_version` should only ever output the version string such that this command can be reliably used in build automation.\r\n\r\n### Environment\r\n\r\n- Pyodide Version<!-- (e.g. 
1.8.1) -->: Pyodide CLI Version: 0.2.2\r\n\r\n### Additional context\r\n\r\nAs a workaround for build scripts, `pyodide config get emscripten_version` can be called once before actually using it.\r\n\n", "before_files": [{"content": "import typer\n\nfrom ..common import get_make_environment_vars\nfrom ..out_of_tree.utils import initialize_pyodide_root\n\napp = typer.Typer(help=\"Manage config variables used in pyodide\")\n\n\n# A dictionary of config variables {key: env_var_in_makefile}\nPYODIDE_CONFIGS = {\n \"emscripten_version\": \"PYODIDE_EMSCRIPTEN_VERSION\",\n \"python_version\": \"PYVERSION\",\n}\n\n\[email protected](no_args_is_help=True) # type: ignore[misc]\ndef callback() -> None:\n return\n\n\ndef _get_configs() -> dict[str, str]:\n initialize_pyodide_root()\n\n configs: dict[str, str] = get_make_environment_vars()\n\n configs_filtered = {k: configs[v] for k, v in PYODIDE_CONFIGS.items()}\n return configs_filtered\n\n\[email protected](\"list\")\ndef list_config():\n \"\"\"\n List config variables used in pyodide\n \"\"\"\n configs = _get_configs()\n\n for k, v in configs.items():\n typer.echo(f\"{k}={v}\")\n\n\[email protected](\"get\") # type: ignore[misc]\ndef get_config(\n config_var: str = typer.Argument(\n ..., help=\"A config variable to get. Use `list` to see all possible values.\"\n ),\n) -> None:\n \"\"\"\n Get a value of a single config variable used in pyodide\n \"\"\"\n configs = _get_configs()\n\n if config_var not in configs:\n typer.echo(f\"Config variable {config_var} not found.\")\n typer.Exit(1)\n\n typer.echo(configs[config_var])\n", "path": "pyodide-build/pyodide_build/cli/config.py"}, {"content": "import os\nfrom pathlib import Path\n\nfrom ..common import search_pyodide_root\n\n\ndef ensure_env_installed(env: Path) -> None:\n if env.exists():\n return\n from .. import __version__\n from ..install_xbuildenv import download_xbuildenv, install_xbuildenv\n\n if \"dev\" in __version__:\n raise RuntimeError(\n \"To use out of tree builds with development Pyodide, you must explicitly set PYODIDE_ROOT\"\n )\n\n download_xbuildenv(__version__, env)\n install_xbuildenv(__version__, env)\n\n\ndef initialize_pyodide_root() -> None:\n if \"PYODIDE_ROOT\" in os.environ:\n return\n try:\n os.environ[\"PYODIDE_ROOT\"] = str(search_pyodide_root(__file__))\n return\n except FileNotFoundError:\n pass\n env = Path(\".pyodide-xbuildenv\")\n os.environ[\"PYODIDE_ROOT\"] = str(env / \"xbuildenv/pyodide-root\")\n ensure_env_installed(env)\n", "path": "pyodide-build/pyodide_build/out_of_tree/utils.py"}]} | 1,676 | 506 |
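
The pyodide fix above hinges on conditionally silencing stdout. Below is a self-contained sketch of the same `ExitStack` plus `redirect_stdout` pattern; `run_setup` is a hypothetical stand-in for the xbuildenv install step.

```python
# Conditionally swallow stdout so progress chatter cannot contaminate
# machine-readable CLI output (the pattern used in the patch above).
from contextlib import ExitStack, redirect_stdout
from io import StringIO


def run_setup(*, quiet: bool = False) -> None:
    with ExitStack() as stack:
        if quiet:
            # Entering the context only when needed keeps one code path.
            stack.enter_context(redirect_stdout(StringIO()))
        print("Downloading xbuild environment")
        print("Installing xbuild environment")


run_setup(quiet=True)   # prints nothing
run_setup(quiet=False)  # prints both progress lines
```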
gh_patches_debug_19145 | rasdani/github-patches | git_diff | kivy__python-for-android-1480 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
libxml2 build broken on latest p4a master with python 3
With latest p4a master & python3crystax & host python 3, the libxml2 recipe gets built inside `/root/.local/share/python-for-android/build/other_builds/libxml2/armeabi-v7a__ndk_target_19/libxml2/`.
This is a folder name change: previously it was in `/root/.local/share/python-for-android/build/other_builds/libxml2/armeabi-v7a/libxml2/`, and as a result the path detection inside the libxml2 recipe fails for some reason.
### Versions
* Python: host python 3.x, python3crystax target
* OS: ubuntu in docker
* Kivy: not used
* Cython: not sure, but I don't think it's relevant
Edit: corrected lxml -> libxml2
</issue>
<code>
[start of pythonforandroid/recipes/libxslt/__init__.py]
1 from pythonforandroid.toolchain import Recipe, shprint, shutil, current_directory
2 from os.path import exists, join, dirname
3 import sh
4
5
6 class LibxsltRecipe(Recipe):
7 version = "1.1.28"
8 url = "http://xmlsoft.org/sources/libxslt-{version}.tar.gz"
9 depends = ["libxml2"]
10 patches = ["fix-dlopen.patch"]
11
12 call_hostpython_via_targetpython = False
13
14 def should_build(self, arch):
15 super(LibxsltRecipe, self).should_build(arch)
16 return not exists(join(self.ctx.get_libs_dir(arch.arch), "libxslt.a"))
17
18 def build_arch(self, arch):
19 super(LibxsltRecipe, self).build_arch(arch)
20 env = self.get_recipe_env(arch)
21 with current_directory(self.get_build_dir(arch.arch)):
22 # If the build is done with /bin/sh things blow up,
23 # try really hard to use bash
24 env["CC"] += " -I%s" % self.get_build_dir(arch.arch)
25 libxml = (
26 dirname(dirname(self.get_build_container_dir(arch.arch)))
27 + "/libxml2/%s/libxml2" % arch.arch
28 )
29 shprint(
30 sh.Command("./configure"),
31 "--build=i686-pc-linux-gnu",
32 "--host=arm-linux-eabi",
33 "--without-plugins",
34 "--without-debug",
35 "--without-python",
36 "--without-crypto",
37 "--with-libxml-src=%s" % libxml,
38 _env=env,
39 )
40 shprint(sh.make, "V=1", _env=env)
41 shutil.copyfile(
42 "libxslt/.libs/libxslt.a",
43 join(self.ctx.get_libs_dir(arch.arch), "libxslt.a"),
44 )
45 shutil.copyfile(
46 "libexslt/.libs/libexslt.a",
47 join(self.ctx.get_libs_dir(arch.arch), "libexslt.a"),
48 )
49
50 def get_recipe_env(self, arch):
51 env = super(LibxsltRecipe, self).get_recipe_env(arch)
52 env["CONFIG_SHELL"] = "/bin/bash"
53 env["SHELL"] = "/bin/bash"
54 env[
55 "CC"
56 ] = "arm-linux-androideabi-gcc -DANDROID -mandroid -fomit-frame-pointer --sysroot={}".format(
57 self.ctx.ndk_platform
58 )
59
60 env["LDSHARED"] = "%s -nostartfiles -shared -fPIC" % env["CC"]
61 return env
62
63
64 recipe = LibxsltRecipe()
65
[end of pythonforandroid/recipes/libxslt/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pythonforandroid/recipes/libxslt/__init__.py b/pythonforandroid/recipes/libxslt/__init__.py
--- a/pythonforandroid/recipes/libxslt/__init__.py
+++ b/pythonforandroid/recipes/libxslt/__init__.py
@@ -1,5 +1,5 @@
from pythonforandroid.toolchain import Recipe, shprint, shutil, current_directory
-from os.path import exists, join, dirname
+from os.path import exists, join
import sh
@@ -22,10 +22,8 @@
# If the build is done with /bin/sh things blow up,
# try really hard to use bash
env["CC"] += " -I%s" % self.get_build_dir(arch.arch)
- libxml = (
- dirname(dirname(self.get_build_container_dir(arch.arch)))
- + "/libxml2/%s/libxml2" % arch.arch
- )
+ libxml = Recipe.get_recipe(
+ 'libxml2', self.ctx).get_build_dir(arch.arch)
shprint(
sh.Command("./configure"),
"--build=i686-pc-linux-gnu",
| {"golden_diff": "diff --git a/pythonforandroid/recipes/libxslt/__init__.py b/pythonforandroid/recipes/libxslt/__init__.py\n--- a/pythonforandroid/recipes/libxslt/__init__.py\n+++ b/pythonforandroid/recipes/libxslt/__init__.py\n@@ -1,5 +1,5 @@\n from pythonforandroid.toolchain import Recipe, shprint, shutil, current_directory\n-from os.path import exists, join, dirname\n+from os.path import exists, join\n import sh\n \n \n@@ -22,10 +22,8 @@\n # If the build is done with /bin/sh things blow up,\n # try really hard to use bash\n env[\"CC\"] += \" -I%s\" % self.get_build_dir(arch.arch)\n- libxml = (\n- dirname(dirname(self.get_build_container_dir(arch.arch)))\n- + \"/libxml2/%s/libxml2\" % arch.arch\n- )\n+ libxml = Recipe.get_recipe(\n+ 'libxml2', self.ctx).get_build_dir(arch.arch)\n shprint(\n sh.Command(\"./configure\"),\n \"--build=i686-pc-linux-gnu\",\n", "issue": "libxml2 build broken on latest p4a master with python 3\nWith latest p4a master & python3crystax & host python 3, the libxml2 recipe gets built inside `/root/.local/share/python-for-android/build/other_builds/libxml2/armeabi-v7a__ndk_target_19/libxml2/ `.\r\n\r\nThis is a folder name change, previously it used to be in `/root/.local/share/python-for-android/build/other_builds/libxml2/armeabi-v7a/libxml2/` - and as a result, the path detection inside the libxml2 fails for some reason.\r\n\r\n### Versions\r\n\r\n* Python: host python 3.x, python3crystax target\r\n* OS: ubuntu in docker\r\n* Kivy: not used\r\n* Cython: not sure, but I don't think it's relevant\r\n\r\nEdit: corrected lxml -> libxml2\n", "before_files": [{"content": "from pythonforandroid.toolchain import Recipe, shprint, shutil, current_directory\nfrom os.path import exists, join, dirname\nimport sh\n\n\nclass LibxsltRecipe(Recipe):\n version = \"1.1.28\"\n url = \"http://xmlsoft.org/sources/libxslt-{version}.tar.gz\"\n depends = [\"libxml2\"]\n patches = [\"fix-dlopen.patch\"]\n\n call_hostpython_via_targetpython = False\n\n def should_build(self, arch):\n super(LibxsltRecipe, self).should_build(arch)\n return not exists(join(self.ctx.get_libs_dir(arch.arch), \"libxslt.a\"))\n\n def build_arch(self, arch):\n super(LibxsltRecipe, self).build_arch(arch)\n env = self.get_recipe_env(arch)\n with current_directory(self.get_build_dir(arch.arch)):\n # If the build is done with /bin/sh things blow up,\n # try really hard to use bash\n env[\"CC\"] += \" -I%s\" % self.get_build_dir(arch.arch)\n libxml = (\n dirname(dirname(self.get_build_container_dir(arch.arch)))\n + \"/libxml2/%s/libxml2\" % arch.arch\n )\n shprint(\n sh.Command(\"./configure\"),\n \"--build=i686-pc-linux-gnu\",\n \"--host=arm-linux-eabi\",\n \"--without-plugins\",\n \"--without-debug\",\n \"--without-python\",\n \"--without-crypto\",\n \"--with-libxml-src=%s\" % libxml,\n _env=env,\n )\n shprint(sh.make, \"V=1\", _env=env)\n shutil.copyfile(\n \"libxslt/.libs/libxslt.a\",\n join(self.ctx.get_libs_dir(arch.arch), \"libxslt.a\"),\n )\n shutil.copyfile(\n \"libexslt/.libs/libexslt.a\",\n join(self.ctx.get_libs_dir(arch.arch), \"libexslt.a\"),\n )\n\n def get_recipe_env(self, arch):\n env = super(LibxsltRecipe, self).get_recipe_env(arch)\n env[\"CONFIG_SHELL\"] = \"/bin/bash\"\n env[\"SHELL\"] = \"/bin/bash\"\n env[\n \"CC\"\n ] = \"arm-linux-androideabi-gcc -DANDROID -mandroid -fomit-frame-pointer --sysroot={}\".format(\n self.ctx.ndk_platform\n )\n\n env[\"LDSHARED\"] = \"%s -nostartfiles -shared -fPIC\" % env[\"CC\"]\n return env\n\n\nrecipe = LibxsltRecipe()\n", "path": 
"pythonforandroid/recipes/libxslt/__init__.py"}]} | 1,433 | 255 |
gh_patches_debug_12242 | rasdani/github-patches | git_diff | DataBiosphere__toil-562 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Need `toil --version`
</issue>
<code>
[start of src/toil/utils/toilMain.py]
1 from __future__ import absolute_import, print_function
2 import os
3 import sys
4
5 def main():
6 modules = loadModules()
7 try:
8 command = sys.argv[1]
9 except IndexError:
10 printHelp(modules)
11 else:
12 if command == '--help':
13 printHelp(modules)
14 else:
15 try:
16 module = modules[command]
17 except KeyError:
18 print("Unknown option '%s'. "
19 "Pass --help to display usage information.\n" % command, file=sys.stderr)
20 sys.exit(1)
21 else:
22 del sys.argv[1]
23 module.main()
24
25
26 def loadModules():
27 # noinspection PyUnresolvedReferences
28 from toil.utils import toilKill, toilRestart, toilStats, toilStatus, toilClean
29 return {name[4:].lower(): module for name, module in locals().iteritems()}
30
31
32 def printHelp(modules):
33 usage = ("\n"
34 "Usage: {name} COMMAND ...\n"
35 " {name} --help\n"
36 " {name} COMMAND --help\n\n"
37 "where COMMAND is one of the following:\n\n{descriptions}\n\n")
38 print(usage.format(
39 name=os.path.basename(sys.argv[0]),
40 commands='|'.join(modules.iterkeys()),
41 descriptions='\n'.join("%s - %s" % (n, m.__doc__.strip()) for n, m in modules.iteritems())))
42
[end of src/toil/utils/toilMain.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/toil/utils/toilMain.py b/src/toil/utils/toilMain.py
--- a/src/toil/utils/toilMain.py
+++ b/src/toil/utils/toilMain.py
@@ -1,4 +1,6 @@
from __future__ import absolute_import, print_function
+from toil.version import version
+import pkg_resources
import os
import sys
@@ -11,6 +13,11 @@
else:
if command == '--help':
printHelp(modules)
+ elif command == '--version':
+ try:
+ print(pkg_resources.get_distribution('toil').version)
+ except:
+ print("Version gathered from toil.version: "+version)
else:
try:
module = modules[command]
| {"golden_diff": "diff --git a/src/toil/utils/toilMain.py b/src/toil/utils/toilMain.py\n--- a/src/toil/utils/toilMain.py\n+++ b/src/toil/utils/toilMain.py\n@@ -1,4 +1,6 @@\n from __future__ import absolute_import, print_function\n+from toil.version import version\n+import pkg_resources\n import os\n import sys\n \n@@ -11,6 +13,11 @@\n else:\n if command == '--help':\n printHelp(modules)\n+ elif command == '--version':\n+ try:\n+ print(pkg_resources.get_distribution('toil').version)\n+ except:\n+ print(\"Version gathered from toil.version: \"+version)\n else:\n try:\n module = modules[command]\n", "issue": "Need `toil --version`\n\nNeed `toil --version`\n\n", "before_files": [{"content": "from __future__ import absolute_import, print_function\nimport os\nimport sys\n\ndef main():\n modules = loadModules()\n try:\n command = sys.argv[1]\n except IndexError:\n printHelp(modules)\n else:\n if command == '--help':\n printHelp(modules)\n else:\n try:\n module = modules[command]\n except KeyError:\n print(\"Unknown option '%s'. \"\n \"Pass --help to display usage information.\\n\" % command, file=sys.stderr)\n sys.exit(1)\n else:\n del sys.argv[1]\n module.main()\n\n\ndef loadModules():\n # noinspection PyUnresolvedReferences\n from toil.utils import toilKill, toilRestart, toilStats, toilStatus, toilClean\n return {name[4:].lower(): module for name, module in locals().iteritems()}\n\n\ndef printHelp(modules):\n usage = (\"\\n\"\n \"Usage: {name} COMMAND ...\\n\"\n \" {name} --help\\n\"\n \" {name} COMMAND --help\\n\\n\"\n \"where COMMAND is one of the following:\\n\\n{descriptions}\\n\\n\")\n print(usage.format(\n name=os.path.basename(sys.argv[0]),\n commands='|'.join(modules.iterkeys()),\n descriptions='\\n'.join(\"%s - %s\" % (n, m.__doc__.strip()) for n, m in modules.iteritems())))\n", "path": "src/toil/utils/toilMain.py"}]} | 945 | 170 |
gh_patches_debug_39022 | rasdani/github-patches | git_diff | opsdroid__opsdroid-693 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add Google Style Docstrings
We should add Google Style Docstrings to every function, method, and class in opsdroid. This style will support the existing documentation and will help in the future by allowing documentation to be generated automatically.
This involves a fair amount of effort, so this issue can be worked on by more than one contributor; just make sure everyone knows which part you are working on, so that no two contributors spend time on the same thing.
If you are unfamiliar with Google Style Docstrings, I'd recommend checking these resources:
- [Sphinx 1.8.0+ - Google Style Docstrings](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html)
Docstrings that need to be updated:
- main.py
- [x] configure_lang
- [ ] configure_log
- [ ] get_logging_level
- [ ] check_dependencies
- [ ] print_version
- [ ] print_example_config
- [ ] edit_files
- [x] welcome_message
- ~~helper.py~~
- [x] get_opsdroid
- [x] del_rw
- [x] move_config_to_appdir
- memory.py
- [x] Memory
- [x] get
- [x] put
- [x] _get_from_database
- [x] _put_to_database
- message.py
- [x] Message
- [x] __init__
- [x] _thinking_delay
- [x] _typing_delay
- [x] respond
- [x] react
- web.py
- [ ] Web
- [x] get_port
- [x] get_host
- [x] get_ssl_context
- [ ] start
- [ ] build_response
- [ ] web_index_handler
- [ ] web_stats_handler
- matchers.py
- [ ] match_regex
- [ ] match_apiai_action
- [ ] match_apiai_intent
- [ ] match_dialogflow_action
- [ ] match_dialogflow_intent
- [ ] match_luisai_intent
- [ ] match_rasanlu
- [ ] match_recastai
- [ ] match_witai
- [ ] match_crontab
- [ ] match_webhook
- [ ] match_always
- core.py
- [ ] OpsDroid
- [ ] default_connector
- [ ] exit
- [ ] critical
- [ ] call_stop
- [ ] disconnect
- [ ] stop
- [ ] load
- [ ] start_loop
- [x] setup_skills
- [ ] train_parsers
- [ ] start_connector_tasks
- [ ] start_database
- [ ] run_skill
- [ ] get_ranked_skills
- [ ] parse
- loader.py
- [ ] Loader
- [x] import_module_from_spec
- [x] import_module
- [x] check_cache
- [x] build_module_import_path
- [x] build_module_install_path
- [x] git_clone
- [x] git_pull
- [x] pip_install_deps
- [x] create_default_config
- [x] load_config_file
- [ ] envvar_constructor
- [ ] include_constructor
- [x] setup_modules_directory
- [x] load_modules_from_config
- [x] _load_modules
- [x] _install_module
- [x] _update_module
- [ ] _install_git_module
- [x] _install_local_module
---- ORIGINAL POST ----
I've been wondering about this for a while now and I would like to know if we should replace/update all the docstrings in opsdroid with the Google Style doc strings.
I think this could help new and old contributors to contribute and commit to opsdroid, since Google Style docstrings give more information about every method/function and specify clearly what sort of input the function/method expects, what it will return, and what will be raised (if applicable).
The downside of this style is that the length of every .py file will increase due to the docstrings, but since most IDEs allow you to hide those fields it shouldn't be too bad.
Here is a good example of Google Style docstrings: [Sphinx 1.8.0+ - Google Style Docstrings](http://www.sphinx-doc.org/en/master/ext/example_google.html)
I would like to know what you all think about this idea and if it's worth spending time on it.
</issue>
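For readers unfamiliar with the convention, here is a minimal sketch of a Google Style docstring. The function and its parameters are hypothetical and exist only for illustration; they are not part of opsdroid:

```python
def fetch_user(user_id, timeout=5):
    """Fetch a user record from the backing store.

    Args:
        user_id (str): Unique identifier of the user to look up.
        timeout (int): Seconds to wait before giving up. Defaults to 5.

    Returns:
        dict: The user record, or ``None`` if no user was found.

    Raises:
        TimeoutError: If the lookup exceeds ``timeout`` seconds.

    """
```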
<code>
[start of opsdroid/memory.py]
1 """Class for persisting information in opsdroid."""
2
3 import logging
4
5
6 _LOGGER = logging.getLogger(__name__)
7
8
9 class Memory:
10 """An object to store and persist data outside of opsdroid."""
11
12 def __init__(self):
13 """Create memory dictionary."""
14 self.memory = {}
15 self.databases = []
16
17 async def get(self, key):
18 """Get data object for a given key."""
19 _LOGGER.debug(_("Getting %s from memory."), key)
20 database_result = await self._get_from_database(key)
21 if database_result is not None:
22 self.memory[key] = database_result
23 if key in self.memory:
24 return self.memory[key]
25
26 return None
27
28 async def put(self, key, data):
29 """Put a data object to a given key."""
30 _LOGGER.debug(_("Putting %s to memory"), key)
31 self.memory[key] = data
32 await self._put_to_database(key, self.memory[key])
33
34 async def _get_from_database(self, key):
35 """Get updates from databases for a given key."""
36 if not self.databases:
37 return None
38
39 results = []
40 for database in self.databases:
41 results.append(await database.get(key))
42 # TODO: Handle multiple databases
43 return results[0]
44
45 async def _put_to_database(self, key, data):
46 """Put updates into databases for a given key."""
47 if self.databases:
48 for database in self.databases:
49 await database.put(key, data)
50
[end of opsdroid/memory.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opsdroid/memory.py b/opsdroid/memory.py
--- a/opsdroid/memory.py
+++ b/opsdroid/memory.py
@@ -7,15 +7,33 @@
class Memory:
- """An object to store and persist data outside of opsdroid."""
+ """A Memory object.
+
+ An object to obtain, store and persist data outside of opsdroid.
+
+ Attributes:
+ databases (:obj:`list` of :obj:`Database`): List of database objects.
+ memory (:obj:`dict`): In-memory dictionary to store data.
+
+ """
def __init__(self):
- """Create memory dictionary."""
+ """Create object with minimum properties."""
self.memory = {}
self.databases = []
async def get(self, key):
- """Get data object for a given key."""
+ """Get data object for a given key.
+
+ Gets the key value found in-memory or from the database(s).
+
+ Args:
+ key (str): Key to retrieve data.
+
+ Returns:
+ A data object for the given key, otherwise `None`.
+
+ """
_LOGGER.debug(_("Getting %s from memory."), key)
database_result = await self._get_from_database(key)
if database_result is not None:
@@ -26,24 +44,53 @@
return None
async def put(self, key, data):
- """Put a data object to a given key."""
+ """Put a data object to a given key.
+
+ Stores the key and value in memory and the database(s).
+
+ Args:
+ key (str): Key for the data to store.
+ data (obj): Data object to store.
+
+ """
_LOGGER.debug(_("Putting %s to memory"), key)
self.memory[key] = data
await self._put_to_database(key, self.memory[key])
async def _get_from_database(self, key):
- """Get updates from databases for a given key."""
+ """Get updates from databases for a given key.
+
+ Gets the first key value found from the database(s).
+
+ Args:
+ key (str): Key to retrieve data from a database.
+
+ Returns:
+ The first key value (data object) found from the database(s).
+ Or `None` when no database is defined or no value is found.
+
+ Todo:
+ * Handle multiple databases
+
+ """
if not self.databases:
return None
results = []
for database in self.databases:
results.append(await database.get(key))
- # TODO: Handle multiple databases
return results[0]
async def _put_to_database(self, key, data):
- """Put updates into databases for a given key."""
+ """Put updates into databases for a given key.
+
+ Stores the key and value on each database defined.
+
+ Args:
+ key (str): Key for the data to store.
+ data (obj): Data object to store.
+
+ """
if self.databases:
for database in self.databases:
await database.put(key, data)
| {"golden_diff": "diff --git a/opsdroid/memory.py b/opsdroid/memory.py\n--- a/opsdroid/memory.py\n+++ b/opsdroid/memory.py\n@@ -7,15 +7,33 @@\n \n \n class Memory:\n- \"\"\"An object to store and persist data outside of opsdroid.\"\"\"\n+ \"\"\"A Memory object.\n+\n+ An object to obtain, store and persist data outside of opsdroid.\n+\n+ Attributes:\n+ databases (:obj:`list` of :obj:`Database`): List of database objects.\n+ memory (:obj:`dict`): In-memory dictionary to store data.\n+\n+ \"\"\"\n \n def __init__(self):\n- \"\"\"Create memory dictionary.\"\"\"\n+ \"\"\"Create object with minimum properties.\"\"\"\n self.memory = {}\n self.databases = []\n \n async def get(self, key):\n- \"\"\"Get data object for a given key.\"\"\"\n+ \"\"\"Get data object for a given key.\n+\n+ Gets the key value found in-memory or from the database(s).\n+\n+ Args:\n+ key (str): Key to retrieve data.\n+\n+ Returns:\n+ A data object for the given key, otherwise `None`.\n+\n+ \"\"\"\n _LOGGER.debug(_(\"Getting %s from memory.\"), key)\n database_result = await self._get_from_database(key)\n if database_result is not None:\n@@ -26,24 +44,53 @@\n return None\n \n async def put(self, key, data):\n- \"\"\"Put a data object to a given key.\"\"\"\n+ \"\"\"Put a data object to a given key.\n+\n+ Stores the key and value in memory and the database(s).\n+\n+ Args:\n+ key (str): Key for the data to store.\n+ data (obj): Data object to store.\n+\n+ \"\"\"\n _LOGGER.debug(_(\"Putting %s to memory\"), key)\n self.memory[key] = data\n await self._put_to_database(key, self.memory[key])\n \n async def _get_from_database(self, key):\n- \"\"\"Get updates from databases for a given key.\"\"\"\n+ \"\"\"Get updates from databases for a given key.\n+\n+ Gets the first key value found from the database(s).\n+\n+ Args:\n+ key (str): Key to retrieve data from a database.\n+\n+ Returns:\n+ The first key value (data object) found from the database(s).\n+ Or `None` when no database is defined or no value is found.\n+\n+ Todo:\n+ * Handle multiple databases\n+\n+ \"\"\"\n if not self.databases:\n return None\n \n results = []\n for database in self.databases:\n results.append(await database.get(key))\n- # TODO: Handle multiple databases\n return results[0]\n \n async def _put_to_database(self, key, data):\n- \"\"\"Put updates into databases for a given key.\"\"\"\n+ \"\"\"Put updates into databases for a given key.\n+\n+ Stores the key and value on each database defined.\n+\n+ Args:\n+ key (str): Key for the data to store.\n+ data (obj): Data object to store.\n+\n+ \"\"\"\n if self.databases:\n for database in self.databases:\n await database.put(key, data)\n", "issue": "Add Google Style Docstrings\nWe should implement Google Style Docstrings to every function, method, class in opsdroid. 
This style will support existing documentation and will help in the future by generating documentation automatically.\r\n\r\nThis consists in a bit of effort so this issue can be worked by more than one contributor, just make sure that everyone knows what you are working on in order to avoid other contributors spending time on something that you are working on.\r\n\r\nIf you are unfamiliar with the Google Style Docstrings I'd recommend that you check these resources:\r\n\r\n - [Sphix 1.8.0+ - Google Style Docstrings](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html)\r\n\r\n\r\n\r\nDocstrings that need to be updated:\r\n\r\n- main.py\r\n - [x] configure_lang\r\n - [ ] configure_log\r\n - [ ] get_logging_level\r\n - [ ] check_dependencies\r\n - [ ] print_version\r\n - [ ] print_example_config\r\n - [ ] edit_files\r\n - [x] welcome_message\r\n- ~~helper.py~~\r\n - [x] get_opsdroid\r\n - [x] del_rw\r\n - [x] move_config_to_appdir\r\n- memory.py\r\n - [x] Memory\r\n - [x] get\r\n - [x] put\r\n - [x] _get_from_database\r\n - [x] _put_to_database\r\n- message.py\r\n - [x] Message\r\n - [x] __init__\r\n - [x] _thinking_delay\r\n - [x] _typing delay\r\n - [x] respond\r\n - [x] react\r\n- web.py\r\n - [ ] Web\r\n - [x] get_port\r\n - [x] get_host\r\n - [x] get_ssl_context\r\n - [ ] start\r\n - [ ] build_response\r\n - [ ] web_index_handler\r\n - [ ] web_stats_handler\r\n- matchers.py\r\n - [ ] match_regex\r\n - [ ] match_apiai_action\r\n - [ ] match_apiai_intent\r\n - [ ] match_dialogflow_action\r\n - [ ] match_dialogflow_intent\r\n - [ ] match_luisai_intent\r\n - [ ] match_rasanlu\r\n - [ ] match_recastai\r\n - [ ] match_witai\r\n - [ ] match_crontab\r\n - [ ] match_webhook\r\n - [ ] match_always\r\n- core.py\r\n - [ ] OpsDroid\r\n - [ ] default_connector\r\n - [ ] exit\r\n - [ ] critical\r\n - [ ] call_stop\r\n - [ ] disconnect\r\n - [ ] stop\r\n - [ ] load\r\n - [ ] start_loop\r\n - [x] setup_skills\r\n - [ ] train_parsers\r\n - [ ] start_connector_tasks\r\n - [ ] start_database\r\n - [ ] run_skill\r\n - [ ] get_ranked_skills\r\n - [ ] parse\r\n- loader.py\r\n - [ ] Loader\r\n - [x] import_module_from_spec\r\n - [x] import_module\r\n - [x] check_cache\r\n - [x] build_module_import_path\r\n - [x] build_module_install_path\r\n - [x] git_clone\r\n - [x] git_pull\r\n - [x] pip_install_deps\r\n - [x] create_default_config\r\n - [x] load_config_file\r\n - [ ] envvar_constructor\r\n - [ ] include_constructor\r\n - [x] setup_modules_directory\r\n - [x] load_modules_from_config\r\n - [x] _load_modules\r\n - [x] _install_module\r\n - [x] _update_module\r\n - [ ] _install_git_module\r\n - [x] _install_local_module\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n---- ORIGINAL POST ---- \r\nI've been wondering about this for a while now and I would like to know if we should replace/update all the docstrings in opsdroid with the Google Style doc strings. 
\r\n\r\nI think this could help new and old contributors to contribute and commit to opsdroid since the Google Style docstrings give more information about every method/function and specifies clearly what sort of input the function/method expects, what will it return and what will be raised (if applicable).\r\n\r\nThe downsize of this style is that the length of every .py file will increase due to the doc strings, but since most IDE's allow you to hide those fields it shouldn't be too bad.\r\n\r\nHere is a good example of Google Style Doc strings: [Sphix 1.8.0+ - Google Style Docstrings](http://www.sphinx-doc.org/en/master/ext/example_google.html)\r\n\r\nI would like to know what you all think about this idea and if its worth spending time on it.\n", "before_files": [{"content": "\"\"\"Class for persisting information in opsdroid.\"\"\"\n\nimport logging\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Memory:\n \"\"\"An object to store and persist data outside of opsdroid.\"\"\"\n\n def __init__(self):\n \"\"\"Create memory dictionary.\"\"\"\n self.memory = {}\n self.databases = []\n\n async def get(self, key):\n \"\"\"Get data object for a given key.\"\"\"\n _LOGGER.debug(_(\"Getting %s from memory.\"), key)\n database_result = await self._get_from_database(key)\n if database_result is not None:\n self.memory[key] = database_result\n if key in self.memory:\n return self.memory[key]\n\n return None\n\n async def put(self, key, data):\n \"\"\"Put a data object to a given key.\"\"\"\n _LOGGER.debug(_(\"Putting %s to memory\"), key)\n self.memory[key] = data\n await self._put_to_database(key, self.memory[key])\n\n async def _get_from_database(self, key):\n \"\"\"Get updates from databases for a given key.\"\"\"\n if not self.databases:\n return None\n\n results = []\n for database in self.databases:\n results.append(await database.get(key))\n # TODO: Handle multiple databases\n return results[0]\n\n async def _put_to_database(self, key, data):\n \"\"\"Put updates into databases for a given key.\"\"\"\n if self.databases:\n for database in self.databases:\n await database.put(key, data)\n", "path": "opsdroid/memory.py"}]} | 1,984 | 702 |
gh_patches_debug_24411 | rasdani/github-patches | git_diff | python__python-docs-es-40 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve the CONTRIBUTING guide
We have a small guide that explains the procedure. However, it would be good to improve it a bit so that it is easier to follow for people who don't know much about GitHub and related tools: https://github.com/raulcd/python-docs-es/blob/3.7/CONTRIBUTING.rst
</issue>
<code>
[start of conf.py]
1 # Sphinx configuration file.
2 #
3 # - import original configurations from cpython/Doc/conf.py
4 # - append the path considering the cpython submodule is at ./cpython
5 # - create the symbolic links under ./cpython/locale/es/LC_MESSAGES
6 # - make the build to work under Read the Docs
7 #
8 # The git submodule was created using this Stack Overflow answer
9 # to fetch only the commit that I needed and avoid cloning the whole history
10 # https://stackoverflow.com/a/27445058
11 #
12 # This can be built locally using `sphinx-build` by running
13 #
14 # $ sphinx-build -b html -n -d _build/doctrees -D language=es . _build/html
15
16 import sys, os, time
17 sys.path.append(os.path.abspath('cpython/Doc/tools/extensions'))
18 sys.path.append(os.path.abspath('cpython/Doc/includes'))
19
20 # Import all the Sphinx settings from cpython
21 sys.path.append(os.path.abspath('cpython/Doc'))
22 from conf import *
23
24 version = '3.7'
25 release = '3.7.7'
26
27 project = 'Python en Español'
28 copyright = '2001-%s, Python Software Foundation' % time.strftime('%Y')
29
30 html_theme_path = ['cpython/Doc/tools']
31 templates_path = ['cpython/Doc/tools/templates']
32 html_static_path = ['cpython/Doc/tools/static']
33
34 os.system('mkdir -p cpython/locales/es/')
35 os.system('ln -nfs `pwd` cpython/locales/es/LC_MESSAGES')
36
37 gettext_compact = False
38 locale_dirs = ['../locales', 'cpython/locales'] # relative to the sourcedir
39
40 def setup(app):
41 # Change the sourcedir programmatically because Read the Docs always call it with `.`
42 app.srcdir = 'cpython/Doc'
43
[end of conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conf.py b/conf.py
--- a/conf.py
+++ b/conf.py
@@ -33,10 +33,34 @@
os.system('mkdir -p cpython/locales/es/')
os.system('ln -nfs `pwd` cpython/locales/es/LC_MESSAGES')
+os.system('ln -nfs `pwd`/CONTRIBUTING.rst cpython/Doc/CONTRIBUTING.rst')
gettext_compact = False
locale_dirs = ['../locales', 'cpython/locales'] # relative to the sourcedir
def setup(app):
+
+ def add_contributing_banner(app, doctree):
+ """
+ Insert a banner at the top of the index.
+
+ This way, we can easily communicate people to help with the translation,
+ pointing them to different resources.
+ """
+ from docutils import nodes, core
+
+ message = '¡Ayúdanos a traducir la documentación oficial de Python al Español! ' \
+ f'Puedes encontrar más información en `Como contribuir </es/{version}/CONTRIBUTING.html>`_ ' \
+ 'y así ayudarnos a acercar Python a más personas de habla hispana.'
+
+ paragraph = core.publish_doctree(message)[0]
+ banner = nodes.warning(ids=['contributing-banner'])
+ banner.append(paragraph)
+
+ for document in doctree.traverse(nodes.document):
+ document.insert(0, banner)
+
# Change the sourcedir programmatically because Read the Docs always call it with `.`
app.srcdir = 'cpython/Doc'
+
+ app.connect('doctree-read', add_contributing_banner)
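The fix above hinges on Sphinx's event API: `app.connect()` registers a callback for a named event, and the `doctree-read` event fires once for every parsed document. A stripped-down sketch of the same pattern, with a placeholder handler name and banner text:

```python
from docutils import nodes

def setup(app):
    def add_banner(app, doctree):
        # Build a warning admonition and prepend it to each parsed document.
        banner = nodes.warning(ids=['banner'])
        banner.append(nodes.paragraph(text='Placeholder banner text'))
        for document in doctree.traverse(nodes.document):
            document.insert(0, banner)

    app.connect('doctree-read', add_banner)
```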
| {"golden_diff": "diff --git a/conf.py b/conf.py\n--- a/conf.py\n+++ b/conf.py\n@@ -33,10 +33,34 @@\n \n os.system('mkdir -p cpython/locales/es/')\n os.system('ln -nfs `pwd` cpython/locales/es/LC_MESSAGES')\n+os.system('ln -nfs `pwd`/CONTRIBUTING.rst cpython/Doc/CONTRIBUTING.rst')\n \n gettext_compact = False\n locale_dirs = ['../locales', 'cpython/locales'] # relative to the sourcedir\n \n def setup(app):\n+\n+ def add_contributing_banner(app, doctree):\n+ \"\"\"\n+ Insert a banner at the top of the index.\n+\n+ This way, we can easily communicate people to help with the translation,\n+ pointing them to different resources.\n+ \"\"\"\n+ from docutils import nodes, core\n+\n+ message = '\u00a1Ay\u00fadanos a traducir la documentaci\u00f3n oficial de Python al Espa\u00f1ol! ' \\\n+ f'Puedes encontrar m\u00e1s informaci\u00f3n en `Como contribuir </es/{version}/CONTRIBUTING.html>`_ ' \\\n+ 'y as\u00ed ayudarnos a acercar Python a m\u00e1s personas de habla hispana.'\n+\n+ paragraph = core.publish_doctree(message)[0]\n+ banner = nodes.warning(ids=['contributing-banner'])\n+ banner.append(paragraph)\n+\n+ for document in doctree.traverse(nodes.document):\n+ document.insert(0, banner)\n+\n # Change the sourcedir programmatically because Read the Docs always call it with `.`\n app.srcdir = 'cpython/Doc'\n+\n+ app.connect('doctree-read', add_contributing_banner)\n", "issue": "Mejorar la gu\u00eda de CONTRIBUTING\nTenemos una peque\u00f1a gu\u00eda que explica el procedimiento. Sin embargo, estar\u00eda bueno mejorarla un poco para que sea m\u00e1s f\u00e1cil de seguir para persona que no sepan mucho de github y dem\u00e1s herramientas: https://github.com/raulcd/python-docs-es/blob/3.7/CONTRIBUTING.rst\n", "before_files": [{"content": "# Sphinx configuration file.\n#\n# - import original configurations from cpython/Doc/conf.py\n# - append the path considering the cpython submodule is at ./cpython\n# - create the symbolic links under ./cpython/locale/es/LC_MESSAGES\n# - make the build to work under Read the Docs\n#\n# The git submodule was created using this Stack Overflow answer\n# to fetch only the commit that I needed and avoid clonning the whole history\n# https://stackoverflow.com/a/27445058\n#\n# This can be built locally using `sphinx-build` by running\n#\n# $ sphinx-build -b html -n -d _build/doctrees -D language=es . _build/html\n\nimport sys, os, time\nsys.path.append(os.path.abspath('cpython/Doc/tools/extensions'))\nsys.path.append(os.path.abspath('cpython/Doc/includes'))\n\n# Import all the Sphinx settings from cpython\nsys.path.append(os.path.abspath('cpython/Doc'))\nfrom conf import *\n\nversion = '3.7'\nrelease = '3.7.7'\n\nproject = 'Python en Espa\u00f1ol'\ncopyright = '2001-%s, Python Software Foundation' % time.strftime('%Y')\n\nhtml_theme_path = ['cpython/Doc/tools']\ntemplates_path = ['cpython/Doc/tools/templates']\nhtml_static_path = ['cpython/Doc/tools/static']\n\nos.system('mkdir -p cpython/locales/es/')\nos.system('ln -nfs `pwd` cpython/locales/es/LC_MESSAGES')\n\ngettext_compact = False\nlocale_dirs = ['../locales', 'cpython/locales'] # relative to the sourcedir\n\ndef setup(app):\n # Change the sourcedir programmatically because Read the Docs always call it with `.`\n app.srcdir = 'cpython/Doc'\n", "path": "conf.py"}]} | 1,082 | 377 |
gh_patches_debug_18863 | rasdani/github-patches | git_diff | vega__altair-2785 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Equal default chart dimensions
Currently the default theme in Altair specifies an aspect ratio of 4/3, with a width of 400 and a height of 300 (for continuous data):

The default in Vega-Lite is to make both dimensions of equal length, which I think makes sense since it spreads the data over the same number of pixels on both the X and Y axes. This could make it easier to fairly compare the distribution of the data between the two plotted variables, instead of the data appearing more spread out over the X axis due to the increased chart width. Vega-Lite uses 200 px for both the width and height by default, which I think is a bit small, but setting both to 300 px looks good:

What do you all think about changing the default width in Altair to 300 px, so that both the X and Y axes occupy the same number of pixels by default? Are there benefits to having an unequal aspect ratio like the current default that I am missing (maybe that it is more similar to the screen aspect ratio)? I don't think this is a major concern, but I thought I would bring it up and see if others also regard it as a small improvement, or whether it is just a matter of personal aesthetics/taste.
</issue>
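For context, a user can already opt into equal dimensions today by registering a custom theme through the public theme registry, the same `register`/`enable` API that the module below uses internally. The theme name `'square'` in this sketch is arbitrary:

```python
import altair as alt

def square_theme():
    # Same structure as the built-in default, but with equal sides.
    return {'config': {'view': {'continuousWidth': 300, 'continuousHeight': 300}}}

alt.themes.register('square', square_theme)
alt.themes.enable('square')
```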
<code>
[start of altair/vegalite/v5/theme.py]
1 """Tools for enabling and registering chart themes"""
2
3 from ...utils.theme import ThemeRegistry
4
5 VEGA_THEMES = [
6 "ggplot2",
7 "quartz",
8 "vox",
9 "fivethirtyeight",
10 "dark",
11 "latimes",
12 "urbaninstitute",
13 ]
14
15
16 class VegaTheme(object):
17 """Implementation of a builtin vega theme."""
18
19 def __init__(self, theme):
20 self.theme = theme
21
22 def __call__(self):
23 return {
24 "usermeta": {"embedOptions": {"theme": self.theme}},
25 "config": {"view": {"continuousWidth": 400, "continuousHeight": 300}},
26 }
27
28 def __repr__(self):
29 return "VegaTheme({!r})".format(self.theme)
30
31
32 # The entry point group that can be used by other packages to declare other
33 # renderers that will be auto-detected. Explicit registration is also
34 # allowed by the PluginRegistry API.
35 ENTRY_POINT_GROUP = "altair.vegalite.v5.theme" # type: str
36 themes = ThemeRegistry(entry_point_group=ENTRY_POINT_GROUP)
37
38 themes.register(
39 "default",
40 lambda: {"config": {"view": {"continuousWidth": 400, "continuousHeight": 300}}},
41 )
42 themes.register(
43 "opaque",
44 lambda: {
45 "config": {
46 "background": "white",
47 "view": {"continuousWidth": 400, "continuousHeight": 300},
48 }
49 },
50 )
51 themes.register("none", lambda: {})
52
53 for theme in VEGA_THEMES:
54 themes.register(theme, VegaTheme(theme))
55
56 themes.enable("default")
57
[end of altair/vegalite/v5/theme.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/altair/vegalite/v5/theme.py b/altair/vegalite/v5/theme.py
--- a/altair/vegalite/v5/theme.py
+++ b/altair/vegalite/v5/theme.py
@@ -22,7 +22,7 @@
def __call__(self):
return {
"usermeta": {"embedOptions": {"theme": self.theme}},
- "config": {"view": {"continuousWidth": 400, "continuousHeight": 300}},
+ "config": {"view": {"continuousWidth": 300, "continuousHeight": 300}},
}
def __repr__(self):
@@ -37,14 +37,14 @@
themes.register(
"default",
- lambda: {"config": {"view": {"continuousWidth": 400, "continuousHeight": 300}}},
+ lambda: {"config": {"view": {"continuousWidth": 300, "continuousHeight": 300}}},
)
themes.register(
"opaque",
lambda: {
"config": {
"background": "white",
- "view": {"continuousWidth": 400, "continuousHeight": 300},
+ "view": {"continuousWidth": 300, "continuousHeight": 300},
}
},
)
| {"golden_diff": "diff --git a/altair/vegalite/v5/theme.py b/altair/vegalite/v5/theme.py\n--- a/altair/vegalite/v5/theme.py\n+++ b/altair/vegalite/v5/theme.py\n@@ -22,7 +22,7 @@\n def __call__(self):\n return {\n \"usermeta\": {\"embedOptions\": {\"theme\": self.theme}},\n- \"config\": {\"view\": {\"continuousWidth\": 400, \"continuousHeight\": 300}},\n+ \"config\": {\"view\": {\"continuousWidth\": 300, \"continuousHeight\": 300}},\n }\n \n def __repr__(self):\n@@ -37,14 +37,14 @@\n \n themes.register(\n \"default\",\n- lambda: {\"config\": {\"view\": {\"continuousWidth\": 400, \"continuousHeight\": 300}}},\n+ lambda: {\"config\": {\"view\": {\"continuousWidth\": 300, \"continuousHeight\": 300}}},\n )\n themes.register(\n \"opaque\",\n lambda: {\n \"config\": {\n \"background\": \"white\",\n- \"view\": {\"continuousWidth\": 400, \"continuousHeight\": 300},\n+ \"view\": {\"continuousWidth\": 300, \"continuousHeight\": 300},\n }\n },\n )\n", "issue": "Equal default chart dimensions\nCurrently the default theme in Altair specifies an aspect ratio of 4/3, with a width of 400 and a height of 300 (for continuous data):\r\n\r\n\r\n\r\nThe default in VegaLite is to make both dimensions of equal length, which I think makes sense since it spreads the data over the same amount of pixels on both the X and Y axis. This could have benefits in terms of making it easier to fairly compare the distribution of the data between the two plotted variables instead of it appearing more spread out over the X axis due to the increase chart width. The default in Vega-Lite is to use 200 px for the width and height which I think is a bit small, but setting both to 300 px looks good:\r\n\r\n\r\n\r\nWhat do you all think about changing the default width in Altair to 300 px, so that both the X and Y axes occupy the same amount of pixels by default? Are there benefits of having an unequal aspect ratio like the current default that I am missing (maybe that it is more similar to the screen aspect ratio)? I don't think this is a major concern, but thought I would bring it up and see if others also regard it as a small improvement or just a matter of personal aesthetics/taste.\n", "before_files": [{"content": "\"\"\"Tools for enabling and registering chart themes\"\"\"\n\nfrom ...utils.theme import ThemeRegistry\n\nVEGA_THEMES = [\n \"ggplot2\",\n \"quartz\",\n \"vox\",\n \"fivethirtyeight\",\n \"dark\",\n \"latimes\",\n \"urbaninstitute\",\n]\n\n\nclass VegaTheme(object):\n \"\"\"Implementation of a builtin vega theme.\"\"\"\n\n def __init__(self, theme):\n self.theme = theme\n\n def __call__(self):\n return {\n \"usermeta\": {\"embedOptions\": {\"theme\": self.theme}},\n \"config\": {\"view\": {\"continuousWidth\": 400, \"continuousHeight\": 300}},\n }\n\n def __repr__(self):\n return \"VegaTheme({!r})\".format(self.theme)\n\n\n# The entry point group that can be used by other packages to declare other\n# renderers that will be auto-detected. 
Explicit registration is also\n# allowed by the PluginRegistery API.\nENTRY_POINT_GROUP = \"altair.vegalite.v5.theme\" # type: str\nthemes = ThemeRegistry(entry_point_group=ENTRY_POINT_GROUP)\n\nthemes.register(\n \"default\",\n lambda: {\"config\": {\"view\": {\"continuousWidth\": 400, \"continuousHeight\": 300}}},\n)\nthemes.register(\n \"opaque\",\n lambda: {\n \"config\": {\n \"background\": \"white\",\n \"view\": {\"continuousWidth\": 400, \"continuousHeight\": 300},\n }\n },\n)\nthemes.register(\"none\", lambda: {})\n\nfor theme in VEGA_THEMES:\n themes.register(theme, VegaTheme(theme))\n\nthemes.enable(\"default\")\n", "path": "altair/vegalite/v5/theme.py"}]} | 1,407 | 309 |
gh_patches_debug_6214 | rasdani/github-patches | git_diff | voicepaw__so-vits-svc-fork-1157 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unable to use svc pre-sd with pyannote.audio 3.1.1
### Describe the bug
To use svc pre-sd for a long audio file with multiple speakers, I followed the [setup guide](https://github.com/voicepaw/so-vits-svc-fork/#before-training) and manually installed pyannote.audio, getting the latest version, 3.1.1.
Attempting to run svc pre-sd triggered the following error messages:
```
Model was trained with pyannote.audio 0.0.1, yours is 3.1.1. Bad things might happen unless you revert pyannote.audio to 0.x.
Model was trained with torch 1.10.0+cu102, yours is 2.2.2+cu121. Bad things might happen unless you revert torch to 1.x.
```
According to [PyPI](https://pypi.org/project/pyannote.audio/3.1.1/), pyannote.audio 3.1.1 works with speaker-diarization-3.1. So, it’s necessary to explicitly specify this version in the code.
### To Reproduce
1. Set up the environment. (I'm using torch 2.2.2+cu121.)
2. Install so-vits-svc-fork and its dependencies.
3. Install pyannote.audio with `pip3 install pyannote-audio`.
4. Prepare your data and organize data folders.
5. Run svc pre-sd with options suited to your data.
### Additional context
_No response_
### Version
4.1.61
### Platform
WSL-Ubuntu 22.04 LTS
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct.
### No Duplicate
- [X] I have checked existing issues to avoid duplicates.
</issue>
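The essence of the fix is pinning the pretrained pipeline to the release that matches the installed library version. A minimal sketch, with a placeholder Hugging Face token:

```python
from pyannote.audio import Pipeline

pipeline = Pipeline.from_pretrained(
    'pyannote/speaker-diarization-3.1',  # release that matches pyannote.audio 3.1.x
    use_auth_token='hf_...',  # placeholder token
)
```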
<code>
[start of src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py]
1 from __future__ import annotations
2
3 from collections import defaultdict
4 from logging import getLogger
5 from pathlib import Path
6
7 import librosa
8 import soundfile as sf
9 import torch
10 from joblib import Parallel, delayed
11 from pyannote.audio import Pipeline
12 from tqdm import tqdm
13 from tqdm_joblib import tqdm_joblib
14
15 LOG = getLogger(__name__)
16
17
18 def _process_one(
19 input_path: Path,
20 output_dir: Path,
21 sr: int,
22 *,
23 min_speakers: int = 1,
24 max_speakers: int = 1,
25 huggingface_token: str | None = None,
26 ) -> None:
27 try:
28 audio, sr = librosa.load(input_path, sr=sr, mono=True)
29 except Exception as e:
30 LOG.warning(f"Failed to read {input_path}: {e}")
31 return
32 pipeline = Pipeline.from_pretrained(
33 "pyannote/speaker-diarization", use_auth_token=huggingface_token
34 )
35 if pipeline is None:
36 raise ValueError("Failed to load pipeline")
37 pipeline = pipeline.to(torch.device("cuda"))
38 LOG.info(f"Processing {input_path}. This may take a while...")
39 diarization = pipeline(
40 input_path, min_speakers=min_speakers, max_speakers=max_speakers
41 )
42
43 LOG.info(f"Found {len(diarization)} tracks, writing to {output_dir}")
44 speaker_count = defaultdict(int)
45
46 output_dir.mkdir(parents=True, exist_ok=True)
47 for segment, track, speaker in tqdm(
48 list(diarization.itertracks(yield_label=True)), desc=f"Writing {input_path}"
49 ):
50 if segment.end - segment.start < 1:
51 continue
52 speaker_count[speaker] += 1
53 audio_cut = audio[int(segment.start * sr) : int(segment.end * sr)]
54 sf.write(
55 (output_dir / f"{speaker}_{speaker_count[speaker]}.wav"),
56 audio_cut,
57 sr,
58 )
59
60 LOG.info(f"Speaker count: {speaker_count}")
61
62
63 def preprocess_speaker_diarization(
64 input_dir: Path | str,
65 output_dir: Path | str,
66 sr: int,
67 *,
68 min_speakers: int = 1,
69 max_speakers: int = 1,
70 huggingface_token: str | None = None,
71 n_jobs: int = -1,
72 ) -> None:
73 if huggingface_token is not None and not huggingface_token.startswith("hf_"):
74 LOG.warning("Huggingface token probably should start with hf_")
75 if not torch.cuda.is_available():
76 LOG.warning("CUDA is not available. This will be extremely slow.")
77 input_dir = Path(input_dir)
78 output_dir = Path(output_dir)
79 input_dir.mkdir(parents=True, exist_ok=True)
80 output_dir.mkdir(parents=True, exist_ok=True)
81 input_paths = list(input_dir.rglob("*.*"))
82 with tqdm_joblib(desc="Preprocessing speaker diarization", total=len(input_paths)):
83 Parallel(n_jobs=n_jobs)(
84 delayed(_process_one)(
85 input_path,
86 output_dir / input_path.relative_to(input_dir).parent / input_path.stem,
87 sr,
88 max_speakers=max_speakers,
89 min_speakers=min_speakers,
90 huggingface_token=huggingface_token,
91 )
92 for input_path in input_paths
93 )
94
[end of src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py b/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py
--- a/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py
+++ b/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py
@@ -30,7 +30,7 @@
LOG.warning(f"Failed to read {input_path}: {e}")
return
pipeline = Pipeline.from_pretrained(
- "pyannote/speaker-diarization", use_auth_token=huggingface_token
+ "pyannote/speaker-diarization-3.1", use_auth_token=huggingface_token
)
if pipeline is None:
raise ValueError("Failed to load pipeline")
| {"golden_diff": "diff --git a/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py b/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py\n--- a/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py\n+++ b/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py\n@@ -30,7 +30,7 @@\n LOG.warning(f\"Failed to read {input_path}: {e}\")\n return\n pipeline = Pipeline.from_pretrained(\n- \"pyannote/speaker-diarization\", use_auth_token=huggingface_token\n+ \"pyannote/speaker-diarization-3.1\", use_auth_token=huggingface_token\n )\n if pipeline is None:\n raise ValueError(\"Failed to load pipeline\")\n", "issue": "Unable to use svc pre-sd with pyannote.audio 3.1.1\n### Describe the bug\n\nTo use svc pre-sd for a long audio file with multiple speakers, I followed the [setup guide](https://github.com/voicepaw/so-vits-svc-fork/#before-training) and manually installed pyannote.audio, getting the latest version, 3.1.1.\r\n\r\nAttempting to run svc pre-sd triggered the following error messages:\r\n\r\n```\r\nModel was trained with pyannote.audio 0.0.1, yours is 3.1.1. Bad things might happen unless you revert pyannote.audio to 0.x.\r\n\r\nModel was trained with torch 1.10.0+cu102, yours is 2.2.2+cu121. Bad things might happen unless you revert torch to 1.x.\r\n```\r\n\r\nAccording to [PyPI](https://pypi.org/project/pyannote.audio/3.1.1/), pyannote.audio 3.1.1 works with speaker-diarization-3.1. So, it\u2019s necessary to explicitly specify this version in the code.\n\n### To Reproduce\n\n1. Set up the environment. (I'm using torch 2.2.2+cu121.)\r\n2. Install so-vits-svc-fork and its dependencies.\r\n3. Install pyannote.audio with `pip3 install pyannote-audio`.\r\n4. Prepare your data and organize data folders.\r\n5. Run svc pre-sd with options suited to your data.\n\n### Additional context\n\n_No response_\n\n### Version\n\n4.1.61\n\n### Platform\n\nWSL-Ubuntu 22.04 LTS\n\n### Code of Conduct\n\n- [X] I agree to follow this project's Code of Conduct.\n\n### No Duplicate\n\n- [X] I have checked existing issues to avoid duplicates.\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom collections import defaultdict\nfrom logging import getLogger\nfrom pathlib import Path\n\nimport librosa\nimport soundfile as sf\nimport torch\nfrom joblib import Parallel, delayed\nfrom pyannote.audio import Pipeline\nfrom tqdm import tqdm\nfrom tqdm_joblib import tqdm_joblib\n\nLOG = getLogger(__name__)\n\n\ndef _process_one(\n input_path: Path,\n output_dir: Path,\n sr: int,\n *,\n min_speakers: int = 1,\n max_speakers: int = 1,\n huggingface_token: str | None = None,\n) -> None:\n try:\n audio, sr = librosa.load(input_path, sr=sr, mono=True)\n except Exception as e:\n LOG.warning(f\"Failed to read {input_path}: {e}\")\n return\n pipeline = Pipeline.from_pretrained(\n \"pyannote/speaker-diarization\", use_auth_token=huggingface_token\n )\n if pipeline is None:\n raise ValueError(\"Failed to load pipeline\")\n pipeline = pipeline.to(torch.device(\"cuda\"))\n LOG.info(f\"Processing {input_path}. 
This may take a while...\")\n diarization = pipeline(\n input_path, min_speakers=min_speakers, max_speakers=max_speakers\n )\n\n LOG.info(f\"Found {len(diarization)} tracks, writing to {output_dir}\")\n speaker_count = defaultdict(int)\n\n output_dir.mkdir(parents=True, exist_ok=True)\n for segment, track, speaker in tqdm(\n list(diarization.itertracks(yield_label=True)), desc=f\"Writing {input_path}\"\n ):\n if segment.end - segment.start < 1:\n continue\n speaker_count[speaker] += 1\n audio_cut = audio[int(segment.start * sr) : int(segment.end * sr)]\n sf.write(\n (output_dir / f\"{speaker}_{speaker_count[speaker]}.wav\"),\n audio_cut,\n sr,\n )\n\n LOG.info(f\"Speaker count: {speaker_count}\")\n\n\ndef preprocess_speaker_diarization(\n input_dir: Path | str,\n output_dir: Path | str,\n sr: int,\n *,\n min_speakers: int = 1,\n max_speakers: int = 1,\n huggingface_token: str | None = None,\n n_jobs: int = -1,\n) -> None:\n if huggingface_token is not None and not huggingface_token.startswith(\"hf_\"):\n LOG.warning(\"Huggingface token probably should start with hf_\")\n if not torch.cuda.is_available():\n LOG.warning(\"CUDA is not available. This will be extremely slow.\")\n input_dir = Path(input_dir)\n output_dir = Path(output_dir)\n input_dir.mkdir(parents=True, exist_ok=True)\n output_dir.mkdir(parents=True, exist_ok=True)\n input_paths = list(input_dir.rglob(\"*.*\"))\n with tqdm_joblib(desc=\"Preprocessing speaker diarization\", total=len(input_paths)):\n Parallel(n_jobs=n_jobs)(\n delayed(_process_one)(\n input_path,\n output_dir / input_path.relative_to(input_dir).parent / input_path.stem,\n sr,\n max_speakers=max_speakers,\n min_speakers=min_speakers,\n huggingface_token=huggingface_token,\n )\n for input_path in input_paths\n )\n", "path": "src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py"}]} | 1,875 | 184 |
gh_patches_debug_6467 | rasdani/github-patches | git_diff | getnikola__nikola-1145 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Never copy/list listings/*.py[co]
kinda related to invariance
</issue>
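In other words, compiled bytecode files should be dropped before the directory index is rendered, not only when rendering individual files. A minimal sketch of such a filter; the extension tuple mirrors the `ignored_extensions` already defined in the plugin below:

```python
import os

IGNORED_EXTENSIONS = ('.pyc', '.pyo')

def visible_files(files):
    """Drop compiled bytecode files from a directory listing."""
    return [f for f in files if os.path.splitext(f)[-1] not in IGNORED_EXTENSIONS]
```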
<code>
[start of nikola/plugins/task/listings.py]
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2014 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 from __future__ import unicode_literals, print_function
28
29 import os
30
31 from pygments import highlight
32 from pygments.lexers import get_lexer_for_filename, TextLexer
33 from pygments.formatters import HtmlFormatter
34
35 from nikola.plugin_categories import Task
36 from nikola import utils
37
38
39 class Listings(Task):
40 """Render pretty listings."""
41
42 name = "render_listings"
43
44 def set_site(self, site):
45 site.register_path_handler('listing', self.listing_path)
46 return super(Listings, self).set_site(site)
47
48 def gen_tasks(self):
49 """Render pretty code listings."""
50 kw = {
51 "default_lang": self.site.config["DEFAULT_LANG"],
52 "listings_folder": self.site.config["LISTINGS_FOLDER"],
53 "output_folder": self.site.config["OUTPUT_FOLDER"],
54 "index_file": self.site.config["INDEX_FILE"],
55 }
56
57 # Things to ignore in listings
58 ignored_extensions = (".pyc", ".pyo")
59
60 def render_listing(in_name, out_name, folders=[], files=[]):
61 if in_name:
62 with open(in_name, 'r') as fd:
63 try:
64 lexer = get_lexer_for_filename(in_name)
65 except:
66 lexer = TextLexer()
67 code = highlight(fd.read(), lexer,
68 HtmlFormatter(cssclass='code',
69 linenos="table", nowrap=False,
70 lineanchors=utils.slugify(in_name),
71 anchorlinenos=True))
72 title = os.path.basename(in_name)
73 else:
74 code = ''
75 title = ''
76 crumbs = utils.get_crumbs(os.path.relpath(out_name,
77 kw['output_folder']),
78 is_file=True)
79 context = {
80 'code': code,
81 'title': title,
82 'crumbs': crumbs,
83 'lang': kw['default_lang'],
84 'folders': folders,
85 'files': files,
86 'description': title,
87 }
88 self.site.render_template('listing.tmpl', out_name,
89 context)
90
91 yield self.group_task()
92
93 template_deps = self.site.template_system.template_deps('listing.tmpl')
94 for root, dirs, files in os.walk(kw['listings_folder']):
95 # Render all files
96 out_name = os.path.join(
97 kw['output_folder'],
98 root, kw['index_file']
99 )
100 yield {
101 'basename': self.name,
102 'name': out_name,
103 'file_dep': template_deps,
104 'targets': [out_name],
105 'actions': [(render_listing, [None, out_name, dirs, files])],
106 # This is necessary to reflect changes in blog title,
107 # sidebar links, etc.
108 'uptodate': [utils.config_changed(
109 self.site.GLOBAL_CONTEXT)],
110 'clean': True,
111 }
112 for f in files:
113 ext = os.path.splitext(f)[-1]
114 if ext in ignored_extensions:
115 continue
116 in_name = os.path.join(root, f)
117 out_name = os.path.join(
118 kw['output_folder'],
119 root,
120 f) + '.html'
121 yield {
122 'basename': self.name,
123 'name': out_name,
124 'file_dep': template_deps + [in_name],
125 'targets': [out_name],
126 'actions': [(render_listing, [in_name, out_name])],
127 # This is necessary to reflect changes in blog title,
128 # sidebar links, etc.
129 'uptodate': [utils.config_changed(
130 self.site.GLOBAL_CONTEXT)],
131 'clean': True,
132 }
133
134 def listing_path(self, name, lang):
135 return [_f for _f in [self.site.config['LISTINGS_FOLDER'], name +
136 '.html'] if _f]
137
[end of nikola/plugins/task/listings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nikola/plugins/task/listings.py b/nikola/plugins/task/listings.py
--- a/nikola/plugins/task/listings.py
+++ b/nikola/plugins/task/listings.py
@@ -92,6 +92,7 @@
template_deps = self.site.template_system.template_deps('listing.tmpl')
for root, dirs, files in os.walk(kw['listings_folder']):
+ files = [f for f in files if os.path.splitext(f)[-1] not in ignored_extensions]
# Render all files
out_name = os.path.join(
kw['output_folder'],
| {"golden_diff": "diff --git a/nikola/plugins/task/listings.py b/nikola/plugins/task/listings.py\n--- a/nikola/plugins/task/listings.py\n+++ b/nikola/plugins/task/listings.py\n@@ -92,6 +92,7 @@\n \n template_deps = self.site.template_system.template_deps('listing.tmpl')\n for root, dirs, files in os.walk(kw['listings_folder']):\n+ files = [f for f in files if os.path.splitext(f)[-1] not in ignored_extensions]\n # Render all files\n out_name = os.path.join(\n kw['output_folder'],\n", "issue": "Never copy/list listings/*.py[co]\nkinda related to invariance\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2014 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nfrom __future__ import unicode_literals, print_function\n\nimport os\n\nfrom pygments import highlight\nfrom pygments.lexers import get_lexer_for_filename, TextLexer\nfrom pygments.formatters import HtmlFormatter\n\nfrom nikola.plugin_categories import Task\nfrom nikola import utils\n\n\nclass Listings(Task):\n \"\"\"Render pretty listings.\"\"\"\n\n name = \"render_listings\"\n\n def set_site(self, site):\n site.register_path_handler('listing', self.listing_path)\n return super(Listings, self).set_site(site)\n\n def gen_tasks(self):\n \"\"\"Render pretty code listings.\"\"\"\n kw = {\n \"default_lang\": self.site.config[\"DEFAULT_LANG\"],\n \"listings_folder\": self.site.config[\"LISTINGS_FOLDER\"],\n \"output_folder\": self.site.config[\"OUTPUT_FOLDER\"],\n \"index_file\": self.site.config[\"INDEX_FILE\"],\n }\n\n # Things to ignore in listings\n ignored_extensions = (\".pyc\", \".pyo\")\n\n def render_listing(in_name, out_name, folders=[], files=[]):\n if in_name:\n with open(in_name, 'r') as fd:\n try:\n lexer = get_lexer_for_filename(in_name)\n except:\n lexer = TextLexer()\n code = highlight(fd.read(), lexer,\n HtmlFormatter(cssclass='code',\n linenos=\"table\", nowrap=False,\n lineanchors=utils.slugify(in_name),\n anchorlinenos=True))\n title = os.path.basename(in_name)\n else:\n code = ''\n title = ''\n crumbs = utils.get_crumbs(os.path.relpath(out_name,\n kw['output_folder']),\n is_file=True)\n context = {\n 'code': code,\n 'title': title,\n 'crumbs': crumbs,\n 'lang': kw['default_lang'],\n 'folders': folders,\n 'files': files,\n 'description': title,\n }\n self.site.render_template('listing.tmpl', out_name,\n context)\n\n yield self.group_task()\n\n template_deps = self.site.template_system.template_deps('listing.tmpl')\n for root, dirs, 
files in os.walk(kw['listings_folder']):\n # Render all files\n out_name = os.path.join(\n kw['output_folder'],\n root, kw['index_file']\n )\n yield {\n 'basename': self.name,\n 'name': out_name,\n 'file_dep': template_deps,\n 'targets': [out_name],\n 'actions': [(render_listing, [None, out_name, dirs, files])],\n # This is necessary to reflect changes in blog title,\n # sidebar links, etc.\n 'uptodate': [utils.config_changed(\n self.site.GLOBAL_CONTEXT)],\n 'clean': True,\n }\n for f in files:\n ext = os.path.splitext(f)[-1]\n if ext in ignored_extensions:\n continue\n in_name = os.path.join(root, f)\n out_name = os.path.join(\n kw['output_folder'],\n root,\n f) + '.html'\n yield {\n 'basename': self.name,\n 'name': out_name,\n 'file_dep': template_deps + [in_name],\n 'targets': [out_name],\n 'actions': [(render_listing, [in_name, out_name])],\n # This is necessary to reflect changes in blog title,\n # sidebar links, etc.\n 'uptodate': [utils.config_changed(\n self.site.GLOBAL_CONTEXT)],\n 'clean': True,\n }\n\n def listing_path(self, name, lang):\n return [_f for _f in [self.site.config['LISTINGS_FOLDER'], name +\n '.html'] if _f]\n", "path": "nikola/plugins/task/listings.py"}]} | 1,900 | 133 |
gh_patches_debug_27442 | rasdani/github-patches | git_diff | aio-libs-abandoned__aioredis-py-839 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Drop Python 3.5 support
Python 3.5 was a famous release (the async/await syntax was introduced in it), but the version has reached end-of-life.
We can (and should) drop it.
Speaking as the maintainer of other libraries, I can say that it simplifies the code base a little.
</issue>
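Dropping 3.5 lets packaging metadata take over from the runtime checks: `python_requires` enforces the minimum interpreter at install time, and a PEP 508 environment marker replaces the `platform` check used to decide whether to pull in hiredis. A minimal sketch of those two pieces (the project name is a placeholder):

```python
from setuptools import setup

setup(
    name='example',
    python_requires='>=3.6',
    install_requires=[
        'async-timeout',
        'hiredis; implementation_name=="cpython"',
    ],
)
```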
<code>
[start of setup.py]
1 import re
2 import os.path
3 import sys
4 import platform
5 from setuptools import setup, find_packages
6
7
8 install_requires = ['async-timeout']
9 if platform.python_implementation() == 'CPython':
10 install_requires.append('hiredis')
11
12 PY_VER = sys.version_info
13
14 if PY_VER < (3, 5):
15 raise RuntimeError("aioredis doesn't support Python version prior 3.5")
16
17
18 def read(*parts):
19 with open(os.path.join(*parts), 'rt') as f:
20 return f.read().strip()
21
22
23 def read_version():
24 regexp = re.compile(r"^__version__\W*=\W*'([\d.abrc]+)'")
25 init_py = os.path.join(os.path.dirname(__file__),
26 'aioredis', '__init__.py')
27 with open(init_py) as f:
28 for line in f:
29 match = regexp.match(line)
30 if match is not None:
31 return match.group(1)
32 raise RuntimeError('Cannot find version in {}'.format(init_py))
33
34
35 classifiers = [
36 'License :: OSI Approved :: MIT License',
37 'Development Status :: 4 - Beta',
38 'Programming Language :: Python',
39 'Programming Language :: Python :: 3',
40 'Programming Language :: Python :: 3.5',
41 'Programming Language :: Python :: 3.6',
42 'Programming Language :: Python :: 3.7',
43 'Programming Language :: Python :: 3 :: Only',
44 'Operating System :: POSIX',
45 'Environment :: Web Environment',
46 'Intended Audience :: Developers',
47 'Topic :: Software Development',
48 'Topic :: Software Development :: Libraries',
49 'Framework :: AsyncIO',
50 ]
51
52 setup(name='aioredis',
53 version=read_version(),
54 description=("asyncio (PEP 3156) Redis support"),
55 long_description="\n\n".join((read('README.rst'), read('CHANGES.txt'))),
56 classifiers=classifiers,
57 platforms=["POSIX"],
58 author="Alexey Popravka",
59 author_email="[email protected]",
60 url="https://github.com/aio-libs/aioredis",
61 license="MIT",
62 packages=find_packages(exclude=["tests"]),
63 install_requires=install_requires,
64 include_package_data=True,
65 )
66
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,20 +1,8 @@
import re
import os.path
-import sys
-import platform
from setuptools import setup, find_packages
-install_requires = ['async-timeout']
-if platform.python_implementation() == 'CPython':
- install_requires.append('hiredis')
-
-PY_VER = sys.version_info
-
-if PY_VER < (3, 5):
- raise RuntimeError("aioredis doesn't support Python version prior 3.5")
-
-
def read(*parts):
with open(os.path.join(*parts), 'rt') as f:
return f.read().strip()
@@ -37,7 +25,6 @@
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3 :: Only',
@@ -60,6 +47,10 @@
url="https://github.com/aio-libs/aioredis",
license="MIT",
packages=find_packages(exclude=["tests"]),
- install_requires=install_requires,
+ install_requires=[
+ 'async-timeout',
+ 'hiredis; implementation_name=="cpython"'
+ ],
+ python_requires=">=3.6",
include_package_data=True,
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,20 +1,8 @@\n import re\n import os.path\n-import sys\n-import platform\n from setuptools import setup, find_packages\n \n \n-install_requires = ['async-timeout']\n-if platform.python_implementation() == 'CPython':\n- install_requires.append('hiredis')\n-\n-PY_VER = sys.version_info\n-\n-if PY_VER < (3, 5):\n- raise RuntimeError(\"aioredis doesn't support Python version prior 3.5\")\n-\n-\n def read(*parts):\n with open(os.path.join(*parts), 'rt') as f:\n return f.read().strip()\n@@ -37,7 +25,6 @@\n 'Development Status :: 4 - Beta',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n- 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n@@ -60,6 +47,10 @@\n url=\"https://github.com/aio-libs/aioredis\",\n license=\"MIT\",\n packages=find_packages(exclude=[\"tests\"]),\n- install_requires=install_requires,\n+ install_requires=[\n+ 'async-timeout',\n+ 'hiredis; implementation_name==\"cpython\"'\n+ ],\n+ python_requires=\">=3.6\",\n include_package_data=True,\n )\n", "issue": "Drop Python 3.5 support\nPython 3.5 was a famous release (async/await syntax was introduced) but the version has reached end-of-life.\r\nWe can (and should) drop it.\r\nSpeaking as the maintainer of other libraries, I can say that it simplifies the code base a little\n", "before_files": [{"content": "import re\nimport os.path\nimport sys\nimport platform\nfrom setuptools import setup, find_packages\n\n\ninstall_requires = ['async-timeout']\nif platform.python_implementation() == 'CPython':\n install_requires.append('hiredis')\n\nPY_VER = sys.version_info\n\nif PY_VER < (3, 5):\n raise RuntimeError(\"aioredis doesn't support Python version prior 3.5\")\n\n\ndef read(*parts):\n with open(os.path.join(*parts), 'rt') as f:\n return f.read().strip()\n\n\ndef read_version():\n regexp = re.compile(r\"^__version__\\W*=\\W*'([\\d.abrc]+)'\")\n init_py = os.path.join(os.path.dirname(__file__),\n 'aioredis', '__init__.py')\n with open(init_py) as f:\n for line in f:\n match = regexp.match(line)\n if match is not None:\n return match.group(1)\n raise RuntimeError('Cannot find version in {}'.format(init_py))\n\n\nclassifiers = [\n 'License :: OSI Approved :: MIT License',\n 'Development Status :: 4 - Beta',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Operating System :: POSIX',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Framework :: AsyncIO',\n]\n\nsetup(name='aioredis',\n version=read_version(),\n description=(\"asyncio (PEP 3156) Redis support\"),\n long_description=\"\\n\\n\".join((read('README.rst'), read('CHANGES.txt'))),\n classifiers=classifiers,\n platforms=[\"POSIX\"],\n author=\"Alexey Popravka\",\n author_email=\"[email protected]\",\n url=\"https://github.com/aio-libs/aioredis\",\n license=\"MIT\",\n packages=find_packages(exclude=[\"tests\"]),\n install_requires=install_requires,\n include_package_data=True,\n )\n", "path": "setup.py"}]} | 1,210 | 336 |
gh_patches_debug_34889 | rasdani/github-patches | git_diff | scikit-hep__pyhf-1818 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add contextlib support to the pyhf.schema API
> instead of having users overwriting the value by assignment, do you think there is value in offering a `pyhf.utils.update_schema_path` or something that performs this operation through the API?
>
> Thanks @kratsg for taking up my suggestion!
>
> I second @matthewfeickert's suggestion to change schemas via function call rather than assignment. It is much simpler to add functionality to a function if it ever becomes necessary, than to replace an entire existing module with a class instance with a property.
>
> I'd even go so far to say that a context manager doubling as an update function would be ideal IMO:
> ```python
> # in pyhf.utils
> _SCHEMAS = Path(...)
> class use_schema_path: # snake_case to remind of function-like usage
> def __init__(self, path):
> global _SCHEMAS
> self._old_schemas = _SCHEMAS
> _SCHEMAS = pathlib.Path(path)
> def __enter__(self):
> pass
> def __exit__(self, *args, **kwargs):
> global _SCHEMAS
> _SCHEMAS = self._old_schemas
> ```
> which can still be called as a function (only executing `__init__`), so short scripts etc. are not forced to use `with` blocks.
> But it can also be used like so:
> ```python
> def make_my_workspace(spec):
> with pyhf.utils.use_schema_path('/my/very/special/schemas'):
> return pyhf.Workspace(spec)
> ```
> So as a user writing code on top of pyhf, I don't have to worry about resetting the global variable, the CM does it for me, and there are fewer mistakes to make.
>
_Originally posted by @lhenkelm in https://github.com/scikit-hep/pyhf/issues/1753#issuecomment-1026678066_
</issue>
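Before the code listing, here is a minimal, self-contained sketch of the callable-plus-context-manager pattern the quoted proposal describes. It assumes a module-level `_SCHEMAS` path; the names are illustrative stand-ins rather than pyhf's actual API (the merged fix, shown in the patch further below, keeps the saved path on the module object instead of a global):

```python
import pathlib

_SCHEMAS = pathlib.Path("/default/schemas")  # stand-in for the module-level path

class use_schema_path:  # snake_case to suggest function-like usage
    """Swap the schema search path; restore it when used as a context manager."""

    def __init__(self, path):
        global _SCHEMAS
        self._old = _SCHEMAS           # remember the previous path
        _SCHEMAS = pathlib.Path(path)  # switch immediately, so a bare call works

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        global _SCHEMAS
        _SCHEMAS = self._old           # restore on block exit
        return False                   # do not swallow exceptions

use_schema_path("/tmp/schemas")        # plain call: the path stays switched
with use_schema_path("/special/schemas"):
    assert _SCHEMAS == pathlib.Path("/special/schemas")
assert _SCHEMAS == pathlib.Path("/tmp/schemas")  # restored after the block
```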
<code>
[start of src/pyhf/schema/__init__.py]
1 """
2 See :class:`~pyhf.schema.Schema` for documentation.
3 """
4 import pathlib
5 import sys
6 from pyhf.schema.loader import load_schema
7 from pyhf.schema.validator import validate
8 from pyhf.schema import variables
9
10 __all__ = [
11 "load_schema",
12 "validate",
13 "path",
14 "version",
15 ]
16
17
18 def __dir__():
19 return __all__
20
21
22 class Schema(sys.modules[__name__].__class__):
23 """
24 A module-level wrapper around :mod:`pyhf.schema` which will provide additional functionality for interacting with schemas.
25
26 Example:
27 >>> import pyhf.schema
28 >>> import pathlib
29 >>> curr_path = pyhf.schema.path
30 >>> curr_path # doctest: +ELLIPSIS
31 PosixPath('.../pyhf/schemas')
32 >>> pyhf.schema(pathlib.Path('/home/root/my/new/path'))
33 >>> pyhf.schema.path
34 PosixPath('/home/root/my/new/path')
35 >>> pyhf.schema(curr_path)
36 >>> pyhf.schema.path # doctest: +ELLIPSIS
37 PosixPath('.../pyhf/schemas')
38
39 """
40
41 def __call__(self, new_path: pathlib.Path):
42 """
43 Change the local search path for finding schemas locally.
44
45 Args:
46 new_path (pathlib.Path): Path to folder containing the schemas
47
48 Returns:
49 None
50 """
51 variables.schemas = new_path
52
53 @property
54 def path(self):
55 """
56 The local path for schemas.
57 """
58 return variables.schemas
59
60 @property
61 def version(self):
62 """
63 The default version used for finding schemas.
64 """
65 return variables.SCHEMA_VERSION
66
67
68 sys.modules[__name__].__class__ = Schema
69
[end of src/pyhf/schema/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pyhf/schema/__init__.py b/src/pyhf/schema/__init__.py
--- a/src/pyhf/schema/__init__.py
+++ b/src/pyhf/schema/__init__.py
@@ -23,17 +23,40 @@
"""
A module-level wrapper around :mod:`pyhf.schema` which will provide additional functionality for interacting with schemas.
- Example:
+ .. rubric:: Example (callable)
+
+ .. code-block:: pycon
+
>>> import pyhf.schema
>>> import pathlib
>>> curr_path = pyhf.schema.path
- >>> curr_path # doctest: +ELLIPSIS
+ >>> curr_path # doctest: +ELLIPSIS
PosixPath('.../pyhf/schemas')
- >>> pyhf.schema(pathlib.Path('/home/root/my/new/path'))
+ >>> new_path = pathlib.Path("/home/root/my/new/path")
+ >>> pyhf.schema(new_path) # doctest: +ELLIPSIS
+ <module 'pyhf.schema' from ...>
>>> pyhf.schema.path
PosixPath('/home/root/my/new/path')
- >>> pyhf.schema(curr_path)
- >>> pyhf.schema.path # doctest: +ELLIPSIS
+ >>> pyhf.schema(curr_path) # doctest: +ELLIPSIS
+ <module 'pyhf.schema' from ...>
+ >>> pyhf.schema.path # doctest: +ELLIPSIS
+ PosixPath('.../pyhf/schemas')
+
+ .. rubric:: Example (context-manager)
+
+ .. code-block:: pycon
+
+ >>> import pyhf.schema
+ >>> import pathlib
+ >>> curr_path = pyhf.schema.path
+ >>> curr_path # doctest: +ELLIPSIS
+ PosixPath('.../pyhf/schemas')
+ >>> new_path = pathlib.Path("/home/root/my/new/path")
+ >>> with pyhf.schema(new_path):
+ ... print(repr(pyhf.schema.path))
+ ...
+ PosixPath('/home/root/my/new/path')
+ >>> pyhf.schema.path # doctest: +ELLIPSIS
PosixPath('.../pyhf/schemas')
"""
@@ -45,10 +68,23 @@
Args:
new_path (pathlib.Path): Path to folder containing the schemas
+ Returns:
+ self (pyhf.schema.Schema): Returns itself (for contextlib management)
+ """
+ self.orig_path, variables.schemas = variables.schemas, new_path
+ return self
+
+ def __enter__(self):
+ pass
+
+ def __exit__(self, *args, **kwargs):
+ """
+ Reset the local search path for finding schemas locally.
+
Returns:
None
"""
- variables.schemas = new_path
+ variables.schemas = self.orig_path
@property
def path(self):
| {"golden_diff": "diff --git a/src/pyhf/schema/__init__.py b/src/pyhf/schema/__init__.py\n--- a/src/pyhf/schema/__init__.py\n+++ b/src/pyhf/schema/__init__.py\n@@ -23,17 +23,40 @@\n \"\"\"\n A module-level wrapper around :mod:`pyhf.schema` which will provide additional functionality for interacting with schemas.\n \n- Example:\n+ .. rubric:: Example (callable)\n+\n+ .. code-block:: pycon\n+\n >>> import pyhf.schema\n >>> import pathlib\n >>> curr_path = pyhf.schema.path\n- >>> curr_path # doctest: +ELLIPSIS\n+ >>> curr_path # doctest: +ELLIPSIS\n PosixPath('.../pyhf/schemas')\n- >>> pyhf.schema(pathlib.Path('/home/root/my/new/path'))\n+ >>> new_path = pathlib.Path(\"/home/root/my/new/path\")\n+ >>> pyhf.schema(new_path) # doctest: +ELLIPSIS\n+ <module 'pyhf.schema' from ...>\n >>> pyhf.schema.path\n PosixPath('/home/root/my/new/path')\n- >>> pyhf.schema(curr_path)\n- >>> pyhf.schema.path # doctest: +ELLIPSIS\n+ >>> pyhf.schema(curr_path) # doctest: +ELLIPSIS\n+ <module 'pyhf.schema' from ...>\n+ >>> pyhf.schema.path # doctest: +ELLIPSIS\n+ PosixPath('.../pyhf/schemas')\n+\n+ .. rubric:: Example (context-manager)\n+\n+ .. code-block:: pycon\n+\n+ >>> import pyhf.schema\n+ >>> import pathlib\n+ >>> curr_path = pyhf.schema.path\n+ >>> curr_path # doctest: +ELLIPSIS\n+ PosixPath('.../pyhf/schemas')\n+ >>> new_path = pathlib.Path(\"/home/root/my/new/path\")\n+ >>> with pyhf.schema(new_path):\n+ ... print(repr(pyhf.schema.path))\n+ ...\n+ PosixPath('/home/root/my/new/path')\n+ >>> pyhf.schema.path # doctest: +ELLIPSIS\n PosixPath('.../pyhf/schemas')\n \n \"\"\"\n@@ -45,10 +68,23 @@\n Args:\n new_path (pathlib.Path): Path to folder containing the schemas\n \n+ Returns:\n+ self (pyhf.schema.Schema): Returns itself (for contextlib management)\n+ \"\"\"\n+ self.orig_path, variables.schemas = variables.schemas, new_path\n+ return self\n+\n+ def __enter__(self):\n+ pass\n+\n+ def __exit__(self, *args, **kwargs):\n+ \"\"\"\n+ Reset the local search path for finding schemas locally.\n+\n Returns:\n None\n \"\"\"\n- variables.schemas = new_path\n+ variables.schemas = self.orig_path\n \n @property\n def path(self):\n", "issue": "Add contextlib support to the pyhf.schema API\n> instead of having users overwriting the value by assignment, do you think there is value in offering a `pyhf.utils.update_schema_path` or something that performs this operation through the API?\r\n>\r\n> Thanks @kratsg for taking up my suggestion!\r\n> \r\n> I second @matthewfeickert's suggestion to change schemas via function call rather than assignment. It is much simpler to add functionality to a function if it ever becomes necessary, than to replace an entire existing module with a class instance with a property.\r\n> \r\n> I'd even go so far to say that a context manager doubling as an update function would be ideal IMO:\r\n> ```python\r\n> # in pyhf.utils\r\n> _SCHEMAS = Path(...)\r\n> class use_schema_path: # snake_case to remind of function-like usage\r\n> def __init__(self, path):\r\n> global _SCHEMAS\r\n> self._old_schemas = _SCHEMAS\r\n> _SCHEMAS = pathlib.Path(path)\r\n> def __enter__(self):\r\n> pass\r\n> def __exit__(self, *args, **kwargs):\r\n> global _SCHEMAS\r\n> _SCHEMAS = self._old_schemas\r\n> ```\r\n> which can still be called as a function (only executing `__init__`), so short scripts etc. 
are not forced to use `with` blocks.\r\n> But it can also be used like so:\r\n> ```python\r\n> def make_my_workspace(spec):\r\n> with pyhf.utils.use_schema_path('/my/very/special/schemas'):\r\n> return pyhf.Workspace(spec)\r\n> ```\r\n> So as a user writing code on top of pyhf, I don't have to worry about resesetting the global variable, the CM does it for me, and there are fewer mistakes to make.\r\n> \r\n_Originally posted by @lhenkelm in https://github.com/scikit-hep/pyhf/issues/1753#issuecomment-1026678066_\n", "before_files": [{"content": "\"\"\"\nSee :class:`~pyhf.schema.Schema` for documentation.\n\"\"\"\nimport pathlib\nimport sys\nfrom pyhf.schema.loader import load_schema\nfrom pyhf.schema.validator import validate\nfrom pyhf.schema import variables\n\n__all__ = [\n \"load_schema\",\n \"validate\",\n \"path\",\n \"version\",\n]\n\n\ndef __dir__():\n return __all__\n\n\nclass Schema(sys.modules[__name__].__class__):\n \"\"\"\n A module-level wrapper around :mod:`pyhf.schema` which will provide additional functionality for interacting with schemas.\n\n Example:\n >>> import pyhf.schema\n >>> import pathlib\n >>> curr_path = pyhf.schema.path\n >>> curr_path # doctest: +ELLIPSIS\n PosixPath('.../pyhf/schemas')\n >>> pyhf.schema(pathlib.Path('/home/root/my/new/path'))\n >>> pyhf.schema.path\n PosixPath('/home/root/my/new/path')\n >>> pyhf.schema(curr_path)\n >>> pyhf.schema.path # doctest: +ELLIPSIS\n PosixPath('.../pyhf/schemas')\n\n \"\"\"\n\n def __call__(self, new_path: pathlib.Path):\n \"\"\"\n Change the local search path for finding schemas locally.\n\n Args:\n new_path (pathlib.Path): Path to folder containing the schemas\n\n Returns:\n None\n \"\"\"\n variables.schemas = new_path\n\n @property\n def path(self):\n \"\"\"\n The local path for schemas.\n \"\"\"\n return variables.schemas\n\n @property\n def version(self):\n \"\"\"\n The default version used for finding schemas.\n \"\"\"\n return variables.SCHEMA_VERSION\n\n\nsys.modules[__name__].__class__ = Schema\n", "path": "src/pyhf/schema/__init__.py"}]} | 1,486 | 657 |
gh_patches_debug_3498 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-1530 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Social Network Login Failure
When clicking on connect GitHub on the [social accounts page](https://readthedocs.org/accounts/social/connections/?) I get a message:
> An error occurred while attempting to login via your social network account.
There's a `?` in the URL. Could that be a hint? Is it missing some request arguments? If I omit it, the bug persists.
Cheers!
</issue>
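For context on the eventual fix (visible in the patch at the end of this entry): django-allauth builds OAuth callback URLs with `http://` by default, so behind an SSL-terminating proxy the redirect URI no longer matches what was registered with the provider. A sketch of the two settings involved, assuming that kind of proxy deployment; both settings existed in Django/allauth at the time:

```python
# settings.py sketch for a Django + django-allauth site behind an
# SSL-terminating proxy (values illustrative).
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")

# Make allauth build its login/callback URLs with https so the redirect URI
# matches the one registered with the provider (e.g. the GitHub OAuth app).
ACCOUNT_DEFAULT_HTTP_PROTOCOL = "https"
```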
<code>
[start of readthedocs/settings/postgres.py]
1 import os
2
3 from .base import * # noqa
4
5
6 DATABASES = {
7 'default': {
8 'ENGINE': 'django.db.backends.postgresql_psycopg2',
9 'NAME': 'docs',
10 'USER': 'postgres', # Not used with sqlite3.
11 'PASSWORD': '',
12 'HOST': '10.177.73.97',
13 'PORT': '',
14 }
15 }
16
17 DEBUG = False
18 TEMPLATE_DEBUG = False
19 CELERY_ALWAYS_EAGER = False
20
21 MEDIA_URL = 'https://media.readthedocs.org/'
22 STATIC_URL = 'https://media.readthedocs.org/static/'
23 ADMIN_MEDIA_PREFIX = MEDIA_URL + 'admin/'
24 SESSION_ENGINE = "django.contrib.sessions.backends.cached_db"
25
26 HAYSTACK_CONNECTIONS = {
27 'default': {
28 'ENGINE': 'haystack.backends.solr_backend.SolrEngine',
29 'URL': 'http://odin:8983/solr',
30 }
31 }
32
33 CACHES = {
34 'default': {
35 'BACKEND': 'redis_cache.RedisCache',
36 'LOCATION': 'localhost:6379',
37 'PREFIX': 'docs',
38 'OPTIONS': {
39 'DB': 1,
40 'PARSER_CLASS': 'redis.connection.HiredisParser'
41 },
42 },
43 }
44
45 # Elasticsearch settings.
46 ES_HOSTS = ['backup:9200', 'db:9200']
47 ES_DEFAULT_NUM_REPLICAS = 1
48 ES_DEFAULT_NUM_SHARDS = 5
49
50 SLUMBER_API_HOST = 'https://readthedocs.org'
51 WEBSOCKET_HOST = 'websocket.readthedocs.org:8088'
52
53 PRODUCTION_DOMAIN = 'readthedocs.org'
54 USE_SUBDOMAIN = True
55 NGINX_X_ACCEL_REDIRECT = True
56
57 SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
58
59 # Lock builds for 10 minutes
60 REPO_LOCK_SECONDS = 300
61
62 # Don't re-confirm existing accounts
63 ACCOUNT_EMAIL_VERIFICATION = 'none'
64
65 FILE_SYNCER = 'privacy.backends.syncers.DoubleRemotePuller'
66
67 # set GitHub scope
68 SOCIALACCOUNT_PROVIDERS = {
69 'github': {'SCOPE': ['user:email', 'read:org', 'admin:repo_hook', 'repo:status']}
70 }
71
72 if not os.environ.get('DJANGO_SETTINGS_SKIP_LOCAL', False):
73 try:
74 from local_settings import * # noqa
75 except ImportError:
76 pass
77
[end of readthedocs/settings/postgres.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/readthedocs/settings/postgres.py b/readthedocs/settings/postgres.py
--- a/readthedocs/settings/postgres.py
+++ b/readthedocs/settings/postgres.py
@@ -69,6 +69,9 @@
'github': {'SCOPE': ['user:email', 'read:org', 'admin:repo_hook', 'repo:status']}
}
+# allauth settings
+ACCOUNT_DEFAULT_HTTP_PROTOCOL = 'https'
+
if not os.environ.get('DJANGO_SETTINGS_SKIP_LOCAL', False):
try:
from local_settings import * # noqa
| {"golden_diff": "diff --git a/readthedocs/settings/postgres.py b/readthedocs/settings/postgres.py\n--- a/readthedocs/settings/postgres.py\n+++ b/readthedocs/settings/postgres.py\n@@ -69,6 +69,9 @@\n 'github': {'SCOPE': ['user:email', 'read:org', 'admin:repo_hook', 'repo:status']}\n }\n \n+# allauth settings\n+ACCOUNT_DEFAULT_HTTP_PROTOCOL = 'https'\n+\n if not os.environ.get('DJANGO_SETTINGS_SKIP_LOCAL', False):\n try:\n from local_settings import * # noqa\n", "issue": "Social Network Login Failure\nWhen clicking on connect GitHub on the [social accounts page](https://readthedocs.org/accounts/social/connections/?) I get a message:\n\n> An error occurred while attempting to login via your social network account.\n\nThere's a `?` in the url. Could that be a hint? Is it missing some request arguments? If I omit it, the bug persists.\n\nCheers!\n\n", "before_files": [{"content": "import os\n\nfrom .base import * # noqa\n\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'docs',\n 'USER': 'postgres', # Not used with sqlite3.\n 'PASSWORD': '',\n 'HOST': '10.177.73.97',\n 'PORT': '',\n }\n}\n\nDEBUG = False\nTEMPLATE_DEBUG = False\nCELERY_ALWAYS_EAGER = False\n\nMEDIA_URL = 'https://media.readthedocs.org/'\nSTATIC_URL = 'https://media.readthedocs.org/static/'\nADMIN_MEDIA_PREFIX = MEDIA_URL + 'admin/'\nSESSION_ENGINE = \"django.contrib.sessions.backends.cached_db\"\n\nHAYSTACK_CONNECTIONS = {\n 'default': {\n 'ENGINE': 'haystack.backends.solr_backend.SolrEngine',\n 'URL': 'http://odin:8983/solr',\n }\n}\n\nCACHES = {\n 'default': {\n 'BACKEND': 'redis_cache.RedisCache',\n 'LOCATION': 'localhost:6379',\n 'PREFIX': 'docs',\n 'OPTIONS': {\n 'DB': 1,\n 'PARSER_CLASS': 'redis.connection.HiredisParser'\n },\n },\n}\n\n# Elasticsearch settings.\nES_HOSTS = ['backup:9200', 'db:9200']\nES_DEFAULT_NUM_REPLICAS = 1\nES_DEFAULT_NUM_SHARDS = 5\n\nSLUMBER_API_HOST = 'https://readthedocs.org'\nWEBSOCKET_HOST = 'websocket.readthedocs.org:8088'\n\nPRODUCTION_DOMAIN = 'readthedocs.org'\nUSE_SUBDOMAIN = True\nNGINX_X_ACCEL_REDIRECT = True\n\nSECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n\n# Lock builds for 10 minutes\nREPO_LOCK_SECONDS = 300\n\n# Don't re-confirm existing accounts\nACCOUNT_EMAIL_VERIFICATION = 'none'\n\nFILE_SYNCER = 'privacy.backends.syncers.DoubleRemotePuller'\n\n# set GitHub scope\nSOCIALACCOUNT_PROVIDERS = {\n 'github': {'SCOPE': ['user:email', 'read:org', 'admin:repo_hook', 'repo:status']}\n}\n\nif not os.environ.get('DJANGO_SETTINGS_SKIP_LOCAL', False):\n try:\n from local_settings import * # noqa\n except ImportError:\n pass\n", "path": "readthedocs/settings/postgres.py"}]} | 1,295 | 126 |
gh_patches_debug_16013 | rasdani/github-patches | git_diff | pyro-ppl__pyro-3167 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bug with OneCycleLR on Apple Silicon
### Guidelines
**NOTE:** Issues are for bugs and feature requests only. If you have a question about using Pyro or general modeling questions, please post it on the [forum](https://forum.pyro.ai/).
If you would like to address any minor bugs in the documentation or source, please feel free to contribute a Pull Request without creating an issue first.
Please tag the issue appropriately in the title e.g. [bug], [feature request], [discussion], etc.
Please provide the following details:
--------------------------------------------------------------------------------------------------
### Issue Description
Provide a brief description of the issue.
### Environment
For any bugs, please provide the following:
- OS and python version.
- PyTorch version, or if relevant, output of `pip freeze`.
- Pyro version: output of `python -c 'import pyro; print pyro.__version__'`
### Code Snippet
Provide any relevant code snippets and commands run to replicate the issue.
</issue>
<code>
[start of pyro/optim/pytorch_optimizers.py]
1 # Copyright (c) 2017-2019 Uber Technologies, Inc.
2 # SPDX-License-Identifier: Apache-2.0
3
4 import torch
5
6 from pyro.optim import PyroOptim
7 from pyro.optim.lr_scheduler import PyroLRScheduler
8
9 __all__ = []
10 # Programmatically load all optimizers from PyTorch.
11 for _name, _Optim in torch.optim.__dict__.items():
12 if not isinstance(_Optim, type):
13 continue
14 if not issubclass(_Optim, torch.optim.Optimizer):
15 continue
16 if _Optim is torch.optim.Optimizer:
17 continue
18 if _Optim is torch.optim.LBFGS:
19 # XXX LBFGS is not supported for SVI yet
20 continue
21
22 _PyroOptim = (
23 lambda _Optim: lambda optim_args, clip_args=None: PyroOptim(
24 _Optim, optim_args, clip_args
25 )
26 )(_Optim)
27 _PyroOptim.__name__ = _name
28 _PyroOptim.__doc__ = "Wraps :class:`torch.optim.{}` with :class:`~pyro.optim.optim.PyroOptim`.".format(
29 _name
30 )
31
32 locals()[_name] = _PyroOptim
33 __all__.append(_name)
34 del _PyroOptim
35
36 # Load all schedulers from PyTorch
37 for _name, _Optim in torch.optim.lr_scheduler.__dict__.items():
38 if not isinstance(_Optim, type):
39 continue
40 if (
41 not issubclass(_Optim, torch.optim.lr_scheduler._LRScheduler)
42 and _name != "ReduceLROnPlateau"
43 ):
44 continue
45 if _Optim is torch.optim.Optimizer:
46 continue
47
48 _PyroOptim = (
49 lambda _Optim: lambda optim_args, clip_args=None: PyroLRScheduler(
50 _Optim, optim_args, clip_args
51 )
52 )(_Optim)
53 _PyroOptim.__name__ = _name
54 _PyroOptim.__doc__ = (
55 "Wraps :class:`torch.optim.{}` with ".format(_name)
56 + ":class:`~pyro.optim.lr_scheduler.PyroLRScheduler`."
57 )
58
59 locals()[_name] = _PyroOptim
60 __all__.append(_name)
61 del _PyroOptim
62
[end of pyro/optim/pytorch_optimizers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyro/optim/pytorch_optimizers.py b/pyro/optim/pytorch_optimizers.py
--- a/pyro/optim/pytorch_optimizers.py
+++ b/pyro/optim/pytorch_optimizers.py
@@ -34,13 +34,16 @@
del _PyroOptim
# Load all schedulers from PyTorch
+# breaking change in torch >= 1.14: LRScheduler is new base class
+if hasattr(torch.optim.lr_scheduler, "LRScheduler"):
+ _torch_scheduler_base = torch.optim.lr_scheduler.LRScheduler
+else: # for torch < 1.13, _LRScheduler is base class
+ _torch_scheduler_base = torch.optim.lr_scheduler._LRScheduler
+
for _name, _Optim in torch.optim.lr_scheduler.__dict__.items():
if not isinstance(_Optim, type):
continue
- if (
- not issubclass(_Optim, torch.optim.lr_scheduler._LRScheduler)
- and _name != "ReduceLROnPlateau"
- ):
+ if not issubclass(_Optim, _torch_scheduler_base) and _name != "ReduceLROnPlateau":
continue
if _Optim is torch.optim.Optimizer:
continue
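A standalone sketch of the shim introduced above; it assumes only that PyTorch renamed the scheduler base class (newer releases expose the public `LRScheduler`, older ones only the underscored `_LRScheduler`), and it needs a PyTorch install to run:

```python
import torch

# Pick whichever base class this torch build exposes.
if hasattr(torch.optim.lr_scheduler, "LRScheduler"):  # newer torch
    _scheduler_base = torch.optim.lr_scheduler.LRScheduler
else:  # older torch only has the underscored name
    _scheduler_base = torch.optim.lr_scheduler._LRScheduler

# OneCycleLR subclasses the base on both old and new torch, so programmatic
# scheduler discovery against ``_scheduler_base`` works either way.
print(issubclass(torch.optim.lr_scheduler.OneCycleLR, _scheduler_base))  # True
```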
| {"golden_diff": "diff --git a/pyro/optim/pytorch_optimizers.py b/pyro/optim/pytorch_optimizers.py\n--- a/pyro/optim/pytorch_optimizers.py\n+++ b/pyro/optim/pytorch_optimizers.py\n@@ -34,13 +34,16 @@\n del _PyroOptim\n \n # Load all schedulers from PyTorch\n+# breaking change in torch >= 1.14: LRScheduler is new base class\n+if hasattr(torch.optim.lr_scheduler, \"LRScheduler\"):\n+ _torch_scheduler_base = torch.optim.lr_scheduler.LRScheduler\n+else: # for torch < 1.13, _LRScheduler is base class\n+ _torch_scheduler_base = torch.optim.lr_scheduler._LRScheduler\n+\n for _name, _Optim in torch.optim.lr_scheduler.__dict__.items():\n if not isinstance(_Optim, type):\n continue\n- if (\n- not issubclass(_Optim, torch.optim.lr_scheduler._LRScheduler)\n- and _name != \"ReduceLROnPlateau\"\n- ):\n+ if not issubclass(_Optim, _torch_scheduler_base) and _name != \"ReduceLROnPlateau\":\n continue\n if _Optim is torch.optim.Optimizer:\n continue\n", "issue": "bug with OneCycleLR on Apple Silicone \n### Guidelines\r\n\r\n**NOTE:** Issues are for bugs and feature requests only. If you have a question about using Pyro or general modeling questions, please post it on the [forum](https://forum.pyro.ai/).\r\n\r\nIf you would like to address any minor bugs in the documentation or source, please feel free to contribute a Pull Request without creating an issue first. \r\n\r\nPlease tag the issue appropriately in the title e.g. [bug], [feature request], [discussion], etc.\r\n\r\nPlease provide the following details:\r\n--------------------------------------------------------------------------------------------------\r\n### Issue Description\r\nProvide a brief description of the issue.\r\n\r\n### Environment\r\nFor any bugs, please provide the following:\r\n - OS and python version.\r\n - PyTorch version, or if relevant, output of `pip freeze`.\r\n - Pyro version: output of `python -c 'import pyro; print pyro.__version__'`\r\n\r\n### Code Snippet\r\nProvide any relevant code snippets and commands run to replicate the issue.\r\n\n", "before_files": [{"content": "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport torch\n\nfrom pyro.optim import PyroOptim\nfrom pyro.optim.lr_scheduler import PyroLRScheduler\n\n__all__ = []\n# Programmatically load all optimizers from PyTorch.\nfor _name, _Optim in torch.optim.__dict__.items():\n if not isinstance(_Optim, type):\n continue\n if not issubclass(_Optim, torch.optim.Optimizer):\n continue\n if _Optim is torch.optim.Optimizer:\n continue\n if _Optim is torch.optim.LBFGS:\n # XXX LBFGS is not supported for SVI yet\n continue\n\n _PyroOptim = (\n lambda _Optim: lambda optim_args, clip_args=None: PyroOptim(\n _Optim, optim_args, clip_args\n )\n )(_Optim)\n _PyroOptim.__name__ = _name\n _PyroOptim.__doc__ = \"Wraps :class:`torch.optim.{}` with :class:`~pyro.optim.optim.PyroOptim`.\".format(\n _name\n )\n\n locals()[_name] = _PyroOptim\n __all__.append(_name)\n del _PyroOptim\n\n# Load all schedulers from PyTorch\nfor _name, _Optim in torch.optim.lr_scheduler.__dict__.items():\n if not isinstance(_Optim, type):\n continue\n if (\n not issubclass(_Optim, torch.optim.lr_scheduler._LRScheduler)\n and _name != \"ReduceLROnPlateau\"\n ):\n continue\n if _Optim is torch.optim.Optimizer:\n continue\n\n _PyroOptim = (\n lambda _Optim: lambda optim_args, clip_args=None: PyroLRScheduler(\n _Optim, optim_args, clip_args\n )\n )(_Optim)\n _PyroOptim.__name__ = _name\n _PyroOptim.__doc__ = (\n \"Wraps :class:`torch.optim.{}` with 
\".format(_name)\n + \":class:`~pyro.optim.lr_scheduler.PyroLRScheduler`.\"\n )\n\n locals()[_name] = _PyroOptim\n __all__.append(_name)\n del _PyroOptim\n", "path": "pyro/optim/pytorch_optimizers.py"}]} | 1,401 | 286 |
gh_patches_debug_67407 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-1711 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pathoc does not accept `:pa,f` to pause forever at end of message
##### Steps to reproduce the problem:
`pathoc www.example.com 'get:/:pa,f'`
##### What is the expected behavior?
Send request, but pause forever after sending.
##### What went wrong?
I get a stack trace with "a float is required".
```
$ pathoc www.example.com 'get:/:pa,f'
08-09-16 16:59:41: >> 'GET':/:pa,f
Traceback (most recent call last):
File "/usr/local/bin/pathoc", line 11, in <module>
sys.exit(go_pathoc())
File "/usr/local/lib/python2.7/dist-packages/pathod/pathoc_cmdline.py", line 226, in go_pathoc
pathoc.main(args)
File "/usr/local/lib/python2.7/dist-packages/pathod/pathoc.py", line 522, in main
ret = p.request(spec)
File "/usr/local/lib/python2.7/dist-packages/pathod/pathoc.py", line 452, in request
return self.http(r)
File "/usr/local/lib/python2.7/dist-packages/pathod/pathoc.py", line 432, in http
return resp
File "/usr/local/lib/python2.7/dist-packages/pathod/pathoc.py", line 411, in http
req = language.serve(r, self.wfile, self.settings)
File "/usr/local/lib/python2.7/dist-packages/pathod/language/__init__.py", line 105, in serve
disconnect = writer.write_values(fp, vals, actions[:])
File "/usr/local/lib/python2.7/dist-packages/pathod/language/writer.py", line 61, in write_values
time.sleep(a[2])
TypeError: a float is required
```
##### Any other comments? What have you tried so far?
All other combinations of pause flags work as expected:
```
$ pathoc www.example.com 'get:/:p2,5'
08-09-16 17:05:07: >> 'GET':/:p2,5
<< 200 OK: 1270 bytes
$ pathoc www.example.com 'get:/:pr,5'
08-09-16 17:05:21: >> 'GET':/:pr,5
<< 200 OK: 1270 bytes
$ pathoc www.example.com 'get:/:pa,5'
08-09-16 17:05:41: >> 'GET':/:pa,5
<< 200 OK: 1270 bytes
$ pathoc www.example.com 'get:/:p2,f'
^C08-09-16 17:04:46: >> 'GET':/:p2,f
$ pathoc www.example.com 'get:/:pr,f'
^C08-09-16 17:04:55: >> 'GET':/:pr,f
```
---
pathoc version: 0.17
Operating System: Debian Linux 8.5 "Jessie" x64
</issue>
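The traceback above already pinpoints the bug: pause arguments are either a number or the literal string `"f"`, and only the in-stream branch translated `"f"` before calling `time.sleep`, while the remainder loop passed it through raw. A minimal sketch of the failure mode; the constant mirrors the one in `writer.py`:

```python
import time

FOREVER = 60 * 60 * 24 * 365  # writer.py's "close enough to forever" value

def sleep_for(arg):
    """Translate a pathoc pause argument before sleeping, as the fix does."""
    time.sleep(FOREVER if arg == "f" else arg)

sleep_for(0.01)      # numeric pauses always worked
try:
    time.sleep("f")  # what the untranslated remainder loop effectively did
except TypeError as err:
    print(err)       # "a float is required" on Python 2; similar on Python 3
```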
<code>
[start of pathod/language/writer.py]
1 import time
2 from mitmproxy import exceptions
3
4 BLOCKSIZE = 1024
5 # It's not clear what the upper limit for time.sleep is. It's lower than the
6 # maximum int or float. 1 year should do.
7 FOREVER = 60 * 60 * 24 * 365
8
9
10 def send_chunk(fp, val, blocksize, start, end):
11 """
12 (start, end): Inclusive lower bound, exclusive upper bound.
13 """
14 for i in range(start, end, blocksize):
15 fp.write(
16 val[i:min(i + blocksize, end)]
17 )
18 return end - start
19
20
21 def write_values(fp, vals, actions, sofar=0, blocksize=BLOCKSIZE):
22 """
23 vals: A list of values, which may be strings or Value objects.
24
25 actions: A list of (offset, action, arg) tuples. Action may be "inject",
26 "pause" or "disconnect".
27
28 Both vals and actions are in reverse order, with the first items last.
29
30 Return True if connection should disconnect.
31 """
32 sofar = 0
33 try:
34 while vals:
35 v = vals.pop()
36 offset = 0
37 while actions and actions[-1][0] < (sofar + len(v)):
38 a = actions.pop()
39 offset += send_chunk(
40 fp,
41 v,
42 blocksize,
43 offset,
44 a[0] - sofar - offset
45 )
46 if a[1] == "pause":
47 time.sleep(
48 FOREVER if a[2] == "f" else a[2]
49 )
50 elif a[1] == "disconnect":
51 return True
52 elif a[1] == "inject":
53 send_chunk(fp, a[2], blocksize, 0, len(a[2]))
54 send_chunk(fp, v, blocksize, offset, len(v))
55 sofar += len(v)
56 # Remainders
57 while actions:
58 a = actions.pop()
59 if a[1] == "pause":
60 time.sleep(a[2])
61 elif a[1] == "disconnect":
62 return True
63 elif a[1] == "inject":
64 send_chunk(fp, a[2], blocksize, 0, len(a[2]))
65 except exceptions.TcpDisconnect: # pragma: no cover
66 return True
67
[end of pathod/language/writer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pathod/language/writer.py b/pathod/language/writer.py
--- a/pathod/language/writer.py
+++ b/pathod/language/writer.py
@@ -57,7 +57,9 @@
while actions:
a = actions.pop()
if a[1] == "pause":
- time.sleep(a[2])
+ time.sleep(
+ FOREVER if a[2] == "f" else a[2]
+ )
elif a[1] == "disconnect":
return True
elif a[1] == "inject":
| {"golden_diff": "diff --git a/pathod/language/writer.py b/pathod/language/writer.py\n--- a/pathod/language/writer.py\n+++ b/pathod/language/writer.py\n@@ -57,7 +57,9 @@\n while actions:\n a = actions.pop()\n if a[1] == \"pause\":\n- time.sleep(a[2])\n+ time.sleep(\n+ FOREVER if a[2] == \"f\" else a[2]\n+ )\n elif a[1] == \"disconnect\":\n return True\n elif a[1] == \"inject\":\n", "issue": "pathoc does not accept `:pa,f` to pause forever at end of message\n##### Steps to reproduce the problem:\n\n`pathoc www.example.com 'get:/:pa,f'`\n##### What is the expected behavior?\n\nSend request, but pause forever after sending.\n##### What went wrong?\n\nI get a stack trace with \"a float is required\".\n\n```\n$ pathoc www.example.com 'get:/:pa,f'\n08-09-16 16:59:41: >> 'GET':/:pa,f\nTraceback (most recent call last):\n File \"/usr/local/bin/pathoc\", line 11, in <module>\n sys.exit(go_pathoc())\n File \"/usr/local/lib/python2.7/dist-packages/pathod/pathoc_cmdline.py\", line 226, in go_pathoc\n pathoc.main(args)\n File \"/usr/local/lib/python2.7/dist-packages/pathod/pathoc.py\", line 522, in main\n ret = p.request(spec)\n File \"/usr/local/lib/python2.7/dist-packages/pathod/pathoc.py\", line 452, in request\n return self.http(r)\n File \"/usr/local/lib/python2.7/dist-packages/pathod/pathoc.py\", line 432, in http\n return resp\n File \"/usr/local/lib/python2.7/dist-packages/pathod/pathoc.py\", line 411, in http\n req = language.serve(r, self.wfile, self.settings)\n File \"/usr/local/lib/python2.7/dist-packages/pathod/language/__init__.py\", line 105, in serve\n disconnect = writer.write_values(fp, vals, actions[:])\n File \"/usr/local/lib/python2.7/dist-packages/pathod/language/writer.py\", line 61, in write_values\n time.sleep(a[2])\nTypeError: a float is required\n```\n##### Any other comments? What have you tried so far?\n\nAll other combinations of pause flags work as expected:\n\n```\n$ pathoc www.example.com 'get:/:p2,5'\n08-09-16 17:05:07: >> 'GET':/:p2,5\n<< 200 OK: 1270 bytes\n$ pathoc www.example.com 'get:/:pr,5'\n08-09-16 17:05:21: >> 'GET':/:pr,5\n<< 200 OK: 1270 bytes\n$ pathoc www.example.com 'get:/:pa,5'\n08-09-16 17:05:41: >> 'GET':/:pa,5\n<< 200 OK: 1270 bytes\n$ pathoc www.example.com 'get:/:p2,f'\n^C08-09-16 17:04:46: >> 'GET':/:p2,f\n$ pathoc www.example.com 'get:/:pr,f'\n^C08-09-16 17:04:55: >> 'GET':/:pr,f\n```\n\n---\n\npathoc version: 0.17\nOperating System: Debian Linux 8.5 \"Jessie\" x64\n\n", "before_files": [{"content": "import time\nfrom mitmproxy import exceptions\n\nBLOCKSIZE = 1024\n# It's not clear what the upper limit for time.sleep is. It's lower than the\n# maximum int or float. 1 year should do.\nFOREVER = 60 * 60 * 24 * 365\n\n\ndef send_chunk(fp, val, blocksize, start, end):\n \"\"\"\n (start, end): Inclusive lower bound, exclusive upper bound.\n \"\"\"\n for i in range(start, end, blocksize):\n fp.write(\n val[i:min(i + blocksize, end)]\n )\n return end - start\n\n\ndef write_values(fp, vals, actions, sofar=0, blocksize=BLOCKSIZE):\n \"\"\"\n vals: A list of values, which may be strings or Value objects.\n\n actions: A list of (offset, action, arg) tuples. 
Action may be \"inject\",\n \"pause\" or \"disconnect\".\n\n Both vals and actions are in reverse order, with the first items last.\n\n Return True if connection should disconnect.\n \"\"\"\n sofar = 0\n try:\n while vals:\n v = vals.pop()\n offset = 0\n while actions and actions[-1][0] < (sofar + len(v)):\n a = actions.pop()\n offset += send_chunk(\n fp,\n v,\n blocksize,\n offset,\n a[0] - sofar - offset\n )\n if a[1] == \"pause\":\n time.sleep(\n FOREVER if a[2] == \"f\" else a[2]\n )\n elif a[1] == \"disconnect\":\n return True\n elif a[1] == \"inject\":\n send_chunk(fp, a[2], blocksize, 0, len(a[2]))\n send_chunk(fp, v, blocksize, offset, len(v))\n sofar += len(v)\n # Remainders\n while actions:\n a = actions.pop()\n if a[1] == \"pause\":\n time.sleep(a[2])\n elif a[1] == \"disconnect\":\n return True\n elif a[1] == \"inject\":\n send_chunk(fp, a[2], blocksize, 0, len(a[2]))\n except exceptions.TcpDisconnect: # pragma: no cover\n return True\n", "path": "pathod/language/writer.py"}]} | 1,891 | 127 |
gh_patches_debug_18789 | rasdani/github-patches | git_diff | numpy__numpy-14207 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Wrong capitalization of BitGenerator MT19937
Wrong capitalization of MT19937 in default argument values of functions in numpy.random._pickle.py
### Reproducing code example:
```python
import numpy as np
np.random._pickle.__randomstate_ctor() # => error
np.random._pickle.__randomstate_ctor('mt19937') # => error
np.random._pickle.__randomstate_ctor('MT19937') # => works
```
### Error message:
*** ValueError: mt19937 is not a known BitGenerator module.
### Numpy/Python version information:
1.17.0 3.6.8 (default, Jan 14 2019, 11:02:34)
[GCC 8.0.1 20180414 (experimental) [trunk revision 259383]]
</issue>
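The reproduction boils down to a dictionary lookup against case-sensitive keys. A self-contained sketch; the registry and constructor are stand-ins for the ones in `numpy.random._pickle`:

```python
# The registry is keyed by the exact class name ("MT19937"), so a
# lower-case default argument can never match it.
BitGenerators = {"MT19937": object}  # stand-in for the real mapping

def randomstate_ctor(bit_generator_name="mt19937"):  # buggy lower-case default
    if bit_generator_name not in BitGenerators:
        raise ValueError(
            bit_generator_name + " is not a known BitGenerator module."
        )

randomstate_ctor("MT19937")  # works: matches the registry key exactly
try:
    randomstate_ctor()       # the default "mt19937" misses the key
except ValueError as err:
    print(err)               # mt19937 is not a known BitGenerator module.
```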
<code>
[start of numpy/random/_pickle.py]
1 from .mtrand import RandomState
2 from .philox import Philox
3 from .pcg64 import PCG64
4 from .sfc64 import SFC64
5
6 from .generator import Generator
7 from .mt19937 import MT19937
8
9 BitGenerators = {'MT19937': MT19937,
10 'PCG64': PCG64,
11 'Philox': Philox,
12 'SFC64': SFC64,
13 }
14
15
16 def __generator_ctor(bit_generator_name='mt19937'):
17 """
18 Pickling helper function that returns a Generator object
19
20 Parameters
21 ----------
22 bit_generator_name: str
23 String containing the core BitGenerator
24
25 Returns
26 -------
27 rg: Generator
28 Generator using the named core BitGenerator
29 """
30 if bit_generator_name in BitGenerators:
31 bit_generator = BitGenerators[bit_generator_name]
32 else:
33 raise ValueError(str(bit_generator_name) + ' is not a known '
34 'BitGenerator module.')
35
36 return Generator(bit_generator())
37
38
39 def __bit_generator_ctor(bit_generator_name='mt19937'):
40 """
41 Pickling helper function that returns a bit generator object
42
43 Parameters
44 ----------
45 bit_generator_name: str
46 String containing the name of the BitGenerator
47
48 Returns
49 -------
50 bit_generator: BitGenerator
51 BitGenerator instance
52 """
53 if bit_generator_name in BitGenerators:
54 bit_generator = BitGenerators[bit_generator_name]
55 else:
56 raise ValueError(str(bit_generator_name) + ' is not a known '
57 'BitGenerator module.')
58
59 return bit_generator()
60
61
62 def __randomstate_ctor(bit_generator_name='mt19937'):
63 """
64 Pickling helper function that returns a legacy RandomState-like object
65
66 Parameters
67 ----------
68 bit_generator_name: str
69 String containing the core BitGenerator
70
71 Returns
72 -------
73 rs: RandomState
74 Legacy RandomState using the named core BitGenerator
75 """
76 if bit_generator_name in BitGenerators:
77 bit_generator = BitGenerators[bit_generator_name]
78 else:
79 raise ValueError(str(bit_generator_name) + ' is not a known '
80 'BitGenerator module.')
81
82 return RandomState(bit_generator())
83
[end of numpy/random/_pickle.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/numpy/random/_pickle.py b/numpy/random/_pickle.py
--- a/numpy/random/_pickle.py
+++ b/numpy/random/_pickle.py
@@ -13,7 +13,7 @@
}
-def __generator_ctor(bit_generator_name='mt19937'):
+def __generator_ctor(bit_generator_name='MT19937'):
"""
Pickling helper function that returns a Generator object
@@ -36,7 +36,7 @@
return Generator(bit_generator())
-def __bit_generator_ctor(bit_generator_name='mt19937'):
+def __bit_generator_ctor(bit_generator_name='MT19937'):
"""
Pickling helper function that returns a bit generator object
@@ -59,7 +59,7 @@
return bit_generator()
-def __randomstate_ctor(bit_generator_name='mt19937'):
+def __randomstate_ctor(bit_generator_name='MT19937'):
"""
Pickling helper function that returns a legacy RandomState-like object
| {"golden_diff": "diff --git a/numpy/random/_pickle.py b/numpy/random/_pickle.py\n--- a/numpy/random/_pickle.py\n+++ b/numpy/random/_pickle.py\n@@ -13,7 +13,7 @@\n }\n \n \n-def __generator_ctor(bit_generator_name='mt19937'):\n+def __generator_ctor(bit_generator_name='MT19937'):\n \"\"\"\n Pickling helper function that returns a Generator object\n \n@@ -36,7 +36,7 @@\n return Generator(bit_generator())\n \n \n-def __bit_generator_ctor(bit_generator_name='mt19937'):\n+def __bit_generator_ctor(bit_generator_name='MT19937'):\n \"\"\"\n Pickling helper function that returns a bit generator object\n \n@@ -59,7 +59,7 @@\n return bit_generator()\n \n \n-def __randomstate_ctor(bit_generator_name='mt19937'):\n+def __randomstate_ctor(bit_generator_name='MT19937'):\n \"\"\"\n Pickling helper function that returns a legacy RandomState-like object\n", "issue": "Wrong capitalization on bitgenerator MT19937\nWrong capitalization of MT19937 in default argument values of functions in numpy.random._pickle.py\r\n\r\n### Reproducing code example:\r\n\r\n```python\r\nimport numpy as np\r\nnp.random._pickle.__randomstate_ctor() # => error\r\nnp.random._pickle.__randomstate_ctor('mt19937') # => error\r\nnp.random._pickle.__randomstate_ctor('MT19937') # => works\r\n```\r\n\r\n### Error message:\r\n\r\n*** ValueError: mt19937 is not a known BitGenerator module.\r\n\r\n### Numpy/Python version information:\r\n\r\n1.17.0 3.6.8 (default, Jan 14 2019, 11:02:34) \r\n[GCC 8.0.1 20180414 (experimental) [trunk revision 259383]]\r\n\n", "before_files": [{"content": "from .mtrand import RandomState\nfrom .philox import Philox\nfrom .pcg64 import PCG64\nfrom .sfc64 import SFC64\n\nfrom .generator import Generator\nfrom .mt19937 import MT19937\n\nBitGenerators = {'MT19937': MT19937,\n 'PCG64': PCG64,\n 'Philox': Philox,\n 'SFC64': SFC64,\n }\n\n\ndef __generator_ctor(bit_generator_name='mt19937'):\n \"\"\"\n Pickling helper function that returns a Generator object\n\n Parameters\n ----------\n bit_generator_name: str\n String containing the core BitGenerator\n\n Returns\n -------\n rg: Generator\n Generator using the named core BitGenerator\n \"\"\"\n if bit_generator_name in BitGenerators:\n bit_generator = BitGenerators[bit_generator_name]\n else:\n raise ValueError(str(bit_generator_name) + ' is not a known '\n 'BitGenerator module.')\n\n return Generator(bit_generator())\n\n\ndef __bit_generator_ctor(bit_generator_name='mt19937'):\n \"\"\"\n Pickling helper function that returns a bit generator object\n\n Parameters\n ----------\n bit_generator_name: str\n String containing the name of the BitGenerator\n\n Returns\n -------\n bit_generator: BitGenerator\n BitGenerator instance\n \"\"\"\n if bit_generator_name in BitGenerators:\n bit_generator = BitGenerators[bit_generator_name]\n else:\n raise ValueError(str(bit_generator_name) + ' is not a known '\n 'BitGenerator module.')\n\n return bit_generator()\n\n\ndef __randomstate_ctor(bit_generator_name='mt19937'):\n \"\"\"\n Pickling helper function that returns a legacy RandomState-like object\n\n Parameters\n ----------\n bit_generator_name: str\n String containing the core BitGenerator\n\n Returns\n -------\n rs: RandomState\n Legacy RandomState using the named core BitGenerator\n \"\"\"\n if bit_generator_name in BitGenerators:\n bit_generator = BitGenerators[bit_generator_name]\n else:\n raise ValueError(str(bit_generator_name) + ' is not a known '\n 'BitGenerator module.')\n\n return RandomState(bit_generator())\n", "path": "numpy/random/_pickle.py"}]} | 1,401 | 229 |
gh_patches_debug_2666 | rasdani/github-patches | git_diff | netbox-community__netbox-14935 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Typo in DataSourceBulkEditForm
### Deployment Type
Self-hosted
### NetBox Version
v3.7.1
### Python Version
3.8
### Steps to Reproduce
"lavel" is defined as "Enforce unique space", but I think the correct definition is "Enabled".
https://github.com/netbox-community/netbox/blob/487f1ccfde26ef3c1f8a28089826acc0cd6fadb2/netbox/core/forms/bulk_edit.py#L21-L25
- Add a new data source

- Editing 1 Data Sources

### Expected Behavior
Enabled
### Observed Behavior
Enforce unique space
</issue>
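For readers less familiar with Django forms: the `label` keyword is only the human-facing caption rendered next to the widget, which is why the copy-pasted string shows up verbatim in both screenshots. A minimal illustration, assuming a configured Django environment; the field mirrors the one in `bulk_edit.py` minus the NetBox-specific widget:

```python
from django import forms

class ExampleBulkEditForm(forms.Form):
    enabled = forms.NullBooleanField(
        required=False,
        label="Enabled",  # the caption users see; the bug shipped
                          # "Enforce unique space" here instead
    )
```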
<code>
[start of netbox/core/forms/bulk_edit.py]
1 from django import forms
2 from django.utils.translation import gettext_lazy as _
3
4 from core.models import *
5 from netbox.forms import NetBoxModelBulkEditForm
6 from netbox.utils import get_data_backend_choices
7 from utilities.forms.fields import CommentField
8 from utilities.forms.widgets import BulkEditNullBooleanSelect
9
10 __all__ = (
11 'DataSourceBulkEditForm',
12 )
13
14
15 class DataSourceBulkEditForm(NetBoxModelBulkEditForm):
16 type = forms.ChoiceField(
17 label=_('Type'),
18 choices=get_data_backend_choices,
19 required=False
20 )
21 enabled = forms.NullBooleanField(
22 required=False,
23 widget=BulkEditNullBooleanSelect(),
24 label=_('Enforce unique space')
25 )
26 description = forms.CharField(
27 label=_('Description'),
28 max_length=200,
29 required=False
30 )
31 comments = CommentField()
32 parameters = forms.JSONField(
33 label=_('Parameters'),
34 required=False
35 )
36 ignore_rules = forms.CharField(
37 label=_('Ignore rules'),
38 required=False,
39 widget=forms.Textarea()
40 )
41
42 model = DataSource
43 fieldsets = (
44 (None, ('type', 'enabled', 'description', 'comments', 'parameters', 'ignore_rules')),
45 )
46 nullable_fields = (
47 'description', 'description', 'parameters', 'comments', 'parameters', 'ignore_rules',
48 )
49
[end of netbox/core/forms/bulk_edit.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/netbox/core/forms/bulk_edit.py b/netbox/core/forms/bulk_edit.py
--- a/netbox/core/forms/bulk_edit.py
+++ b/netbox/core/forms/bulk_edit.py
@@ -21,7 +21,7 @@
enabled = forms.NullBooleanField(
required=False,
widget=BulkEditNullBooleanSelect(),
- label=_('Enforce unique space')
+ label=_('Enabled')
)
description = forms.CharField(
label=_('Description'),
| {"golden_diff": "diff --git a/netbox/core/forms/bulk_edit.py b/netbox/core/forms/bulk_edit.py\n--- a/netbox/core/forms/bulk_edit.py\n+++ b/netbox/core/forms/bulk_edit.py\n@@ -21,7 +21,7 @@\n enabled = forms.NullBooleanField(\n required=False,\n widget=BulkEditNullBooleanSelect(),\n- label=_('Enforce unique space')\n+ label=_('Enabled')\n )\n description = forms.CharField(\n label=_('Description'),\n", "issue": "Typo in DataSourceBulkEditForm\n### Deployment Type\n\nSelf-hosted\n\n### NetBox Version\n\nv3.7.1\n\n### Python Version\n\n3.8\n\n### Steps to Reproduce\n\n\"lavel\" is defined as \"Enforce unique space\", but I think the correct definition is \"Enabled\".\r\n\r\nhttps://github.com/netbox-community/netbox/blob/487f1ccfde26ef3c1f8a28089826acc0cd6fadb2/netbox/core/forms/bulk_edit.py#L21-L25\r\n\r\n- Add a new data source\r\n\r\n\r\n- Editing 1 Data Sources\r\n\r\n\n\n### Expected Behavior\n\nEnabled\n\n### Observed Behavior\n\nEnforce unique space\n", "before_files": [{"content": "from django import forms\nfrom django.utils.translation import gettext_lazy as _\n\nfrom core.models import *\nfrom netbox.forms import NetBoxModelBulkEditForm\nfrom netbox.utils import get_data_backend_choices\nfrom utilities.forms.fields import CommentField\nfrom utilities.forms.widgets import BulkEditNullBooleanSelect\n\n__all__ = (\n 'DataSourceBulkEditForm',\n)\n\n\nclass DataSourceBulkEditForm(NetBoxModelBulkEditForm):\n type = forms.ChoiceField(\n label=_('Type'),\n choices=get_data_backend_choices,\n required=False\n )\n enabled = forms.NullBooleanField(\n required=False,\n widget=BulkEditNullBooleanSelect(),\n label=_('Enforce unique space')\n )\n description = forms.CharField(\n label=_('Description'),\n max_length=200,\n required=False\n )\n comments = CommentField()\n parameters = forms.JSONField(\n label=_('Parameters'),\n required=False\n )\n ignore_rules = forms.CharField(\n label=_('Ignore rules'),\n required=False,\n widget=forms.Textarea()\n )\n\n model = DataSource\n fieldsets = (\n (None, ('type', 'enabled', 'description', 'comments', 'parameters', 'ignore_rules')),\n )\n nullable_fields = (\n 'description', 'description', 'parameters', 'comments', 'parameters', 'ignore_rules',\n )\n", "path": "netbox/core/forms/bulk_edit.py"}]} | 1,202 | 106 |
gh_patches_debug_63309 | rasdani/github-patches | git_diff | scikit-hep__pyhf-924 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove cloudpickle constraints when updating to TensorFlow Probability v0.11
# Description
Once TensorFlow Probability `v0.11.0` is released there will no longer be a need for PR #915, so that change should be reverted.
Related Issues: #815
</issue>
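Mechanically, the change is just PEP 440 version specifiers in `extras_require`. A short sketch of how the two pinning styles in play differ, using the `packaging` library (an assumption for illustration; it is not a dependency named in this issue):

```python
from packaging.specifiers import SpecifierSet

compat = SpecifierSet("~=0.10")    # compatible release: >=0.10, ==0.*
exclude = SpecifierSet("!=1.5.0")  # everything except one broken release

print("0.10.1" in compat, "0.11" in compat)    # True True
print("1.5.0" in exclude, "1.4.0" in exclude)  # False True
```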
<code>
[start of setup.py]
1 from setuptools import setup
2
3 extras_require = {
4 'tensorflow': [
5 'tensorflow~=2.0',
6 'tensorflow-probability~=0.8',
7 'cloudpickle!=1.5.0', # TODO: Temp patch until tfp v0.11
8 ],
9 'torch': ['torch~=1.2'],
10 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],
11 'xmlio': ['uproot'],
12 'minuit': ['iminuit'],
13 }
14 extras_require['backends'] = sorted(
15 set(
16 extras_require['tensorflow']
17 + extras_require['torch']
18 + extras_require['jax']
19 + extras_require['minuit']
20 )
21 )
22 extras_require['contrib'] = sorted(set(['matplotlib']))
23 extras_require['lint'] = sorted(set(['pyflakes', 'black']))
24
25 extras_require['test'] = sorted(
26 set(
27 extras_require['backends']
28 + extras_require['xmlio']
29 + extras_require['contrib']
30 + [
31 'pytest~=3.5',
32 'pytest-cov>=2.5.1',
33 'pytest-mock',
34 'pytest-benchmark[histogram]',
35 'pytest-console-scripts',
36 'pytest-mpl',
37 'pydocstyle',
38 'coverage>=4.0', # coveralls
39 'papermill~=2.0',
40 'nteract-scrapbook~=0.2',
41 'jupyter',
42 'uproot~=3.3',
43 'graphviz',
44 'jsonpatch',
45 ]
46 )
47 )
48 extras_require['docs'] = sorted(
49 set(
50 [
51 'sphinx~=3.0.0', # Sphinx v3.1.X regressions break docs
52 'sphinxcontrib-bibtex',
53 'sphinx-click',
54 'sphinx_rtd_theme',
55 'nbsphinx',
56 'ipywidgets',
57 'sphinx-issues',
58 'sphinx-copybutton>0.2.9',
59 ]
60 )
61 )
62 extras_require['develop'] = sorted(
63 set(
64 extras_require['docs']
65 + extras_require['lint']
66 + extras_require['test']
67 + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'check-manifest', 'twine']
68 )
69 )
70 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
71
72
73 setup(
74 extras_require=extras_require,
75 use_scm_version=lambda: {'local_scheme': lambda version: ''},
76 )
77
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -3,8 +3,7 @@
extras_require = {
'tensorflow': [
'tensorflow~=2.0',
- 'tensorflow-probability~=0.8',
- 'cloudpickle!=1.5.0', # TODO: Temp patch until tfp v0.11
+ 'tensorflow-probability~=0.10', # TODO: Temp patch until tfp v0.11
],
'torch': ['torch~=1.2'],
'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -3,8 +3,7 @@\n extras_require = {\n 'tensorflow': [\n 'tensorflow~=2.0',\n- 'tensorflow-probability~=0.8',\n- 'cloudpickle!=1.5.0', # TODO: Temp patch until tfp v0.11\n+ 'tensorflow-probability~=0.10', # TODO: Temp patch until tfp v0.11\n ],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n", "issue": "Remove cloudpickle constraints when updating to TensorFlow Probability v0.11\n# Description\r\n\r\nOnce TensorFlow Probability `v0.11.0` is released there will no longer be the need for PR #915, and so that should be reverted.\r\n\r\nRelated Issues: #815 \r\n\n", "before_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'tensorflow': [\n 'tensorflow~=2.0',\n 'tensorflow-probability~=0.8',\n 'cloudpickle!=1.5.0', # TODO: Temp patch until tfp v0.11\n ],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\nextras_require['lint'] = sorted(set(['pyflakes', 'black']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx~=3.0.0', # Sphinx v3.1.X regressions break docs\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'check-manifest', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]} | 1,292 | 163 |
gh_patches_debug_1435 | rasdani/github-patches | git_diff | keras-team__keras-1039 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
keras.utils.visualize_util
line 9: `if type(model) == Sequential:` -- global name `Sequential` is not defined
line 25: `elif type(model) == Graph:` -- global name `Graph` is not defined
</issue>
<code>
[start of keras/utils/visualize_util.py]
1 import pydot
2 # old pydot will not work with python3, must use one
3 # that works with python3 such as pydot2 or pydot
4
5
6 def plot(model, to_file='model.png'):
7
8 graph = pydot.Dot(graph_type='digraph')
9 if type(model) == Sequential:
10 previous_node = None
11 written_nodes = []
12 n = 1
13 for node in model.get_config()['layers']:
14 # append number in case layers have same name to differentiate
15 if (node['name'] + str(n)) in written_nodes:
16 n += 1
17 current_node = pydot.Node(node['name'] + str(n))
18 written_nodes.append(node['name'] + str(n))
19 graph.add_node(current_node)
20 if previous_node:
21 graph.add_edge(pydot.Edge(previous_node, current_node))
22 previous_node = current_node
23 graph.write_png(to_file)
24
25 elif type(model) == Graph:
26 # don't need to append number for names since all nodes labeled
27 for input_node in model.input_config:
28 graph.add_node(pydot.Node(input_node['name']))
29
30 # intermediate and output nodes have input defined
31 for layer_config in [model.node_config, model.output_config]:
32 for node in layer_config:
33 graph.add_node(pydot.Node(node['name']))
34 # possible to have multiple 'inputs' vs 1 'input'
35 if node['inputs']:
36 for e in node['inputs']:
37 graph.add_edge(pydot.Edge(e, node['name']))
38 else:
39 graph.add_edge(pydot.Edge(node['input'], node['name']))
40
41 graph.write_png(to_file)
42
[end of keras/utils/visualize_util.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/keras/utils/visualize_util.py b/keras/utils/visualize_util.py
--- a/keras/utils/visualize_util.py
+++ b/keras/utils/visualize_util.py
@@ -1,7 +1,7 @@
import pydot
# old pydot will not work with python3, must use one
# that works with python3 such as pydot2 or pydot
-
+from keras.models import Sequential, Graph
def plot(model, to_file='model.png'):
| {"golden_diff": "diff --git a/keras/utils/visualize_util.py b/keras/utils/visualize_util.py\n--- a/keras/utils/visualize_util.py\n+++ b/keras/utils/visualize_util.py\n@@ -1,7 +1,7 @@\n import pydot\n # old pydot will not work with python3, must use one\n # that works with python3 such as pydot2 or pydot\n-\n+from keras.models import Sequential, Graph\n \n def plot(model, to_file='model.png'):\n", "issue": "keras.utils.visualize_util\nline 9: if type(model) == Sequential\nGlobal name Sequential is not defined\nline 25 elif type(model) == Graph:\nGlobal name Graph is not defined\n\n", "before_files": [{"content": "import pydot\n# old pydot will not work with python3, must use one\n# that works with python3 such as pydot2 or pydot\n\n\ndef plot(model, to_file='model.png'):\n\n graph = pydot.Dot(graph_type='digraph')\n if type(model) == Sequential:\n previous_node = None\n written_nodes = []\n n = 1\n for node in model.get_config()['layers']:\n # append number in case layers have same name to differentiate\n if (node['name'] + str(n)) in written_nodes:\n n += 1\n current_node = pydot.Node(node['name'] + str(n))\n written_nodes.append(node['name'] + str(n))\n graph.add_node(current_node)\n if previous_node:\n graph.add_edge(pydot.Edge(previous_node, current_node))\n previous_node = current_node\n graph.write_png(to_file)\n\n elif type(model) == Graph:\n # don't need to append number for names since all nodes labeled\n for input_node in model.input_config:\n graph.add_node(pydot.Node(input_node['name']))\n\n # intermediate and output nodes have input defined\n for layer_config in [model.node_config, model.output_config]:\n for node in layer_config:\n graph.add_node(pydot.Node(node['name']))\n # possible to have multiple 'inputs' vs 1 'input'\n if node['inputs']:\n for e in node['inputs']:\n graph.add_edge(pydot.Edge(e, node['name']))\n else:\n graph.add_edge(pydot.Edge(node['input'], node['name']))\n\n graph.write_png(to_file)\n", "path": "keras/utils/visualize_util.py"}]} | 1,011 | 113 |
gh_patches_debug_13968 | rasdani/github-patches | git_diff | Kinto__kinto-367 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
kinto init should install postgresql dependencies
</issue>
<code>
[start of kinto/__main__.py]
1 from __future__ import print_function
2 import argparse
3 import os
4 import sys
5 from six.moves import input
6 from cliquet.scripts import cliquet
7 from pyramid.scripts import pserve
8 from pyramid.paster import bootstrap
9
10 from kinto.config import init
11
12 CONFIG_FILE = 'config/kinto.ini'
13
14
15 def main(args=None):
16 """The main routine."""
17 if args is None:
18 args = sys.argv[1:]
19
20 parser = argparse.ArgumentParser(description="Kinto commands")
21 parser.add_argument('--ini',
22 help='Application configuration file',
23 dest='ini_file',
24 required=False,
25 default=CONFIG_FILE)
26 parser.add_argument('--backend',
27 help='Specify backend',
28 dest='backend',
29 required=False,
30 default=None)
31
32 subparsers = parser.add_subparsers(title='subcommands',
33 description='valid subcommands',
34 help='init/start/migrate')
35
36 parser_init = subparsers.add_parser('init')
37 parser_init.set_defaults(which='init')
38
39 parser_migrate = subparsers.add_parser('migrate')
40 parser_migrate.set_defaults(which='migrate')
41
42 parser_start = subparsers.add_parser('start')
43 parser_start.set_defaults(which='start')
44
45 args = vars(parser.parse_args())
46 config_file = args['ini_file']
47
48 if args['which'] == 'init':
49 if os.path.exists(config_file):
50 print("%s already exist." % config_file, file=sys.stderr)
51 sys.exit(1)
52
53 backend = args['backend']
54 if not backend:
55 while True:
56 prompt = ("Which backend to use? "
57 "(1 - postgresql, 2 - redis, default - memory) ")
58 answer = input(prompt).strip()
59 try:
60 backends = {"1": "postgresql", "2": "redis", "": "memory"}
61 backend = backends[answer]
62 break
63 except KeyError:
64 pass
65
66 init(config_file, backend)
67
68 elif args['which'] == 'migrate':
69 env = bootstrap(config_file)
70 cliquet.init_schema(env)
71
72 elif args['which'] == 'start':
73 pserve_argv = ['pserve', config_file, '--reload']
74 pserve.main(pserve_argv)
75
76
77 if __name__ == "__main__":
78 main()
79
[end of kinto/__main__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/kinto/__main__.py b/kinto/__main__.py
--- a/kinto/__main__.py
+++ b/kinto/__main__.py
@@ -2,6 +2,8 @@
import argparse
import os
import sys
+
+import pip
from six.moves import input
from cliquet.scripts import cliquet
from pyramid.scripts import pserve
@@ -65,6 +67,13 @@
init(config_file, backend)
+ # Install postgresql libraries if necessary
+ if backend == "postgresql":
+ try:
+ import psycopg2 # NOQA
+ except ImportError:
+ pip.main(['install', "cliquet[postgresql]"])
+
elif args['which'] == 'migrate':
env = bootstrap(config_file)
cliquet.init_schema(env)
| {"golden_diff": "diff --git a/kinto/__main__.py b/kinto/__main__.py\n--- a/kinto/__main__.py\n+++ b/kinto/__main__.py\n@@ -2,6 +2,8 @@\n import argparse\n import os\n import sys\n+\n+import pip\n from six.moves import input\n from cliquet.scripts import cliquet\n from pyramid.scripts import pserve\n@@ -65,6 +67,13 @@\n \n init(config_file, backend)\n \n+ # Install postgresql libraries if necessary\n+ if backend == \"postgresql\":\n+ try:\n+ import psycopg2 # NOQA\n+ except ImportError:\n+ pip.main(['install', \"cliquet[postgresql]\"])\n+\n elif args['which'] == 'migrate':\n env = bootstrap(config_file)\n cliquet.init_schema(env)\n", "issue": "kinto init should install postgresql dependencies\n\n", "before_files": [{"content": "from __future__ import print_function\nimport argparse\nimport os\nimport sys\nfrom six.moves import input\nfrom cliquet.scripts import cliquet\nfrom pyramid.scripts import pserve\nfrom pyramid.paster import bootstrap\n\nfrom kinto.config import init\n\nCONFIG_FILE = 'config/kinto.ini'\n\n\ndef main(args=None):\n \"\"\"The main routine.\"\"\"\n if args is None:\n args = sys.argv[1:]\n\n parser = argparse.ArgumentParser(description=\"Kinto commands\")\n parser.add_argument('--ini',\n help='Application configuration file',\n dest='ini_file',\n required=False,\n default=CONFIG_FILE)\n parser.add_argument('--backend',\n help='Specify backend',\n dest='backend',\n required=False,\n default=None)\n\n subparsers = parser.add_subparsers(title='subcommands',\n description='valid subcommands',\n help='init/start/migrate')\n\n parser_init = subparsers.add_parser('init')\n parser_init.set_defaults(which='init')\n\n parser_migrate = subparsers.add_parser('migrate')\n parser_migrate.set_defaults(which='migrate')\n\n parser_start = subparsers.add_parser('start')\n parser_start.set_defaults(which='start')\n\n args = vars(parser.parse_args())\n config_file = args['ini_file']\n\n if args['which'] == 'init':\n if os.path.exists(config_file):\n print(\"%s already exist.\" % config_file, file=sys.stderr)\n sys.exit(1)\n\n backend = args['backend']\n if not backend:\n while True:\n prompt = (\"Which backend to use? \"\n \"(1 - postgresql, 2 - redis, default - memory) \")\n answer = input(prompt).strip()\n try:\n backends = {\"1\": \"postgresql\", \"2\": \"redis\", \"\": \"memory\"}\n backend = backends[answer]\n break\n except KeyError:\n pass\n\n init(config_file, backend)\n\n elif args['which'] == 'migrate':\n env = bootstrap(config_file)\n cliquet.init_schema(env)\n\n elif args['which'] == 'start':\n pserve_argv = ['pserve', config_file, '--reload']\n pserve.main(pserve_argv)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "kinto/__main__.py"}]} | 1,185 | 184 |
gh_patches_debug_33610 | rasdani/github-patches | git_diff | litestar-org__litestar-183 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`from starlite import *` broken if `testing` extra not installed
This is only an issue on main, not in any release.
When I want to try a library out, I'll install it into a fresh env, run the Python REPL, do `from lib import *`, and have a play around. If just doing that raised an error, it would freak me out a little about the lib.
Possible solution:
- remove `.testing` imports from `starlite.__all__`
- add deprecation warning for top-level `.testing` imports
- remove `if TYPE_CHECKING` too? May as well if we are doing the above, I think?
Refs: #174 #130
</issue>
<code>
[start of starlite/__init__.py]
1 from typing import TYPE_CHECKING, Any
2
3 from starlite.datastructures import File, Redirect, State, Stream, Template
4
5 from .app import Starlite
6 from .config import (
7 CacheConfig,
8 CORSConfig,
9 OpenAPIConfig,
10 StaticFilesConfig,
11 TemplateConfig,
12 )
13 from .connection import Request, WebSocket
14 from .controller import Controller
15 from .dto import DTOFactory
16 from .enums import (
17 HttpMethod,
18 MediaType,
19 OpenAPIMediaType,
20 RequestEncodingType,
21 ScopeType,
22 )
23 from .exceptions import (
24 HTTPException,
25 ImproperlyConfiguredException,
26 InternalServerException,
27 MissingDependencyException,
28 NotAuthorizedException,
29 NotFoundException,
30 PermissionDeniedException,
31 ServiceUnavailableException,
32 StarLiteException,
33 ValidationException,
34 )
35 from .handlers import (
36 ASGIRouteHandler,
37 BaseRouteHandler,
38 HTTPRouteHandler,
39 WebsocketRouteHandler,
40 asgi,
41 delete,
42 get,
43 patch,
44 post,
45 put,
46 route,
47 websocket,
48 )
49 from .logging import LoggingConfig, QueueListenerHandler
50 from .middleware import AbstractAuthenticationMiddleware, AuthenticationResult
51 from .openapi.controller import OpenAPIController
52 from .params import Body, Dependency, Parameter
53 from .plugins import PluginProtocol
54 from .provide import Provide
55 from .response import Response
56 from .router import Router
57 from .routes import BaseRoute, HTTPRoute, WebSocketRoute
58 from .types import MiddlewareProtocol, Partial, ResponseHeader
59
60 if TYPE_CHECKING:
61 from .testing import TestClient, create_test_client, create_test_request
62
63
64 __all__ = [
65 "ASGIRouteHandler",
66 "AbstractAuthenticationMiddleware",
67 "AuthenticationResult",
68 "BaseRoute",
69 "BaseRouteHandler",
70 "Body",
71 "CORSConfig",
72 "CacheConfig",
73 "Controller",
74 "Dependency",
75 "DTOFactory",
76 "File",
77 "HTTPException",
78 "HTTPRoute",
79 "HTTPRouteHandler",
80 "HttpMethod",
81 "ImproperlyConfiguredException",
82 "InternalServerException",
83 "LoggingConfig",
84 "MediaType",
85 "MiddlewareProtocol",
86 "MissingDependencyException",
87 "NotAuthorizedException",
88 "NotFoundException",
89 "OpenAPIConfig",
90 "OpenAPIController",
91 "OpenAPIMediaType",
92 "Parameter",
93 "Partial",
94 "PermissionDeniedException",
95 "PluginProtocol",
96 "Provide",
97 "QueueListenerHandler",
98 "Redirect",
99 "Request",
100 "RequestEncodingType",
101 "Response",
102 "ResponseHeader",
103 "Router",
104 "ScopeType",
105 "ServiceUnavailableException",
106 "StarLiteException",
107 "Starlite",
108 "State",
109 "StaticFilesConfig",
110 "Stream",
111 "Template",
112 "TemplateConfig",
113 "TestClient",
114 "ValidationException",
115 "WebSocket",
116 "WebSocketRoute",
117 "WebsocketRouteHandler",
118 "asgi",
119 "create_test_client",
120 "create_test_request",
121 "delete",
122 "get",
123 "patch",
124 "post",
125 "put",
126 "route",
127 "websocket",
128 ]
129
130 _dynamic_imports = {"TestClient", "create_test_client", "create_test_request"}
131
132
133 # pylint: disable=import-outside-toplevel
134 def __getattr__(name: str) -> Any:
135 """Provide lazy importing as per https://peps.python.org/pep-0562/"""
136 if name not in _dynamic_imports:
137 raise AttributeError(f"Module {__package__} has no attribute {name}")
138
139 from . import testing
140
141 attr = globals()[name] = getattr(testing, name)
142 return attr
143
[end of starlite/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/starlite/__init__.py b/starlite/__init__.py
--- a/starlite/__init__.py
+++ b/starlite/__init__.py
@@ -1,4 +1,4 @@
-from typing import TYPE_CHECKING, Any
+from typing import Any
from starlite.datastructures import File, Redirect, State, Stream, Template
@@ -57,10 +57,6 @@
from .routes import BaseRoute, HTTPRoute, WebSocketRoute
from .types import MiddlewareProtocol, Partial, ResponseHeader
-if TYPE_CHECKING:
- from .testing import TestClient, create_test_client, create_test_request
-
-
__all__ = [
"ASGIRouteHandler",
"AbstractAuthenticationMiddleware",
@@ -110,14 +106,11 @@
"Stream",
"Template",
"TemplateConfig",
- "TestClient",
"ValidationException",
"WebSocket",
"WebSocketRoute",
"WebsocketRouteHandler",
"asgi",
- "create_test_client",
- "create_test_request",
"delete",
"get",
"patch",
@@ -127,15 +120,24 @@
"websocket",
]
-_dynamic_imports = {"TestClient", "create_test_client", "create_test_request"}
+
+_deprecated_imports = {"TestClient", "create_test_client", "create_test_request"}
# pylint: disable=import-outside-toplevel
def __getattr__(name: str) -> Any:
"""Provide lazy importing as per https://peps.python.org/pep-0562/"""
- if name not in _dynamic_imports:
+ if name not in _deprecated_imports:
raise AttributeError(f"Module {__package__} has no attribute {name}")
+ import warnings
+
+ warnings.warn(
+ f"Importing {name} from {__package__} is deprecated, use `from startlite.testing import {name}` instead",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+
from . import testing
attr = globals()[name] = getattr(testing, name)
| {"golden_diff": "diff --git a/starlite/__init__.py b/starlite/__init__.py\n--- a/starlite/__init__.py\n+++ b/starlite/__init__.py\n@@ -1,4 +1,4 @@\n-from typing import TYPE_CHECKING, Any\n+from typing import Any\n \n from starlite.datastructures import File, Redirect, State, Stream, Template\n \n@@ -57,10 +57,6 @@\n from .routes import BaseRoute, HTTPRoute, WebSocketRoute\n from .types import MiddlewareProtocol, Partial, ResponseHeader\n \n-if TYPE_CHECKING:\n- from .testing import TestClient, create_test_client, create_test_request\n-\n-\n __all__ = [\n \"ASGIRouteHandler\",\n \"AbstractAuthenticationMiddleware\",\n@@ -110,14 +106,11 @@\n \"Stream\",\n \"Template\",\n \"TemplateConfig\",\n- \"TestClient\",\n \"ValidationException\",\n \"WebSocket\",\n \"WebSocketRoute\",\n \"WebsocketRouteHandler\",\n \"asgi\",\n- \"create_test_client\",\n- \"create_test_request\",\n \"delete\",\n \"get\",\n \"patch\",\n@@ -127,15 +120,24 @@\n \"websocket\",\n ]\n \n-_dynamic_imports = {\"TestClient\", \"create_test_client\", \"create_test_request\"}\n+\n+_deprecated_imports = {\"TestClient\", \"create_test_client\", \"create_test_request\"}\n \n \n # pylint: disable=import-outside-toplevel\n def __getattr__(name: str) -> Any:\n \"\"\"Provide lazy importing as per https://peps.python.org/pep-0562/\"\"\"\n- if name not in _dynamic_imports:\n+ if name not in _deprecated_imports:\n raise AttributeError(f\"Module {__package__} has no attribute {name}\")\n \n+ import warnings\n+\n+ warnings.warn(\n+ f\"Importing {name} from {__package__} is deprecated, use `from startlite.testing import {name}` instead\",\n+ DeprecationWarning,\n+ stacklevel=2,\n+ )\n+\n from . import testing\n \n attr = globals()[name] = getattr(testing, name)\n", "issue": "`from starlite import *` broken if `testing` extra not installed\nThis is only an issue on main, not in any release.\r\n\r\nWhen I want to try a library out, I'll install it into a fresh env, run python repl and do `from lib import *` and have a play around. If just doing that raised an error it would freak me out a little about the lib.\r\n\r\nPossible solution:\r\n- remove `.testing` imports from `starlite.__all__`\r\n- add deprecation warning for top-level `.testing` imports\r\n- remove `if TYPE_CHECKING` too? 
May as well if we are doing the above, I think?\r\n\r\nRefs: #174 #130 \n", "before_files": [{"content": "from typing import TYPE_CHECKING, Any\n\nfrom starlite.datastructures import File, Redirect, State, Stream, Template\n\nfrom .app import Starlite\nfrom .config import (\n CacheConfig,\n CORSConfig,\n OpenAPIConfig,\n StaticFilesConfig,\n TemplateConfig,\n)\nfrom .connection import Request, WebSocket\nfrom .controller import Controller\nfrom .dto import DTOFactory\nfrom .enums import (\n HttpMethod,\n MediaType,\n OpenAPIMediaType,\n RequestEncodingType,\n ScopeType,\n)\nfrom .exceptions import (\n HTTPException,\n ImproperlyConfiguredException,\n InternalServerException,\n MissingDependencyException,\n NotAuthorizedException,\n NotFoundException,\n PermissionDeniedException,\n ServiceUnavailableException,\n StarLiteException,\n ValidationException,\n)\nfrom .handlers import (\n ASGIRouteHandler,\n BaseRouteHandler,\n HTTPRouteHandler,\n WebsocketRouteHandler,\n asgi,\n delete,\n get,\n patch,\n post,\n put,\n route,\n websocket,\n)\nfrom .logging import LoggingConfig, QueueListenerHandler\nfrom .middleware import AbstractAuthenticationMiddleware, AuthenticationResult\nfrom .openapi.controller import OpenAPIController\nfrom .params import Body, Dependency, Parameter\nfrom .plugins import PluginProtocol\nfrom .provide import Provide\nfrom .response import Response\nfrom .router import Router\nfrom .routes import BaseRoute, HTTPRoute, WebSocketRoute\nfrom .types import MiddlewareProtocol, Partial, ResponseHeader\n\nif TYPE_CHECKING:\n from .testing import TestClient, create_test_client, create_test_request\n\n\n__all__ = [\n \"ASGIRouteHandler\",\n \"AbstractAuthenticationMiddleware\",\n \"AuthenticationResult\",\n \"BaseRoute\",\n \"BaseRouteHandler\",\n \"Body\",\n \"CORSConfig\",\n \"CacheConfig\",\n \"Controller\",\n \"Dependency\",\n \"DTOFactory\",\n \"File\",\n \"HTTPException\",\n \"HTTPRoute\",\n \"HTTPRouteHandler\",\n \"HttpMethod\",\n \"ImproperlyConfiguredException\",\n \"InternalServerException\",\n \"LoggingConfig\",\n \"MediaType\",\n \"MiddlewareProtocol\",\n \"MissingDependencyException\",\n \"NotAuthorizedException\",\n \"NotFoundException\",\n \"OpenAPIConfig\",\n \"OpenAPIController\",\n \"OpenAPIMediaType\",\n \"Parameter\",\n \"Partial\",\n \"PermissionDeniedException\",\n \"PluginProtocol\",\n \"Provide\",\n \"QueueListenerHandler\",\n \"Redirect\",\n \"Request\",\n \"RequestEncodingType\",\n \"Response\",\n \"ResponseHeader\",\n \"Router\",\n \"ScopeType\",\n \"ServiceUnavailableException\",\n \"StarLiteException\",\n \"Starlite\",\n \"State\",\n \"StaticFilesConfig\",\n \"Stream\",\n \"Template\",\n \"TemplateConfig\",\n \"TestClient\",\n \"ValidationException\",\n \"WebSocket\",\n \"WebSocketRoute\",\n \"WebsocketRouteHandler\",\n \"asgi\",\n \"create_test_client\",\n \"create_test_request\",\n \"delete\",\n \"get\",\n \"patch\",\n \"post\",\n \"put\",\n \"route\",\n \"websocket\",\n]\n\n_dynamic_imports = {\"TestClient\", \"create_test_client\", \"create_test_request\"}\n\n\n# pylint: disable=import-outside-toplevel\ndef __getattr__(name: str) -> Any:\n \"\"\"Provide lazy importing as per https://peps.python.org/pep-0562/\"\"\"\n if name not in _dynamic_imports:\n raise AttributeError(f\"Module {__package__} has no attribute {name}\")\n\n from . import testing\n\n attr = globals()[name] = getattr(testing, name)\n return attr\n", "path": "starlite/__init__.py"}]} | 1,788 | 472 |
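The module-level `__getattr__` above is the PEP 562 hook, so the deprecation only fires when the name is actually requested. A sketch of what a caller sees after the patch (assuming the `testing` extra is installed; output abbreviated):

```python
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    from starlite import TestClient  # noqa: F401 -- resolved via __getattr__

print(caught[0].category)  # <class 'DeprecationWarning'>
print(caught[0].message)   # Importing TestClient from starlite is deprecated, ...
```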
gh_patches_debug_38574 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-1305 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CL-SIC parser returns wrong datetime
CL-SIC parser returns datetime of 0018 instead of 2018, as illustrated below:
`datetime: 0018-02-18 (06:43:00.000) CET`
`production: {"unknown":0,"coal":2206.6,"hydro":2416.39660,"wind":309.314,....}`
`updatedAt: 2018-02-20 (23:40:06.979) CET`
@systemcatch any idea?
</issue>
<code>
[start of parsers/lib/quality.py]
1 import datetime
2 import warnings
3
4 import arrow
5
6
7 class ValidationError(ValueError):
8 pass
9
10
11 def validate_consumption(obj, zone_key):
12 # Data quality check
13 if obj['consumption'] is not None and obj['consumption'] < 0:
14 raise ValidationError('%s: consumption has negative value '
15 '%s' % (zone_key, obj['consumption']))
16
17
18 def validate_exchange(item, k):
19 if item.get('sortedZoneKeys', None) != k:
20 raise ValidationError("Sorted country codes %s and %s don't "
21 "match" % (item.get('sortedZoneKeys', None), k))
22 if 'datetime' not in item:
23 raise ValidationError('datetime was not returned for %s' % k)
24 if type(item['datetime']) != datetime.datetime:
25 raise ValidationError('datetime %s is not valid for %s' %
26 (item['datetime'], k))
27 data_time = arrow.get(item['datetime'])
28 if data_time > arrow.now():
29 raise ValidationError("Data from %s can't be in the future, data was "
30 "%s, now is %s" % (k, data_time, arrow.now()))
31 if data_time.year < 2000:
32 raise ValidationError("Data from %s can't be before year 2000, it was "
33 "%s" % (k, data_time))
34
35
36 def validate_production(obj, zone_key):
37 if 'datetime' not in obj:
38 raise ValidationError(
39 'datetime was not returned for %s' % zone_key)
40 if 'countryCode' in obj:
41 warnings.warn('object has field `countryCode`. It should have '
42 '`zoneKey` instead. In {}'.format(obj))
43 if 'zoneKey' not in obj and 'countryCode' not in obj:
44 raise ValidationError('zoneKey was not returned for %s' % zone_key)
45 if not isinstance(obj['datetime'], datetime.datetime):
46 raise ValidationError('datetime %s is not valid for %s' %
47 (obj['datetime'], zone_key))
48 if (obj.get('zoneKey', None) or obj.get('countryCode', None)) != zone_key:
49 raise ValidationError("Zone keys %s and %s don't match in %s" %
50 (obj.get('zoneKey', None), zone_key, obj))
51 data_time = arrow.get(obj['datetime'])
52 arrow_now = arrow.utcnow()
53 if data_time > arrow_now:
54 raise ValidationError(
55 "Data from %s can't be in the future, data was %s, now is "
56 "%s" % (zone_key, data_time, arrow_now))
57
58 if ((obj.get('production', {}).get('unknown', None) is None and
59 obj.get('production', {}).get('coal', None) is None and
60 obj.get('production', {}).get('oil', None) is None and
61 obj.get('production', {}).get('gas', None) is None and zone_key
62 not in ['CH', 'NO', 'AUS-TAS', 'DK-BHM', 'US-NEISO'])):
63 raise ValidationError(
64 "Coal or oil or unknown production value is required for"
65 " %s" % zone_key)
66 for k, v in obj['production'].items():
67 if v is None:
68 continue
69 if v < 0:
70 raise ValidationError('%s: key %s has negative value %s' %
71 (zone_key, k, v))
72
[end of parsers/lib/quality.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/parsers/lib/quality.py b/parsers/lib/quality.py
--- a/parsers/lib/quality.py
+++ b/parsers/lib/quality.py
@@ -8,11 +8,26 @@
pass
+def validate_reasonable_time(item, k):
+ data_time = arrow.get(item['datetime'])
+ if data_time.year < 2000:
+ raise ValidationError("Data from %s can't be before year 2000, it was "
+ "%s" % (k, data_time))
+
+ arrow_now = arrow.utcnow()
+ if data_time > arrow_now:
+ raise ValidationError(
+ "Data from %s can't be in the future, data was %s, now is "
+ "%s" % (k, data_time, arrow_now))
+
+
+
def validate_consumption(obj, zone_key):
# Data quality check
if obj['consumption'] is not None and obj['consumption'] < 0:
raise ValidationError('%s: consumption has negative value '
'%s' % (zone_key, obj['consumption']))
+ validate_reasonable_time(obj, zone_key)
def validate_exchange(item, k):
@@ -24,13 +39,7 @@
if type(item['datetime']) != datetime.datetime:
raise ValidationError('datetime %s is not valid for %s' %
(item['datetime'], k))
- data_time = arrow.get(item['datetime'])
- if data_time > arrow.now():
- raise ValidationError("Data from %s can't be in the future, data was "
- "%s, now is %s" % (k, data_time, arrow.now()))
- if data_time.year < 2000:
- raise ValidationError("Data from %s can't be before year 2000, it was "
- "%s" % (k, data_time))
+ validate_reasonable_time(item, k)
def validate_production(obj, zone_key):
@@ -48,12 +57,6 @@
if (obj.get('zoneKey', None) or obj.get('countryCode', None)) != zone_key:
raise ValidationError("Zone keys %s and %s don't match in %s" %
(obj.get('zoneKey', None), zone_key, obj))
- data_time = arrow.get(obj['datetime'])
- arrow_now = arrow.utcnow()
- if data_time > arrow_now:
- raise ValidationError(
- "Data from %s can't be in the future, data was %s, now is "
- "%s" % (zone_key, data_time, arrow_now))
if ((obj.get('production', {}).get('unknown', None) is None and
obj.get('production', {}).get('coal', None) is None and
@@ -69,3 +72,4 @@
if v < 0:
raise ValidationError('%s: key %s has negative value %s' %
(zone_key, k, v))
+ validate_reasonable_time(obj, zone_key)
| {"golden_diff": "diff --git a/parsers/lib/quality.py b/parsers/lib/quality.py\n--- a/parsers/lib/quality.py\n+++ b/parsers/lib/quality.py\n@@ -8,11 +8,26 @@\n pass\n \n \n+def validate_reasonable_time(item, k):\n+ data_time = arrow.get(item['datetime'])\n+ if data_time.year < 2000:\n+ raise ValidationError(\"Data from %s can't be before year 2000, it was \"\n+ \"%s\" % (k, data_time))\n+\n+ arrow_now = arrow.utcnow()\n+ if data_time > arrow_now:\n+ raise ValidationError(\n+ \"Data from %s can't be in the future, data was %s, now is \"\n+ \"%s\" % (k, data_time, arrow_now))\n+\n+\n+\n def validate_consumption(obj, zone_key):\n # Data quality check\n if obj['consumption'] is not None and obj['consumption'] < 0:\n raise ValidationError('%s: consumption has negative value '\n '%s' % (zone_key, obj['consumption']))\n+ validate_reasonable_time(obj, zone_key)\n \n \n def validate_exchange(item, k):\n@@ -24,13 +39,7 @@\n if type(item['datetime']) != datetime.datetime:\n raise ValidationError('datetime %s is not valid for %s' %\n (item['datetime'], k))\n- data_time = arrow.get(item['datetime'])\n- if data_time > arrow.now():\n- raise ValidationError(\"Data from %s can't be in the future, data was \"\n- \"%s, now is %s\" % (k, data_time, arrow.now()))\n- if data_time.year < 2000:\n- raise ValidationError(\"Data from %s can't be before year 2000, it was \"\n- \"%s\" % (k, data_time))\n+ validate_reasonable_time(item, k)\n \n \n def validate_production(obj, zone_key):\n@@ -48,12 +57,6 @@\n if (obj.get('zoneKey', None) or obj.get('countryCode', None)) != zone_key:\n raise ValidationError(\"Zone keys %s and %s don't match in %s\" %\n (obj.get('zoneKey', None), zone_key, obj))\n- data_time = arrow.get(obj['datetime'])\n- arrow_now = arrow.utcnow()\n- if data_time > arrow_now:\n- raise ValidationError(\n- \"Data from %s can't be in the future, data was %s, now is \"\n- \"%s\" % (zone_key, data_time, arrow_now))\n \n if ((obj.get('production', {}).get('unknown', None) is None and\n obj.get('production', {}).get('coal', None) is None and\n@@ -69,3 +72,4 @@\n if v < 0:\n raise ValidationError('%s: key %s has negative value %s' %\n (zone_key, k, v))\n+ validate_reasonable_time(obj, zone_key)\n", "issue": "CL-SIC parser returns wrong datetime\nCL-SIC parser returns datetime of 0018 instead of 2018, as illustrated below:\r\n\r\n`datetime: 0018-02-18 (06:43:00.000) CET`\r\n`production: {\"unknown\":0,\"coal\":2206.6,\"hydro\":2416.39660,\"wind\":309.314,....}`\r\n`updatedAt: 2018-02-20 (23:40:06.979) CET`\r\n\r\n@systemcatch any idea?\r\n\n", "before_files": [{"content": "import datetime\nimport warnings\n\nimport arrow\n\n\nclass ValidationError(ValueError):\n pass\n\n\ndef validate_consumption(obj, zone_key):\n # Data quality check\n if obj['consumption'] is not None and obj['consumption'] < 0:\n raise ValidationError('%s: consumption has negative value '\n '%s' % (zone_key, obj['consumption']))\n\n\ndef validate_exchange(item, k):\n if item.get('sortedZoneKeys', None) != k:\n raise ValidationError(\"Sorted country codes %s and %s don't \"\n \"match\" % (item.get('sortedZoneKeys', None), k))\n if 'datetime' not in item:\n raise ValidationError('datetime was not returned for %s' % k)\n if type(item['datetime']) != datetime.datetime:\n raise ValidationError('datetime %s is not valid for %s' %\n (item['datetime'], k))\n data_time = arrow.get(item['datetime'])\n if data_time > arrow.now():\n raise ValidationError(\"Data from %s can't be in the future, data was \"\n \"%s, now is %s\" % (k, data_time, 
arrow.now()))\n if data_time.year < 2000:\n raise ValidationError(\"Data from %s can't be before year 2000, it was \"\n \"%s\" % (k, data_time))\n\n\ndef validate_production(obj, zone_key):\n if 'datetime' not in obj:\n raise ValidationError(\n 'datetime was not returned for %s' % zone_key)\n if 'countryCode' in obj:\n warnings.warn('object has field `countryCode`. It should have '\n '`zoneKey` instead. In {}'.format(obj))\n if 'zoneKey' not in obj and 'countryCode' not in obj:\n raise ValidationError('zoneKey was not returned for %s' % zone_key)\n if not isinstance(obj['datetime'], datetime.datetime):\n raise ValidationError('datetime %s is not valid for %s' %\n (obj['datetime'], zone_key))\n if (obj.get('zoneKey', None) or obj.get('countryCode', None)) != zone_key:\n raise ValidationError(\"Zone keys %s and %s don't match in %s\" %\n (obj.get('zoneKey', None), zone_key, obj))\n data_time = arrow.get(obj['datetime'])\n arrow_now = arrow.utcnow()\n if data_time > arrow_now:\n raise ValidationError(\n \"Data from %s can't be in the future, data was %s, now is \"\n \"%s\" % (zone_key, data_time, arrow_now))\n\n if ((obj.get('production', {}).get('unknown', None) is None and\n obj.get('production', {}).get('coal', None) is None and\n obj.get('production', {}).get('oil', None) is None and\n obj.get('production', {}).get('gas', None) is None and zone_key\n not in ['CH', 'NO', 'AUS-TAS', 'DK-BHM', 'US-NEISO'])):\n raise ValidationError(\n \"Coal or oil or unknown production value is required for\"\n \" %s\" % zone_key)\n for k, v in obj['production'].items():\n if v is None:\n continue\n if v < 0:\n raise ValidationError('%s: key %s has negative value %s' %\n (zone_key, k, v))\n", "path": "parsers/lib/quality.py"}]} | 1,554 | 671 |
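A minimal sketch of how the shared guard added above rejects the bad CL-SIC datapoint from the issue (assumes the patched `parsers.lib.quality` module is importable):

```python
import datetime

from parsers.lib.quality import ValidationError, validate_production

bad = {
    'zoneKey': 'CL-SIC',
    'datetime': datetime.datetime(18, 2, 18, 6, 43),  # year 18, not 2018
    'production': {'coal': 2206.6, 'hydro': 2416.4, 'wind': 309.3},
}
try:
    validate_production(bad, 'CL-SIC')
except ValidationError as exc:
    print(exc)  # Data from CL-SIC can't be before year 2000, it was ...
```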
gh_patches_debug_10020 | rasdani/github-patches | git_diff | onnx__sklearn-onnx-440 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SelectFromModel converted model with threshold such that no features selected give error
A `SelectFromModel` model in scikit-learn with a threshold such that no features are selected returns an array of shape (M, 0), where M is the number of test instances; the ONNX converter, however, can't handle it.
RuntimeError: [ONNXRuntimeError] : 1 : GENERAL ERROR : Load model from pca1.onnx failed:[ShapeInferenceError] Shape input must be a one-dimensional tensor.
</issue>
<code>
[start of skl2onnx/operator_converters/feature_selection.py]
1 # -------------------------------------------------------------------------
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the MIT License. See License.txt in the project root for
4 # license information.
5 # --------------------------------------------------------------------------
6
7 from ..proto import onnx_proto
8 from ..common._apply_operation import apply_cast
9 from ..common._registration import register_converter
10 from ..common.data_types import FloatTensorType, FloatType
11
12
13 def convert_sklearn_feature_selection(scope, operator, container):
14 op = operator.raw_operator
15 # Get indices of the features selected
16 index = op.get_support(indices=True)
17 needs_cast = not isinstance(operator.inputs[0].type,
18 (FloatTensorType, FloatType))
19 if needs_cast:
20 output_name = scope.get_unique_variable_name('output')
21 else:
22 output_name = operator.outputs[0].full_name
23
24 if index.any():
25 column_indices_name = scope.get_unique_variable_name('column_indices')
26
27 container.add_initializer(column_indices_name,
28 onnx_proto.TensorProto.INT64,
29 [len(index)], index)
30
31 container.add_node(
32 'ArrayFeatureExtractor',
33 [operator.inputs[0].full_name, column_indices_name],
34 output_name, op_domain='ai.onnx.ml',
35 name=scope.get_unique_operator_name('ArrayFeatureExtractor'))
36 else:
37 container.add_node('ConstantOfShape', operator.inputs[0].full_name,
38 output_name, op_version=9)
39 if needs_cast:
40 apply_cast(scope, output_name, operator.outputs[0].full_name,
41 container, to=onnx_proto.TensorProto.FLOAT)
42
43
44 register_converter('SklearnGenericUnivariateSelect',
45 convert_sklearn_feature_selection)
46 register_converter('SklearnRFE', convert_sklearn_feature_selection)
47 register_converter('SklearnRFECV', convert_sklearn_feature_selection)
48 register_converter('SklearnSelectFdr', convert_sklearn_feature_selection)
49 register_converter('SklearnSelectFpr', convert_sklearn_feature_selection)
50 register_converter('SklearnSelectFromModel', convert_sklearn_feature_selection)
51 register_converter('SklearnSelectFwe', convert_sklearn_feature_selection)
52 register_converter('SklearnSelectKBest', convert_sklearn_feature_selection)
53 register_converter('SklearnSelectPercentile',
54 convert_sklearn_feature_selection)
55 register_converter('SklearnVarianceThreshold',
56 convert_sklearn_feature_selection)
57
[end of skl2onnx/operator_converters/feature_selection.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/skl2onnx/operator_converters/feature_selection.py b/skl2onnx/operator_converters/feature_selection.py
--- a/skl2onnx/operator_converters/feature_selection.py
+++ b/skl2onnx/operator_converters/feature_selection.py
@@ -14,6 +14,11 @@
op = operator.raw_operator
# Get indices of the features selected
index = op.get_support(indices=True)
+ if len(index) == 0:
+ raise RuntimeError(
+ "Model '{}' did not select any feature. "
+ "This model cannot be converted into ONNX."
+ "".format(op.__class__.__name__))
needs_cast = not isinstance(operator.inputs[0].type,
(FloatTensorType, FloatType))
if needs_cast:
| {"golden_diff": "diff --git a/skl2onnx/operator_converters/feature_selection.py b/skl2onnx/operator_converters/feature_selection.py\n--- a/skl2onnx/operator_converters/feature_selection.py\n+++ b/skl2onnx/operator_converters/feature_selection.py\n@@ -14,6 +14,11 @@\n op = operator.raw_operator\n # Get indices of the features selected\n index = op.get_support(indices=True)\n+ if len(index) == 0:\n+ raise RuntimeError(\n+ \"Model '{}' did not select any feature. \"\n+ \"This model cannot be converted into ONNX.\"\n+ \"\".format(op.__class__.__name__))\n needs_cast = not isinstance(operator.inputs[0].type,\n (FloatTensorType, FloatType))\n if needs_cast:\n", "issue": "SelectFromModel converted model with threshold such that no features selected give error\nSelectFromModel model in Scikit with threshold such that no features are selected returns an array of shape (M, 0), where M is the number of test instances, onnx converter however can't handle it.\r\n\r\nRuntimeError: [ONNXRuntimeError] : 1 : GENERAL ERROR : Load model from pca1.onnx failed:[ShapeInferenceError] Shape input must be a one-dimensional tensor. \n", "before_files": [{"content": "# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\n\nfrom ..proto import onnx_proto\nfrom ..common._apply_operation import apply_cast\nfrom ..common._registration import register_converter\nfrom ..common.data_types import FloatTensorType, FloatType\n\n\ndef convert_sklearn_feature_selection(scope, operator, container):\n op = operator.raw_operator\n # Get indices of the features selected\n index = op.get_support(indices=True)\n needs_cast = not isinstance(operator.inputs[0].type,\n (FloatTensorType, FloatType))\n if needs_cast:\n output_name = scope.get_unique_variable_name('output')\n else:\n output_name = operator.outputs[0].full_name\n\n if index.any():\n column_indices_name = scope.get_unique_variable_name('column_indices')\n\n container.add_initializer(column_indices_name,\n onnx_proto.TensorProto.INT64,\n [len(index)], index)\n\n container.add_node(\n 'ArrayFeatureExtractor',\n [operator.inputs[0].full_name, column_indices_name],\n output_name, op_domain='ai.onnx.ml',\n name=scope.get_unique_operator_name('ArrayFeatureExtractor'))\n else:\n container.add_node('ConstantOfShape', operator.inputs[0].full_name,\n output_name, op_version=9)\n if needs_cast:\n apply_cast(scope, output_name, operator.outputs[0].full_name,\n container, to=onnx_proto.TensorProto.FLOAT)\n\n\nregister_converter('SklearnGenericUnivariateSelect',\n convert_sklearn_feature_selection)\nregister_converter('SklearnRFE', convert_sklearn_feature_selection)\nregister_converter('SklearnRFECV', convert_sklearn_feature_selection)\nregister_converter('SklearnSelectFdr', convert_sklearn_feature_selection)\nregister_converter('SklearnSelectFpr', convert_sklearn_feature_selection)\nregister_converter('SklearnSelectFromModel', convert_sklearn_feature_selection)\nregister_converter('SklearnSelectFwe', convert_sklearn_feature_selection)\nregister_converter('SklearnSelectKBest', convert_sklearn_feature_selection)\nregister_converter('SklearnSelectPercentile',\n convert_sklearn_feature_selection)\nregister_converter('SklearnVarianceThreshold',\n convert_sklearn_feature_selection)\n", "path": 
"skl2onnx/operator_converters/feature_selection.py"}]} | 1,236 | 175 |
gh_patches_debug_42030 | rasdani/github-patches | git_diff | ManageIQ__integration_tests-3352 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Check if we have fallback if no trackerbot URL specified
We had some issues where people with no trackerbot URL in the config were not able to run tests:
- Investigate what is happening
- Add fallback (do not check templates at all? Load them from all providers?)
</issue>
<code>
[start of fixtures/provider.py]
1 """``setup_provider`` fixture
2
3 In test modules paramatrized with :py:func:`utils.testgen.provider_by_type` (should be
4 just about any module that needs a provider to run its tests), this fixture will set up
5 the single provider needed to run that test.
6
7 If the provider setup fails, this fixture will record that failure and skip future tests
8 using the provider.
9
10 """
11 import pytest
12
13 from fixtures.artifactor_plugin import art_client, get_test_idents
14 from fixtures.templateloader import TEMPLATES
15 from utils import providers
16 from utils.log import logger
17
18 # failed provider tracking for _setup_provider_fixture
19 _failed_providers = set()
20
21
22 def _setup_provider(provider_key, request=None):
23 def skip(provider_key, previous_fail=False):
24 if request:
25 node = request.node
26 name, location = get_test_idents(node)
27 skip_data = {'type': 'provider', 'reason': provider_key}
28 art_client.fire_hook('skip_test', test_location=location, test_name=name,
29 skip_data=skip_data)
30 if previous_fail:
31 raise pytest.skip('Provider {} failed to set up previously in another test, '
32 'skipping test'.format(provider_key))
33 else:
34 raise pytest.skip('Provider {} failed to set up this time, '
35 'skipping test'.format(provider_key))
36 # This function is dynamically "fixturized" to setup up a specific provider,
37 # optionally skipping the provider setup if that provider has previously failed.
38 if provider_key in _failed_providers:
39 skip(provider_key, previous_fail=True)
40
41 try:
42 providers.setup_provider(provider_key)
43 except Exception as ex:
44 logger.error('Error setting up provider {}'.format(provider_key))
45 logger.exception(ex)
46 _failed_providers.add(provider_key)
47 skip(provider_key)
48
49
50 @pytest.fixture(scope='function')
51 def setup_provider(request, provider):
52 """Function-scoped fixture to set up a provider"""
53 _setup_provider(provider.key, request)
54
55
56 @pytest.fixture(scope='module')
57 def setup_provider_modscope(request, provider):
58 """Function-scoped fixture to set up a provider"""
59 _setup_provider(provider.key, request)
60
61
62 @pytest.fixture(scope='class')
63 def setup_provider_clsscope(request, provider):
64 """Module-scoped fixture to set up a provider"""
65 _setup_provider(provider.key, request)
66
67
68 @pytest.fixture
69 def setup_provider_funcscope(request, provider):
70 """Function-scoped fixture to set up a provider
71
72 Note:
73
74 While there are cases where this is useful, provider fixtures should
75 be module-scoped the majority of the time.
76
77 """
78 _setup_provider(provider.key, request)
79
80
81 @pytest.fixture(scope="session")
82 def any_provider_session():
83 providers.clear_providers() # To make it clean
84 providers.setup_a_provider(validate=True, check_existing=True)
85
86
87 @pytest.fixture(scope="function")
88 def template(template_location, provider):
89 if template_location is not None:
90 o = provider.data
91 try:
92 for field in template_location:
93 o = o[field]
94 except (IndexError, KeyError):
95 logger.info("Cannot apply %s to %s in the template specification, ignoring.",
96 repr(field), repr(o))
97 else:
98 if not isinstance(o, basestring):
99 raise ValueError("{} is not a string! (for template)".format(repr(o)))
100 templates = TEMPLATES.get(provider.key, None)
101 if templates is not None:
102 if o in templates:
103 return o
104 logger.info(
105 "Wanted template {} on {} but it is not there!\n".format(o, provider.key))
106 pytest.skip('Template not available')
107
108
109 def _small_template(provider):
110 template = provider.data.get('small_template', None)
111 if template:
112 templates = TEMPLATES.get(provider.key, None)
113 if templates is not None:
114 if template in templates:
115 return template
116 logger.info(
117 "Wanted template {} on {} but it is not there!\n".format(template, provider.key))
118 pytest.skip('Template not available')
119
120
121 @pytest.fixture(scope="function")
122 def small_template(provider):
123 return _small_template(provider)
124
125
126 @pytest.fixture(scope="module")
127 def small_template_modscope(provider):
128 return _small_template(provider)
129
130
131 @pytest.fixture(scope="function")
132 def full_template(provider):
133 template = provider.data.get('full_template', {})
134 if template:
135 templates = TEMPLATES.get(provider.key, None)
136 if templates is not None:
137 if template['name'] in templates:
138 return template
139 logger.info(
140 "Wanted template {} on {} but it is not there!\n".format(template, provider.key))
141 pytest.skip('Template not available')
142
143
144 @pytest.fixture(scope="function")
145 def provisioning(provider):
146 return provider.data['provisioning']
147
[end of fixtures/provider.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
diff --git a/fixtures/provider.py b/fixtures/provider.py
--- a/fixtures/provider.py
+++ b/fixtures/provider.py
@@ -9,6 +9,7 @@
"""
import pytest
+import six
from fixtures.artifactor_plugin import art_client, get_test_idents
from fixtures.templateloader import TEMPLATES
@@ -41,7 +42,7 @@
try:
providers.setup_provider(provider_key)
except Exception as ex:
- logger.error('Error setting up provider {}'.format(provider_key))
+ logger.error('Error setting up provider %s', provider_key)
logger.exception(ex)
_failed_providers.add(provider_key)
skip(provider_key)
@@ -92,29 +93,35 @@
for field in template_location:
o = o[field]
except (IndexError, KeyError):
- logger.info("Cannot apply %s to %s in the template specification, ignoring.",
- repr(field), repr(o))
+ logger.info("Cannot apply %r to %r in the template specification, ignoring.", field, o)
else:
- if not isinstance(o, basestring):
- raise ValueError("{} is not a string! (for template)".format(repr(o)))
+ if not isinstance(o, six.string_types):
+ raise ValueError("{!r} is not a string! (for template)".format(o))
+ if not TEMPLATES:
+ # There is nothing in TEMPLATES, that means no trackerbot URL and no data pulled.
+ # This should normally not constitute an issue so continue.
+ return o
templates = TEMPLATES.get(provider.key, None)
if templates is not None:
if o in templates:
return o
- logger.info(
- "Wanted template {} on {} but it is not there!\n".format(o, provider.key))
+ logger.info("Wanted template %s on %s but it is not there!", o, provider.key)
pytest.skip('Template not available')
def _small_template(provider):
template = provider.data.get('small_template', None)
if template:
+ if not TEMPLATES:
+ # Same as couple of lines above
+ return template
templates = TEMPLATES.get(provider.key, None)
if templates is not None:
if template in templates:
return template
- logger.info(
- "Wanted template {} on {} but it is not there!\n".format(template, provider.key))
+ else:
+ pytest.skip('No small_template for provider {}'.format(provider.key))
+ logger.info("Wanted template %s on %s but it is not there!", template, provider.key)
pytest.skip('Template not available')
@@ -132,12 +139,16 @@
def full_template(provider):
template = provider.data.get('full_template', {})
if template:
+ if not TEMPLATES:
+ # Same as couple of lines above
+ return template
templates = TEMPLATES.get(provider.key, None)
if templates is not None:
if template['name'] in templates:
return template
- logger.info(
- "Wanted template {} on {} but it is not there!\n".format(template, provider.key))
+ else:
+ pytest.skip('No full_template for provider {}'.format(provider.key))
+ logger.info("Wanted template %s on %s but it is not there!", template, provider.key)
pytest.skip('Template not available')
| {"golden_diff": "diff --git a/fixtures/provider.py b/fixtures/provider.py\n--- a/fixtures/provider.py\n+++ b/fixtures/provider.py\n@@ -9,6 +9,7 @@\n \n \"\"\"\n import pytest\n+import six\n \n from fixtures.artifactor_plugin import art_client, get_test_idents\n from fixtures.templateloader import TEMPLATES\n@@ -41,7 +42,7 @@\n try:\n providers.setup_provider(provider_key)\n except Exception as ex:\n- logger.error('Error setting up provider {}'.format(provider_key))\n+ logger.error('Error setting up provider %s', provider_key)\n logger.exception(ex)\n _failed_providers.add(provider_key)\n skip(provider_key)\n@@ -92,29 +93,35 @@\n for field in template_location:\n o = o[field]\n except (IndexError, KeyError):\n- logger.info(\"Cannot apply %s to %s in the template specification, ignoring.\",\n- repr(field), repr(o))\n+ logger.info(\"Cannot apply %r to %r in the template specification, ignoring.\", field, o)\n else:\n- if not isinstance(o, basestring):\n- raise ValueError(\"{} is not a string! (for template)\".format(repr(o)))\n+ if not isinstance(o, six.string_types):\n+ raise ValueError(\"{!r} is not a string! (for template)\".format(o))\n+ if not TEMPLATES:\n+ # There is nothing in TEMPLATES, that means no trackerbot URL and no data pulled.\n+ # This should normally not constitute an issue so continue.\n+ return o\n templates = TEMPLATES.get(provider.key, None)\n if templates is not None:\n if o in templates:\n return o\n- logger.info(\n- \"Wanted template {} on {} but it is not there!\\n\".format(o, provider.key))\n+ logger.info(\"Wanted template %s on %s but it is not there!\", o, provider.key)\n pytest.skip('Template not available')\n \n \n def _small_template(provider):\n template = provider.data.get('small_template', None)\n if template:\n+ if not TEMPLATES:\n+ # Same as couple of lines above\n+ return template\n templates = TEMPLATES.get(provider.key, None)\n if templates is not None:\n if template in templates:\n return template\n- logger.info(\n- \"Wanted template {} on {} but it is not there!\\n\".format(template, provider.key))\n+ else:\n+ pytest.skip('No small_template for provider {}'.format(provider.key))\n+ logger.info(\"Wanted template %s on %s but it is not there!\", template, provider.key)\n pytest.skip('Template not available')\n \n \n@@ -132,12 +139,16 @@\n def full_template(provider):\n template = provider.data.get('full_template', {})\n if template:\n+ if not TEMPLATES:\n+ # Same as couple of lines above\n+ return template\n templates = TEMPLATES.get(provider.key, None)\n if templates is not None:\n if template['name'] in templates:\n return template\n- logger.info(\n- \"Wanted template {} on {} but it is not there!\\n\".format(template, provider.key))\n+ else:\n+ pytest.skip('No full_template for provider {}'.format(provider.key))\n+ logger.info(\"Wanted template %s on %s but it is not there!\", template, provider.key)\n pytest.skip('Template not available')\n", "issue": "Check if we have fallback if no trackerbot URL specified\nWe had some issues htat people with no trackerbot URL in config were not able to run test:\n- Investigate what is happening\n- Add fallback (do not check templates at all? 
Load them from all providers?)\n\n", "before_files": [{"content": "\"\"\"``setup_provider`` fixture\n\nIn test modules paramatrized with :py:func:`utils.testgen.provider_by_type` (should be\njust about any module that needs a provider to run its tests), this fixture will set up\nthe single provider needed to run that test.\n\nIf the provider setup fails, this fixture will record that failure and skip future tests\nusing the provider.\n\n\"\"\"\nimport pytest\n\nfrom fixtures.artifactor_plugin import art_client, get_test_idents\nfrom fixtures.templateloader import TEMPLATES\nfrom utils import providers\nfrom utils.log import logger\n\n# failed provider tracking for _setup_provider_fixture\n_failed_providers = set()\n\n\ndef _setup_provider(provider_key, request=None):\n def skip(provider_key, previous_fail=False):\n if request:\n node = request.node\n name, location = get_test_idents(node)\n skip_data = {'type': 'provider', 'reason': provider_key}\n art_client.fire_hook('skip_test', test_location=location, test_name=name,\n skip_data=skip_data)\n if previous_fail:\n raise pytest.skip('Provider {} failed to set up previously in another test, '\n 'skipping test'.format(provider_key))\n else:\n raise pytest.skip('Provider {} failed to set up this time, '\n 'skipping test'.format(provider_key))\n # This function is dynamically \"fixturized\" to setup up a specific provider,\n # optionally skipping the provider setup if that provider has previously failed.\n if provider_key in _failed_providers:\n skip(provider_key, previous_fail=True)\n\n try:\n providers.setup_provider(provider_key)\n except Exception as ex:\n logger.error('Error setting up provider {}'.format(provider_key))\n logger.exception(ex)\n _failed_providers.add(provider_key)\n skip(provider_key)\n\n\[email protected](scope='function')\ndef setup_provider(request, provider):\n \"\"\"Function-scoped fixture to set up a provider\"\"\"\n _setup_provider(provider.key, request)\n\n\[email protected](scope='module')\ndef setup_provider_modscope(request, provider):\n \"\"\"Function-scoped fixture to set up a provider\"\"\"\n _setup_provider(provider.key, request)\n\n\[email protected](scope='class')\ndef setup_provider_clsscope(request, provider):\n \"\"\"Module-scoped fixture to set up a provider\"\"\"\n _setup_provider(provider.key, request)\n\n\[email protected]\ndef setup_provider_funcscope(request, provider):\n \"\"\"Function-scoped fixture to set up a provider\n\n Note:\n\n While there are cases where this is useful, provider fixtures should\n be module-scoped the majority of the time.\n\n \"\"\"\n _setup_provider(provider.key, request)\n\n\[email protected](scope=\"session\")\ndef any_provider_session():\n providers.clear_providers() # To make it clean\n providers.setup_a_provider(validate=True, check_existing=True)\n\n\[email protected](scope=\"function\")\ndef template(template_location, provider):\n if template_location is not None:\n o = provider.data\n try:\n for field in template_location:\n o = o[field]\n except (IndexError, KeyError):\n logger.info(\"Cannot apply %s to %s in the template specification, ignoring.\",\n repr(field), repr(o))\n else:\n if not isinstance(o, basestring):\n raise ValueError(\"{} is not a string! 
(for template)\".format(repr(o)))\n templates = TEMPLATES.get(provider.key, None)\n if templates is not None:\n if o in templates:\n return o\n logger.info(\n \"Wanted template {} on {} but it is not there!\\n\".format(o, provider.key))\n pytest.skip('Template not available')\n\n\ndef _small_template(provider):\n template = provider.data.get('small_template', None)\n if template:\n templates = TEMPLATES.get(provider.key, None)\n if templates is not None:\n if template in templates:\n return template\n logger.info(\n \"Wanted template {} on {} but it is not there!\\n\".format(template, provider.key))\n pytest.skip('Template not available')\n\n\[email protected](scope=\"function\")\ndef small_template(provider):\n return _small_template(provider)\n\n\[email protected](scope=\"module\")\ndef small_template_modscope(provider):\n return _small_template(provider)\n\n\[email protected](scope=\"function\")\ndef full_template(provider):\n template = provider.data.get('full_template', {})\n if template:\n templates = TEMPLATES.get(provider.key, None)\n if templates is not None:\n if template['name'] in templates:\n return template\n logger.info(\n \"Wanted template {} on {} but it is not there!\\n\".format(template, provider.key))\n pytest.skip('Template not available')\n\n\[email protected](scope=\"function\")\ndef provisioning(provider):\n return provider.data['provisioning']\n", "path": "fixtures/provider.py"}]} | 1,954 | 757 |
gh_patches_debug_12356 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-2514 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
UX and frontend implementation for upgrades
### Notes
- [Current Figma design](https://www.figma.com/file/xHb5oIqye3fnXtb2heRH34/Styling?node-id=3804%3A28864&t=HyNupYmgZ9PqjEGr-0)
- [Issue with user flow](https://github.com/centerofci/mathesar/issues/227)
### Tasks
- Finalize the user flow
- Request Figma UX changes if needed
- Implement the frontend (create additional GitHub issues if needed)
### Feasibility
Watchtower, our docker image upgrade backend, doesn't report progress. The only progress reporting available to the frontend will be periodically calling some HTTP endpoint on the service container to check whether it is online or not.
The final UX should take this into account.
</issue>
<code>
[start of mathesar/urls.py]
1 from django.contrib.auth.views import LoginView
2 from django.urls import include, path, re_path
3 from rest_framework_nested import routers
4
5 from mathesar import views
6 from mathesar.api.db import viewsets as db_viewsets
7 from mathesar.api.ui import viewsets as ui_viewsets
8 from mathesar.users.password_reset import MathesarPasswordResetConfirmView
9
10 db_router = routers.DefaultRouter()
11 db_router.register(r'tables', db_viewsets.TableViewSet, basename='table')
12 db_router.register(r'queries', db_viewsets.QueryViewSet, basename='query')
13 db_router.register(r'links', db_viewsets.LinkViewSet, basename='links')
14 db_router.register(r'schemas', db_viewsets.SchemaViewSet, basename='schema')
15 db_router.register(r'databases', db_viewsets.DatabaseViewSet, basename='database')
16 db_router.register(r'data_files', db_viewsets.DataFileViewSet, basename='data-file')
17
18 db_table_router = routers.NestedSimpleRouter(db_router, r'tables', lookup='table')
19 db_table_router.register(r'records', db_viewsets.RecordViewSet, basename='table-record')
20 db_table_router.register(r'settings', db_viewsets.TableSettingsViewSet, basename='table-setting')
21 db_table_router.register(r'columns', db_viewsets.ColumnViewSet, basename='table-column')
22 db_table_router.register(r'constraints', db_viewsets.ConstraintViewSet, basename='table-constraint')
23
24 ui_router = routers.DefaultRouter()
25 ui_router.register(r'version', ui_viewsets.VersionViewSet, basename='version')
26 ui_router.register(r'databases', ui_viewsets.DatabaseViewSet, basename='database')
27 ui_router.register(r'users', ui_viewsets.UserViewSet, basename='user')
28 ui_router.register(r'database_roles', ui_viewsets.DatabaseRoleViewSet, basename='database_role')
29 ui_router.register(r'schema_roles', ui_viewsets.SchemaRoleViewSet, basename='schema_role')
30
31 urlpatterns = [
32 path('api/db/v0/', include(db_router.urls)),
33 path('api/db/v0/', include(db_table_router.urls)),
34 path('api/ui/v0/', include(ui_router.urls)),
35 path('api/ui/v0/reflect/', views.reflect_all, name='reflect_all'),
36 path('auth/password_reset_confirm', MathesarPasswordResetConfirmView.as_view(), name='password_reset_confirm'),
37 path('auth/login/', LoginView.as_view(redirect_authenticated_user=True), name='login'),
38 path('auth/', include('django.contrib.auth.urls')),
39 path('', views.home, name='home'),
40 path('profile/', views.profile, name='profile'),
41 path('administration/', views.admin_home, name='admin_home'),
42 path('administration/users/', views.admin_home, name='admin_users_home'),
43 path('administration/users/<user_id>/', views.admin_home, name='admin_users_edit'),
44 path('administration/general/', views.admin_home, name='admin_general'),
45 path('<db_name>/', views.schemas, name='schemas'),
46 re_path(
47 r'^(?P<db_name>\w+)/(?P<schema_id>\w+)/',
48 views.schema_home,
49 name='schema_home'
50 ),
51 ]
52
[end of mathesar/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mathesar/urls.py b/mathesar/urls.py
--- a/mathesar/urls.py
+++ b/mathesar/urls.py
@@ -41,7 +41,7 @@
path('administration/', views.admin_home, name='admin_home'),
path('administration/users/', views.admin_home, name='admin_users_home'),
path('administration/users/<user_id>/', views.admin_home, name='admin_users_edit'),
- path('administration/general/', views.admin_home, name='admin_general'),
+ path('administration/update/', views.admin_home, name='admin_update'),
path('<db_name>/', views.schemas, name='schemas'),
re_path(
r'^(?P<db_name>\w+)/(?P<schema_id>\w+)/',
| {"golden_diff": "diff --git a/mathesar/urls.py b/mathesar/urls.py\n--- a/mathesar/urls.py\n+++ b/mathesar/urls.py\n@@ -41,7 +41,7 @@\n path('administration/', views.admin_home, name='admin_home'),\n path('administration/users/', views.admin_home, name='admin_users_home'),\n path('administration/users/<user_id>/', views.admin_home, name='admin_users_edit'),\n- path('administration/general/', views.admin_home, name='admin_general'),\n+ path('administration/update/', views.admin_home, name='admin_update'),\n path('<db_name>/', views.schemas, name='schemas'),\n re_path(\n r'^(?P<db_name>\\w+)/(?P<schema_id>\\w+)/',\n", "issue": "UX and frontend implementation for upgrades\n### Notes\r\n- [Current Figma design](https://www.figma.com/file/xHb5oIqye3fnXtb2heRH34/Styling?node-id=3804%3A28864&t=HyNupYmgZ9PqjEGr-0)\r\n- [Issue with user flow](https://github.com/centerofci/mathesar/issues/227)\r\n\r\n### Tasks\r\n- Finalize the user flow\r\n- Request Figma UX changes if needed\r\n- Implement the frontend (create additional GitHub issues if needed)\r\n\r\n### Feasibility \r\nWatchtower, our docker image upgrade backend, doesn't report progress. The only progress reporting available to the frontend will be periodically calling some HTTP endpoint on the service container to check whether it is online or not.\r\n\r\nThe final UX should take this into account.\n", "before_files": [{"content": "from django.contrib.auth.views import LoginView\nfrom django.urls import include, path, re_path\nfrom rest_framework_nested import routers\n\nfrom mathesar import views\nfrom mathesar.api.db import viewsets as db_viewsets\nfrom mathesar.api.ui import viewsets as ui_viewsets\nfrom mathesar.users.password_reset import MathesarPasswordResetConfirmView\n\ndb_router = routers.DefaultRouter()\ndb_router.register(r'tables', db_viewsets.TableViewSet, basename='table')\ndb_router.register(r'queries', db_viewsets.QueryViewSet, basename='query')\ndb_router.register(r'links', db_viewsets.LinkViewSet, basename='links')\ndb_router.register(r'schemas', db_viewsets.SchemaViewSet, basename='schema')\ndb_router.register(r'databases', db_viewsets.DatabaseViewSet, basename='database')\ndb_router.register(r'data_files', db_viewsets.DataFileViewSet, basename='data-file')\n\ndb_table_router = routers.NestedSimpleRouter(db_router, r'tables', lookup='table')\ndb_table_router.register(r'records', db_viewsets.RecordViewSet, basename='table-record')\ndb_table_router.register(r'settings', db_viewsets.TableSettingsViewSet, basename='table-setting')\ndb_table_router.register(r'columns', db_viewsets.ColumnViewSet, basename='table-column')\ndb_table_router.register(r'constraints', db_viewsets.ConstraintViewSet, basename='table-constraint')\n\nui_router = routers.DefaultRouter()\nui_router.register(r'version', ui_viewsets.VersionViewSet, basename='version')\nui_router.register(r'databases', ui_viewsets.DatabaseViewSet, basename='database')\nui_router.register(r'users', ui_viewsets.UserViewSet, basename='user')\nui_router.register(r'database_roles', ui_viewsets.DatabaseRoleViewSet, basename='database_role')\nui_router.register(r'schema_roles', ui_viewsets.SchemaRoleViewSet, basename='schema_role')\n\nurlpatterns = [\n path('api/db/v0/', include(db_router.urls)),\n path('api/db/v0/', include(db_table_router.urls)),\n path('api/ui/v0/', include(ui_router.urls)),\n path('api/ui/v0/reflect/', views.reflect_all, name='reflect_all'),\n path('auth/password_reset_confirm', MathesarPasswordResetConfirmView.as_view(), name='password_reset_confirm'),\n 
path('auth/login/', LoginView.as_view(redirect_authenticated_user=True), name='login'),\n path('auth/', include('django.contrib.auth.urls')),\n path('', views.home, name='home'),\n path('profile/', views.profile, name='profile'),\n path('administration/', views.admin_home, name='admin_home'),\n path('administration/users/', views.admin_home, name='admin_users_home'),\n path('administration/users/<user_id>/', views.admin_home, name='admin_users_edit'),\n path('administration/general/', views.admin_home, name='admin_general'),\n path('<db_name>/', views.schemas, name='schemas'),\n re_path(\n r'^(?P<db_name>\\w+)/(?P<schema_id>\\w+)/',\n views.schema_home,\n name='schema_home'\n ),\n]\n", "path": "mathesar/urls.py"}]} | 1,463 | 166 |
gh_patches_debug_19852 | rasdani/github-patches | git_diff | open-mmlab__mmcv-256 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: Deadlock 'fix' introduced deadlock.
Since https://github.com/open-mmlab/mmcv/pull/252 is merged my mmdetection code hangs after evaluation. After reverting the specific commit `git revert c203419f57c2e25ab4307420b9a3688f99e01dea`, my code runs again as expected..
</issue>
<code>
[start of mmcv/runner/hooks/logger/text.py]
1 # Copyright (c) Open-MMLab. All rights reserved.
2 import datetime
3 import os.path as osp
4 from collections import OrderedDict
5
6 import torch
7 import torch.distributed as dist
8
9 import mmcv
10 from ..hook import HOOKS
11 from .base import LoggerHook
12
13
14 @HOOKS.register_module
15 class TextLoggerHook(LoggerHook):
16
17 def __init__(self, interval=10, ignore_last=True, reset_flag=False):
18 super(TextLoggerHook, self).__init__(interval, ignore_last, reset_flag)
19 self.time_sec_tot = 0
20
21 def before_run(self, runner):
22 super(TextLoggerHook, self).before_run(runner)
23 self.start_iter = runner.iter
24 self.json_log_path = osp.join(runner.work_dir,
25 f'{runner.timestamp}.log.json')
26 if runner.meta is not None:
27 self._dump_log(runner.meta, runner)
28
29 def _get_max_memory(self, runner):
30 mem = torch.cuda.max_memory_allocated()
31 mem_mb = torch.tensor([mem / (1024 * 1024)],
32 dtype=torch.int,
33 device=torch.device('cuda'))
34 if runner.world_size > 1:
35 dist.reduce(mem_mb, 0, op=dist.ReduceOp.MAX)
36 return mem_mb.item()
37
38 def _log_info(self, log_dict, runner):
39 if runner.mode == 'train':
40 log_str = f'Epoch [{log_dict["epoch"]}]' \
41 f'[{log_dict["iter"]}/{len(runner.data_loader)}]\t' \
42 f'lr: {log_dict["lr"]:.5f}, '
43 if 'time' in log_dict.keys():
44 self.time_sec_tot += (log_dict['time'] * self.interval)
45 time_sec_avg = self.time_sec_tot / (
46 runner.iter - self.start_iter + 1)
47 eta_sec = time_sec_avg * (runner.max_iters - runner.iter - 1)
48 eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
49 log_str += f'eta: {eta_str}, '
50 log_str += f'time: {log_dict["time"]:.3f}, ' \
51 f'data_time: {log_dict["data_time"]:.3f}, '
52 # statistic memory
53 if torch.cuda.is_available():
54 log_str += f'memory: {log_dict["memory"]}, '
55 else:
56 log_str = 'Epoch({log_dict["mode"]}) ' \
57 f'[{log_dict["epoch"] - 1}][{log_dict["iter"]}]\t'
58 log_items = []
59 for name, val in log_dict.items():
60 # TODO: resolve this hack
61 # these items have been in log_str
62 if name in [
63 'mode', 'Epoch', 'iter', 'lr', 'time', 'data_time',
64 'memory', 'epoch'
65 ]:
66 continue
67 if isinstance(val, float):
68 val = f'{val:.4f}'
69 log_items.append(f'{name}: {val}')
70 log_str += ', '.join(log_items)
71 runner.logger.info(log_str)
72
73 def _dump_log(self, log_dict, runner):
74 # dump log in json format
75 json_log = OrderedDict()
76 for k, v in log_dict.items():
77 json_log[k] = self._round_float(v)
78 # only append log at last line
79 if runner.rank == 0:
80 with open(self.json_log_path, 'a+') as f:
81 mmcv.dump(json_log, f, file_format='json')
82 f.write('\n')
83
84 def _round_float(self, items):
85 if isinstance(items, list):
86 return [self._round_float(item) for item in items]
87 elif isinstance(items, float):
88 return round(items, 5)
89 else:
90 return items
91
92 def log(self, runner):
93 log_dict = OrderedDict()
94 # training mode if the output contains the key "time"
95 mode = 'train' if 'time' in runner.log_buffer.output else 'val'
96 log_dict['mode'] = mode
97 log_dict['epoch'] = runner.epoch + 1
98 log_dict['iter'] = runner.inner_iter + 1
99 # only record lr of the first param group
100 log_dict['lr'] = runner.current_lr()[0]
101 memory = None
102 if torch.cuda.is_available():
103 memory = self._get_max_memory(runner)
104 if mode == 'train':
105 log_dict['time'] = runner.log_buffer.output['time']
106 log_dict['data_time'] = runner.log_buffer.output['data_time']
107
108 # statistic memory
109 if memory is not None:
110 log_dict['memory'] = memory
111 for name, val in runner.log_buffer.output.items():
112 if name in ['time', 'data_time']:
113 continue
114 log_dict[name] = val
115
116 self._log_info(log_dict, runner)
117 self._dump_log(log_dict, runner)
118
[end of mmcv/runner/hooks/logger/text.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mmcv/runner/hooks/logger/text.py b/mmcv/runner/hooks/logger/text.py
--- a/mmcv/runner/hooks/logger/text.py
+++ b/mmcv/runner/hooks/logger/text.py
@@ -98,16 +98,13 @@
log_dict['iter'] = runner.inner_iter + 1
# only record lr of the first param group
log_dict['lr'] = runner.current_lr()[0]
- memory = None
- if torch.cuda.is_available():
- memory = self._get_max_memory(runner)
if mode == 'train':
log_dict['time'] = runner.log_buffer.output['time']
log_dict['data_time'] = runner.log_buffer.output['data_time']
# statistic memory
- if memory is not None:
- log_dict['memory'] = memory
+ if torch.cuda.is_available():
+ log_dict['memory'] = self._get_max_memory(runner)
for name, val in runner.log_buffer.output.items():
if name in ['time', 'data_time']:
continue
| {"golden_diff": "diff --git a/mmcv/runner/hooks/logger/text.py b/mmcv/runner/hooks/logger/text.py\n--- a/mmcv/runner/hooks/logger/text.py\n+++ b/mmcv/runner/hooks/logger/text.py\n@@ -98,16 +98,13 @@\n log_dict['iter'] = runner.inner_iter + 1\n # only record lr of the first param group\n log_dict['lr'] = runner.current_lr()[0]\n- memory = None\n- if torch.cuda.is_available():\n- memory = self._get_max_memory(runner)\n if mode == 'train':\n log_dict['time'] = runner.log_buffer.output['time']\n log_dict['data_time'] = runner.log_buffer.output['data_time']\n \n # statistic memory\n- if memory is not None:\n- log_dict['memory'] = memory\n+ if torch.cuda.is_available():\n+ log_dict['memory'] = self._get_max_memory(runner)\n for name, val in runner.log_buffer.output.items():\n if name in ['time', 'data_time']:\n continue\n", "issue": "Bug: Deadlock 'fix' introduced deadlock.\nSince https://github.com/open-mmlab/mmcv/pull/252 is merged my mmdetection code hangs after evaluation. After reverting the specific commit `git revert c203419f57c2e25ab4307420b9a3688f99e01dea`, my code runs again as expected..\n", "before_files": [{"content": "# Copyright (c) Open-MMLab. All rights reserved.\nimport datetime\nimport os.path as osp\nfrom collections import OrderedDict\n\nimport torch\nimport torch.distributed as dist\n\nimport mmcv\nfrom ..hook import HOOKS\nfrom .base import LoggerHook\n\n\[email protected]_module\nclass TextLoggerHook(LoggerHook):\n\n def __init__(self, interval=10, ignore_last=True, reset_flag=False):\n super(TextLoggerHook, self).__init__(interval, ignore_last, reset_flag)\n self.time_sec_tot = 0\n\n def before_run(self, runner):\n super(TextLoggerHook, self).before_run(runner)\n self.start_iter = runner.iter\n self.json_log_path = osp.join(runner.work_dir,\n f'{runner.timestamp}.log.json')\n if runner.meta is not None:\n self._dump_log(runner.meta, runner)\n\n def _get_max_memory(self, runner):\n mem = torch.cuda.max_memory_allocated()\n mem_mb = torch.tensor([mem / (1024 * 1024)],\n dtype=torch.int,\n device=torch.device('cuda'))\n if runner.world_size > 1:\n dist.reduce(mem_mb, 0, op=dist.ReduceOp.MAX)\n return mem_mb.item()\n\n def _log_info(self, log_dict, runner):\n if runner.mode == 'train':\n log_str = f'Epoch [{log_dict[\"epoch\"]}]' \\\n f'[{log_dict[\"iter\"]}/{len(runner.data_loader)}]\\t' \\\n f'lr: {log_dict[\"lr\"]:.5f}, '\n if 'time' in log_dict.keys():\n self.time_sec_tot += (log_dict['time'] * self.interval)\n time_sec_avg = self.time_sec_tot / (\n runner.iter - self.start_iter + 1)\n eta_sec = time_sec_avg * (runner.max_iters - runner.iter - 1)\n eta_str = str(datetime.timedelta(seconds=int(eta_sec)))\n log_str += f'eta: {eta_str}, '\n log_str += f'time: {log_dict[\"time\"]:.3f}, ' \\\n f'data_time: {log_dict[\"data_time\"]:.3f}, '\n # statistic memory\n if torch.cuda.is_available():\n log_str += f'memory: {log_dict[\"memory\"]}, '\n else:\n log_str = 'Epoch({log_dict[\"mode\"]}) ' \\\n f'[{log_dict[\"epoch\"] - 1}][{log_dict[\"iter\"]}]\\t'\n log_items = []\n for name, val in log_dict.items():\n # TODO: resolve this hack\n # these items have been in log_str\n if name in [\n 'mode', 'Epoch', 'iter', 'lr', 'time', 'data_time',\n 'memory', 'epoch'\n ]:\n continue\n if isinstance(val, float):\n val = f'{val:.4f}'\n log_items.append(f'{name}: {val}')\n log_str += ', '.join(log_items)\n runner.logger.info(log_str)\n\n def _dump_log(self, log_dict, runner):\n # dump log in json format\n json_log = OrderedDict()\n for k, v in log_dict.items():\n json_log[k] = 
self._round_float(v)\n # only append log at last line\n if runner.rank == 0:\n with open(self.json_log_path, 'a+') as f:\n mmcv.dump(json_log, f, file_format='json')\n f.write('\\n')\n\n def _round_float(self, items):\n if isinstance(items, list):\n return [self._round_float(item) for item in items]\n elif isinstance(items, float):\n return round(items, 5)\n else:\n return items\n\n def log(self, runner):\n log_dict = OrderedDict()\n # training mode if the output contains the key \"time\"\n mode = 'train' if 'time' in runner.log_buffer.output else 'val'\n log_dict['mode'] = mode\n log_dict['epoch'] = runner.epoch + 1\n log_dict['iter'] = runner.inner_iter + 1\n # only record lr of the first param group\n log_dict['lr'] = runner.current_lr()[0]\n memory = None\n if torch.cuda.is_available():\n memory = self._get_max_memory(runner)\n if mode == 'train':\n log_dict['time'] = runner.log_buffer.output['time']\n log_dict['data_time'] = runner.log_buffer.output['data_time']\n\n # statistic memory\n if memory is not None:\n log_dict['memory'] = memory\n for name, val in runner.log_buffer.output.items():\n if name in ['time', 'data_time']:\n continue\n log_dict[name] = val\n\n self._log_info(log_dict, runner)\n self._dump_log(log_dict, runner)\n", "path": "mmcv/runner/hooks/logger/text.py"}]} | 1,944 | 238 |
gh_patches_debug_21010 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-3169 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Condition Specified but with no condition passes linting but fails deploy
### CloudFormation Lint Version
0.83.1
### What operating system are you using?
mac/ubuntu
### Describe the bug
in a cfn template if you specify root level item `Conditions` but have no conditions this passes cfn-lint but always fails on deploy
### Expected behavior
cfn-lint should fail if there is a Conditions root level object but no array entries under it.
### Reproduction template
```
AWSTemplateFormatVersion: "2010-09-09"
Parameters:
myParam
Conditions:
Resources:
myTopic:
Type: AWS::SNS::Topic
Properties:
DisplayName: mytopic
TopicName: mytopic
```
</issue>
<code>
[start of src/cfnlint/rules/conditions/Configuration.py]
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5
6 from cfnlint.rules import CloudFormationLintRule, RuleMatch
7
8
9 class Configuration(CloudFormationLintRule):
10 """Check if Conditions are configured correctly"""
11
12 id = "E8001"
13 shortdesc = "Conditions have appropriate properties"
14 description = "Check if Conditions are properly configured"
15 source_url = "https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/conditions-section-structure.html"
16 tags = ["conditions"]
17
18 condition_keys = [
19 "Condition",
20 "Fn::And",
21 "Fn::Equals",
22 "Fn::Not",
23 "Fn::Or",
24 ]
25
26 def match(self, cfn):
27 matches = []
28
29 conditions = cfn.template.get("Conditions", {})
30 if conditions:
31 for condname, condobj in conditions.items():
32 if not isinstance(condobj, dict):
33 message = "Condition {0} has invalid property"
34 matches.append(
35 RuleMatch(["Conditions", condname], message.format(condname))
36 )
37 else:
38 if len(condobj) != 1:
39 message = "Condition {0} has too many intrinsic conditions"
40 matches.append(
41 RuleMatch(
42 ["Conditions", condname], message.format(condname)
43 )
44 )
45 else:
46 for k, _ in condobj.items():
47 if k not in self.condition_keys:
48 message = "Condition {0} has invalid property {1}"
49 matches.append(
50 RuleMatch(
51 ["Conditions", condname] + [k],
52 message.format(condname, k),
53 )
54 )
55
56 return matches
57
[end of src/cfnlint/rules/conditions/Configuration.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cfnlint/rules/conditions/Configuration.py b/src/cfnlint/rules/conditions/Configuration.py
--- a/src/cfnlint/rules/conditions/Configuration.py
+++ b/src/cfnlint/rules/conditions/Configuration.py
@@ -26,8 +26,10 @@
def match(self, cfn):
matches = []
- conditions = cfn.template.get("Conditions", {})
- if conditions:
+ if "Conditions" not in cfn.template:
+ return matches
+ conditions = cfn.template.get("Conditions", None)
+ if isinstance(conditions, dict):
for condname, condobj in conditions.items():
if not isinstance(condobj, dict):
message = "Condition {0} has invalid property"
@@ -52,5 +54,12 @@
message.format(condname, k),
)
)
+ else:
+ matches.append(
+ RuleMatch(
+ ["Conditions"],
+ "Condition must be an object",
+ )
+ )
return matches
| {"golden_diff": "diff --git a/src/cfnlint/rules/conditions/Configuration.py b/src/cfnlint/rules/conditions/Configuration.py\n--- a/src/cfnlint/rules/conditions/Configuration.py\n+++ b/src/cfnlint/rules/conditions/Configuration.py\n@@ -26,8 +26,10 @@\n def match(self, cfn):\n matches = []\n \n- conditions = cfn.template.get(\"Conditions\", {})\n- if conditions:\n+ if \"Conditions\" not in cfn.template:\n+ return matches\n+ conditions = cfn.template.get(\"Conditions\", None)\n+ if isinstance(conditions, dict):\n for condname, condobj in conditions.items():\n if not isinstance(condobj, dict):\n message = \"Condition {0} has invalid property\"\n@@ -52,5 +54,12 @@\n message.format(condname, k),\n )\n )\n+ else:\n+ matches.append(\n+ RuleMatch(\n+ [\"Conditions\"],\n+ \"Condition must be an object\",\n+ )\n+ )\n \n return matches\n", "issue": "Condition Specified but with no condition passes linting but fails deploy\n### CloudFormation Lint Version\r\n\r\n0.83.1\r\n\r\n### What operating system are you using?\r\n\r\nmac/ubuntu\r\n\r\n### Describe the bug\r\n\r\nin a cfn template if you specify root level item `Conditions` but have no conditions this passes cfn-lint but always fails on deploy\r\n\r\n### Expected behavior\r\n\r\ncfn-lint should fail if there is a Conditions root level object but no array entries under it.\r\n\r\n### Reproduction template\r\n\r\n```\r\nAWSTemplateFormatVersion: \"2010-09-09\"\r\nParameters:\r\n myParam\r\nConditions:\r\nResources:\r\n myTopic:\r\n Type: AWS::SNS::Topic\r\n Properties:\r\n DisplayName: mytopic\r\n TopicName: mytopic\r\n```\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\n\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass Configuration(CloudFormationLintRule):\n \"\"\"Check if Conditions are configured correctly\"\"\"\n\n id = \"E8001\"\n shortdesc = \"Conditions have appropriate properties\"\n description = \"Check if Conditions are properly configured\"\n source_url = \"https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/conditions-section-structure.html\"\n tags = [\"conditions\"]\n\n condition_keys = [\n \"Condition\",\n \"Fn::And\",\n \"Fn::Equals\",\n \"Fn::Not\",\n \"Fn::Or\",\n ]\n\n def match(self, cfn):\n matches = []\n\n conditions = cfn.template.get(\"Conditions\", {})\n if conditions:\n for condname, condobj in conditions.items():\n if not isinstance(condobj, dict):\n message = \"Condition {0} has invalid property\"\n matches.append(\n RuleMatch([\"Conditions\", condname], message.format(condname))\n )\n else:\n if len(condobj) != 1:\n message = \"Condition {0} has too many intrinsic conditions\"\n matches.append(\n RuleMatch(\n [\"Conditions\", condname], message.format(condname)\n )\n )\n else:\n for k, _ in condobj.items():\n if k not in self.condition_keys:\n message = \"Condition {0} has invalid property {1}\"\n matches.append(\n RuleMatch(\n [\"Conditions\", condname] + [k],\n message.format(condname, k),\n )\n )\n\n return matches\n", "path": "src/cfnlint/rules/conditions/Configuration.py"}]} | 1,179 | 231 |
gh_patches_debug_632 | rasdani/github-patches | git_diff | pex-tool__pex-2245 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.147
On the docket:
+ [x] pex does not use .pip/pip.conf to resolve packages #336 / #838
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.146"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.146"
+__version__ = "2.1.147"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.146\"\n+__version__ = \"2.1.147\"\n", "issue": "Release 2.1.147\nOn the docket:\r\n+ [x] pex does not use .pip/pip.conf to resolve packages #336 / #838\r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.146\"\n", "path": "pex/version.py"}]} | 627 | 99 |
gh_patches_debug_24048 | rasdani/github-patches | git_diff | translate__pootle-5595 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Directory hashes are not expired when directories are added/removed
if you add or remove a directory it should expire hashes on all parents and related - ie /projects/x directories
</issue>
<code>
[start of pootle/apps/pootle_revision/receivers.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 from django.db.models.signals import post_save
10 from django.dispatch import receiver
11
12 from pootle.core.delegate import revision_updater
13 from pootle_data.models import StoreData
14 from pootle_store.models import Store
15
16
17 @receiver(post_save, sender=StoreData)
18 def handle_storedata_save(**kwargs):
19 revision_updater.get(Store)(
20 context=kwargs["instance"].store).update(keys=["stats", "checks"])
21
[end of pootle/apps/pootle_revision/receivers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/apps/pootle_revision/receivers.py b/pootle/apps/pootle_revision/receivers.py
--- a/pootle/apps/pootle_revision/receivers.py
+++ b/pootle/apps/pootle_revision/receivers.py
@@ -6,10 +6,11 @@
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
-from django.db.models.signals import post_save
+from django.db.models.signals import post_save, pre_delete
from django.dispatch import receiver
from pootle.core.delegate import revision_updater
+from pootle_app.models import Directory
from pootle_data.models import StoreData
from pootle_store.models import Store
@@ -18,3 +19,17 @@
def handle_storedata_save(**kwargs):
revision_updater.get(Store)(
context=kwargs["instance"].store).update(keys=["stats", "checks"])
+
+
+@receiver(post_save, sender=Directory)
+def handle_directory_save(**kwargs):
+ if kwargs.get("created"):
+ return
+ revision_updater.get(Directory)(
+ context=kwargs["instance"]).update(keys=["stats", "checks"])
+
+
+@receiver(pre_delete, sender=Directory)
+def handle_directory_delete(**kwargs):
+ revision_updater.get(Directory)(
+ context=kwargs["instance"].parent).update(keys=["stats", "checks"])
| {"golden_diff": "diff --git a/pootle/apps/pootle_revision/receivers.py b/pootle/apps/pootle_revision/receivers.py\n--- a/pootle/apps/pootle_revision/receivers.py\n+++ b/pootle/apps/pootle_revision/receivers.py\n@@ -6,10 +6,11 @@\n # or later license. See the LICENSE file for a copy of the license and the\n # AUTHORS file for copyright and authorship information.\n \n-from django.db.models.signals import post_save\n+from django.db.models.signals import post_save, pre_delete\n from django.dispatch import receiver\n \n from pootle.core.delegate import revision_updater\n+from pootle_app.models import Directory\n from pootle_data.models import StoreData\n from pootle_store.models import Store\n \n@@ -18,3 +19,17 @@\n def handle_storedata_save(**kwargs):\n revision_updater.get(Store)(\n context=kwargs[\"instance\"].store).update(keys=[\"stats\", \"checks\"])\n+\n+\n+@receiver(post_save, sender=Directory)\n+def handle_directory_save(**kwargs):\n+ if kwargs.get(\"created\"):\n+ return\n+ revision_updater.get(Directory)(\n+ context=kwargs[\"instance\"]).update(keys=[\"stats\", \"checks\"])\n+\n+\n+@receiver(pre_delete, sender=Directory)\n+def handle_directory_delete(**kwargs):\n+ revision_updater.get(Directory)(\n+ context=kwargs[\"instance\"].parent).update(keys=[\"stats\", \"checks\"])\n", "issue": "Directory hashes are not expired when directories are added/removed\nif you add or remove a directory it should expire hashes on all parents and related - ie /projects/x directories\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\n\nfrom pootle.core.delegate import revision_updater\nfrom pootle_data.models import StoreData\nfrom pootle_store.models import Store\n\n\n@receiver(post_save, sender=StoreData)\ndef handle_storedata_save(**kwargs):\n revision_updater.get(Store)(\n context=kwargs[\"instance\"].store).update(keys=[\"stats\", \"checks\"])\n", "path": "pootle/apps/pootle_revision/receivers.py"}]} | 779 | 320 |
gh_patches_debug_24492 | rasdani/github-patches | git_diff | scikit-hep__pyhf-186 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Updated setup.py for pytorch > 0.4 dependency
# Description
I had 0.3.1 for Torch and that caused issues with some of the doctesting as the distributions did not have `cdf` methods. I forced an upgrade pytorch and things are fine now.
</issue>
<code>
[start of setup.py]
1 from setuptools import setup, find_packages
2 setup(
3 name = 'pyhf',
4 version = '0.0.8',
5 description = '(partial) pure python histfactory implementation',
6 url = '',
7 author = 'Lukas Heinrich',
8 author_email = '[email protected]',
9 packages = find_packages(),
10 include_package_data = True,
11 install_requires = [
12 'numpy>=1.14.3',
13 'scipy'
14 ],
15 extras_require = {
16 'xmlimport': [
17 'uproot',
18 ],
19 'torch': [
20 'torch'
21 ],
22 'mxnet':[
23 'mxnet',
24 ],
25 'develop': [
26 'pyflakes',
27 'pytest>=3.5.1',
28 'pytest-cov>=2.5.1',
29 'pytest-benchmark[histogram]',
30 'python-coveralls',
31 'matplotlib',
32 'jupyter',
33 'uproot',
34 'papermill',
35 'torch',
36 'tensorflow',
37 'mxnet>=1.0.0',
38 'graphviz',
39 'sphinx',
40 'sphinxcontrib-bibtex',
41 'sphinxcontrib-napoleon',
42 'sphinx_rtd_theme',
43 'nbsphinx',
44 'jsonpatch',
45 'jsonschema>=2.6.0'
46 ]
47 },
48 entry_points = {
49 },
50 dependency_links = [
51 ]
52 )
53
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -9,7 +9,7 @@
packages = find_packages(),
include_package_data = True,
install_requires = [
- 'numpy>=1.14.3',
+ 'numpy<=1.14.5,>=1.14.3', # required by tensorflow, mxnet, and us
'scipy'
],
extras_require = {
@@ -17,10 +17,18 @@
'uproot',
],
'torch': [
- 'torch'
+ 'torch>=0.4.0'
],
'mxnet':[
- 'mxnet',
+ 'mxnet>=1.0.0',
+ 'requests<2.19.0,>=2.18.4',
+ 'numpy<1.15.0,>=1.8.2',
+ 'requests<2.19.0,>=2.18.4',
+ ],
+ 'tensorflow':[
+ 'tensorflow==1.10.0',
+ 'numpy<=1.14.5,>=1.13.3',
+ 'setuptools<=39.1.0',
],
'develop': [
'pyflakes',
@@ -28,13 +36,11 @@
'pytest-cov>=2.5.1',
'pytest-benchmark[histogram]',
'python-coveralls',
+ 'coverage==4.0.3', # coveralls
'matplotlib',
'jupyter',
'uproot',
'papermill',
- 'torch',
- 'tensorflow',
- 'mxnet>=1.0.0',
'graphviz',
'sphinx',
'sphinxcontrib-bibtex',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -9,7 +9,7 @@\n packages = find_packages(),\n include_package_data = True,\n install_requires = [\n- 'numpy>=1.14.3',\n+ 'numpy<=1.14.5,>=1.14.3', # required by tensorflow, mxnet, and us\n 'scipy'\n ],\n extras_require = {\n@@ -17,10 +17,18 @@\n 'uproot',\n ],\n 'torch': [\n- 'torch'\n+ 'torch>=0.4.0'\n ],\n 'mxnet':[\n- 'mxnet',\n+ 'mxnet>=1.0.0',\n+ 'requests<2.19.0,>=2.18.4',\n+ 'numpy<1.15.0,>=1.8.2',\n+ 'requests<2.19.0,>=2.18.4',\n+ ],\n+ 'tensorflow':[\n+ 'tensorflow==1.10.0',\n+ 'numpy<=1.14.5,>=1.13.3',\n+ 'setuptools<=39.1.0',\n ],\n 'develop': [\n 'pyflakes',\n@@ -28,13 +36,11 @@\n 'pytest-cov>=2.5.1',\n 'pytest-benchmark[histogram]',\n 'python-coveralls',\n+ 'coverage==4.0.3', # coveralls\n 'matplotlib',\n 'jupyter',\n 'uproot',\n 'papermill',\n- 'torch',\n- 'tensorflow',\n- 'mxnet>=1.0.0',\n 'graphviz',\n 'sphinx',\n 'sphinxcontrib-bibtex',\n", "issue": "Updated setup.py for pytorch > 0.4 dependency\n# Description\r\n\r\nI had 0.3.1 for Torch and that caused issues with some of the doctesting as the distributions did not have `cdf` methods. I forced an upgrade pytorch and things are fine now.\n", "before_files": [{"content": "from setuptools import setup, find_packages\nsetup(\n name = 'pyhf',\n version = '0.0.8',\n description = '(partial) pure python histfactory implementation',\n url = '',\n author = 'Lukas Heinrich',\n author_email = '[email protected]',\n packages = find_packages(),\n include_package_data = True,\n install_requires = [\n 'numpy>=1.14.3',\n 'scipy'\n ],\n extras_require = {\n 'xmlimport': [\n 'uproot',\n ],\n 'torch': [\n 'torch'\n ],\n 'mxnet':[\n 'mxnet',\n ],\n 'develop': [\n 'pyflakes',\n 'pytest>=3.5.1',\n 'pytest-cov>=2.5.1',\n 'pytest-benchmark[histogram]',\n 'python-coveralls',\n 'matplotlib',\n 'jupyter',\n 'uproot',\n 'papermill',\n 'torch',\n 'tensorflow',\n 'mxnet>=1.0.0',\n 'graphviz',\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'jsonpatch',\n 'jsonschema>=2.6.0'\n ]\n },\n entry_points = {\n },\n dependency_links = [\n ]\n)\n", "path": "setup.py"}]} | 996 | 416 |
gh_patches_debug_6542 | rasdani/github-patches | git_diff | dbt-labs__dbt-core-3071 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update to use new version of Hologram
As an extension of https://github.com/fishtown-analytics/hologram/issues/40 -- support Mashumaro in Hologram -- makes changes to pull in version 0.0.13 of Hologram.
</issue>
<code>
[start of core/setup.py]
1 #!/usr/bin/env python
2 import os
3 import sys
4
5 if sys.version_info < (3, 6):
6 print('Error: dbt does not support this version of Python.')
7 print('Please upgrade to Python 3.6 or higher.')
8 sys.exit(1)
9
10
11 from setuptools import setup
12 try:
13 from setuptools import find_namespace_packages
14 except ImportError:
15 # the user has a downlevel version of setuptools.
16 print('Error: dbt requires setuptools v40.1.0 or higher.')
17 print('Please upgrade setuptools with "pip install --upgrade setuptools" '
18 'and try again')
19 sys.exit(1)
20
21
22 def read(fname):
23 return open(os.path.join(os.path.dirname(__file__), fname)).read()
24
25
26 package_name = "dbt-core"
27 package_version = "0.19.0"
28 description = """dbt (data build tool) is a command line tool that helps \
29 analysts and engineers transform data in their warehouse more effectively"""
30
31
32 setup(
33 name=package_name,
34 version=package_version,
35 description=description,
36 long_description=description,
37 author="Fishtown Analytics",
38 author_email="[email protected]",
39 url="https://github.com/fishtown-analytics/dbt",
40 packages=find_namespace_packages(include=['dbt', 'dbt.*']),
41 package_data={
42 'dbt': [
43 'include/index.html',
44 'include/global_project/dbt_project.yml',
45 'include/global_project/docs/*.md',
46 'include/global_project/macros/*.sql',
47 'include/global_project/macros/**/*.sql',
48 'include/global_project/macros/**/**/*.sql',
49 'py.typed',
50 ]
51 },
52 test_suite='test',
53 entry_points={
54 'console_scripts': [
55 'dbt = dbt.main:main',
56 ],
57 },
58 scripts=[
59 'scripts/dbt',
60 ],
61 install_requires=[
62 'Jinja2==2.11.2',
63 'PyYAML>=3.11',
64 'sqlparse>=0.2.3,<0.4',
65 'networkx>=2.3,<3',
66 'minimal-snowplow-tracker==0.0.2',
67 'colorama>=0.3.9,<0.4.4',
68 'agate>=1.6,<2',
69 'isodate>=0.6,<0.7',
70 'json-rpc>=1.12,<2',
71 'werkzeug>=0.15,<2.0',
72 'dataclasses==0.6;python_version<"3.7"',
73 # 'hologram==0.0.12', # must be updated prior to release
74 'logbook>=1.5,<1.6',
75 'typing-extensions>=3.7.4,<3.8',
76 # the following are all to match snowflake-connector-python
77 'requests>=2.18.0,<2.24.0',
78 'idna<2.10',
79 'cffi>=1.9,<1.15',
80 ],
81 zip_safe=False,
82 classifiers=[
83 'Development Status :: 5 - Production/Stable',
84
85 'License :: OSI Approved :: Apache Software License',
86
87 'Operating System :: Microsoft :: Windows',
88 'Operating System :: MacOS :: MacOS X',
89 'Operating System :: POSIX :: Linux',
90
91 'Programming Language :: Python :: 3.6',
92 'Programming Language :: Python :: 3.7',
93 'Programming Language :: Python :: 3.8',
94 'Programming Language :: Python :: 3.9',
95 ],
96 python_requires=">=3.6.3",
97 )
98
[end of core/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/setup.py b/core/setup.py
--- a/core/setup.py
+++ b/core/setup.py
@@ -70,7 +70,7 @@
'json-rpc>=1.12,<2',
'werkzeug>=0.15,<2.0',
'dataclasses==0.6;python_version<"3.7"',
- # 'hologram==0.0.12', # must be updated prior to release
+ 'hologram==0.0.13',
'logbook>=1.5,<1.6',
'typing-extensions>=3.7.4,<3.8',
# the following are all to match snowflake-connector-python
| {"golden_diff": "diff --git a/core/setup.py b/core/setup.py\n--- a/core/setup.py\n+++ b/core/setup.py\n@@ -70,7 +70,7 @@\n 'json-rpc>=1.12,<2',\n 'werkzeug>=0.15,<2.0',\n 'dataclasses==0.6;python_version<\"3.7\"',\n- # 'hologram==0.0.12', # must be updated prior to release\n+ 'hologram==0.0.13',\n 'logbook>=1.5,<1.6',\n 'typing-extensions>=3.7.4,<3.8',\n # the following are all to match snowflake-connector-python\n", "issue": "Update to use new version of Hologram\nAs an extension of https://github.com/fishtown-analytics/hologram/issues/40 -- support Mashumaro in Hologram -- makes changes to pull in version 0.0.13 of Hologram. \n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nif sys.version_info < (3, 6):\n print('Error: dbt does not support this version of Python.')\n print('Please upgrade to Python 3.6 or higher.')\n sys.exit(1)\n\n\nfrom setuptools import setup\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print('Error: dbt requires setuptools v40.1.0 or higher.')\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" '\n 'and try again')\n sys.exit(1)\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\n\npackage_name = \"dbt-core\"\npackage_version = \"0.19.0\"\ndescription = \"\"\"dbt (data build tool) is a command line tool that helps \\\nanalysts and engineers transform data in their warehouse more effectively\"\"\"\n\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=description,\n author=\"Fishtown Analytics\",\n author_email=\"[email protected]\",\n url=\"https://github.com/fishtown-analytics/dbt\",\n packages=find_namespace_packages(include=['dbt', 'dbt.*']),\n package_data={\n 'dbt': [\n 'include/index.html',\n 'include/global_project/dbt_project.yml',\n 'include/global_project/docs/*.md',\n 'include/global_project/macros/*.sql',\n 'include/global_project/macros/**/*.sql',\n 'include/global_project/macros/**/**/*.sql',\n 'py.typed',\n ]\n },\n test_suite='test',\n entry_points={\n 'console_scripts': [\n 'dbt = dbt.main:main',\n ],\n },\n scripts=[\n 'scripts/dbt',\n ],\n install_requires=[\n 'Jinja2==2.11.2',\n 'PyYAML>=3.11',\n 'sqlparse>=0.2.3,<0.4',\n 'networkx>=2.3,<3',\n 'minimal-snowplow-tracker==0.0.2',\n 'colorama>=0.3.9,<0.4.4',\n 'agate>=1.6,<2',\n 'isodate>=0.6,<0.7',\n 'json-rpc>=1.12,<2',\n 'werkzeug>=0.15,<2.0',\n 'dataclasses==0.6;python_version<\"3.7\"',\n # 'hologram==0.0.12', # must be updated prior to release\n 'logbook>=1.5,<1.6',\n 'typing-extensions>=3.7.4,<3.8',\n # the following are all to match snowflake-connector-python\n 'requests>=2.18.0,<2.24.0',\n 'idna<2.10',\n 'cffi>=1.9,<1.15',\n ],\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n\n 'License :: OSI Approved :: Apache Software License',\n\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n python_requires=\">=3.6.3\",\n)\n", "path": "core/setup.py"}]} | 1,571 | 160 |
gh_patches_debug_25528 | rasdani/github-patches | git_diff | scrapy__scrapy-2464 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
String value for order of Scrapy component
If Scrapy component order is defined as a string, it leads to undefined behaviour on Python 2 and to the following errors on Python 3:
```
File "/usr/local/lib/python3.5/site-packages/scrapy/middleware.py", line 58, in from_crawler
return cls.from_settings(crawler.settings, crawler)
File "/usr/local/lib/python3.5/site-packages/scrapy/middleware.py", line 29, in from_settings
mwlist = cls._get_mwlist_from_settings(settings)
File "/usr/local/lib/python3.5/site-packages/scrapy/core/spidermw.py", line 21, in _get_mwlist_from_settings
return build_component_list(settings.getwithbase('SPIDER_MIDDLEWARES'))
File "/usr/local/lib/python3.5/site-packages/scrapy/utils/conf.py", line 47, in build_component_list
return [k for k, v in sorted(six.iteritems(compdict), key=itemgetter(1))]
builtins.TypeError: unorderable types: str() < int()
```
My guess that 1) order of a Scrapy component should be stated as of integer type (or `None`) and there should be a check somewhere, 2) or the sorting logic should be fixed.
</issue>
<code>
[start of scrapy/utils/conf.py]
1 import os
2 import sys
3 from operator import itemgetter
4
5 import six
6 from six.moves.configparser import SafeConfigParser
7
8 from scrapy.settings import BaseSettings
9 from scrapy.utils.deprecate import update_classpath
10 from scrapy.utils.python import without_none_values
11
12
13 def build_component_list(compdict, custom=None, convert=update_classpath):
14 """Compose a component list from a { class: order } dictionary."""
15
16 def _check_components(complist):
17 if len({convert(c) for c in complist}) != len(complist):
18 raise ValueError('Some paths in {!r} convert to the same object, '
19 'please update your settings'.format(complist))
20
21 def _map_keys(compdict):
22 if isinstance(compdict, BaseSettings):
23 compbs = BaseSettings()
24 for k, v in six.iteritems(compdict):
25 prio = compdict.getpriority(k)
26 if compbs.getpriority(convert(k)) == prio:
27 raise ValueError('Some paths in {!r} convert to the same '
28 'object, please update your settings'
29 ''.format(list(compdict.keys())))
30 else:
31 compbs.set(convert(k), v, priority=prio)
32 return compbs
33 else:
34 _check_components(compdict)
35 return {convert(k): v for k, v in six.iteritems(compdict)}
36
37 # BEGIN Backwards compatibility for old (base, custom) call signature
38 if isinstance(custom, (list, tuple)):
39 _check_components(custom)
40 return type(custom)(convert(c) for c in custom)
41
42 if custom is not None:
43 compdict.update(custom)
44 # END Backwards compatibility
45
46 compdict = without_none_values(_map_keys(compdict))
47 return [k for k, v in sorted(six.iteritems(compdict), key=itemgetter(1))]
48
49
50 def arglist_to_dict(arglist):
51 """Convert a list of arguments like ['arg1=val1', 'arg2=val2', ...] to a
52 dict
53 """
54 return dict(x.split('=', 1) for x in arglist)
55
56
57 def closest_scrapy_cfg(path='.', prevpath=None):
58 """Return the path to the closest scrapy.cfg file by traversing the current
59 directory and its parents
60 """
61 if path == prevpath:
62 return ''
63 path = os.path.abspath(path)
64 cfgfile = os.path.join(path, 'scrapy.cfg')
65 if os.path.exists(cfgfile):
66 return cfgfile
67 return closest_scrapy_cfg(os.path.dirname(path), path)
68
69
70 def init_env(project='default', set_syspath=True):
71 """Initialize environment to use command-line tool from inside a project
72 dir. This sets the Scrapy settings module and modifies the Python path to
73 be able to locate the project module.
74 """
75 cfg = get_config()
76 if cfg.has_option('settings', project):
77 os.environ['SCRAPY_SETTINGS_MODULE'] = cfg.get('settings', project)
78 closest = closest_scrapy_cfg()
79 if closest:
80 projdir = os.path.dirname(closest)
81 if set_syspath and projdir not in sys.path:
82 sys.path.append(projdir)
83
84
85 def get_config(use_closest=True):
86 """Get Scrapy config file as a SafeConfigParser"""
87 sources = get_sources(use_closest)
88 cfg = SafeConfigParser()
89 cfg.read(sources)
90 return cfg
91
92
93 def get_sources(use_closest=True):
94 xdg_config_home = os.environ.get('XDG_CONFIG_HOME') or \
95 os.path.expanduser('~/.config')
96 sources = ['/etc/scrapy.cfg', r'c:\scrapy\scrapy.cfg',
97 xdg_config_home + '/scrapy.cfg',
98 os.path.expanduser('~/.scrapy.cfg')]
99 if use_closest:
100 sources.append(closest_scrapy_cfg())
101 return sources
102
[end of scrapy/utils/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scrapy/utils/conf.py b/scrapy/utils/conf.py
--- a/scrapy/utils/conf.py
+++ b/scrapy/utils/conf.py
@@ -1,5 +1,6 @@
import os
import sys
+import numbers
from operator import itemgetter
import six
@@ -34,6 +35,13 @@
_check_components(compdict)
return {convert(k): v for k, v in six.iteritems(compdict)}
+ def _validate_values(compdict):
+ """Fail if a value in the components dict is not a real number or None."""
+ for name, value in six.iteritems(compdict):
+ if value is not None and not isinstance(value, numbers.Real):
+ raise ValueError('Invalid value {} for component {}, please provide ' \
+ 'a real number or None instead'.format(value, name))
+
# BEGIN Backwards compatibility for old (base, custom) call signature
if isinstance(custom, (list, tuple)):
_check_components(custom)
@@ -43,6 +51,7 @@
compdict.update(custom)
# END Backwards compatibility
+ _validate_values(compdict)
compdict = without_none_values(_map_keys(compdict))
return [k for k, v in sorted(six.iteritems(compdict), key=itemgetter(1))]
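With the patch applied, the same misconfiguration fails fast with an explicit message instead of an unorderable-types crash. A standalone sketch of the behaviour that `_validate_values` adds:

```python
import numbers

def validate_values(compdict):
    # Mirrors the check introduced in the diff above.
    for name, value in compdict.items():
        if value is not None and not isinstance(value, numbers.Real):
            raise ValueError('Invalid value {} for component {}, please provide '
                             'a real number or None instead'.format(value, name))

validate_values({'myproject.middlewares.FooMiddleware': '543'})
# ValueError: Invalid value 543 for component myproject.middlewares.FooMiddleware, ...
```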
| {"golden_diff": "diff --git a/scrapy/utils/conf.py b/scrapy/utils/conf.py\n--- a/scrapy/utils/conf.py\n+++ b/scrapy/utils/conf.py\n@@ -1,5 +1,6 @@\n import os\n import sys\n+import numbers\n from operator import itemgetter\n \n import six\n@@ -34,6 +35,13 @@\n _check_components(compdict)\n return {convert(k): v for k, v in six.iteritems(compdict)}\n \n+ def _validate_values(compdict):\n+ \"\"\"Fail if a value in the components dict is not a real number or None.\"\"\"\n+ for name, value in six.iteritems(compdict):\n+ if value is not None and not isinstance(value, numbers.Real):\n+ raise ValueError('Invalid value {} for component {}, please provide ' \\\n+ 'a real number or None instead'.format(value, name))\n+\n # BEGIN Backwards compatibility for old (base, custom) call signature\n if isinstance(custom, (list, tuple)):\n _check_components(custom)\n@@ -43,6 +51,7 @@\n compdict.update(custom)\n # END Backwards compatibility\n \n+ _validate_values(compdict)\n compdict = without_none_values(_map_keys(compdict))\n return [k for k, v in sorted(six.iteritems(compdict), key=itemgetter(1))]\n", "issue": "String value for order of Scrapy component\nIf Scrapy component order is defined as a string, it leads to undefined behaviour on Python 2 and to the following errors on Python 3:\r\n```\r\nFile \"/usr/local/lib/python3.5/site-packages/scrapy/middleware.py\", line 58, in from_crawler\r\n return cls.from_settings(crawler.settings, crawler)\r\nFile \"/usr/local/lib/python3.5/site-packages/scrapy/middleware.py\", line 29, in from_settings\r\n mwlist = cls._get_mwlist_from_settings(settings)\r\nFile \"/usr/local/lib/python3.5/site-packages/scrapy/core/spidermw.py\", line 21, in _get_mwlist_from_settings\r\n return build_component_list(settings.getwithbase('SPIDER_MIDDLEWARES'))\r\nFile \"/usr/local/lib/python3.5/site-packages/scrapy/utils/conf.py\", line 47, in build_component_list\r\n return [k for k, v in sorted(six.iteritems(compdict), key=itemgetter(1))]\r\nbuiltins.TypeError: unorderable types: str() < int()\r\n```\r\n\r\nMy guess that 1) order of a Scrapy component should be stated as of integer type (or `None`) and there should be a check somewhere, 2) or the sorting logic should be fixed.\n", "before_files": [{"content": "import os\nimport sys\nfrom operator import itemgetter\n\nimport six\nfrom six.moves.configparser import SafeConfigParser\n\nfrom scrapy.settings import BaseSettings\nfrom scrapy.utils.deprecate import update_classpath\nfrom scrapy.utils.python import without_none_values\n\n\ndef build_component_list(compdict, custom=None, convert=update_classpath):\n \"\"\"Compose a component list from a { class: order } dictionary.\"\"\"\n\n def _check_components(complist):\n if len({convert(c) for c in complist}) != len(complist):\n raise ValueError('Some paths in {!r} convert to the same object, '\n 'please update your settings'.format(complist))\n\n def _map_keys(compdict):\n if isinstance(compdict, BaseSettings):\n compbs = BaseSettings()\n for k, v in six.iteritems(compdict):\n prio = compdict.getpriority(k)\n if compbs.getpriority(convert(k)) == prio:\n raise ValueError('Some paths in {!r} convert to the same '\n 'object, please update your settings'\n ''.format(list(compdict.keys())))\n else:\n compbs.set(convert(k), v, priority=prio)\n return compbs\n else:\n _check_components(compdict)\n return {convert(k): v for k, v in six.iteritems(compdict)}\n\n # BEGIN Backwards compatibility for old (base, custom) call signature\n if isinstance(custom, (list, tuple)):\n 
_check_components(custom)\n return type(custom)(convert(c) for c in custom)\n\n if custom is not None:\n compdict.update(custom)\n # END Backwards compatibility\n\n compdict = without_none_values(_map_keys(compdict))\n return [k for k, v in sorted(six.iteritems(compdict), key=itemgetter(1))]\n\n\ndef arglist_to_dict(arglist):\n \"\"\"Convert a list of arguments like ['arg1=val1', 'arg2=val2', ...] to a\n dict\n \"\"\"\n return dict(x.split('=', 1) for x in arglist)\n\n\ndef closest_scrapy_cfg(path='.', prevpath=None):\n \"\"\"Return the path to the closest scrapy.cfg file by traversing the current\n directory and its parents\n \"\"\"\n if path == prevpath:\n return ''\n path = os.path.abspath(path)\n cfgfile = os.path.join(path, 'scrapy.cfg')\n if os.path.exists(cfgfile):\n return cfgfile\n return closest_scrapy_cfg(os.path.dirname(path), path)\n\n\ndef init_env(project='default', set_syspath=True):\n \"\"\"Initialize environment to use command-line tool from inside a project\n dir. This sets the Scrapy settings module and modifies the Python path to\n be able to locate the project module.\n \"\"\"\n cfg = get_config()\n if cfg.has_option('settings', project):\n os.environ['SCRAPY_SETTINGS_MODULE'] = cfg.get('settings', project)\n closest = closest_scrapy_cfg()\n if closest:\n projdir = os.path.dirname(closest)\n if set_syspath and projdir not in sys.path:\n sys.path.append(projdir)\n\n\ndef get_config(use_closest=True):\n \"\"\"Get Scrapy config file as a SafeConfigParser\"\"\"\n sources = get_sources(use_closest)\n cfg = SafeConfigParser()\n cfg.read(sources)\n return cfg\n\n\ndef get_sources(use_closest=True):\n xdg_config_home = os.environ.get('XDG_CONFIG_HOME') or \\\n os.path.expanduser('~/.config')\n sources = ['/etc/scrapy.cfg', r'c:\\scrapy\\scrapy.cfg',\n xdg_config_home + '/scrapy.cfg',\n os.path.expanduser('~/.scrapy.cfg')]\n if use_closest:\n sources.append(closest_scrapy_cfg())\n return sources\n", "path": "scrapy/utils/conf.py"}]} | 1,819 | 286 |
gh_patches_debug_26341 | rasdani/github-patches | git_diff | freqtrade__freqtrade-1896 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
--help produces traceback
Seems I broke it somehow.
`python3 freqtrade hyperopt --help`
produces a traceback:
```
Fatal exception!
Traceback (most recent call last):
File "/home/user/freqtrade-wrk/github-hroff-1902/freqtrade/freqtrade/main.py", line 42, in main
args: Namespace = arguments.get_parsed_arg()
File "/home/user/freqtrade-wrk/github-hroff-1902/freqtrade/freqtrade/arguments.py", line 46, in get_parsed_arg
self.parsed_arg = self.parse_args()
File "/home/user/freqtrade-wrk/github-hroff-1902/freqtrade/freqtrade/arguments.py", line 54, in parse_args
parsed_arg = self.parser.parse_args(self.args)
File "/usr/lib/python3.6/argparse.py", line 1743, in parse_args
args, argv = self.parse_known_args(args, namespace)
File "/usr/lib/python3.6/argparse.py", line 1775, in parse_known_args
namespace, args = self._parse_known_args(args, namespace)
File "/usr/lib/python3.6/argparse.py", line 1963, in _parse_known_args
positionals_end_index = consume_positionals(start_index)
File "/usr/lib/python3.6/argparse.py", line 1940, in consume_positionals
take_action(action, args)
File "/usr/lib/python3.6/argparse.py", line 1849, in take_action
action(self, namespace, argument_values, option_string)
File "/usr/lib/python3.6/argparse.py", line 1146, in __call__
subnamespace, arg_strings = parser.parse_known_args(arg_strings, None)
File "/usr/lib/python3.6/argparse.py", line 1775, in parse_known_args
namespace, args = self._parse_known_args(args, namespace)
File "/usr/lib/python3.6/argparse.py", line 1981, in _parse_known_args
start_index = consume_optional(start_index)
File "/usr/lib/python3.6/argparse.py", line 1921, in consume_optional
take_action(action, args, option_string)
File "/usr/lib/python3.6/argparse.py", line 1849, in take_action
action(self, namespace, argument_values, option_string)
File "/usr/lib/python3.6/argparse.py", line 1034, in __call__
parser.exit()
File "/usr/lib/python3.6/argparse.py", line 2389, in exit
_sys.exit(status)
SystemExit: 0
```
</issue>
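The traceback bottoms out in `parser.exit()`, which is argparse's normal way of finishing `--help`: it raises `SystemExit(0)`. The crux is that `SystemExit` derives from `BaseException` but not from `Exception`, so the bot's blanket handler swallows the clean exit. A standalone sketch:

```python
try:
    raise SystemExit(0)           # what argparse does after printing help
except Exception:
    print('not reached')          # SystemExit is not an Exception subclass
except BaseException:
    print('caught: looks fatal')  # the old handler lands here and logs it
```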
<code>
[start of freqtrade/main.py]
1 #!/usr/bin/env python3
2 """
3 Main Freqtrade bot script.
4 Read the documentation to know what cli arguments you need.
5 """
6
7 import sys
8 # check min. python version
9 if sys.version_info < (3, 6):
10 sys.exit("Freqtrade requires Python version >= 3.6")
11
12 # flake8: noqa E402
13 import logging
14 from argparse import Namespace
15 from typing import List
16
17 from freqtrade import OperationalException
18 from freqtrade.arguments import Arguments
19 from freqtrade.configuration import set_loggers
20 from freqtrade.worker import Worker
21
22
23 logger = logging.getLogger('freqtrade')
24
25
26 def main(sysargv: List[str] = None) -> None:
27 """
28 This function will initiate the bot and start the trading loop.
29 :return: None
30 """
31
32 try:
33 set_loggers()
34
35 worker = None
36 return_code = 1
37
38 arguments = Arguments(
39 sysargv,
40 'Free, open source crypto trading bot'
41 )
42 args: Namespace = arguments.get_parsed_arg()
43
44 # A subcommand has been issued.
45 # Means if Backtesting or Hyperopt have been called we exit the bot
46 if hasattr(args, 'func'):
47 args.func(args)
48 # TODO: fetch return_code as returned by the command function here
49 return_code = 0
50 else:
51 # Load and run worker
52 worker = Worker(args)
53 worker.run()
54
55 except KeyboardInterrupt:
56 logger.info('SIGINT received, aborting ...')
57 return_code = 0
58 except OperationalException as e:
59 logger.error(str(e))
60 return_code = 2
61 except BaseException:
62 logger.exception('Fatal exception!')
63 finally:
64 if worker:
65 worker.exit()
66 sys.exit(return_code)
67
68
69 if __name__ == '__main__':
70 main()
71
[end of freqtrade/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/freqtrade/main.py b/freqtrade/main.py
--- a/freqtrade/main.py
+++ b/freqtrade/main.py
@@ -12,7 +12,7 @@
# flake8: noqa E402
import logging
from argparse import Namespace
-from typing import List
+from typing import Any, List
from freqtrade import OperationalException
from freqtrade.arguments import Arguments
@@ -29,12 +29,11 @@
:return: None
"""
+ return_code: Any = 1
+ worker = None
try:
set_loggers()
- worker = None
- return_code = 1
-
arguments = Arguments(
sysargv,
'Free, open source crypto trading bot'
@@ -52,13 +51,15 @@
worker = Worker(args)
worker.run()
+ except SystemExit as e:
+ return_code = e
except KeyboardInterrupt:
logger.info('SIGINT received, aborting ...')
return_code = 0
except OperationalException as e:
logger.error(str(e))
return_code = 2
- except BaseException:
+ except Exception:
logger.exception('Fatal exception!')
finally:
if worker:
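The behavioural change in the patch, reduced to a toy version (a sketch only; the real patch stores the exception and forwards it through `sys.exit()` in the `finally` block):

```python
def main_like(argv):
    try:
        if '--help' in argv:
            raise SystemExit(0)   # stand-in for argparse printing help
        raise RuntimeError('boom')
    except SystemExit as e:
        return e.code             # 0: a clean exit, not "Fatal exception!"
    except Exception:
        return 'fatal'            # genuine errors are still reported

print(main_like(['--help']))  # 0
print(main_like([]))          # fatal
```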
| {"golden_diff": "diff --git a/freqtrade/main.py b/freqtrade/main.py\n--- a/freqtrade/main.py\n+++ b/freqtrade/main.py\n@@ -12,7 +12,7 @@\n # flake8: noqa E402\n import logging\n from argparse import Namespace\n-from typing import List\n+from typing import Any, List\n \n from freqtrade import OperationalException\n from freqtrade.arguments import Arguments\n@@ -29,12 +29,11 @@\n :return: None\n \"\"\"\n \n+ return_code: Any = 1\n+ worker = None\n try:\n set_loggers()\n \n- worker = None\n- return_code = 1\n-\n arguments = Arguments(\n sysargv,\n 'Free, open source crypto trading bot'\n@@ -52,13 +51,15 @@\n worker = Worker(args)\n worker.run()\n \n+ except SystemExit as e:\n+ return_code = e\n except KeyboardInterrupt:\n logger.info('SIGINT received, aborting ...')\n return_code = 0\n except OperationalException as e:\n logger.error(str(e))\n return_code = 2\n- except BaseException:\n+ except Exception:\n logger.exception('Fatal exception!')\n finally:\n if worker:\n", "issue": "--help produces traceback\nSeems I broke it somehow.\r\n\r\n`python3 freqtrade hyperopt --help`\r\nproduces traceback \r\n```\r\nFatal exception!\r\nTraceback (most recent call last):\r\n File \"/home/user/freqtrade-wrk/github-hroff-1902/freqtrade/freqtrade/main.py\", line 42, in main\r\n args: Namespace = arguments.get_parsed_arg()\r\n File \"/home/user/freqtrade-wrk/github-hroff-1902/freqtrade/freqtrade/arguments.py\", line 46, in get_parsed_arg\r\n self.parsed_arg = self.parse_args()\r\n File \"/home/user/freqtrade-wrk/github-hroff-1902/freqtrade/freqtrade/arguments.py\", line 54, in parse_args\r\n parsed_arg = self.parser.parse_args(self.args)\r\n File \"/usr/lib/python3.6/argparse.py\", line 1743, in parse_args\r\n args, argv = self.parse_known_args(args, namespace)\r\n File \"/usr/lib/python3.6/argparse.py\", line 1775, in parse_known_args\r\n namespace, args = self._parse_known_args(args, namespace)\r\n File \"/usr/lib/python3.6/argparse.py\", line 1963, in _parse_known_args\r\n positionals_end_index = consume_positionals(start_index)\r\n File \"/usr/lib/python3.6/argparse.py\", line 1940, in consume_positionals\r\n take_action(action, args)\r\n File \"/usr/lib/python3.6/argparse.py\", line 1849, in take_action\r\n action(self, namespace, argument_values, option_string)\r\n File \"/usr/lib/python3.6/argparse.py\", line 1146, in __call__\r\n subnamespace, arg_strings = parser.parse_known_args(arg_strings, None)\r\n File \"/usr/lib/python3.6/argparse.py\", line 1775, in parse_known_args\r\n namespace, args = self._parse_known_args(args, namespace)\r\n File \"/usr/lib/python3.6/argparse.py\", line 1981, in _parse_known_args\r\n start_index = consume_optional(start_index)\r\n File \"/usr/lib/python3.6/argparse.py\", line 1921, in consume_optional\r\n take_action(action, args, option_string)\r\n File \"/usr/lib/python3.6/argparse.py\", line 1849, in take_action\r\n action(self, namespace, argument_values, option_string)\r\n File \"/usr/lib/python3.6/argparse.py\", line 1034, in __call__\r\n parser.exit()\r\n File \"/usr/lib/python3.6/argparse.py\", line 2389, in exit\r\n _sys.exit(status)\r\nSystemExit: 0\r\n```\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\"\"\"\nMain Freqtrade bot script.\nRead the documentation to know what cli arguments you need.\n\"\"\"\n\nimport sys\n# check min. 
python version\nif sys.version_info < (3, 6):\n sys.exit(\"Freqtrade requires Python version >= 3.6\")\n\n# flake8: noqa E402\nimport logging\nfrom argparse import Namespace\nfrom typing import List\n\nfrom freqtrade import OperationalException\nfrom freqtrade.arguments import Arguments\nfrom freqtrade.configuration import set_loggers\nfrom freqtrade.worker import Worker\n\n\nlogger = logging.getLogger('freqtrade')\n\n\ndef main(sysargv: List[str] = None) -> None:\n \"\"\"\n This function will initiate the bot and start the trading loop.\n :return: None\n \"\"\"\n\n try:\n set_loggers()\n\n worker = None\n return_code = 1\n\n arguments = Arguments(\n sysargv,\n 'Free, open source crypto trading bot'\n )\n args: Namespace = arguments.get_parsed_arg()\n\n # A subcommand has been issued.\n # Means if Backtesting or Hyperopt have been called we exit the bot\n if hasattr(args, 'func'):\n args.func(args)\n # TODO: fetch return_code as returned by the command function here\n return_code = 0\n else:\n # Load and run worker\n worker = Worker(args)\n worker.run()\n\n except KeyboardInterrupt:\n logger.info('SIGINT received, aborting ...')\n return_code = 0\n except OperationalException as e:\n logger.error(str(e))\n return_code = 2\n except BaseException:\n logger.exception('Fatal exception!')\n finally:\n if worker:\n worker.exit()\n sys.exit(return_code)\n\n\nif __name__ == '__main__':\n main()\n", "path": "freqtrade/main.py"}]} | 1,686 | 281 |
gh_patches_debug_37710 | rasdani/github-patches | git_diff | localstack__localstack-4575 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bug: State Machine references don't get resolved properly
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Behavior
Lambda refs get lost
### Expected Behavior
Lambda refs work in state machines
### How are you starting LocalStack?
With a docker-compose file
### Steps To Reproduce
#### How are you starting localstack (e.g., `bin/localstack` command, arguments, or `docker-compose.yml`)
docker run localstack/localstack
#### Client commands (e.g., AWS SDK code snippet, or sequence of "awslocal" commands)
awslocal s3 mb s3://mybucket
### Environment
```markdown
- OS:
- LocalStack:
```
### Anything else?
This is based on a conversation I had with @dominikschubert
</issue>
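For context, the breakage shows up when a CloudFormation template passes a Lambda ARN through `DefinitionSubstitutions`: the `${Token}` placeholders in `DefinitionString` were handed to Step Functions unresolved. The fix below adds a substitution pass, roughly as follows (standalone sketch; the token and ARN values are invented):

```python
import re

def apply_substitutions(definition, substitutions):
    # Replace ${Name} placeholders with their DefinitionSubstitutions values.
    for token in re.findall(r'\$\{[a-zA-Z0-9_]+\}', definition):
        definition = definition.replace(token, substitutions[token[2:-1]])
    return definition

print(apply_substitutions(
    '{"Resource": "${LambdaArn}"}',
    {'LambdaArn': 'arn:aws:lambda:us-east-1:000000000000:function:my-fn'},
))
# {"Resource": "arn:aws:lambda:us-east-1:000000000000:function:my-fn"}
```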
<code>
[start of localstack/utils/generic/wait_utils.py]
1 import time
2 from typing import Callable
3
4 from typing_extensions import Literal
5
6
7 def wait_until(
8 fn: Callable[[], bool],
9 wait: float = 1.0,
10 max_retries: int = 10,
11 strategy: Literal["exponential", "static", "linear"] = "exponential",
12 _retries: int = 0,
13 _max_wait: float = 240,
14 ) -> None:
15 """waits until a given condition is true, rechecking it periodically"""
16 if max_retries < _retries:
17 return
18 completed = fn()
19 if not completed:
20 if wait > _max_wait:
21 return
22 time.sleep(wait)
23 next_wait = wait # default: static
24 if strategy == "linear":
25 next_wait = (wait / _retries) * (_retries + 1)
26 elif strategy == "exponential":
27 next_wait = wait ** 2
28 wait_until(fn, next_wait, max_retries, strategy, _retries + 1, _max_wait)
29
[end of localstack/utils/generic/wait_utils.py]
[start of localstack/services/cloudformation/models/stepfunctions.py]
1 from localstack.services.cloudformation.deployment_utils import PLACEHOLDER_RESOURCE_NAME
2 from localstack.services.cloudformation.service_models import GenericBaseModel
3 from localstack.utils.aws import aws_stack
4
5
6 class SFNActivity(GenericBaseModel):
7 @staticmethod
8 def cloudformation_type():
9 return "AWS::StepFunctions::Activity"
10
11 def fetch_state(self, stack_name, resources):
12 activity_arn = self.physical_resource_id
13 if not activity_arn:
14 return None
15 client = aws_stack.connect_to_service("stepfunctions")
16 result = client.describe_activity(activityArn=activity_arn)
17 return result
18
19 @staticmethod
20 def get_deploy_templates():
21 return {
22 "create": {
23 "function": "create_activity",
24 "parameters": {"name": ["Name", PLACEHOLDER_RESOURCE_NAME], "tags": "Tags"},
25 },
26 "delete": {
27 "function": "delete_activity",
28 "parameters": {"activityArn": "PhysicalResourceId"},
29 },
30 }
31
32
33 class SFNStateMachine(GenericBaseModel):
34 @staticmethod
35 def cloudformation_type():
36 return "AWS::StepFunctions::StateMachine"
37
38 def get_resource_name(self):
39 return self.props.get("StateMachineName")
40
41 def get_physical_resource_id(self, attribute=None, **kwargs):
42 return self.props.get("stateMachineArn")
43
44 def fetch_state(self, stack_name, resources):
45 sm_name = self.props.get("StateMachineName") or self.resource_id
46 sm_name = self.resolve_refs_recursively(stack_name, sm_name, resources)
47 sfn_client = aws_stack.connect_to_service("stepfunctions")
48 state_machines = sfn_client.list_state_machines()["stateMachines"]
49 sm_arn = [m["stateMachineArn"] for m in state_machines if m["name"] == sm_name]
50 if not sm_arn:
51 return None
52 result = sfn_client.describe_state_machine(stateMachineArn=sm_arn[0])
53 return result
54
55 def update_resource(self, new_resource, stack_name, resources):
56 props = new_resource["Properties"]
57 client = aws_stack.connect_to_service("stepfunctions")
58 sm_arn = self.props.get("stateMachineArn")
59 if not sm_arn:
60 self.state = self.fetch_state(stack_name=stack_name, resources=resources)
61 sm_arn = self.state["stateMachineArn"]
62 kwargs = {
63 "stateMachineArn": sm_arn,
64 "definition": props["DefinitionString"],
65 }
66 return client.update_state_machine(**kwargs)
67
68 @staticmethod
69 def get_deploy_templates():
70 return {
71 "create": {
72 "function": "create_state_machine",
73 "parameters": {
74 "name": ["StateMachineName", PLACEHOLDER_RESOURCE_NAME],
75 "definition": "DefinitionString",
76 "roleArn": "RoleArn",
77 },
78 },
79 "delete": {
80 "function": "delete_state_machine",
81 "parameters": {"stateMachineArn": "PhysicalResourceId"},
82 },
83 }
84
[end of localstack/services/cloudformation/models/stepfunctions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/localstack/services/cloudformation/models/stepfunctions.py b/localstack/services/cloudformation/models/stepfunctions.py
--- a/localstack/services/cloudformation/models/stepfunctions.py
+++ b/localstack/services/cloudformation/models/stepfunctions.py
@@ -1,3 +1,6 @@
+import re
+from typing import Dict
+
from localstack.services.cloudformation.deployment_utils import PLACEHOLDER_RESOURCE_NAME
from localstack.services.cloudformation.service_models import GenericBaseModel
from localstack.utils.aws import aws_stack
@@ -65,19 +68,43 @@
}
return client.update_state_machine(**kwargs)
- @staticmethod
- def get_deploy_templates():
+ @classmethod
+ def get_deploy_templates(cls):
+ def _create_params(params, **kwargs):
+ def _get_definition(params):
+ definition_str = params.get("DefinitionString")
+ substitutions = params.get("DefinitionSubstitutions")
+ if substitutions is not None:
+ definition_str = _apply_substitutions(definition_str, substitutions)
+ return definition_str
+
+ return {
+ "name": params.get("StateMachineName", PLACEHOLDER_RESOURCE_NAME),
+ "definition": _get_definition(params),
+ "roleArn": params.get("RoleArn"),
+ "type": params.get("StateMachineTyp", None),
+ }
+
return {
"create": {
"function": "create_state_machine",
- "parameters": {
- "name": ["StateMachineName", PLACEHOLDER_RESOURCE_NAME],
- "definition": "DefinitionString",
- "roleArn": "RoleArn",
- },
+ "parameters": _create_params,
},
"delete": {
"function": "delete_state_machine",
"parameters": {"stateMachineArn": "PhysicalResourceId"},
},
}
+
+
+def _apply_substitutions(definition: str, substitutions: Dict[str, str]) -> str:
+ substitution_regex = re.compile("\\${[a-zA-Z0-9_]+}") # might be a bit too strict in some cases
+ tokens = substitution_regex.findall(definition)
+ result = definition
+ for token in tokens:
+ raw_token = token[2:-1] # strip ${ and }
+ if raw_token not in substitutions.keys():
+ raise
+ result = result.replace(token, substitutions[raw_token])
+
+ return result
diff --git a/localstack/utils/generic/wait_utils.py b/localstack/utils/generic/wait_utils.py
--- a/localstack/utils/generic/wait_utils.py
+++ b/localstack/utils/generic/wait_utils.py
@@ -24,5 +24,5 @@
if strategy == "linear":
next_wait = (wait / _retries) * (_retries + 1)
elif strategy == "exponential":
- next_wait = wait ** 2
+ next_wait = wait * 2
wait_until(fn, next_wait, max_retries, strategy, _retries + 1, _max_wait)
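A side note on the one-character `wait_utils` change in the diff above: squaring the delay is not exponential backoff. With the default `wait=1.0` the interval never grows at all, while doubling gives the intended progression (tiny demonstration):

```python
def delays(step, n=5, wait=1.0):
    out = []
    for _ in range(n):
        out.append(wait)
        wait = step(wait)
    return out

print(delays(lambda w: w ** 2))  # [1.0, 1.0, 1.0, 1.0, 1.0]  (old behaviour)
print(delays(lambda w: w * 2))   # [1.0, 2.0, 4.0, 8.0, 16.0] (patched)
```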
| {"golden_diff": "diff --git a/localstack/services/cloudformation/models/stepfunctions.py b/localstack/services/cloudformation/models/stepfunctions.py\n--- a/localstack/services/cloudformation/models/stepfunctions.py\n+++ b/localstack/services/cloudformation/models/stepfunctions.py\n@@ -1,3 +1,6 @@\n+import re\n+from typing import Dict\n+\n from localstack.services.cloudformation.deployment_utils import PLACEHOLDER_RESOURCE_NAME\n from localstack.services.cloudformation.service_models import GenericBaseModel\n from localstack.utils.aws import aws_stack\n@@ -65,19 +68,43 @@\n }\n return client.update_state_machine(**kwargs)\n \n- @staticmethod\n- def get_deploy_templates():\n+ @classmethod\n+ def get_deploy_templates(cls):\n+ def _create_params(params, **kwargs):\n+ def _get_definition(params):\n+ definition_str = params.get(\"DefinitionString\")\n+ substitutions = params.get(\"DefinitionSubstitutions\")\n+ if substitutions is not None:\n+ definition_str = _apply_substitutions(definition_str, substitutions)\n+ return definition_str\n+\n+ return {\n+ \"name\": params.get(\"StateMachineName\", PLACEHOLDER_RESOURCE_NAME),\n+ \"definition\": _get_definition(params),\n+ \"roleArn\": params.get(\"RoleArn\"),\n+ \"type\": params.get(\"StateMachineTyp\", None),\n+ }\n+\n return {\n \"create\": {\n \"function\": \"create_state_machine\",\n- \"parameters\": {\n- \"name\": [\"StateMachineName\", PLACEHOLDER_RESOURCE_NAME],\n- \"definition\": \"DefinitionString\",\n- \"roleArn\": \"RoleArn\",\n- },\n+ \"parameters\": _create_params,\n },\n \"delete\": {\n \"function\": \"delete_state_machine\",\n \"parameters\": {\"stateMachineArn\": \"PhysicalResourceId\"},\n },\n }\n+\n+\n+def _apply_substitutions(definition: str, substitutions: Dict[str, str]) -> str:\n+ substitution_regex = re.compile(\"\\\\${[a-zA-Z0-9_]+}\") # might be a bit too strict in some cases\n+ tokens = substitution_regex.findall(definition)\n+ result = definition\n+ for token in tokens:\n+ raw_token = token[2:-1] # strip ${ and }\n+ if raw_token not in substitutions.keys():\n+ raise\n+ result = result.replace(token, substitutions[raw_token])\n+\n+ return result\ndiff --git a/localstack/utils/generic/wait_utils.py b/localstack/utils/generic/wait_utils.py\n--- a/localstack/utils/generic/wait_utils.py\n+++ b/localstack/utils/generic/wait_utils.py\n@@ -24,5 +24,5 @@\n if strategy == \"linear\":\n next_wait = (wait / _retries) * (_retries + 1)\n elif strategy == \"exponential\":\n- next_wait = wait ** 2\n+ next_wait = wait * 2\n wait_until(fn, next_wait, max_retries, strategy, _retries + 1, _max_wait)\n", "issue": "bug: State Machine references don't get resolved properly\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues\n\n### Current Behavior\n\nLambda refs get lost\n\n### Expected Behavior\n\nLambda refs work in state machines\n\n### How are you starting LocalStack?\n\nWith a docker-compose file\n\n### Steps To Reproduce\n\n#### How are you starting localstack (e.g., `bin/localstack` command, arguments, or `docker-compose.yml`)\r\n\r\n docker run localstack/localstack\r\n\r\n#### Client commands (e.g., AWS SDK code snippet, or sequence of \"awslocal\" commands)\r\n\r\n awslocal s3 mb s3://mybucket\r\n\n\n### Environment\n\n```markdown\n- OS: \r\n- LocalStack:\n```\n\n\n### Anything else?\n\nThis is based on a conversation I had with @dominikschubert \n", "before_files": [{"content": "import time\nfrom typing import Callable\n\nfrom typing_extensions import Literal\n\n\ndef wait_until(\n fn: 
Callable[[], bool],\n wait: float = 1.0,\n max_retries: int = 10,\n strategy: Literal[\"exponential\", \"static\", \"linear\"] = \"exponential\",\n _retries: int = 0,\n _max_wait: float = 240,\n) -> None:\n \"\"\"waits until a given condition is true, rechecking it periodically\"\"\"\n if max_retries < _retries:\n return\n completed = fn()\n if not completed:\n if wait > _max_wait:\n return\n time.sleep(wait)\n next_wait = wait # default: static\n if strategy == \"linear\":\n next_wait = (wait / _retries) * (_retries + 1)\n elif strategy == \"exponential\":\n next_wait = wait ** 2\n wait_until(fn, next_wait, max_retries, strategy, _retries + 1, _max_wait)\n", "path": "localstack/utils/generic/wait_utils.py"}, {"content": "from localstack.services.cloudformation.deployment_utils import PLACEHOLDER_RESOURCE_NAME\nfrom localstack.services.cloudformation.service_models import GenericBaseModel\nfrom localstack.utils.aws import aws_stack\n\n\nclass SFNActivity(GenericBaseModel):\n @staticmethod\n def cloudformation_type():\n return \"AWS::StepFunctions::Activity\"\n\n def fetch_state(self, stack_name, resources):\n activity_arn = self.physical_resource_id\n if not activity_arn:\n return None\n client = aws_stack.connect_to_service(\"stepfunctions\")\n result = client.describe_activity(activityArn=activity_arn)\n return result\n\n @staticmethod\n def get_deploy_templates():\n return {\n \"create\": {\n \"function\": \"create_activity\",\n \"parameters\": {\"name\": [\"Name\", PLACEHOLDER_RESOURCE_NAME], \"tags\": \"Tags\"},\n },\n \"delete\": {\n \"function\": \"delete_activity\",\n \"parameters\": {\"activityArn\": \"PhysicalResourceId\"},\n },\n }\n\n\nclass SFNStateMachine(GenericBaseModel):\n @staticmethod\n def cloudformation_type():\n return \"AWS::StepFunctions::StateMachine\"\n\n def get_resource_name(self):\n return self.props.get(\"StateMachineName\")\n\n def get_physical_resource_id(self, attribute=None, **kwargs):\n return self.props.get(\"stateMachineArn\")\n\n def fetch_state(self, stack_name, resources):\n sm_name = self.props.get(\"StateMachineName\") or self.resource_id\n sm_name = self.resolve_refs_recursively(stack_name, sm_name, resources)\n sfn_client = aws_stack.connect_to_service(\"stepfunctions\")\n state_machines = sfn_client.list_state_machines()[\"stateMachines\"]\n sm_arn = [m[\"stateMachineArn\"] for m in state_machines if m[\"name\"] == sm_name]\n if not sm_arn:\n return None\n result = sfn_client.describe_state_machine(stateMachineArn=sm_arn[0])\n return result\n\n def update_resource(self, new_resource, stack_name, resources):\n props = new_resource[\"Properties\"]\n client = aws_stack.connect_to_service(\"stepfunctions\")\n sm_arn = self.props.get(\"stateMachineArn\")\n if not sm_arn:\n self.state = self.fetch_state(stack_name=stack_name, resources=resources)\n sm_arn = self.state[\"stateMachineArn\"]\n kwargs = {\n \"stateMachineArn\": sm_arn,\n \"definition\": props[\"DefinitionString\"],\n }\n return client.update_state_machine(**kwargs)\n\n @staticmethod\n def get_deploy_templates():\n return {\n \"create\": {\n \"function\": \"create_state_machine\",\n \"parameters\": {\n \"name\": [\"StateMachineName\", PLACEHOLDER_RESOURCE_NAME],\n \"definition\": \"DefinitionString\",\n \"roleArn\": \"RoleArn\",\n },\n },\n \"delete\": {\n \"function\": \"delete_state_machine\",\n \"parameters\": {\"stateMachineArn\": \"PhysicalResourceId\"},\n },\n }\n", "path": "localstack/services/cloudformation/models/stepfunctions.py"}]} | 1,819 | 655 |
gh_patches_debug_19491 | rasdani/github-patches | git_diff | sunpy__sunpy-5493 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix CROTA keyword in EUI maps
Currently EUI maps have a `CROTA` keyword, which by the FITS standard should really be a `CROTA2` keyword. This results in the warning
```python
/home/docs/checkouts/readthedocs.org/user_builds/solar-orbiter-python/envs/latest/lib/python3.8/site-packages/astropy/wcs/wcs.py:482: FITSFixedWarning: CROTA = 2.486914995997215 / [deg] rotation angle
keyword looks very much like CROTAn but isn't.
wcsprm = _wcs.Wcsprm(header=header_bytes, key=key,
```
It would be good to
- Check if `CROTA` is in the header and `CROTA2` isn't
- If so, rename the `CROTA` keyword to `CROTA2`
</issue>
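The two checklist items translate almost directly into code. A minimal sketch of the rename, assuming `meta` is the map's header mapping:

```python
meta = {'CROTA': 2.486914995997215}   # as produced by current EUI files

if 'CROTA' in meta and 'CROTA2' not in meta:
    meta['CROTA2'] = meta.pop('CROTA')

print(meta)  # {'CROTA2': 2.486914995997215}
```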
<code>
[start of sunpy/map/sources/solo.py]
1 """
2 Solar Orbiter Map subclass definitions.
3 """
4 import astropy.units as u
5 from astropy.coordinates import CartesianRepresentation
6 from astropy.visualization import ImageNormalize, LinearStretch
7
8 from sunpy.coordinates import HeliocentricInertial
9 from sunpy.map import GenericMap
10 from sunpy.map.sources.source_type import source_stretch
11 from sunpy.time import parse_time
12
13 __all__ = ['EUIMap']
14
15
16 class EUIMap(GenericMap):
17 """
18 EUI Image Map
19
20 The Extreme Ultraviolet Imager (EUI) is a remote sensing instrument onboard the
21 Solar Orbiter (SolO) spacecraft. EUI has three telescopes that image the Sun in
22 Lyman-alpha (1216 Å) and the EUV (174 Å and 304 Å). The three telescopes are the
23 Full Sun Imager (FSI) and two High Resolution Imagers (HRI). The FSI images the
24 whole Sun in both 174 Å and 304 Å. The EUV and Lyman-alpha HRI telescopes image a
25 1000"-by-1000" patch in 174 Å and 1216 Å, respectively.
26
27 References
28 ----------
29 * `Solar Orbiter Mission Page <https://sci.esa.int/web/solar-orbiter/>`__
30 * `EUI Instrument Page <https://wwwbis.sidc.be/EUI/EUI/EUI/EUI/EUI/>`__
31 * `Instrument Paper <https://doi.org/10.1051/0004-6361/201936663>`__
32 """
33
34 def __init__(self, data, header, **kwargs):
35 super().__init__(data, header, **kwargs)
36 self._nickname = self.detector
37 self.plot_settings['cmap'] = self._get_cmap_name()
38 self.plot_settings['norm'] = ImageNormalize(
39 stretch=source_stretch(self.meta, LinearStretch()), clip=False)
40
41 @property
42 def processing_level(self):
43 if self.meta.get('level'):
44 # The level number is prepended by the letter L
45 return int(self.meta.get('level')[1:])
46
47 @property
48 def exposure_time(self):
49 return self.meta.get('xposure', 0.0) * self.timeunit
50
51 @property
52 def date(self):
53 t = self.meta.get('date-avg')
54 timesys = self.meta.get('timesys')
55 return parse_time(t, scale=timesys.lower())
56
57 @property
58 def _supported_observer_coordinates(self):
59 return [(('hcix_obs', 'hciy_obs', 'hciz_obs'),
60 {'x': self.meta.get('hcix_obs'),
61 'y': self.meta.get('hciy_obs'),
62 'z': self.meta.get('hciz_obs'),
63 'unit': u.m,
64 'representation_type': CartesianRepresentation,
65 'frame': HeliocentricInertial})] + super()._supported_observer_coordinates
66
67 @classmethod
68 def is_datasource_for(cls, data, header, **kwargs):
69 """Determines if header corresponds to an EUI image"""
70 is_solo = 'solar orbiter' in str(header.get('obsrvtry', '')).lower()
71 is_eui = str(header.get('instrume', '')).startswith('EUI')
72 return is_solo and is_eui
73
[end of sunpy/map/sources/solo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sunpy/map/sources/solo.py b/sunpy/map/sources/solo.py
--- a/sunpy/map/sources/solo.py
+++ b/sunpy/map/sources/solo.py
@@ -5,6 +5,7 @@
from astropy.coordinates import CartesianRepresentation
from astropy.visualization import ImageNormalize, LinearStretch
+from sunpy import log
from sunpy.coordinates import HeliocentricInertial
from sunpy.map import GenericMap
from sunpy.map.sources.source_type import source_stretch
@@ -38,6 +39,10 @@
self.plot_settings['norm'] = ImageNormalize(
stretch=source_stretch(self.meta, LinearStretch()), clip=False)
+ if 'CROTA' in self.meta and 'CROTA2' not in self.meta:
+ log.debug("Renaming 'CROTA' to 'CROTA2'")
+ self.meta['CROTA2'] = self.meta.pop('CROTA')
+
@property
def processing_level(self):
if self.meta.get('level'):
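To confirm the warning is gone after the rename, one can promote `FITSFixedWarning` to an error around WCS construction (a sketch, assuming a recent astropy where the warning class is importable from `astropy.wcs`):

```python
import warnings
from astropy.io import fits
from astropy.wcs import WCS, FITSFixedWarning

header = fits.Header()
header['CROTA2'] = 2.486914995997215   # the renamed keyword

with warnings.catch_warnings():
    warnings.simplefilter('error', FITSFixedWarning)
    WCS(header)   # raises nothing; a plain CROTA keyword would trip the filter
```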
| {"golden_diff": "diff --git a/sunpy/map/sources/solo.py b/sunpy/map/sources/solo.py\n--- a/sunpy/map/sources/solo.py\n+++ b/sunpy/map/sources/solo.py\n@@ -5,6 +5,7 @@\n from astropy.coordinates import CartesianRepresentation\n from astropy.visualization import ImageNormalize, LinearStretch\n \n+from sunpy import log\n from sunpy.coordinates import HeliocentricInertial\n from sunpy.map import GenericMap\n from sunpy.map.sources.source_type import source_stretch\n@@ -38,6 +39,10 @@\n self.plot_settings['norm'] = ImageNormalize(\n stretch=source_stretch(self.meta, LinearStretch()), clip=False)\n \n+ if 'CROTA' in self.meta and 'CROTA2' not in self.meta:\n+ log.debug(\"Renaming 'CROTA' to 'CROTA2'\")\n+ self.meta['CROTA2'] = self.meta.pop('CROTA')\n+\n @property\n def processing_level(self):\n if self.meta.get('level'):\n", "issue": "Fix CROTA keyword in EUI maps\nCurrently EUI maps have a `CROTA` keyword, which by the FITS standard should really be a `CROTA2` keyword. This results in the warning\r\n```python\r\n/home/docs/checkouts/readthedocs.org/user_builds/solar-orbiter-python/envs/latest/lib/python3.8/site-packages/astropy/wcs/wcs.py:482: FITSFixedWarning: CROTA = 2.486914995997215 / [deg] rotation angle\r\nkeyword looks very much like CROTAn but isn't.\r\n wcsprm = _wcs.Wcsprm(header=header_bytes, key=key,\r\n```\r\nIt would be good to\r\n- Check if CROTA is in the header and CROTA2 isn't\r\n- If so, rename the CROTA keyword to CROTA2\n", "before_files": [{"content": "\"\"\"\nSolar Orbiter Map subclass definitions.\n\"\"\"\nimport astropy.units as u\nfrom astropy.coordinates import CartesianRepresentation\nfrom astropy.visualization import ImageNormalize, LinearStretch\n\nfrom sunpy.coordinates import HeliocentricInertial\nfrom sunpy.map import GenericMap\nfrom sunpy.map.sources.source_type import source_stretch\nfrom sunpy.time import parse_time\n\n__all__ = ['EUIMap']\n\n\nclass EUIMap(GenericMap):\n \"\"\"\n EUI Image Map\n\n The Extreme Ultraviolet Imager (EUI) is a remote sensing instrument onboard the\n Solar Orbiter (SolO) spacecraft. EUI has three telescopes that image the Sun in\n Lyman-alpha (1216 \u00c5) and the EUV (174 \u00c5 and 304 \u00c5). The three telescopes are the\n Full Sun Imager (FSI) and two High Resolution Imagers (HRI). The FSI images the\n whole Sun in both 174 \u00c5 and 304 \u00c5. 
The EUV and Lyman-alpha HRI telescopes image a\n 1000\"-by-1000\" patch in 174 \u00c5 and 1216 \u00c5, respectively.\n\n References\n ----------\n * `Solar Orbiter Mission Page <https://sci.esa.int/web/solar-orbiter/>`__\n * `EUI Instrument Page <https://wwwbis.sidc.be/EUI/EUI/EUI/EUI/EUI/>`__\n * `Instrument Paper <https://doi.org/10.1051/0004-6361/201936663>`__\n \"\"\"\n\n def __init__(self, data, header, **kwargs):\n super().__init__(data, header, **kwargs)\n self._nickname = self.detector\n self.plot_settings['cmap'] = self._get_cmap_name()\n self.plot_settings['norm'] = ImageNormalize(\n stretch=source_stretch(self.meta, LinearStretch()), clip=False)\n\n @property\n def processing_level(self):\n if self.meta.get('level'):\n # The level number is prepended by the letter L\n return int(self.meta.get('level')[1:])\n\n @property\n def exposure_time(self):\n return self.meta.get('xposure', 0.0) * self.timeunit\n\n @property\n def date(self):\n t = self.meta.get('date-avg')\n timesys = self.meta.get('timesys')\n return parse_time(t, scale=timesys.lower())\n\n @property\n def _supported_observer_coordinates(self):\n return [(('hcix_obs', 'hciy_obs', 'hciz_obs'),\n {'x': self.meta.get('hcix_obs'),\n 'y': self.meta.get('hciy_obs'),\n 'z': self.meta.get('hciz_obs'),\n 'unit': u.m,\n 'representation_type': CartesianRepresentation,\n 'frame': HeliocentricInertial})] + super()._supported_observer_coordinates\n\n @classmethod\n def is_datasource_for(cls, data, header, **kwargs):\n \"\"\"Determines if header corresponds to an EUI image\"\"\"\n is_solo = 'solar orbiter' in str(header.get('obsrvtry', '')).lower()\n is_eui = str(header.get('instrume', '')).startswith('EUI')\n return is_solo and is_eui\n", "path": "sunpy/map/sources/solo.py"}]} | 1,623 | 234 |
gh_patches_debug_10058 | rasdani/github-patches | git_diff | docker__docker-py-1972 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Build context (.tar) is not prepared properly
Hey,
This morning I updated to version `3.1.1`; however, using this version I'm getting a weird error from the docker-engine build:
```
ERROR: Error processing tar file(exit status 1): mkdir /foodir/bardir: no such file or directory
```
and the actual build does not start.
It took me some time to realise this was related to the update I got this morning;
reverting back to version `3.0.1`, I could build again.
*NOTE*: `/foodir/bardir` is censored due to a security policy at my company,
so for the sake of this issue, let's assume this is the context:
- Dockerfile
- foodir
- bardir
- file
Also, the path in the error did start with `/`, so I kept it there.
</issue>
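Given the fix below, a plausible reading is that `foodir` (or one of its parents) is a symlink to a directory: `os.path.isdir()` follows symlinks, so the walk recursed through the link and yielded paths the daemon could not recreate. A POSIX sketch of the patched predicate:

```python
import os
import tempfile

root = tempfile.mkdtemp()
os.mkdir(os.path.join(root, 'real'))
os.symlink('real', os.path.join(root, 'link'))

cur = os.path.join(root, 'link')
print(os.path.isdir(cur))                              # True: follows the link
print(os.path.isdir(cur) and not os.path.islink(cur))  # False: patched check
```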
<code>
[start of docker/utils/build.py]
1 import os
2 import re
3
4 from ..constants import IS_WINDOWS_PLATFORM
5 from fnmatch import fnmatch
6 from itertools import chain
7 from .utils import create_archive
8
9
10 def tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False):
11 root = os.path.abspath(path)
12 exclude = exclude or []
13 return create_archive(
14 files=sorted(exclude_paths(root, exclude, dockerfile=dockerfile)),
15 root=root, fileobj=fileobj, gzip=gzip
16 )
17
18
19 _SEP = re.compile('/|\\\\') if IS_WINDOWS_PLATFORM else re.compile('/')
20
21
22 def exclude_paths(root, patterns, dockerfile=None):
23 """
24 Given a root directory path and a list of .dockerignore patterns, return
25 an iterator of all paths (both regular files and directories) in the root
26 directory that do *not* match any of the patterns.
27
28 All paths returned are relative to the root.
29 """
30
31 if dockerfile is None:
32 dockerfile = 'Dockerfile'
33
34 def split_path(p):
35 return [pt for pt in re.split(_SEP, p) if pt and pt != '.']
36
37 def normalize(p):
38 # Leading and trailing slashes are not relevant. Yes,
39 # "foo.py/" must exclude the "foo.py" regular file. "."
40 # components are not relevant either, even if the whole
41 # pattern is only ".", as the Docker reference states: "For
42 # historical reasons, the pattern . is ignored."
43 # ".." component must be cleared with the potential previous
44 # component, regardless of whether it exists: "A preprocessing
45 # step [...] eliminates . and .. elements using Go's
46 # filepath.".
47 i = 0
48 split = split_path(p)
49 while i < len(split):
50 if split[i] == '..':
51 del split[i]
52 if i > 0:
53 del split[i - 1]
54 i -= 1
55 else:
56 i += 1
57 return split
58
59 patterns = (
60 (True, normalize(p[1:]))
61 if p.startswith('!') else
62 (False, normalize(p))
63 for p in patterns)
64 patterns = list(reversed(list(chain(
65 # Exclude empty patterns such as "." or the empty string.
66 filter(lambda p: p[1], patterns),
67 # Always include the Dockerfile and .dockerignore
68 [(True, split_path(dockerfile)), (True, ['.dockerignore'])]))))
69 return set(walk(root, patterns))
70
71
72 def walk(root, patterns, default=True):
73 """
74 A collection of file lying below root that should be included according to
75 patterns.
76 """
77
78 def match(p):
79 if p[1][0] == '**':
80 rec = (p[0], p[1][1:])
81 return [p] + (match(rec) if rec[1] else [rec])
82 elif fnmatch(f, p[1][0]):
83 return [(p[0], p[1][1:])]
84 else:
85 return []
86
87 for f in os.listdir(root):
88 cur = os.path.join(root, f)
89 # The patterns if recursing in that directory.
90 sub = list(chain(*(match(p) for p in patterns)))
91 # Whether this file is explicitely included / excluded.
92 hit = next((p[0] for p in sub if not p[1]), None)
93 # Whether this file is implicitely included / excluded.
94 matched = default if hit is None else hit
95 sub = list(filter(lambda p: p[1], sub))
96 if os.path.isdir(cur):
97 # Entirely skip directories if there are no chance any subfile will
98 # be included.
99 if all(not p[0] for p in sub) and not matched:
100 continue
101 # I think this would greatly speed up dockerignore handling by not
102 # recursing into directories we are sure would be entirely
103 # included, and only yielding the directory itself, which will be
104 # recursively archived anyway. However the current unit test expect
105 # the full list of subfiles and I'm not 100% sure it would make no
106 # difference yet.
107 # if all(p[0] for p in sub) and matched:
108 # yield f
109 # continue
110 children = False
111 for r in (os.path.join(f, p) for p in walk(cur, sub, matched)):
112 yield r
113 children = True
114 # The current unit tests expect directories only under those
115 # conditions. It might be simplifiable though.
116 if (not sub or not children) and hit or hit is None and default:
117 yield f
118 elif matched:
119 yield f
120
[end of docker/utils/build.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docker/utils/build.py b/docker/utils/build.py
--- a/docker/utils/build.py
+++ b/docker/utils/build.py
@@ -93,7 +93,7 @@
# Whether this file is implicitely included / excluded.
matched = default if hit is None else hit
sub = list(filter(lambda p: p[1], sub))
- if os.path.isdir(cur):
+ if os.path.isdir(cur) and not os.path.islink(cur):
# Entirely skip directories if there are no chance any subfile will
# be included.
if all(not p[0] for p in sub) and not matched:
| {"golden_diff": "diff --git a/docker/utils/build.py b/docker/utils/build.py\n--- a/docker/utils/build.py\n+++ b/docker/utils/build.py\n@@ -93,7 +93,7 @@\n # Whether this file is implicitely included / excluded.\n matched = default if hit is None else hit\n sub = list(filter(lambda p: p[1], sub))\n- if os.path.isdir(cur):\n+ if os.path.isdir(cur) and not os.path.islink(cur):\n # Entirely skip directories if there are no chance any subfile will\n # be included.\n if all(not p[0] for p in sub) and not matched:\n", "issue": "Build context (.tar) is not prepared properly\nHey,\r\n\r\nThis morning I've updated to version `3.1.1` however, using this version i'm getting wired error from docker-engine build:\r\n```\r\nERROR: Error processing tar file(exit status 1): mkdir /foodir/bardir: no such file or directory\r\n```\r\nand the actual building does not start.\r\ntook me some time to realise this is related to update i got this morning, \r\nReverting back to version `3.0.1`, I could build again.\r\n\r\n*NOTE*: `/foodir/bardir` is censored due to security policy in my company,\r\nso for the sake of this issue, lets assume this is the context:\r\n- Dockerfile\r\n- foodir\r\n - bardir\r\n - file\r\n\r\nAlso, path in error did start with `/` so i kept it there.\r\n\n", "before_files": [{"content": "import os\nimport re\n\nfrom ..constants import IS_WINDOWS_PLATFORM\nfrom fnmatch import fnmatch\nfrom itertools import chain\nfrom .utils import create_archive\n\n\ndef tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False):\n root = os.path.abspath(path)\n exclude = exclude or []\n return create_archive(\n files=sorted(exclude_paths(root, exclude, dockerfile=dockerfile)),\n root=root, fileobj=fileobj, gzip=gzip\n )\n\n\n_SEP = re.compile('/|\\\\\\\\') if IS_WINDOWS_PLATFORM else re.compile('/')\n\n\ndef exclude_paths(root, patterns, dockerfile=None):\n \"\"\"\n Given a root directory path and a list of .dockerignore patterns, return\n an iterator of all paths (both regular files and directories) in the root\n directory that do *not* match any of the patterns.\n\n All paths returned are relative to the root.\n \"\"\"\n\n if dockerfile is None:\n dockerfile = 'Dockerfile'\n\n def split_path(p):\n return [pt for pt in re.split(_SEP, p) if pt and pt != '.']\n\n def normalize(p):\n # Leading and trailing slashes are not relevant. Yes,\n # \"foo.py/\" must exclude the \"foo.py\" regular file. \".\"\n # components are not relevant either, even if the whole\n # pattern is only \".\", as the Docker reference states: \"For\n # historical reasons, the pattern . is ignored.\"\n # \"..\" component must be cleared with the potential previous\n # component, regardless of whether it exists: \"A preprocessing\n # step [...] eliminates . and .. 
elements using Go's\n # filepath.\".\n i = 0\n split = split_path(p)\n while i < len(split):\n if split[i] == '..':\n del split[i]\n if i > 0:\n del split[i - 1]\n i -= 1\n else:\n i += 1\n return split\n\n patterns = (\n (True, normalize(p[1:]))\n if p.startswith('!') else\n (False, normalize(p))\n for p in patterns)\n patterns = list(reversed(list(chain(\n # Exclude empty patterns such as \".\" or the empty string.\n filter(lambda p: p[1], patterns),\n # Always include the Dockerfile and .dockerignore\n [(True, split_path(dockerfile)), (True, ['.dockerignore'])]))))\n return set(walk(root, patterns))\n\n\ndef walk(root, patterns, default=True):\n \"\"\"\n A collection of file lying below root that should be included according to\n patterns.\n \"\"\"\n\n def match(p):\n if p[1][0] == '**':\n rec = (p[0], p[1][1:])\n return [p] + (match(rec) if rec[1] else [rec])\n elif fnmatch(f, p[1][0]):\n return [(p[0], p[1][1:])]\n else:\n return []\n\n for f in os.listdir(root):\n cur = os.path.join(root, f)\n # The patterns if recursing in that directory.\n sub = list(chain(*(match(p) for p in patterns)))\n # Whether this file is explicitely included / excluded.\n hit = next((p[0] for p in sub if not p[1]), None)\n # Whether this file is implicitely included / excluded.\n matched = default if hit is None else hit\n sub = list(filter(lambda p: p[1], sub))\n if os.path.isdir(cur):\n # Entirely skip directories if there are no chance any subfile will\n # be included.\n if all(not p[0] for p in sub) and not matched:\n continue\n # I think this would greatly speed up dockerignore handling by not\n # recursing into directories we are sure would be entirely\n # included, and only yielding the directory itself, which will be\n # recursively archived anyway. However the current unit test expect\n # the full list of subfiles and I'm not 100% sure it would make no\n # difference yet.\n # if all(p[0] for p in sub) and matched:\n # yield f\n # continue\n children = False\n for r in (os.path.join(f, p) for p in walk(cur, sub, matched)):\n yield r\n children = True\n # The current unit tests expect directories only under those\n # conditions. It might be simplifiable though.\n if (not sub or not children) and hit or hit is None and default:\n yield f\n elif matched:\n yield f\n", "path": "docker/utils/build.py"}]} | 1,989 | 140 |
gh_patches_debug_5701 | rasdani/github-patches | git_diff | getpelican__pelican-3094 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
A dead link on PyPI for the contributions and feedback
I just stumbled upon [Pelican's page in PyPI](https://pypi.org/project/pelican/) and found that the l[ink for the contributions and feedback](https://pypi.org/project/pelican/CONTRIBUTING.rst) is dead. Perhaps, it needs to be updated?
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 from os import walk
4 from os.path import join, relpath
5
6 from setuptools import find_packages, setup
7
8
9 version = "4.8.0"
10
11 requires = ['feedgenerator >= 1.9', 'jinja2 >= 2.7', 'pygments',
12 'docutils>=0.15', 'pytz >= 0a', 'blinker', 'unidecode',
13 'python-dateutil', 'rich']
14
15 entry_points = {
16 'console_scripts': [
17 'pelican = pelican.__main__:main',
18 'pelican-import = pelican.tools.pelican_import:main',
19 'pelican-quickstart = pelican.tools.pelican_quickstart:main',
20 'pelican-themes = pelican.tools.pelican_themes:main',
21 'pelican-plugins = pelican.plugins._utils:list_plugins'
22 ]
23 }
24
25 README = open('README.rst', encoding='utf-8').read()
26 CHANGELOG = open('docs/changelog.rst', encoding='utf-8').read()
27
28 description = '\n'.join([README, CHANGELOG])
29
30 setup(
31 name='pelican',
32 version=version,
33 url='https://getpelican.com/',
34 author='Justin Mayer',
35 author_email='[email protected]',
36 description="Static site generator supporting reStructuredText and "
37 "Markdown source content.",
38 project_urls={
39 'Documentation': 'https://docs.getpelican.com/',
40 'Funding': 'https://donate.getpelican.com/',
41 'Source': 'https://github.com/getpelican/pelican',
42 'Tracker': 'https://github.com/getpelican/pelican/issues',
43 },
44 keywords='static web site generator SSG reStructuredText Markdown',
45 license='AGPLv3',
46 long_description=description,
47 long_description_content_type='text/x-rst',
48 packages=find_packages(),
49 include_package_data=True, # includes all in MANIFEST.in if in package
50 # NOTE : This will collect any files that happen to be in the themes
51 # directory, even though they may not be checked into version control.
52 package_data={ # pelican/themes is not a package, so include manually
53 'pelican': [relpath(join(root, name), 'pelican')
54 for root, _, names in walk(join('pelican', 'themes'))
55 for name in names],
56 },
57 install_requires=requires,
58 extras_require={
59 'Markdown': ['markdown~=3.1.1']
60 },
61 entry_points=entry_points,
62 classifiers=[
63 'Development Status :: 5 - Production/Stable',
64 'Environment :: Console',
65 'Framework :: Pelican',
66 'License :: OSI Approved :: GNU Affero General Public License v3',
67 'Operating System :: OS Independent',
68 'Programming Language :: Python :: 3',
69 'Programming Language :: Python :: 3.7',
70 'Programming Language :: Python :: 3.8',
71 'Programming Language :: Python :: 3.9',
72 'Programming Language :: Python :: 3.10',
73 'Programming Language :: Python :: Implementation :: CPython',
74 'Topic :: Internet :: WWW/HTTP',
75 'Topic :: Software Development :: Libraries :: Python Modules',
76 ],
77 test_suite='pelican.tests',
78 )
79
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -25,6 +25,13 @@
README = open('README.rst', encoding='utf-8').read()
CHANGELOG = open('docs/changelog.rst', encoding='utf-8').read()
+# Relative links in the README must be converted to absolute URL's
+# so that they render correctly on PyPI.
+README = README.replace(
+ "<CONTRIBUTING.rst>",
+ "<https://docs.getpelican.com/en/latest/contribute.html>",
+)
+
description = '\n'.join([README, CHANGELOG])
setup(
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -25,6 +25,13 @@\n README = open('README.rst', encoding='utf-8').read()\n CHANGELOG = open('docs/changelog.rst', encoding='utf-8').read()\n \n+# Relative links in the README must be converted to absolute URL's\n+# so that they render correctly on PyPI.\n+README = README.replace(\n+ \"<CONTRIBUTING.rst>\",\n+ \"<https://docs.getpelican.com/en/latest/contribute.html>\",\n+)\n+\n description = '\\n'.join([README, CHANGELOG])\n \n setup(\n", "issue": "A dead link on PyPI for the contributions and feedback\nI just stumbled upon [Pelican's page in PyPI](https://pypi.org/project/pelican/) and found that the l[ink for the contributions and feedback](https://pypi.org/project/pelican/CONTRIBUTING.rst) is dead. Perhaps, it needs to be updated?\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom os import walk\nfrom os.path import join, relpath\n\nfrom setuptools import find_packages, setup\n\n\nversion = \"4.8.0\"\n\nrequires = ['feedgenerator >= 1.9', 'jinja2 >= 2.7', 'pygments',\n 'docutils>=0.15', 'pytz >= 0a', 'blinker', 'unidecode',\n 'python-dateutil', 'rich']\n\nentry_points = {\n 'console_scripts': [\n 'pelican = pelican.__main__:main',\n 'pelican-import = pelican.tools.pelican_import:main',\n 'pelican-quickstart = pelican.tools.pelican_quickstart:main',\n 'pelican-themes = pelican.tools.pelican_themes:main',\n 'pelican-plugins = pelican.plugins._utils:list_plugins'\n ]\n}\n\nREADME = open('README.rst', encoding='utf-8').read()\nCHANGELOG = open('docs/changelog.rst', encoding='utf-8').read()\n\ndescription = '\\n'.join([README, CHANGELOG])\n\nsetup(\n name='pelican',\n version=version,\n url='https://getpelican.com/',\n author='Justin Mayer',\n author_email='[email protected]',\n description=\"Static site generator supporting reStructuredText and \"\n \"Markdown source content.\",\n project_urls={\n 'Documentation': 'https://docs.getpelican.com/',\n 'Funding': 'https://donate.getpelican.com/',\n 'Source': 'https://github.com/getpelican/pelican',\n 'Tracker': 'https://github.com/getpelican/pelican/issues',\n },\n keywords='static web site generator SSG reStructuredText Markdown',\n license='AGPLv3',\n long_description=description,\n long_description_content_type='text/x-rst',\n packages=find_packages(),\n include_package_data=True, # includes all in MANIFEST.in if in package\n # NOTE : This will collect any files that happen to be in the themes\n # directory, even though they may not be checked into version control.\n package_data={ # pelican/themes is not a package, so include manually\n 'pelican': [relpath(join(root, name), 'pelican')\n for root, _, names in walk(join('pelican', 'themes'))\n for name in names],\n },\n install_requires=requires,\n extras_require={\n 'Markdown': ['markdown~=3.1.1']\n },\n entry_points=entry_points,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Framework :: Pelican',\n 'License :: OSI Approved :: GNU Affero General Public License v3',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n test_suite='pelican.tests',\n)\n", "path": "setup.py"}]} | 
1,468 | 144 |
gh_patches_debug_5379 | rasdani/github-patches | git_diff | cltk__cltk-399 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Compile Poeti d’Italia for CLTK
http://www.mqdq.it/mqdq/poetiditalia/indice_autori_alfa.jsp?scelta=AZ&path=metri_opere
</issue>
<code>
[start of cltk/corpus/latin/corpora.py]
1 """Latin language corpora available for download or loading locally.
2 All remote corpora hosted by github on the cltk organization account, eg:
3 'http://github.com/cltk' + name
4 """
5
6 LATIN_CORPORA = [
7 {'encoding': 'utf-8',
8 'markup': 'tei_xml',
9 'location': 'remote',
10 'type': 'text',
11 'name': 'latin_text_perseus',
12 'origin': 'https://github.com/cltk/latin_text_perseus.git'},
13 {'encoding': 'utf-8',
14 'markup': 'xml',
15 'name': 'latin_treebank_perseus',
16 'origin': 'https://github.com/cltk/latin_treebank_perseus.git',
17 'location': 'remote',
18 'type': 'treebank'},
19 {'encoding': 'utf-8',
20 'markup': 'plaintext',
21 'name': 'latin_treebank_perseus',
22 'origin': 'https://github.com/cltk/latin_treebank_perseus.git',
23 'location': 'remote',
24 'type': 'text'},
25 {'encoding': 'utf-8',
26 'markup': 'plaintext',
27 'name': 'latin_text_latin_library',
28 'origin': 'https://github.com/cltk/latin_text_latin_library.git',
29 'location': 'remote',
30 'type': 'text'},
31 {'encoding': 'latin-1',
32 'markup': 'beta_code',
33 'name': '',
34 'location': 'local',
35 'name': 'phi5',
36 'origin': None,
37 'type': 'text'},
38 {'encoding': 'latin-1',
39 'markup': 'beta_code',
40 'origin': None,
41 'name': 'phi7',
42 'location': 'local',
43 'type': 'text'},
44 {'encoding': 'utf-8',
45 'markup': 'plaintext',
46 'name': 'latin_proper_names_cltk',
47 'origin': 'https://github.com/cltk/latin_proper_names_cltk.git',
48 'location': 'remote',
49 'type': 'lexicon'},
50 {'origin': 'https://github.com/cltk/latin_models_cltk.git',
51 'name': 'latin_models_cltk',
52 'location': 'remote',
53 'type': 'model'},
54 {'encoding': 'utf-8',
55 'markup': 'python',
56 'name': 'latin_pos_lemmata_cltk',
57 'origin': 'https://github.com/cltk/latin_pos_lemmata_cltk.git',
58 'location': 'remote',
59 'type': 'lemma'},
60 {'encoding': 'utf-8',
61 'markup': 'xml',
62 'name': 'latin_treebank_index_thomisticus',
63 'origin': 'https://github.com/cltk/latin_treebank_index_thomisticus.git',
64 'location': 'remote',
65 'type': 'treebank'},
66 {'encoding': 'xml',
67 'markup': 'plaintext',
68 'name': 'latin_lexica_perseus',
69 'origin': 'https://github.com/cltk/latin_lexica_perseus.git',
70 'location': 'remote',
71 'type': 'lexicon'},
72 {'encoding': 'utf-8',
73 'markup': 'plaintext',
74 'name': 'latin_training_set_sentence_cltk',
75 'origin': 'https://github.com/cltk/latin_training_set_sentence_cltk.git',
76 'location': 'remote',
77 'type': 'training_set'},
78 {'origin': 'https://github.com/cltk/latin_word2vec_cltk.git',
79 'name': 'latin_word2vec_cltk',
80 'location': 'remote',
81 'type': 'model'},
82 {'encoding': 'utf-8',
83 'markup': 'tei_xml',
84 'location': 'remote',
85 'type': 'text',
86 'name': 'latin_text_antique_digiliblt',
87 'origin': 'https://github.com/cltk/latin_text_antique_digiliblt.git'},
88 {'location': 'remote',
89 'type': 'text',
90 'name': 'latin_text_corpus_grammaticorum_latinorum',
91 'origin': 'https://github.com/cltk/latin_text_corpus_grammaticorum_latinorum.git'}
92 ]
93
[end of cltk/corpus/latin/corpora.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cltk/corpus/latin/corpora.py b/cltk/corpus/latin/corpora.py
--- a/cltk/corpus/latin/corpora.py
+++ b/cltk/corpus/latin/corpora.py
@@ -88,5 +88,9 @@
{'location': 'remote',
'type': 'text',
'name': 'latin_text_corpus_grammaticorum_latinorum',
- 'origin': 'https://github.com/cltk/latin_text_corpus_grammaticorum_latinorum.git'}
+ 'origin': 'https://github.com/cltk/latin_text_corpus_grammaticorum_latinorum.git'},
+ {'location': 'remote',
+ 'type': 'text',
+ 'name': 'latin_text_poeti_ditalia',
+ 'origin': 'https://github.com/cltk/latin_text_poeti_ditalia.git'}
]
| {"golden_diff": "diff --git a/cltk/corpus/latin/corpora.py b/cltk/corpus/latin/corpora.py\n--- a/cltk/corpus/latin/corpora.py\n+++ b/cltk/corpus/latin/corpora.py\n@@ -88,5 +88,9 @@\n {'location': 'remote',\n 'type': 'text',\n 'name': 'latin_text_corpus_grammaticorum_latinorum',\n- 'origin': 'https://github.com/cltk/latin_text_corpus_grammaticorum_latinorum.git'}\n+ 'origin': 'https://github.com/cltk/latin_text_corpus_grammaticorum_latinorum.git'},\n+ {'location': 'remote',\n+ 'type': 'text',\n+ 'name': 'latin_text_poeti_ditalia',\n+ 'origin': 'https://github.com/cltk/latin_text_poeti_ditalia.git'}\n ]\n", "issue": "Compile Poeti d\u2019Italia for CLTK\nhttp://www.mqdq.it/mqdq/poetiditalia/indice_autori_alfa.jsp?scelta=AZ&path=metri_opere\n\n", "before_files": [{"content": "\"\"\"Latin language corpora available for download or loading locally.\nAll remote corpora hosted by github on the cltk organization account, eg:\n'http://github.com/cltk' + name\n\"\"\"\n\nLATIN_CORPORA = [\n {'encoding': 'utf-8',\n 'markup': 'tei_xml',\n 'location': 'remote',\n 'type': 'text',\n 'name': 'latin_text_perseus',\n 'origin': 'https://github.com/cltk/latin_text_perseus.git'},\n {'encoding': 'utf-8',\n 'markup': 'xml',\n 'name': 'latin_treebank_perseus',\n 'origin': 'https://github.com/cltk/latin_treebank_perseus.git',\n 'location': 'remote',\n 'type': 'treebank'},\n {'encoding': 'utf-8',\n 'markup': 'plaintext',\n 'name': 'latin_treebank_perseus',\n 'origin': 'https://github.com/cltk/latin_treebank_perseus.git',\n 'location': 'remote',\n 'type': 'text'},\n {'encoding': 'utf-8',\n 'markup': 'plaintext',\n 'name': 'latin_text_latin_library',\n 'origin': 'https://github.com/cltk/latin_text_latin_library.git',\n 'location': 'remote',\n 'type': 'text'},\n {'encoding': 'latin-1',\n 'markup': 'beta_code',\n 'name': '',\n 'location': 'local',\n 'name': 'phi5',\n 'origin': None,\n 'type': 'text'},\n {'encoding': 'latin-1',\n 'markup': 'beta_code',\n 'origin': None,\n 'name': 'phi7',\n 'location': 'local',\n 'type': 'text'},\n {'encoding': 'utf-8',\n 'markup': 'plaintext',\n 'name': 'latin_proper_names_cltk',\n 'origin': 'https://github.com/cltk/latin_proper_names_cltk.git',\n 'location': 'remote',\n 'type': 'lexicon'},\n {'origin': 'https://github.com/cltk/latin_models_cltk.git',\n 'name': 'latin_models_cltk',\n 'location': 'remote',\n 'type': 'model'},\n {'encoding': 'utf-8',\n 'markup': 'python',\n 'name': 'latin_pos_lemmata_cltk',\n 'origin': 'https://github.com/cltk/latin_pos_lemmata_cltk.git',\n 'location': 'remote',\n 'type': 'lemma'},\n {'encoding': 'utf-8',\n 'markup': 'xml',\n 'name': 'latin_treebank_index_thomisticus',\n 'origin': 'https://github.com/cltk/latin_treebank_index_thomisticus.git',\n 'location': 'remote',\n 'type': 'treebank'},\n {'encoding': 'xml',\n 'markup': 'plaintext',\n 'name': 'latin_lexica_perseus',\n 'origin': 'https://github.com/cltk/latin_lexica_perseus.git',\n 'location': 'remote',\n 'type': 'lexicon'},\n {'encoding': 'utf-8',\n 'markup': 'plaintext',\n 'name': 'latin_training_set_sentence_cltk',\n 'origin': 'https://github.com/cltk/latin_training_set_sentence_cltk.git',\n 'location': 'remote',\n 'type': 'training_set'},\n {'origin': 'https://github.com/cltk/latin_word2vec_cltk.git',\n 'name': 'latin_word2vec_cltk',\n 'location': 'remote',\n 'type': 'model'},\n {'encoding': 'utf-8',\n 'markup': 'tei_xml',\n 'location': 'remote',\n 'type': 'text',\n 'name': 'latin_text_antique_digiliblt',\n 'origin': 'https://github.com/cltk/latin_text_antique_digiliblt.git'},\n {'location': 
'remote',\n 'type': 'text',\n 'name': 'latin_text_corpus_grammaticorum_latinorum',\n 'origin': 'https://github.com/cltk/latin_text_corpus_grammaticorum_latinorum.git'}\n]\n", "path": "cltk/corpus/latin/corpora.py"}]} | 1,692 | 202 |
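Registering the new corpus in the record above is purely declarative: one more dict appended to `LATIN_CORPORA`. The helper below is a hypothetical sanity check (CLTK does not ship it) showing the keys a remote entry is expected to carry.

```python
NEW_CORPUS = {
    "location": "remote",
    "type": "text",
    "name": "latin_text_poeti_ditalia",
    "origin": "https://github.com/cltk/latin_text_poeti_ditalia.git",
}

def check_remote_entry(entry: dict) -> None:
    # Hypothetical validation: every remote corpus entry needs these keys
    # so a downloader knows what to fetch and where to put it.
    missing = {"location", "type", "name", "origin"} - entry.keys()
    if missing:
        raise ValueError(f"corpus entry is missing keys: {sorted(missing)}")

check_remote_entry(NEW_CORPUS)
print("entry looks well-formed")
```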
gh_patches_debug_608 | rasdani/github-patches | git_diff | pex-tool__pex-1482 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.51
On the docket:
+ [ ] UnicodeDecodeError when packaging after upgrading to v2.1.46 #1479
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.50"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.50"
+__version__ = "2.1.51"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.50\"\n+__version__ = \"2.1.51\"\n", "issue": "Release 2.1.51\nOn the docket:\r\n+ [ ] UnicodeDecodeError when packaging after upgrading to v2.1.46 #1479 \n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.50\"\n", "path": "pex/version.py"}]} | 622 | 97 |
gh_patches_debug_13431 | rasdani/github-patches | git_diff | comic__grand-challenge.org-1755 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CSV export fails on converting uuid to json
Problematic JSON structure in https://grand-challenge.org/api/v1/cases/images/redacted_uuid/?format=csv
```
{"pk":"redacted_uuid","name":"redacted.png","study":null,"files":[{"pk":"redacted_uuid","image":"redacted_uuid","file":"https://grand-challenge.org/media/images/...mhd","image_type":"MHD"},{"pk":"redacted_uuid","image":"09b3b3d6-0994-43d2-b6a9-eaff634b8805","file":"https://grand-challenge.org/media/images/...zraw","image_type":"MHD"}],"reader_study_set":["https://grand-challenge.org/api/v1/reader-studies/redacted_uuid/","https://grand-challenge.org/api/v1/reader-studies/redacted_uuid/"],"archive_set":[],"job_set":[],"width":596,"height":596,"depth":null,"color_space":"RGB","modality":null,"eye_choice":"NA","stereoscopic_choice":null,"field_of_view":null,"shape_without_color":[596,596],"shape":[596,596,3],"voxel_width_mm":null,"voxel_height_mm":null,"voxel_depth_mm":null,"api_url":"https://grand-challenge.org/api/v1/cases/images/redacted_uuid/"}
```
Probably due to trying to serialize the list of files to json in https://github.com/comic/grand-challenge.org/blob/14bc3dd4002756e9cf4a32bb0f238859a9175252/app/grandchallenge/core/renderers.py#L26-L27
</issue>
<code>
[start of app/grandchallenge/core/renderers.py]
1 import json
2
3 from rest_framework_csv.renderers import CSVRenderer
4
5
6 class PaginatedCSVRenderer(CSVRenderer):
7 results_field = "results"
8
9 def render(self, data, *args, **kwargs):
10 if self.results_field in data:
11 data = data[self.results_field]
12
13 return super().render(data, *args, **kwargs)
14
15 def flatten_data(self, data):
16 """
17 Create a dictionary that is 1 level deep, with nested values serialized
18 as json. This means that the header rows are now consistent.
19 """
20 for row in data:
21 flat_row = {k: self._flatten_value(v) for k, v in row.items()}
22 yield flat_row
23
24 @staticmethod
25 def _flatten_value(value):
26 if isinstance(value, (dict, list)):
27 return json.dumps(value)
28 else:
29 return value
30
[end of app/grandchallenge/core/renderers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/grandchallenge/core/renderers.py b/app/grandchallenge/core/renderers.py
--- a/app/grandchallenge/core/renderers.py
+++ b/app/grandchallenge/core/renderers.py
@@ -1,5 +1,7 @@
import json
+from rest_framework.settings import api_settings
+from rest_framework.utils.encoders import JSONEncoder
from rest_framework_csv.renderers import CSVRenderer
@@ -24,6 +26,11 @@
@staticmethod
def _flatten_value(value):
if isinstance(value, (dict, list)):
- return json.dumps(value)
+ return json.dumps(
+ value,
+ cls=JSONEncoder,
+ ensure_ascii=not api_settings.UNICODE_JSON,
+ allow_nan=not api_settings.STRICT_JSON,
+ )
else:
return value
| {"golden_diff": "diff --git a/app/grandchallenge/core/renderers.py b/app/grandchallenge/core/renderers.py\n--- a/app/grandchallenge/core/renderers.py\n+++ b/app/grandchallenge/core/renderers.py\n@@ -1,5 +1,7 @@\n import json\n \n+from rest_framework.settings import api_settings\n+from rest_framework.utils.encoders import JSONEncoder\n from rest_framework_csv.renderers import CSVRenderer\n \n \n@@ -24,6 +26,11 @@\n @staticmethod\n def _flatten_value(value):\n if isinstance(value, (dict, list)):\n- return json.dumps(value)\n+ return json.dumps(\n+ value,\n+ cls=JSONEncoder,\n+ ensure_ascii=not api_settings.UNICODE_JSON,\n+ allow_nan=not api_settings.STRICT_JSON,\n+ )\n else:\n return value\n", "issue": "CSV export fails on converting uuid to json\nProblematic JSON structure in https://grand-challenge.org/api/v1/cases/images/redacted_uuid/?format=csv\r\n\r\n```\r\n{\"pk\":\"redacted_uuid\",\"name\":\"redacted.png\",\"study\":null,\"files\":[{\"pk\":\"redacted_uuid\",\"image\":\"redacted_uuid\",\"file\":\"https://grand-challenge.org/media/images/...mhd\",\"image_type\":\"MHD\"},{\"pk\":\"redacted_uuid\",\"image\":\"09b3b3d6-0994-43d2-b6a9-eaff634b8805\",\"file\":\"https://grand-challenge.org/media/images/...zraw\",\"image_type\":\"MHD\"}],\"reader_study_set\":[\"https://grand-challenge.org/api/v1/reader-studies/redacted_uuid/\",\"https://grand-challenge.org/api/v1/reader-studies/redacted_uuid/\"],\"archive_set\":[],\"job_set\":[],\"width\":596,\"height\":596,\"depth\":null,\"color_space\":\"RGB\",\"modality\":null,\"eye_choice\":\"NA\",\"stereoscopic_choice\":null,\"field_of_view\":null,\"shape_without_color\":[596,596],\"shape\":[596,596,3],\"voxel_width_mm\":null,\"voxel_height_mm\":null,\"voxel_depth_mm\":null,\"api_url\":\"https://grand-challenge.org/api/v1/cases/images/redacted_uuid/\"}\r\n```\r\n\r\nProbably due to trying to serialize the list of files to json in https://github.com/comic/grand-challenge.org/blob/14bc3dd4002756e9cf4a32bb0f238859a9175252/app/grandchallenge/core/renderers.py#L26-L27\n", "before_files": [{"content": "import json\n\nfrom rest_framework_csv.renderers import CSVRenderer\n\n\nclass PaginatedCSVRenderer(CSVRenderer):\n results_field = \"results\"\n\n def render(self, data, *args, **kwargs):\n if self.results_field in data:\n data = data[self.results_field]\n\n return super().render(data, *args, **kwargs)\n\n def flatten_data(self, data):\n \"\"\"\n Create a dictionary that is 1 level deep, with nested values serialized\n as json. This means that the header rows are now consistent.\n \"\"\"\n for row in data:\n flat_row = {k: self._flatten_value(v) for k, v in row.items()}\n yield flat_row\n\n @staticmethod\n def _flatten_value(value):\n if isinstance(value, (dict, list)):\n return json.dumps(value)\n else:\n return value\n", "path": "app/grandchallenge/core/renderers.py"}]} | 1,167 | 181 |
gh_patches_debug_25801 | rasdani/github-patches | git_diff | akvo__akvo-rsr-3421 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Results framework loads very slowly for projects with lot of indicator dimensions
</issue>
<code>
[start of akvo/rest/views/indicator_dimension.py]
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7
8 from akvo.rsr.models import IndicatorDimension
9
10 from ..serializers import IndicatorDimensionSerializer
11 from ..viewsets import PublicProjectViewSet
12
13
14 class IndicatorDimensionViewSet(PublicProjectViewSet):
15 """
16 """
17 queryset = IndicatorDimension.objects.all()
18 serializer_class = IndicatorDimensionSerializer
19 project_relation = 'indicator__result__project__'
20
[end of akvo/rest/views/indicator_dimension.py]
[start of akvo/rest/pagination.py]
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 from rest_framework import pagination
8 from rest_framework.response import Response
9
10
11 class LimitSizePageNumberPagination(pagination.PageNumberPagination):
12 page_size = 30
13 page_size_query_param = 'limit'
14 max_page_size = 100
15
16
17 class TastypieOffsetPagination(pagination.LimitOffsetPagination):
18
19 def get_paginated_response(self, data):
20 """ Emulate the old style Tastypie format if the URL contains /api/v1/
21 """
22 return Response({
23 'meta': {
24 'next': self.get_next_link(),
25 'previous': self.get_previous_link(),
26 'total_count': self.count,
27 'limit': self.limit,
28 'offset': self.offset,
29 },
30 'objects': data
31 })
32
[end of akvo/rest/pagination.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/akvo/rest/pagination.py b/akvo/rest/pagination.py
--- a/akvo/rest/pagination.py
+++ b/akvo/rest/pagination.py
@@ -8,12 +8,18 @@
from rest_framework.response import Response
-class LimitSizePageNumberPagination(pagination.PageNumberPagination):
+class StandardSizePageNumberPagination(pagination.PageNumberPagination):
page_size = 30
page_size_query_param = 'limit'
max_page_size = 100
+class LargeSizePageNumberPagination(pagination.PageNumberPagination):
+ page_size = 100
+ page_size_query_param = 'limit'
+ max_page_size = 1000
+
+
class TastypieOffsetPagination(pagination.LimitOffsetPagination):
def get_paginated_response(self, data):
diff --git a/akvo/rest/views/indicator_dimension.py b/akvo/rest/views/indicator_dimension.py
--- a/akvo/rest/views/indicator_dimension.py
+++ b/akvo/rest/views/indicator_dimension.py
@@ -6,6 +6,7 @@
from akvo.rsr.models import IndicatorDimension
+from akvo.rest.pagination import LargeSizePageNumberPagination
from ..serializers import IndicatorDimensionSerializer
from ..viewsets import PublicProjectViewSet
@@ -17,3 +18,4 @@
queryset = IndicatorDimension.objects.all()
serializer_class = IndicatorDimensionSerializer
project_relation = 'indicator__result__project__'
+ pagination_class = LargeSizePageNumberPagination
| {"golden_diff": "diff --git a/akvo/rest/pagination.py b/akvo/rest/pagination.py\n--- a/akvo/rest/pagination.py\n+++ b/akvo/rest/pagination.py\n@@ -8,12 +8,18 @@\n from rest_framework.response import Response\n \n \n-class LimitSizePageNumberPagination(pagination.PageNumberPagination):\n+class StandardSizePageNumberPagination(pagination.PageNumberPagination):\n page_size = 30\n page_size_query_param = 'limit'\n max_page_size = 100\n \n \n+class LargeSizePageNumberPagination(pagination.PageNumberPagination):\n+ page_size = 100\n+ page_size_query_param = 'limit'\n+ max_page_size = 1000\n+\n+\n class TastypieOffsetPagination(pagination.LimitOffsetPagination):\n \n def get_paginated_response(self, data):\ndiff --git a/akvo/rest/views/indicator_dimension.py b/akvo/rest/views/indicator_dimension.py\n--- a/akvo/rest/views/indicator_dimension.py\n+++ b/akvo/rest/views/indicator_dimension.py\n@@ -6,6 +6,7 @@\n \n \n from akvo.rsr.models import IndicatorDimension\n+from akvo.rest.pagination import LargeSizePageNumberPagination\n \n from ..serializers import IndicatorDimensionSerializer\n from ..viewsets import PublicProjectViewSet\n@@ -17,3 +18,4 @@\n queryset = IndicatorDimension.objects.all()\n serializer_class = IndicatorDimensionSerializer\n project_relation = 'indicator__result__project__'\n+ pagination_class = LargeSizePageNumberPagination\n", "issue": "Results framework loads very slowly for projects with lot of indicator dimensions\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import IndicatorDimension\n\nfrom ..serializers import IndicatorDimensionSerializer\nfrom ..viewsets import PublicProjectViewSet\n\n\nclass IndicatorDimensionViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorDimension.objects.all()\n serializer_class = IndicatorDimensionSerializer\n project_relation = 'indicator__result__project__'\n", "path": "akvo/rest/views/indicator_dimension.py"}, {"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom rest_framework import pagination\nfrom rest_framework.response import Response\n\n\nclass LimitSizePageNumberPagination(pagination.PageNumberPagination):\n page_size = 30\n page_size_query_param = 'limit'\n max_page_size = 100\n\n\nclass TastypieOffsetPagination(pagination.LimitOffsetPagination):\n\n def get_paginated_response(self, data):\n \"\"\" Emulate the old style Tastypie format if the URL contains /api/v1/\n \"\"\"\n return Response({\n 'meta': {\n 'next': self.get_next_link(),\n 'previous': self.get_previous_link(),\n 'total_count': self.count,\n 'limit': self.limit,\n 'offset': self.offset,\n },\n 'objects': data\n })\n", "path": "akvo/rest/pagination.py"}]} | 1,034 | 337 |
gh_patches_debug_317 | rasdani/github-patches | git_diff | jazzband__pip-tools-1871 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Convert the README from rst to md
<!--- Describe the changes here. --->
This PR converts the documentation from README.rst to README.md
Related: https://github.com/jazzband/pip-tools/issues/1856
##### Contributor checklist
- [ ] Provided the tests for the changes.
- [x] Assure PR title is short, clear, and good to be included in the user-oriented changelog
##### Maintainer checklist
- [ ] Assure one of these labels is present: `backwards incompatible`, `feature`, `enhancement`, `deprecation`, `bug`, `dependency`, `docs` or `skip-changelog` as they determine changelog listing.
- [ ] Assign the PR to an existing or new milestone for the target version (following [Semantic Versioning](https://blog.versioneye.com/2014/01/16/semantic-versioning/)).
</issue>
<code>
[start of docs/conf.py]
1 # https://www.sphinx-doc.org/en/master/usage/configuration.html
2 """Configuration file for the Sphinx documentation builder."""
3
4 from __future__ import annotations
5
6 from functools import partial
7 from pathlib import Path
8
9 from setuptools_scm import get_version
10
11 # -- Path setup --------------------------------------------------------------
12
13 PROJECT_ROOT_DIR = Path(__file__).parents[1].resolve()
14 get_scm_version = partial(get_version, root=PROJECT_ROOT_DIR)
15
16
17 # -- Project information -----------------------------------------------------
18
19 project = "pip-tools"
20 author = f"{project} Contributors"
21 copyright = f"The {author}"
22
23 # The short X.Y version
24 version = ".".join(
25 get_scm_version(
26 local_scheme="no-local-version",
27 ).split(
28 "."
29 )[:3],
30 )
31
32 # The full version, including alpha/beta/rc tags
33 release = get_scm_version()
34
35
36 # -- General configuration ---------------------------------------------------
37
38 # Add any Sphinx extension module names here, as strings. They can be
39 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
40 # ones.
41 extensions = ["myst_parser"]
42
43
44 # -- Options for HTML output -------------------------------------------------
45
46 # The theme to use for HTML and HTML Help pages. See the documentation for
47 # a list of builtin themes.
48 #
49 html_theme = "furo"
50
51
52 # -------------------------------------------------------------------------
53 default_role = "any"
54 nitpicky = True
55
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -52,3 +52,4 @@
# -------------------------------------------------------------------------
default_role = "any"
nitpicky = True
+suppress_warnings = ["myst.xref_missing"]
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -52,3 +52,4 @@\n # -------------------------------------------------------------------------\n default_role = \"any\"\n nitpicky = True\n+suppress_warnings = [\"myst.xref_missing\"]\n", "issue": "Convert the README from rst to md\n<!--- Describe the changes here. --->\r\nThis PR converts the documentation from README.rst to README.md\r\nRelated: https://github.com/jazzband/pip-tools/issues/1856\r\n##### Contributor checklist\r\n\r\n- [ ] Provided the tests for the changes.\r\n- [x] Assure PR title is short, clear, and good to be included in the user-oriented changelog\r\n\r\n##### Maintainer checklist\r\n\r\n- [ ] Assure one of these labels is present: `backwards incompatible`, `feature`, `enhancement`, `deprecation`, `bug`, `dependency`, `docs` or `skip-changelog` as they determine changelog listing.\r\n- [ ] Assign the PR to an existing or new milestone for the target version (following [Semantic Versioning](https://blog.versioneye.com/2014/01/16/semantic-versioning/)).\r\n\n", "before_files": [{"content": "# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\"\"\"Configuration file for the Sphinx documentation builder.\"\"\"\n\nfrom __future__ import annotations\n\nfrom functools import partial\nfrom pathlib import Path\n\nfrom setuptools_scm import get_version\n\n# -- Path setup --------------------------------------------------------------\n\nPROJECT_ROOT_DIR = Path(__file__).parents[1].resolve()\nget_scm_version = partial(get_version, root=PROJECT_ROOT_DIR)\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"pip-tools\"\nauthor = f\"{project} Contributors\"\ncopyright = f\"The {author}\"\n\n# The short X.Y version\nversion = \".\".join(\n get_scm_version(\n local_scheme=\"no-local-version\",\n ).split(\n \".\"\n )[:3],\n)\n\n# The full version, including alpha/beta/rc tags\nrelease = get_scm_version()\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\"myst_parser\"]\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"furo\"\n\n\n# -------------------------------------------------------------------------\ndefault_role = \"any\"\nnitpicky = True\n", "path": "docs/conf.py"}]} | 1,116 | 62 |
gh_patches_debug_40399 | rasdani/github-patches | git_diff | SeldonIO__MLServer-233 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support MLflow current protocol
As a follow-up to #167, it would be interesting to explore adding a custom endpoint to the `mlserver-mlflow` runtime which supports [MLflow's existing API](https://www.mlflow.org/docs/latest/models.html#deploy-mlflow-models). This would help reduce friction on user adoption of MLSever, as well as a temporary stopgap for users while they adopt the V2 protocol.
</issue>
<code>
[start of runtimes/mlflow/mlserver_mlflow/runtime.py]
1 import mlflow
2
3 from mlserver.types import InferenceRequest, InferenceResponse
4 from mlserver.model import MLModel
5 from mlserver.utils import get_model_uri
6 from mlserver.codecs import get_decoded_or_raw
7
8 from .encoding import to_outputs
9
10
11 class MLflowRuntime(MLModel):
12 """
13 Implementation of the MLModel interface to load and serve `scikit-learn`
14 models persisted with `joblib`.
15 """
16
17 async def load(self) -> bool:
18 # TODO: Log info message
19 model_uri = await get_model_uri(self._settings)
20 self._model = mlflow.pyfunc.load_model(model_uri)
21
22 self.ready = True
23 return self.ready
24
25 async def predict(self, payload: InferenceRequest) -> InferenceResponse:
26 decoded_payload = get_decoded_or_raw(payload)
27
28 # TODO: Can `output` be a dictionary of tensors?
29 model_output = self._model.predict(decoded_payload)
30
31 return InferenceResponse(
32 model_name=self.name,
33 model_version=self.version,
34 outputs=to_outputs(model_output),
35 )
36
[end of runtimes/mlflow/mlserver_mlflow/runtime.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/runtimes/mlflow/mlserver_mlflow/runtime.py b/runtimes/mlflow/mlserver_mlflow/runtime.py
--- a/runtimes/mlflow/mlserver_mlflow/runtime.py
+++ b/runtimes/mlflow/mlserver_mlflow/runtime.py
@@ -1,9 +1,29 @@
import mlflow
+from io import StringIO
+from fastapi import Request, Response
+
+from mlflow.exceptions import MlflowException
+from mlflow.pyfunc.scoring_server import (
+ CONTENT_TYPES,
+ CONTENT_TYPE_CSV,
+ CONTENT_TYPE_JSON,
+ CONTENT_TYPE_JSON_SPLIT_ORIENTED,
+ CONTENT_TYPE_JSON_RECORDS_ORIENTED,
+ CONTENT_TYPE_JSON_SPLIT_NUMPY,
+ parse_csv_input,
+ infer_and_parse_json_input,
+ parse_json_input,
+ parse_split_oriented_json_input_to_numpy,
+ predictions_to_json,
+)
+
from mlserver.types import InferenceRequest, InferenceResponse
from mlserver.model import MLModel
from mlserver.utils import get_model_uri
from mlserver.codecs import get_decoded_or_raw
+from mlserver.handlers import custom_handler
+from mlserver.errors import InferenceError
from .encoding import to_outputs
@@ -14,10 +34,68 @@
models persisted with `joblib`.
"""
+ # TODO: Decouple from REST
+ @custom_handler(rest_path="/invocations")
+ async def invocations(self, request: Request) -> Response:
+ """
+ This custom handler is meant to mimic the behaviour of the existing
+ scoring server in MLflow.
+ For details about its implementation, please consult the original
+ implementation in the MLflow repository:
+
+ https://github.com/mlflow/mlflow/blob/master/mlflow/pyfunc/scoring_server/__init__.py
+ """
+ content_type = request.headers.get("content-type", None)
+ raw_data = await request.body()
+ as_str = raw_data.decode("utf-8")
+
+ if content_type == CONTENT_TYPE_CSV:
+ csv_input = StringIO(as_str)
+ data = parse_csv_input(csv_input=csv_input)
+ elif content_type == CONTENT_TYPE_JSON:
+ data = infer_and_parse_json_input(as_str, self._input_schema)
+ elif content_type == CONTENT_TYPE_JSON_SPLIT_ORIENTED:
+ data = parse_json_input(
+ json_input=StringIO(as_str),
+ orient="split",
+ schema=self._input_schema,
+ )
+ elif content_type == CONTENT_TYPE_JSON_RECORDS_ORIENTED:
+ data = parse_json_input(
+ json_input=StringIO(as_str),
+ orient="records",
+ schema=self._input_schema,
+ )
+ elif content_type == CONTENT_TYPE_JSON_SPLIT_NUMPY:
+ data = parse_split_oriented_json_input_to_numpy(as_str)
+ else:
+ content_type_error_message = (
+ "This predictor only supports the following content types, "
+ f"{CONTENT_TYPES}. Got '{content_type}'."
+ )
+ raise InferenceError(content_type_error_message)
+
+ try:
+ raw_predictions = self._model.predict(data)
+ except MlflowException as e:
+ raise InferenceError(e.message)
+ except Exception:
+ error_message = (
+ "Encountered an unexpected error while evaluating the model. Verify"
+ " that the serialized input Dataframe is compatible with the model for"
+ " inference."
+ )
+ raise InferenceError(error_message)
+
+ result = StringIO()
+ predictions_to_json(raw_predictions, result)
+ return Response(content=result.getvalue(), media_type="application/json")
+
async def load(self) -> bool:
# TODO: Log info message
model_uri = await get_model_uri(self._settings)
self._model = mlflow.pyfunc.load_model(model_uri)
+ self._input_schema = self._model.metadata.get_input_schema()
self.ready = True
return self.ready
| {"golden_diff": "diff --git a/runtimes/mlflow/mlserver_mlflow/runtime.py b/runtimes/mlflow/mlserver_mlflow/runtime.py\n--- a/runtimes/mlflow/mlserver_mlflow/runtime.py\n+++ b/runtimes/mlflow/mlserver_mlflow/runtime.py\n@@ -1,9 +1,29 @@\n import mlflow\n \n+from io import StringIO\n+from fastapi import Request, Response\n+\n+from mlflow.exceptions import MlflowException\n+from mlflow.pyfunc.scoring_server import (\n+ CONTENT_TYPES,\n+ CONTENT_TYPE_CSV,\n+ CONTENT_TYPE_JSON,\n+ CONTENT_TYPE_JSON_SPLIT_ORIENTED,\n+ CONTENT_TYPE_JSON_RECORDS_ORIENTED,\n+ CONTENT_TYPE_JSON_SPLIT_NUMPY,\n+ parse_csv_input,\n+ infer_and_parse_json_input,\n+ parse_json_input,\n+ parse_split_oriented_json_input_to_numpy,\n+ predictions_to_json,\n+)\n+\n from mlserver.types import InferenceRequest, InferenceResponse\n from mlserver.model import MLModel\n from mlserver.utils import get_model_uri\n from mlserver.codecs import get_decoded_or_raw\n+from mlserver.handlers import custom_handler\n+from mlserver.errors import InferenceError\n \n from .encoding import to_outputs\n \n@@ -14,10 +34,68 @@\n models persisted with `joblib`.\n \"\"\"\n \n+ # TODO: Decouple from REST\n+ @custom_handler(rest_path=\"/invocations\")\n+ async def invocations(self, request: Request) -> Response:\n+ \"\"\"\n+ This custom handler is meant to mimic the behaviour of the existing\n+ scoring server in MLflow.\n+ For details about its implementation, please consult the original\n+ implementation in the MLflow repository:\n+\n+ https://github.com/mlflow/mlflow/blob/master/mlflow/pyfunc/scoring_server/__init__.py\n+ \"\"\"\n+ content_type = request.headers.get(\"content-type\", None)\n+ raw_data = await request.body()\n+ as_str = raw_data.decode(\"utf-8\")\n+\n+ if content_type == CONTENT_TYPE_CSV:\n+ csv_input = StringIO(as_str)\n+ data = parse_csv_input(csv_input=csv_input)\n+ elif content_type == CONTENT_TYPE_JSON:\n+ data = infer_and_parse_json_input(as_str, self._input_schema)\n+ elif content_type == CONTENT_TYPE_JSON_SPLIT_ORIENTED:\n+ data = parse_json_input(\n+ json_input=StringIO(as_str),\n+ orient=\"split\",\n+ schema=self._input_schema,\n+ )\n+ elif content_type == CONTENT_TYPE_JSON_RECORDS_ORIENTED:\n+ data = parse_json_input(\n+ json_input=StringIO(as_str),\n+ orient=\"records\",\n+ schema=self._input_schema,\n+ )\n+ elif content_type == CONTENT_TYPE_JSON_SPLIT_NUMPY:\n+ data = parse_split_oriented_json_input_to_numpy(as_str)\n+ else:\n+ content_type_error_message = (\n+ \"This predictor only supports the following content types, \"\n+ f\"{CONTENT_TYPES}. Got '{content_type}'.\"\n+ )\n+ raise InferenceError(content_type_error_message)\n+\n+ try:\n+ raw_predictions = self._model.predict(data)\n+ except MlflowException as e:\n+ raise InferenceError(e.message)\n+ except Exception:\n+ error_message = (\n+ \"Encountered an unexpected error while evaluating the model. 
Verify\"\n+ \" that the serialized input Dataframe is compatible with the model for\"\n+ \" inference.\"\n+ )\n+ raise InferenceError(error_message)\n+\n+ result = StringIO()\n+ predictions_to_json(raw_predictions, result)\n+ return Response(content=result.getvalue(), media_type=\"application/json\")\n+\n async def load(self) -> bool:\n # TODO: Log info message\n model_uri = await get_model_uri(self._settings)\n self._model = mlflow.pyfunc.load_model(model_uri)\n+ self._input_schema = self._model.metadata.get_input_schema()\n \n self.ready = True\n return self.ready\n", "issue": "Support MLflow current protocol\nAs a follow-up to #167, it would be interesting to explore adding a custom endpoint to the `mlserver-mlflow` runtime which supports [MLflow's existing API](https://www.mlflow.org/docs/latest/models.html#deploy-mlflow-models). This would help reduce friction on user adoption of MLSever, as well as a temporary stopgap for users while they adopt the V2 protocol.\n", "before_files": [{"content": "import mlflow\n\nfrom mlserver.types import InferenceRequest, InferenceResponse\nfrom mlserver.model import MLModel\nfrom mlserver.utils import get_model_uri\nfrom mlserver.codecs import get_decoded_or_raw\n\nfrom .encoding import to_outputs\n\n\nclass MLflowRuntime(MLModel):\n \"\"\"\n Implementation of the MLModel interface to load and serve `scikit-learn`\n models persisted with `joblib`.\n \"\"\"\n\n async def load(self) -> bool:\n # TODO: Log info message\n model_uri = await get_model_uri(self._settings)\n self._model = mlflow.pyfunc.load_model(model_uri)\n\n self.ready = True\n return self.ready\n\n async def predict(self, payload: InferenceRequest) -> InferenceResponse:\n decoded_payload = get_decoded_or_raw(payload)\n\n # TODO: Can `output` be a dictionary of tensors?\n model_output = self._model.predict(decoded_payload)\n\n return InferenceResponse(\n model_name=self.name,\n model_version=self.version,\n outputs=to_outputs(model_output),\n )\n", "path": "runtimes/mlflow/mlserver_mlflow/runtime.py"}]} | 936 | 874 |
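Stripped of MLflow's content-type negotiation, the shape of the custom endpoint added in the record above can be sketched in a few lines. The body here is a deliberately simplified assumption; real requests must be parsed per MLflow's CSV/JSON content types, as the full diff does.

```python
from fastapi import Request, Response

from mlserver.handlers import custom_handler
from mlserver.model import MLModel


class MLflowRuntimeSketch(MLModel):
    @custom_handler(rest_path="/invocations")
    async def invocations(self, request: Request) -> Response:
        # Sketch only: hand the raw payload straight to the pyfunc model.
        # In the real runtime, self._model is set by load(); content-type
        # parsing and error mapping are omitted here for brevity.
        raw = await request.body()
        predictions = self._model.predict(raw.decode("utf-8"))
        return Response(content=str(predictions), media_type="application/json")
```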
gh_patches_debug_20942 | rasdani/github-patches | git_diff | Project-MONAI__MONAI-3873 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Enhance `ContrastiveLoss` to avoid warning
Call ContrastiveLoss will see a warning message:
```
To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
```
Simple code to reproduce this issue:
```
from monai.losses import ContrastiveLoss
import torch
inp = torch.randn([2, 10])
target = torch.randn([2, 10])
loss = ContrastiveLoss(batch_size=2)
loss(inp, target)
```
</issue>
<code>
[start of monai/losses/contrastive.py]
1 # Copyright (c) MONAI Consortium
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 # http://www.apache.org/licenses/LICENSE-2.0
6 # Unless required by applicable law or agreed to in writing, software
7 # distributed under the License is distributed on an "AS IS" BASIS,
8 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 # See the License for the specific language governing permissions and
10 # limitations under the License.
11
12 import torch
13 from torch.nn import functional as F
14 from torch.nn.modules.loss import _Loss
15
16 from monai.utils import deprecated_arg
17
18
19 class ContrastiveLoss(_Loss):
20
21 """
22 Compute the Contrastive loss defined in:
23
24 Chen, Ting, et al. "A simple framework for contrastive learning of visual representations." International
25 conference on machine learning. PMLR, 2020. (http://proceedings.mlr.press/v119/chen20j.html)
26
27 Adapted from:
28 https://github.com/Sara-Ahmed/SiT/blob/1aacd6adcd39b71efc903d16b4e9095b97dda76f/losses.py#L5
29
30 """
31
32 @deprecated_arg(name="reduction", since="0.8", msg_suffix="`reduction` is no longer supported.")
33 def __init__(self, temperature: float = 0.5, batch_size: int = 1, reduction="sum") -> None:
34 """
35 Args:
36 temperature: Can be scaled between 0 and 1 for learning from negative samples, ideally set to 0.5.
37 batch_size: The number of samples.
38
39 Raises:
40 ValueError: When an input of dimension length > 2 is passed
41 ValueError: When input and target are of different shapes
42
43 .. deprecated:: 0.8.0
44
45 `reduction` is no longer supported.
46
47 """
48 super().__init__()
49
50 self.batch_size = batch_size
51 self.temperature = temperature
52
53 def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
54 """
55 Args:
56 input: the shape should be B[F].
57 target: the shape should be B[F].
58 """
59 if len(target.shape) > 2 or len(input.shape) > 2:
60 raise ValueError(
61 f"Either target or input has dimensions greater than 2 where target "
62 f"shape is ({target.shape}) and input shape is ({input.shape})"
63 )
64
65 if target.shape != input.shape:
66 raise ValueError(f"ground truth has differing shape ({target.shape}) from input ({input.shape})")
67
68 temperature_tensor = torch.tensor(self.temperature).to(input.device)
69
70 norm_i = F.normalize(input, dim=1)
71 norm_j = F.normalize(target, dim=1)
72
73 negatives_mask = ~torch.eye(self.batch_size * 2, self.batch_size * 2, dtype=torch.bool)
74 negatives_mask = torch.tensor(negatives_mask, dtype=torch.float)
75 negatives_mask = torch.clone(torch.as_tensor(negatives_mask)).to(input.device)
76
77 repr = torch.cat([norm_i, norm_j], dim=0)
78 sim_matrix = F.cosine_similarity(repr.unsqueeze(1), repr.unsqueeze(0), dim=2)
79 sim_ij = torch.diag(sim_matrix, self.batch_size)
80 sim_ji = torch.diag(sim_matrix, -self.batch_size)
81
82 positives = torch.cat([sim_ij, sim_ji], dim=0)
83 nominator = torch.exp(positives / temperature_tensor)
84 denominator = negatives_mask * torch.exp(sim_matrix / temperature_tensor)
85
86 loss_partial = -torch.log(nominator / torch.sum(denominator, dim=1))
87
88 return torch.sum(loss_partial) / (2 * self.batch_size)
89
[end of monai/losses/contrastive.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/monai/losses/contrastive.py b/monai/losses/contrastive.py
--- a/monai/losses/contrastive.py
+++ b/monai/losses/contrastive.py
@@ -65,14 +65,13 @@
if target.shape != input.shape:
raise ValueError(f"ground truth has differing shape ({target.shape}) from input ({input.shape})")
- temperature_tensor = torch.tensor(self.temperature).to(input.device)
+ temperature_tensor = torch.as_tensor(self.temperature).to(input.device)
norm_i = F.normalize(input, dim=1)
norm_j = F.normalize(target, dim=1)
negatives_mask = ~torch.eye(self.batch_size * 2, self.batch_size * 2, dtype=torch.bool)
- negatives_mask = torch.tensor(negatives_mask, dtype=torch.float)
- negatives_mask = torch.clone(torch.as_tensor(negatives_mask)).to(input.device)
+ negatives_mask = torch.clone(negatives_mask.type(torch.float)).to(input.device)
repr = torch.cat([norm_i, norm_j], dim=0)
sim_matrix = F.cosine_similarity(repr.unsqueeze(1), repr.unsqueeze(0), dim=2)
| {"golden_diff": "diff --git a/monai/losses/contrastive.py b/monai/losses/contrastive.py\n--- a/monai/losses/contrastive.py\n+++ b/monai/losses/contrastive.py\n@@ -65,14 +65,13 @@\n if target.shape != input.shape:\n raise ValueError(f\"ground truth has differing shape ({target.shape}) from input ({input.shape})\")\n \n- temperature_tensor = torch.tensor(self.temperature).to(input.device)\n+ temperature_tensor = torch.as_tensor(self.temperature).to(input.device)\n \n norm_i = F.normalize(input, dim=1)\n norm_j = F.normalize(target, dim=1)\n \n negatives_mask = ~torch.eye(self.batch_size * 2, self.batch_size * 2, dtype=torch.bool)\n- negatives_mask = torch.tensor(negatives_mask, dtype=torch.float)\n- negatives_mask = torch.clone(torch.as_tensor(negatives_mask)).to(input.device)\n+ negatives_mask = torch.clone(negatives_mask.type(torch.float)).to(input.device)\n \n repr = torch.cat([norm_i, norm_j], dim=0)\n sim_matrix = F.cosine_similarity(repr.unsqueeze(1), repr.unsqueeze(0), dim=2)\n", "issue": "Enhance `ContrastiveLoss` to avoid warning\nCall ContrastiveLoss will see a warning message:\r\n```\r\nTo copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).\r\n```\r\n\r\nSimple code to reproduce this issue:\r\n```\r\nfrom monai.losses import ContrastiveLoss\r\nimport torch\r\n\r\ninp = torch.randn([2, 10])\r\ntarget = torch.randn([2, 10])\r\nloss = ContrastiveLoss(batch_size=2)\r\nloss(inp, target)\r\n```\n", "before_files": [{"content": "# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nfrom torch.nn import functional as F\nfrom torch.nn.modules.loss import _Loss\n\nfrom monai.utils import deprecated_arg\n\n\nclass ContrastiveLoss(_Loss):\n\n \"\"\"\n Compute the Contrastive loss defined in:\n\n Chen, Ting, et al. \"A simple framework for contrastive learning of visual representations.\" International\n conference on machine learning. PMLR, 2020. (http://proceedings.mlr.press/v119/chen20j.html)\n\n Adapted from:\n https://github.com/Sara-Ahmed/SiT/blob/1aacd6adcd39b71efc903d16b4e9095b97dda76f/losses.py#L5\n\n \"\"\"\n\n @deprecated_arg(name=\"reduction\", since=\"0.8\", msg_suffix=\"`reduction` is no longer supported.\")\n def __init__(self, temperature: float = 0.5, batch_size: int = 1, reduction=\"sum\") -> None:\n \"\"\"\n Args:\n temperature: Can be scaled between 0 and 1 for learning from negative samples, ideally set to 0.5.\n batch_size: The number of samples.\n\n Raises:\n ValueError: When an input of dimension length > 2 is passed\n ValueError: When input and target are of different shapes\n\n .. 
deprecated:: 0.8.0\n\n `reduction` is no longer supported.\n\n \"\"\"\n super().__init__()\n\n self.batch_size = batch_size\n self.temperature = temperature\n\n def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Args:\n input: the shape should be B[F].\n target: the shape should be B[F].\n \"\"\"\n if len(target.shape) > 2 or len(input.shape) > 2:\n raise ValueError(\n f\"Either target or input has dimensions greater than 2 where target \"\n f\"shape is ({target.shape}) and input shape is ({input.shape})\"\n )\n\n if target.shape != input.shape:\n raise ValueError(f\"ground truth has differing shape ({target.shape}) from input ({input.shape})\")\n\n temperature_tensor = torch.tensor(self.temperature).to(input.device)\n\n norm_i = F.normalize(input, dim=1)\n norm_j = F.normalize(target, dim=1)\n\n negatives_mask = ~torch.eye(self.batch_size * 2, self.batch_size * 2, dtype=torch.bool)\n negatives_mask = torch.tensor(negatives_mask, dtype=torch.float)\n negatives_mask = torch.clone(torch.as_tensor(negatives_mask)).to(input.device)\n\n repr = torch.cat([norm_i, norm_j], dim=0)\n sim_matrix = F.cosine_similarity(repr.unsqueeze(1), repr.unsqueeze(0), dim=2)\n sim_ij = torch.diag(sim_matrix, self.batch_size)\n sim_ji = torch.diag(sim_matrix, -self.batch_size)\n\n positives = torch.cat([sim_ij, sim_ji], dim=0)\n nominator = torch.exp(positives / temperature_tensor)\n denominator = negatives_mask * torch.exp(sim_matrix / temperature_tensor)\n\n loss_partial = -torch.log(nominator / torch.sum(denominator, dim=1))\n\n return torch.sum(loss_partial) / (2 * self.batch_size)\n", "path": "monai/losses/contrastive.py"}]} | 1,695 | 271 |
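
Editor's note: a minimal, self-contained sketch of the behavior behind the row above — not part of the dataset record. Constructing a tensor from an existing tensor with `torch.tensor()` triggers the copy-construct `UserWarning`, while the replacements used in the golden diff (`torch.as_tensor` for the scalar, `Tensor.type` plus `torch.clone` for the mask) do not. The shapes and variable names below are illustrative, not taken from MONAI.

```python
import warnings

import torch

# Stand-in for the negatives mask built inside ContrastiveLoss (batch_size=2).
mask = ~torch.eye(4, 4, dtype=torch.bool)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    _ = torch.tensor(mask, dtype=torch.float)  # copy-constructs from a tensor
assert any("copy construct" in str(w.message) for w in caught)

# The patched pattern: no tensor-from-tensor copy construction, so no warning.
float_mask = torch.clone(mask.type(torch.float))
temperature = torch.as_tensor(0.5)  # plain Python scalar; as_tensor is warning-free
print(float_mask.dtype, temperature.item())
```
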
gh_patches_debug_13438 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3307 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider vetco is broken
During the global build at 2021-09-01-14-42-16, spider **vetco** failed with **0 features** and **24644 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/logs/vetco.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/output/vetco.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/output/vetco.geojson))
</issue>
<code>
[start of locations/spiders/vetco_clinic.py]
1 # -*- coding: utf-8 -*-
2 import json
3 import re
4
5 import scrapy
6
7 from locations.items import GeojsonPointItem
8 from scrapy.selector import Selector
9
10
11 class VetcoSpider(scrapy.Spider):
12 name = "vetco"
13 item_attributes = {'brand': "vetcoclinics"}
14 allowed_domains = ["vetcoclinics.com"]
15 start_urls = (
16 'https://www.vetcoclinics.com/services-and-clinics/vaccination-clinics-by-state/',
17 )
18
19 def start_requests(self):
20 with open('./locations/searchable_points/us_zcta.csv') as points:
21 next(points) # Ignore the header
22 for point in points:
23 row = point.split(',')
24 zip = row[0].strip().strip('"')
25
26 url = f"https://www.vetcoclinics.com/_assets/dynamic/ajax/locator.php?zip={zip}"
27
28 yield scrapy.http.Request(
29 url,
30 self.parse,
31 method='GET'
32 )
33
34 def parse(self, response):
35 jsonresponse = json.loads(response.body_as_unicode())
36 if jsonresponse is not None:
37 clinics = jsonresponse.get('clinics')
38 if clinics:
39 for stores in clinics:
40 body = stores['label']
41 address = Selector(text=body).xpath('//div[@class="locationinfo_area"]/address/text()').extract()
42 if len(address) == 3:
43 addr_full, city_state_postal, phone = [item.split(",") for item in address]
44 city, state_postal = [item.split(",") for item in city_state_postal]
45 state, postal = re.search(r'([A-Z]{2}) (\d{5})', state_postal[0]).groups()
46
47
48 else:
49 addr_full, city_state_postal = [item.split(",") for item in address]
50 city, state_postal = [item.split(",") for item in city_state_postal]
51 state, postal = re.search(r'([A-Z]{2}) (\d{5})', state_postal[0]).groups()
52
53 properties = {
54 'ref': addr_full[0].strip(),
55 'addr_full': addr_full[0].strip(),
56 'city': city[0].strip(),
57 'state': state,
58 'postcode': postal,
59 'lat': float(stores["point"]["lat"]),
60 'lon': float(stores["point"]["long"]),
61 'website': response.url
62 }
63
64 yield GeojsonPointItem(**properties)
65
[end of locations/spiders/vetco_clinic.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/vetco_clinic.py b/locations/spiders/vetco_clinic.py
--- a/locations/spiders/vetco_clinic.py
+++ b/locations/spiders/vetco_clinic.py
@@ -38,7 +38,7 @@
if clinics:
for stores in clinics:
body = stores['label']
- address = Selector(text=body).xpath('//div[@class="locationinfo_area"]/address/text()').extract()
+ address = Selector(text=body).xpath('//address/text()').extract()
if len(address) == 3:
addr_full, city_state_postal, phone = [item.split(",") for item in address]
city, state_postal = [item.split(",") for item in city_state_postal]
| {"golden_diff": "diff --git a/locations/spiders/vetco_clinic.py b/locations/spiders/vetco_clinic.py\n--- a/locations/spiders/vetco_clinic.py\n+++ b/locations/spiders/vetco_clinic.py\n@@ -38,7 +38,7 @@\n if clinics:\n for stores in clinics:\n body = stores['label']\n- address = Selector(text=body).xpath('//div[@class=\"locationinfo_area\"]/address/text()').extract()\n+ address = Selector(text=body).xpath('//address/text()').extract()\n if len(address) == 3:\n addr_full, city_state_postal, phone = [item.split(\",\") for item in address]\n city, state_postal = [item.split(\",\") for item in city_state_postal]\n", "issue": "Spider vetco is broken\nDuring the global build at 2021-09-01-14-42-16, spider **vetco** failed with **0 features** and **24644 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/logs/vetco.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/output/vetco.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/output/vetco.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport re\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom scrapy.selector import Selector\n\n\nclass VetcoSpider(scrapy.Spider):\n name = \"vetco\"\n item_attributes = {'brand': \"vetcoclinics\"}\n allowed_domains = [\"vetcoclinics.com\"]\n start_urls = (\n 'https://www.vetcoclinics.com/services-and-clinics/vaccination-clinics-by-state/',\n )\n\n def start_requests(self):\n with open('./locations/searchable_points/us_zcta.csv') as points:\n next(points) # Ignore the header\n for point in points:\n row = point.split(',')\n zip = row[0].strip().strip('\"')\n\n url = f\"https://www.vetcoclinics.com/_assets/dynamic/ajax/locator.php?zip={zip}\"\n\n yield scrapy.http.Request(\n url,\n self.parse,\n method='GET'\n )\n\n def parse(self, response):\n jsonresponse = json.loads(response.body_as_unicode())\n if jsonresponse is not None:\n clinics = jsonresponse.get('clinics')\n if clinics:\n for stores in clinics:\n body = stores['label']\n address = Selector(text=body).xpath('//div[@class=\"locationinfo_area\"]/address/text()').extract()\n if len(address) == 3:\n addr_full, city_state_postal, phone = [item.split(\",\") for item in address]\n city, state_postal = [item.split(\",\") for item in city_state_postal]\n state, postal = re.search(r'([A-Z]{2}) (\\d{5})', state_postal[0]).groups()\n\n\n else:\n addr_full, city_state_postal = [item.split(\",\") for item in address]\n city, state_postal = [item.split(\",\") for item in city_state_postal]\n state, postal = re.search(r'([A-Z]{2}) (\\d{5})', state_postal[0]).groups()\n\n properties = {\n 'ref': addr_full[0].strip(),\n 'addr_full': addr_full[0].strip(),\n 'city': city[0].strip(),\n 'state': state,\n 'postcode': postal,\n 'lat': float(stores[\"point\"][\"lat\"]),\n 'lon': float(stores[\"point\"][\"long\"]),\n 'website': response.url\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/vetco_clinic.py"}]} | 1,389 | 174 |
gh_patches_debug_51335 | rasdani/github-patches | git_diff | beetbox__beets-1650 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
plexupdate plugin crashed
Every time after import plexupdate plugin crashed with this error:
```
Traceback (most recent call last):
File "/usr/local/bin/beet", line 9, in <module>
load_entry_point('beets==1.3.15', 'console_scripts', 'beet')()
File "/usr/local/lib/python2.7/dist-packages/beets/ui/__init__.py", line 1163, in main
_raw_main(args)
File "/usr/local/lib/python2.7/dist-packages/beets/ui/__init__.py", line 1155, in _raw_main
plugins.send('cli_exit', lib=lib)
File "/usr/local/lib/python2.7/dist-packages/beets/plugins.py", line 458, in send
result = handler(**arguments)
File "/usr/local/lib/python2.7/dist-packages/beets/plugins.py", line 123, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/beetsplug/plexupdate.py", line 87, in update
config['plex']['library_name'].get())
File "/usr/local/lib/python2.7/dist-packages/beetsplug/plexupdate.py", line 41, in update_plex
section_key = get_music_section(host, port, token, library_name)
File "/usr/local/lib/python2.7/dist-packages/beetsplug/plexupdate.py", line 31, in get_music_section
tree = ET.fromstring(r.raw)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 1300, in XML
parser.feed(text)
File "/usr/lib/python2.7/xml/etree/ElementTree.py", line 1640, in feed
self._parser.Parse(data, 0)
TypeError: must be string or read-only buffer, not HTTPResponse
```
</issue>
<code>
[start of beetsplug/plexupdate.py]
1 """Updates an Plex library whenever the beets library is changed.
2
3 Plex Home users enter the Plex Token to enable updating.
4 Put something like the following in your config.yaml to configure:
5 plex:
6 host: localhost
7 port: 32400
8 token: token
9 """
10 from __future__ import (division, absolute_import, print_function,
11 unicode_literals)
12
13 import requests
14 from urlparse import urljoin
15 from urllib import urlencode
16 import xml.etree.ElementTree as ET
17 from beets import config
18 from beets.plugins import BeetsPlugin
19
20
21 def get_music_section(host, port, token, library_name):
22 """Getting the section key for the music library in Plex.
23 """
24 api_endpoint = append_token('library/sections', token)
25 url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)
26
27 # Sends request.
28 r = requests.get(url)
29
30 # Parse xml tree and extract music section key.
31 tree = ET.fromstring(r.text)
32 for child in tree.findall('Directory'):
33 if child.get('title') == library_name:
34 return child.get('key')
35
36
37 def update_plex(host, port, token, library_name):
38 """Sends request to the Plex api to start a library refresh.
39 """
40 # Getting section key and build url.
41 section_key = get_music_section(host, port, token, library_name)
42 api_endpoint = 'library/sections/{0}/refresh'.format(section_key)
43 api_endpoint = append_token(api_endpoint, token)
44 url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)
45
46 # Sends request and returns requests object.
47 r = requests.get(url)
48 return r
49
50
51 def append_token(url, token):
52 """Appends the Plex Home token to the api call if required.
53 """
54 if token:
55 url += '?' + urlencode({'X-Plex-Token': token})
56 return url
57
58
59 class PlexUpdate(BeetsPlugin):
60 def __init__(self):
61 super(PlexUpdate, self).__init__()
62
63 # Adding defaults.
64 config['plex'].add({
65 u'host': u'localhost',
66 u'port': 32400,
67 u'token': u'',
68 u'library_name': u'Music'})
69
70 self.register_listener('database_change', self.listen_for_db_change)
71
72 def listen_for_db_change(self, lib, model):
73 """Listens for beets db change and register the update for the end"""
74 self.register_listener('cli_exit', self.update)
75
76 def update(self, lib):
77 """When the client exists try to send refresh request to Plex server.
78 """
79 self._log.info('Updating Plex library...')
80
81 # Try to send update request.
82 try:
83 update_plex(
84 config['plex']['host'].get(),
85 config['plex']['port'].get(),
86 config['plex']['token'].get(),
87 config['plex']['library_name'].get())
88 self._log.info('... started.')
89
90 except requests.exceptions.RequestException:
91 self._log.warning('Update failed.')
92
[end of beetsplug/plexupdate.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/beetsplug/plexupdate.py b/beetsplug/plexupdate.py
--- a/beetsplug/plexupdate.py
+++ b/beetsplug/plexupdate.py
@@ -28,7 +28,7 @@
r = requests.get(url)
# Parse xml tree and extract music section key.
- tree = ET.fromstring(r.text)
+ tree = ET.fromstring(r.content)
for child in tree.findall('Directory'):
if child.get('title') == library_name:
return child.get('key')
| {"golden_diff": "diff --git a/beetsplug/plexupdate.py b/beetsplug/plexupdate.py\n--- a/beetsplug/plexupdate.py\n+++ b/beetsplug/plexupdate.py\n@@ -28,7 +28,7 @@\n r = requests.get(url)\n \n # Parse xml tree and extract music section key.\n- tree = ET.fromstring(r.text)\n+ tree = ET.fromstring(r.content)\n for child in tree.findall('Directory'):\n if child.get('title') == library_name:\n return child.get('key')\n", "issue": "plexupdate plugin crashed\nEvery time after import plexupdate plugin crashed with this error:\n\n```\nTraceback (most recent call last):\n File \"/usr/local/bin/beet\", line 9, in <module>\n load_entry_point('beets==1.3.15', 'console_scripts', 'beet')()\n File \"/usr/local/lib/python2.7/dist-packages/beets/ui/__init__.py\", line 1163, in main\n _raw_main(args)\n File \"/usr/local/lib/python2.7/dist-packages/beets/ui/__init__.py\", line 1155, in _raw_main\n plugins.send('cli_exit', lib=lib)\n File \"/usr/local/lib/python2.7/dist-packages/beets/plugins.py\", line 458, in send\n result = handler(**arguments)\n File \"/usr/local/lib/python2.7/dist-packages/beets/plugins.py\", line 123, in wrapper\n return func(*args, **kwargs)\n File \"/usr/local/lib/python2.7/dist-packages/beetsplug/plexupdate.py\", line 87, in update\n config['plex']['library_name'].get())\n File \"/usr/local/lib/python2.7/dist-packages/beetsplug/plexupdate.py\", line 41, in update_plex\n section_key = get_music_section(host, port, token, library_name)\n File \"/usr/local/lib/python2.7/dist-packages/beetsplug/plexupdate.py\", line 31, in get_music_section\n tree = ET.fromstring(r.raw)\n File \"/usr/lib/python2.7/xml/etree/ElementTree.py\", line 1300, in XML\n parser.feed(text)\n File \"/usr/lib/python2.7/xml/etree/ElementTree.py\", line 1640, in feed\n self._parser.Parse(data, 0)\nTypeError: must be string or read-only buffer, not HTTPResponse\n```\n\n", "before_files": [{"content": "\"\"\"Updates an Plex library whenever the beets library is changed.\n\nPlex Home users enter the Plex Token to enable updating.\nPut something like the following in your config.yaml to configure:\n plex:\n host: localhost\n port: 32400\n token: token\n\"\"\"\nfrom __future__ import (division, absolute_import, print_function,\n unicode_literals)\n\nimport requests\nfrom urlparse import urljoin\nfrom urllib import urlencode\nimport xml.etree.ElementTree as ET\nfrom beets import config\nfrom beets.plugins import BeetsPlugin\n\n\ndef get_music_section(host, port, token, library_name):\n \"\"\"Getting the section key for the music library in Plex.\n \"\"\"\n api_endpoint = append_token('library/sections', token)\n url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)\n\n # Sends request.\n r = requests.get(url)\n\n # Parse xml tree and extract music section key.\n tree = ET.fromstring(r.text)\n for child in tree.findall('Directory'):\n if child.get('title') == library_name:\n return child.get('key')\n\n\ndef update_plex(host, port, token, library_name):\n \"\"\"Sends request to the Plex api to start a library refresh.\n \"\"\"\n # Getting section key and build url.\n section_key = get_music_section(host, port, token, library_name)\n api_endpoint = 'library/sections/{0}/refresh'.format(section_key)\n api_endpoint = append_token(api_endpoint, token)\n url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)\n\n # Sends request and returns requests object.\n r = requests.get(url)\n return r\n\n\ndef append_token(url, token):\n \"\"\"Appends the Plex Home token to the api call if required.\n \"\"\"\n if token:\n 
url += '?' + urlencode({'X-Plex-Token': token})\n return url\n\n\nclass PlexUpdate(BeetsPlugin):\n def __init__(self):\n super(PlexUpdate, self).__init__()\n\n # Adding defaults.\n config['plex'].add({\n u'host': u'localhost',\n u'port': 32400,\n u'token': u'',\n u'library_name': u'Music'})\n\n self.register_listener('database_change', self.listen_for_db_change)\n\n def listen_for_db_change(self, lib, model):\n \"\"\"Listens for beets db change and register the update for the end\"\"\"\n self.register_listener('cli_exit', self.update)\n\n def update(self, lib):\n \"\"\"When the client exists try to send refresh request to Plex server.\n \"\"\"\n self._log.info('Updating Plex library...')\n\n # Try to send update request.\n try:\n update_plex(\n config['plex']['host'].get(),\n config['plex']['port'].get(),\n config['plex']['token'].get(),\n config['plex']['library_name'].get())\n self._log.info('... started.')\n\n except requests.exceptions.RequestException:\n self._log.warning('Update failed.')\n", "path": "beetsplug/plexupdate.py"}]} | 1,807 | 119 |
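
Editor's note: a short sketch of the parsing detail behind the golden diff above. `xml.etree.ElementTree.fromstring()` accepts `str` or `bytes`; the traceback shows `r.raw` (a file-like `HTTPResponse` object) being passed, hence the `TypeError`, and the patch settles on `r.content` (bytes), which also lets the parser honor the XML document's own encoding declaration. The payload below is a made-up stand-in for a Plex response, so no network access is needed.

```python
import xml.etree.ElementTree as ET

# Illustrative stand-in for the body returned by Plex's /library/sections.
xml_bytes = b'<MediaContainer><Directory key="3" title="Music"/></MediaContainer>'

# bytes (like requests' Response.content) parse fine; a file-like object such as
# Response.raw would raise TypeError here, exactly as in the reported traceback.
tree = ET.fromstring(xml_bytes)
for child in tree.findall("Directory"):
    if child.get("title") == "Music":
        print(child.get("key"))  # -> 3
```
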
gh_patches_debug_34669 | rasdani/github-patches | git_diff | PlasmaPy__PlasmaPy-125 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Raise ImportError when installing/importing with old versions of Python, Astropy, & NumPy
We decided that PlasmaPy will only be supported for:
- Python version > 3.6
- Astropy version > 2.0
- NumPy version > 1.13
However, when I try to run:
```ShellSession
python setup.py install
```
from the command line with Python 3.5 then I get a `SyntaxError` for syntax that is new in version 3.6.
When I try to run
```Python
import plasmapy
```
in Python 3.6 with Astropy 1.3.1, then I get an exception since one of the constants imported from Astropy was renamed in 2.0.
We should raise an appropriate exception (probably an `ImportError`) when we try to install or import PlasmaPy with any of the unsupported versions above. We should also have appropriate and useful error messages for any of these situations. The pull request to close this issue would involve editing `setup.py`, `requirements/base.txt`, and `plasmapy/__init__.py`.
Thank you!
Nick
</issue>
<code>
[start of plasmapy/__init__.py]
1 from ._metadata import (
2 name as __name__,
3 version as __version__,
4 description as __doc__,
5 author as __author__,
6 )
7
8 from .classes import Plasma
9 from . import classes
10 from . import constants
11 from . import atomic
12 from . import math
13 from . import physics
14 from . import utils
15
16 import sys
17 import warnings
18
19 if sys.version_info[:2] < (3, 6): # coveralls: ignore
20 warnings.warn("PlasmaPy does not support Python 3.5 and below")
21
[end of plasmapy/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plasmapy/__init__.py b/plasmapy/__init__.py
--- a/plasmapy/__init__.py
+++ b/plasmapy/__init__.py
@@ -5,16 +5,81 @@
author as __author__,
)
-from .classes import Plasma
-from . import classes
-from . import constants
-from . import atomic
-from . import math
-from . import physics
-from . import utils
-
import sys
import warnings
-if sys.version_info[:2] < (3, 6): # coveralls: ignore
+__minimum_python_version__ = '3.6'
+__minimum_numpy_version__ = '1.13.0'
+__minimum_astropy_version__ = '2.0.0'
+
+
+def _split_version(version):
+ return tuple(int(ver) for ver in version.split('.'))
+
+
+def _min_required_version(required, current): # coveralls: ignore
+ """ Return `True` if the current version meets the required minimum
+ version and `False` if not/ if not installed.
+
+ Right now `required` and `current` are just '.' separated strings
+ but it would be good to make this more general and accept modules.
+ """
+ return _split_version(current) >= _split_version(required)
+
+
+def _check_numpy_version(): # coveralls: ignore
+ """ Make sure numpy in installed and meets the minimum version requirements
+ """
+ required_version = False
+ np_ver = None
+
+ try:
+ from numpy import __version__ as np_ver
+ required_version = _min_required_version(__minimum_numpy_version__,
+ np_ver)
+ except ImportError:
+ pass
+
+ if not required_version:
+ ver_error = ("Numpy {} or above is required for PlasmaPy. The "
+ "currently installed version is {}"
+ ).format(__minimum_numpy_version__, np_ver)
+ raise ImportError(ver_error)
+
+
+def _check_astropy_version(): # coveralls: ignore
+ """ Make sure astropy in installed and meets the minimum version requirements
+ """
+ required_version = False
+ ap_ver = None
+
+ try:
+ from astropy import __version__ as ap_ver
+ required_version = _min_required_version(__minimum_astropy_version__,
+ ap_ver)
+ except ImportError:
+ pass
+
+ if not required_version:
+ ver_error = ("Astropy {} or above is required for PlasmaPy. The "
+ "currently installed version is {}"
+ ).format(__minimum_astropy_version__, ap_ver)
+ raise ImportError(ver_error)
+
+
+if (sys.version_info < _split_version(__minimum_python_version__)): # coveralls: ignore
warnings.warn("PlasmaPy does not support Python 3.5 and below")
+
+_check_numpy_version()
+_check_astropy_version()
+
+try:
+ from .classes import Plasma
+ from . import classes
+ from . import constants
+ from . import atomic
+ from . import math
+ from . import physics
+ from . import utils
+except Exception:
+ raise ImportError("Unable to load PlasmaPy subpackages.")
| {"golden_diff": "diff --git a/plasmapy/__init__.py b/plasmapy/__init__.py\n--- a/plasmapy/__init__.py\n+++ b/plasmapy/__init__.py\n@@ -5,16 +5,81 @@\n author as __author__,\n )\n \n-from .classes import Plasma\n-from . import classes\n-from . import constants\n-from . import atomic\n-from . import math\n-from . import physics\n-from . import utils\n-\n import sys\n import warnings\n \n-if sys.version_info[:2] < (3, 6): # coveralls: ignore\n+__minimum_python_version__ = '3.6'\n+__minimum_numpy_version__ = '1.13.0'\n+__minimum_astropy_version__ = '2.0.0'\n+\n+\n+def _split_version(version):\n+ return tuple(int(ver) for ver in version.split('.'))\n+\n+\n+def _min_required_version(required, current): # coveralls: ignore\n+ \"\"\" Return `True` if the current version meets the required minimum\n+ version and `False` if not/ if not installed.\n+\n+ Right now `required` and `current` are just '.' separated strings\n+ but it would be good to make this more general and accept modules.\n+ \"\"\"\n+ return _split_version(current) >= _split_version(required)\n+\n+\n+def _check_numpy_version(): # coveralls: ignore\n+ \"\"\" Make sure numpy in installed and meets the minimum version requirements\n+ \"\"\"\n+ required_version = False\n+ np_ver = None\n+\n+ try:\n+ from numpy import __version__ as np_ver\n+ required_version = _min_required_version(__minimum_numpy_version__,\n+ np_ver)\n+ except ImportError:\n+ pass\n+\n+ if not required_version:\n+ ver_error = (\"Numpy {} or above is required for PlasmaPy. The \"\n+ \"currently installed version is {}\"\n+ ).format(__minimum_numpy_version__, np_ver)\n+ raise ImportError(ver_error)\n+\n+\n+def _check_astropy_version(): # coveralls: ignore\n+ \"\"\" Make sure astropy in installed and meets the minimum version requirements\n+ \"\"\"\n+ required_version = False\n+ ap_ver = None\n+\n+ try:\n+ from astropy import __version__ as ap_ver\n+ required_version = _min_required_version(__minimum_astropy_version__,\n+ ap_ver)\n+ except ImportError:\n+ pass\n+\n+ if not required_version:\n+ ver_error = (\"Astropy {} or above is required for PlasmaPy. The \"\n+ \"currently installed version is {}\"\n+ ).format(__minimum_astropy_version__, ap_ver)\n+ raise ImportError(ver_error)\n+\n+\n+if (sys.version_info < _split_version(__minimum_python_version__)): # coveralls: ignore\n warnings.warn(\"PlasmaPy does not support Python 3.5 and below\")\n+\n+_check_numpy_version()\n+_check_astropy_version()\n+\n+try:\n+ from .classes import Plasma\n+ from . import classes\n+ from . import constants\n+ from . import atomic\n+ from . import math\n+ from . import physics\n+ from . import utils\n+except Exception:\n+ raise ImportError(\"Unable to load PlasmaPy subpackages.\")\n", "issue": "Raise ImportError when installing/importing with old versions of Python, Astropy, & NumPy\nWe decided that PlasmaPy will only be supported for:\r\n- Python version > 3.6\r\n- Astropy version > 2.0\r\n- NumPy version > 1.13\r\n\r\nHowever, when I try to run:\r\n```ShellSession\r\npython setup.py install\r\n```\r\nfrom the command line with Python 3.5 then I get a `SyntaxError` for syntax that is new in version 3.6.\r\n\r\nWhen I try to run\r\n```Python\r\nimport plasmapy\r\n```\r\nin Python 3.6 with Astropy 1.3.1, then I get an exception since one of the constants imported from Astropy was renamed in 2.0.\r\n\r\nWe should raise an appropriate exception (probably an `ImportError`) when we try to install or import PlasmaPy with any of the unsupported versions above. 
We should also have appropriate and useful error messages for any of these situations. The pull request to close this issue would involve editing `setup.py`, `requirements/base.txt`, and `plasmapy/__init__.py`.\r\n\r\nThank you!\r\nNick\n", "before_files": [{"content": "from ._metadata import (\n name as __name__,\n version as __version__,\n description as __doc__,\n author as __author__,\n)\n\nfrom .classes import Plasma\nfrom . import classes\nfrom . import constants\nfrom . import atomic\nfrom . import math\nfrom . import physics\nfrom . import utils\n\nimport sys\nimport warnings\n\nif sys.version_info[:2] < (3, 6): # coveralls: ignore\n warnings.warn(\"PlasmaPy does not support Python 3.5 and below\")\n", "path": "plasmapy/__init__.py"}]} | 936 | 721 |
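
Editor's note: the core pattern in the golden diff above is a fail-fast version gate at import time. Below is a condensed sketch of that idea, not the actual patch — the real patch only warns for old Python but raises `ImportError` for missing or old NumPy and Astropy, and the message strings here are illustrative.

```python
import sys

__minimum_python_version__ = "3.6"
__minimum_numpy_version__ = "1.13.0"


def _split_version(version):
    """Turn '1.13.0' into a comparable tuple (1, 13, 0)."""
    return tuple(int(part) for part in version.split("."))


# Fail fast with a clear message instead of a confusing SyntaxError later.
if sys.version_info[:2] < _split_version(__minimum_python_version__):
    raise ImportError(f"Python {__minimum_python_version__}+ is required.")

try:
    from numpy import __version__ as np_ver
except ImportError:
    np_ver = None

if np_ver is None or _split_version(np_ver) < _split_version(__minimum_numpy_version__):
    raise ImportError(f"NumPy {__minimum_numpy_version__}+ is required; found {np_ver}.")
```
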
gh_patches_debug_4229 | rasdani/github-patches | git_diff | twisted__twisted-11816 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
twisted.web.pages.errorPage docstring has a typo
> Use [IResource.putChild()](https://docs.twisted.org/en/stable/api/twisted.web.resource.IResource.html#putChild) override specific path.
Should be:
> Use [IResource.putChild()](https://docs.twisted.org/en/stable/api/twisted.web.resource.IResource.html#putChild) override **a** specific path.
</issue>
<code>
[start of src/twisted/web/pages.py]
1 # -*- test-case-name: twisted.web.test.test_pages -*-
2 # Copyright (c) Twisted Matrix Laboratories.
3 # See LICENSE for details.
4
5 """
6 Utility implementations of L{IResource}.
7 """
8
9 __all__ = (
10 "errorPage",
11 "notFound",
12 "forbidden",
13 )
14
15 from typing import cast
16
17 from twisted.web import http
18 from twisted.web.iweb import IRenderable, IRequest
19 from twisted.web.resource import IResource, Resource
20 from twisted.web.template import renderElement, tags
21
22
23 class _ErrorPage(Resource):
24 """
25 L{_ErrorPage} is a resource that responds to all requests with a particular
26 (parameterized) HTTP status code and an HTML body containing some
27 descriptive text. This is useful for rendering simple error pages.
28
29 @see: L{twisted.web.pages.errorPage}
30
31 @ivar _code: An integer HTTP status code which will be used for the
32 response.
33
34 @ivar _brief: A short string which will be included in the response body as
35 the page title.
36
37 @ivar _detail: A longer string which will be included in the response body.
38 """
39
40 def __init__(self, code: int, brief: str, detail: str) -> None:
41 super().__init__()
42 self._code: int = code
43 self._brief: str = brief
44 self._detail: str = detail
45
46 def render(self, request: IRequest) -> object:
47 """
48 Respond to all requests with the given HTTP status code and an HTML
49 document containing the explanatory strings.
50 """
51 request.setResponseCode(self._code)
52 request.setHeader(b"content-type", b"text/html; charset=utf-8")
53 return renderElement(
54 request,
55 # cast because the type annotations here seem off; Tag isn't an
56 # IRenderable but also probably should be? See
57 # https://github.com/twisted/twisted/issues/4982
58 cast(
59 IRenderable,
60 tags.html(
61 tags.head(tags.title(f"{self._code} - {self._brief}")),
62 tags.body(tags.h1(self._brief), tags.p(self._detail)),
63 ),
64 ),
65 )
66
67 def getChild(self, path: bytes, request: IRequest) -> Resource:
68 """
69 Handle all requests for which L{_ErrorPage} lacks a child by returning
70 this error page.
71
72 @param path: A path segment.
73
74 @param request: HTTP request
75 """
76 return self
77
78
79 def errorPage(code: int, brief: str, detail: str) -> IResource:
80 """
81 Build a resource that responds to all requests with a particular HTTP
82 status code and an HTML body containing some descriptive text. This is
83 useful for rendering simple error pages.
84
85 The resource dynamically handles all paths below it. Use
86 L{IResource.putChild()} override specific path.
87
88 @param code: An integer HTTP status code which will be used for the
89 response.
90
91 @param brief: A short string which will be included in the response
92 body as the page title.
93
94 @param detail: A longer string which will be included in the
95 response body.
96
97 @returns: An L{IResource}
98 """
99 return _ErrorPage(code, brief, detail)
100
101
102 def notFound(
103 brief: str = "No Such Resource",
104 message: str = "Sorry. No luck finding that resource.",
105 ) -> IResource:
106 """
107 Generate an L{IResource} with a 404 Not Found status code.
108
109 @see: L{twisted.web.pages.errorPage}
110
111 @param brief: A short string displayed as the page title.
112
113 @param brief: A longer string displayed in the page body.
114
115 @returns: An L{IResource}
116 """
117 return _ErrorPage(http.NOT_FOUND, brief, message)
118
119
120 def forbidden(
121 brief: str = "Forbidden Resource", message: str = "Sorry, resource is forbidden."
122 ) -> IResource:
123 """
124 Generate an L{IResource} with a 403 Forbidden status code.
125
126 @see: L{twisted.web.pages.errorPage}
127
128 @param brief: A short string displayed as the page title.
129
130 @param brief: A longer string displayed in the page body.
131
132 @returns: An L{IResource}
133 """
134 return _ErrorPage(http.FORBIDDEN, brief, message)
135
[end of src/twisted/web/pages.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/twisted/web/pages.py b/src/twisted/web/pages.py
--- a/src/twisted/web/pages.py
+++ b/src/twisted/web/pages.py
@@ -83,7 +83,7 @@
useful for rendering simple error pages.
The resource dynamically handles all paths below it. Use
- L{IResource.putChild()} override specific path.
+ L{IResource.putChild()} to override a specific path.
@param code: An integer HTTP status code which will be used for the
response.
| {"golden_diff": "diff --git a/src/twisted/web/pages.py b/src/twisted/web/pages.py\n--- a/src/twisted/web/pages.py\n+++ b/src/twisted/web/pages.py\n@@ -83,7 +83,7 @@\n useful for rendering simple error pages.\n \n The resource dynamically handles all paths below it. Use\n- L{IResource.putChild()} override specific path.\n+ L{IResource.putChild()} to override a specific path.\n \n @param code: An integer HTTP status code which will be used for the\n response.\n", "issue": "twisted.web.pages.errorPage docstring has a typo\n> Use [IResource.putChild()](https://docs.twisted.org/en/stable/api/twisted.web.resource.IResource.html#putChild) override specific path.\r\n\r\nShould be:\r\n\r\n> Use [IResource.putChild()](https://docs.twisted.org/en/stable/api/twisted.web.resource.IResource.html#putChild) override **a** specific path.\n", "before_files": [{"content": "# -*- test-case-name: twisted.web.test.test_pages -*-\n# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\"\"\"\nUtility implementations of L{IResource}.\n\"\"\"\n\n__all__ = (\n \"errorPage\",\n \"notFound\",\n \"forbidden\",\n)\n\nfrom typing import cast\n\nfrom twisted.web import http\nfrom twisted.web.iweb import IRenderable, IRequest\nfrom twisted.web.resource import IResource, Resource\nfrom twisted.web.template import renderElement, tags\n\n\nclass _ErrorPage(Resource):\n \"\"\"\n L{_ErrorPage} is a resource that responds to all requests with a particular\n (parameterized) HTTP status code and an HTML body containing some\n descriptive text. This is useful for rendering simple error pages.\n\n @see: L{twisted.web.pages.errorPage}\n\n @ivar _code: An integer HTTP status code which will be used for the\n response.\n\n @ivar _brief: A short string which will be included in the response body as\n the page title.\n\n @ivar _detail: A longer string which will be included in the response body.\n \"\"\"\n\n def __init__(self, code: int, brief: str, detail: str) -> None:\n super().__init__()\n self._code: int = code\n self._brief: str = brief\n self._detail: str = detail\n\n def render(self, request: IRequest) -> object:\n \"\"\"\n Respond to all requests with the given HTTP status code and an HTML\n document containing the explanatory strings.\n \"\"\"\n request.setResponseCode(self._code)\n request.setHeader(b\"content-type\", b\"text/html; charset=utf-8\")\n return renderElement(\n request,\n # cast because the type annotations here seem off; Tag isn't an\n # IRenderable but also probably should be? See\n # https://github.com/twisted/twisted/issues/4982\n cast(\n IRenderable,\n tags.html(\n tags.head(tags.title(f\"{self._code} - {self._brief}\")),\n tags.body(tags.h1(self._brief), tags.p(self._detail)),\n ),\n ),\n )\n\n def getChild(self, path: bytes, request: IRequest) -> Resource:\n \"\"\"\n Handle all requests for which L{_ErrorPage} lacks a child by returning\n this error page.\n\n @param path: A path segment.\n\n @param request: HTTP request\n \"\"\"\n return self\n\n\ndef errorPage(code: int, brief: str, detail: str) -> IResource:\n \"\"\"\n Build a resource that responds to all requests with a particular HTTP\n status code and an HTML body containing some descriptive text. This is\n useful for rendering simple error pages.\n\n The resource dynamically handles all paths below it. 
Use\n L{IResource.putChild()} override specific path.\n\n @param code: An integer HTTP status code which will be used for the\n response.\n\n @param brief: A short string which will be included in the response\n body as the page title.\n\n @param detail: A longer string which will be included in the\n response body.\n\n @returns: An L{IResource}\n \"\"\"\n return _ErrorPage(code, brief, detail)\n\n\ndef notFound(\n brief: str = \"No Such Resource\",\n message: str = \"Sorry. No luck finding that resource.\",\n) -> IResource:\n \"\"\"\n Generate an L{IResource} with a 404 Not Found status code.\n\n @see: L{twisted.web.pages.errorPage}\n\n @param brief: A short string displayed as the page title.\n\n @param brief: A longer string displayed in the page body.\n\n @returns: An L{IResource}\n \"\"\"\n return _ErrorPage(http.NOT_FOUND, brief, message)\n\n\ndef forbidden(\n brief: str = \"Forbidden Resource\", message: str = \"Sorry, resource is forbidden.\"\n) -> IResource:\n \"\"\"\n Generate an L{IResource} with a 403 Forbidden status code.\n\n @see: L{twisted.web.pages.errorPage}\n\n @param brief: A short string displayed as the page title.\n\n @param brief: A longer string displayed in the page body.\n\n @returns: An L{IResource}\n \"\"\"\n return _ErrorPage(http.FORBIDDEN, brief, message)\n", "path": "src/twisted/web/pages.py"}]} | 1,884 | 123 |
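
Editor's note: since this fix is purely a docstring wording change, the only thing worth illustrating is what the corrected sentence means. A hedged usage sketch follows; the resource names and strings are invented for the example.

```python
from twisted.web.pages import errorPage
from twisted.web.static import Data

# The errorPage() resource answers every path below it with 503...
maintenance = errorPage(503, "Maintenance", "Back soon.")

# ...and putChild() is how you "override a specific path", per the corrected docstring:
maintenance.putChild(b"health", Data(b"still alive", "text/plain"))
```
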
gh_patches_debug_38939 | rasdani/github-patches | git_diff | AlexsLemonade__refinebio-3363 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Clean up AWS Batch job definition list
### Problem or idea
The Batch job definition section contains 100+ pages of jobs (~2500 items). They haven't been cleaned up properly during deploy process for a while.
### Solution or next step
Clean up stale items, make sure job deregistering script takes care of old job definitions in a right way.
</issue>
<code>
[start of infrastructure/delete_batch_job_queue.py]
1 import os
2 from time import sleep
3
4 import boto3
5
6 AWS_REGION = os.environ["AWS_REGION"]
7 AWS_BATCH_QUEUE_ALL_NAMES = os.environ["REFINEBIO_JOB_QUEUE_ALL_NAMES"].split(",")
8
9 batch = boto3.client("batch", region_name=AWS_REGION)
10
11 # First disable each job queue.
12 for batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:
13 try:
14 batch.update_job_queue(jobQueue=batch_queue_name, state="DISABLED")
15 except Exception as e:
16 # If the job queue doesn't exist, that's cool, we were trying to delete it anyway.
17 pass
18
19 # Then wait for each one to be disabled so it can be deleted.
20 for batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:
21 while True:
22 job_queues = batch.describe_job_queues(jobQueues=[batch_queue_name])
23 if "jobQueues" in job_queues:
24 job_queue = job_queues["jobQueues"][0]
25 if job_queue["state"] == "DISABLED" and job_queue["status"] != "UPDATING":
26 break
27 else:
28 print(f"Unexpected response while describing job queue {batch_queue_name}.")
29 break
30
31 sleep(3)
32
33 batch.delete_job_queue(jobQueue=batch_queue_name)
34
[end of infrastructure/delete_batch_job_queue.py]
[start of infrastructure/deregister_batch_job_definitions.py]
1 import os
2
3 import boto3
4
5 AWS_REGION = os.environ["AWS_REGION"]
6
7 batch = boto3.client("batch", region_name=AWS_REGION)
8
9 # TODO: stop repeating this construction everywhere. Just set it once somewhere.
10 JOB_DEFINITION_PREFIX = os.environ["USER"] + "_" + os.environ["STAGE"] + "_"
11
12 job_definition_files = os.listdir("batch-job-templates")
13
14 job_definition_list = list(
15 {JOB_DEFINITION_PREFIX + job_def.upper().split(".")[0] for job_def in job_definition_files}
16 )
17
18 # Have to go one by one because providing a list of job names doesn't work:
19 # https://github.com/boto/boto3/issues/2908
20 for job_definition in job_definition_list:
21 job_definitions = batch.describe_job_definitions(
22 jobDefinitionName=job_definition, status="ACTIVE"
23 )
24 # There can be multiple revisions per job definition. We want them all gone.
25 for job_definition_revision in job_definitions["jobDefinitions"]:
26 batch.deregister_job_definition(jobDefinition=job_definition_revision["jobDefinitionArn"])
27
[end of infrastructure/deregister_batch_job_definitions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/infrastructure/delete_batch_job_queue.py b/infrastructure/delete_batch_job_queue.py
--- a/infrastructure/delete_batch_job_queue.py
+++ b/infrastructure/delete_batch_job_queue.py
@@ -2,19 +2,22 @@
from time import sleep
import boto3
+from botocore.exceptions import ClientError
-AWS_REGION = os.environ["AWS_REGION"]
AWS_BATCH_QUEUE_ALL_NAMES = os.environ["REFINEBIO_JOB_QUEUE_ALL_NAMES"].split(",")
-batch = boto3.client("batch", region_name=AWS_REGION)
+batch = boto3.client("batch", region_name=os.environ["AWS_REGION"])
# First disable each job queue.
for batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:
try:
batch.update_job_queue(jobQueue=batch_queue_name, state="DISABLED")
- except Exception as e:
+ except ClientError as e:
# If the job queue doesn't exist, that's cool, we were trying to delete it anyway.
- pass
+ if str(e).endswith(" does not exist."):
+ pass
+ else:
+ raise e
# Then wait for each one to be disabled so it can be deleted.
for batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:
diff --git a/infrastructure/deregister_batch_job_definitions.py b/infrastructure/deregister_batch_job_definitions.py
--- a/infrastructure/deregister_batch_job_definitions.py
+++ b/infrastructure/deregister_batch_job_definitions.py
@@ -2,25 +2,36 @@
import boto3
-AWS_REGION = os.environ["AWS_REGION"]
-
-batch = boto3.client("batch", region_name=AWS_REGION)
+batch = boto3.client("batch", region_name=os.environ["AWS_REGION"])
# TODO: stop repeating this construction everywhere. Just set it once somewhere.
JOB_DEFINITION_PREFIX = os.environ["USER"] + "_" + os.environ["STAGE"] + "_"
-job_definition_files = os.listdir("batch-job-templates")
-
-job_definition_list = list(
- {JOB_DEFINITION_PREFIX + job_def.upper().split(".")[0] for job_def in job_definition_files}
+job_names = (
+ JOB_DEFINITION_PREFIX + batch_job_template.upper().split(".")[0]
+ for batch_job_template in os.listdir("batch-job-templates")
)
+nextToken = ""
# Have to go one by one because providing a list of job names doesn't work:
# https://github.com/boto/boto3/issues/2908
-for job_definition in job_definition_list:
- job_definitions = batch.describe_job_definitions(
- jobDefinitionName=job_definition, status="ACTIVE"
- )
- # There can be multiple revisions per job definition. We want them all gone.
- for job_definition_revision in job_definitions["jobDefinitions"]:
- batch.deregister_job_definition(jobDefinition=job_definition_revision["jobDefinitionArn"])
+for job_name in sorted(job_names):
+ while True:
+ data = {
+ "jobDefinitionName": job_name,
+ "maxResults": 100,
+ "status": "ACTIVE",
+ }
+ if nextToken:
+ data["nextToken"] = nextToken
+
+ response = batch.describe_job_definitions(**data)
+ nextToken = response.get("nextToken", "")
+
+ job_definitions = response.get("jobDefinitions")
+ if not job_definitions:
+ break
+
+ # There can be multiple revisions per job definition. We want them all gone.
+ for job_definition in job_definitions:
+ batch.deregister_job_definition(jobDefinition=job_definition["jobDefinitionArn"])
| {"golden_diff": "diff --git a/infrastructure/delete_batch_job_queue.py b/infrastructure/delete_batch_job_queue.py\n--- a/infrastructure/delete_batch_job_queue.py\n+++ b/infrastructure/delete_batch_job_queue.py\n@@ -2,19 +2,22 @@\n from time import sleep\n \n import boto3\n+from botocore.exceptions import ClientError\n \n-AWS_REGION = os.environ[\"AWS_REGION\"]\n AWS_BATCH_QUEUE_ALL_NAMES = os.environ[\"REFINEBIO_JOB_QUEUE_ALL_NAMES\"].split(\",\")\n \n-batch = boto3.client(\"batch\", region_name=AWS_REGION)\n+batch = boto3.client(\"batch\", region_name=os.environ[\"AWS_REGION\"])\n \n # First disable each job queue.\n for batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:\n try:\n batch.update_job_queue(jobQueue=batch_queue_name, state=\"DISABLED\")\n- except Exception as e:\n+ except ClientError as e:\n # If the job queue doesn't exist, that's cool, we were trying to delete it anyway.\n- pass\n+ if str(e).endswith(\" does not exist.\"):\n+ pass\n+ else:\n+ raise e\n \n # Then wait for each one to be disabled so it can be deleted.\n for batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:\ndiff --git a/infrastructure/deregister_batch_job_definitions.py b/infrastructure/deregister_batch_job_definitions.py\n--- a/infrastructure/deregister_batch_job_definitions.py\n+++ b/infrastructure/deregister_batch_job_definitions.py\n@@ -2,25 +2,36 @@\n \n import boto3\n \n-AWS_REGION = os.environ[\"AWS_REGION\"]\n-\n-batch = boto3.client(\"batch\", region_name=AWS_REGION)\n+batch = boto3.client(\"batch\", region_name=os.environ[\"AWS_REGION\"])\n \n # TODO: stop repeating this construction everywhere. Just set it once somewhere.\n JOB_DEFINITION_PREFIX = os.environ[\"USER\"] + \"_\" + os.environ[\"STAGE\"] + \"_\"\n \n-job_definition_files = os.listdir(\"batch-job-templates\")\n-\n-job_definition_list = list(\n- {JOB_DEFINITION_PREFIX + job_def.upper().split(\".\")[0] for job_def in job_definition_files}\n+job_names = (\n+ JOB_DEFINITION_PREFIX + batch_job_template.upper().split(\".\")[0]\n+ for batch_job_template in os.listdir(\"batch-job-templates\")\n )\n+nextToken = \"\"\n \n # Have to go one by one because providing a list of job names doesn't work:\n # https://github.com/boto/boto3/issues/2908\n-for job_definition in job_definition_list:\n- job_definitions = batch.describe_job_definitions(\n- jobDefinitionName=job_definition, status=\"ACTIVE\"\n- )\n- # There can be multiple revisions per job definition. We want them all gone.\n- for job_definition_revision in job_definitions[\"jobDefinitions\"]:\n- batch.deregister_job_definition(jobDefinition=job_definition_revision[\"jobDefinitionArn\"])\n+for job_name in sorted(job_names):\n+ while True:\n+ data = {\n+ \"jobDefinitionName\": job_name,\n+ \"maxResults\": 100,\n+ \"status\": \"ACTIVE\",\n+ }\n+ if nextToken:\n+ data[\"nextToken\"] = nextToken\n+\n+ response = batch.describe_job_definitions(**data)\n+ nextToken = response.get(\"nextToken\", \"\")\n+\n+ job_definitions = response.get(\"jobDefinitions\")\n+ if not job_definitions:\n+ break\n+\n+ # There can be multiple revisions per job definition. We want them all gone.\n+ for job_definition in job_definitions:\n+ batch.deregister_job_definition(jobDefinition=job_definition[\"jobDefinitionArn\"])\n", "issue": "Clean up AWS Batch job definition list\n### Problem or idea\r\n\r\nThe Batch job definition section contains 100+ pages of jobs (~2500 items). 
They haven't been cleaned up properly during deploy process for a while.\r\n\r\n\r\n### Solution or next step\r\n\r\nClean up stale items, make sure job deregistering script takes care of old job definitions in a right way.\r\n\n", "before_files": [{"content": "import os\nfrom time import sleep\n\nimport boto3\n\nAWS_REGION = os.environ[\"AWS_REGION\"]\nAWS_BATCH_QUEUE_ALL_NAMES = os.environ[\"REFINEBIO_JOB_QUEUE_ALL_NAMES\"].split(\",\")\n\nbatch = boto3.client(\"batch\", region_name=AWS_REGION)\n\n# First disable each job queue.\nfor batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:\n try:\n batch.update_job_queue(jobQueue=batch_queue_name, state=\"DISABLED\")\n except Exception as e:\n # If the job queue doesn't exist, that's cool, we were trying to delete it anyway.\n pass\n\n# Then wait for each one to be disabled so it can be deleted.\nfor batch_queue_name in AWS_BATCH_QUEUE_ALL_NAMES:\n while True:\n job_queues = batch.describe_job_queues(jobQueues=[batch_queue_name])\n if \"jobQueues\" in job_queues:\n job_queue = job_queues[\"jobQueues\"][0]\n if job_queue[\"state\"] == \"DISABLED\" and job_queue[\"status\"] != \"UPDATING\":\n break\n else:\n print(f\"Unexpected response while describing job queue {batch_queue_name}.\")\n break\n\n sleep(3)\n\n batch.delete_job_queue(jobQueue=batch_queue_name)\n", "path": "infrastructure/delete_batch_job_queue.py"}, {"content": "import os\n\nimport boto3\n\nAWS_REGION = os.environ[\"AWS_REGION\"]\n\nbatch = boto3.client(\"batch\", region_name=AWS_REGION)\n\n# TODO: stop repeating this construction everywhere. Just set it once somewhere.\nJOB_DEFINITION_PREFIX = os.environ[\"USER\"] + \"_\" + os.environ[\"STAGE\"] + \"_\"\n\njob_definition_files = os.listdir(\"batch-job-templates\")\n\njob_definition_list = list(\n {JOB_DEFINITION_PREFIX + job_def.upper().split(\".\")[0] for job_def in job_definition_files}\n)\n\n# Have to go one by one because providing a list of job names doesn't work:\n# https://github.com/boto/boto3/issues/2908\nfor job_definition in job_definition_list:\n job_definitions = batch.describe_job_definitions(\n jobDefinitionName=job_definition, status=\"ACTIVE\"\n )\n # There can be multiple revisions per job definition. We want them all gone.\n for job_definition_revision in job_definitions[\"jobDefinitions\"]:\n batch.deregister_job_definition(jobDefinition=job_definition_revision[\"jobDefinitionArn\"])\n", "path": "infrastructure/deregister_batch_job_definitions.py"}]} | 1,238 | 787 |
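
Editor's note: the essential change in the deregistration script above is paging through `describe_job_definitions` with `nextToken`, since one job definition can accumulate far more than a single page of revisions. Below is a standalone sketch of that loop — the region and job name are placeholders, AWS credentials are assumed to be configured, and this variant loops until the token is exhausted rather than until an empty page, which also copes with more than 100 revisions.

```python
import boto3

batch = boto3.client("batch", region_name="us-east-1")  # placeholder region


def deregister_all_revisions(job_name: str) -> None:
    """Deregister every ACTIVE revision of one Batch job definition."""
    next_token = ""
    while True:
        kwargs = {"jobDefinitionName": job_name, "status": "ACTIVE", "maxResults": 100}
        if next_token:
            kwargs["nextToken"] = next_token
        response = batch.describe_job_definitions(**kwargs)
        for job_definition in response.get("jobDefinitions", []):
            batch.deregister_job_definition(jobDefinition=job_definition["jobDefinitionArn"])
        next_token = response.get("nextToken", "")
        if not next_token:  # no more pages of revisions
            break


deregister_all_revisions("myuser_prod_SALMON")  # illustrative job definition name
```
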
gh_patches_debug_10043 | rasdani/github-patches | git_diff | nautobot__nautobot-877 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Migrate user, social auth, and system settings from Django Admin to be first-class citizens in UI/API
### Proposed Functionality
Before the launch of Nautobot, there was significant work to migrate functionality from Django Admin into core and create first-class views and APIs for webhooks, custom links, and export templates. Custom fields are also coming soon in #229. This proposes doing the same for everything in the Users, Python Social Auth, and System sections in Django Admin.
### Use Cases
As Patti the Platform Admin,
I want to have the ability to manage users, groups, and permissions from the Nautobot UI without going into Django Admin,
So that there is a more consistent user experience for my team as well as APIs for anything else that doesn't have an API currently within Django Admin, e.g. Social Auth, retrieving background tasks, etc.
One option is to create an "Admin" dropdown in the navigation bar which contains "Users (no change)," "Social Auth (drop 'Python')," and "System" sections. We may need one additional section called "plugins" for when plugins have created entries in Django Admin.
I will know this is done when it is possible to:
* Manage what is currently under Users, Python Social Auth, and System sections in Django Admin in the main Nautobot UI
* Manage what is currently under Users, Python Social Auth, and System sections in Django Admin through Nautobot REST API calls
</issue>
<code>
[start of nautobot/core/admin.py]
1 from django.conf import settings
2 from django.contrib.admin import site as admin_site
3 from taggit.models import Tag
4
5
6 # Override default AdminSite attributes so we can avoid creating and
7 # registering our own class
8 admin_site.site_header = "Nautobot Administration"
9 admin_site.site_title = "Nautobot"
10 admin_site.index_template = "admin/nautobot_index.html"
11
12 # Unregister the unused stock Tag model provided by django-taggit
13 admin_site.unregister(Tag)
14
[end of nautobot/core/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nautobot/core/admin.py b/nautobot/core/admin.py
--- a/nautobot/core/admin.py
+++ b/nautobot/core/admin.py
@@ -1,5 +1,6 @@
from django.conf import settings
from django.contrib.admin import site as admin_site
+from social_django.models import Association, Nonce, UserSocialAuth
from taggit.models import Tag
@@ -11,3 +12,8 @@
# Unregister the unused stock Tag model provided by django-taggit
admin_site.unregister(Tag)
+
+# Unregister SocialAuth from Django admin menu
+admin_site.unregister(Association)
+admin_site.unregister(Nonce)
+admin_site.unregister(UserSocialAuth)
| {"golden_diff": "diff --git a/nautobot/core/admin.py b/nautobot/core/admin.py\n--- a/nautobot/core/admin.py\n+++ b/nautobot/core/admin.py\n@@ -1,5 +1,6 @@\n from django.conf import settings\n from django.contrib.admin import site as admin_site\n+from social_django.models import Association, Nonce, UserSocialAuth\n from taggit.models import Tag\n \n \n@@ -11,3 +12,8 @@\n \n # Unregister the unused stock Tag model provided by django-taggit\n admin_site.unregister(Tag)\n+\n+# Unregister SocialAuth from Django admin menu\n+admin_site.unregister(Association)\n+admin_site.unregister(Nonce)\n+admin_site.unregister(UserSocialAuth)\n", "issue": "Migrate user, social auth, and system settings from Django Admin to be first-class citizens in UI/API\n### Proposed Functionality \r\n\r\nBefore the launch of Nautobot, there was significant work to migrate functionality from Django Admin into core and create first-class views and APIs for webhooks, custom links, and export templates. Custom fields are also coming soon in #229. This proposes doing the same for everything in the Users, Python Social Auth, and System sections in Django Admin.\r\n\r\n### Use Cases\r\n\r\nAs Patti the Platform Admin,\r\nI want to have the ability to manage users, groups, and permissions from the Nautobot UI without going into Django Admin,\r\nSo that there is a more consistent user experience for my team as well as APIs for anything else that doesn't have an API currently within Django Admin, e.g. Social Auth, retrieving background tasks, etc. \r\n\r\nOne option is to create an \"Admin\" dropdown in the navigation bar which contains \"Users (no change),\" \"Social Auth (drop 'Python'),\" and \"System\" sections. We may need one additional section called \"plugins\" for when plugins have created entries in Django Admin.\r\n\r\nI will know this is done when it is possible to:\r\n* Manage what is currently under Users, Python Social Auth, and System sections in Django Admin in the main Nautobot UI\r\n* Manage what is currently under Users, Python Social Auth, and System sections in Django Admin through Nautobot REST API calls\r\n\n", "before_files": [{"content": "from django.conf import settings\nfrom django.contrib.admin import site as admin_site\nfrom taggit.models import Tag\n\n\n# Override default AdminSite attributes so we can avoid creating and\n# registering our own class\nadmin_site.site_header = \"Nautobot Administration\"\nadmin_site.site_title = \"Nautobot\"\nadmin_site.index_template = \"admin/nautobot_index.html\"\n\n# Unregister the unused stock Tag model provided by django-taggit\nadmin_site.unregister(Tag)\n", "path": "nautobot/core/admin.py"}]} | 958 | 154 |
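The golden diff above hides the social-auth models by calling `admin_site.unregister` on each one. Django's `unregister` raises `NotRegistered` when a model was never registered (for example, if the providing app is not installed), so a defensive variant looks like the sketch below; this is an illustration, not code from the Nautobot repository:

```python
from django.contrib.admin import site as admin_site
from django.contrib.admin.sites import NotRegistered

def unregister_quietly(model) -> None:
    """Unregister a model from the default admin site, tolerating absence."""
    try:
        admin_site.unregister(model)
    except NotRegistered:
        pass  # model was never registered with this admin site
```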
gh_patches_debug_4919 | rasdani/github-patches | git_diff | bokeh__bokeh-1361 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot style minor ticks
Axis objects do not have minor tick properties.
</issue>
<code>
[start of bokeh/models/axes.py]
1 from __future__ import absolute_import
2
3 from ..properties import Int, Float, String, Enum, Instance, Tuple, Either, Include
4 from ..mixins import LineProps, TextProps
5 from ..enums import Location
6
7 from .renderers import GuideRenderer
8 from .tickers import Ticker, BasicTicker, LogTicker, CategoricalTicker, DatetimeTicker
9 from .formatters import TickFormatter, BasicTickFormatter, LogTickFormatter, CategoricalTickFormatter, DatetimeTickFormatter
10
11 class Axis(GuideRenderer):
12 location = Either(Enum('auto'), Enum(Location))
13 bounds = Either(Enum('auto'), Tuple(Float, Float))
14
15 x_range_name = String('default')
16 y_range_name = String('default')
17
18 ticker = Instance(Ticker)
19 formatter = Instance(TickFormatter)
20
21 axis_label = String
22 axis_label_standoff = Int
23 axis_label_props = Include(TextProps)
24
25 major_label_standoff = Int
26 major_label_orientation = Either(Enum("horizontal", "vertical"), Float)
27 major_label_props = Include(TextProps)
28
29 axis_props = Include(LineProps)
30 major_tick_props = Include(LineProps)
31
32 major_tick_in = Int
33 major_tick_out = Int
34
35 class ContinuousAxis(Axis):
36 pass
37
38 class LinearAxis(ContinuousAxis):
39 def __init__(self, ticker=None, formatter=None, **kwargs):
40 if ticker is None:
41 ticker = BasicTicker()
42 if formatter is None:
43 formatter = BasicTickFormatter()
44 super(LinearAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)
45
46 class LogAxis(ContinuousAxis):
47 def __init__(self, ticker=None, formatter=None, **kwargs):
48 if ticker is None:
49 ticker = LogTicker(num_minor_ticks=10)
50 if formatter is None:
51 formatter = LogTickFormatter()
52 super(LogAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)
53
54 class CategoricalAxis(Axis):
55 def __init__(self, ticker=None, formatter=None, **kwargs):
56 if ticker is None:
57 ticker = CategoricalTicker()
58 if formatter is None:
59 formatter = CategoricalTickFormatter()
60 super(CategoricalAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)
61
62 class DatetimeAxis(LinearAxis):
63 axis_label = String("date")
64 scale = String("time")
65 num_labels = Int(8)
66 char_width = Int(10)
67 fill_ratio = Float(0.3)
68
69 def __init__(self, ticker=None, formatter=None, **kwargs):
70 if ticker is None:
71 ticker = DatetimeTicker()
72 if formatter is None:
73 formatter = DatetimeTickFormatter()
74 super(DatetimeAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)
75
[end of bokeh/models/axes.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bokeh/models/axes.py b/bokeh/models/axes.py
--- a/bokeh/models/axes.py
+++ b/bokeh/models/axes.py
@@ -27,11 +27,15 @@
major_label_props = Include(TextProps)
axis_props = Include(LineProps)
- major_tick_props = Include(LineProps)
+ major_tick_props = Include(LineProps)
major_tick_in = Int
major_tick_out = Int
+ minor_tick_props = Include(LineProps)
+ minor_tick_in = Int
+ minor_tick_out = Int
+
class ContinuousAxis(Axis):
pass
| {"golden_diff": "diff --git a/bokeh/models/axes.py b/bokeh/models/axes.py\n--- a/bokeh/models/axes.py\n+++ b/bokeh/models/axes.py\n@@ -27,11 +27,15 @@\n major_label_props = Include(TextProps)\n \n axis_props = Include(LineProps)\n- major_tick_props = Include(LineProps)\n \n+ major_tick_props = Include(LineProps)\n major_tick_in = Int\n major_tick_out = Int\n \n+ minor_tick_props = Include(LineProps)\n+ minor_tick_in = Int\n+ minor_tick_out = Int\n+\n class ContinuousAxis(Axis):\n pass\n", "issue": "Cannot style minor ticks\nAxis objects do not have minor tick properties.\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom ..properties import Int, Float, String, Enum, Instance, Tuple, Either, Include\nfrom ..mixins import LineProps, TextProps\nfrom ..enums import Location\n\nfrom .renderers import GuideRenderer\nfrom .tickers import Ticker, BasicTicker, LogTicker, CategoricalTicker, DatetimeTicker\nfrom .formatters import TickFormatter, BasicTickFormatter, LogTickFormatter, CategoricalTickFormatter, DatetimeTickFormatter\n\nclass Axis(GuideRenderer):\n location = Either(Enum('auto'), Enum(Location))\n bounds = Either(Enum('auto'), Tuple(Float, Float))\n\n x_range_name = String('default')\n y_range_name = String('default')\n\n ticker = Instance(Ticker)\n formatter = Instance(TickFormatter)\n\n axis_label = String\n axis_label_standoff = Int\n axis_label_props = Include(TextProps)\n\n major_label_standoff = Int\n major_label_orientation = Either(Enum(\"horizontal\", \"vertical\"), Float)\n major_label_props = Include(TextProps)\n\n axis_props = Include(LineProps)\n major_tick_props = Include(LineProps)\n\n major_tick_in = Int\n major_tick_out = Int\n\nclass ContinuousAxis(Axis):\n pass\n\nclass LinearAxis(ContinuousAxis):\n def __init__(self, ticker=None, formatter=None, **kwargs):\n if ticker is None:\n ticker = BasicTicker()\n if formatter is None:\n formatter = BasicTickFormatter()\n super(LinearAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)\n\nclass LogAxis(ContinuousAxis):\n def __init__(self, ticker=None, formatter=None, **kwargs):\n if ticker is None:\n ticker = LogTicker(num_minor_ticks=10)\n if formatter is None:\n formatter = LogTickFormatter()\n super(LogAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)\n\nclass CategoricalAxis(Axis):\n def __init__(self, ticker=None, formatter=None, **kwargs):\n if ticker is None:\n ticker = CategoricalTicker()\n if formatter is None:\n formatter = CategoricalTickFormatter()\n super(CategoricalAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)\n\nclass DatetimeAxis(LinearAxis):\n axis_label = String(\"date\")\n scale = String(\"time\")\n num_labels = Int(8)\n char_width = Int(10)\n fill_ratio = Float(0.3)\n\n def __init__(self, ticker=None, formatter=None, **kwargs):\n if ticker is None:\n ticker = DatetimeTicker()\n if formatter is None:\n formatter = DatetimeTickFormatter()\n super(DatetimeAxis, self).__init__(ticker=ticker, formatter=formatter, **kwargs)\n", "path": "bokeh/models/axes.py"}]} | 1,295 | 144 |
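Because `Include(LineProps)` expands a prefix into concrete attributes (`minor_tick_line_color`, `minor_tick_line_width`, and so on, following Bokeh's property-naming convention), the patch above is what makes minor ticks stylable from user code. A short usage sketch, assuming a Bokeh build that includes this patch; the expanded attribute names are inferred from that convention:

```python
from bokeh.plotting import figure

p = figure()
p.line([1, 2, 3], [2, 5, 3])
p.xaxis.minor_tick_line_color = "lightgray"  # previously not settable
p.xaxis.minor_tick_in = 2                    # new Int property from the patch
p.xaxis.minor_tick_out = 4
```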
gh_patches_debug_14570 | rasdani/github-patches | git_diff | freedomofpress__securedrop-2491 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
localization: various problems
# Bug
## Description
This issue is to collect the various localization problems found before the 0.4.4 release and after the **string freeze**
They should be fixed in a pull request right after the 0.4.4 release.
</issue>
<code>
[start of securedrop/source_app/forms.py]
1 from flask_babel import gettext
2 from flask_wtf import FlaskForm
3 from wtforms import PasswordField
4 from wtforms.validators import InputRequired, Regexp, Length
5
6 from db import Source
7
8
9 class LoginForm(FlaskForm):
10 codename = PasswordField('codename', validators=[
11 InputRequired(message=gettext('This field is required.')),
12 Length(1, Source.MAX_CODENAME_LEN,
13 message=gettext('Field must be between 1 and '
14 '{max_codename_len} characters long. '.format(
15 max_codename_len=Source.MAX_CODENAME_LEN))),
16 # Make sure to allow dashes since some words in the wordlist have them
17 Regexp(r'[\sA-Za-z0-9-]+$', message=gettext('Invalid input.'))
18 ])
19
[end of securedrop/source_app/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/securedrop/source_app/forms.py b/securedrop/source_app/forms.py
--- a/securedrop/source_app/forms.py
+++ b/securedrop/source_app/forms.py
@@ -11,7 +11,7 @@
InputRequired(message=gettext('This field is required.')),
Length(1, Source.MAX_CODENAME_LEN,
message=gettext('Field must be between 1 and '
- '{max_codename_len} characters long. '.format(
+ '{max_codename_len} characters long.'.format(
max_codename_len=Source.MAX_CODENAME_LEN))),
# Make sure to allow dashes since some words in the wordlist have them
Regexp(r'[\sA-Za-z0-9-]+$', message=gettext('Invalid input.'))
| {"golden_diff": "diff --git a/securedrop/source_app/forms.py b/securedrop/source_app/forms.py\n--- a/securedrop/source_app/forms.py\n+++ b/securedrop/source_app/forms.py\n@@ -11,7 +11,7 @@\n InputRequired(message=gettext('This field is required.')),\n Length(1, Source.MAX_CODENAME_LEN,\n message=gettext('Field must be between 1 and '\n- '{max_codename_len} characters long. '.format(\n+ '{max_codename_len} characters long.'.format(\n max_codename_len=Source.MAX_CODENAME_LEN))),\n # Make sure to allow dashes since some words in the wordlist have them\n Regexp(r'[\\sA-Za-z0-9-]+$', message=gettext('Invalid input.'))\n", "issue": "localization: various problems\n# Bug\r\n\r\n## Description\r\n\r\nThis issue is to collect the various localization problems found before the 0.4.4 release and after the **string freeze**\r\n\r\nThey should be fixed in a pull request right after the 0.4.4 release.\n", "before_files": [{"content": "from flask_babel import gettext\nfrom flask_wtf import FlaskForm\nfrom wtforms import PasswordField\nfrom wtforms.validators import InputRequired, Regexp, Length\n\nfrom db import Source\n\n\nclass LoginForm(FlaskForm):\n codename = PasswordField('codename', validators=[\n InputRequired(message=gettext('This field is required.')),\n Length(1, Source.MAX_CODENAME_LEN,\n message=gettext('Field must be between 1 and '\n '{max_codename_len} characters long. '.format(\n max_codename_len=Source.MAX_CODENAME_LEN))),\n # Make sure to allow dashes since some words in the wordlist have them\n Regexp(r'[\\sA-Za-z0-9-]+$', message=gettext('Invalid input.'))\n ])\n", "path": "securedrop/source_app/forms.py"}]} | 791 | 170 |
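The one-character change above (dropping the trailing space before `'.format(`) only affects the rendered validation message. A quick standalone check of the template shows why the space was superfluous; 90 is an illustrative value, not SecureDrop's actual `MAX_CODENAME_LEN`:

```python
template = ('Field must be between 1 and '
            '{max_codename_len} characters long.')
print(template.format(max_codename_len=90))
# Field must be between 1 and 90 characters long.
```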
gh_patches_debug_41609 | rasdani/github-patches | git_diff | getnikola__nikola-1292 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
render fails if the theme has a code.css
The `conf.py` says:
```
# Color scheme to be used for code blocks. If your theme provides
# "assets/css/code.css" this is ignored.
```
I've [provided one](https://github.com/mgaitan/my-nikola-theme/commit/f0140d0d67771d7ee9f46df2c78548c0e757f65e) but then I can't render my site
```
(blog)tin@morochita:~/lab/blog$ nikola build
Scanning posts.....done!
ERROR: Two different tasks can't have a common target.'output/assets/css/code.css' is a target for copy_files:output/assets/css/code.css and copy_assets:output/assets/css/code.css.
(blog)tin@morochita:~/lab/blog$
```
</issue>
<code>
[start of nikola/plugins/task/copy_assets.py]
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2014 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 import codecs
28 import os
29
30 from nikola.plugin_categories import Task
31 from nikola import utils
32
33
34 class CopyAssets(Task):
35 """Copy theme assets into output."""
36
37 name = "copy_assets"
38
39 def gen_tasks(self):
40 """Create tasks to copy the assets of the whole theme chain.
41
42 If a file is present on two themes, use the version
43 from the "youngest" theme.
44 """
45
46 kw = {
47 "themes": self.site.THEMES,
48 "output_folder": self.site.config['OUTPUT_FOLDER'],
49 "filters": self.site.config['FILTERS'],
50 "code_color_scheme": self.site.config['CODE_COLOR_SCHEME'],
51 "code.css_selectors": 'pre.code',
52 "code.css_close": "\ntable.codetable { width: 100%;} td.linenos {text-align: right; width: 4em;}\n",
53 }
54 has_code_css = False
55 tasks = {}
56 code_css_path = os.path.join(kw['output_folder'], 'assets', 'css', 'code.css')
57
58 yield self.group_task()
59
60 for theme_name in kw['themes']:
61 src = os.path.join(utils.get_theme_path(theme_name), 'assets')
62 dst = os.path.join(kw['output_folder'], 'assets')
63 for task in utils.copy_tree(src, dst):
64 if task['name'] in tasks:
65 continue
66 has_code_css = task['targets'][0] == code_css_path
67 tasks[task['name']] = task
68 task['uptodate'] = [utils.config_changed(kw)]
69 task['basename'] = self.name
70 yield utils.apply_filters(task, kw['filters'])
71
72 if not has_code_css: # Generate it
73
74 def create_code_css():
75 from pygments.formatters import get_formatter_by_name
76 formatter = get_formatter_by_name('html', style=kw["code_color_scheme"])
77 utils.makedirs(os.path.dirname(code_css_path))
78 with codecs.open(code_css_path, 'wb+', 'utf8') as outf:
79 outf.write(formatter.get_style_defs(kw["code.css_selectors"]))
80 outf.write(kw["code.css_close"])
81
82 task = {
83 'basename': self.name,
84 'name': code_css_path,
85 'targets': [code_css_path],
86 'uptodate': [utils.config_changed(kw)],
87 'actions': [(create_code_css, [])],
88 'clean': True,
89 }
90 yield utils.apply_filters(task, kw['filters'])
91
[end of nikola/plugins/task/copy_assets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nikola/plugins/task/copy_assets.py b/nikola/plugins/task/copy_assets.py
--- a/nikola/plugins/task/copy_assets.py
+++ b/nikola/plugins/task/copy_assets.py
@@ -45,15 +45,21 @@
kw = {
"themes": self.site.THEMES,
+ "files_folders": self.site.config['FILES_FOLDERS'],
"output_folder": self.site.config['OUTPUT_FOLDER'],
"filters": self.site.config['FILTERS'],
"code_color_scheme": self.site.config['CODE_COLOR_SCHEME'],
"code.css_selectors": 'pre.code',
+ "code.css_head": '/* code.css file generated by Nikola */\n',
"code.css_close": "\ntable.codetable { width: 100%;} td.linenos {text-align: right; width: 4em;}\n",
}
- has_code_css = False
tasks = {}
code_css_path = os.path.join(kw['output_folder'], 'assets', 'css', 'code.css')
+ code_css_input = utils.get_asset_path('assets/css/code.css',
+ themes=kw['themes'],
+ files_folders=kw['files_folders'])
+
+ kw["code.css_input"] = code_css_input
yield self.group_task()
@@ -63,27 +69,35 @@
for task in utils.copy_tree(src, dst):
if task['name'] in tasks:
continue
- has_code_css = task['targets'][0] == code_css_path
tasks[task['name']] = task
task['uptodate'] = [utils.config_changed(kw)]
task['basename'] = self.name
+ if code_css_input:
+ task['file_dep'] = [code_css_input]
yield utils.apply_filters(task, kw['filters'])
- if not has_code_css: # Generate it
-
+ # Check whether or not there is a code.css file around.
+ if not code_css_input:
def create_code_css():
from pygments.formatters import get_formatter_by_name
formatter = get_formatter_by_name('html', style=kw["code_color_scheme"])
utils.makedirs(os.path.dirname(code_css_path))
with codecs.open(code_css_path, 'wb+', 'utf8') as outf:
+ outf.write(kw["code.css_head"])
outf.write(formatter.get_style_defs(kw["code.css_selectors"]))
outf.write(kw["code.css_close"])
+ if os.path.exists(code_css_path):
+ with codecs.open(code_css_path, 'r', 'utf-8') as fh:
+ testcontents = fh.read(len(kw["code.css_head"])) == kw["code.css_head"]
+ else:
+ testcontents = False
+
task = {
'basename': self.name,
'name': code_css_path,
'targets': [code_css_path],
- 'uptodate': [utils.config_changed(kw)],
+ 'uptodate': [utils.config_changed(kw), testcontents],
'actions': [(create_code_css, [])],
'clean': True,
}
| {"golden_diff": "diff --git a/nikola/plugins/task/copy_assets.py b/nikola/plugins/task/copy_assets.py\n--- a/nikola/plugins/task/copy_assets.py\n+++ b/nikola/plugins/task/copy_assets.py\n@@ -45,15 +45,21 @@\n \n         kw = {\n             \"themes\": self.site.THEMES,\n+            \"files_folders\": self.site.config['FILES_FOLDERS'],\n             \"output_folder\": self.site.config['OUTPUT_FOLDER'],\n             \"filters\": self.site.config['FILTERS'],\n             \"code_color_scheme\": self.site.config['CODE_COLOR_SCHEME'],\n             \"code.css_selectors\": 'pre.code',\n+            \"code.css_head\": '/* code.css file generated by Nikola */\\n',\n             \"code.css_close\": \"\\ntable.codetable { width: 100%;} td.linenos {text-align: right; width: 4em;}\\n\",\n         }\n-        has_code_css = False\n         tasks = {}\n         code_css_path = os.path.join(kw['output_folder'], 'assets', 'css', 'code.css')\n+        code_css_input = utils.get_asset_path('assets/css/code.css',\n+                                              themes=kw['themes'],\n+                                              files_folders=kw['files_folders'])\n+\n+        kw[\"code.css_input\"] = code_css_input\n \n         yield self.group_task()\n \n@@ -63,27 +69,35 @@\n             for task in utils.copy_tree(src, dst):\n                 if task['name'] in tasks:\n                     continue\n-                has_code_css = task['targets'][0] == code_css_path\n                 tasks[task['name']] = task\n                 task['uptodate'] = [utils.config_changed(kw)]\n                 task['basename'] = self.name\n+                if code_css_input:\n+                    task['file_dep'] = [code_css_input]\n                 yield utils.apply_filters(task, kw['filters'])\n \n-        if not has_code_css:  # Generate it\n-\n+        # Check whether or not there is a code.css file around.\n+        if not code_css_input:\n             def create_code_css():\n                 from pygments.formatters import get_formatter_by_name\n                 formatter = get_formatter_by_name('html', style=kw[\"code_color_scheme\"])\n                 utils.makedirs(os.path.dirname(code_css_path))\n                 with codecs.open(code_css_path, 'wb+', 'utf8') as outf:\n+                    outf.write(kw[\"code.css_head\"])\n                     outf.write(formatter.get_style_defs(kw[\"code.css_selectors\"]))\n                     outf.write(kw[\"code.css_close\"])\n \n+            if os.path.exists(code_css_path):\n+                with codecs.open(code_css_path, 'r', 'utf-8') as fh:\n+                    testcontents = fh.read(len(kw[\"code.css_head\"])) == kw[\"code.css_head\"]\n+            else:\n+                testcontents = False\n+\n             task = {\n                 'basename': self.name,\n                 'name': code_css_path,\n                 'targets': [code_css_path],\n-                'uptodate': [utils.config_changed(kw)],\n+                'uptodate': [utils.config_changed(kw), testcontents],\n                 'actions': [(create_code_css, [])],\n                 'clean': True,\n             }\n", "issue": "render fails if the theme has a code.css \nThe `conf.py` says: \n\n```\n# Color scheme to be used for code blocks. If your theme provides\n# \"assets/css/code.css\" this is ignored.\n```\n\nI've [provided one](https://github.com/mgaitan/my-nikola-theme/commit/f0140d0d67771d7ee9f46df2c78548c0e757f65e) but then I can't render my site\n\n```\n(blog)tin@morochita:~/lab/blog$ nikola build\nScanning posts.....done!\nERROR: Two different tasks can't have a common target.'output/assets/css/code.css' is a target for copy_files:output/assets/css/code.css and copy_assets:output/assets/css/code.css.\n(blog)tin@morochita:~/lab/blog$ \n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2014 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport codecs\nimport os\n\nfrom nikola.plugin_categories import Task\nfrom nikola import utils\n\n\nclass CopyAssets(Task):\n    \"\"\"Copy theme assets into output.\"\"\"\n\n    name = \"copy_assets\"\n\n    def gen_tasks(self):\n        \"\"\"Create tasks to copy the assets of the whole theme chain.\n\n        If a file is present on two themes, use the version\n        from the \"youngest\" theme.\n        \"\"\"\n\n        kw = {\n            \"themes\": self.site.THEMES,\n            \"output_folder\": self.site.config['OUTPUT_FOLDER'],\n            \"filters\": self.site.config['FILTERS'],\n            \"code_color_scheme\": self.site.config['CODE_COLOR_SCHEME'],\n            \"code.css_selectors\": 'pre.code',\n            \"code.css_close\": \"\\ntable.codetable { width: 100%;} td.linenos {text-align: right; width: 4em;}\\n\",\n        }\n        has_code_css = False\n        tasks = {}\n        code_css_path = os.path.join(kw['output_folder'], 'assets', 'css', 'code.css')\n\n        yield self.group_task()\n\n        for theme_name in kw['themes']:\n            src = os.path.join(utils.get_theme_path(theme_name), 'assets')\n            dst = os.path.join(kw['output_folder'], 'assets')\n            for task in utils.copy_tree(src, dst):\n                if task['name'] in tasks:\n                    continue\n                has_code_css = task['targets'][0] == code_css_path\n                tasks[task['name']] = task\n                task['uptodate'] = [utils.config_changed(kw)]\n                task['basename'] = self.name\n                yield utils.apply_filters(task, kw['filters'])\n\n        if not has_code_css:  # Generate it\n\n            def create_code_css():\n                from pygments.formatters import get_formatter_by_name\n                formatter = get_formatter_by_name('html', style=kw[\"code_color_scheme\"])\n                utils.makedirs(os.path.dirname(code_css_path))\n                with codecs.open(code_css_path, 'wb+', 'utf8') as outf:\n                    outf.write(formatter.get_style_defs(kw[\"code.css_selectors\"]))\n                    outf.write(kw[\"code.css_close\"])\n\n            task = {\n                'basename': self.name,\n                'name': code_css_path,\n                'targets': [code_css_path],\n                'uptodate': [utils.config_changed(kw)],\n                'actions': [(create_code_css, [])],\n                'clean': True,\n            }\n            yield utils.apply_filters(task, kw['filters'])\n", "path": "nikola/plugins/task/copy_assets.py"}]} | 1,705 | 690
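The interesting mechanism in the patch above is the sentinel header: a generated `code.css` starts with a marker comment, so the task can later tell its own output apart from a theme- or user-provided file. A standalone sketch of that check, mirroring the `testcontents` logic; names and paths are illustrative:

```python
import codecs
import os

CSS_HEAD = '/* code.css file generated by Nikola */\n'

def was_generated_by_nikola(path: str) -> bool:
    """Return True only if the file begins with the generator marker."""
    if not os.path.exists(path):
        return False
    with codecs.open(path, 'r', 'utf-8') as fh:
        return fh.read(len(CSS_HEAD)) == CSS_HEAD
```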
gh_patches_debug_34527 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-1694 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug]: source 'City of Karlsruhe' stopped working
### I Have A Problem With:
A specific source
### What's Your Problem
Release 1.44.0:
Due to changes on the website the source '**City of Karlsruhe**' (name: karlsruhe_de) stopped working.
I start troubleshooting and add my findings here.
### Source (if relevant)
karlsruhe_de
### Logs
_No response_
### Relevant Configuration
_No response_
### Checklist Source Error
- [x] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)
- [x] Checked that the website of your service provider is still working
- [x] Tested my attributes on the service provider website (if possible)
- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version)
### Checklist Sensor Error
- [x] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)
### Required
- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.
- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.
</issue>
<code>
[start of custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py]
1 from datetime import datetime
2
3 import requests
4 from waste_collection_schedule import Collection # type: ignore[attr-defined]
5 from waste_collection_schedule.service.ICS import ICS
6
7 TITLE = "City of Karlsruhe"
8 DESCRIPTION = "Source for City of Karlsruhe."
9 URL = "https://www.karlsruhe.de/"
10 TEST_CASES = {
11 "Östliche Rheinbrückenstraße 1": {
12 "street": "Östliche Rheinbrückenstraße",
13 "hnr": 1,
14 },
15 "Habichtweg 4": {"street": "Habichtweg", "hnr": 4},
16 "Machstraße 5": {"street": "Machstraße", "hnr": 5},
17 "Bernsteinstraße 10 ladeort 1": {
18 "street": "Bernsteinstraße",
19 "hnr": 10,
20 "ladeort": 1,
21 },
22 "Bernsteinstraße 10 ladeort 2": {
23 "street": "Bernsteinstraße",
24 "hnr": 10,
25 "ladeort": 2,
26 },
27 }
28
29
30 ICON_MAP = {
31 "Restmüll": "mdi:trash-can",
32 "Bioabfall": "mdi:leaf",
33 "Papier": "mdi:package-variant",
34 "Wertstoff": "mdi:recycle",
35 "Sperrmüllabholung": "mdi:wardrobe",
36 }
37
38
39 API_URL = "https://web6.karlsruhe.de/service/abfall/akal/akal_{year}.php"
40
41
42 class Source:
43 def __init__(self, street: str, hnr: str | int, ladeort: int | None = None):
44 self._street: str = street
45 self._hnr: str | int = hnr
46 self._ladeort: int | None = ladeort
47 self.ics = ICS()
48
49 def fetch(self):
50 now = datetime.now()
51 error = None
52 for year in (now.year, now.year + 1, now.year - 1):
53 try:
54 return self.get_data(API_URL.format(year=year))
55 except Exception as e:
56 error = e
57 raise error
58
59 def get_data(self, url):
60 data = {
61 "strasse_n": self._street,
62 "hausnr": self._hnr,
63 "ical": "+iCalendar",
64 "ladeort": self._ladeort,
65 }
66 params = {"hausnr": self._hnr}
67
68 r = requests.post(url, data=data, params=params)
69 dates = self.ics.convert(r.text)
70
71 entries = []
72 for d in dates:
73 date, waste_type = d
74 waste_type = waste_type.split(",")[0]
75 icon = ICON_MAP.get(waste_type)
76 entries.append(Collection(date=date, t=waste_type, icon=icon))
77
78 return entries
79
[end of custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py
@@ -1,9 +1,17 @@
from datetime import datetime
import requests
+import urllib3
from waste_collection_schedule import Collection # type: ignore[attr-defined]
from waste_collection_schedule.service.ICS import ICS
+# With verify=True the POST fails due to a SSLCertVerificationError.
+# Using verify=False works, but is not ideal. The following links may provide a better way of dealing with this:
+# https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html#ssl-warnings
+# https://urllib3.readthedocs.io/en/1.26.x/user-guide.html#ssl
+# These two lines areused to suppress the InsecureRequestWarning when using verify=False
+urllib3.disable_warnings()
+
TITLE = "City of Karlsruhe"
DESCRIPTION = "Source for City of Karlsruhe."
URL = "https://www.karlsruhe.de/"
@@ -36,7 +44,7 @@
}
-API_URL = "https://web6.karlsruhe.de/service/abfall/akal/akal_{year}.php"
+API_URL = "https://web{i}.karlsruhe.de/service/abfall/akal/akal_{year}.php"
class Source:
@@ -50,10 +58,11 @@
now = datetime.now()
error = None
for year in (now.year, now.year + 1, now.year - 1):
- try:
- return self.get_data(API_URL.format(year=year))
- except Exception as e:
- error = e
+ for i in (4, 6):
+ try:
+ return self.get_data(API_URL.format(year=year, i=i))
+ except Exception as e:
+ error = e
raise error
def get_data(self, url):
@@ -65,7 +74,7 @@
}
params = {"hausnr": self._hnr}
- r = requests.post(url, data=data, params=params)
+ r = requests.post(url, data=data, params=params, verify=False)
dates = self.ics.convert(r.text)
entries = []
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py\n@@ -1,9 +1,17 @@\n from datetime import datetime\n \n import requests\n+import urllib3\n from waste_collection_schedule import Collection  # type: ignore[attr-defined]\n from waste_collection_schedule.service.ICS import ICS\n \n+# With verify=True the POST fails due to a SSLCertVerificationError.\n+# Using verify=False works, but is not ideal. The following links may provide a better way of dealing with this:\n+# https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html#ssl-warnings\n+# https://urllib3.readthedocs.io/en/1.26.x/user-guide.html#ssl\n+# These two lines areused to suppress the InsecureRequestWarning when using verify=False\n+urllib3.disable_warnings()\n+\n TITLE = \"City of Karlsruhe\"\n DESCRIPTION = \"Source for City of Karlsruhe.\"\n URL = \"https://www.karlsruhe.de/\"\n@@ -36,7 +44,7 @@\n }\n \n \n-API_URL = \"https://web6.karlsruhe.de/service/abfall/akal/akal_{year}.php\"\n+API_URL = \"https://web{i}.karlsruhe.de/service/abfall/akal/akal_{year}.php\"\n \n \n class Source:\n@@ -50,10 +58,11 @@\n         now = datetime.now()\n         error = None\n         for year in (now.year, now.year + 1, now.year - 1):\n-            try:\n-                return self.get_data(API_URL.format(year=year))\n-            except Exception as e:\n-                error = e\n+            for i in (4, 6):\n+                try:\n+                    return self.get_data(API_URL.format(year=year, i=i))\n+                except Exception as e:\n+                    error = e\n         raise error\n \n     def get_data(self, url):\n@@ -65,7 +74,7 @@\n         }\n         params = {\"hausnr\": self._hnr}\n \n-        r = requests.post(url, data=data, params=params)\n+        r = requests.post(url, data=data, params=params, verify=False)\n         dates = self.ics.convert(r.text)\n \n         entries = []\n", "issue": "[Bug]: source 'City of Karlsruhe' stopped working\n### I Have A Problem With:\n\nA specific source\n\n### What's Your Problem\n\nRelease 1.44.0:\r\nDue to changes on the website the source '**City of Karlsruhe**' (name: karlsruhe_de) stopped working.\r\nI start troubleshooting and add my findings here.\n\n### Source (if relevant)\n\nkarlsruhe_de\n\n### Logs\n\n_No response_\n\n### Relevant Configuration\n\n_No response_\n\n### Checklist Source Error\n\n- [x] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\n- [x] Checked that the website of your service provider is still working\n- [x] Tested my attributes on the service provider website (if possible)\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\n\n### Checklist Sensor Error\n\n- [x] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\n\n### Required\n\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": [{"content": "from datetime import datetime\n\nimport requests\nfrom waste_collection_schedule import Collection  # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nTITLE = \"City of Karlsruhe\"\nDESCRIPTION = \"Source for City of Karlsruhe.\"\nURL = \"https://www.karlsruhe.de/\"\nTEST_CASES = {\n    \"\u00d6stliche Rheinbr\u00fcckenstra\u00dfe 1\": {\n        \"street\": \"\u00d6stliche Rheinbr\u00fcckenstra\u00dfe\",\n        \"hnr\": 1,\n    },\n    \"Habichtweg 4\": {\"street\": \"Habichtweg\", \"hnr\": 4},\n    \"Machstra\u00dfe 5\": {\"street\": \"Machstra\u00dfe\", \"hnr\": 5},\n    \"Bernsteinstra\u00dfe 10 ladeort 1\": {\n        \"street\": \"Bernsteinstra\u00dfe\",\n        \"hnr\": 10,\n        \"ladeort\": 1,\n    },\n    \"Bernsteinstra\u00dfe 10 ladeort 2\": {\n        \"street\": \"Bernsteinstra\u00dfe\",\n        \"hnr\": 10,\n        \"ladeort\": 2,\n    },\n}\n\n\nICON_MAP = {\n    \"Restm\u00fcll\": \"mdi:trash-can\",\n    \"Bioabfall\": \"mdi:leaf\",\n    \"Papier\": \"mdi:package-variant\",\n    \"Wertstoff\": \"mdi:recycle\",\n    \"Sperrm\u00fcllabholung\": \"mdi:wardrobe\",\n}\n\n\nAPI_URL = \"https://web6.karlsruhe.de/service/abfall/akal/akal_{year}.php\"\n\n\nclass Source:\n    def __init__(self, street: str, hnr: str | int, ladeort: int | None = None):\n        self._street: str = street\n        self._hnr: str | int = hnr\n        self._ladeort: int | None = ladeort\n        self.ics = ICS()\n\n    def fetch(self):\n        now = datetime.now()\n        error = None\n        for year in (now.year, now.year + 1, now.year - 1):\n            try:\n                return self.get_data(API_URL.format(year=year))\n            except Exception as e:\n                error = e\n        raise error\n\n    def get_data(self, url):\n        data = {\n            \"strasse_n\": self._street,\n            \"hausnr\": self._hnr,\n            \"ical\": \"+iCalendar\",\n            \"ladeort\": self._ladeort,\n        }\n        params = {\"hausnr\": self._hnr}\n\n        r = requests.post(url, data=data, params=params)\n        dates = self.ics.convert(r.text)\n\n        entries = []\n        for d in dates:\n            date, waste_type = d\n            waste_type = waste_type.split(\",\")[0]\n            icon = ICON_MAP.get(waste_type)\n            entries.append(Collection(date=date, t=waste_type, icon=icon))\n\n        return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/karlsruhe_de.py"}]} | 1,652 | 547
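The fix above works around the site's broken certificate chain with `verify=False` and silences the resulting warning via `urllib3.disable_warnings()`. A minimal reproduction of that pattern follows; the URL and form fields come from the check itself, with an example year filled in, and the approach should be read as a workaround since it disables TLS verification:

```python
import requests
import urllib3

urllib3.disable_warnings()  # suppress InsecureRequestWarning from verify=False

r = requests.post(
    "https://web6.karlsruhe.de/service/abfall/akal/akal_2024.php",  # example year
    data={"strasse_n": "Habichtweg", "hausnr": 4, "ical": "+iCalendar"},
    verify=False,
)
print(r.status_code)
```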
gh_patches_debug_28800 | rasdani/github-patches | git_diff | quantumlib__Cirq-1674 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve error message if on_each gets a list
When you do `cirq.H.on_each([q0, q1])` instead of the unpacked version `cirq.H.on_each(q0, q1)` for some qubits `q0` and `q1`, the error message you get is **Gate was called with type different than Qid**.
Maybe either flatten (because most of the time you'll have your qubits in a list or a list of lists), or give a more descriptive error message.
</issue>
<code>
[start of cirq/ops/gate_features.py]
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Marker classes for indicating which additional features gates support.
16
17 For example: some gates are reversible, some have known matrices, etc.
18 """
19
20 import abc
21
22 from cirq.ops import op_tree, raw_types
23
24
25 class InterchangeableQubitsGate(metaclass=abc.ABCMeta):
26 """Indicates operations should be equal under some qubit permutations."""
27
28 def qubit_index_to_equivalence_group_key(self, index: int) -> int:
29 """Returns a key that differs between non-interchangeable qubits."""
30 return 0
31
32
33 class SingleQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):
34 """A gate that must be applied to exactly one qubit."""
35 def num_qubits(self) -> int:
36 return 1
37
38 def on_each(self, *targets: raw_types.Qid) -> op_tree.OP_TREE:
39 """Returns a list of operations apply this gate to each of the targets.
40
41 Args:
42 *targets: The qubits to apply this gate to.
43
44 Returns:
45 Operations applying this gate to the target qubits.
46
47 Raises:
48 ValueError if targets are not instances of Qid.
49 """
50 return [self.on(target) for target in targets]
51
52
53 class TwoQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):
54 """A gate that must be applied to exactly two qubits."""
55 def num_qubits(self) -> int:
56 return 2
57
58
59 class ThreeQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):
60 """A gate that must be applied to exactly three qubits."""
61 def num_qubits(self) -> int:
62 return 3
63
[end of cirq/ops/gate_features.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cirq/ops/gate_features.py b/cirq/ops/gate_features.py
--- a/cirq/ops/gate_features.py
+++ b/cirq/ops/gate_features.py
@@ -18,8 +18,10 @@
"""
import abc
+import collections
+from typing import Union, Iterable, Any, List
-from cirq.ops import op_tree, raw_types
+from cirq.ops import raw_types
class InterchangeableQubitsGate(metaclass=abc.ABCMeta):
@@ -35,7 +37,8 @@
def num_qubits(self) -> int:
return 1
- def on_each(self, *targets: raw_types.Qid) -> op_tree.OP_TREE:
+ def on_each(self, *targets: Union[raw_types.Qid, Iterable[Any]]
+ ) -> List[raw_types.Operation]:
"""Returns a list of operations apply this gate to each of the targets.
Args:
@@ -45,9 +48,20 @@
Operations applying this gate to the target qubits.
Raises:
- ValueError if targets are not instances of Qid.
+ ValueError if targets are not instances of Qid or List[Qid].
"""
- return [self.on(target) for target in targets]
+ operations = [] # type: List[raw_types.Operation]
+ for target in targets:
+ if isinstance(target,
+ collections.Iterable) and not isinstance(target, str):
+ operations.extend(self.on_each(*target))
+ elif isinstance(target, raw_types.Qid):
+ operations.append(self.on(target))
+ else:
+ raise ValueError(
+ 'Gate was called with type different than Qid. Type: {}'.
+ format(type(target)))
+ return operations
class TwoQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):
| {"golden_diff": "diff --git a/cirq/ops/gate_features.py b/cirq/ops/gate_features.py\n--- a/cirq/ops/gate_features.py\n+++ b/cirq/ops/gate_features.py\n@@ -18,8 +18,10 @@\n \"\"\"\n \n import abc\n+import collections\n+from typing import Union, Iterable, Any, List\n \n-from cirq.ops import op_tree, raw_types\n+from cirq.ops import raw_types\n \n \n class InterchangeableQubitsGate(metaclass=abc.ABCMeta):\n@@ -35,7 +37,8 @@\n     def num_qubits(self) -> int:\n         return 1\n \n-    def on_each(self, *targets: raw_types.Qid) -> op_tree.OP_TREE:\n+    def on_each(self, *targets: Union[raw_types.Qid, Iterable[Any]]\n+               ) -> List[raw_types.Operation]:\n         \"\"\"Returns a list of operations apply this gate to each of the targets.\n \n         Args:\n@@ -45,9 +48,20 @@\n             Operations applying this gate to the target qubits.\n \n         Raises:\n-            ValueError if targets are not instances of Qid.\n+            ValueError if targets are not instances of Qid or List[Qid].\n         \"\"\"\n-        return [self.on(target) for target in targets]\n+        operations = []  # type: List[raw_types.Operation]\n+        for target in targets:\n+            if isinstance(target,\n+                          collections.Iterable) and not isinstance(target, str):\n+                operations.extend(self.on_each(*target))\n+            elif isinstance(target, raw_types.Qid):\n+                operations.append(self.on(target))\n+            else:\n+                raise ValueError(\n+                    'Gate was called with type different than Qid. Type: {}'.\n+                    format(type(target)))\n+        return operations\n \n \n class TwoQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):\n", "issue": "Improve error message if on_each gets a list\nWhen you do `cirq.H.on_each([q0, q1])` instead of the unpacked version `cirq.H.on_each(q0, q1)` for some qubits `q0` and `q1`, the error message you get is **Gate was called with type different than Qid**. \r\n\r\nMaybe either flatten (because most of the time you'll have your qubits in a list or a list of lists), or give a more descriptive error message.\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Marker classes for indicating which additional features gates support.\n\nFor example: some gates are reversible, some have known matrices, etc.\n\"\"\"\n\nimport abc\n\nfrom cirq.ops import op_tree, raw_types\n\n\nclass InterchangeableQubitsGate(metaclass=abc.ABCMeta):\n    \"\"\"Indicates operations should be equal under some qubit permutations.\"\"\"\n\n    def qubit_index_to_equivalence_group_key(self, index: int) -> int:\n        \"\"\"Returns a key that differs between non-interchangeable qubits.\"\"\"\n        return 0\n\n\nclass SingleQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):\n    \"\"\"A gate that must be applied to exactly one qubit.\"\"\"\n    def num_qubits(self) -> int:\n        return 1\n\n    def on_each(self, *targets: raw_types.Qid) -> op_tree.OP_TREE:\n        \"\"\"Returns a list of operations apply this gate to each of the targets.\n\n        Args:\n            *targets: The qubits to apply this gate to.\n\n        Returns:\n            Operations applying this gate to the target qubits.\n\n        Raises:\n            ValueError if targets are not instances of Qid.\n        \"\"\"\n        return [self.on(target) for target in targets]\n\n\nclass TwoQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):\n    \"\"\"A gate that must be applied to exactly two qubits.\"\"\"\n    def num_qubits(self) -> int:\n        return 2\n\n\nclass ThreeQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):\n    \"\"\"A gate that must be applied to exactly three qubits.\"\"\"\n    def num_qubits(self) -> int:\n        return 3\n", "path": "cirq/ops/gate_features.py"}]} | 1,264 | 412
gh_patches_debug_39718 | rasdani/github-patches | git_diff | prowler-cloud__prowler-2291 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug]: backup_plans_exist and backup_reportplans_exist trigger in regions where not backups exist
### Steps to Reproduce
The mentioned checks are triggered even if no backups are present or configured.
### Expected behavior
When the check can't find a resource ID (it actually says "No Backups"), the check shouldn't trigger
### Actual Result with Screenshots or Logs

### How did you install Prowler?
From pip package (pip install prowler)
### Environment Resource
Workstation
### OS used
WSL2 under Windows 11
### Prowler version
Prowler 3.4.0 (it is the latest version, yay!)
### Pip version
pip 22.0.2 from /usr/lib/python3/dist-packages/pip (python 3.10)
### Context
_No response_
</issue>
<code>
[start of prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py]
1 from prowler.lib.check.models import Check, Check_Report_AWS
2 from prowler.providers.aws.services.backup.backup_client import backup_client
3
4
5 class backup_plans_exist(Check):
6 def execute(self):
7 findings = []
8 report = Check_Report_AWS(self.metadata())
9 report.status = "FAIL"
10 report.status_extended = "No Backup Plan Exist"
11 report.resource_arn = ""
12 report.resource_id = "No Backups"
13 report.region = backup_client.region
14 if backup_client.backup_plans:
15 report.status = "PASS"
16 report.status_extended = f"At least one backup plan exists: { backup_client.backup_plans[0].name}"
17 report.resource_arn = backup_client.backup_plans[0].arn
18 report.resource_id = backup_client.backup_plans[0].name
19 report.region = backup_client.backup_plans[0].region
20
21 findings.append(report)
22 return findings
23
[end of prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py]
[start of prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py]
1 from prowler.lib.check.models import Check, Check_Report_AWS
2 from prowler.providers.aws.services.backup.backup_client import backup_client
3
4
5 class backup_reportplans_exist(Check):
6 def execute(self):
7 findings = []
8 report = Check_Report_AWS(self.metadata())
9 report.status = "FAIL"
10 report.status_extended = "No Backup Report Plan Exist"
11 report.resource_arn = ""
12 report.resource_id = "No Backups"
13 report.region = backup_client.region
14 if backup_client.backup_report_plans:
15 report.status = "PASS"
16 report.status_extended = f"At least one backup report plan exists: { backup_client.backup_report_plans[0].name}"
17 report.resource_arn = backup_client.backup_report_plans[0].arn
18 report.resource_id = backup_client.backup_report_plans[0].name
19 report.region = backup_client.backup_report_plans[0].region
20
21 findings.append(report)
22 return findings
23
[end of prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py]
[start of prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py]
1 from prowler.lib.check.models import Check, Check_Report_AWS
2 from prowler.providers.aws.services.backup.backup_client import backup_client
3
4
5 class backup_vaults_exist(Check):
6 def execute(self):
7 findings = []
8 report = Check_Report_AWS(self.metadata())
9 report.status = "FAIL"
10 report.status_extended = "No Backup Vault Exist"
11 report.resource_arn = ""
12 report.resource_id = "No Backups"
13 report.region = backup_client.region
14 if backup_client.backup_vaults:
15 report.status = "PASS"
16 report.status_extended = f"At least one backup vault exists: { backup_client.backup_vaults[0].name}"
17 report.resource_arn = backup_client.backup_vaults[0].arn
18 report.resource_id = backup_client.backup_vaults[0].name
19 report.region = backup_client.backup_vaults[0].region
20
21 findings.append(report)
22 return findings
23
[end of prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py b/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py
--- a/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py
+++ b/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py
@@ -9,11 +9,13 @@
report.status = "FAIL"
report.status_extended = "No Backup Plan Exist"
report.resource_arn = ""
- report.resource_id = "No Backups"
+ report.resource_id = "Backups"
report.region = backup_client.region
if backup_client.backup_plans:
report.status = "PASS"
- report.status_extended = f"At least one backup plan exists: { backup_client.backup_plans[0].name}"
+ report.status_extended = (
+ f"At least one backup plan exists: {backup_client.backup_plans[0].name}"
+ )
report.resource_arn = backup_client.backup_plans[0].arn
report.resource_id = backup_client.backup_plans[0].name
report.region = backup_client.backup_plans[0].region
diff --git a/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py b/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py
--- a/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py
+++ b/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py
@@ -5,18 +5,20 @@
class backup_reportplans_exist(Check):
def execute(self):
findings = []
- report = Check_Report_AWS(self.metadata())
- report.status = "FAIL"
- report.status_extended = "No Backup Report Plan Exist"
- report.resource_arn = ""
- report.resource_id = "No Backups"
- report.region = backup_client.region
- if backup_client.backup_report_plans:
- report.status = "PASS"
- report.status_extended = f"At least one backup report plan exists: { backup_client.backup_report_plans[0].name}"
- report.resource_arn = backup_client.backup_report_plans[0].arn
- report.resource_id = backup_client.backup_report_plans[0].name
- report.region = backup_client.backup_report_plans[0].region
+ # We only check report plans if backup plans exist, reducing noise
+ if backup_client.backup_plans:
+ report = Check_Report_AWS(self.metadata())
+ report.status = "FAIL"
+ report.status_extended = "No Backup Report Plan Exist"
+ report.resource_arn = ""
+ report.resource_id = "Backups"
+ report.region = backup_client.region
+ if backup_client.backup_report_plans:
+ report.status = "PASS"
+ report.status_extended = f"At least one backup report plan exists: { backup_client.backup_report_plans[0].name}"
+ report.resource_arn = backup_client.backup_report_plans[0].arn
+ report.resource_id = backup_client.backup_report_plans[0].name
+ report.region = backup_client.backup_report_plans[0].region
- findings.append(report)
+ findings.append(report)
return findings
diff --git a/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py b/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py
--- a/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py
+++ b/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py
@@ -9,7 +9,7 @@
report.status = "FAIL"
report.status_extended = "No Backup Vault Exist"
report.resource_arn = ""
- report.resource_id = "No Backups"
+ report.resource_id = "Backups"
report.region = backup_client.region
if backup_client.backup_vaults:
report.status = "PASS"
| {"golden_diff": "diff --git a/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py b/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py\n--- a/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py\n+++ b/prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py\n@@ -9,11 +9,13 @@\n report.status = \"FAIL\"\n report.status_extended = \"No Backup Plan Exist\"\n report.resource_arn = \"\"\n- report.resource_id = \"No Backups\"\n+ report.resource_id = \"Backups\"\n report.region = backup_client.region\n if backup_client.backup_plans:\n report.status = \"PASS\"\n- report.status_extended = f\"At least one backup plan exists: { backup_client.backup_plans[0].name}\"\n+ report.status_extended = (\n+ f\"At least one backup plan exists: {backup_client.backup_plans[0].name}\"\n+ )\n report.resource_arn = backup_client.backup_plans[0].arn\n report.resource_id = backup_client.backup_plans[0].name\n report.region = backup_client.backup_plans[0].region\ndiff --git a/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py b/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py\n--- a/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py\n+++ b/prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py\n@@ -5,18 +5,20 @@\n class backup_reportplans_exist(Check):\n def execute(self):\n findings = []\n- report = Check_Report_AWS(self.metadata())\n- report.status = \"FAIL\"\n- report.status_extended = \"No Backup Report Plan Exist\"\n- report.resource_arn = \"\"\n- report.resource_id = \"No Backups\"\n- report.region = backup_client.region\n- if backup_client.backup_report_plans:\n- report.status = \"PASS\"\n- report.status_extended = f\"At least one backup report plan exists: { backup_client.backup_report_plans[0].name}\"\n- report.resource_arn = backup_client.backup_report_plans[0].arn\n- report.resource_id = backup_client.backup_report_plans[0].name\n- report.region = backup_client.backup_report_plans[0].region\n+ # We only check report plans if backup plans exist, reducing noise\n+ if backup_client.backup_plans:\n+ report = Check_Report_AWS(self.metadata())\n+ report.status = \"FAIL\"\n+ report.status_extended = \"No Backup Report Plan Exist\"\n+ report.resource_arn = \"\"\n+ report.resource_id = \"Backups\"\n+ report.region = backup_client.region\n+ if backup_client.backup_report_plans:\n+ report.status = \"PASS\"\n+ report.status_extended = f\"At least one backup report plan exists: { backup_client.backup_report_plans[0].name}\"\n+ report.resource_arn = backup_client.backup_report_plans[0].arn\n+ report.resource_id = backup_client.backup_report_plans[0].name\n+ report.region = backup_client.backup_report_plans[0].region\n \n- findings.append(report)\n+ findings.append(report)\n return findings\ndiff --git a/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py b/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py\n--- a/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py\n+++ b/prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py\n@@ -9,7 +9,7 @@\n report.status = \"FAIL\"\n report.status_extended = \"No Backup Vault Exist\"\n report.resource_arn = \"\"\n- report.resource_id = \"No Backups\"\n+ report.resource_id = \"Backups\"\n report.region = 
backup_client.region\n if backup_client.backup_vaults:\n report.status = \"PASS\"\n", "issue": "[Bug]: backup_plans_exist and backup_reportplans_exist trigger in regions where not backups exist\n### Steps to Reproduce\n\nThe mentioned checks are triggered even if no backups are present or configured.\n\n### Expected behavior\n\nWhen the check can't find a resource ID (it actually says \"No Backups\"), the check shouldn't trigger\n\n### Actual Result with Screenshots or Logs\n\n\r\n\n\n### How did you install Prowler?\n\nFrom pip package (pip install prowler)\n\n### Environment Resource\n\nWorkstation\n\n### OS used\n\nWSL2 under Windows 11\n\n### Prowler version\n\nProwler 3.4.0 (it is the latest version, yay!)\n\n### Pip version\n\npip 22.0.2 from /usr/lib/python3/dist-packages/pip (python 3.10)\n\n### Context\n\n_No response_\n", "before_files": [{"content": "from prowler.lib.check.models import Check, Check_Report_AWS\nfrom prowler.providers.aws.services.backup.backup_client import backup_client\n\n\nclass backup_plans_exist(Check):\n def execute(self):\n findings = []\n report = Check_Report_AWS(self.metadata())\n report.status = \"FAIL\"\n report.status_extended = \"No Backup Plan Exist\"\n report.resource_arn = \"\"\n report.resource_id = \"No Backups\"\n report.region = backup_client.region\n if backup_client.backup_plans:\n report.status = \"PASS\"\n report.status_extended = f\"At least one backup plan exists: { backup_client.backup_plans[0].name}\"\n report.resource_arn = backup_client.backup_plans[0].arn\n report.resource_id = backup_client.backup_plans[0].name\n report.region = backup_client.backup_plans[0].region\n\n findings.append(report)\n return findings\n", "path": "prowler/providers/aws/services/backup/backup_plans_exist/backup_plans_exist.py"}, {"content": "from prowler.lib.check.models import Check, Check_Report_AWS\nfrom prowler.providers.aws.services.backup.backup_client import backup_client\n\n\nclass backup_reportplans_exist(Check):\n def execute(self):\n findings = []\n report = Check_Report_AWS(self.metadata())\n report.status = \"FAIL\"\n report.status_extended = \"No Backup Report Plan Exist\"\n report.resource_arn = \"\"\n report.resource_id = \"No Backups\"\n report.region = backup_client.region\n if backup_client.backup_report_plans:\n report.status = \"PASS\"\n report.status_extended = f\"At least one backup report plan exists: { backup_client.backup_report_plans[0].name}\"\n report.resource_arn = backup_client.backup_report_plans[0].arn\n report.resource_id = backup_client.backup_report_plans[0].name\n report.region = backup_client.backup_report_plans[0].region\n\n findings.append(report)\n return findings\n", "path": "prowler/providers/aws/services/backup/backup_reportplans_exist/backup_reportplans_exist.py"}, {"content": "from prowler.lib.check.models import Check, Check_Report_AWS\nfrom prowler.providers.aws.services.backup.backup_client import backup_client\n\n\nclass backup_vaults_exist(Check):\n def execute(self):\n findings = []\n report = Check_Report_AWS(self.metadata())\n report.status = \"FAIL\"\n report.status_extended = \"No Backup Vault Exist\"\n report.resource_arn = \"\"\n report.resource_id = \"No Backups\"\n report.region = backup_client.region\n if backup_client.backup_vaults:\n report.status = \"PASS\"\n report.status_extended = f\"At least one backup vault exists: { backup_client.backup_vaults[0].name}\"\n report.resource_arn = backup_client.backup_vaults[0].arn\n report.resource_id = backup_client.backup_vaults[0].name\n report.region 
= backup_client.backup_vaults[0].region\n\n findings.append(report)\n return findings\n", "path": "prowler/providers/aws/services/backup/backup_vaults_exist/backup_vaults_exist.py"}]} | 1,591 | 916 |
gh_patches_debug_19093 | rasdani/github-patches | git_diff | weecology__retriever-287 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
download command should probably fail when the specified path does not exist
A dataset can be downloaded to a specific path with the `download` function by specifying the `-p` argument. For example, `retriever download MCDB -p my_path` will download each of the MCDB files and then copy them to the directory `my_path`, but if `my_path` does not exist, a file called `my_path` is created and the files overwrite that file as each is copied from the download directory. It may be best if the retriever fails with a warning that the path does not exist.
</issue>
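A quick sketch of the failure mode and one possible guard. `copy_to_destination` is a hypothetical helper, not retriever code; the key fact is that `shutil.copy(src, dst)` writes onto `dst` when `dst` names a plain file, which produces the silent overwrite described above. (The accepted patch below opts to create the missing directory instead of failing.)

```python
import os
import shutil


def copy_to_destination(file_name, dest_dir):
    """Copy a downloaded file into dest_dir, failing loudly on a bad path."""
    if not os.path.isdir(dest_dir):
        # Without this guard, shutil.copy would create/overwrite a plain
        # file named dest_dir, once per downloaded file.
        raise NotADirectoryError(
            "destination path %r does not exist or is not a directory" % dest_dir
        )
    shutil.copy(file_name, dest_dir)
```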
<code>
[start of engines/download_only.py]
1 import os
2 import platform
3 import shutil
4 import inspect
5 from retriever.lib.engine import filename_from_url
6 from retriever.lib.models import Engine, no_cleanup
7 from retriever import DATA_DIR, HOME_DIR
8
9 class DummyConnection:
10 def cursor(self):
11 pass
12 def commit(self):
13 pass
14 def rollback(self):
15 pass
16 def close(self):
17 pass
18
19 class DummyCursor(DummyConnection):
20 pass
21
22
23 class engine(Engine):
24 """Engine instance for writing data to a CSV file."""
25 name = "Download Only"
26 abbreviation = "download"
27 required_opts = [("path",
28 "File path to copy data files",
29 "./"),
30 ]
31
32 def table_exists(self, dbname, tablename):
33 try:
34 tablename = self.table_name(name=tablename, dbname=dbname)
35 return os.path.exists(tablename)
36 except:
37 return False
38
39 def get_connection(self):
40 """Gets the db connection."""
41 self.get_input()
42 return DummyConnection()
43
44 def final_cleanup(self):
45 data_dir = self.format_data_dir()
46 if hasattr(self, "all_files"):
47 for file_name in self.all_files:
48 file_path, file_name_nopath = os.path.split(file_name)
49 if file_path == DATA_DIR:
50 print ("%s is already in the working directory" % file_name_nopath)
51 print("Keeping existing copy.")
52 else:
53 print("Copying %s from %s" % (file_name_nopath, file_path))
54 shutil.copy(file_name, self.opts['path'])
55 self.all_files = set()
56
57 def auto_create_table(self, table, url=None, filename=None, pk=None):
58 if url and not filename:
59 filename = filename_from_url(url)
60
61 if url and not self.find_file(filename):
62 # If the file doesn't exist, download it
63 self.download_file(url, filename)
64
65 def insert_data_from_url(self, url):
66 filename = filename_from_url(url)
67 find = self.find_file(filename)
68 if not find:
69 self.create_raw_data_dir()
70 self.download_file(url, filename)
71
72 def find_file(self, filename):
73 result = Engine.find_file(self, filename)
74 if not hasattr(self, "all_files"): self.all_files = set()
75 if result: self.all_files.add(result)
76 return result
77
78 def register_files(self, filenames):
79 """Identify a list of files to be moved by the download
80
81 When downloading archives with multiple files the engine needs to be
82 informed of all of the file names so that it can move them.
83
84 """
85 full_filenames = {self.find_file(filename) for filename in filenames}
86 self.all_files = self.all_files.union(full_filenames)
87
88
89 # replace all other methods with a function that does nothing
90 def dummy_method(self, *args, **kwargs):
91 pass
92 methods = inspect.getmembers(engine, predicate=inspect.ismethod)
93 keep_methods = {'table_exists',
94 'get_connection',
95 'final_cleanup',
96 'auto_create_table',
97 'insert_data_from_url',
98 }
99 remove_methods = ['insert_data_from_file']
100 for name, method in methods:
101 if (not name in keep_methods
102 and not 'download' in name
103 and not 'file' in name
104 and not 'dir' in name):
105
106 setattr(engine, name, dummy_method)
107 for name in remove_methods:
108 setattr(engine, name, dummy_method)
109
[end of engines/download_only.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/engines/download_only.py b/engines/download_only.py
--- a/engines/download_only.py
+++ b/engines/download_only.py
@@ -51,7 +51,18 @@
print("Keeping existing copy.")
else:
print("Copying %s from %s" % (file_name_nopath, file_path))
- shutil.copy(file_name, self.opts['path'])
+ if os.path.isdir(self.opts['path']):
+ try:
+ shutil.copy(file_name, self.opts['path'])
+ except:
+ print("Couldn't copy file to %s" % self.opts['path'])
+ else:
+ try:
+ print("Creating directory %s" % self.opts['path'])
+ os.mkdir(self.opts['path'])
+ shutil.copy(file_name, self.opts['path'])
+ except:
+ print("Couldn't create directory %s" % self.opts['path'])
self.all_files = set()
def auto_create_table(self, table, url=None, filename=None, pk=None):
| {"golden_diff": "diff --git a/engines/download_only.py b/engines/download_only.py\n--- a/engines/download_only.py\n+++ b/engines/download_only.py\n@@ -51,7 +51,18 @@\n print(\"Keeping existing copy.\")\n else:\n print(\"Copying %s from %s\" % (file_name_nopath, file_path))\n- shutil.copy(file_name, self.opts['path'])\n+ if os.path.isdir(self.opts['path']):\n+ try:\n+ shutil.copy(file_name, self.opts['path'])\n+ except:\n+ print(\"Couldn't copy file to %s\" % self.opts['path'])\n+ else:\n+ try:\n+ print(\"Creating directory %s\" % self.opts['path'])\n+ os.mkdir(self.opts['path'])\n+ shutil.copy(file_name, self.opts['path'])\n+ except:\n+ print(\"Couldn't create directory %s\" % self.opts['path'])\n self.all_files = set()\n \n def auto_create_table(self, table, url=None, filename=None, pk=None):\n", "issue": "download command should probably fail when specified path does not exist\nA datsaet can be downloaded to a specific path with the function `download` while specifying the -p argument. For example `retriever download MCDB -p my_path` will download each of the MCDB files and then copy them to the directory my_path but if my_path does not exist a file called my_path is created and the files overwrite that file as each is copied from the download directory. It may be best if the retriever fails with a warning that the path does not exist. \n\n", "before_files": [{"content": "import os\nimport platform\nimport shutil\nimport inspect\nfrom retriever.lib.engine import filename_from_url\nfrom retriever.lib.models import Engine, no_cleanup\nfrom retriever import DATA_DIR, HOME_DIR\n\nclass DummyConnection:\n def cursor(self):\n pass\n def commit(self):\n pass\n def rollback(self):\n pass\n def close(self):\n pass\n\nclass DummyCursor(DummyConnection):\n pass\n\n\nclass engine(Engine):\n \"\"\"Engine instance for writing data to a CSV file.\"\"\"\n name = \"Download Only\"\n abbreviation = \"download\"\n required_opts = [(\"path\",\n \"File path to copy data files\",\n \"./\"),\n ]\n\n def table_exists(self, dbname, tablename):\n try:\n tablename = self.table_name(name=tablename, dbname=dbname)\n return os.path.exists(tablename)\n except:\n return False\n\n def get_connection(self):\n \"\"\"Gets the db connection.\"\"\"\n self.get_input()\n return DummyConnection()\n\n def final_cleanup(self):\n data_dir = self.format_data_dir()\n if hasattr(self, \"all_files\"):\n for file_name in self.all_files:\n file_path, file_name_nopath = os.path.split(file_name)\n if file_path == DATA_DIR:\n print (\"%s is already in the working directory\" % file_name_nopath)\n print(\"Keeping existing copy.\")\n else:\n print(\"Copying %s from %s\" % (file_name_nopath, file_path))\n shutil.copy(file_name, self.opts['path'])\n self.all_files = set()\n\n def auto_create_table(self, table, url=None, filename=None, pk=None):\n if url and not filename:\n filename = filename_from_url(url)\n\n if url and not self.find_file(filename):\n # If the file doesn't exist, download it\n self.download_file(url, filename)\n\n def insert_data_from_url(self, url):\n filename = filename_from_url(url)\n find = self.find_file(filename)\n if not find:\n self.create_raw_data_dir()\n self.download_file(url, filename)\n\n def find_file(self, filename):\n result = Engine.find_file(self, filename)\n if not hasattr(self, \"all_files\"): self.all_files = set()\n if result: self.all_files.add(result)\n return result\n\n def register_files(self, filenames):\n \"\"\"Identify a list of files to be moved by the download\n\n When downloading archives 
with multiple files the engine needs to be\n informed of all of the file names so that it can move them.\n\n \"\"\"\n full_filenames = {self.find_file(filename) for filename in filenames}\n self.all_files = self.all_files.union(full_filenames)\n\n\n# replace all other methods with a function that does nothing\ndef dummy_method(self, *args, **kwargs):\n pass\nmethods = inspect.getmembers(engine, predicate=inspect.ismethod)\nkeep_methods = {'table_exists',\n 'get_connection',\n 'final_cleanup',\n 'auto_create_table',\n 'insert_data_from_url',\n }\nremove_methods = ['insert_data_from_file']\nfor name, method in methods:\n if (not name in keep_methods\n and not 'download' in name\n and not 'file' in name\n and not 'dir' in name):\n\n setattr(engine, name, dummy_method)\nfor name in remove_methods:\n setattr(engine, name, dummy_method)\n", "path": "engines/download_only.py"}]} | 1,597 | 232 |
gh_patches_debug_12065 | rasdani/github-patches | git_diff | tinygrad__tinygrad-65 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
EOFError: Ran out of input
When running the example and solving the "Can't import fetch from utils" issue, this one comes up:

</issue>
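The screenshot is not reproduced here, but "EOFError: Ran out of input" is what Python raises when deserialising an empty file (for example, `pickle.load` on a zero-byte file), which is consistent with a cached download that was created but never filled. A sketch of the guard; the accepted patch below adds the same size check to `fetch`:

```python
import os


def cache_hit(path):
    """Treat a zero-byte cache file as a miss so the data is re-downloaded."""
    return os.path.isfile(path) and os.stat(path).st_size > 0
```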
<code>
[start of tinygrad/utils.py]
1 import numpy as np
2
3 def mask_like(like, mask_inx, mask_value = 1.0):
4 mask = np.zeros_like(like).reshape(-1)
5 mask[mask_inx] = mask_value
6 return mask.reshape(like.shape)
7
8 def layer_init_uniform(*x):
9 ret = np.random.uniform(-1., 1., size=x)/np.sqrt(np.prod(x))
10 return ret.astype(np.float32)
11
12 def fetch(url):
13 import requests, os, hashlib, tempfile
14 fp = os.path.join(tempfile.gettempdir(), hashlib.md5(url.encode('utf-8')).hexdigest())
15 if os.path.isfile(fp):
16 with open(fp, "rb") as f:
17 dat = f.read()
18 else:
19 print("fetching %s" % url)
20 with open(fp+".tmp", "wb") as f:
21 dat = requests.get(url).content
22 f.write(dat)
23 os.rename(fp+".tmp", fp)
24 return dat
25
26 def fetch_mnist():
27 import gzip
28 parse = lambda dat: np.frombuffer(gzip.decompress(dat), dtype=np.uint8).copy()
29 X_train = parse(fetch("http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz"))[0x10:].reshape((-1, 28, 28))
30 Y_train = parse(fetch("http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz"))[8:]
31 X_test = parse(fetch("http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz"))[0x10:].reshape((-1, 28, 28))
32 Y_test = parse(fetch("http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz"))[8:]
33 return X_train, Y_train, X_test, Y_test
34
35
[end of tinygrad/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tinygrad/utils.py b/tinygrad/utils.py
--- a/tinygrad/utils.py
+++ b/tinygrad/utils.py
@@ -1,4 +1,5 @@
import numpy as np
+import os
def mask_like(like, mask_inx, mask_value = 1.0):
mask = np.zeros_like(like).reshape(-1)
@@ -12,7 +13,7 @@
def fetch(url):
import requests, os, hashlib, tempfile
fp = os.path.join(tempfile.gettempdir(), hashlib.md5(url.encode('utf-8')).hexdigest())
- if os.path.isfile(fp):
+ if os.path.isfile(fp) and os.stat(fp).st_size > 0:
with open(fp, "rb") as f:
dat = f.read()
else:
| {"golden_diff": "diff --git a/tinygrad/utils.py b/tinygrad/utils.py\n--- a/tinygrad/utils.py\n+++ b/tinygrad/utils.py\n@@ -1,4 +1,5 @@\n import numpy as np\n+import os\n \n def mask_like(like, mask_inx, mask_value = 1.0):\n mask = np.zeros_like(like).reshape(-1)\n@@ -12,7 +13,7 @@\n def fetch(url):\n import requests, os, hashlib, tempfile\n fp = os.path.join(tempfile.gettempdir(), hashlib.md5(url.encode('utf-8')).hexdigest()) \n- if os.path.isfile(fp):\n+ if os.path.isfile(fp) and os.stat(fp).st_size > 0:\n with open(fp, \"rb\") as f:\n dat = f.read()\n else:\n", "issue": "EOFError: Ran out of input\nWhen running example and solving \"Can't import fetch from utils\" issue, this one comes up:\r\n\r\n\r\n\n", "before_files": [{"content": "import numpy as np\n\ndef mask_like(like, mask_inx, mask_value = 1.0):\n mask = np.zeros_like(like).reshape(-1)\n mask[mask_inx] = mask_value\n return mask.reshape(like.shape)\n\ndef layer_init_uniform(*x):\n ret = np.random.uniform(-1., 1., size=x)/np.sqrt(np.prod(x))\n return ret.astype(np.float32)\n\ndef fetch(url):\n import requests, os, hashlib, tempfile\n fp = os.path.join(tempfile.gettempdir(), hashlib.md5(url.encode('utf-8')).hexdigest()) \n if os.path.isfile(fp):\n with open(fp, \"rb\") as f:\n dat = f.read()\n else:\n print(\"fetching %s\" % url)\n with open(fp+\".tmp\", \"wb\") as f:\n dat = requests.get(url).content\n f.write(dat)\n os.rename(fp+\".tmp\", fp)\n return dat\n\ndef fetch_mnist():\n import gzip\n parse = lambda dat: np.frombuffer(gzip.decompress(dat), dtype=np.uint8).copy()\n X_train = parse(fetch(\"http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz\"))[0x10:].reshape((-1, 28, 28))\n Y_train = parse(fetch(\"http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz\"))[8:]\n X_test = parse(fetch(\"http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz\"))[0x10:].reshape((-1, 28, 28))\n Y_test = parse(fetch(\"http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz\"))[8:]\n return X_train, Y_train, X_test, Y_test\n\n", "path": "tinygrad/utils.py"}]} | 1,106 | 177 |
gh_patches_debug_20502 | rasdani/github-patches | git_diff | cloudtools__troposphere-1205 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add 'PermissionsBoundary' properties to AWS::IAM::Role and AWS::IAM::User
This property has been released on November 9 by AWS.
https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/ReleaseHistory.html
```
PermissionsBoundary
The ARN of the policy that is used to set the permissions boundary for the role. Minimum length of 20. Maximum length of 2048.
Required: No
Type: String
Update requires: No interruption
```
</issue>
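For context, this is how a template author would set the property once it exists: a sketch against the patched `Role` class, with a placeholder trust policy, account id, and policy ARN:

```python
from troposphere.iam import Role

role = Role(
    "BoundedRole",
    AssumeRolePolicyDocument={"Statement": []},  # minimal trust-policy stub
    PermissionsBoundary="arn:aws:iam::123456789012:policy/example-boundary",
)
```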
<code>
[start of troposphere/iam.py]
1 # Copyright (c) 2012-2013, Mark Peek <[email protected]>
2 # All rights reserved.
3 #
4 # See LICENSE file for full license.
5
6 from . import AWSObject, AWSProperty
7 from .validators import integer, boolean, status
8 from .validators import iam_path, iam_role_name, iam_group_name, iam_user_name
9
10 try:
11 from awacs.aws import Policy
12 policytypes = (dict, Policy)
13 except ImportError:
14 policytypes = dict,
15
16
17 Active = "Active"
18 Inactive = "Inactive"
19
20
21 class AccessKey(AWSObject):
22 resource_type = "AWS::IAM::AccessKey"
23
24 props = {
25 'Serial': (integer, False),
26 'Status': (status, False),
27 'UserName': (basestring, True),
28 }
29
30
31 class PolicyType(AWSObject):
32 resource_type = "AWS::IAM::Policy"
33
34 props = {
35 'Groups': ([basestring], False),
36 'PolicyDocument': (policytypes, True),
37 'PolicyName': (basestring, True),
38 'Roles': ([basestring], False),
39 'Users': ([basestring], False),
40 }
41
42
43 class Policy(AWSProperty):
44 props = {
45 'PolicyDocument': (policytypes, True),
46 'PolicyName': (basestring, True),
47 }
48
49
50 PolicyProperty = Policy
51
52
53 class Group(AWSObject):
54 resource_type = "AWS::IAM::Group"
55
56 props = {
57 'GroupName': (iam_group_name, False),
58 'ManagedPolicyArns': ([basestring], False),
59 'Path': (iam_path, False),
60 'Policies': ([Policy], False),
61 }
62
63
64 class InstanceProfile(AWSObject):
65 resource_type = "AWS::IAM::InstanceProfile"
66
67 props = {
68 'Path': (iam_path, False),
69 'Roles': (list, True),
70 'InstanceProfileName': (basestring, False),
71 }
72
73
74 class Role(AWSObject):
75 resource_type = "AWS::IAM::Role"
76
77 props = {
78 'AssumeRolePolicyDocument': (policytypes, True),
79 'ManagedPolicyArns': ([basestring], False),
80 'MaxSessionDuration': (integer, False),
81 'Path': (iam_path, False),
82 'Policies': ([Policy], False),
83 'RoleName': (iam_role_name, False),
84 }
85
86
87 class ServiceLinkedRole(AWSObject):
88 resource_type = "AWS::IAM::ServiceLinkedRole"
89
90 props = {
91 'AWSServiceName': (basestring, True),
92 'CustomSuffix': (basestring, False),
93 'Description': (basestring, False),
94 }
95
96
97 class LoginProfile(AWSProperty):
98 props = {
99 'Password': (basestring, True),
100 'PasswordResetRequired': (boolean, False),
101 }
102
103
104 class User(AWSObject):
105 resource_type = "AWS::IAM::User"
106
107 props = {
108 'Path': (iam_path, False),
109 'Groups': ([basestring], False),
110 'ManagedPolicyArns': ([basestring], False),
111 'LoginProfile': (LoginProfile, False),
112 'Policies': ([Policy], False),
113 'UserName': (iam_user_name, False),
114 }
115
116
117 class UserToGroupAddition(AWSObject):
118 resource_type = "AWS::IAM::UserToGroupAddition"
119
120 props = {
121 'GroupName': (basestring, True),
122 'Users': (list, True),
123 }
124
125
126 class ManagedPolicy(AWSObject):
127 resource_type = "AWS::IAM::ManagedPolicy"
128
129 props = {
130 'Description': (basestring, False),
131 'Groups': ([basestring], False),
132 'ManagedPolicyName': (basestring, False),
133 'Path': (iam_path, False),
134 'PolicyDocument': (policytypes, True),
135 'Roles': ([basestring], False),
136 'Users': ([basestring], False),
137 }
138
[end of troposphere/iam.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/troposphere/iam.py b/troposphere/iam.py
--- a/troposphere/iam.py
+++ b/troposphere/iam.py
@@ -79,6 +79,7 @@
'ManagedPolicyArns': ([basestring], False),
'MaxSessionDuration': (integer, False),
'Path': (iam_path, False),
+ 'PermissionsBoundary': (basestring, False),
'Policies': ([Policy], False),
'RoleName': (iam_role_name, False),
}
@@ -105,10 +106,11 @@
resource_type = "AWS::IAM::User"
props = {
- 'Path': (iam_path, False),
'Groups': ([basestring], False),
- 'ManagedPolicyArns': ([basestring], False),
'LoginProfile': (LoginProfile, False),
+ 'ManagedPolicyArns': ([basestring], False),
+ 'Path': (iam_path, False),
+ 'PermissionsBoundary': (basestring, False),
'Policies': ([Policy], False),
'UserName': (iam_user_name, False),
}
| {"golden_diff": "diff --git a/troposphere/iam.py b/troposphere/iam.py\n--- a/troposphere/iam.py\n+++ b/troposphere/iam.py\n@@ -79,6 +79,7 @@\n 'ManagedPolicyArns': ([basestring], False),\n 'MaxSessionDuration': (integer, False),\n 'Path': (iam_path, False),\n+ 'PermissionsBoundary': (basestring, False),\n 'Policies': ([Policy], False),\n 'RoleName': (iam_role_name, False),\n }\n@@ -105,10 +106,11 @@\n resource_type = \"AWS::IAM::User\"\n \n props = {\n- 'Path': (iam_path, False),\n 'Groups': ([basestring], False),\n- 'ManagedPolicyArns': ([basestring], False),\n 'LoginProfile': (LoginProfile, False),\n+ 'ManagedPolicyArns': ([basestring], False),\n+ 'Path': (iam_path, False),\n+ 'PermissionsBoundary': (basestring, False),\n 'Policies': ([Policy], False),\n 'UserName': (iam_user_name, False),\n }\n", "issue": "Add 'PermissionsBoundary' properties to AWS::IAM::Role and AWS::IAM::User\nThis property has been released on November 9 by AWS.\r\nhttps://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/ReleaseHistory.html\r\n```\r\nPermissionsBoundary\r\n\r\n The ARN of the policy that is used to set the permissions boundary for the role. Minimum length of 20. Maximum length of 2048.\r\n\r\n Required: No\r\n\r\n Type: String\r\n\r\n Update requires: No interruption\r\n\r\n```\n", "before_files": [{"content": "# Copyright (c) 2012-2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSObject, AWSProperty\nfrom .validators import integer, boolean, status\nfrom .validators import iam_path, iam_role_name, iam_group_name, iam_user_name\n\ntry:\n from awacs.aws import Policy\n policytypes = (dict, Policy)\nexcept ImportError:\n policytypes = dict,\n\n\nActive = \"Active\"\nInactive = \"Inactive\"\n\n\nclass AccessKey(AWSObject):\n resource_type = \"AWS::IAM::AccessKey\"\n\n props = {\n 'Serial': (integer, False),\n 'Status': (status, False),\n 'UserName': (basestring, True),\n }\n\n\nclass PolicyType(AWSObject):\n resource_type = \"AWS::IAM::Policy\"\n\n props = {\n 'Groups': ([basestring], False),\n 'PolicyDocument': (policytypes, True),\n 'PolicyName': (basestring, True),\n 'Roles': ([basestring], False),\n 'Users': ([basestring], False),\n }\n\n\nclass Policy(AWSProperty):\n props = {\n 'PolicyDocument': (policytypes, True),\n 'PolicyName': (basestring, True),\n }\n\n\nPolicyProperty = Policy\n\n\nclass Group(AWSObject):\n resource_type = \"AWS::IAM::Group\"\n\n props = {\n 'GroupName': (iam_group_name, False),\n 'ManagedPolicyArns': ([basestring], False),\n 'Path': (iam_path, False),\n 'Policies': ([Policy], False),\n }\n\n\nclass InstanceProfile(AWSObject):\n resource_type = \"AWS::IAM::InstanceProfile\"\n\n props = {\n 'Path': (iam_path, False),\n 'Roles': (list, True),\n 'InstanceProfileName': (basestring, False),\n }\n\n\nclass Role(AWSObject):\n resource_type = \"AWS::IAM::Role\"\n\n props = {\n 'AssumeRolePolicyDocument': (policytypes, True),\n 'ManagedPolicyArns': ([basestring], False),\n 'MaxSessionDuration': (integer, False),\n 'Path': (iam_path, False),\n 'Policies': ([Policy], False),\n 'RoleName': (iam_role_name, False),\n }\n\n\nclass ServiceLinkedRole(AWSObject):\n resource_type = \"AWS::IAM::ServiceLinkedRole\"\n\n props = {\n 'AWSServiceName': (basestring, True),\n 'CustomSuffix': (basestring, False),\n 'Description': (basestring, False),\n }\n\n\nclass LoginProfile(AWSProperty):\n props = {\n 'Password': (basestring, True),\n 'PasswordResetRequired': (boolean, False),\n }\n\n\nclass User(AWSObject):\n 
resource_type = \"AWS::IAM::User\"\n\n props = {\n 'Path': (iam_path, False),\n 'Groups': ([basestring], False),\n 'ManagedPolicyArns': ([basestring], False),\n 'LoginProfile': (LoginProfile, False),\n 'Policies': ([Policy], False),\n 'UserName': (iam_user_name, False),\n }\n\n\nclass UserToGroupAddition(AWSObject):\n resource_type = \"AWS::IAM::UserToGroupAddition\"\n\n props = {\n 'GroupName': (basestring, True),\n 'Users': (list, True),\n }\n\n\nclass ManagedPolicy(AWSObject):\n resource_type = \"AWS::IAM::ManagedPolicy\"\n\n props = {\n 'Description': (basestring, False),\n 'Groups': ([basestring], False),\n 'ManagedPolicyName': (basestring, False),\n 'Path': (iam_path, False),\n 'PolicyDocument': (policytypes, True),\n 'Roles': ([basestring], False),\n 'Users': ([basestring], False),\n }\n", "path": "troposphere/iam.py"}]} | 1,817 | 257 |
gh_patches_debug_19916 | rasdani/github-patches | git_diff | weecology__retriever-1121 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add description field(s) to setup.py
This populates the description on PyPI:
https://packaging.python.org/tutorials/distributing-packages/#description
</issue>
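The shape being asked for, per the linked guide: an illustrative `setup()` call in which every field value is a placeholder rather than the project's final wording (`long_description_content_type` assumes a reasonably recent setuptools):

```python
from setuptools import setup

setup(
    name="example-package",
    version="0.1.0",
    # Short summary shown in listings and search results on PyPI.
    description="One-line summary of the package",
    # Longer prose rendered on the PyPI project page.
    long_description="A few paragraphs describing what the package does.",
    long_description_content_type="text/plain",
)
```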
<code>
[start of setup.py]
1 """Use the following command to install retriever: python setup.py install"""
2 from __future__ import absolute_import
3
4 import os
5 import platform
6
7 from pkg_resources import parse_version
8 from setuptools import setup, find_packages
9
10 current_platform = platform.system().lower()
11 extra_includes = []
12 if current_platform == "windows":
13 extra_includes += ["pypyodbc"]
14
15 if os.path.exists(".git/hooks"): # check if we are in git repo
16 os.system("cp hooks/pre-commit .git/hooks/pre-commit")
17 os.system("chmod +x .git/hooks/pre-commit")
18
19 app_data = "~/.retriever/scripts"
20 if os.path.exists(app_data):
21 os.system("rm -r {}".format(app_data))
22
23 __version__ = 'v2.1.dev'
24 with open(os.path.join("retriever", "_version.py"), "w") as version_file:
25 version_file.write("__version__ = " + "'" + __version__ + "'\n")
26 version_file.close()
27
28
29 def clean_version(v):
30 return parse_version(v).__repr__().lstrip("<Version('").rstrip("')>")
31
32 includes = [
33 'xlrd',
34 'future',
35 'argcomplete',
36 'pymysql',
37 'psycopg2',
38 'sqlite3',
39 ] + extra_includes
40
41 excludes = [
42 'pyreadline',
43 'doctest',
44 'pickle',
45 'pdb',
46 'pywin', 'pywin.debugger',
47 'pywin.debugger.dbgcon',
48 'pywin.dialogs', 'pywin.dialogs.list',
49 'Tkconstants', 'Tkinter', 'tcl', 'tk'
50 ]
51
52 setup(name='retriever',
53 version=clean_version(__version__),
54 description='Data Retriever',
55 author='Ben Morris, Shivam Negi, Akash Goel, Andrew Zhang, Henry Senyondo, Ethan White',
56 author_email='[email protected]',
57 url='https://github.com/weecology/retriever',
58 classifiers=['Intended Audience :: Science/Research',
59 'License :: OSI Approved :: MIT License',
60 'Programming Language :: Python',
61 'Programming Language :: Python :: 2',
62 'Programming Language :: Python :: 3', ],
63 packages=find_packages(
64 exclude=['hooks',
65 'docs',
66 'tests',
67 'scripts',
68 'docker',
69 ".cache"]),
70 entry_points={
71 'console_scripts': [
72 'retriever = retriever.__main__:main',
73 ],
74 },
75 install_requires=[
76 'xlrd',
77 'future',
78 'argcomplete',
79 'tqdm'
80 ],
81 data_files=[('', ['CITATION'])],
82 setup_requires=[],
83 )
84
85 # windows doesn't have bash. No point in using bash-completion
86 if current_platform != "windows":
87 # if platform is OS X use "~/.bash_profile"
88 if current_platform == "darwin":
89 bash_file = "~/.bash_profile"
90 # if platform is Linux use "~/.bashrc
91 elif current_platform == "linux":
92 bash_file = "~/.bashrc"
93 # else write and discard
94 else:
95 bash_file = "/dev/null"
96
97 argcomplete_command = 'eval "$(register-python-argcomplete retriever)"'
98 with open(os.path.expanduser(bash_file), "a+") as bashrc:
99 bashrc.seek(0)
100 # register retriever for arg-completion if not already registered
101 # whenever a new shell is spawned
102 if argcomplete_command not in bashrc.read():
103 bashrc.write(argcomplete_command + "\n")
104 bashrc.close()
105 os.system("activate-global-python-argcomplete")
106 # register for the current shell
107 os.system(argcomplete_command)
108
109 try:
110 from retriever.compile import compile
111 from retriever.lib.repository import check_for_updates
112
113 check_for_updates(False)
114 compile()
115 except:
116 pass
117
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -29,6 +29,7 @@
def clean_version(v):
return parse_version(v).__repr__().lstrip("<Version('").rstrip("')>")
+
includes = [
'xlrd',
'future',
@@ -52,6 +53,10 @@
setup(name='retriever',
version=clean_version(__version__),
description='Data Retriever',
+ long_description=('The Data Retriever is a package manager for data. '
+ 'It downloads, cleans, and stores publicly available data, '
+ 'so that analysts spend less time cleaning and managing data, '
+ 'and more time analyzing it.'),
author='Ben Morris, Shivam Negi, Akash Goel, Andrew Zhang, Henry Senyondo, Ethan White',
author_email='[email protected]',
url='https://github.com/weecology/retriever',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -29,6 +29,7 @@\n def clean_version(v):\n return parse_version(v).__repr__().lstrip(\"<Version('\").rstrip(\"')>\")\n \n+\n includes = [\n 'xlrd',\n 'future',\n@@ -52,6 +53,10 @@\n setup(name='retriever',\n version=clean_version(__version__),\n description='Data Retriever',\n+ long_description=('The Data Retriever is a package manager for data. '\n+ 'It downloads, cleans, and stores publicly available data, '\n+ 'so that analysts spend less time cleaning and managing data, '\n+ 'and more time analyzing it.'),\n author='Ben Morris, Shivam Negi, Akash Goel, Andrew Zhang, Henry Senyondo, Ethan White',\n author_email='[email protected]',\n url='https://github.com/weecology/retriever',\n", "issue": "Add a description field(s) to setup.py\nThis populates the description on PYPI:\r\n\r\nhttps://packaging.python.org/tutorials/distributing-packages/#description\n", "before_files": [{"content": "\"\"\"Use the following command to install retriever: python setup.py install\"\"\"\nfrom __future__ import absolute_import\n\nimport os\nimport platform\n\nfrom pkg_resources import parse_version\nfrom setuptools import setup, find_packages\n\ncurrent_platform = platform.system().lower()\nextra_includes = []\nif current_platform == \"windows\":\n extra_includes += [\"pypyodbc\"]\n\nif os.path.exists(\".git/hooks\"): # check if we are in git repo\n os.system(\"cp hooks/pre-commit .git/hooks/pre-commit\")\n os.system(\"chmod +x .git/hooks/pre-commit\")\n\napp_data = \"~/.retriever/scripts\"\nif os.path.exists(app_data):\n os.system(\"rm -r {}\".format(app_data))\n\n__version__ = 'v2.1.dev'\nwith open(os.path.join(\"retriever\", \"_version.py\"), \"w\") as version_file:\n version_file.write(\"__version__ = \" + \"'\" + __version__ + \"'\\n\")\n version_file.close()\n\n\ndef clean_version(v):\n return parse_version(v).__repr__().lstrip(\"<Version('\").rstrip(\"')>\")\n\nincludes = [\n 'xlrd',\n 'future',\n 'argcomplete',\n 'pymysql',\n 'psycopg2',\n 'sqlite3',\n ] + extra_includes\n\nexcludes = [\n 'pyreadline',\n 'doctest',\n 'pickle',\n 'pdb',\n 'pywin', 'pywin.debugger',\n 'pywin.debugger.dbgcon',\n 'pywin.dialogs', 'pywin.dialogs.list',\n 'Tkconstants', 'Tkinter', 'tcl', 'tk'\n]\n\nsetup(name='retriever',\n version=clean_version(__version__),\n description='Data Retriever',\n author='Ben Morris, Shivam Negi, Akash Goel, Andrew Zhang, Henry Senyondo, Ethan White',\n author_email='[email protected]',\n url='https://github.com/weecology/retriever',\n classifiers=['Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3', ],\n packages=find_packages(\n exclude=['hooks',\n 'docs',\n 'tests',\n 'scripts',\n 'docker',\n \".cache\"]),\n entry_points={\n 'console_scripts': [\n 'retriever = retriever.__main__:main',\n ],\n },\n install_requires=[\n 'xlrd',\n 'future',\n 'argcomplete',\n 'tqdm'\n ],\n data_files=[('', ['CITATION'])],\n setup_requires=[],\n )\n\n# windows doesn't have bash. 
No point in using bash-completion\nif current_platform != \"windows\":\n # if platform is OS X use \"~/.bash_profile\"\n if current_platform == \"darwin\":\n bash_file = \"~/.bash_profile\"\n # if platform is Linux use \"~/.bashrc\n elif current_platform == \"linux\":\n bash_file = \"~/.bashrc\"\n # else write and discard\n else:\n bash_file = \"/dev/null\"\n\n argcomplete_command = 'eval \"$(register-python-argcomplete retriever)\"'\n with open(os.path.expanduser(bash_file), \"a+\") as bashrc:\n bashrc.seek(0)\n # register retriever for arg-completion if not already registered\n # whenever a new shell is spawned\n if argcomplete_command not in bashrc.read():\n bashrc.write(argcomplete_command + \"\\n\")\n bashrc.close()\n os.system(\"activate-global-python-argcomplete\")\n # register for the current shell\n os.system(argcomplete_command)\n\ntry:\n from retriever.compile import compile\n from retriever.lib.repository import check_for_updates\n\n check_for_updates(False)\n compile()\nexcept:\n pass\n", "path": "setup.py"}]} | 1,646 | 218 |
gh_patches_debug_51313 | rasdani/github-patches | git_diff | scikit-image__scikit-image-5128 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
filters.farid missing from skimage.filters documentation
## Description
The `filters.farid{,_h,_v}` functions are missing from the [`skimage.filters` documentation](https://scikit-image.org/docs/dev/api/skimage.filters.html). I presume this is because they are not in `__all__`? (No time to investigate right now.)
</issue>
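A toy module illustrating the mechanism the reporter suspects: names left out of `__all__` are not exported by star-imports, and the reporter's presumption (borne out by the patch below) is that the generated API docs are built from the same list:

```python
def sobel():
    pass


def farid():
    pass


# Only names listed here are treated as public API: exported by
# `from module import *` and, per the patch below, picked up when the
# documentation is generated.
__all__ = ['sobel']  # 'farid' exists but stays invisible until added here
```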
<code>
[start of skimage/filters/__init__.py]
1 from .lpi_filter import inverse, wiener, LPIFilter2D
2 from ._gaussian import (gaussian, _guess_spatial_dimensions,
3 difference_of_gaussians)
4 from .edges import (sobel, sobel_h, sobel_v,
5 scharr, scharr_h, scharr_v,
6 prewitt, prewitt_h, prewitt_v,
7 roberts, roberts_pos_diag, roberts_neg_diag,
8 laplace,
9 farid, farid_h, farid_v)
10 from ._rank_order import rank_order
11 from ._gabor import gabor_kernel, gabor
12 from .thresholding import (threshold_local, threshold_otsu, threshold_yen,
13 threshold_isodata, threshold_li, threshold_minimum,
14 threshold_mean, threshold_triangle,
15 threshold_niblack, threshold_sauvola,
16 threshold_multiotsu, try_all_threshold,
17 apply_hysteresis_threshold)
18 from .ridges import (meijering, sato, frangi, hessian)
19 from . import rank
20 from ._median import median
21 from ._sparse import correlate_sparse
22 from ._unsharp_mask import unsharp_mask
23 from ._window import window
24
25
26 __all__ = ['inverse',
27 'correlate_sparse',
28 'wiener',
29 'LPIFilter2D',
30 'gaussian',
31 'difference_of_gaussians',
32 'median',
33 'sobel',
34 'sobel_h',
35 'sobel_v',
36 'scharr',
37 'scharr_h',
38 'scharr_v',
39 'prewitt',
40 'prewitt_h',
41 'prewitt_v',
42 'roberts',
43 'roberts_pos_diag',
44 'roberts_neg_diag',
45 'laplace',
46 'rank_order',
47 'gabor_kernel',
48 'gabor',
49 'try_all_threshold',
50 'meijering',
51 'sato',
52 'frangi',
53 'hessian',
54 'threshold_otsu',
55 'threshold_yen',
56 'threshold_isodata',
57 'threshold_li',
58 'threshold_local',
59 'threshold_minimum',
60 'threshold_mean',
61 'threshold_niblack',
62 'threshold_sauvola',
63 'threshold_triangle',
64 'threshold_multiotsu',
65 'apply_hysteresis_threshold',
66 'rank',
67 'unsharp_mask',
68 'window']
69
[end of skimage/filters/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/skimage/filters/__init__.py b/skimage/filters/__init__.py
--- a/skimage/filters/__init__.py
+++ b/skimage/filters/__init__.py
@@ -43,6 +43,9 @@
'roberts_pos_diag',
'roberts_neg_diag',
'laplace',
+ 'farid',
+ 'farid_h',
+ 'farid_v',
'rank_order',
'gabor_kernel',
'gabor',
| {"golden_diff": "diff --git a/skimage/filters/__init__.py b/skimage/filters/__init__.py\n--- a/skimage/filters/__init__.py\n+++ b/skimage/filters/__init__.py\n@@ -43,6 +43,9 @@\n 'roberts_pos_diag',\n 'roberts_neg_diag',\n 'laplace',\n+ 'farid',\n+ 'farid_h',\n+ 'farid_v',\n 'rank_order',\n 'gabor_kernel',\n 'gabor',\n", "issue": "filters.farid missing from skimage.filters documentation\n## Description\r\n\r\nThe `filters.farid{,_h,_v}` functions are missing from the [`skimage.filters` documentation](https://scikit-image.org/docs/dev/api/skimage.filters.html). I presume this is because they are not it `__all__`? (No time to investigate right now.)\n", "before_files": [{"content": "from .lpi_filter import inverse, wiener, LPIFilter2D\nfrom ._gaussian import (gaussian, _guess_spatial_dimensions,\n difference_of_gaussians)\nfrom .edges import (sobel, sobel_h, sobel_v,\n scharr, scharr_h, scharr_v,\n prewitt, prewitt_h, prewitt_v,\n roberts, roberts_pos_diag, roberts_neg_diag,\n laplace,\n farid, farid_h, farid_v)\nfrom ._rank_order import rank_order\nfrom ._gabor import gabor_kernel, gabor\nfrom .thresholding import (threshold_local, threshold_otsu, threshold_yen,\n threshold_isodata, threshold_li, threshold_minimum,\n threshold_mean, threshold_triangle,\n threshold_niblack, threshold_sauvola,\n threshold_multiotsu, try_all_threshold,\n apply_hysteresis_threshold)\nfrom .ridges import (meijering, sato, frangi, hessian)\nfrom . import rank\nfrom ._median import median\nfrom ._sparse import correlate_sparse\nfrom ._unsharp_mask import unsharp_mask\nfrom ._window import window\n\n\n__all__ = ['inverse',\n 'correlate_sparse',\n 'wiener',\n 'LPIFilter2D',\n 'gaussian',\n 'difference_of_gaussians',\n 'median',\n 'sobel',\n 'sobel_h',\n 'sobel_v',\n 'scharr',\n 'scharr_h',\n 'scharr_v',\n 'prewitt',\n 'prewitt_h',\n 'prewitt_v',\n 'roberts',\n 'roberts_pos_diag',\n 'roberts_neg_diag',\n 'laplace',\n 'rank_order',\n 'gabor_kernel',\n 'gabor',\n 'try_all_threshold',\n 'meijering',\n 'sato',\n 'frangi',\n 'hessian',\n 'threshold_otsu',\n 'threshold_yen',\n 'threshold_isodata',\n 'threshold_li',\n 'threshold_local',\n 'threshold_minimum',\n 'threshold_mean',\n 'threshold_niblack',\n 'threshold_sauvola',\n 'threshold_triangle',\n 'threshold_multiotsu',\n 'apply_hysteresis_threshold',\n 'rank',\n 'unsharp_mask',\n 'window']\n", "path": "skimage/filters/__init__.py"}]} | 1,256 | 118 |