| problem_id | source | task_type | in_source_id | prompt | golden_diff | verification_info | num_tokens_prompt | num_tokens_diff |
|---|---|---|---|---|---|---|---|---|
| stringlengths 18–22 | stringclasses 1 value | stringclasses 1 value | stringlengths 13–58 | stringlengths 1.71k–9.01k | stringlengths 151–4.94k | stringlengths 465–11.3k | int64 557–2.05k | int64 48–1.02k |
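The summary above mirrors the dataset viewer for rasdani/github-patches. A hedged sketch for loading the rows programmatically; the dataset id comes from the source column, while the split name "train" is an assumption:

```python
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")  # split is assumed
row = ds[0]
print(row["problem_id"], row["in_source_id"])
print(row["num_tokens_prompt"], row["num_tokens_diff"])
```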
problem_id: gh_patches_debug_47845 | source: rasdani/github-patches | task_type: git_diff | in_source_id: bookwyrm-social__bookwyrm-550

prompt:

You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
About page requires login
**Describe the bug**
Accessing the "About this server" link (https://bookwyrm.social/about) redirects to login
**To Reproduce**
Steps to reproduce the behavior:
1. Go to https://bookwyrm.social/about
2. redirected to login instead of seeing an about page (the URL is login/?next=/about)
**Expected behavior**
Access to information about this site / server
**Desktop (please complete the following information):**
- OS: linux
- Browser firefox
- Version 85 (developer edition)
</issue>
<code>
[start of bookwyrm/views/landing.py]
1 ''' non-interactive pages '''
2 from django.contrib.auth.decorators import login_required
3 from django.core.paginator import Paginator
4 from django.db.models import Avg, Max
5 from django.template.response import TemplateResponse
6 from django.utils import timezone
7 from django.utils.decorators import method_decorator
8 from django.views import View
9
10 from bookwyrm import forms, models
11 from bookwyrm.settings import PAGE_LENGTH
12 from .helpers import get_activity_feed
13
14
15 # pylint: disable= no-self-use
16 @method_decorator(login_required, name='dispatch')
17 class About(View):
18 ''' create invites '''
19 def get(self, request):
20 ''' more information about the instance '''
21 data = {
22 'title': 'About',
23 }
24 return TemplateResponse(request, 'about.html', data)
25
26 class Home(View):
27 ''' discover page or home feed depending on auth '''
28 def get(self, request):
29 ''' this is the same as the feed on the home tab '''
30 if request.user.is_authenticated:
31 feed_view = Feed.as_view()
32 return feed_view(request, 'home')
33 discover_view = Discover.as_view()
34 return discover_view(request)
35
36 class Discover(View):
37 ''' preview of recently reviewed books '''
38 def get(self, request):
39 ''' tiled book activity page '''
40 books = models.Edition.objects.filter(
41 review__published_date__isnull=False,
42 review__user__local=True,
43 review__privacy__in=['public', 'unlisted'],
44 ).exclude(
45 cover__exact=''
46 ).annotate(
47 Max('review__published_date')
48 ).order_by('-review__published_date__max')[:6]
49
50 ratings = {}
51 for book in books:
52 reviews = models.Review.objects.filter(
53 book__in=book.parent_work.editions.all()
54 )
55 reviews = get_activity_feed(
56 request.user, ['public', 'unlisted'], queryset=reviews)
57 ratings[book.id] = reviews.aggregate(Avg('rating'))['rating__avg']
58 data = {
59 'title': 'Discover',
60 'register_form': forms.RegisterForm(),
61 'books': list(set(books)),
62 'ratings': ratings
63 }
64 return TemplateResponse(request, 'discover.html', data)
65
66
67 @method_decorator(login_required, name='dispatch')
68 class Feed(View):
69 ''' activity stream '''
70 def get(self, request, tab):
71 ''' user's homepage with activity feed '''
72 try:
73 page = int(request.GET.get('page', 1))
74 except ValueError:
75 page = 1
76
77 suggested_books = get_suggested_books(request.user)
78
79 if tab == 'home':
80 activities = get_activity_feed(
81 request.user, ['public', 'unlisted', 'followers'],
82 following_only=True)
83 elif tab == 'local':
84 activities = get_activity_feed(
85 request.user, ['public', 'followers'], local_only=True)
86 else:
87 activities = get_activity_feed(
88 request.user, ['public', 'followers'])
89 paginated = Paginator(activities, PAGE_LENGTH)
90
91 goal = models.AnnualGoal.objects.filter(
92 user=request.user, year=timezone.now().year
93 ).first()
94 data = {
95 'title': 'Updates Feed',
96 'user': request.user,
97 'suggested_books': suggested_books,
98 'activities': paginated.page(page),
99 'tab': tab,
100 'goal': goal,
101 'goal_form': forms.GoalForm(),
102 }
103 return TemplateResponse(request, 'feed.html', data)
104
105
106 def get_suggested_books(user, max_books=5):
107 ''' helper to get a user's recent books '''
108 book_count = 0
109 preset_shelves = [
110 ('reading', max_books), ('read', 2), ('to-read', max_books)
111 ]
112 suggested_books = []
113 for (preset, shelf_max) in preset_shelves:
114 limit = shelf_max if shelf_max < (max_books - book_count) \
115 else max_books - book_count
116 shelf = user.shelf_set.get(identifier=preset)
117
118 shelf_books = shelf.shelfbook_set.order_by(
119 '-updated_date'
120 ).all()[:limit]
121 if not shelf_books:
122 continue
123 shelf_preview = {
124 'name': shelf.name,
125 'books': [s.book for s in shelf_books]
126 }
127 suggested_books.append(shelf_preview)
128 book_count += len(shelf_preview['books'])
129 return suggested_books
130
[end of bookwyrm/views/landing.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

golden_diff:

diff --git a/bookwyrm/views/landing.py b/bookwyrm/views/landing.py
--- a/bookwyrm/views/landing.py
+++ b/bookwyrm/views/landing.py
@@ -13,7 +13,6 @@
# pylint: disable= no-self-use
-@method_decorator(login_required, name='dispatch')
class About(View):
''' create invites '''
def get(self, request):
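After the decorator is removed, /about should answer anonymous requests directly. A hedged regression-test sketch, assuming Django's test client and that the route is registered under the URL name 'about' (neither is stated in the row):

```python
from django.test import TestCase
from django.urls import reverse

class AboutPageAccessTest(TestCase):
    def test_about_does_not_redirect_anonymous_users(self):
        # Before the fix this answered 302 to login/?next=/about.
        response = self.client.get(reverse('about'))
        self.assertEqual(response.status_code, 200)
```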
| {"golden_diff": "diff --git a/bookwyrm/views/landing.py b/bookwyrm/views/landing.py\n--- a/bookwyrm/views/landing.py\n+++ b/bookwyrm/views/landing.py\n@@ -13,7 +13,6 @@\n \n \n # pylint: disable= no-self-use\n-@method_decorator(login_required, name='dispatch')\n class About(View):\n ''' create invites '''\n def get(self, request):\n", "issue": "About page requires login\n**Describe the bug**\r\nAccessing the \"About this server\" link (https://bookwyrm.social/about) redirects to login\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to https://bookwyrm.social/about\r\n2. redirected to login instead of seeing an about page (the URL is login/?next=/about)\r\n\r\n**Expected behavior**\r\nAccess to information about this site / server\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: linux\r\n - Browser firefox\r\n - Version 85 (developer edition)\r\n\n", "before_files": [{"content": "''' non-interactive pages '''\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.db.models import Avg, Max\nfrom django.template.response import TemplateResponse\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.settings import PAGE_LENGTH\nfrom .helpers import get_activity_feed\n\n\n# pylint: disable= no-self-use\n@method_decorator(login_required, name='dispatch')\nclass About(View):\n ''' create invites '''\n def get(self, request):\n ''' more information about the instance '''\n data = {\n 'title': 'About',\n }\n return TemplateResponse(request, 'about.html', data)\n\nclass Home(View):\n ''' discover page or home feed depending on auth '''\n def get(self, request):\n ''' this is the same as the feed on the home tab '''\n if request.user.is_authenticated:\n feed_view = Feed.as_view()\n return feed_view(request, 'home')\n discover_view = Discover.as_view()\n return discover_view(request)\n\nclass Discover(View):\n ''' preview of recently reviewed books '''\n def get(self, request):\n ''' tiled book activity page '''\n books = models.Edition.objects.filter(\n review__published_date__isnull=False,\n review__user__local=True,\n review__privacy__in=['public', 'unlisted'],\n ).exclude(\n cover__exact=''\n ).annotate(\n Max('review__published_date')\n ).order_by('-review__published_date__max')[:6]\n\n ratings = {}\n for book in books:\n reviews = models.Review.objects.filter(\n book__in=book.parent_work.editions.all()\n )\n reviews = get_activity_feed(\n request.user, ['public', 'unlisted'], queryset=reviews)\n ratings[book.id] = reviews.aggregate(Avg('rating'))['rating__avg']\n data = {\n 'title': 'Discover',\n 'register_form': forms.RegisterForm(),\n 'books': list(set(books)),\n 'ratings': ratings\n }\n return TemplateResponse(request, 'discover.html', data)\n\n\n@method_decorator(login_required, name='dispatch')\nclass Feed(View):\n ''' activity stream '''\n def get(self, request, tab):\n ''' user's homepage with activity feed '''\n try:\n page = int(request.GET.get('page', 1))\n except ValueError:\n page = 1\n\n suggested_books = get_suggested_books(request.user)\n\n if tab == 'home':\n activities = get_activity_feed(\n request.user, ['public', 'unlisted', 'followers'],\n following_only=True)\n elif tab == 'local':\n activities = get_activity_feed(\n request.user, ['public', 'followers'], local_only=True)\n else:\n activities = get_activity_feed(\n request.user, ['public', 
'followers'])\n paginated = Paginator(activities, PAGE_LENGTH)\n\n goal = models.AnnualGoal.objects.filter(\n user=request.user, year=timezone.now().year\n ).first()\n data = {\n 'title': 'Updates Feed',\n 'user': request.user,\n 'suggested_books': suggested_books,\n 'activities': paginated.page(page),\n 'tab': tab,\n 'goal': goal,\n 'goal_form': forms.GoalForm(),\n }\n return TemplateResponse(request, 'feed.html', data)\n\n\ndef get_suggested_books(user, max_books=5):\n ''' helper to get a user's recent books '''\n book_count = 0\n preset_shelves = [\n ('reading', max_books), ('read', 2), ('to-read', max_books)\n ]\n suggested_books = []\n for (preset, shelf_max) in preset_shelves:\n limit = shelf_max if shelf_max < (max_books - book_count) \\\n else max_books - book_count\n shelf = user.shelf_set.get(identifier=preset)\n\n shelf_books = shelf.shelfbook_set.order_by(\n '-updated_date'\n ).all()[:limit]\n if not shelf_books:\n continue\n shelf_preview = {\n 'name': shelf.name,\n 'books': [s.book for s in shelf_books]\n }\n suggested_books.append(shelf_preview)\n book_count += len(shelf_preview['books'])\n return suggested_books\n", "path": "bookwyrm/views/landing.py"}]} | 1,870 | 90 |
problem_id: gh_patches_debug_16896 | source: rasdani/github-patches | task_type: git_diff | in_source_id: webkom__lego-1069

prompt:

You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Wrong penalty count in email
The counter in the penalty email is still wrong:

</issue>
<code>
[start of lego/apps/feed/feed_handlers/penalty_handler.py]
1 from lego.apps.feed.activities import Activity
2 from lego.apps.feed.feed_handlers.base_handler import BaseHandler
3 from lego.apps.feed.feed_manager import feed_manager
4 from lego.apps.feed.feeds.notification_feed import NotificationFeed
5 from lego.apps.feed.registry import register_handler
6 from lego.apps.feed.verbs import PenaltyVerb
7 from lego.apps.users.models import Penalty
8 from lego.apps.users.notifications import PenaltyNotification
9
10
11 class PenaltyHandler(BaseHandler):
12 model = Penalty
13 manager = feed_manager
14
15 def get_activity(self, penalty):
16 return Activity(
17 actor=penalty.source_event, verb=PenaltyVerb, object=penalty, target=penalty.user,
18 time=penalty.created_at, extra_context={
19 'reason': penalty.reason,
20 'weight': penalty.weight,
21 'total': penalty.user.number_of_penalties()
22 }
23 )
24
25 def handle_create(self, penalty):
26 activity = self.get_activity(penalty)
27 self.manager.add_activity(activity, [penalty.user.pk], [NotificationFeed])
28
29 # Send Notification
30 notification = PenaltyNotification(penalty.user, penalty=penalty)
31 notification.notify()
32
33 def handle_update(self, penalty):
34 activity = self.get_activity(penalty)
35 self.manager.add_activity(activity, [penalty.user.pk], [NotificationFeed])
36
37 def handle_delete(self, penalty):
38 activity = self.get_activity(penalty)
39 self.manager.remove_activity(activity, [penalty.user.pk], [NotificationFeed])
40
41
42 register_handler(PenaltyHandler)
43
[end of lego/apps/feed/feed_handlers/penalty_handler.py]
[start of lego/apps/users/notifications.py]
1 from lego.apps.notifications.constants import PENALTY_CREATION
2 from lego.apps.notifications.notification import Notification
3
4
5 class PenaltyNotification(Notification):
6
7 name = PENALTY_CREATION
8
9 def generate_mail(self):
10 penalty = self.kwargs['penalty']
11
12 return self._delay_mail(
13 to_email=self.user.email,
14 context={
15 'name': self.user.full_name,
16 'weight': penalty.weight,
17 'event': penalty.source_event.title,
18 'reason': penalty.reason,
19 'total': self.user.number_of_penalties()
20 },
21 subject=f'Du har fått en ny prikk',
22 plain_template='users/email/penalty.txt',
23 html_template='users/email/penalty.html',
24 )
25
26 def generate_push(self):
27 penalty = self.kwargs['penalty']
28
29 return self._delay_push(
30 template='users/push/penalty.txt', context={
31 'weight': penalty.weight,
32 'event': penalty.source_event.title,
33 }, instance=penalty
34 )
35
[end of lego/apps/users/notifications.py]
</code>
(patch-format instructions and example patch omitted here; they are identical to those shown in the first row)
golden_diff:

diff --git a/lego/apps/feed/feed_handlers/penalty_handler.py b/lego/apps/feed/feed_handlers/penalty_handler.py
--- a/lego/apps/feed/feed_handlers/penalty_handler.py
+++ b/lego/apps/feed/feed_handlers/penalty_handler.py
@@ -18,7 +18,6 @@
time=penalty.created_at, extra_context={
'reason': penalty.reason,
'weight': penalty.weight,
- 'total': penalty.user.number_of_penalties()
}
)
diff --git a/lego/apps/users/notifications.py b/lego/apps/users/notifications.py
--- a/lego/apps/users/notifications.py
+++ b/lego/apps/users/notifications.py
@@ -16,7 +16,6 @@
'weight': penalty.weight,
'event': penalty.source_event.title,
'reason': penalty.reason,
- 'total': self.user.number_of_penalties()
},
subject=f'Du har fått en ny prikk',
plain_template='users/email/penalty.txt',
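The fix simply drops the computed total from both payloads rather than repairing it. A self-contained illustration (generic Python, not lego code) of one way such embedded counters go wrong, namely that they are frozen when the message is built, not when it is read:

```python
penalties = []

def build_mail_context(user_penalties):
    # The counter is snapshotted here, at build time.
    return {"weight": 1, "total": len(user_penalties)}

context = build_mail_context(penalties)
penalties.append("late cancellation")  # state changes after the snapshot
assert context["total"] == 0           # stale: the live total is now 1
```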
| {"golden_diff": "diff --git a/lego/apps/feed/feed_handlers/penalty_handler.py b/lego/apps/feed/feed_handlers/penalty_handler.py\n--- a/lego/apps/feed/feed_handlers/penalty_handler.py\n+++ b/lego/apps/feed/feed_handlers/penalty_handler.py\n@@ -18,7 +18,6 @@\n time=penalty.created_at, extra_context={\n 'reason': penalty.reason,\n 'weight': penalty.weight,\n- 'total': penalty.user.number_of_penalties()\n }\n )\n \ndiff --git a/lego/apps/users/notifications.py b/lego/apps/users/notifications.py\n--- a/lego/apps/users/notifications.py\n+++ b/lego/apps/users/notifications.py\n@@ -16,7 +16,6 @@\n 'weight': penalty.weight,\n 'event': penalty.source_event.title,\n 'reason': penalty.reason,\n- 'total': self.user.number_of_penalties()\n },\n subject=f'Du har f\u00e5tt en ny prikk',\n plain_template='users/email/penalty.txt',\n", "issue": "Wrong penalty count in email\nThe counter in the penalty email is still wrong:\r\n\r\n\r\n\n", "before_files": [{"content": "from lego.apps.feed.activities import Activity\nfrom lego.apps.feed.feed_handlers.base_handler import BaseHandler\nfrom lego.apps.feed.feed_manager import feed_manager\nfrom lego.apps.feed.feeds.notification_feed import NotificationFeed\nfrom lego.apps.feed.registry import register_handler\nfrom lego.apps.feed.verbs import PenaltyVerb\nfrom lego.apps.users.models import Penalty\nfrom lego.apps.users.notifications import PenaltyNotification\n\n\nclass PenaltyHandler(BaseHandler):\n model = Penalty\n manager = feed_manager\n\n def get_activity(self, penalty):\n return Activity(\n actor=penalty.source_event, verb=PenaltyVerb, object=penalty, target=penalty.user,\n time=penalty.created_at, extra_context={\n 'reason': penalty.reason,\n 'weight': penalty.weight,\n 'total': penalty.user.number_of_penalties()\n }\n )\n\n def handle_create(self, penalty):\n activity = self.get_activity(penalty)\n self.manager.add_activity(activity, [penalty.user.pk], [NotificationFeed])\n\n # Send Notification\n notification = PenaltyNotification(penalty.user, penalty=penalty)\n notification.notify()\n\n def handle_update(self, penalty):\n activity = self.get_activity(penalty)\n self.manager.add_activity(activity, [penalty.user.pk], [NotificationFeed])\n\n def handle_delete(self, penalty):\n activity = self.get_activity(penalty)\n self.manager.remove_activity(activity, [penalty.user.pk], [NotificationFeed])\n\n\nregister_handler(PenaltyHandler)\n", "path": "lego/apps/feed/feed_handlers/penalty_handler.py"}, {"content": "from lego.apps.notifications.constants import PENALTY_CREATION\nfrom lego.apps.notifications.notification import Notification\n\n\nclass PenaltyNotification(Notification):\n\n name = PENALTY_CREATION\n\n def generate_mail(self):\n penalty = self.kwargs['penalty']\n\n return self._delay_mail(\n to_email=self.user.email,\n context={\n 'name': self.user.full_name,\n 'weight': penalty.weight,\n 'event': penalty.source_event.title,\n 'reason': penalty.reason,\n 'total': self.user.number_of_penalties()\n },\n subject=f'Du har f\u00e5tt en ny prikk',\n plain_template='users/email/penalty.txt',\n html_template='users/email/penalty.html',\n )\n\n def generate_push(self):\n penalty = self.kwargs['penalty']\n\n return self._delay_push(\n template='users/push/penalty.txt', context={\n 'weight': penalty.weight,\n 'event': penalty.source_event.title,\n }, instance=penalty\n )\n", "path": "lego/apps/users/notifications.py"}]} | 1,350 | 232 |
problem_id: gh_patches_debug_24988 | source: rasdani/github-patches | task_type: git_diff | in_source_id: dotkom__onlineweb4-1681

prompt:

You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Deleting a Careeropportunity in the dashboard does not actually delete
When trying to delete a career opportunity in the dashboard, it does not actually delete it.
</issue>
<code>
[start of apps/careeropportunity/dashboard/views.py]
1 # -*- encoding: utf-8 -*-
2 from django.contrib import messages
3 from django.contrib.auth.decorators import login_required
4 from django.core.exceptions import PermissionDenied
5 from django.shortcuts import get_object_or_404, redirect, render
6 from django.utils import timezone
7 from guardian.decorators import permission_required
8
9 from apps.careeropportunity.forms import AddCareerOpportunityForm
10 from apps.careeropportunity.models import CareerOpportunity
11 from apps.dashboard.tools import get_base_context, has_access
12
13
14 @login_required
15 @permission_required('careeropportunity.view_careeropportunity', return_403=True)
16 def index(request):
17
18 if not has_access(request):
19 raise PermissionDenied
20
21 context = get_base_context(request)
22
23 # "cops" is short for "careeropportunities" which is a fucking long word
24 # "cop" is short for "careeropportunity" which also is a fucking long word
25 cops = CareerOpportunity.objects.all()
26 context['cops'] = cops.filter(end__gte=timezone.now()).order_by('end')
27 context['archive'] = cops.filter(end__lte=timezone.now()).order_by('-id')
28
29 return render(request, 'careeropportunity/dashboard/index.html', context)
30
31
32 @login_required
33 @permission_required('careeropportunity.change_careeropportunity', return_403=True)
34 def detail(request, opportunity_id=None):
35
36 if not has_access(request):
37 raise PermissionDenied
38
39 context = get_base_context(request)
40 cop = None
41 if opportunity_id:
42 cop = get_object_or_404(CareerOpportunity, pk=opportunity_id)
43 context['cop'] = cop
44 context['form'] = AddCareerOpportunityForm(instance=cop)
45 else:
46 context['form'] = AddCareerOpportunityForm()
47
48 if request.method == 'POST':
49 if cop:
50 form = AddCareerOpportunityForm(data=request.POST, instance=cop)
51 else:
52 form = AddCareerOpportunityForm(data=request.POST)
53
54 if form.is_valid():
55 form.save()
56 messages.success(request, 'La til ny karrieremulighet')
57 return redirect(index)
58 else:
59 context['form'] = form
60 messages.error(request,
61 'Skjemaet ble ikke korrekt utfylt. Se etter markerte felter for å se hva som gikk galt.')
62
63 return render(request, 'careeropportunity/dashboard/detail.html', context)
64
65
66 @login_required
67 @permission_required('careeropportunity.change_careeropportunity', return_403=True)
68 def delete(request, opportunity_id=None):
69 if not has_access(request):
70 raise PermissionDenied
71
72 cop = get_object_or_404(CareerOpportunity, pk=opportunity_id)
73 cop.delete()
74 messages.success(request, 'Slettet karrieremuligheten')
75 return redirect(index)
76
[end of apps/careeropportunity/dashboard/views.py]
</code>
(patch-format instructions and example patch omitted here; they are identical to those shown in the first row)
golden_diff:

diff --git a/apps/careeropportunity/dashboard/views.py b/apps/careeropportunity/dashboard/views.py
--- a/apps/careeropportunity/dashboard/views.py
+++ b/apps/careeropportunity/dashboard/views.py
@@ -1,4 +1,6 @@
# -*- encoding: utf-8 -*-
+import logging
+
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
@@ -32,6 +34,8 @@
@login_required
@permission_required('careeropportunity.change_careeropportunity', return_403=True)
def detail(request, opportunity_id=None):
+ logger = logging.getLogger(__name__)
+ logger.debug('Editing careeropportunity with id: %s' % (opportunity_id))
if not has_access(request):
raise PermissionDenied
@@ -66,6 +70,8 @@
@login_required
@permission_required('careeropportunity.change_careeropportunity', return_403=True)
def delete(request, opportunity_id=None):
+ logger = logging.getLogger(__name__)
+ logger.debug('Deleting careeropportunitywith id: %s' % (opportunity_id))
if not has_access(request):
raise PermissionDenied
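The accepted patch only adds per-view debug logging; the delete logic itself is unchanged. A small hedged sketch of a common alternative arrangement, with a module-level logger and formatting deferred to the logging framework:

```python
import logging

logger = logging.getLogger(__name__)  # created once per module, not per request

def log_delete(opportunity_id):
    # Passing opportunity_id as an argument delays the string interpolation
    # until the DEBUG record is actually emitted.
    logger.debug('Deleting careeropportunity with id: %s', opportunity_id)

log_delete(42)
```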
| {"golden_diff": "diff --git a/apps/careeropportunity/dashboard/views.py b/apps/careeropportunity/dashboard/views.py\n--- a/apps/careeropportunity/dashboard/views.py\n+++ b/apps/careeropportunity/dashboard/views.py\n@@ -1,4 +1,6 @@\n # -*- encoding: utf-8 -*-\n+import logging\n+\n from django.contrib import messages\n from django.contrib.auth.decorators import login_required\n from django.core.exceptions import PermissionDenied\n@@ -32,6 +34,8 @@\n @login_required\n @permission_required('careeropportunity.change_careeropportunity', return_403=True)\n def detail(request, opportunity_id=None):\n+ logger = logging.getLogger(__name__)\n+ logger.debug('Editing careeropportunity with id: %s' % (opportunity_id))\n \n if not has_access(request):\n raise PermissionDenied\n@@ -66,6 +70,8 @@\n @login_required\n @permission_required('careeropportunity.change_careeropportunity', return_403=True)\n def delete(request, opportunity_id=None):\n+ logger = logging.getLogger(__name__)\n+ logger.debug('Deleting careeropportunitywith id: %s' % (opportunity_id))\n if not has_access(request):\n raise PermissionDenied\n", "issue": "Deleting a Careeropportunity in the dashboard does not actually delete\nWhen trying to delete a career opportunity in the dashboard, it does not actually delete it.\n\n", "before_files": [{"content": "# -*- encoding: utf-8 -*-\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils import timezone\nfrom guardian.decorators import permission_required\n\nfrom apps.careeropportunity.forms import AddCareerOpportunityForm\nfrom apps.careeropportunity.models import CareerOpportunity\nfrom apps.dashboard.tools import get_base_context, has_access\n\n\n@login_required\n@permission_required('careeropportunity.view_careeropportunity', return_403=True)\ndef index(request):\n\n if not has_access(request):\n raise PermissionDenied\n\n context = get_base_context(request)\n\n # \"cops\" is short for \"careeropportunities\" which is a fucking long word\n # \"cop\" is short for \"careeropportunity\" which also is a fucking long word\n cops = CareerOpportunity.objects.all()\n context['cops'] = cops.filter(end__gte=timezone.now()).order_by('end')\n context['archive'] = cops.filter(end__lte=timezone.now()).order_by('-id')\n\n return render(request, 'careeropportunity/dashboard/index.html', context)\n\n\n@login_required\n@permission_required('careeropportunity.change_careeropportunity', return_403=True)\ndef detail(request, opportunity_id=None):\n\n if not has_access(request):\n raise PermissionDenied\n\n context = get_base_context(request)\n cop = None\n if opportunity_id:\n cop = get_object_or_404(CareerOpportunity, pk=opportunity_id)\n context['cop'] = cop\n context['form'] = AddCareerOpportunityForm(instance=cop)\n else:\n context['form'] = AddCareerOpportunityForm()\n\n if request.method == 'POST':\n if cop:\n form = AddCareerOpportunityForm(data=request.POST, instance=cop)\n else:\n form = AddCareerOpportunityForm(data=request.POST)\n\n if form.is_valid():\n form.save()\n messages.success(request, 'La til ny karrieremulighet')\n return redirect(index)\n else:\n context['form'] = form\n messages.error(request,\n 'Skjemaet ble ikke korrekt utfylt. 
Se etter markerte felter for \u00e5 se hva som gikk galt.')\n\n return render(request, 'careeropportunity/dashboard/detail.html', context)\n\n\n@login_required\n@permission_required('careeropportunity.change_careeropportunity', return_403=True)\ndef delete(request, opportunity_id=None):\n if not has_access(request):\n raise PermissionDenied\n\n cop = get_object_or_404(CareerOpportunity, pk=opportunity_id)\n cop.delete()\n messages.success(request, 'Slettet karrieremuligheten')\n return redirect(index)\n", "path": "apps/careeropportunity/dashboard/views.py"}]} | 1,341 | 270 |
problem_id: gh_patches_debug_5191 | source: rasdani/github-patches | task_type: git_diff | in_source_id: nf-core__tools-1333

prompt:

You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Space missing in tip message for --fix files_unchanged
<!--
# nf-core/tools bug report
Hi there!
Thanks for telling us about a problem with the nf-core/tools package.
Please delete this text and anything that's not relevant from the template below:
-->
## Description of the bug
a space is missing before `--fix files_unchanged`
```
Tip: Some of these linting errors can automatically be resolved with the
following command:
nf-core lint --dir /home/runner/work/rnavar/rnavar--fix files_unchanged
```
## Steps to reproduce
https://github.com/nf-core/rnavar/runs/4317868056?check_suite_focus=true#step:6:100
## Expected behaviour
<!-- A clear and concise description of what you expected to happen. -->
## System
- Hardware: <!-- [e.g. HPC, Desktop, Cloud...] -->
- Executor: <!-- [e.g. slurm, local, awsbatch...] -->
- OS: <!-- [e.g. CentOS Linux, macOS, Linux Mint...] -->
- Version of nf-core/tools: <!-- [e.g. 1.1, 1.5, 1.8.2...] -->
- Python version: <!-- [e.g. 3.7, 3.8...] -->
## Nextflow Installation
- Version: <!-- [e.g. 19.10.0] -->
## Additional context
<!-- Add any other context about the problem here. -->
</issue>
<code>
[start of nf_core/lint_utils.py]
1 import rich
2 from rich.console import Console
3 from rich.table import Table
4 import logging
5
6 import nf_core.utils
7
8 log = logging.getLogger(__name__)
9
10 # Create a console used by all lint tests
11 console = Console(force_terminal=nf_core.utils.rich_force_colors())
12
13
14 def print_joint_summary(lint_obj, module_lint_obj):
15 """Print a joint summary of the general pipe lint tests and the module lint tests"""
16 nbr_passed = len(lint_obj.passed) + len(module_lint_obj.passed)
17 nbr_ignored = len(lint_obj.ignored)
18 nbr_fixed = len(lint_obj.fixed)
19 nbr_warned = len(lint_obj.warned) + len(module_lint_obj.warned)
20 nbr_failed = len(lint_obj.failed) + len(module_lint_obj.failed)
21
22 def _s(some_length):
23 return "" if some_length == 1 else "s"
24
25 summary_colour = "red" if nbr_failed > 0 else "green"
26 table = Table(box=rich.box.ROUNDED, style=summary_colour)
27 table.add_column(f"LINT RESULTS SUMMARY".format(nbr_passed), no_wrap=True)
28 table.add_row(r"[green][✔] {:>3} Test{} Passed".format(nbr_passed, _s(nbr_passed)))
29 if nbr_fixed:
30 table.add_row(r"[bright blue][?] {:>3} Test{} Fixed".format(nbr_fixed, _s(nbr_fixed)))
31 table.add_row(r"[grey58][?] {:>3} Test{} Ignored".format(nbr_ignored, _s(nbr_ignored)))
32 table.add_row(r"[yellow][!] {:>3} Test Warning{}".format(nbr_warned, _s(nbr_warned)))
33 table.add_row(r"[red][✗] {:>3} Test{} Failed".format(nbr_failed, _s(nbr_failed)))
34 console.print(table)
35
36
37 def print_fixes(lint_obj, module_lint_obj):
38 """Prints available and applied fixes"""
39
40 if len(lint_obj.could_fix):
41 fix_cmd = "nf-core lint {}--fix {}".format(
42 "" if lint_obj.wf_path == "." else f"--dir {lint_obj.wf_path}", " --fix ".join(lint_obj.could_fix)
43 )
44 console.print(
45 f"\nTip: Some of these linting errors can automatically be resolved with the following command:\n\n[blue] {fix_cmd}\n"
46 )
47 if len(lint_obj.fix):
48 console.print(
49 "Automatic fixes applied. Please check with 'git diff' and revert any changes you do not want with 'git checkout <file>'."
50 )
51
[end of nf_core/lint_utils.py]
</code>
(patch-format instructions and example patch omitted here; they are identical to those shown in the first row)
golden_diff:

diff --git a/nf_core/lint_utils.py b/nf_core/lint_utils.py
--- a/nf_core/lint_utils.py
+++ b/nf_core/lint_utils.py
@@ -38,7 +38,7 @@
"""Prints available and applied fixes"""
if len(lint_obj.could_fix):
- fix_cmd = "nf-core lint {}--fix {}".format(
+ fix_cmd = "nf-core lint {} --fix {}".format(
"" if lint_obj.wf_path == "." else f"--dir {lint_obj.wf_path}", " --fix ".join(lint_obj.could_fix)
)
console.print(
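The one-character fix is easy to check in isolation. A self-contained reproduction of the spacing bug (the workflow path is made up):

```python
wf_path = "/home/runner/work/rnavar/rnavar"
dir_part = "" if wf_path == "." else f"--dir {wf_path}"

broken = "nf-core lint {}--fix {}".format(dir_part, "files_unchanged")
fixed = "nf-core lint {} --fix {}".format(dir_part, "files_unchanged")

print(broken)  # nf-core lint --dir /home/runner/work/rnavar/rnavar--fix files_unchanged
print(fixed)   # nf-core lint --dir /home/runner/work/rnavar/rnavar --fix files_unchanged
```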
| {"golden_diff": "diff --git a/nf_core/lint_utils.py b/nf_core/lint_utils.py\n--- a/nf_core/lint_utils.py\n+++ b/nf_core/lint_utils.py\n@@ -38,7 +38,7 @@\n \"\"\"Prints available and applied fixes\"\"\"\n \n if len(lint_obj.could_fix):\n- fix_cmd = \"nf-core lint {}--fix {}\".format(\n+ fix_cmd = \"nf-core lint {} --fix {}\".format(\n \"\" if lint_obj.wf_path == \".\" else f\"--dir {lint_obj.wf_path}\", \" --fix \".join(lint_obj.could_fix)\n )\n console.print(\n", "issue": "Space missing in tip message for --fix files_unchanged\n<!--\r\n# nf-core/tools bug report\r\n\r\nHi there!\r\n\r\nThanks for telling us about a problem with the nf-core/tools package.\r\nPlease delete this text and anything that's not relevant from the template below:\r\n-->\r\n\r\n## Description of the bug\r\n\r\na space is missing before `--fix files_unchanged`\r\n\r\n```\r\nTip: Some of these linting errors can automatically be resolved with the \r\nfollowing command:\r\n\r\n nf-core lint --dir /home/runner/work/rnavar/rnavar--fix files_unchanged\r\n```\r\n\r\n## Steps to reproduce\r\n\r\nhttps://github.com/nf-core/rnavar/runs/4317868056?check_suite_focus=true#step:6:100\r\n\r\n## Expected behaviour\r\n\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n## System\r\n\r\n- Hardware: <!-- [e.g. HPC, Desktop, Cloud...] -->\r\n- Executor: <!-- [e.g. slurm, local, awsbatch...] -->\r\n- OS: <!-- [e.g. CentOS Linux, macOS, Linux Mint...] -->\r\n- Version of nf-core/tools: <!-- [e.g. 1.1, 1.5, 1.8.2...] -->\r\n- Python version: <!-- [e.g. 3.7, 3.8...] -->\r\n\r\n## Nextflow Installation\r\n\r\n- Version: <!-- [e.g. 19.10.0] -->\r\n\r\n## Additional context\r\n\r\n<!-- Add any other context about the problem here. -->\r\n\n", "before_files": [{"content": "import rich\nfrom rich.console import Console\nfrom rich.table import Table\nimport logging\n\nimport nf_core.utils\n\nlog = logging.getLogger(__name__)\n\n# Create a console used by all lint tests\nconsole = Console(force_terminal=nf_core.utils.rich_force_colors())\n\n\ndef print_joint_summary(lint_obj, module_lint_obj):\n \"\"\"Print a joint summary of the general pipe lint tests and the module lint tests\"\"\"\n nbr_passed = len(lint_obj.passed) + len(module_lint_obj.passed)\n nbr_ignored = len(lint_obj.ignored)\n nbr_fixed = len(lint_obj.fixed)\n nbr_warned = len(lint_obj.warned) + len(module_lint_obj.warned)\n nbr_failed = len(lint_obj.failed) + len(module_lint_obj.failed)\n\n def _s(some_length):\n return \"\" if some_length == 1 else \"s\"\n\n summary_colour = \"red\" if nbr_failed > 0 else \"green\"\n table = Table(box=rich.box.ROUNDED, style=summary_colour)\n table.add_column(f\"LINT RESULTS SUMMARY\".format(nbr_passed), no_wrap=True)\n table.add_row(r\"[green][\u2714] {:>3} Test{} Passed\".format(nbr_passed, _s(nbr_passed)))\n if nbr_fixed:\n table.add_row(r\"[bright blue][?] {:>3} Test{} Fixed\".format(nbr_fixed, _s(nbr_fixed)))\n table.add_row(r\"[grey58][?] {:>3} Test{} Ignored\".format(nbr_ignored, _s(nbr_ignored)))\n table.add_row(r\"[yellow][!] 
{:>3} Test Warning{}\".format(nbr_warned, _s(nbr_warned)))\n table.add_row(r\"[red][\u2717] {:>3} Test{} Failed\".format(nbr_failed, _s(nbr_failed)))\n console.print(table)\n\n\ndef print_fixes(lint_obj, module_lint_obj):\n \"\"\"Prints available and applied fixes\"\"\"\n\n if len(lint_obj.could_fix):\n fix_cmd = \"nf-core lint {}--fix {}\".format(\n \"\" if lint_obj.wf_path == \".\" else f\"--dir {lint_obj.wf_path}\", \" --fix \".join(lint_obj.could_fix)\n )\n console.print(\n f\"\\nTip: Some of these linting errors can automatically be resolved with the following command:\\n\\n[blue] {fix_cmd}\\n\"\n )\n if len(lint_obj.fix):\n console.print(\n \"Automatic fixes applied. Please check with 'git diff' and revert any changes you do not want with 'git checkout <file>'.\"\n )\n", "path": "nf_core/lint_utils.py"}]} | 1,530 | 142 |
problem_id: gh_patches_debug_15169 | source: rasdani/github-patches | task_type: git_diff | in_source_id: bookwyrm-social__bookwyrm-1171

prompt:

You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pending import csv lines displayed under "Successful" title until tried
Importing a CSV into Bookwyrm shows titles being "successfully imported" but they do not show up in the library.
Here's screenshots of the import results, neither the successful nor the failed imports seem to show up:


[Attached is the file which I attempted to import.](https://github.com/bookwyrm-social/bookwyrm/files/6523421/Tomat0.s.Library.csv)
</issue>
<code>
[start of bookwyrm/views/import_data.py]
1 """ import books from another app """
2 from io import TextIOWrapper
3
4 from django.contrib.auth.decorators import login_required
5 from django.core.exceptions import PermissionDenied
6 from django.http import HttpResponseBadRequest
7 from django.shortcuts import get_object_or_404, redirect
8 from django.template.response import TemplateResponse
9 from django.utils.decorators import method_decorator
10 from django.utils.translation import gettext_lazy as _
11 from django.views import View
12
13 from bookwyrm import forms, models
14 from bookwyrm.importers import (
15 Importer,
16 LibrarythingImporter,
17 GoodreadsImporter,
18 StorygraphImporter,
19 )
20 from bookwyrm.tasks import app
21
22 # pylint: disable= no-self-use
23 @method_decorator(login_required, name="dispatch")
24 class Import(View):
25 """import view"""
26
27 def get(self, request):
28 """load import page"""
29 return TemplateResponse(
30 request,
31 "import.html",
32 {
33 "import_form": forms.ImportForm(),
34 "jobs": models.ImportJob.objects.filter(user=request.user).order_by(
35 "-created_date"
36 ),
37 },
38 )
39
40 def post(self, request):
41 """ingest a goodreads csv"""
42 form = forms.ImportForm(request.POST, request.FILES)
43 if form.is_valid():
44 include_reviews = request.POST.get("include_reviews") == "on"
45 privacy = request.POST.get("privacy")
46 source = request.POST.get("source")
47
48 importer = None
49 if source == "LibraryThing":
50 importer = LibrarythingImporter()
51 elif source == "Storygraph":
52 importer = StorygraphImporter()
53 else:
54 # Default : GoodReads
55 importer = GoodreadsImporter()
56
57 try:
58 job = importer.create_job(
59 request.user,
60 TextIOWrapper(
61 request.FILES["csv_file"], encoding=importer.encoding
62 ),
63 include_reviews,
64 privacy,
65 )
66 except (UnicodeDecodeError, ValueError, KeyError):
67 return HttpResponseBadRequest(_("Not a valid csv file"))
68
69 importer.start_import(job)
70
71 return redirect("/import/%d" % job.id)
72 return HttpResponseBadRequest()
73
74
75 @method_decorator(login_required, name="dispatch")
76 class ImportStatus(View):
77 """status of an existing import"""
78
79 def get(self, request, job_id):
80 """status of an import job"""
81 job = models.ImportJob.objects.get(id=job_id)
82 if job.user != request.user:
83 raise PermissionDenied
84 try:
85 task = app.AsyncResult(job.task_id)
86 except ValueError:
87 task = None
88 items = job.items.order_by("index").all()
89 failed_items = [i for i in items if i.fail_reason]
90 items = [i for i in items if not i.fail_reason]
91 return TemplateResponse(
92 request,
93 "import_status.html",
94 {"job": job, "items": items, "failed_items": failed_items, "task": task},
95 )
96
97 def post(self, request, job_id):
98 """retry lines from an import"""
99 job = get_object_or_404(models.ImportJob, id=job_id)
100 items = []
101 for item in request.POST.getlist("import_item"):
102 items.append(get_object_or_404(models.ImportItem, id=item))
103
104 importer = Importer()
105 job = importer.create_retry_job(
106 request.user,
107 job,
108 items,
109 )
110 importer.start_import(job)
111 return redirect("/import/%d" % job.id)
112
[end of bookwyrm/views/import_data.py]
</code>
(patch-format instructions and example patch omitted here; they are identical to those shown in the first row)
golden_diff:

diff --git a/bookwyrm/views/import_data.py b/bookwyrm/views/import_data.py
--- a/bookwyrm/views/import_data.py
+++ b/bookwyrm/views/import_data.py
@@ -78,13 +78,15 @@
def get(self, request, job_id):
"""status of an import job"""
- job = models.ImportJob.objects.get(id=job_id)
+ job = get_object_or_404(models.ImportJob, id=job_id)
if job.user != request.user:
raise PermissionDenied
+
try:
task = app.AsyncResult(job.task_id)
except ValueError:
task = None
+
items = job.items.order_by("index").all()
failed_items = [i for i in items if i.fail_reason]
items = [i for i in items if not i.fail_reason]
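Beyond the added blank lines, the first hunk swaps a bare `.get()` for `get_object_or_404`, so a missing job id yields an HTTP 404 page instead of an unhandled `DoesNotExist` (a 500). A simplified sketch of what the Django helper does; the real one in `django.shortcuts` also accepts querysets and managers:

```python
from django.http import Http404

def get_object_or_404_sketch(model, **lookup):
    try:
        return model.objects.get(**lookup)
    except model.DoesNotExist:
        # The miss becomes a normal "not found" response instead of a crash.
        raise Http404(f"No {model.__name__} matches the given query.")
```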
| {"golden_diff": "diff --git a/bookwyrm/views/import_data.py b/bookwyrm/views/import_data.py\n--- a/bookwyrm/views/import_data.py\n+++ b/bookwyrm/views/import_data.py\n@@ -78,13 +78,15 @@\n \n def get(self, request, job_id):\n \"\"\"status of an import job\"\"\"\n- job = models.ImportJob.objects.get(id=job_id)\n+ job = get_object_or_404(models.ImportJob, id=job_id)\n if job.user != request.user:\n raise PermissionDenied\n+\n try:\n task = app.AsyncResult(job.task_id)\n except ValueError:\n task = None\n+\n items = job.items.order_by(\"index\").all()\n failed_items = [i for i in items if i.fail_reason]\n items = [i for i in items if not i.fail_reason]\n", "issue": "Pending import csv lines displayed under \"Successful\" title until tried\nImporting a CSV into Bookwyrm shows titles being \"successfully imported\" but they do not show up in the library.\r\n\r\nHere's screenshots of the import results, neither the successful nor the failed imports seem to show up:\r\n\r\n\r\n\r\n\r\n[Attached is the file which I attempted to import.](https://github.com/bookwyrm-social/bookwyrm/files/6523421/Tomat0.s.Library.csv)\r\n\r\n\n", "before_files": [{"content": "\"\"\" import books from another app \"\"\"\nfrom io import TextIOWrapper\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import HttpResponseBadRequest\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.importers import (\n Importer,\n LibrarythingImporter,\n GoodreadsImporter,\n StorygraphImporter,\n)\nfrom bookwyrm.tasks import app\n\n# pylint: disable= no-self-use\n@method_decorator(login_required, name=\"dispatch\")\nclass Import(View):\n \"\"\"import view\"\"\"\n\n def get(self, request):\n \"\"\"load import page\"\"\"\n return TemplateResponse(\n request,\n \"import.html\",\n {\n \"import_form\": forms.ImportForm(),\n \"jobs\": models.ImportJob.objects.filter(user=request.user).order_by(\n \"-created_date\"\n ),\n },\n )\n\n def post(self, request):\n \"\"\"ingest a goodreads csv\"\"\"\n form = forms.ImportForm(request.POST, request.FILES)\n if form.is_valid():\n include_reviews = request.POST.get(\"include_reviews\") == \"on\"\n privacy = request.POST.get(\"privacy\")\n source = request.POST.get(\"source\")\n\n importer = None\n if source == \"LibraryThing\":\n importer = LibrarythingImporter()\n elif source == \"Storygraph\":\n importer = StorygraphImporter()\n else:\n # Default : GoodReads\n importer = GoodreadsImporter()\n\n try:\n job = importer.create_job(\n request.user,\n TextIOWrapper(\n request.FILES[\"csv_file\"], encoding=importer.encoding\n ),\n include_reviews,\n privacy,\n )\n except (UnicodeDecodeError, ValueError, KeyError):\n return HttpResponseBadRequest(_(\"Not a valid csv file\"))\n\n importer.start_import(job)\n\n return redirect(\"/import/%d\" % job.id)\n return HttpResponseBadRequest()\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass ImportStatus(View):\n \"\"\"status of an existing import\"\"\"\n\n def get(self, request, job_id):\n \"\"\"status of an import job\"\"\"\n job = models.ImportJob.objects.get(id=job_id)\n if job.user != request.user:\n raise PermissionDenied\n try:\n task = app.AsyncResult(job.task_id)\n except ValueError:\n task = None\n items = 
job.items.order_by(\"index\").all()\n failed_items = [i for i in items if i.fail_reason]\n items = [i for i in items if not i.fail_reason]\n return TemplateResponse(\n request,\n \"import_status.html\",\n {\"job\": job, \"items\": items, \"failed_items\": failed_items, \"task\": task},\n )\n\n def post(self, request, job_id):\n \"\"\"retry lines from an import\"\"\"\n job = get_object_or_404(models.ImportJob, id=job_id)\n items = []\n for item in request.POST.getlist(\"import_item\"):\n items.append(get_object_or_404(models.ImportItem, id=item))\n\n importer = Importer()\n job = importer.create_retry_job(\n request.user,\n job,\n items,\n )\n importer.start_import(job)\n return redirect(\"/import/%d\" % job.id)\n", "path": "bookwyrm/views/import_data.py"}]} | 1,626 | 187 |
problem_id: gh_patches_debug_10059 | source: rasdani/github-patches | task_type: git_diff | in_source_id: scrapy__scrapy-5269

prompt:

You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ItemLoader: support non-TextResponse
At the moment, `ItemLoader(response=response)` fails if `response` is not a `TextResponse` instance.
Passing a binary response can still be useful, though. For example, to allow processors to access the response from their loader context, and hence be able to report the source URL (`response.url`) when reporting input issues.
</issue>
<code>
[start of scrapy/loader/__init__.py]
1 """
2 Item Loader
3
4 See documentation in docs/topics/loaders.rst
5 """
6 import itemloaders
7
8 from scrapy.item import Item
9 from scrapy.selector import Selector
10
11
12 class ItemLoader(itemloaders.ItemLoader):
13 """
14 A user-friendly abstraction to populate an :ref:`item <topics-items>` with data
15 by applying :ref:`field processors <topics-loaders-processors>` to scraped data.
16 When instantiated with a ``selector`` or a ``response`` it supports
17 data extraction from web pages using :ref:`selectors <topics-selectors>`.
18
19 :param item: The item instance to populate using subsequent calls to
20 :meth:`~ItemLoader.add_xpath`, :meth:`~ItemLoader.add_css`,
21 or :meth:`~ItemLoader.add_value`.
22 :type item: scrapy.item.Item
23
24 :param selector: The selector to extract data from, when using the
25 :meth:`add_xpath`, :meth:`add_css`, :meth:`replace_xpath`, or
26 :meth:`replace_css` method.
27 :type selector: :class:`~scrapy.selector.Selector` object
28
29 :param response: The response used to construct the selector using the
30 :attr:`default_selector_class`, unless the selector argument is given,
31 in which case this argument is ignored.
32 :type response: :class:`~scrapy.http.Response` object
33
34 If no item is given, one is instantiated automatically using the class in
35 :attr:`default_item_class`.
36
37 The item, selector, response and remaining keyword arguments are
38 assigned to the Loader context (accessible through the :attr:`context` attribute).
39
40 .. attribute:: item
41
42 The item object being parsed by this Item Loader.
43 This is mostly used as a property so, when attempting to override this
44 value, you may want to check out :attr:`default_item_class` first.
45
46 .. attribute:: context
47
48 The currently active :ref:`Context <loaders-context>` of this Item Loader.
49
50 .. attribute:: default_item_class
51
52 An :ref:`item <topics-items>` class (or factory), used to instantiate
53 items when not given in the ``__init__`` method.
54
55 .. attribute:: default_input_processor
56
57 The default input processor to use for those fields which don't specify
58 one.
59
60 .. attribute:: default_output_processor
61
62 The default output processor to use for those fields which don't specify
63 one.
64
65 .. attribute:: default_selector_class
66
67 The class used to construct the :attr:`selector` of this
68 :class:`ItemLoader`, if only a response is given in the ``__init__`` method.
69 If a selector is given in the ``__init__`` method this attribute is ignored.
70 This attribute is sometimes overridden in subclasses.
71
72 .. attribute:: selector
73
74 The :class:`~scrapy.selector.Selector` object to extract data from.
75 It's either the selector given in the ``__init__`` method or one created from
76 the response given in the ``__init__`` method using the
77 :attr:`default_selector_class`. This attribute is meant to be
78 read-only.
79 """
80
81 default_item_class = Item
82 default_selector_class = Selector
83
84 def __init__(self, item=None, selector=None, response=None, parent=None, **context):
85 if selector is None and response is not None:
86 selector = self.default_selector_class(response)
87 context.update(response=response)
88 super().__init__(item=item, selector=selector, parent=parent, **context)
89
[end of scrapy/loader/__init__.py]
</code>
(patch-format instructions and example patch omitted here; they are identical to those shown in the first row)
golden_diff:

diff --git a/scrapy/loader/__init__.py b/scrapy/loader/__init__.py
--- a/scrapy/loader/__init__.py
+++ b/scrapy/loader/__init__.py
@@ -83,6 +83,9 @@
def __init__(self, item=None, selector=None, response=None, parent=None, **context):
if selector is None and response is not None:
- selector = self.default_selector_class(response)
+ try:
+ selector = self.default_selector_class(response)
+ except AttributeError:
+ selector = None
context.update(response=response)
super().__init__(item=item, selector=selector, parent=parent, **context)
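Expected behaviour once the patch is applied, as a hedged usage sketch. The assertions describe the patched code path and assume the upstream itemloaders context behaves like a plain dict:

```python
from scrapy.http import Response
from scrapy.loader import ItemLoader

# A binary body: Response.text raises AttributeError, which the patch now
# swallows while building the default selector.
response = Response(url="https://example.com/report.pdf", body=b"%PDF-1.4")
loader = ItemLoader(response=response)

assert loader.selector is None                 # no selector for non-text bodies
assert loader.context["response"] is response  # still visible to processors
```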
| {"golden_diff": "diff --git a/scrapy/loader/__init__.py b/scrapy/loader/__init__.py\n--- a/scrapy/loader/__init__.py\n+++ b/scrapy/loader/__init__.py\n@@ -83,6 +83,9 @@\n \n def __init__(self, item=None, selector=None, response=None, parent=None, **context):\n if selector is None and response is not None:\n- selector = self.default_selector_class(response)\n+ try:\n+ selector = self.default_selector_class(response)\n+ except AttributeError:\n+ selector = None\n context.update(response=response)\n super().__init__(item=item, selector=selector, parent=parent, **context)\n", "issue": "ItemLoader: support non-TextResponse\nAt the moment, `ItemLoader(response=response)` fails if `response` is not a `TextResponse` instance.\r\n\r\nPassing a binary response can still be useful, though. For example, to allow processors to access the response from their loader context, and hence be able to report the source URL (`response.url`) when reporting input issues.\n", "before_files": [{"content": "\"\"\"\nItem Loader\n\nSee documentation in docs/topics/loaders.rst\n\"\"\"\nimport itemloaders\n\nfrom scrapy.item import Item\nfrom scrapy.selector import Selector\n\n\nclass ItemLoader(itemloaders.ItemLoader):\n \"\"\"\n A user-friendly abstraction to populate an :ref:`item <topics-items>` with data\n by applying :ref:`field processors <topics-loaders-processors>` to scraped data.\n When instantiated with a ``selector`` or a ``response`` it supports\n data extraction from web pages using :ref:`selectors <topics-selectors>`.\n\n :param item: The item instance to populate using subsequent calls to\n :meth:`~ItemLoader.add_xpath`, :meth:`~ItemLoader.add_css`,\n or :meth:`~ItemLoader.add_value`.\n :type item: scrapy.item.Item\n\n :param selector: The selector to extract data from, when using the\n :meth:`add_xpath`, :meth:`add_css`, :meth:`replace_xpath`, or\n :meth:`replace_css` method.\n :type selector: :class:`~scrapy.selector.Selector` object\n\n :param response: The response used to construct the selector using the\n :attr:`default_selector_class`, unless the selector argument is given,\n in which case this argument is ignored.\n :type response: :class:`~scrapy.http.Response` object\n\n If no item is given, one is instantiated automatically using the class in\n :attr:`default_item_class`.\n\n The item, selector, response and remaining keyword arguments are\n assigned to the Loader context (accessible through the :attr:`context` attribute).\n\n .. attribute:: item\n\n The item object being parsed by this Item Loader.\n This is mostly used as a property so, when attempting to override this\n value, you may want to check out :attr:`default_item_class` first.\n\n .. attribute:: context\n\n The currently active :ref:`Context <loaders-context>` of this Item Loader.\n\n .. attribute:: default_item_class\n\n An :ref:`item <topics-items>` class (or factory), used to instantiate\n items when not given in the ``__init__`` method.\n\n .. attribute:: default_input_processor\n\n The default input processor to use for those fields which don't specify\n one.\n\n .. attribute:: default_output_processor\n\n The default output processor to use for those fields which don't specify\n one.\n\n .. attribute:: default_selector_class\n\n The class used to construct the :attr:`selector` of this\n :class:`ItemLoader`, if only a response is given in the ``__init__`` method.\n If a selector is given in the ``__init__`` method this attribute is ignored.\n This attribute is sometimes overridden in subclasses.\n\n .. 
attribute:: selector\n\n The :class:`~scrapy.selector.Selector` object to extract data from.\n It's either the selector given in the ``__init__`` method or one created from\n the response given in the ``__init__`` method using the\n :attr:`default_selector_class`. This attribute is meant to be\n read-only.\n \"\"\"\n\n default_item_class = Item\n default_selector_class = Selector\n\n def __init__(self, item=None, selector=None, response=None, parent=None, **context):\n if selector is None and response is not None:\n selector = self.default_selector_class(response)\n context.update(response=response)\n super().__init__(item=item, selector=selector, parent=parent, **context)\n", "path": "scrapy/loader/__init__.py"}]} | 1,546 | 147 |
gh_patches_debug_29456 | rasdani/github-patches | git_diff | oppia__oppia-7287 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Show skill mastery values in the topic viewer
Add a skill tab in the topic viewer that will show the skill mastery of all skills in that topic (once we have enough skill mastery information for each skill).
Milestone 3.2 in @sophiewu6's GSoC project
</issue>
<code>
[start of core/controllers/topic_viewer.py]
1 # Copyright 2018 The Oppia Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS-IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Controllers for the topic viewer page."""
16
17 from constants import constants
18 from core.controllers import acl_decorators
19 from core.controllers import base
20 from core.domain import story_fetchers
21 from core.domain import topic_fetchers
22 import feconf
23
24
25 class TopicViewerPage(base.BaseHandler):
26 """Renders the topic viewer page."""
27
28 @acl_decorators.can_access_topic_viewer_page
29 def get(self, _):
30 """Handles GET requests."""
31
32 if not constants.ENABLE_NEW_STRUCTURE_PLAYERS:
33 raise self.PageNotFoundException
34
35 self.render_template('dist/topic-viewer-page.mainpage.html')
36
37
38 class TopicPageDataHandler(base.BaseHandler):
39 """Manages the data that needs to be displayed to a learner on the topic
40 viewer page.
41 """
42 GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
43
44 @acl_decorators.can_access_topic_viewer_page
45 def get(self, topic_name):
46 """Handles GET requests."""
47
48 if not constants.ENABLE_NEW_STRUCTURE_PLAYERS:
49 raise self.PageNotFoundException
50
51 topic = topic_fetchers.get_topic_by_name(topic_name)
52 canonical_story_ids = topic.get_canonical_story_ids(
53 include_only_published=True)
54 additional_story_ids = topic.get_additional_story_ids(
55 include_only_published=True)
56 canonical_story_summaries = [
57 story_fetchers.get_story_summary_by_id(
58 canonical_story_id) for canonical_story_id
59 in canonical_story_ids]
60
61 additional_story_summaries = [
62 story_fetchers.get_story_summary_by_id(
63 additional_story_id) for additional_story_id
64 in additional_story_ids]
65
66 canonical_story_dicts = [
67 summary.to_human_readable_dict() for summary
68 in canonical_story_summaries]
69
70 additional_story_dicts = [
71 summary.to_human_readable_dict() for summary
72 in additional_story_summaries]
73
74 uncategorized_skill_ids = topic.get_all_uncategorized_skill_ids()
75 subtopics = topic.get_all_subtopics()
76
77 self.values.update({
78 'topic_id': topic.id,
79 'topic_name': topic.name,
80 'canonical_story_dicts': canonical_story_dicts,
81 'additional_story_dicts': additional_story_dicts,
82 'uncategorized_skill_ids': uncategorized_skill_ids,
83 'subtopics': subtopics
84 })
85 self.render_json(self.values)
86
[end of core/controllers/topic_viewer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/controllers/topic_viewer.py b/core/controllers/topic_viewer.py
--- a/core/controllers/topic_viewer.py
+++ b/core/controllers/topic_viewer.py
@@ -17,6 +17,7 @@
from constants import constants
from core.controllers import acl_decorators
from core.controllers import base
+from core.domain import skill_services
from core.domain import story_fetchers
from core.domain import topic_fetchers
import feconf
@@ -74,12 +75,26 @@
uncategorized_skill_ids = topic.get_all_uncategorized_skill_ids()
subtopics = topic.get_all_subtopics()
+ assigned_skill_ids = topic.get_all_skill_ids()
+ skill_descriptions = skill_services.get_skill_descriptions_by_ids(
+ topic.id, assigned_skill_ids)
+
+ if self.user_id:
+ degrees_of_mastery = skill_services.get_multi_user_skill_mastery(
+ self.user_id, assigned_skill_ids)
+ else:
+ degrees_of_mastery = {}
+ for skill_id in assigned_skill_ids:
+ degrees_of_mastery[skill_id] = None
+
self.values.update({
'topic_id': topic.id,
'topic_name': topic.name,
'canonical_story_dicts': canonical_story_dicts,
'additional_story_dicts': additional_story_dicts,
'uncategorized_skill_ids': uncategorized_skill_ids,
- 'subtopics': subtopics
+ 'subtopics': subtopics,
+ 'degrees_of_mastery': degrees_of_mastery,
+ 'skill_descriptions': skill_descriptions
})
self.render_json(self.values)
| {"golden_diff": "diff --git a/core/controllers/topic_viewer.py b/core/controllers/topic_viewer.py\n--- a/core/controllers/topic_viewer.py\n+++ b/core/controllers/topic_viewer.py\n@@ -17,6 +17,7 @@\n from constants import constants\n from core.controllers import acl_decorators\n from core.controllers import base\n+from core.domain import skill_services\n from core.domain import story_fetchers\n from core.domain import topic_fetchers\n import feconf\n@@ -74,12 +75,26 @@\n uncategorized_skill_ids = topic.get_all_uncategorized_skill_ids()\n subtopics = topic.get_all_subtopics()\n \n+ assigned_skill_ids = topic.get_all_skill_ids()\n+ skill_descriptions = skill_services.get_skill_descriptions_by_ids(\n+ topic.id, assigned_skill_ids)\n+\n+ if self.user_id:\n+ degrees_of_mastery = skill_services.get_multi_user_skill_mastery(\n+ self.user_id, assigned_skill_ids)\n+ else:\n+ degrees_of_mastery = {}\n+ for skill_id in assigned_skill_ids:\n+ degrees_of_mastery[skill_id] = None\n+\n self.values.update({\n 'topic_id': topic.id,\n 'topic_name': topic.name,\n 'canonical_story_dicts': canonical_story_dicts,\n 'additional_story_dicts': additional_story_dicts,\n 'uncategorized_skill_ids': uncategorized_skill_ids,\n- 'subtopics': subtopics\n+ 'subtopics': subtopics,\n+ 'degrees_of_mastery': degrees_of_mastery,\n+ 'skill_descriptions': skill_descriptions\n })\n self.render_json(self.values)\n", "issue": "Show skill mastery values in the topic viewer\nAdd a skill tab in the topic viewer that will show skill mastery of all skills in that topic (Once we have enough skill mastery information for the skill)\r\n\r\nMilestone 3.2 in @sophiewu6 's GSoC project\n", "before_files": [{"content": "# Copyright 2018 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Controllers for the topic viewer page.\"\"\"\n\nfrom constants import constants\nfrom core.controllers import acl_decorators\nfrom core.controllers import base\nfrom core.domain import story_fetchers\nfrom core.domain import topic_fetchers\nimport feconf\n\n\nclass TopicViewerPage(base.BaseHandler):\n \"\"\"Renders the topic viewer page.\"\"\"\n\n @acl_decorators.can_access_topic_viewer_page\n def get(self, _):\n \"\"\"Handles GET requests.\"\"\"\n\n if not constants.ENABLE_NEW_STRUCTURE_PLAYERS:\n raise self.PageNotFoundException\n\n self.render_template('dist/topic-viewer-page.mainpage.html')\n\n\nclass TopicPageDataHandler(base.BaseHandler):\n \"\"\"Manages the data that needs to be displayed to a learner on the topic\n viewer page.\n \"\"\"\n GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON\n\n @acl_decorators.can_access_topic_viewer_page\n def get(self, topic_name):\n \"\"\"Handles GET requests.\"\"\"\n\n if not constants.ENABLE_NEW_STRUCTURE_PLAYERS:\n raise self.PageNotFoundException\n\n topic = topic_fetchers.get_topic_by_name(topic_name)\n canonical_story_ids = topic.get_canonical_story_ids(\n include_only_published=True)\n additional_story_ids = topic.get_additional_story_ids(\n 
include_only_published=True)\n canonical_story_summaries = [\n story_fetchers.get_story_summary_by_id(\n canonical_story_id) for canonical_story_id\n in canonical_story_ids]\n\n additional_story_summaries = [\n story_fetchers.get_story_summary_by_id(\n additional_story_id) for additional_story_id\n in additional_story_ids]\n\n canonical_story_dicts = [\n summary.to_human_readable_dict() for summary\n in canonical_story_summaries]\n\n additional_story_dicts = [\n summary.to_human_readable_dict() for summary\n in additional_story_summaries]\n\n uncategorized_skill_ids = topic.get_all_uncategorized_skill_ids()\n subtopics = topic.get_all_subtopics()\n\n self.values.update({\n 'topic_id': topic.id,\n 'topic_name': topic.name,\n 'canonical_story_dicts': canonical_story_dicts,\n 'additional_story_dicts': additional_story_dicts,\n 'uncategorized_skill_ids': uncategorized_skill_ids,\n 'subtopics': subtopics\n })\n self.render_json(self.values)\n", "path": "core/controllers/topic_viewer.py"}]} | 1,375 | 341 |
gh_patches_debug_34814 | rasdani/github-patches | git_diff | dynaconf__dynaconf-825 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[RFC] Support multidoc yaml files
**Is your feature request related to a problem? Please describe.**
Sometimes it is difficult or impossible to pass multiple files with config fragments. YAML supports multiple documents in one file, and `safe_load_all` from the PyYAML API loads them accordingly. It is a standard YAML feature; it would be nice to support it and make it usable in cases where passing a single file (composed from several files) is easier.
**Describe the solution you'd like**
Support `safe_load_all` as a YAML loader.
**Describe alternatives you've considered**
Passing multiple files would do the job; however, it is not always straightforward.
**Additional context**
I have prepared a patch
</issue>
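
As a quick, self-contained illustration of the requested behavior (not part of the repository code, and the document contents are made up for the demo), `yaml.safe_load_all` yields one parsed object per `---`-separated document; plain PyYAML stands in here for the vendored ruamel loader:

```python
import yaml  # PyYAML used for illustration; dynaconf vendors ruamel.yaml

multi_doc = """\
---
default:
  host: localhost
---
production:
  host: example.com
"""

# safe_load_all returns a generator with one Python object per YAML document.
for doc in yaml.safe_load_all(multi_doc):
    print(doc)
# {'default': {'host': 'localhost'}}
# {'production': {'host': 'example.com'}}
```

A loader that accepts such a reader has to handle an iterable of documents instead of assuming a single mapping per file, which is exactly what the patch below does.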
<code>
[start of dynaconf/loaders/yaml_loader.py]
1 from __future__ import annotations
2
3 import sys
4 from pathlib import Path
5 from typing import TextIO
6 from warnings import warn
7
8 from dynaconf import default_settings
9 from dynaconf.constants import YAML_EXTENSIONS
10 from dynaconf.loaders.base import BaseLoader
11 from dynaconf.utils import object_merge
12 from dynaconf.utils.parse_conf import try_to_encode
13 from dynaconf.vendor.ruamel import yaml
14
15 # Add support for Dynaconf Lazy values to YAML dumper
16 yaml.SafeDumper.yaml_representers[
17 None
18 ] = lambda self, data: yaml.representer.SafeRepresenter.represent_str(
19 self, try_to_encode(data)
20 )
21
22
23 def load(obj, env=None, silent=True, key=None, filename=None, validate=False):
24 """
25 Reads and loads in to "obj" a single key or all keys from source file.
26
27 :param obj: the settings instance
28 :param env: settings current env default='development'
29 :param silent: if errors should raise
30 :param key: if defined load a single key, else load all in env
31 :param filename: Optional custom filename to load
32 :return: None
33 """
34 # Resolve the loaders
35 # https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation
36 # Possible values are `safe_load, full_load, unsafe_load, load`
37 yaml_reader = getattr(
38 yaml, obj.get("YAML_LOADER_FOR_DYNACONF"), yaml.safe_load
39 )
40 if yaml_reader.__name__ == "unsafe_load": # pragma: no cover
41 warn(
42 "yaml.unsafe_load is deprecated."
43 " Please read https://msg.pyyaml.org/load for full details."
44 " Try to use full_load or safe_load."
45 )
46
47 loader = BaseLoader(
48 obj=obj,
49 env=env,
50 identifier="yaml",
51 extensions=YAML_EXTENSIONS,
52 file_reader=yaml_reader,
53 string_reader=yaml_reader,
54 validate=validate,
55 )
56 loader.load(
57 filename=filename,
58 key=key,
59 silent=silent,
60 )
61
62
63 def write(settings_path, settings_data, merge=True):
64 """Write data to a settings file.
65
66 :param settings_path: the filepath
67 :param settings_data: a dictionary with data
68 :param merge: boolean if existing file should be merged with new data
69 :param stdout: boolean if should output to stdout instead of file
70 """
71 settings_path = Path(settings_path)
72 if settings_path.exists() and merge: # pragma: no cover
73 with open(
74 str(settings_path), encoding=default_settings.ENCODING_FOR_DYNACONF
75 ) as open_file:
76 object_merge(yaml.safe_load(open_file), settings_data)
77
78 with open(
79 str(settings_path),
80 "w",
81 encoding=default_settings.ENCODING_FOR_DYNACONF,
82 ) as open_file:
83 yaml.dump(
84 settings_data,
85 open_file,
86 Dumper=yaml.dumper.SafeDumper,
87 explicit_start=True,
88 indent=2,
89 default_flow_style=False,
90 )
91
[end of dynaconf/loaders/yaml_loader.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dynaconf/loaders/yaml_loader.py b/dynaconf/loaders/yaml_loader.py
--- a/dynaconf/loaders/yaml_loader.py
+++ b/dynaconf/loaders/yaml_loader.py
@@ -20,6 +20,41 @@
)
+class AllLoader(BaseLoader):
+ """YAML Loader to load multi doc files"""
+
+ @staticmethod
+ def _assign_data(data, source_file, content):
+ """Helper to iterate through all docs in a file"""
+ content = tuple(content)
+ if len(content) == 1:
+ data[source_file] = content[0]
+ elif len(content) > 1:
+ for i, doc in enumerate(content):
+ data[f"{source_file}[{i}]"] = doc
+
+ def get_source_data(self, files):
+ data = {}
+ for source_file in files:
+ if source_file.endswith(self.extensions):
+ try:
+ with open(source_file, **self.opener_params) as open_file:
+ content = self.file_reader(open_file)
+ self.obj._loaded_files.append(source_file)
+ self._assign_data(data, source_file, content)
+ except OSError as e:
+ if ".local." not in source_file:
+ warn(
+ f"{self.identifier}_loader: {source_file} "
+ f":{str(e)}"
+ )
+ else:
+ # for tests it is possible to pass string
+ content = self.string_reader(source_file)
+ self._assign_data(data, source_file, content)
+ return data
+
+
def load(obj, env=None, silent=True, key=None, filename=None, validate=False):
"""
Reads and loads in to "obj" a single key or all keys from source file.
@@ -33,7 +68,8 @@
"""
# Resolve the loaders
# https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation
- # Possible values are `safe_load, full_load, unsafe_load, load`
+ # Possible values are:
+ # `safe_load, full_load, unsafe_load, load, safe_load_all`
yaml_reader = getattr(
yaml, obj.get("YAML_LOADER_FOR_DYNACONF"), yaml.safe_load
)
@@ -44,7 +80,11 @@
" Try to use full_load or safe_load."
)
- loader = BaseLoader(
+ _loader = BaseLoader
+ if yaml_reader.__name__.endswith("_all"):
+ _loader = AllLoader
+
+ loader = _loader(
obj=obj,
env=env,
identifier="yaml",
| {"golden_diff": "diff --git a/dynaconf/loaders/yaml_loader.py b/dynaconf/loaders/yaml_loader.py\n--- a/dynaconf/loaders/yaml_loader.py\n+++ b/dynaconf/loaders/yaml_loader.py\n@@ -20,6 +20,41 @@\n )\n \n \n+class AllLoader(BaseLoader):\n+ \"\"\"YAML Loader to load multi doc files\"\"\"\n+\n+ @staticmethod\n+ def _assign_data(data, source_file, content):\n+ \"\"\"Helper to iterate through all docs in a file\"\"\"\n+ content = tuple(content)\n+ if len(content) == 1:\n+ data[source_file] = content[0]\n+ elif len(content) > 1:\n+ for i, doc in enumerate(content):\n+ data[f\"{source_file}[{i}]\"] = doc\n+\n+ def get_source_data(self, files):\n+ data = {}\n+ for source_file in files:\n+ if source_file.endswith(self.extensions):\n+ try:\n+ with open(source_file, **self.opener_params) as open_file:\n+ content = self.file_reader(open_file)\n+ self.obj._loaded_files.append(source_file)\n+ self._assign_data(data, source_file, content)\n+ except OSError as e:\n+ if \".local.\" not in source_file:\n+ warn(\n+ f\"{self.identifier}_loader: {source_file} \"\n+ f\":{str(e)}\"\n+ )\n+ else:\n+ # for tests it is possible to pass string\n+ content = self.string_reader(source_file)\n+ self._assign_data(data, source_file, content)\n+ return data\n+\n+\n def load(obj, env=None, silent=True, key=None, filename=None, validate=False):\n \"\"\"\n Reads and loads in to \"obj\" a single key or all keys from source file.\n@@ -33,7 +68,8 @@\n \"\"\"\n # Resolve the loaders\n # https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation\n- # Possible values are `safe_load, full_load, unsafe_load, load`\n+ # Possible values are:\n+ # `safe_load, full_load, unsafe_load, load, safe_load_all`\n yaml_reader = getattr(\n yaml, obj.get(\"YAML_LOADER_FOR_DYNACONF\"), yaml.safe_load\n )\n@@ -44,7 +80,11 @@\n \" Try to use full_load or safe_load.\"\n )\n \n- loader = BaseLoader(\n+ _loader = BaseLoader\n+ if yaml_reader.__name__.endswith(\"_all\"):\n+ _loader = AllLoader\n+\n+ loader = _loader(\n obj=obj,\n env=env,\n identifier=\"yaml\",\n", "issue": "[RFC] Support multidoc yaml files\n**Is your feature request related to a problem? Please describe.**\r\nSometimes it can be difficult or impossible to pass multiple files with config fragments. yaml support multiple documents in one file and `safe_load_all` from pyaml api loads that accordingly. 
It is standard yaml feature, it would be nice to support it and make in usable in cases when passing one file (composited from more files) would be easier.\r\n\r\n**Describe the solution you'd like**\r\nSupport `safe_load_all` as yaml loader.\r\n\r\n**Describe alternatives you've considered**\r\nPassing multiple files will do the work, however it doesn't have to be always straightforward.\r\n\r\n**Additional context**\r\nI have prepared a patch\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport sys\nfrom pathlib import Path\nfrom typing import TextIO\nfrom warnings import warn\n\nfrom dynaconf import default_settings\nfrom dynaconf.constants import YAML_EXTENSIONS\nfrom dynaconf.loaders.base import BaseLoader\nfrom dynaconf.utils import object_merge\nfrom dynaconf.utils.parse_conf import try_to_encode\nfrom dynaconf.vendor.ruamel import yaml\n\n# Add support for Dynaconf Lazy values to YAML dumper\nyaml.SafeDumper.yaml_representers[\n None\n] = lambda self, data: yaml.representer.SafeRepresenter.represent_str(\n self, try_to_encode(data)\n)\n\n\ndef load(obj, env=None, silent=True, key=None, filename=None, validate=False):\n \"\"\"\n Reads and loads in to \"obj\" a single key or all keys from source file.\n\n :param obj: the settings instance\n :param env: settings current env default='development'\n :param silent: if errors should raise\n :param key: if defined load a single key, else load all in env\n :param filename: Optional custom filename to load\n :return: None\n \"\"\"\n # Resolve the loaders\n # https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation\n # Possible values are `safe_load, full_load, unsafe_load, load`\n yaml_reader = getattr(\n yaml, obj.get(\"YAML_LOADER_FOR_DYNACONF\"), yaml.safe_load\n )\n if yaml_reader.__name__ == \"unsafe_load\": # pragma: no cover\n warn(\n \"yaml.unsafe_load is deprecated.\"\n \" Please read https://msg.pyyaml.org/load for full details.\"\n \" Try to use full_load or safe_load.\"\n )\n\n loader = BaseLoader(\n obj=obj,\n env=env,\n identifier=\"yaml\",\n extensions=YAML_EXTENSIONS,\n file_reader=yaml_reader,\n string_reader=yaml_reader,\n validate=validate,\n )\n loader.load(\n filename=filename,\n key=key,\n silent=silent,\n )\n\n\ndef write(settings_path, settings_data, merge=True):\n \"\"\"Write data to a settings file.\n\n :param settings_path: the filepath\n :param settings_data: a dictionary with data\n :param merge: boolean if existing file should be merged with new data\n :param stdout: boolean if should output to stdout instead of file\n \"\"\"\n settings_path = Path(settings_path)\n if settings_path.exists() and merge: # pragma: no cover\n with open(\n str(settings_path), encoding=default_settings.ENCODING_FOR_DYNACONF\n ) as open_file:\n object_merge(yaml.safe_load(open_file), settings_data)\n\n with open(\n str(settings_path),\n \"w\",\n encoding=default_settings.ENCODING_FOR_DYNACONF,\n ) as open_file:\n yaml.dump(\n settings_data,\n open_file,\n Dumper=yaml.dumper.SafeDumper,\n explicit_start=True,\n indent=2,\n default_flow_style=False,\n )\n", "path": "dynaconf/loaders/yaml_loader.py"}]} | 1,540 | 602 |
gh_patches_debug_9478 | rasdani/github-patches | git_diff | bridgecrewio__checkov-548 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add new check: API Gateway V2 should have access logging enabled
AccessLogSettings: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-apigatewayv2-stage.html
Terraform does not currently support this: https://github.com/terraform-providers/terraform-provider-aws/issues/7004
</issue>
<code>
[start of checkov/terraform/checks/resource/aws/APIGatewayAccessLogging.py]
1 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
2 from checkov.common.models.enums import CheckCategories
3 from checkov.common.models.consts import ANY_VALUE
4
5
6 class APIGatewayAccessLogging(BaseResourceValueCheck):
7
8 def __init__(self):
9 name = "Ensure API Gateway has Access Logging enabled"
10 id = "CKV_AWS_76"
11 supported_resources = ['aws_api_gateway_stage']
12 categories = [CheckCategories.LOGGING]
13 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
14
15 def get_inspected_key(self):
16 return "access_log_settings/[0]/destination_arn"
17
18 def get_expected_value(self):
19 return ANY_VALUE
20
21
22 check = APIGatewayAccessLogging()
23
[end of checkov/terraform/checks/resource/aws/APIGatewayAccessLogging.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/terraform/checks/resource/aws/APIGatewayAccessLogging.py b/checkov/terraform/checks/resource/aws/APIGatewayAccessLogging.py
--- a/checkov/terraform/checks/resource/aws/APIGatewayAccessLogging.py
+++ b/checkov/terraform/checks/resource/aws/APIGatewayAccessLogging.py
@@ -8,7 +8,7 @@
def __init__(self):
name = "Ensure API Gateway has Access Logging enabled"
id = "CKV_AWS_76"
- supported_resources = ['aws_api_gateway_stage']
+ supported_resources = ['aws_api_gateway_stage', 'aws_apigatewayv2_stage']
categories = [CheckCategories.LOGGING]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/APIGatewayAccessLogging.py b/checkov/terraform/checks/resource/aws/APIGatewayAccessLogging.py\n--- a/checkov/terraform/checks/resource/aws/APIGatewayAccessLogging.py\n+++ b/checkov/terraform/checks/resource/aws/APIGatewayAccessLogging.py\n@@ -8,7 +8,7 @@\n def __init__(self):\n name = \"Ensure API Gateway has Access Logging enabled\"\n id = \"CKV_AWS_76\"\n- supported_resources = ['aws_api_gateway_stage']\n+ supported_resources = ['aws_api_gateway_stage', 'aws_apigatewayv2_stage']\n categories = [CheckCategories.LOGGING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n", "issue": "Add new check: API Gateway V2 should have access logging enabled \nAccessLogSettings: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-apigatewayv2-stage.html\r\n\r\nTerraform does not currently support this: https://github.com/terraform-providers/terraform-provider-aws/issues/7004\n", "before_files": [{"content": "from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\nfrom checkov.common.models.enums import CheckCategories\nfrom checkov.common.models.consts import ANY_VALUE\n\n\nclass APIGatewayAccessLogging(BaseResourceValueCheck):\n\n def __init__(self):\n name = \"Ensure API Gateway has Access Logging enabled\"\n id = \"CKV_AWS_76\"\n supported_resources = ['aws_api_gateway_stage']\n categories = [CheckCategories.LOGGING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return \"access_log_settings/[0]/destination_arn\"\n\n def get_expected_value(self):\n return ANY_VALUE\n\n\ncheck = APIGatewayAccessLogging()\n", "path": "checkov/terraform/checks/resource/aws/APIGatewayAccessLogging.py"}]} | 826 | 167 |
gh_patches_debug_113 | rasdani/github-patches | git_diff | elastic__apm-agent-python-1494 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[META 576] Sanitize `*auth*` instead of `authorization`
[](https://github.com/elastic/apm/issues/576)
[](https://github.com/elastic/apm/issues/577)
Sanitize `*auth*` instead of `authorization`
</issue>
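
To see why the wildcard widens coverage, here is a minimal sketch of the glob-to-regex matching — a simplified re-implementation of the `_starmatch_to_regex` helper shown in the file below, with the `(?-i)` case-sensitivity prefix omitted for brevity:

```python
import re

def starmatch_to_regex(pattern):
    # Simplified copy for illustration only: '*' becomes '.*', everything else is escaped.
    parts = [".*" if c == "*" else re.escape(c) for c in pattern]
    return re.compile(r"(?:%s)\Z" % "".join(parts), re.DOTALL | re.IGNORECASE)

old = starmatch_to_regex("authorization")
new = starmatch_to_regex("*auth*")

for field in ("authorization", "proxy-authorization", "x-auth-token"):
    print(field, bool(old.match(field)), bool(new.match(field)))
# authorization True True
# proxy-authorization False True
# x-auth-token False True
```

The exact-string pattern only sanitizes the literal `authorization` header, while `*auth*` also catches variants such as `proxy-authorization` and custom `*-auth-*` fields.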
<code>
[start of elasticapm/conf/constants.py]
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2019, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 import decimal
32 import re
33 from collections import namedtuple
34
35
36 def _starmatch_to_regex(pattern):
37 """
38 This is a duplicate of starmatch_to_regex() in utils/__init__.py
39
40 Duplication to avoid circular imports
41 """
42 options = re.DOTALL
43 # check if we are case sensitive
44 if pattern.startswith("(?-i)"):
45 pattern = pattern[5:]
46 else:
47 options |= re.IGNORECASE
48 i, n = 0, len(pattern)
49 res = []
50 while i < n:
51 c = pattern[i]
52 i = i + 1
53 if c == "*":
54 res.append(".*")
55 else:
56 res.append(re.escape(c))
57 return re.compile(r"(?:%s)\Z" % "".join(res), options)
58
59
60 EVENTS_API_PATH = "intake/v2/events"
61 AGENT_CONFIG_PATH = "config/v1/agents"
62 SERVER_INFO_PATH = ""
63
64 TRACE_CONTEXT_VERSION = 0
65 TRACEPARENT_HEADER_NAME = "traceparent"
66 TRACEPARENT_LEGACY_HEADER_NAME = "elastic-apm-traceparent"
67 TRACESTATE_HEADER_NAME = "tracestate"
68
69 TIMESTAMP_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"
70
71 KEYWORD_MAX_LENGTH = 1024
72
73 HTTP_WITH_BODY = {"POST", "PUT", "PATCH", "DELETE"}
74
75 MASK = "[REDACTED]"
76
77 EXCEPTION_CHAIN_MAX_DEPTH = 50
78
79 ERROR = "error"
80 TRANSACTION = "transaction"
81 SPAN = "span"
82 METRICSET = "metricset"
83
84 LABEL_RE = re.compile('[.*"]')
85
86 HARDCODED_PROCESSORS = ["elasticapm.processors.add_context_lines_to_frames"]
87
88 BASE_SANITIZE_FIELD_NAMES_UNPROCESSED = [
89 "password",
90 "passwd",
91 "pwd",
92 "secret",
93 "*key",
94 "*token*",
95 "*session*",
96 "*credit*",
97 "*card*",
98 "authorization",
99 "set-cookie",
100 ]
101
102 BASE_SANITIZE_FIELD_NAMES = [_starmatch_to_regex(x) for x in BASE_SANITIZE_FIELD_NAMES_UNPROCESSED]
103
104 OUTCOME = namedtuple("OUTCOME", ["SUCCESS", "FAILURE", "UNKNOWN"])(
105 SUCCESS="success", FAILURE="failure", UNKNOWN="unknown"
106 )
107
108 try:
109 # Python 2
110 LABEL_TYPES = (bool, int, long, float, decimal.Decimal)
111 except NameError:
112 # Python 3
113 LABEL_TYPES = (bool, int, float, decimal.Decimal)
114
115 TRACESTATE = namedtuple("TRACESTATE", ["SAMPLE_RATE"])(SAMPLE_RATE="s")
116
[end of elasticapm/conf/constants.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticapm/conf/constants.py b/elasticapm/conf/constants.py
--- a/elasticapm/conf/constants.py
+++ b/elasticapm/conf/constants.py
@@ -95,7 +95,7 @@
"*session*",
"*credit*",
"*card*",
- "authorization",
+ "*auth*",
"set-cookie",
]
| {"golden_diff": "diff --git a/elasticapm/conf/constants.py b/elasticapm/conf/constants.py\n--- a/elasticapm/conf/constants.py\n+++ b/elasticapm/conf/constants.py\n@@ -95,7 +95,7 @@\n \"*session*\",\n \"*credit*\",\n \"*card*\",\n- \"authorization\",\n+ \"*auth*\",\n \"set-cookie\",\n ]\n", "issue": "[META 576] Sanitize `*auth*` instead of `authorization`\n[](https://github.com/elastic/apm/issues/576)\n\n[](https://github.com/elastic/apm/issues/577)\n\nSanitize `*auth*` instead of `authorization`\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport decimal\nimport re\nfrom collections import namedtuple\n\n\ndef _starmatch_to_regex(pattern):\n \"\"\"\n This is a duplicate of starmatch_to_regex() in utils/__init__.py\n\n Duplication to avoid circular imports\n \"\"\"\n options = re.DOTALL\n # check if we are case sensitive\n if pattern.startswith(\"(?-i)\"):\n pattern = pattern[5:]\n else:\n options |= re.IGNORECASE\n i, n = 0, len(pattern)\n res = []\n while i < n:\n c = pattern[i]\n i = i + 1\n if c == \"*\":\n res.append(\".*\")\n else:\n res.append(re.escape(c))\n return re.compile(r\"(?:%s)\\Z\" % \"\".join(res), options)\n\n\nEVENTS_API_PATH = \"intake/v2/events\"\nAGENT_CONFIG_PATH = \"config/v1/agents\"\nSERVER_INFO_PATH = \"\"\n\nTRACE_CONTEXT_VERSION = 0\nTRACEPARENT_HEADER_NAME = \"traceparent\"\nTRACEPARENT_LEGACY_HEADER_NAME = \"elastic-apm-traceparent\"\nTRACESTATE_HEADER_NAME = \"tracestate\"\n\nTIMESTAMP_FORMAT = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n\nKEYWORD_MAX_LENGTH = 1024\n\nHTTP_WITH_BODY = {\"POST\", \"PUT\", \"PATCH\", \"DELETE\"}\n\nMASK = \"[REDACTED]\"\n\nEXCEPTION_CHAIN_MAX_DEPTH = 50\n\nERROR = \"error\"\nTRANSACTION = \"transaction\"\nSPAN = \"span\"\nMETRICSET = \"metricset\"\n\nLABEL_RE = re.compile('[.*\"]')\n\nHARDCODED_PROCESSORS = [\"elasticapm.processors.add_context_lines_to_frames\"]\n\nBASE_SANITIZE_FIELD_NAMES_UNPROCESSED = [\n \"password\",\n \"passwd\",\n \"pwd\",\n \"secret\",\n \"*key\",\n \"*token*\",\n 
\"*session*\",\n \"*credit*\",\n \"*card*\",\n \"authorization\",\n \"set-cookie\",\n]\n\nBASE_SANITIZE_FIELD_NAMES = [_starmatch_to_regex(x) for x in BASE_SANITIZE_FIELD_NAMES_UNPROCESSED]\n\nOUTCOME = namedtuple(\"OUTCOME\", [\"SUCCESS\", \"FAILURE\", \"UNKNOWN\"])(\n SUCCESS=\"success\", FAILURE=\"failure\", UNKNOWN=\"unknown\"\n)\n\ntry:\n # Python 2\n LABEL_TYPES = (bool, int, long, float, decimal.Decimal)\nexcept NameError:\n # Python 3\n LABEL_TYPES = (bool, int, float, decimal.Decimal)\n\nTRACESTATE = namedtuple(\"TRACESTATE\", [\"SAMPLE_RATE\"])(SAMPLE_RATE=\"s\")\n", "path": "elasticapm/conf/constants.py"}]} | 1,862 | 85 |
gh_patches_debug_26194 | rasdani/github-patches | git_diff | streamlink__streamlink-95 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Connectcast stream fails with "invalid url"
Attempting to load an active connectcast stream via `streamlink connectcast.tv/streamname` results in an error:
`error: Unable to open URL: (Invalid URL '': No schema supplied. Perhaps you mean http://?)`
Similarly, using `http://connectcast.tv/streamname` for the url also fails.
Running on Windows, built with Python 3.5.0rc2.
</issue>
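
A hypothetical reproduction of the reported failure (the markup is assumed for illustration, not taken from the live site): if the page no longer carries a usable `data-playback` attribute, the plugin's regex yields an empty string, and requesting it raises exactly the "No schema supplied" error from the report:

```python
import re
import requests

_manifest_re = re.compile(".*data-playback=\"([^\"]*)\".*")

html = '<div data-playback=""></div>'  # assumed page markup for illustration
manifest = _manifest_re.search(html).group(1)
print(repr(manifest))  # ''
requests.get(manifest)  # requests.exceptions.MissingSchema: Invalid URL '': No schema supplied.
```

This suggests the site stopped embedding the HDS manifest in that attribute, which is consistent with the fix below switching the plugin to a different stream source.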
<code>
[start of src/streamlink/plugins/connectcast.py]
1 import re
2 import json
3
4 from streamlink.plugin import Plugin
5 from streamlink.plugin.api import http, validate
6 from streamlink.stream import HDSStream
7
8 SWF_URL = "https://www.connectcast.tv/jwplayer/jwplayer.flash.swf"
9
10 _url_re = re.compile("http(s)?://(\w+\.)?connectcast.tv/")
11 _manifest_re = re.compile(".*data-playback=\"([^\"]*)\".*")
12
13
14 class ConnectCast(Plugin):
15 @classmethod
16 def can_handle_url(self, url):
17 return _url_re.match(url)
18
19 def _get_streams(self):
20 res = http.get(self.url)
21 match = _manifest_re.search(res.text)
22 manifest = match.group(1)
23 streams = {}
24 streams.update(
25 HDSStream.parse_manifest(self.session, manifest, pvswf=SWF_URL)
26 )
27
28 return streams
29
30 __plugin__ = ConnectCast
31
[end of src/streamlink/plugins/connectcast.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/plugins/connectcast.py b/src/streamlink/plugins/connectcast.py
--- a/src/streamlink/plugins/connectcast.py
+++ b/src/streamlink/plugins/connectcast.py
@@ -3,13 +3,11 @@
from streamlink.plugin import Plugin
from streamlink.plugin.api import http, validate
-from streamlink.stream import HDSStream
-
-SWF_URL = "https://www.connectcast.tv/jwplayer/jwplayer.flash.swf"
-
-_url_re = re.compile("http(s)?://(\w+\.)?connectcast.tv/")
-_manifest_re = re.compile(".*data-playback=\"([^\"]*)\".*")
+from streamlink.stream import RTMPStream
+_url_re = re.compile(r"http(?:s)?://connectcast.tv/(\w+)?")
+_stream_re = re.compile(r'<video src="mp4:(.*?)"')
+_stream_url = "http://connectcast.tv/channel/stream/{channel}"
class ConnectCast(Plugin):
@classmethod
@@ -17,14 +15,15 @@
return _url_re.match(url)
def _get_streams(self):
- res = http.get(self.url)
- match = _manifest_re.search(res.text)
- manifest = match.group(1)
- streams = {}
- streams.update(
- HDSStream.parse_manifest(self.session, manifest, pvswf=SWF_URL)
- )
-
- return streams
+ url_match = _url_re.match(self.url)
+ stream_url = _stream_url.format(channel=url_match.group(1))
+ res = self.session.http.get(stream_url)
+ match = _stream_re.search(res.content)
+ if match:
+ params = dict(rtmp="rtmp://stream.connectcast.tv/live",
+ playpath=match.group(1),
+ live=True)
+
+ return dict(live=RTMPStream(self.session, params))
__plugin__ = ConnectCast
| {"golden_diff": "diff --git a/src/streamlink/plugins/connectcast.py b/src/streamlink/plugins/connectcast.py\n--- a/src/streamlink/plugins/connectcast.py\n+++ b/src/streamlink/plugins/connectcast.py\n@@ -3,13 +3,11 @@\n \n from streamlink.plugin import Plugin\n from streamlink.plugin.api import http, validate\n-from streamlink.stream import HDSStream\n-\n-SWF_URL = \"https://www.connectcast.tv/jwplayer/jwplayer.flash.swf\"\n-\n-_url_re = re.compile(\"http(s)?://(\\w+\\.)?connectcast.tv/\")\n-_manifest_re = re.compile(\".*data-playback=\\\"([^\\\"]*)\\\".*\")\n+from streamlink.stream import RTMPStream\n \n+_url_re = re.compile(r\"http(?:s)?://connectcast.tv/(\\w+)?\")\n+_stream_re = re.compile(r'<video src=\"mp4:(.*?)\"')\n+_stream_url = \"http://connectcast.tv/channel/stream/{channel}\"\n \n class ConnectCast(Plugin):\n @classmethod\n@@ -17,14 +15,15 @@\n return _url_re.match(url)\n \n def _get_streams(self):\n- res = http.get(self.url)\n- match = _manifest_re.search(res.text)\n- manifest = match.group(1)\n- streams = {}\n- streams.update(\n- HDSStream.parse_manifest(self.session, manifest, pvswf=SWF_URL)\n- )\n- \n- return streams\n+ url_match = _url_re.match(self.url)\n+ stream_url = _stream_url.format(channel=url_match.group(1))\n+ res = self.session.http.get(stream_url)\n+ match = _stream_re.search(res.content)\n+ if match:\n+ params = dict(rtmp=\"rtmp://stream.connectcast.tv/live\",\n+ playpath=match.group(1),\n+ live=True)\n+\n+ return dict(live=RTMPStream(self.session, params))\n \n __plugin__ = ConnectCast\n", "issue": "Connectcast stream fails with \"invalid url\"\nAttempting to load an active connectcast stream via `streamlink connectcast.tv/streamname` results in an error:\n`error: Unable to open URL: (Invalid URL '': No schema supplied. Perhaps you mean http://?)`\n\nSimilarly, using `http://connectcast.tv/streamname` for the url also fails.\n\nRunning on Windows, built with python 3.5.0rc2\n\n", "before_files": [{"content": "import re\nimport json\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http, validate\nfrom streamlink.stream import HDSStream\n\nSWF_URL = \"https://www.connectcast.tv/jwplayer/jwplayer.flash.swf\"\n\n_url_re = re.compile(\"http(s)?://(\\w+\\.)?connectcast.tv/\")\n_manifest_re = re.compile(\".*data-playback=\\\"([^\\\"]*)\\\".*\")\n\n\nclass ConnectCast(Plugin):\n @classmethod\n def can_handle_url(self, url):\n return _url_re.match(url)\n\n def _get_streams(self):\n res = http.get(self.url)\n match = _manifest_re.search(res.text)\n manifest = match.group(1)\n streams = {}\n streams.update(\n HDSStream.parse_manifest(self.session, manifest, pvswf=SWF_URL)\n )\n \n return streams\n\n__plugin__ = ConnectCast\n", "path": "src/streamlink/plugins/connectcast.py"}]} | 884 | 427 |
gh_patches_debug_3637 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-3246 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
#3491 [mB] add video embed to interactive event
**URL:** https://meinberlin-dev.liqd.net/projekte/design-project/
**device & browser:** *Safari Version 14.0 (15610.1.28.1.9, 15610)*
**Comment/Question:**
*Just to confirm: should the live stream field appear only once the project is published? I can't select the live stream section before the project is published; otherwise, all good.*
<img width="1361" alt="Screenshot 2020-11-10 at 16 03 41" src="https://user-images.githubusercontent.com/59610786/98691968-e462ff80-236e-11eb-904b-755ff83b79cc.png">
<img width="1389" alt="Screenshot 2020-11-10 at 16 04 07" src="https://user-images.githubusercontent.com/59610786/98691978-e7f68680-236e-11eb-9a18-53ade0537fa8.png">
<img width="1330" alt="Screenshot 2020-11-10 at 16 04 24" src="https://user-images.githubusercontent.com/59610786/98691980-e927b380-236e-11eb-88a8-ad2c644e58df.png">
</issue>
<code>
[start of meinberlin/apps/livequestions/dashboard.py]
1 from django.urls import reverse
2 from django.utils.translation import ugettext_lazy as _
3
4 from adhocracy4.dashboard import DashboardComponent
5 from adhocracy4.dashboard import components
6
7 from . import views
8
9
10 class LiveStreamComponent(DashboardComponent):
11 identifier = 'live_stream'
12 weight = 20
13 label = _('Live Stream')
14
15 def is_effective(self, module):
16 module_app = module.phases[0].content().app
17 return (module_app == 'meinberlin_livequestions' and
18 not module.project.is_draft)
19
20 def get_progress(self, module):
21 return 0, 0
22
23 def get_base_url(self, module):
24 return reverse('a4dashboard:livequestions-livestream', kwargs={
25 'module_slug': module.slug,
26 })
27
28 def get_urls(self):
29 return [(
30 r'^modules/(?P<module_slug>[-\w_]+)/livestream/$',
31 views.LiveStreamDashboardView.as_view(component=self),
32 'livequestions-livestream'
33 )]
34
35
36 components.register_module(LiveStreamComponent())
37
[end of meinberlin/apps/livequestions/dashboard.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/meinberlin/apps/livequestions/dashboard.py b/meinberlin/apps/livequestions/dashboard.py
--- a/meinberlin/apps/livequestions/dashboard.py
+++ b/meinberlin/apps/livequestions/dashboard.py
@@ -14,8 +14,7 @@
def is_effective(self, module):
module_app = module.phases[0].content().app
- return (module_app == 'meinberlin_livequestions' and
- not module.project.is_draft)
+ return (module_app == 'meinberlin_livequestions')
def get_progress(self, module):
return 0, 0
| {"golden_diff": "diff --git a/meinberlin/apps/livequestions/dashboard.py b/meinberlin/apps/livequestions/dashboard.py\n--- a/meinberlin/apps/livequestions/dashboard.py\n+++ b/meinberlin/apps/livequestions/dashboard.py\n@@ -14,8 +14,7 @@\n \n def is_effective(self, module):\n module_app = module.phases[0].content().app\n- return (module_app == 'meinberlin_livequestions' and\n- not module.project.is_draft)\n+ return (module_app == 'meinberlin_livequestions')\n \n def get_progress(self, module):\n return 0, 0\n", "issue": "#3491 [mB] add video embed to interactive event \n**URL:** https://meinberlin-dev.liqd.net/projekte/design-project/\r\n**device & browser:** *Safari Version 14.0 (15610.1.28.1.9, 15610)*\r\n**Comment/Question:** \r\n*Just to confirm, the live stream field should appear just when the project is published? Cause, I can't select the live stream section before being published, otherwise all good* \r\n\r\n<img width=\"1361\" alt=\"Screenshot 2020-11-10 at 16 03 41\" src=\"https://user-images.githubusercontent.com/59610786/98691968-e462ff80-236e-11eb-904b-755ff83b79cc.png\">\r\n<img width=\"1389\" alt=\"Screenshot 2020-11-10 at 16 04 07\" src=\"https://user-images.githubusercontent.com/59610786/98691978-e7f68680-236e-11eb-9a18-53ade0537fa8.png\">\r\n<img width=\"1330\" alt=\"Screenshot 2020-11-10 at 16 04 24\" src=\"https://user-images.githubusercontent.com/59610786/98691980-e927b380-236e-11eb-88a8-ad2c644e58df.png\">\r\n\r\n\n", "before_files": [{"content": "from django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.dashboard import DashboardComponent\nfrom adhocracy4.dashboard import components\n\nfrom . import views\n\n\nclass LiveStreamComponent(DashboardComponent):\n identifier = 'live_stream'\n weight = 20\n label = _('Live Stream')\n\n def is_effective(self, module):\n module_app = module.phases[0].content().app\n return (module_app == 'meinberlin_livequestions' and\n not module.project.is_draft)\n\n def get_progress(self, module):\n return 0, 0\n\n def get_base_url(self, module):\n return reverse('a4dashboard:livequestions-livestream', kwargs={\n 'module_slug': module.slug,\n })\n\n def get_urls(self):\n return [(\n r'^modules/(?P<module_slug>[-\\w_]+)/livestream/$',\n views.LiveStreamDashboardView.as_view(component=self),\n 'livequestions-livestream'\n )]\n\n\ncomponents.register_module(LiveStreamComponent())\n", "path": "meinberlin/apps/livequestions/dashboard.py"}]} | 1,241 | 142 |
gh_patches_debug_11143 | rasdani/github-patches | git_diff | qtile__qtile-2811 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Set version using importlib.metadata
<!--
Please do not ask general questions here! There are [community
contact](https://github.com/qtile/qtile#community) options for that.
If you are suggesting a new feature/enhancement please instead post it on the
discussions board as an idea: https://github.com/qtile/qtile/discussions/categories/ideas
-->
# Issue description
Currently, if setuptools is not installed on the system running qtile, it will run into issues upon start.
An Arch user reported this downstream: https://bugs.archlinux.org/task/71804
Apart from also guarding against `ModuleNotFoundError`, I think it would be a good idea to [use importlib.metadata to provide qtile's version](https://docs.python.org/3.9/library/importlib.metadata.html?highlight=importlib%20metadata#distribution-versions) for newer Python versions.
<!--
A brief discussion of what failed and how it failed. A description of
what you tried is helpful, i.e. "When I use lazy.kill() on a window I get
the following stack trace" instead of "Closing windows doesn't work".
-->
# Qtile version
0.18.1
# Stack traces
Copied verbatim from the issue reported downstream:
```
Traceback (most recent call last):
File "/usr/lib/python3.9/site-packages/libqtile/scripts/main.py", line 9, in <module>
import pkg_resources
ModuleNotFoundError: No module named 'pkg_resources'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/bin/qtile", line 33, in <module>
sys.exit(load_entry_point('qtile==0.18.1.dev0+g8e7ecc0a.d20210719', 'console_scripts', 'qtile')())
File "/usr/bin/qtile", line 25, in importlib_load_entry_point
return next(matches).load()
File "/usr/lib/python3.9/importlib/metadata.py", line 77, in load
module = import_module(match.group('module'))
File "/usr/lib/python3.9/importlib/__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1030, in _gcd_import
File "<frozen importlib._bootstrap>", line 1007, in _find_and_load
File "<frozen importlib._bootstrap>", line 986, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 680, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 850, in exec_module
File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed
File "/usr/lib/python3.9/site-packages/libqtile/scripts/main.py", line 11, in <module>
except (pkg_resources.DistributionNotFound, ImportError):
NameError: name 'pkg_resources' is not defined
```
# Configuration
not important for this issue
</issue>
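
One possible shape of the suggested fix (a sketch, not the project's final patch, which is shown further below): resolve the version via the stdlib and fall back to `'dev'` when that fails, taking care never to reference a name whose import may itself have failed:

```python
def _get_version() -> str:
    """Best-effort version lookup that works without setuptools installed."""
    try:
        from importlib.metadata import PackageNotFoundError, version  # Python >= 3.8
    except ImportError:
        return "dev"  # pre-3.8 interpreters would need the pkg_resources path instead
    try:
        return version("qtile")
    except PackageNotFoundError:  # e.g. running from an uninstalled source checkout
        return "dev"

VERSION = _get_version()
```

Splitting the lookup into two `try` blocks avoids the `NameError` in the traceback above, where `pkg_resources` was referenced inside the `except` clause even though its own import had already failed.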
<code>
[start of libqtile/scripts/main.py]
1 import argparse
2 import logging
3 import sys
4
5 from libqtile.log_utils import init_log
6 from libqtile.scripts import check, cmd_obj, migrate, run_cmd, shell, start, top
7
8 try:
9 import pkg_resources
10 VERSION = pkg_resources.require("qtile")[0].version
11 except (pkg_resources.DistributionNotFound, ImportError):
12 VERSION = 'dev'
13
14
15 def main():
16 parent_parser = argparse.ArgumentParser(add_help=False)
17 parent_parser.add_argument(
18 '-l', '--log-level',
19 default='WARNING',
20 dest='log_level',
21 type=str.upper,
22 choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'),
23 help='Set qtile log level'
24 )
25
26 main_parser = argparse.ArgumentParser(
27 prog='qtile',
28 description='A full-featured, pure-Python tiling window manager.',
29 )
30 main_parser.add_argument(
31 '-v', '--version',
32 action='version',
33 version=VERSION,
34 )
35
36 subparsers = main_parser.add_subparsers()
37 start.add_subcommand(subparsers, [parent_parser])
38 shell.add_subcommand(subparsers, [parent_parser])
39 top.add_subcommand(subparsers, [parent_parser])
40 run_cmd.add_subcommand(subparsers, [parent_parser])
41 cmd_obj.add_subcommand(subparsers, [parent_parser])
42 check.add_subcommand(subparsers, [parent_parser])
43 migrate.add_subcommand(subparsers, [parent_parser])
44
45 # `qtile help` should print help
46 def print_help(options):
47 main_parser.print_help()
48 help_ = subparsers.add_parser("help", help="Print help information and exit")
49 help_.set_defaults(func=print_help)
50
51 options = main_parser.parse_args()
52 try:
53 log_level = getattr(logging, options.log_level)
54 init_log(log_level=log_level, log_color=sys.stdout.isatty())
55 options.func(options)
56 except AttributeError:
57 main_parser.print_usage()
58 print("")
59 print("Did you mean:")
60 print(" ".join(sys.argv + ['start']))
61 sys.exit(1)
62
63
64 if __name__ == "__main__":
65 main()
66
[end of libqtile/scripts/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libqtile/scripts/main.py b/libqtile/scripts/main.py
--- a/libqtile/scripts/main.py
+++ b/libqtile/scripts/main.py
@@ -6,10 +6,16 @@
from libqtile.scripts import check, cmd_obj, migrate, run_cmd, shell, start, top
try:
- import pkg_resources
- VERSION = pkg_resources.require("qtile")[0].version
-except (pkg_resources.DistributionNotFound, ImportError):
- VERSION = 'dev'
+ # Python>3.7 can get the version from importlib
+ from importlib.metadata import distribution
+ VERSION = distribution("qtile").version
+except ModuleNotFoundError:
+ try:
+ # pkg_resources is required for 3.7
+ import pkg_resources
+ VERSION = pkg_resources.require("qtile")[0].version
+ except (pkg_resources.DistributionNotFound, ModuleNotFoundError):
+ VERSION = 'dev'
def main():
| {"golden_diff": "diff --git a/libqtile/scripts/main.py b/libqtile/scripts/main.py\n--- a/libqtile/scripts/main.py\n+++ b/libqtile/scripts/main.py\n@@ -6,10 +6,16 @@\n from libqtile.scripts import check, cmd_obj, migrate, run_cmd, shell, start, top\n \n try:\n- import pkg_resources\n- VERSION = pkg_resources.require(\"qtile\")[0].version\n-except (pkg_resources.DistributionNotFound, ImportError):\n- VERSION = 'dev'\n+ # Python>3.7 can get the version from importlib\n+ from importlib.metadata import distribution\n+ VERSION = distribution(\"qtile\").version\n+except ModuleNotFoundError:\n+ try:\n+ # pkg_resources is required for 3.7\n+ import pkg_resources\n+ VERSION = pkg_resources.require(\"qtile\")[0].version\n+ except (pkg_resources.DistributionNotFound, ModuleNotFoundError):\n+ VERSION = 'dev'\n \n \n def main():\n", "issue": "Set version using importlib.metadata\n<!--\r\nPlease do not ask general questions here! There are [community\r\ncontact](https://github.com/qtile/qtile#community) options for that.\r\n\r\nIf you are suggesting a new feature/enhancement please instead post it on the\r\ndiscussions board as an idea: https://github.com/qtile/qtile/discussions/categories/ideas\r\n-->\r\n\r\n# Issue description\r\n\r\nCurrently, if setuptools is not installed on the system running qtile, it will run into issues upon start.\r\nAn Arch user reported this downstream: https://bugs.archlinux.org/task/71804\r\n\r\nApart from also guarding against `ModuleNotFoundError` I think it could be a great idea to [use importlib.metadata to provide qtile's version](https://docs.python.org/3.9/library/importlib.metadata.html?highlight=importlib%20metadata#distribution-versions) instead for newer python versions.\r\n<!--\r\nA brief discussion of what failed and how it failed. A description of\r\nwhat you tried is helpful, i.e. 
\"When I use lazy.kill() on a window I get\r\nthe following stack trace\" instead of \"Closing windows doesn't work\".\r\n-->\r\n\r\n# Qtile version\r\n\r\n0.18.1\r\n\r\n# Stack traces\r\n\r\nCopied verbatim from the issue reported downstream:\r\n\r\n```\r\nTraceback (most recent call last):\r\nFile \"/usr/lib/python3.9/site-packages/libqtile/scripts/main.py\", line 9, in <module>\r\nimport pkg_resources\r\nModuleNotFoundError: No module named 'pkg_resources'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\nFile \"/usr/bin/qtile\", line 33, in <module>\r\nsys.exit(load_entry_point('qtile==0.18.1.dev0+g8e7ecc0a.d20210719', 'console_scripts', 'qtile')())\r\nFile \"/usr/bin/qtile\", line 25, in importlib_load_entry_point\r\nreturn next(matches).load()\r\nFile \"/usr/lib/python3.9/importlib/metadata.py\", line 77, in load\r\nmodule = import_module(match.group('module'))\r\nFile \"/usr/lib/python3.9/importlib/__init__.py\", line 127, in import_module\r\nreturn _bootstrap._gcd_import(name[level:], package, level)\r\nFile \"<frozen importlib._bootstrap>\", line 1030, in _gcd_import\r\nFile \"<frozen importlib._bootstrap>\", line 1007, in _find_and_load\r\nFile \"<frozen importlib._bootstrap>\", line 986, in _find_and_load_unlocked\r\nFile \"<frozen importlib._bootstrap>\", line 680, in _load_unlocked\r\nFile \"<frozen importlib._bootstrap_external>\", line 850, in exec_module\r\nFile \"<frozen importlib._bootstrap>\", line 228, in _call_with_frames_removed\r\nFile \"/usr/lib/python3.9/site-packages/libqtile/scripts/main.py\", line 11, in <module>\r\nexcept (pkg_resources.DistributionNotFound, ImportError):\r\nNameError: name 'pkg_resources' is not defined\r\n```\r\n\r\n# Configuration\r\n\r\nnot important for this issue\n", "before_files": [{"content": "import argparse\nimport logging\nimport sys\n\nfrom libqtile.log_utils import init_log\nfrom libqtile.scripts import check, cmd_obj, migrate, run_cmd, shell, start, top\n\ntry:\n import pkg_resources\n VERSION = pkg_resources.require(\"qtile\")[0].version\nexcept (pkg_resources.DistributionNotFound, ImportError):\n VERSION = 'dev'\n\n\ndef main():\n parent_parser = argparse.ArgumentParser(add_help=False)\n parent_parser.add_argument(\n '-l', '--log-level',\n default='WARNING',\n dest='log_level',\n type=str.upper,\n choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'),\n help='Set qtile log level'\n )\n\n main_parser = argparse.ArgumentParser(\n prog='qtile',\n description='A full-featured, pure-Python tiling window manager.',\n )\n main_parser.add_argument(\n '-v', '--version',\n action='version',\n version=VERSION,\n )\n\n subparsers = main_parser.add_subparsers()\n start.add_subcommand(subparsers, [parent_parser])\n shell.add_subcommand(subparsers, [parent_parser])\n top.add_subcommand(subparsers, [parent_parser])\n run_cmd.add_subcommand(subparsers, [parent_parser])\n cmd_obj.add_subcommand(subparsers, [parent_parser])\n check.add_subcommand(subparsers, [parent_parser])\n migrate.add_subcommand(subparsers, [parent_parser])\n\n # `qtile help` should print help\n def print_help(options):\n main_parser.print_help()\n help_ = subparsers.add_parser(\"help\", help=\"Print help information and exit\")\n help_.set_defaults(func=print_help)\n\n options = main_parser.parse_args()\n try:\n log_level = getattr(logging, options.log_level)\n init_log(log_level=log_level, log_color=sys.stdout.isatty())\n options.func(options)\n except AttributeError:\n main_parser.print_usage()\n 
print(\"\")\n print(\"Did you mean:\")\n print(\" \".join(sys.argv + ['start']))\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "libqtile/scripts/main.py"}]} | 1,798 | 216 |
gh_patches_debug_11452 | rasdani/github-patches | git_diff | cupy__cupy-1138 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
cupy.random.permutation() overwrites its argument.
`cupy.random.permutation()` overwrites its argument.
This is incompatible with `numpy.random.permutation`.
</issue>
<code>
[start of cupy/random/permutations.py]
1 from cupy.random import generator
2 import six
3
4
5 def shuffle(a):
6 """Shuffles an array.
7
8 Args:
9 a (cupy.ndarray): The array to be shuffled.
10
11 .. seealso:: :func:`numpy.random.shuffle`
12
13 """
14 rs = generator.get_random_state()
15 return rs.shuffle(a)
16
17
18 def permutation(a):
19 """Returns a permuted range or shuffles an array."""
20 if isinstance(a, six.integer_types):
21 rs = generator.get_random_state()
22 return rs.permutation(a)
23 else:
24 return shuffle(a)
25
[end of cupy/random/permutations.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cupy/random/permutations.py b/cupy/random/permutations.py
--- a/cupy/random/permutations.py
+++ b/cupy/random/permutations.py
@@ -16,9 +16,20 @@
def permutation(a):
- """Returns a permuted range or shuffles an array."""
+ """Returns a permuted range or a permutation of an array.
+
+ Args:
+ a (int or cupy.ndarray): The range or the array to be shuffled.
+
+ Returns:
+ cupy.ndarray: If `a` is an integer, it is permutation range between 0
+ and `a` - 1.
+ Otherwise, it is a permutation of `a`.
+
+ .. seealso:: :func:`numpy.random.permutation`
+ """
+ rs = generator.get_random_state()
if isinstance(a, six.integer_types):
- rs = generator.get_random_state()
return rs.permutation(a)
else:
- return shuffle(a)
+ return a[rs.permutation(len(a))]
| {"golden_diff": "diff --git a/cupy/random/permutations.py b/cupy/random/permutations.py\n--- a/cupy/random/permutations.py\n+++ b/cupy/random/permutations.py\n@@ -16,9 +16,20 @@\n \n \n def permutation(a):\n- \"\"\"Returns a permuted range or shuffles an array.\"\"\"\n+ \"\"\"Returns a permuted range or a permutation of an array.\n+\n+ Args:\n+ a (int or cupy.ndarray): The range or the array to be shuffled.\n+\n+ Returns:\n+ cupy.ndarray: If `a` is an integer, it is permutation range between 0\n+ and `a` - 1.\n+ Otherwise, it is a permutation of `a`.\n+\n+ .. seealso:: :func:`numpy.random.permutation`\n+ \"\"\"\n+ rs = generator.get_random_state()\n if isinstance(a, six.integer_types):\n- rs = generator.get_random_state()\n return rs.permutation(a)\n else:\n- return shuffle(a)\n+ return a[rs.permutation(len(a))]\n", "issue": "cupy.random.permutation() overwrites its argument.\n`cupy.random.permutation()` overwrites its argument.\r\nThis is incompatible with `numpy.random.permutation`.\r\n\n", "before_files": [{"content": "from cupy.random import generator\nimport six\n\n\ndef shuffle(a):\n \"\"\"Shuffles an array.\n\n Args:\n a (cupy.ndarray): The array to be shuffled.\n\n .. seealso:: :func:`numpy.random.shuffle`\n\n \"\"\"\n rs = generator.get_random_state()\n return rs.shuffle(a)\n\n\ndef permutation(a):\n \"\"\"Returns a permuted range or shuffles an array.\"\"\"\n if isinstance(a, six.integer_types):\n rs = generator.get_random_state()\n return rs.permutation(a)\n else:\n return shuffle(a)\n", "path": "cupy/random/permutations.py"}]} | 735 | 229 |
gh_patches_debug_64393 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3328 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider longhorn_steakhouse is broken
During the global build at 2021-10-20-14-42-48, spider **longhorn_steakhouse** failed with **0 features** and **0 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/logs/longhorn_steakhouse.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/longhorn_steakhouse.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/longhorn_steakhouse.geojson))
</issue>
<code>
[start of locations/spiders/longhorn_steakhouse.py]
1 # -*- coding: utf-8 -*-
2 import json
3 import re
4
5 import scrapy
6
7 from locations.items import GeojsonPointItem
8 from locations.hours import OpeningHours
9
10
11 class LongHornSteakhouseSpider(scrapy.Spider):
12 name = "longhorn_steakhouse"
13 item_attributes = {'brand': 'LongHorn Steakhouse', 'brand_wikidata': "Q3259007"}
14 allowed_domains = []
15 start_urls = [
16 'https://www.longhornsteakhouse.com/locations-sitemap.xml',
17 ]
18 custom_settings = {
19 'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
20 }
21 download_delay = 5
22
23 def parse_hours(self, hours):
24 opening_hours = OpeningHours()
25
26 for hour in hours:
27 day, open_close = hour.split(' ')
28 open_time, close_time = open_close.split('-')
29 opening_hours.add_range(day=day, open_time=open_time, close_time=close_time, time_format='%H:%M')
30 return opening_hours.as_opening_hours()
31
32 def parse(self, response):
33 response.selector.remove_namespaces()
34 urls = response.xpath('//url/loc/text()').extract()
35 for url in urls:
36 yield scrapy.Request(url=url, callback=self.parse_store)
37
38 def parse_store(self, response):
39 store_data = response.xpath('//script[@type="application/ld+json" and contains(text(), "streetAddress")]/text()').extract_first()
40 if store_data:
41 data = json.loads(store_data)
42 ref = re.search(r'.+/(.+?)/?(?:\.html|$)', response.url).group(1)
43
44 # Handle store pages that are missing the application/ld+json data
45 addr, city_state_zip, phone = response.xpath('//p[@id="info-link-webhead"]/text()').extract()
46 city, state, postcode = re.search(r'(.*?),\s([A-Z]{2})\s([\d-]+)$', city_state_zip).groups()
47
48 properties = {
49 'name': data.get("name") or response.xpath('//h1[@class="style_h1"]/text()').extract_first().strip(),
50 'ref': data["branchCode"] or ref,
51 'addr_full': data["address"]["streetAddress"].strip() or addr.strip(),
52 'city': data["address"]["addressLocality"] or city,
53 'state': data["address"]["addressRegion"] or state,
54 'postcode': data["address"]["postalCode"] or postcode,
55 'country': data["address"]["addressCountry"],
56 'phone': data.get("telephone") or phone.strip(),
57 'website': data.get("url") or response.url,
58 'lat': float(data["geo"]["latitude"]),
59 'lon': float(data["geo"]["longitude"]),
60 }
61
62 hours = data.get("openingHours")
63 if hours:
64 store_hours = self.parse_hours(hours)
65 properties["opening_hours"] = store_hours
66
67 yield GeojsonPointItem(**properties)
68
[end of locations/spiders/longhorn_steakhouse.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/longhorn_steakhouse.py b/locations/spiders/longhorn_steakhouse.py
--- a/locations/spiders/longhorn_steakhouse.py
+++ b/locations/spiders/longhorn_steakhouse.py
@@ -18,7 +18,7 @@
custom_settings = {
'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',
}
- download_delay = 5
+ download_delay = 1
def parse_hours(self, hours):
opening_hours = OpeningHours()
| {"golden_diff": "diff --git a/locations/spiders/longhorn_steakhouse.py b/locations/spiders/longhorn_steakhouse.py\n--- a/locations/spiders/longhorn_steakhouse.py\n+++ b/locations/spiders/longhorn_steakhouse.py\n@@ -18,7 +18,7 @@\n custom_settings = {\n 'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',\n }\n- download_delay = 5\n+ download_delay = 1\n \n def parse_hours(self, hours):\n opening_hours = OpeningHours()\n", "issue": "Spider longhorn_steakhouse is broken\nDuring the global build at 2021-10-20-14-42-48, spider **longhorn_steakhouse** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/logs/longhorn_steakhouse.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/longhorn_steakhouse.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/longhorn_steakhouse.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport re\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass LongHornSteakhouseSpider(scrapy.Spider):\n name = \"longhorn_steakhouse\"\n item_attributes = {'brand': 'LongHorn Steakhouse', 'brand_wikidata': \"Q3259007\"}\n allowed_domains = []\n start_urls = [\n 'https://www.longhornsteakhouse.com/locations-sitemap.xml',\n ]\n custom_settings = {\n 'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',\n }\n download_delay = 5\n\n def parse_hours(self, hours):\n opening_hours = OpeningHours()\n\n for hour in hours:\n day, open_close = hour.split(' ')\n open_time, close_time = open_close.split('-')\n opening_hours.add_range(day=day, open_time=open_time, close_time=close_time, time_format='%H:%M')\n return opening_hours.as_opening_hours()\n\n def parse(self, response):\n response.selector.remove_namespaces()\n urls = response.xpath('//url/loc/text()').extract()\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse_store)\n\n def parse_store(self, response):\n store_data = response.xpath('//script[@type=\"application/ld+json\" and contains(text(), \"streetAddress\")]/text()').extract_first()\n if store_data:\n data = json.loads(store_data)\n ref = re.search(r'.+/(.+?)/?(?:\\.html|$)', response.url).group(1)\n\n # Handle store pages that are missing the application/ld+json data\n addr, city_state_zip, phone = response.xpath('//p[@id=\"info-link-webhead\"]/text()').extract()\n city, state, postcode = re.search(r'(.*?),\\s([A-Z]{2})\\s([\\d-]+)$', city_state_zip).groups()\n\n properties = {\n 'name': data.get(\"name\") or response.xpath('//h1[@class=\"style_h1\"]/text()').extract_first().strip(),\n 'ref': data[\"branchCode\"] or ref,\n 'addr_full': data[\"address\"][\"streetAddress\"].strip() or addr.strip(),\n 'city': data[\"address\"][\"addressLocality\"] or city,\n 'state': data[\"address\"][\"addressRegion\"] or state,\n 'postcode': data[\"address\"][\"postalCode\"] or postcode,\n 'country': data[\"address\"][\"addressCountry\"],\n 'phone': data.get(\"telephone\") or phone.strip(),\n 'website': data.get(\"url\") or response.url,\n 'lat': float(data[\"geo\"][\"latitude\"]),\n 'lon': float(data[\"geo\"][\"longitude\"]),\n }\n\n hours = data.get(\"openingHours\")\n if hours:\n store_hours = self.parse_hours(hours)\n properties[\"opening_hours\"] = store_hours\n\n 
yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/longhorn_steakhouse.py"}]} | 1,562 | 168 |
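The one-line fix in this record only lowers Scrapy's politeness delay so the crawl finishes within the build window; nothing else changes. A hypothetical spider showing the knob being turned — Scrapy sleeps roughly `download_delay` seconds between requests to the same site:

```python
import scrapy


class ExampleSpider(scrapy.Spider):
    # Illustrative spider, not the real alltheplaces code.
    name = "example"
    start_urls = ["https://example.com/locations-sitemap.xml"]
    download_delay = 1  # the broken spider used 5; 1s keeps ~60 requests/minute

    def parse(self, response):
        self.logger.info("fetched %s", response.url)
```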
gh_patches_debug_60375 | rasdani/github-patches | git_diff | UTNkar__moore-794 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Translations for footer_en missing in production
I noticed that in the settings the footer option is called footer_en. Seems like a translation has gone missing

</issue>
<code>
[start of src/branding/models.py]
1 from django.db import models
2 from wagtail.contrib.settings.models import BaseSetting, register_setting
3
4 from django.utils.translation import gettext_lazy as _
5 from wagtail.admin.edit_handlers import FieldPanel, FieldRowPanel, \
6 MultiFieldPanel, StreamFieldPanel, TabbedInterface, ObjectList
7 from wagtail.core import blocks
8 from wagtail.core.fields import StreamField
9 from wagtail.images.edit_handlers import ImageChooserPanel
10 from utils.translation import TranslatedField
11
12
13 @register_setting(icon='fa-window-minimize')
14 class FooterSettings(BaseSetting):
15 class Meta:
16 verbose_name = _('footer_en') # quickfix
17
18 footer_en = StreamField(
19 [('column', blocks.StructBlock([
20 ('size', blocks.IntegerBlock(min_value=1, max_value=12)),
21 ('content', blocks.RichTextBlock()),
22 ]))],
23 blank=True,
24 )
25
26 footer_sv = StreamField(
27 [('column', blocks.StructBlock([
28 ('size', blocks.IntegerBlock(min_value=1, max_value=12)),
29 ('content', blocks.RichTextBlock()),
30 ]))],
31 blank=True,
32 )
33
34 footer = TranslatedField('footer_en', 'footer_sv')
35
36 panels_sv = [
37 StreamFieldPanel('footer_sv')
38 ]
39
40 panels_en = [
41 StreamFieldPanel('footer_en')
42 ]
43
44 edit_handler = TabbedInterface([
45 ObjectList(panels_en, heading=_("English")),
46 ObjectList(panels_sv, heading=_("Swedish"))
47 ])
48
49
50 @register_setting(icon='openquote')
51 class SocialMediaSettings(BaseSetting):
52 class Meta:
53 verbose_name = _('social media accounts')
54
55 facebook = models.URLField(
56 help_text=_('Your Facebook page URL'),
57 blank=True,
58 )
59 instagram = models.CharField(
60 max_length=255,
61 help_text=_('Your Instagram username, without the @'),
62 blank=True,
63 )
64 twitter = models.CharField(
65 max_length=255,
66 help_text=_('Your Twitter username, without the @'),
67 blank=True,
68 )
69
70
71 class Logo(models.Model):
72 class Meta:
73 verbose_name = _('logo')
74 verbose_name_plural = _('logos')
75
76 def __str__(self):
77 logotext = str(_('logo'))
78 return logotext.capitalize()
79
80 CATEGORY_CHOICES = (
81 ('committee', _('Committee')),
82 ('section', _('Section')),
83 )
84
85 category = models.CharField(
86 max_length=20,
87 choices=CATEGORY_CHOICES,
88 verbose_name=_('category'),
89 blank=False,
90 null=False,
91 )
92
93 link = models.URLField(
94 verbose_name=_('links to'),
95 null=False,
96 blank=False,
97 )
98
99 logo = models.ForeignKey(
100 'wagtailimages.Image',
101 verbose_name=_('logo'),
102 null=True,
103 blank=True,
104 on_delete=models.SET_NULL,
105 related_name='+'
106 )
107
108 logo_white = models.ForeignKey(
109 'wagtailimages.Image',
110 verbose_name=_('white logo'),
111 null=True,
112 blank=True,
113 on_delete=models.SET_NULL,
114 related_name='+'
115 )
116
117 logo_black = models.ForeignKey(
118 'wagtailimages.Image',
119 verbose_name=_('black logo'),
120 null=True,
121 blank=True,
122 on_delete=models.SET_NULL,
123 related_name='+'
124 )
125
126 belongs_to = models.ForeignKey(
127 'wagtailcore.Site',
128 verbose_name=_('belongs to'),
129 null=True,
130 blank=True,
131 on_delete=models.SET_NULL,
132 )
133
134 # ------ Administrator settings ------
135 panels = [MultiFieldPanel([
136 FieldRowPanel([
137 FieldPanel('category'),
138 FieldPanel('link'),
139 ]),
140 ImageChooserPanel('logo'),
141 ImageChooserPanel('logo_white'),
142 ImageChooserPanel('logo_black'),
143 FieldPanel('belongs_to'),
144 ])]
145
[end of src/branding/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/branding/models.py b/src/branding/models.py
--- a/src/branding/models.py
+++ b/src/branding/models.py
@@ -13,7 +13,7 @@
@register_setting(icon='fa-window-minimize')
class FooterSettings(BaseSetting):
class Meta:
- verbose_name = _('footer_en') # quickfix
+ verbose_name = _('footer') # quickfix
footer_en = StreamField(
[('column', blocks.StructBlock([
| {"golden_diff": "diff --git a/src/branding/models.py b/src/branding/models.py\n--- a/src/branding/models.py\n+++ b/src/branding/models.py\n@@ -13,7 +13,7 @@\n @register_setting(icon='fa-window-minimize')\n class FooterSettings(BaseSetting):\n class Meta:\n- verbose_name = _('footer_en') # quickfix\n+ verbose_name = _('footer') # quickfix\n \n footer_en = StreamField(\n [('column', blocks.StructBlock([\n", "issue": "Translations for footer_en missing in production\nI noticed that in the settings the footer option is called footer_en. Seems like a translation has gone missing\r\n\r\n\n", "before_files": [{"content": "from django.db import models\nfrom wagtail.contrib.settings.models import BaseSetting, register_setting\n\nfrom django.utils.translation import gettext_lazy as _\nfrom wagtail.admin.edit_handlers import FieldPanel, FieldRowPanel, \\\n MultiFieldPanel, StreamFieldPanel, TabbedInterface, ObjectList\nfrom wagtail.core import blocks\nfrom wagtail.core.fields import StreamField\nfrom wagtail.images.edit_handlers import ImageChooserPanel\nfrom utils.translation import TranslatedField\n\n\n@register_setting(icon='fa-window-minimize')\nclass FooterSettings(BaseSetting):\n class Meta:\n verbose_name = _('footer_en') # quickfix\n\n footer_en = StreamField(\n [('column', blocks.StructBlock([\n ('size', blocks.IntegerBlock(min_value=1, max_value=12)),\n ('content', blocks.RichTextBlock()),\n ]))],\n blank=True,\n )\n\n footer_sv = StreamField(\n [('column', blocks.StructBlock([\n ('size', blocks.IntegerBlock(min_value=1, max_value=12)),\n ('content', blocks.RichTextBlock()),\n ]))],\n blank=True,\n )\n\n footer = TranslatedField('footer_en', 'footer_sv')\n\n panels_sv = [\n StreamFieldPanel('footer_sv')\n ]\n\n panels_en = [\n StreamFieldPanel('footer_en')\n ]\n\n edit_handler = TabbedInterface([\n ObjectList(panels_en, heading=_(\"English\")),\n ObjectList(panels_sv, heading=_(\"Swedish\"))\n ])\n\n\n@register_setting(icon='openquote')\nclass SocialMediaSettings(BaseSetting):\n class Meta:\n verbose_name = _('social media accounts')\n\n facebook = models.URLField(\n help_text=_('Your Facebook page URL'),\n blank=True,\n )\n instagram = models.CharField(\n max_length=255,\n help_text=_('Your Instagram username, without the @'),\n blank=True,\n )\n twitter = models.CharField(\n max_length=255,\n help_text=_('Your Twitter username, without the @'),\n blank=True,\n )\n\n\nclass Logo(models.Model):\n class Meta:\n verbose_name = _('logo')\n verbose_name_plural = _('logos')\n\n def __str__(self):\n logotext = str(_('logo'))\n return logotext.capitalize()\n\n CATEGORY_CHOICES = (\n ('committee', _('Committee')),\n ('section', _('Section')),\n )\n\n category = models.CharField(\n max_length=20,\n choices=CATEGORY_CHOICES,\n verbose_name=_('category'),\n blank=False,\n null=False,\n )\n\n link = models.URLField(\n verbose_name=_('links to'),\n null=False,\n blank=False,\n )\n\n logo = models.ForeignKey(\n 'wagtailimages.Image',\n verbose_name=_('logo'),\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n logo_white = models.ForeignKey(\n 'wagtailimages.Image',\n verbose_name=_('white logo'),\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n logo_black = models.ForeignKey(\n 'wagtailimages.Image',\n verbose_name=_('black logo'),\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n belongs_to = models.ForeignKey(\n 'wagtailcore.Site',\n verbose_name=_('belongs to'),\n null=True,\n blank=True,\n 
on_delete=models.SET_NULL,\n )\n\n # ------ Administrator settings ------\n panels = [MultiFieldPanel([\n FieldRowPanel([\n FieldPanel('category'),\n FieldPanel('link'),\n ]),\n ImageChooserPanel('logo'),\n ImageChooserPanel('logo_white'),\n ImageChooserPanel('logo_black'),\n FieldPanel('belongs_to'),\n ])]\n", "path": "src/branding/models.py"}]} | 1,775 | 111 |
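The bug in this record is a mis-keyed translation id: `_('footer_en')` has no entry in the message catalogs, so the admin falls back to the raw key. The patch switches to the generic msgid while keeping the per-language fields. A stripped-down fragment of the pattern (illustrative, not the full Wagtail setting):

```python
from django.utils.translation import gettext_lazy as _


class FooterSettingsMeta:
    # One stable msgid for translators; the language variants live in the
    # footer_en / footer_sv fields, not in the verbose_name itself.
    verbose_name = _('footer')
```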
gh_patches_debug_86 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-2754 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Transitive import of mitmproxy.version causes warning
Since #1837, we import `.script`, which imports `.flow`, which imports `.version`.
This causes the following warning in pytest:
```
test/mitmproxy/test_version.py::test_version
/Users/kriechi/.pyenv/versions/3.5.3/lib/python3.5/runpy.py:125:
RuntimeWarning: 'mitmproxy.version' found in sys.modules after import of package
'mitmproxy', but prior to execution of 'mitmproxy.version'; this may result in
unpredictable behaviour
warn(RuntimeWarning(msg))
-- Docs: http://doc.pytest.org/en/latest/warnings.html
```
[Note](http://python-notes.curiousefficiency.org/en/latest/python_concepts/import_traps.html#the-double-import-trap)
> This next trap exists in all current versions of Python, including 3.3, and can be summed up in the following general guideline: “Never add a package directory, or any directory inside a package, directly to the Python path”.
> The reason this is problematic is that every module in that directory is now potentially accessible under two different names: as a top level module (since the directory is on sys.path) and as a submodule of the package (if the higher level directory containing the package itself is also on sys.path).
Maybe using the approach described [here](https://stackoverflow.com/questions/27947639/how-to-properly-create-a-pyinstaller-hook-or-maybe-hidden-import) works better?
</issue>
<code>
[start of mitmproxy/version.py]
1 import os
2 import subprocess
3
4 # The actual version string. For precompiled binaries, this will be changed to include the build
5 # tag, e.g. "3.0.0.dev0042-0xcafeabc"
6 VERSION = "3.0.0"
7 PATHOD = "pathod " + VERSION
8 MITMPROXY = "mitmproxy " + VERSION
9
10 # Serialization format version. This is displayed nowhere, it just needs to be incremented by one
11 # for each change in the file format.
12 FLOW_FORMAT_VERSION = 5
13
14
15 def get_version(dev: bool = False, build: bool = False, refresh: bool = False) -> str:
16 """
17 Return a detailed version string, sourced either from a hardcoded VERSION constant
18 or obtained dynamically using git.
19
20 Args:
21 dev: If True, non-tagged releases will include a ".devXXXX" suffix, where XXXX is the number
22 of commits since the last tagged release.
23 build: If True, non-tagged releases will include a "-0xXXXXXXX" suffix, where XXXXXXX are
24 the first seven digits of the commit hash.
25 refresh: If True, always try to use git instead of a potentially hardcoded constant.
26 """
27
28 mitmproxy_version = VERSION
29
30 if "dev" in VERSION and not refresh:
31 pass # There is a hardcoded build tag, so we just use what's there.
32 elif dev or build:
33 here = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
34 try:
35 git_describe = subprocess.check_output(
36 ['git', 'describe', '--tags', '--long'],
37 stderr=subprocess.STDOUT,
38 cwd=here,
39 )
40 last_tag, tag_dist, commit = git_describe.decode().strip().rsplit("-", 2)
41 commit = commit.lstrip("g")[:7]
42 tag_dist = int(tag_dist)
43 except Exception:
44 pass
45 else:
46 # Remove current suffix
47 mitmproxy_version = mitmproxy_version.split(".dev")[0]
48
49 # Add suffix for non-tagged releases
50 if tag_dist > 0:
51 mitmproxy_version += ".dev{tag_dist}".format(tag_dist=tag_dist)
52 # The wheel build tag (we use the commit) must start with a digit, so we include "0x"
53 mitmproxy_version += "-0x{commit}".format(commit=commit)
54
55 if not dev:
56 mitmproxy_version = mitmproxy_version.split(".dev")[0]
57 elif not build:
58 mitmproxy_version = mitmproxy_version.split("-0x")[0]
59
60 return mitmproxy_version
61
62
63 if __name__ == "__main__":
64 print(VERSION)
65
[end of mitmproxy/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mitmproxy/version.py b/mitmproxy/version.py
--- a/mitmproxy/version.py
+++ b/mitmproxy/version.py
@@ -60,5 +60,5 @@
return mitmproxy_version
-if __name__ == "__main__":
+if __name__ == "__main__": # pragma: no cover
print(VERSION)
| {"golden_diff": "diff --git a/mitmproxy/version.py b/mitmproxy/version.py\n--- a/mitmproxy/version.py\n+++ b/mitmproxy/version.py\n@@ -60,5 +60,5 @@\n return mitmproxy_version\n \n \n-if __name__ == \"__main__\":\n+if __name__ == \"__main__\": # pragma: no cover\n print(VERSION)\n", "issue": "Transitive import of mitmproxy.version causes warning\nSince #1837, we import `.script`, will imports `.flow`, which imports `.version`.\r\nThis causes the following warning in pytest:\r\n\r\n```\r\ntest/mitmproxy/test_version.py::test_version\r\n /Users/kriechi/.pyenv/versions/3.5.3/lib/python3.5/runpy.py:125: \r\nRuntimeWarning: 'mitmproxy.version' found in sys.modules after import of package \r\n'mitmproxy', but prior to execution of 'mitmproxy.version'; this may result in \r\nunpredictable behaviour\r\n warn(RuntimeWarning(msg))\r\n\r\n-- Docs: http://doc.pytest.org/en/latest/warnings.html\r\n```\r\n\r\n[Note](http://python-notes.curiousefficiency.org/en/latest/python_concepts/import_traps.html#the-double-import-trap)\r\n> This next trap exists in all current versions of Python, including 3.3, and can be summed up in the following general guideline: \u201cNever add a package directory, or any directory inside a package, directly to the Python path\u201d.\r\n\r\n> The reason this is problematic is that every module in that directory is now potentially accessible under two different names: as a top level module (since the directory is on sys.path) and as a submodule of the package (if the higher level directory containing the package itself is also on sys.path).\r\n\r\nMaybe using the approach described [here](https://stackoverflow.com/questions/27947639/how-to-properly-create-a-pyinstaller-hook-or-maybe-hidden-import) works better?\n", "before_files": [{"content": "import os\nimport subprocess\n\n# The actual version string. For precompiled binaries, this will be changed to include the build\n# tag, e.g. \"3.0.0.dev0042-0xcafeabc\"\nVERSION = \"3.0.0\"\nPATHOD = \"pathod \" + VERSION\nMITMPROXY = \"mitmproxy \" + VERSION\n\n# Serialization format version. 
This is displayed nowhere, it just needs to be incremented by one\n# for each change in the file format.\nFLOW_FORMAT_VERSION = 5\n\n\ndef get_version(dev: bool = False, build: bool = False, refresh: bool = False) -> str:\n \"\"\"\n Return a detailed version string, sourced either from a hardcoded VERSION constant\n or obtained dynamically using git.\n\n Args:\n dev: If True, non-tagged releases will include a \".devXXXX\" suffix, where XXXX is the number\n of commits since the last tagged release.\n build: If True, non-tagged releases will include a \"-0xXXXXXXX\" suffix, where XXXXXXX are\n the first seven digits of the commit hash.\n refresh: If True, always try to use git instead of a potentially hardcoded constant.\n \"\"\"\n\n mitmproxy_version = VERSION\n\n if \"dev\" in VERSION and not refresh:\n pass # There is a hardcoded build tag, so we just use what's there.\n elif dev or build:\n here = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\"))\n try:\n git_describe = subprocess.check_output(\n ['git', 'describe', '--tags', '--long'],\n stderr=subprocess.STDOUT,\n cwd=here,\n )\n last_tag, tag_dist, commit = git_describe.decode().strip().rsplit(\"-\", 2)\n commit = commit.lstrip(\"g\")[:7]\n tag_dist = int(tag_dist)\n except Exception:\n pass\n else:\n # Remove current suffix\n mitmproxy_version = mitmproxy_version.split(\".dev\")[0]\n\n # Add suffix for non-tagged releases\n if tag_dist > 0:\n mitmproxy_version += \".dev{tag_dist}\".format(tag_dist=tag_dist)\n # The wheel build tag (we use the commit) must start with a digit, so we include \"0x\"\n mitmproxy_version += \"-0x{commit}\".format(commit=commit)\n\n if not dev:\n mitmproxy_version = mitmproxy_version.split(\".dev\")[0]\n elif not build:\n mitmproxy_version = mitmproxy_version.split(\"-0x\")[0]\n\n return mitmproxy_version\n\n\nif __name__ == \"__main__\":\n print(VERSION)\n", "path": "mitmproxy/version.py"}]} | 1,578 | 83 |
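The merged fix only tags the guard for coverage; the warning itself comes from the double-import trap, which is why `mitmproxy.version` must stay cheap and side-effect-free at import time. A generic sketch of the safe module shape, assuming nothing mitmproxy-specific:

```python
# version_demo.py -- module-level code runs on *every* import path, so keep it
# to plain assignments; anything script-like stays behind the guard.
VERSION = "3.0.0"


def main() -> None:
    print(VERSION)


if __name__ == "__main__":  # pragma: no cover  (never reached under pytest)
    main()
```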
gh_patches_debug_25459 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-948 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
anytime_fitness.py null values
The scraper currently includes address2 whether it is null or not, resulting in ", None" being appended to many (most?) of the addr:full fields.
</issue>
<code>
[start of locations/spiders/anytime_fitness.py]
1 # -*- coding: utf-8 -*-
2 import scrapy
3 from locations.items import GeojsonPointItem
4 import json
5
6 class AnytimeFitnessSpider(scrapy.Spider):
7 name = 'anytime_fitness'
8 allowed_domains = ['www.anytimefitness.com']
9
10 def start_requests(self):
11 url = 'https://www.anytimefitness.com/wp-content/uploads/gyms.json'
12 yield scrapy.Request(url, callback=self.parse)
13
14 def parse(self, response):
15 gyms = json.loads(response.body_as_unicode())
16
17 for gym in gyms:
18 yield GeojsonPointItem(
19 lat = gym['latitude'],
20 lon = gym['longitude'],
21 addr_full = '{}, {}'.format(gym['content']['address'], gym['content']['address2']),
22 city = gym['content']['city'],
23 phone = gym['content']['phone'],
24 state = gym['content']['state_abbr'],
25 postcode = gym['content']['zip'],
26 ref = gym['content']['url'],
27 country = gym['content']['country']
28 )
29
30
31
32
[end of locations/spiders/anytime_fitness.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/anytime_fitness.py b/locations/spiders/anytime_fitness.py
--- a/locations/spiders/anytime_fitness.py
+++ b/locations/spiders/anytime_fitness.py
@@ -2,6 +2,8 @@
import scrapy
from locations.items import GeojsonPointItem
import json
+import html
+
class AnytimeFitnessSpider(scrapy.Spider):
name = 'anytime_fitness'
@@ -18,14 +20,13 @@
yield GeojsonPointItem(
lat = gym['latitude'],
lon = gym['longitude'],
- addr_full = '{}, {}'.format(gym['content']['address'], gym['content']['address2']),
+ addr_full = ", ".join(filter(None, [gym['content']['address'], gym['content']['address2']])),
city = gym['content']['city'],
phone = gym['content']['phone'],
state = gym['content']['state_abbr'],
postcode = gym['content']['zip'],
ref = gym['content']['url'],
- country = gym['content']['country']
+ country = gym['content']['country'],
+ name = html.unescape(gym['content']['title']),
+ extras = {"number": gym['content']['number']}
)
-
-
-
| {"golden_diff": "diff --git a/locations/spiders/anytime_fitness.py b/locations/spiders/anytime_fitness.py\n--- a/locations/spiders/anytime_fitness.py\n+++ b/locations/spiders/anytime_fitness.py\n@@ -2,6 +2,8 @@\n import scrapy\n from locations.items import GeojsonPointItem\n import json\n+import html\n+\n \n class AnytimeFitnessSpider(scrapy.Spider):\n name = 'anytime_fitness'\n@@ -18,14 +20,13 @@\n yield GeojsonPointItem(\n lat = gym['latitude'],\n lon = gym['longitude'],\n- addr_full = '{}, {}'.format(gym['content']['address'], gym['content']['address2']),\n+ addr_full = \", \".join(filter(None, [gym['content']['address'], gym['content']['address2']])),\n city = gym['content']['city'],\n phone = gym['content']['phone'],\n state = gym['content']['state_abbr'],\n postcode = gym['content']['zip'],\n ref = gym['content']['url'],\n- country = gym['content']['country']\n+ country = gym['content']['country'],\n+ name = html.unescape(gym['content']['title']),\n+ extras = {\"number\": gym['content']['number']}\n )\n- \n- \n-\n", "issue": "anytime_fitness.py null values\nThe scraper currently includes address2 whether it is null or not, resulting in \", None\" being appended to many (most?) of the addr:full fields.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom locations.items import GeojsonPointItem\nimport json\n\nclass AnytimeFitnessSpider(scrapy.Spider):\n name = 'anytime_fitness'\n allowed_domains = ['www.anytimefitness.com']\n\n def start_requests(self):\n url = 'https://www.anytimefitness.com/wp-content/uploads/gyms.json'\n yield scrapy.Request(url, callback=self.parse)\n\n def parse(self, response):\n gyms = json.loads(response.body_as_unicode())\n\n for gym in gyms:\n yield GeojsonPointItem(\n lat = gym['latitude'],\n lon = gym['longitude'],\n addr_full = '{}, {}'.format(gym['content']['address'], gym['content']['address2']),\n city = gym['content']['city'],\n phone = gym['content']['phone'],\n state = gym['content']['state_abbr'],\n postcode = gym['content']['zip'],\n ref = gym['content']['url'],\n country = gym['content']['country']\n )\n \n \n \n", "path": "locations/spiders/anytime_fitness.py"}]} | 856 | 284 |
gh_patches_debug_9873 | rasdani/github-patches | git_diff | wright-group__WrightTools-992 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
collection.convert
Would like to support syntax `collection.convert(unit)`.
Would convert all contained data objects recursively.
</issue>
<code>
[start of WrightTools/collection/_collection.py]
1 """Collection."""
2
3
4 # --- import --------------------------------------------------------------------------------------
5
6
7 import numpy as np
8
9 from .. import data as wt_data
10 from .. import exceptions as wt_exceptions
11 from .._group import Group
12
13
14 # --- define --------------------------------------------------------------------------------------
15
16
17 __all__ = ["Collection"]
18
19
20 # --- classes -------------------------------------------------------------------------------------
21
22
23 class Collection(Group):
24 """Nestable Collection of Data objects."""
25
26 class_name = "Collection"
27
28 def __iter__(self):
29 self.__n = 0
30 return self
31
32 def __len__(self):
33 return len(self.item_names)
34
35 def __next__(self):
36 if self.__n < len(self):
37 out = self.item_names[self.__n]
38 self.__n += 1
39 else:
40 raise StopIteration
41 return out
42
43 def __repr__(self):
44 return "<WrightTools.Collection '{0}' {1} at {2}>".format(
45 self.natural_name, self.item_names, "::".join([self.filepath, self.name])
46 )
47
48 def __getitem__(self, key):
49 if isinstance(key, int):
50 key = self.item_names[key]
51 if key == "":
52 return None
53 return super().__getitem__(key)
54
55 def __setitem__(self, key, value):
56 raise NotImplementedError
57
58 @property
59 def _leaf(self):
60 return self.natural_name
61
62 def _print_branch(self, prefix, depth, verbose):
63 for i, name in enumerate(self.item_names):
64 item = self[name]
65 if i + 1 == len(self.item_names):
66 s = prefix + "└── {0}: {1}".format(i, item._leaf)
67 p = prefix + " "
68 else:
69 s = prefix + "├── {0}: {1}".format(i, item._leaf)
70 p = prefix + "│ "
71 print(s)
72 if depth > 1 and hasattr(item, "_print_branch"):
73 item._print_branch(p, depth=depth - 1, verbose=verbose)
74
75 def create_collection(self, name="collection", position=None, **kwargs):
76 """Create a new child colleciton.
77
78 Parameters
79 ----------
80 name : string
81 Unique identifier.
82 position : integer (optional)
83 Location to insert. Default is None (append).
84 kwargs
85 Additional arguments to child collection instantiation.
86
87 Returns
88 -------
89 WrightTools Collection
90 New child.
91 """
92 if name in self.item_names:
93 wt_exceptions.ObjectExistsWarning.warn(name)
94 return self[name]
95 collection = Collection(
96 filepath=self.filepath, parent=self.name, name=name, edit_local=True, **kwargs
97 )
98 if position is not None:
99 self.attrs["item_names"] = np.insert(
100 self.attrs["item_names"][:-1], position, collection.natural_name.encode()
101 )
102 setattr(self, name, collection)
103 return collection
104
105 def create_data(self, name="data", position=None, **kwargs):
106 """Create a new child data.
107
108 Parameters
109 ----------
110 name : string
111 Unique identifier.
112 position : integer (optional)
113 Location to insert. Default is None (append).
114 kwargs
115 Additional arguments to child data instantiation.
116
117 Returns
118 -------
119 WrightTools Data
120 New child.
121 """
122 if name in self.item_names:
123 wt_exceptions.ObjectExistsWarning.warn(name)
124 return self[name]
125
126 if name == "":
127 data = None
128 natural_name = "".encode()
129 else:
130 data = wt_data.Data(
131 filepath=self.filepath, parent=self.name, name=name, edit_local=True, **kwargs
132 )
133 natural_name = data.natural_name.encode()
134 if position is not None:
135 self.attrs["item_names"] = np.insert(
136 self.attrs["item_names"][:-1], position, natural_name
137 )
138 setattr(self, name, data)
139 return data
140
141 def index(self):
142 """Index."""
143 raise NotImplementedError
144
145 def print_tree(self, depth=9, *, verbose=False):
146 """Print a ascii-formatted tree representation of the collection contents.
147
148 Parameters
149 ----------
150 depth : integer (optional)
151 Number of layers to include in the tree. Default is 9.
152 verbose : boolean (optional)
153 Toggle inclusion of extra information. Default is True.
154 """
155 print("{0} ({1})".format(self.natural_name, self.filepath))
156 self._print_branch("", depth=depth, verbose=verbose)
157
158 def flush(self):
159 """Ensure contents are written to file."""
160 for name in self.item_names:
161 item = self[name]
162 item.flush()
163 self.file.flush()
164
[end of WrightTools/collection/_collection.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/WrightTools/collection/_collection.py b/WrightTools/collection/_collection.py
--- a/WrightTools/collection/_collection.py
+++ b/WrightTools/collection/_collection.py
@@ -102,6 +102,18 @@
setattr(self, name, collection)
return collection
+ def convert(self, units, convert_variables=False, verbose=True):
+ """Convert units of a collection.
+ Parameters
+ ----------
+ units: string
+ Units to convert to.
+ """
+
+ for name in self.item_names:
+ item = self[name]
+ item.convert(units, convert_variables=convert_variables, verbose=verbose)
+
def create_data(self, name="data", position=None, **kwargs):
"""Create a new child data.
| {"golden_diff": "diff --git a/WrightTools/collection/_collection.py b/WrightTools/collection/_collection.py\n--- a/WrightTools/collection/_collection.py\n+++ b/WrightTools/collection/_collection.py\n@@ -102,6 +102,18 @@\n setattr(self, name, collection)\n return collection\n \n+ def convert(self, units, convert_variables=False, verbose=True):\n+ \"\"\"Convert units of a collection.\n+ Parameters\n+ ----------\n+ units: string\n+ Units to convert to.\n+ \"\"\"\n+\n+ for name in self.item_names:\n+ item = self[name]\n+ item.convert(units, convert_variables=convert_variables, verbose=verbose)\n+\n def create_data(self, name=\"data\", position=None, **kwargs):\n \"\"\"Create a new child data.\n", "issue": "collection.convert\nWould like to support syntax `collection.convert(unit)`.\r\n\r\nWould convert all contained data objects recursively.\n", "before_files": [{"content": "\"\"\"Collection.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport numpy as np\n\nfrom .. import data as wt_data\nfrom .. import exceptions as wt_exceptions\nfrom .._group import Group\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\n__all__ = [\"Collection\"]\n\n\n# --- classes -------------------------------------------------------------------------------------\n\n\nclass Collection(Group):\n \"\"\"Nestable Collection of Data objects.\"\"\"\n\n class_name = \"Collection\"\n\n def __iter__(self):\n self.__n = 0\n return self\n\n def __len__(self):\n return len(self.item_names)\n\n def __next__(self):\n if self.__n < len(self):\n out = self.item_names[self.__n]\n self.__n += 1\n else:\n raise StopIteration\n return out\n\n def __repr__(self):\n return \"<WrightTools.Collection '{0}' {1} at {2}>\".format(\n self.natural_name, self.item_names, \"::\".join([self.filepath, self.name])\n )\n\n def __getitem__(self, key):\n if isinstance(key, int):\n key = self.item_names[key]\n if key == \"\":\n return None\n return super().__getitem__(key)\n\n def __setitem__(self, key, value):\n raise NotImplementedError\n\n @property\n def _leaf(self):\n return self.natural_name\n\n def _print_branch(self, prefix, depth, verbose):\n for i, name in enumerate(self.item_names):\n item = self[name]\n if i + 1 == len(self.item_names):\n s = prefix + \"\u2514\u2500\u2500 {0}: {1}\".format(i, item._leaf)\n p = prefix + \" \"\n else:\n s = prefix + \"\u251c\u2500\u2500 {0}: {1}\".format(i, item._leaf)\n p = prefix + \"\u2502 \"\n print(s)\n if depth > 1 and hasattr(item, \"_print_branch\"):\n item._print_branch(p, depth=depth - 1, verbose=verbose)\n\n def create_collection(self, name=\"collection\", position=None, **kwargs):\n \"\"\"Create a new child colleciton.\n\n Parameters\n ----------\n name : string\n Unique identifier.\n position : integer (optional)\n Location to insert. 
Default is None (append).\n kwargs\n Additional arguments to child collection instantiation.\n\n Returns\n -------\n WrightTools Collection\n New child.\n \"\"\"\n if name in self.item_names:\n wt_exceptions.ObjectExistsWarning.warn(name)\n return self[name]\n collection = Collection(\n filepath=self.filepath, parent=self.name, name=name, edit_local=True, **kwargs\n )\n if position is not None:\n self.attrs[\"item_names\"] = np.insert(\n self.attrs[\"item_names\"][:-1], position, collection.natural_name.encode()\n )\n setattr(self, name, collection)\n return collection\n\n def create_data(self, name=\"data\", position=None, **kwargs):\n \"\"\"Create a new child data.\n\n Parameters\n ----------\n name : string\n Unique identifier.\n position : integer (optional)\n Location to insert. Default is None (append).\n kwargs\n Additional arguments to child data instantiation.\n\n Returns\n -------\n WrightTools Data\n New child.\n \"\"\"\n if name in self.item_names:\n wt_exceptions.ObjectExistsWarning.warn(name)\n return self[name]\n\n if name == \"\":\n data = None\n natural_name = \"\".encode()\n else:\n data = wt_data.Data(\n filepath=self.filepath, parent=self.name, name=name, edit_local=True, **kwargs\n )\n natural_name = data.natural_name.encode()\n if position is not None:\n self.attrs[\"item_names\"] = np.insert(\n self.attrs[\"item_names\"][:-1], position, natural_name\n )\n setattr(self, name, data)\n return data\n\n def index(self):\n \"\"\"Index.\"\"\"\n raise NotImplementedError\n\n def print_tree(self, depth=9, *, verbose=False):\n \"\"\"Print a ascii-formatted tree representation of the collection contents.\n\n Parameters\n ----------\n depth : integer (optional)\n Number of layers to include in the tree. Default is 9.\n verbose : boolean (optional)\n Toggle inclusion of extra information. Default is True.\n \"\"\"\n print(\"{0} ({1})\".format(self.natural_name, self.filepath))\n self._print_branch(\"\", depth=depth, verbose=verbose)\n\n def flush(self):\n \"\"\"Ensure contents are written to file.\"\"\"\n for name in self.item_names:\n item = self[name]\n item.flush()\n self.file.flush()\n", "path": "WrightTools/collection/_collection.py"}]} | 1,949 | 177 |
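The structure of the added method generalizes nicely: the collection simply fans `convert()` out to each child, and nested collections recurse for free because they expose the same method. A toy stand-in sketch (class names illustrative, not the WrightTools implementations):

```python
class Data:
    def convert(self, units, **kwargs):
        print(f"data -> {units}")


class Collection:
    def __init__(self, items):
        self._items = items

    def convert(self, units, **kwargs):
        for item in self._items:        # Data converts; Collection recurses
            item.convert(units, **kwargs)


Collection([Data(), Collection([Data()])]).convert("eV")
```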
gh_patches_debug_26291 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-3117 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The requirement of superuser postgresql access is problematic
## Problem
Mathesar needs a Postgres superuser to function correctly, from the docs at https://docs.mathesar.org/installation/build-from-source/
## Proposed solution
The mathesar user should not require superuser access.
## Additional context
The superuser is a global permission, meaning that a user with superuser rights can access (and act on) not just the mathesar database but *all* databases of the RDBMS. Considering that many production systems have a single RDBMS hosting many applications, this is a major problem, since the mathesar user would have no access boundaries within that shared RDBMS. The mathesar user's access can be unlimited inside the mathesar database, but it *must* be bounded outside of it.
</issue>
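Before the code: the boundary the issue asks for is usually drawn with a dedicated non-superuser role that owns only the application database. A hedged sketch using SQLAlchemy — the role name, password, and connection string are illustrative assumptions, not Mathesar's actual installer logic (see the repository code below for what it currently does):

```python
from sqlalchemy import create_engine, text

# Connect once as an administrator to bootstrap the scoped role.
admin = create_engine("postgresql://postgres:postgres@localhost/postgres")
with admin.connect() as conn:
    conn = conn.execution_options(isolation_level="AUTOCOMMIT")
    conn.execute(text(
        "CREATE ROLE mathesar_app LOGIN PASSWORD 'change-me' "
        "NOSUPERUSER NOCREATEDB NOCREATEROLE"
    ))
    # The role owns -- and is therefore bounded to -- a single database.
    conn.execute(text('CREATE DATABASE mathesar OWNER mathesar_app'))
```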
<code>
[start of db/install.py]
1 from sqlalchemy import text
2 from sqlalchemy.exc import OperationalError
3
4 from db import engine
5 from db.sql import install as sql_install
6 from db.types import install as types_install
7
8
9 def install_mathesar(
10 database_name, username, password, hostname, port, skip_confirm
11 ):
12 """Create database and install Mathesar on it."""
13 user_db_engine = engine.create_future_engine(
14 username, password, hostname, database_name, port,
15 connect_args={"connect_timeout": 10}
16 )
17 try:
18 user_db_engine.connect()
19 print(f"Installing Mathesar on preexisting PostgreSQL database {database_name} at host {hostname}...")
20 sql_install.install(user_db_engine)
21 types_install.install_mathesar_on_database(user_db_engine)
22 user_db_engine.dispose()
23 except OperationalError:
24 database_created = _create_database(
25 database_name=database_name,
26 hostname=hostname,
27 username=username,
28 password=password,
29 port=port,
30 skip_confirm=skip_confirm
31 )
32 if database_created:
33 print(f"Installing Mathesar on PostgreSQL database {database_name} at host {hostname}...")
34 sql_install.install(user_db_engine)
35 types_install.install_mathesar_on_database(user_db_engine)
36 user_db_engine.dispose()
37 else:
38 print(f"Skipping installing on DB with key {database_name}.")
39
40
41 def _create_database(database_name, hostname, username, password, port, skip_confirm=True):
42 if skip_confirm is True:
43 create_database = "y"
44 else:
45 create_database = input(
46 f"Create a new Database called {database_name}? (y/n) > "
47 )
48 if create_database.lower() in ["y", "yes"]:
49 # We need to connect to an existing database inorder to create a new Database.
50 # So we use the default Database `postgres` that comes with postgres.
51 # TODO Throw correct error when the default postgres database does not exists(which is very rare but still possible)
52 root_database = "postgres"
53 root_db_engine = engine.create_future_engine(
54 username, password, hostname, root_database, port,
55 connect_args={"connect_timeout": 10}
56 )
57 with root_db_engine.connect() as conn:
58 conn.execution_options(isolation_level="AUTOCOMMIT")
59 conn.execute(text(f'CREATE DATABASE "{database_name}"'))
60 root_db_engine.dispose()
61 print(f"Created DB is {database_name}.")
62 return True
63 else:
64 print(f"Database {database_name} not created!")
65 return False
66
[end of db/install.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/db/install.py b/db/install.py
--- a/db/install.py
+++ b/db/install.py
@@ -1,5 +1,6 @@
+from psycopg.errors import InsufficientPrivilege
from sqlalchemy import text
-from sqlalchemy.exc import OperationalError
+from sqlalchemy.exc import OperationalError, ProgrammingError
from db import engine
from db.sql import install as sql_install
@@ -54,12 +55,20 @@
username, password, hostname, root_database, port,
connect_args={"connect_timeout": 10}
)
- with root_db_engine.connect() as conn:
- conn.execution_options(isolation_level="AUTOCOMMIT")
- conn.execute(text(f'CREATE DATABASE "{database_name}"'))
- root_db_engine.dispose()
- print(f"Created DB is {database_name}.")
- return True
+ try:
+ with root_db_engine.connect() as conn:
+ conn.execution_options(isolation_level="AUTOCOMMIT")
+ conn.execute(text(f'CREATE DATABASE "{database_name}"'))
+ root_db_engine.dispose()
+ print(f"Created DB is {database_name}.")
+ return True
+ except ProgrammingError as e:
+ if isinstance(e.orig, InsufficientPrivilege):
+ print(f"Database {database_name} could not be created due to Insufficient Privilege")
+ return False
+ except Exception:
+ print(f"Database {database_name} could not be created!")
+ return False
else:
print(f"Database {database_name} not created!")
return False
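The diff above works because SQLAlchemy wraps driver-level failures: a permissions error on CREATE DATABASE surfaces as a ProgrammingError whose `.orig` attribute holds the underlying psycopg exception. A minimal sketch of that unwrapping, with the helper name assumed for illustration:

```python
from psycopg.errors import InsufficientPrivilege
from sqlalchemy import text
from sqlalchemy.exc import ProgrammingError

def try_create_database(conn, name):
    """Return True on success, False when the role lacks CREATEDB rights.

    Assumes `conn` was put into AUTOCOMMIT mode by the caller, since
    CREATE DATABASE cannot run inside a transaction.
    """
    try:
        conn.execute(text(f'CREATE DATABASE "{name}"'))
        return True
    except ProgrammingError as e:
        # SQLAlchemy attaches the driver exception as .orig
        if isinstance(e.orig, InsufficientPrivilege):
            return False
        raise
```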
| {"golden_diff": "diff --git a/db/install.py b/db/install.py\n--- a/db/install.py\n+++ b/db/install.py\n@@ -1,5 +1,6 @@\n+from psycopg.errors import InsufficientPrivilege\n from sqlalchemy import text\n-from sqlalchemy.exc import OperationalError\n+from sqlalchemy.exc import OperationalError, ProgrammingError\n \n from db import engine\n from db.sql import install as sql_install\n@@ -54,12 +55,20 @@\n username, password, hostname, root_database, port,\n connect_args={\"connect_timeout\": 10}\n )\n- with root_db_engine.connect() as conn:\n- conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n- conn.execute(text(f'CREATE DATABASE \"{database_name}\"'))\n- root_db_engine.dispose()\n- print(f\"Created DB is {database_name}.\")\n- return True\n+ try:\n+ with root_db_engine.connect() as conn:\n+ conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n+ conn.execute(text(f'CREATE DATABASE \"{database_name}\"'))\n+ root_db_engine.dispose()\n+ print(f\"Created DB is {database_name}.\")\n+ return True\n+ except ProgrammingError as e:\n+ if isinstance(e.orig, InsufficientPrivilege):\n+ print(f\"Database {database_name} could not be created due to Insufficient Privilege\")\n+ return False\n+ except Exception:\n+ print(f\"Database {database_name} could not be created!\")\n+ return False\n else:\n print(f\"Database {database_name} not created!\")\n return False\n", "issue": "The requirement of superuser postgresql access is problematic\n## Problem\r\nMathesar needs a Postgres superuser to function correctly, from the docs at https://docs.mathesar.org/installation/build-from-source/ \r\n\r\n## Proposed solution\r\nThe mathesar user should not require superuser access. \r\n\r\n## Additional context\r\nThe superuser is a global permission meaning that a user that has superuser permission will be able to access (and do stuff) not on the mathesar but *all* the databases of the RDBMS. Considering that many production systems have a single RDBMS hosting many application this is a major problem since the mathsar user won't have any access boundaries on the same RDBMS. 
The mathesar user access can be unlimited but *must* be bounded without the mathesar database.\n", "before_files": [{"content": "from sqlalchemy import text\nfrom sqlalchemy.exc import OperationalError\n\nfrom db import engine\nfrom db.sql import install as sql_install\nfrom db.types import install as types_install\n\n\ndef install_mathesar(\n database_name, username, password, hostname, port, skip_confirm\n):\n \"\"\"Create database and install Mathesar on it.\"\"\"\n user_db_engine = engine.create_future_engine(\n username, password, hostname, database_name, port,\n connect_args={\"connect_timeout\": 10}\n )\n try:\n user_db_engine.connect()\n print(f\"Installing Mathesar on preexisting PostgreSQL database {database_name} at host {hostname}...\")\n sql_install.install(user_db_engine)\n types_install.install_mathesar_on_database(user_db_engine)\n user_db_engine.dispose()\n except OperationalError:\n database_created = _create_database(\n database_name=database_name,\n hostname=hostname,\n username=username,\n password=password,\n port=port,\n skip_confirm=skip_confirm\n )\n if database_created:\n print(f\"Installing Mathesar on PostgreSQL database {database_name} at host {hostname}...\")\n sql_install.install(user_db_engine)\n types_install.install_mathesar_on_database(user_db_engine)\n user_db_engine.dispose()\n else:\n print(f\"Skipping installing on DB with key {database_name}.\")\n\n\ndef _create_database(database_name, hostname, username, password, port, skip_confirm=True):\n if skip_confirm is True:\n create_database = \"y\"\n else:\n create_database = input(\n f\"Create a new Database called {database_name}? (y/n) > \"\n )\n if create_database.lower() in [\"y\", \"yes\"]:\n # We need to connect to an existing database inorder to create a new Database.\n # So we use the default Database `postgres` that comes with postgres.\n # TODO Throw correct error when the default postgres database does not exists(which is very rare but still possible)\n root_database = \"postgres\"\n root_db_engine = engine.create_future_engine(\n username, password, hostname, root_database, port,\n connect_args={\"connect_timeout\": 10}\n )\n with root_db_engine.connect() as conn:\n conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n conn.execute(text(f'CREATE DATABASE \"{database_name}\"'))\n root_db_engine.dispose()\n print(f\"Created DB is {database_name}.\")\n return True\n else:\n print(f\"Database {database_name} not created!\")\n return False\n", "path": "db/install.py"}]} | 1,347 | 343 |
gh_patches_debug_30331 | rasdani/github-patches | git_diff | e-valuation__EvaP-424 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
UserProfile missing when User is not created during import
Users which are not created by an enrollment data import but by logging in for the first time (using Kerberos authentication, so they don't need to have a local account first) don't have a UserProfile. This leads to undefined behavior.
</issue>
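A common Django remedy for this class of bug, sketched below on the assumption that `UserProfile` has a one-to-one `user` field (the file that follows suggests it does): create the profile lazily the first time it is needed, so users created by external authentication backends get one too.

```python
# Hypothetical helper, not code from the EvaP tree.
from django.contrib.auth.models import User

from evap.evaluation.models import UserProfile

def profile_for(user: User):
    # get_or_create guarantees a profile even for users whose first
    # contact with the app is a Kerberos login.
    profile, _created = UserProfile.objects.get_or_create(user=user)
    return profile
```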
<code>
[start of evap/evaluation/views.py]
1 from django.contrib import messages
2 from django.contrib.auth import login as auth_login
3 from django.shortcuts import redirect, render_to_response
4 from django.template import RequestContext
5 from django.utils.translation import ugettext as _
6
7 from evap.evaluation.forms import NewKeyForm, LoginKeyForm, LoginUsernameForm
8 from evap.evaluation.models import UserProfile, FaqSection, EmailTemplate
9
10
11 def index(request):
12 """Main entry page into EvaP providing all the login options available. THe username/password
13 login is thought to be used for internal users, e.g. by connecting to a LDAP directory.
14 The login key mechanism is meant to be used to include external participants, e.g. visiting
15 students or visiting contributors.
16 """
17
18 # parse the form data into the respective form
19 submit_type = request.POST.get("submit_type", "no_submit")
20 new_key_form = NewKeyForm(request.POST if submit_type == "new_key" else None)
21 login_key_form = LoginKeyForm(request.POST if submit_type == "login_key" else None)
22 login_username_form = LoginUsernameForm(request, request.POST if submit_type == "login_username" else None)
23
24 # process form data
25 if request.method == 'POST':
26 if new_key_form.is_valid():
27 # user wants a new login key
28 profile = new_key_form.get_profile()
29 profile.generate_login_key()
30 profile.save()
31
32 EmailTemplate.get_login_key_template().send_to_user(new_key_form.get_user())
33
34 messages.success(request, _(u"Successfully sent email with new login key."))
35 elif login_key_form.is_valid():
36 # user would like to login with a login key and passed key test
37 auth_login(request, login_key_form.get_user())
38 elif login_username_form.is_valid():
39 # user would like to login with username and password and passed password test
40 auth_login(request, login_username_form.get_user())
41
42 # clean up our test cookie
43 if request.session.test_cookie_worked():
44 request.session.delete_test_cookie()
45
46 # if not logged in by now, render form
47 if not request.user.is_active:
48 # set test cookie to verify whether they work in the next step
49 request.session.set_test_cookie()
50
51 return render_to_response("index.html", dict(new_key_form=new_key_form, login_key_form=login_key_form, login_username_form=login_username_form), context_instance=RequestContext(request))
52 else:
53 # check for redirect variable
54 redirect_to = request.GET.get("next", None)
55 if redirect_to is not None:
56 if redirect_to.startswith("/fsr/"):
57 if request.user.is_staff:
58 return redirect(redirect_to)
59 elif redirect_to.startswith("/contributor/"):
60 if UserProfile.get_for_user(request.user).is_contributor:
61 return redirect(redirect_to)
62 else:
63 return redirect(redirect_to)
64
65 # redirect user to appropriate start page
66 if request.user.is_staff:
67 return redirect('evap.fsr.views.index')
68 elif UserProfile.get_for_user(request.user).is_editor_or_delegate:
69 return redirect('evap.contributor.views.index')
70 else:
71 return redirect('evap.student.views.index')
72
73
74 def faq(request):
75 return render_to_response("faq.html", dict(sections=FaqSection.objects.all()), context_instance=RequestContext(request))
76
[end of evap/evaluation/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/evap/evaluation/views.py b/evap/evaluation/views.py
--- a/evap/evaluation/views.py
+++ b/evap/evaluation/views.py
@@ -50,6 +50,8 @@
return render_to_response("index.html", dict(new_key_form=new_key_form, login_key_form=login_key_form, login_username_form=login_username_form), context_instance=RequestContext(request))
else:
+ userprofile, _ = UserProfile.objects.get_or_create(user=request.user)
+
# check for redirect variable
redirect_to = request.GET.get("next", None)
if redirect_to is not None:
@@ -57,7 +59,7 @@
if request.user.is_staff:
return redirect(redirect_to)
elif redirect_to.startswith("/contributor/"):
- if UserProfile.get_for_user(request.user).is_contributor:
+ if userprofile.is_contributor:
return redirect(redirect_to)
else:
return redirect(redirect_to)
@@ -65,7 +67,7 @@
# redirect user to appropriate start page
if request.user.is_staff:
return redirect('evap.fsr.views.index')
- elif UserProfile.get_for_user(request.user).is_editor_or_delegate:
+ elif userprofile.is_editor_or_delegate:
return redirect('evap.contributor.views.index')
else:
return redirect('evap.student.views.index')
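The golden diff takes exactly that lazy route inside the view via `get_or_create`. An eager alternative, creating the profile whenever a `User` row is first saved, would look roughly like this sketch (signal wiring assumed, not part of the patch):

```python
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver

from evap.evaluation.models import UserProfile

@receiver(post_save, sender=User)
def ensure_profile(sender, instance, created, **kwargs):
    # Runs for every saved User, including ones created by external
    # authentication backends such as Kerberos.
    if created:
        UserProfile.objects.get_or_create(user=instance)
```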
| {"golden_diff": "diff --git a/evap/evaluation/views.py b/evap/evaluation/views.py\n--- a/evap/evaluation/views.py\n+++ b/evap/evaluation/views.py\n@@ -50,6 +50,8 @@\n \n return render_to_response(\"index.html\", dict(new_key_form=new_key_form, login_key_form=login_key_form, login_username_form=login_username_form), context_instance=RequestContext(request))\n else:\n+ userprofile, _ = UserProfile.objects.get_or_create(user=request.user)\n+\n # check for redirect variable\n redirect_to = request.GET.get(\"next\", None)\n if redirect_to is not None:\n@@ -57,7 +59,7 @@\n if request.user.is_staff:\n return redirect(redirect_to)\n elif redirect_to.startswith(\"/contributor/\"):\n- if UserProfile.get_for_user(request.user).is_contributor:\n+ if userprofile.is_contributor:\n return redirect(redirect_to)\n else:\n return redirect(redirect_to)\n@@ -65,7 +67,7 @@\n # redirect user to appropriate start page\n if request.user.is_staff:\n return redirect('evap.fsr.views.index')\n- elif UserProfile.get_for_user(request.user).is_editor_or_delegate:\n+ elif userprofile.is_editor_or_delegate:\n return redirect('evap.contributor.views.index')\n else:\n return redirect('evap.student.views.index')\n", "issue": "UserProfile missing when User is not created during import\nUsers which are not created by an enrollment data import but by logging in for the first time (using Kerberos authentication, so they don't need to have a local account first) don't have a UserProfile. This leads to undefined behavior.\n\nUserProfile missing when User is not created during import\nUsers which are not created by an enrollment data import but by logging in for the first time (using Kerberos authentication, so they don't need to have a local account first) don't have a UserProfile. This leads to undefined behavior.\n\n", "before_files": [{"content": "from django.contrib import messages\nfrom django.contrib.auth import login as auth_login\nfrom django.shortcuts import redirect, render_to_response\nfrom django.template import RequestContext\nfrom django.utils.translation import ugettext as _\n\nfrom evap.evaluation.forms import NewKeyForm, LoginKeyForm, LoginUsernameForm\nfrom evap.evaluation.models import UserProfile, FaqSection, EmailTemplate\n\n\ndef index(request):\n \"\"\"Main entry page into EvaP providing all the login options available. THe username/password\n login is thought to be used for internal users, e.g. by connecting to a LDAP directory.\n The login key mechanism is meant to be used to include external participants, e.g. 
visiting\n students or visiting contributors.\n \"\"\"\n\n # parse the form data into the respective form\n submit_type = request.POST.get(\"submit_type\", \"no_submit\")\n new_key_form = NewKeyForm(request.POST if submit_type == \"new_key\" else None)\n login_key_form = LoginKeyForm(request.POST if submit_type == \"login_key\" else None)\n login_username_form = LoginUsernameForm(request, request.POST if submit_type == \"login_username\" else None)\n\n # process form data\n if request.method == 'POST':\n if new_key_form.is_valid():\n # user wants a new login key\n profile = new_key_form.get_profile()\n profile.generate_login_key()\n profile.save()\n\n EmailTemplate.get_login_key_template().send_to_user(new_key_form.get_user())\n\n messages.success(request, _(u\"Successfully sent email with new login key.\"))\n elif login_key_form.is_valid():\n # user would like to login with a login key and passed key test\n auth_login(request, login_key_form.get_user())\n elif login_username_form.is_valid():\n # user would like to login with username and password and passed password test\n auth_login(request, login_username_form.get_user())\n\n # clean up our test cookie\n if request.session.test_cookie_worked():\n request.session.delete_test_cookie()\n\n # if not logged in by now, render form\n if not request.user.is_active:\n # set test cookie to verify whether they work in the next step\n request.session.set_test_cookie()\n\n return render_to_response(\"index.html\", dict(new_key_form=new_key_form, login_key_form=login_key_form, login_username_form=login_username_form), context_instance=RequestContext(request))\n else:\n # check for redirect variable\n redirect_to = request.GET.get(\"next\", None)\n if redirect_to is not None:\n if redirect_to.startswith(\"/fsr/\"):\n if request.user.is_staff:\n return redirect(redirect_to)\n elif redirect_to.startswith(\"/contributor/\"):\n if UserProfile.get_for_user(request.user).is_contributor:\n return redirect(redirect_to)\n else:\n return redirect(redirect_to)\n\n # redirect user to appropriate start page\n if request.user.is_staff:\n return redirect('evap.fsr.views.index')\n elif UserProfile.get_for_user(request.user).is_editor_or_delegate:\n return redirect('evap.contributor.views.index')\n else:\n return redirect('evap.student.views.index')\n\n\ndef faq(request):\n return render_to_response(\"faq.html\", dict(sections=FaqSection.objects.all()), context_instance=RequestContext(request))\n", "path": "evap/evaluation/views.py"}]} | 1,499 | 305 |
gh_patches_debug_19870 | rasdani/github-patches | git_diff | Parsl__parsl-389 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
parsl installed from pip tries to determine its version using git
Every time I run parsl I get:
```
kacperk@dxl1: /dpool/kacperk/arxiv $ python scraper_parsl.py
fatal: Not a git repository: '/home/kacperk/.local/lib/python3.6/site-packages/.git'
```
and in logs:
```
2018-07-15 12:54:06 parsl.utils:24 [ERROR] Unable to determine code state
Traceback (most recent call last):
File "/home/kacperk/.local/lib/python3.6/site-packages/parsl/utils.py", line 19, in get_version
head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')
File "/home/kacperk/miniconda3/lib/python3.6/subprocess.py", line 336, in check_output
**kwargs).stdout
File "/home/kacperk/miniconda3/lib/python3.6/subprocess.py", line 418, in run
output=stdout, stderr=stderr)
subprocess.CalledProcessError: Command '['git', 'rev-parse', '--short', 'HEAD']' returned non-zero exit status 128.
```
</issue>
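Two separate things go wrong here: git's "fatal: Not a git repository" reaches the terminal because `check_output` captures only stdout, and the failure is merely logged, so every run pays the cost. A small reproduction and containment sketch, with paths assumed:

```python
import subprocess

# Symptom: stderr from git is not captured, so the "fatal: ..." line
# prints straight to the terminal (assumes git is installed and /tmp
# is not inside a repository).
try:
    subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'], cwd='/tmp')
except subprocess.CalledProcessError as e:
    print('git exited with', e.returncode)  # 128 outside a repository

# Containment: capture stderr too, so nothing leaks to the console.
result = subprocess.run(
    ['git', 'rev-parse', '--short', 'HEAD'],
    cwd='/tmp', capture_output=True, text=True,  # capture_output needs 3.7+
)
```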
<code>
[start of parsl/utils.py]
1 import logging
2 import os
3 import shlex
4 import subprocess
5 import threading
6 import time
7 from contextlib import contextmanager
8 from functools import wraps
9
10 import parsl
11 from parsl.version import VERSION
12
13 logger = logging.getLogger(__name__)
14
15
16 def get_version():
17 version = parsl.__version__
18 work_tree = os.path.dirname(os.path.dirname(__file__))
19 git_dir = os.path.join(work_tree, '.git')
20 env = {'GIT_WORK_TREE': work_tree, 'GIT_DIR': git_dir}
21 try:
22 cmd = shlex.split('git rev-parse --short HEAD')
23 head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')
24 diff = subprocess.check_output(shlex.split('git diff HEAD'), env=env)
25 status = 'dirty' if diff else 'clean'
26 version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status)
27 except Exception as e:
28 pass
29
30 return version
31
32
33 def get_all_checkpoints(rundir="runinfo"):
34 """Finds the checkpoints from all last runs.
35
36 Note that checkpoints are incremental, and this helper will not find
37 previous checkpoints from earlier than the most recent run. It probably
38 should be made to do so.
39
40 Kwargs:
41 - rundir(str) : Path to the runinfo directory
42
43 Returns:
44 - a list suitable for the checkpointFiles parameter of DataFlowKernel
45 constructor
46
47 """
48
49 if(not(os.path.isdir(rundir))):
50 return []
51
52 dirs = sorted(os.listdir(rundir))
53
54 checkpoints = []
55
56 for runid in dirs:
57
58 checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, runid))
59
60 if(os.path.isdir(checkpoint)):
61 checkpoints.append(checkpoint)
62
63 return checkpoints
64
65
66 def get_last_checkpoint(rundir="runinfo"):
67 """Finds the checkpoint from the last run, if one exists.
68
69 Note that checkpoints are incremental, and this helper will not find
70 previous checkpoints from earlier than the most recent run. It probably
71 should be made to do so.
72
73 Kwargs:
74 - rundir(str) : Path to the runinfo directory
75
76 Returns:
77 - a list suitable for checkpointFiles parameter of DataFlowKernel
78 constructor, with 0 or 1 elements
79
80 """
81
82 if(not(os.path.isdir(rundir))):
83 return []
84
85 dirs = sorted(os.listdir(rundir))
86
87 if(len(dirs) == 0):
88 return []
89
90 last_runid = dirs[-1]
91 last_checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, last_runid))
92
93 if(not(os.path.isdir(last_checkpoint))):
94 return []
95
96 return [last_checkpoint]
97
98
99 def timeout(seconds=None):
100 def decorator(func, *args, **kwargs):
101 @wraps(func)
102 def wrapper(*args, **kwargs):
103 t = threading.Thread(target=func, args=args, kwargs=kwargs)
104 t.start()
105 result = t.join(seconds)
106 if t.is_alive():
107 raise RuntimeError('timed out in {}'.format(func))
108 return result
109 return wrapper
110 return decorator
111
112
113 @contextmanager
114 def wait_for_file(path, seconds=10):
115 for i in range(0, int(seconds * 100)):
116 time.sleep(seconds / 100.)
117 if os.path.exists(path):
118 break
119 yield
120
121
122 @contextmanager
123 def time_limited_open(path, mode, seconds=1):
124 wait_for_file(path, seconds)
125
126 f = open(path, mode)
127 yield f
128 f.close()
129
[end of parsl/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parsl/utils.py b/parsl/utils.py
--- a/parsl/utils.py
+++ b/parsl/utils.py
@@ -17,15 +17,16 @@
version = parsl.__version__
work_tree = os.path.dirname(os.path.dirname(__file__))
git_dir = os.path.join(work_tree, '.git')
- env = {'GIT_WORK_TREE': work_tree, 'GIT_DIR': git_dir}
- try:
- cmd = shlex.split('git rev-parse --short HEAD')
- head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')
- diff = subprocess.check_output(shlex.split('git diff HEAD'), env=env)
- status = 'dirty' if diff else 'clean'
- version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status)
- except Exception as e:
- pass
+ if os.path.exists(git_dir):
+ env = {'GIT_WORK_TREE': work_tree, 'GIT_DIR': git_dir}
+ try:
+ cmd = shlex.split('git rev-parse --short HEAD')
+ head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')
+ diff = subprocess.check_output(shlex.split('git diff HEAD'), env=env)
+ status = 'dirty' if diff else 'clean'
+ version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status)
+ except Exception as e:
+ pass
return version
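The patch sidesteps the subprocess entirely for pip installs by checking for the `.git` directory first. On modern Pythons the same goal is often reached without touching git at all, as in this sketch (importlib.metadata is 3.8+, so this is an alternative, not what the 2018-era patch does):

```python
from importlib.metadata import PackageNotFoundError, version

try:
    parsl_version = version('parsl')  # reads installed package metadata
except PackageNotFoundError:
    parsl_version = 'unknown.version'
```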
| {"golden_diff": "diff --git a/parsl/utils.py b/parsl/utils.py\n--- a/parsl/utils.py\n+++ b/parsl/utils.py\n@@ -17,15 +17,16 @@\n version = parsl.__version__\n work_tree = os.path.dirname(os.path.dirname(__file__))\n git_dir = os.path.join(work_tree, '.git')\n- env = {'GIT_WORK_TREE': work_tree, 'GIT_DIR': git_dir}\n- try:\n- cmd = shlex.split('git rev-parse --short HEAD')\n- head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')\n- diff = subprocess.check_output(shlex.split('git diff HEAD'), env=env)\n- status = 'dirty' if diff else 'clean'\n- version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status)\n- except Exception as e:\n- pass\n+ if os.path.exists(git_dir):\n+ env = {'GIT_WORK_TREE': work_tree, 'GIT_DIR': git_dir}\n+ try:\n+ cmd = shlex.split('git rev-parse --short HEAD')\n+ head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')\n+ diff = subprocess.check_output(shlex.split('git diff HEAD'), env=env)\n+ status = 'dirty' if diff else 'clean'\n+ version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status)\n+ except Exception as e:\n+ pass\n \n return version\n", "issue": "parsl installed from pip tries to determine its version using git\nEvery time I run parsl I get:\r\n\r\n```\r\nkacperk@dxl1: /dpool/kacperk/arxiv $ python scraper_parsl.py\r\nfatal: Not a git repository: '/home/kacperk/.local/lib/python3.6/site-packages/.git'\r\n```\r\n\r\nand in logs:\r\n\r\n```\r\n2018-07-15 12:54:06 parsl.utils:24 [ERROR] Unable to determine code state\r\nTraceback (most recent call last):\r\n File \"/home/kacperk/.local/lib/python3.6/site-packages/parsl/utils.py\", line 19, in get_version\r\n head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')\r\n File \"/home/kacperk/miniconda3/lib/python3.6/subprocess.py\", line 336, in check_output\r\n **kwargs).stdout\r\n File \"/home/kacperk/miniconda3/lib/python3.6/subprocess.py\", line 418, in run\r\n output=stdout, stderr=stderr)\r\nsubprocess.CalledProcessError: Command '['git', 'rev-parse', '--short', 'HEAD']' returned non-zero exit status 128.\r\n```\n", "before_files": [{"content": "import logging\nimport os\nimport shlex\nimport subprocess\nimport threading\nimport time\nfrom contextlib import contextmanager\nfrom functools import wraps\n\nimport parsl\nfrom parsl.version import VERSION\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_version():\n version = parsl.__version__\n work_tree = os.path.dirname(os.path.dirname(__file__))\n git_dir = os.path.join(work_tree, '.git')\n env = {'GIT_WORK_TREE': work_tree, 'GIT_DIR': git_dir}\n try:\n cmd = shlex.split('git rev-parse --short HEAD')\n head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')\n diff = subprocess.check_output(shlex.split('git diff HEAD'), env=env)\n status = 'dirty' if diff else 'clean'\n version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status)\n except Exception as e:\n pass\n\n return version\n\n\ndef get_all_checkpoints(rundir=\"runinfo\"):\n \"\"\"Finds the checkpoints from all last runs.\n\n Note that checkpoints are incremental, and this helper will not find\n previous checkpoints from earlier than the most recent run. 
It probably\n should be made to do so.\n\n Kwargs:\n - rundir(str) : Path to the runinfo directory\n\n Returns:\n - a list suitable for the checkpointFiles parameter of DataFlowKernel\n constructor\n\n \"\"\"\n\n if(not(os.path.isdir(rundir))):\n return []\n\n dirs = sorted(os.listdir(rundir))\n\n checkpoints = []\n\n for runid in dirs:\n\n checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, runid))\n\n if(os.path.isdir(checkpoint)):\n checkpoints.append(checkpoint)\n\n return checkpoints\n\n\ndef get_last_checkpoint(rundir=\"runinfo\"):\n \"\"\"Finds the checkpoint from the last run, if one exists.\n\n Note that checkpoints are incremental, and this helper will not find\n previous checkpoints from earlier than the most recent run. It probably\n should be made to do so.\n\n Kwargs:\n - rundir(str) : Path to the runinfo directory\n\n Returns:\n - a list suitable for checkpointFiles parameter of DataFlowKernel\n constructor, with 0 or 1 elements\n\n \"\"\"\n\n if(not(os.path.isdir(rundir))):\n return []\n\n dirs = sorted(os.listdir(rundir))\n\n if(len(dirs) == 0):\n return []\n\n last_runid = dirs[-1]\n last_checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, last_runid))\n\n if(not(os.path.isdir(last_checkpoint))):\n return []\n\n return [last_checkpoint]\n\n\ndef timeout(seconds=None):\n def decorator(func, *args, **kwargs):\n @wraps(func)\n def wrapper(*args, **kwargs):\n t = threading.Thread(target=func, args=args, kwargs=kwargs)\n t.start()\n result = t.join(seconds)\n if t.is_alive():\n raise RuntimeError('timed out in {}'.format(func))\n return result\n return wrapper\n return decorator\n\n\n@contextmanager\ndef wait_for_file(path, seconds=10):\n for i in range(0, int(seconds * 100)):\n time.sleep(seconds / 100.)\n if os.path.exists(path):\n break\n yield\n\n\n@contextmanager\ndef time_limited_open(path, mode, seconds=1):\n wait_for_file(path, seconds)\n\n f = open(path, mode)\n yield f\n f.close()\n", "path": "parsl/utils.py"}]} | 1,891 | 343 |
gh_patches_debug_8615 | rasdani/github-patches | git_diff | secdev__scapy-373 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
unknown.version in egg.info and in the banner
Hi,
I'm porting 2.3.3 to OpenBSD. I had two issues:
1. after the install with setuptools, the .egg-info generated is called 'lib/python2.7/site-packages/scapy-unknown.version-py2.7.egg-info'
I patched setup.py to hardcode the version then it worked
```
--- setup.py.orig Tue Oct 18 10:44:43 2016
+++ setup.py Mon Oct 31 17:19:45 2016
@@ -47,7 +47,7 @@ if os.name == "nt":
setup(
name='scapy',
- version=__import__('scapy').VERSION,
+ version='2.3.3',
packages=[
'scapy',
'scapy/arch',
```
I now have lib/python2.7/site-packages/scapy-2.3.3-py2.7.egg-info
2. running scapy it says "Welcome to Scapy (unknown.version)" even with the setup.py change. I went through scapy-2.3.3/scapy/main.py but I didn't find anything suspicious.
</issue>
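Both symptoms trace back to `_version()` in the file below returning the literal string 'unknown.version' whenever neither git nor a `scapy/VERSION` file is available; setup.py then bakes that string into the egg-info name. A packager-side workaround, sketched under the assumption of a plain source-checkout layout, is simply to write the real version into that file before building:

```python
import os

# Hypothetical packaging step, not part of the Scapy tree: _version()
# below falls back to reading scapy/VERSION, so pre-creating that file
# avoids the 'unknown.version' string entirely.
pkg_dir = 'scapy'
with open(os.path.join(pkg_dir, 'VERSION'), 'w') as f:
    f.write('2.3.3')
```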
<code>
[start of scapy/__init__.py]
1 ## This file is part of Scapy
2 ## See http://www.secdev.org/projects/scapy for more informations
3 ## Copyright (C) Philippe Biondi <[email protected]>
4 ## This program is published under a GPLv2 license
5
6 """
7 Scapy: create, send, sniff, dissect and manipulate network packets.
8
9 Usable either from an interactive console or as a Python library.
10 http://www.secdev.org/projects/scapy
11 """
12
13 import os
14 import re
15 import subprocess
16
17
18 _SCAPY_PKG_DIR = os.path.dirname(__file__)
19
20 def _version_from_git_describe():
21 """
22 Read the version from ``git describe``. It returns the latest tag with an
23 optional suffix if the current directory is not exactly on the tag.
24
25 Example::
26
27 $ git describe --always
28 v2.3.2-346-g164a52c075c8
29
30 The tag prefix (``v``) and the git commit sha1 (``-g164a52c075c8``) are
31 removed if present.
32
33 If the current directory is not exactly on the tag, a ``.devN`` suffix is
34 appended where N is the number of commits made after the last tag.
35
36 Example::
37
38 >>> _version_from_git_describe()
39 '2.3.2.dev346'
40 """
41 p = subprocess.Popen(['git', 'describe', '--always'], cwd=_SCAPY_PKG_DIR,
42 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
43
44 out, err = p.communicate()
45
46 if p.returncode == 0:
47 tag = out.strip()
48 match = re.match(r'^v?(.+?)-(\d+)-g[a-f0-9]+$', tag)
49 if match:
50 # remove the 'v' prefix and add a '.devN' suffix
51 return '%s.dev%s' % (match.group(1), match.group(2))
52 else:
53 # just remove the 'v' prefix
54 return re.sub(r'^v', '', tag)
55 else:
56 raise subprocess.CalledProcessError(p.returncode, err)
57
58 def _version():
59 version_file = os.path.join(_SCAPY_PKG_DIR, 'VERSION')
60 try:
61 tag = _version_from_git_describe()
62 # successfully read the tag from git, write it in VERSION for
63 # installation and/or archive generation.
64 with open(version_file, 'w') as f:
65 f.write(tag)
66 return tag
67 except:
68 # failed to read the tag from git, try to read it from a VERSION file
69 try:
70 with open(version_file, 'r') as f:
71 tag = f.read()
72 return tag
73 except:
74 return 'unknown.version'
75
76 VERSION = _version()
77
78 if __name__ == "__main__":
79 from scapy.main import interact
80 interact()
81
[end of scapy/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scapy/__init__.py b/scapy/__init__.py
--- a/scapy/__init__.py
+++ b/scapy/__init__.py
@@ -71,7 +71,17 @@
tag = f.read()
return tag
except:
- return 'unknown.version'
+ # Rely on git archive "export-subst" git attribute.
+ # See 'man gitattributes' for more details.
+ git_archive_id = '$Format:%h %d$'
+ sha1 = git_archive_id.strip().split()[0]
+ match = re.search(r'tag:(\S+)', git_archive_id)
+ if match:
+ return match.group(1)
+ elif sha1:
+ return sha1
+ else:
+ return 'unknown.version'
VERSION = _version()
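The new fallback leans on `git archive`'s export-subst mechanism: when a release tarball is produced, the literal `$Format:%h %d$` placeholder is replaced with the abbreviated commit hash and ref decoration, which requires a `.gitattributes` entry along the lines of `scapy/__init__.py export-subst` (assumed here, since the diff does not show it). A tiny demo of the parsing on a simplified stand-in for the substituted text:

```python
import re

# Simplified stand-in; real %d output carries extra decoration such as
# branch names, kept out here so the parsing stays readable.
git_archive_id = '1a2b3c4 tag:v2.3.3'

sha1 = git_archive_id.strip().split()[0]            # '1a2b3c4'
match = re.search(r'tag:(\S+)', git_archive_id)
print(match.group(1) if match else sha1)            # 'v2.3.3'
```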
| {"golden_diff": "diff --git a/scapy/__init__.py b/scapy/__init__.py\n--- a/scapy/__init__.py\n+++ b/scapy/__init__.py\n@@ -71,7 +71,17 @@\n tag = f.read()\n return tag\n except:\n- return 'unknown.version'\n+ # Rely on git archive \"export-subst\" git attribute.\n+ # See 'man gitattributes' for more details.\n+ git_archive_id = '$Format:%h %d$'\n+ sha1 = git_archive_id.strip().split()[0]\n+ match = re.search(r'tag:(\\S+)', git_archive_id)\n+ if match:\n+ return match.group(1)\n+ elif sha1:\n+ return sha1\n+ else:\n+ return 'unknown.version'\n \n VERSION = _version()\n", "issue": "unknown.version in egg.info and in the banner\nHi,\r\n\r\nI'm porting 2.3.3 to OpenBSD. I had two issues:\r\n1. after the install with setuptools, the .egg-info generated is called 'lib/python2.7/site-packages/scapy-unknown.version-py2.7.egg-info'\r\n\r\nI patched setup.py to hardcode the version then it worked\r\n```\r\n--- setup.py.orig Tue Oct 18 10:44:43 2016\r\n+++ setup.py Mon Oct 31 17:19:45 2016\r\n@@ -47,7 +47,7 @@ if os.name == \"nt\":\r\n \r\n setup(\r\n name='scapy',\r\n- version=__import__('scapy').VERSION,\r\n+ version='2.3.3',\r\n packages=[\r\n 'scapy',\r\n 'scapy/arch',\r\n\r\n```\r\nI now have lib/python2.7/site-packages/scapy-2.3.3-py2.7.egg-info\r\n\r\n2. running scapy it says \"Welcome to Scapy (unknown.version)\" even with the setup.py change. I went through scapy-2.3.3/scapy/main.py but I didn't find anything suspicious.\n", "before_files": [{"content": "## This file is part of Scapy\n## See http://www.secdev.org/projects/scapy for more informations\n## Copyright (C) Philippe Biondi <[email protected]>\n## This program is published under a GPLv2 license\n\n\"\"\"\nScapy: create, send, sniff, dissect and manipulate network packets.\n\nUsable either from an interactive console or as a Python library.\nhttp://www.secdev.org/projects/scapy\n\"\"\"\n\nimport os\nimport re\nimport subprocess\n\n\n_SCAPY_PKG_DIR = os.path.dirname(__file__)\n\ndef _version_from_git_describe():\n \"\"\"\n Read the version from ``git describe``. 
It returns the latest tag with an\n optional suffix if the current directory is not exactly on the tag.\n\n Example::\n\n $ git describe --always\n v2.3.2-346-g164a52c075c8\n\n The tag prefix (``v``) and the git commit sha1 (``-g164a52c075c8``) are\n removed if present.\n\n If the current directory is not exactly on the tag, a ``.devN`` suffix is\n appended where N is the number of commits made after the last tag.\n\n Example::\n\n >>> _version_from_git_describe()\n '2.3.2.dev346'\n \"\"\"\n p = subprocess.Popen(['git', 'describe', '--always'], cwd=_SCAPY_PKG_DIR,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n out, err = p.communicate()\n\n if p.returncode == 0:\n tag = out.strip()\n match = re.match(r'^v?(.+?)-(\\d+)-g[a-f0-9]+$', tag)\n if match:\n # remove the 'v' prefix and add a '.devN' suffix\n return '%s.dev%s' % (match.group(1), match.group(2))\n else:\n # just remove the 'v' prefix\n return re.sub(r'^v', '', tag)\n else:\n raise subprocess.CalledProcessError(p.returncode, err)\n\ndef _version():\n version_file = os.path.join(_SCAPY_PKG_DIR, 'VERSION')\n try:\n tag = _version_from_git_describe()\n # successfully read the tag from git, write it in VERSION for\n # installation and/or archive generation.\n with open(version_file, 'w') as f:\n f.write(tag)\n return tag\n except:\n # failed to read the tag from git, try to read it from a VERSION file\n try:\n with open(version_file, 'r') as f:\n tag = f.read()\n return tag\n except:\n return 'unknown.version'\n\nVERSION = _version()\n\nif __name__ == \"__main__\":\n from scapy.main import interact\n interact()\n", "path": "scapy/__init__.py"}]} | 1,591 | 187 |
gh_patches_debug_24772 | rasdani/github-patches | git_diff | Flexget__Flexget-548 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[add] jinja split: Adds split into jinja filters
Sorry about this, I found the solution
</issue>
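The issue text itself carries no code, so for orientation only, here is what a `split` filter of the kind the title refers to typically looks like when registered on a plain Jinja2 environment (this is generic Jinja2 usage, not FlexGet's own plugin plumbing):

```python
from jinja2 import Environment

env = Environment()
env.filters['split'] = lambda value, sep=None: value.split(sep)

template = env.from_string("{{ 'a,b,c' | split(',') | join(' / ') }}")
print(template.render())  # a / b / c
```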
<code>
[start of flexget/plugins/input/trakt_emit.py]
1 from __future__ import unicode_literals, division, absolute_import
2 import hashlib
3 import logging
4 from urlparse import urljoin
5
6 from requests import RequestException
7
8 from flexget import plugin
9 from flexget.entry import Entry
10 from flexget.event import event
11 from flexget.utils import json
12 from flexget.utils.trakt import API_URL, get_session, make_list_slug, get_api_url
13
14 log = logging.getLogger('trakt_emit')
15
16
17 class TraktEmit(object):
18 """
19 Creates an entry for the latest or the next item in your watched or collected
20 episodes in your trakt account.
21
22 Syntax:
23
24 trakt_emit:
25 username: <value>
26 position: <last|next>
27 context: <collect|collected|watch|watched>
28 list: <value>
29
30 Options username, password and api_key are required.
31
32 """
33
34 schema = {
35 'type': 'object',
36 'properties': {
37 'username': {'type': 'string'},
38 'password': {'type': 'string'},
39 'position': {'type': 'string', 'enum': ['last', 'next'], 'default': 'next'},
40 'context': {'type': 'string', 'enum': ['watched', 'collected'], 'default': 'watched'},
41 'list': {'type': 'string'}
42 },
43 'required': ['username'],
44 'additionalProperties': False
45 }
46
47 def on_task_input(self, task, config):
48 session = get_session(config['username'], config.get('password'))
49 listed_series = {}
50 if config.get('list'):
51 url = urljoin(API_URL, 'users/%s/' % config['username'])
52 if config['list'] in ['collection', 'watchlist', 'watched']:
53 url = urljoin(url, '%s/shows' % config['list'])
54 else:
55 url = urljoin(url, 'lists/%s/items' % make_list_slug(config['list']))
56 try:
57 data = session.get(url).json()
58 except RequestException as e:
59 raise plugin.PluginError('Unable to get trakt list `%s`: %s' % (config['list'], e))
60 if not data:
61 log.warning('The list "%s" is empty.' % config['list'])
62 return
63 for item in data:
64 if item['show'] is not None:
65 if not item['show']['title']:
66 # Seems we can get entries with a blank show title sometimes
67 log.warning('Found trakt list show with no series name.')
68 continue
69 trakt_id = item['show']['ids']['trakt']
70 listed_series[trakt_id] = {
71 'series_name': item['show']['title'],
72 'trakt_id': trakt_id,
73 'tvdb_id': item['show']['ids']['tvdb']}
74 context = config['context']
75 if context == 'collected':
76 context = 'collection'
77 entries = []
78 for trakt_id, fields in listed_series.iteritems():
79 url = get_api_url('shows', trakt_id, 'progress', context)
80 try:
81 data = session.get(url).json()
82 except RequestException as e:
83 raise plugin.PluginError('TODO: error message')
84 if config['position'] == 'next' and data.get('next_episode'):
85 # If the next episode is already in the trakt database, we'll get it here
86 eps = data['next_episode']['season']
87 epn = data['next_episode']['number']
88 else:
89 # If we need last ep, or next_episode was not provided, search for last ep
90 for seas in reversed(data['seasons']):
91 # Find the first season with collected/watched episodes
92 if seas['completed'] > 0:
93 eps = seas['number']
94 # Pick the highest collected/watched episode
95 epn = max(item['number'] for item in seas['episodes'] if item['completed'])
96 # If we are in next episode mode, we have to increment this number
97 if config['position'] == 'next':
98 if seas['completed'] >= seas['aired']:
99 # TODO: next_episode doesn't count unaired episodes right now, this will skip to next
100 # season too early when there are episodes left to air this season.
101 eps += 1
102 epn = 1
103 else:
104 epn += 1
105 break
106 if eps and epn:
107 entry = self.make_entry(fields, eps, epn)
108 entries.append(entry)
109 return entries
110
111 def make_entry(self, fields, season, episode):
112 entry = Entry()
113 entry.update(fields)
114 entry['series_season'] = season
115 entry['series_episode'] = episode
116 entry['series_id_type'] = 'ep'
117 entry['series_id'] = 'S%02dE%02d' % (season, episode)
118 entry['title'] = entry['series_name'] + ' ' + entry['series_id']
119 entry['url'] = 'http://trakt.tv/shows/%s/seasons/%s/episodes/%s' % (fields['trakt_id'], season, episode)
120 return entry
121
122
123 @event('plugin.register')
124 def register_plugin():
125 plugin.register(TraktEmit, 'trakt_emit', api_ver=2)
126
[end of flexget/plugins/input/trakt_emit.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flexget/plugins/input/trakt_emit.py b/flexget/plugins/input/trakt_emit.py
--- a/flexget/plugins/input/trakt_emit.py
+++ b/flexget/plugins/input/trakt_emit.py
@@ -1,5 +1,4 @@
from __future__ import unicode_literals, division, absolute_import
-import hashlib
import logging
from urlparse import urljoin
@@ -8,7 +7,6 @@
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
-from flexget.utils import json
from flexget.utils.trakt import API_URL, get_session, make_list_slug, get_api_url
log = logging.getLogger('trakt_emit')
@@ -103,6 +101,12 @@
else:
epn += 1
break
+ else:
+ if config['position'] == 'next':
+ eps = epn = 1
+ else:
+ # There were no watched/collected episodes, nothing to emit in 'last' mode
+ continue
if eps and epn:
entry = self.make_entry(fields, eps, epn)
entries.append(entry)
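The added block hangs off the loop's `else` clause: Python runs a `for ... else` arm only when the loop finishes without hitting `break`, which is exactly the "no watched or collected episodes found" case here. In isolation:

```python
# for/else in miniature: the else arm runs because nothing breaks out.
for season in []:
    break
else:
    print('no season matched; fall back to defaults')
```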
| {"golden_diff": "diff --git a/flexget/plugins/input/trakt_emit.py b/flexget/plugins/input/trakt_emit.py\n--- a/flexget/plugins/input/trakt_emit.py\n+++ b/flexget/plugins/input/trakt_emit.py\n@@ -1,5 +1,4 @@\n from __future__ import unicode_literals, division, absolute_import\n-import hashlib\n import logging\n from urlparse import urljoin\n \n@@ -8,7 +7,6 @@\n from flexget import plugin\n from flexget.entry import Entry\n from flexget.event import event\n-from flexget.utils import json\n from flexget.utils.trakt import API_URL, get_session, make_list_slug, get_api_url\n \n log = logging.getLogger('trakt_emit')\n@@ -103,6 +101,12 @@\n else:\n epn += 1\n break\n+ else:\n+ if config['position'] == 'next':\n+ eps = epn = 1\n+ else:\n+ # There were no watched/collected episodes, nothing to emit in 'last' mode\n+ continue\n if eps and epn:\n entry = self.make_entry(fields, eps, epn)\n entries.append(entry)\n", "issue": "[add] jinja split: Adds split into jinja filters\nSorry about this, I found the solution\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nimport hashlib\nimport logging\nfrom urlparse import urljoin\n\nfrom requests import RequestException\n\nfrom flexget import plugin\nfrom flexget.entry import Entry\nfrom flexget.event import event\nfrom flexget.utils import json\nfrom flexget.utils.trakt import API_URL, get_session, make_list_slug, get_api_url\n\nlog = logging.getLogger('trakt_emit')\n\n\nclass TraktEmit(object):\n \"\"\"\n Creates an entry for the latest or the next item in your watched or collected\n episodes in your trakt account.\n\n Syntax:\n\n trakt_emit:\n username: <value>\n position: <last|next>\n context: <collect|collected|watch|watched>\n list: <value>\n\n Options username, password and api_key are required.\n\n \"\"\"\n\n schema = {\n 'type': 'object',\n 'properties': {\n 'username': {'type': 'string'},\n 'password': {'type': 'string'},\n 'position': {'type': 'string', 'enum': ['last', 'next'], 'default': 'next'},\n 'context': {'type': 'string', 'enum': ['watched', 'collected'], 'default': 'watched'},\n 'list': {'type': 'string'}\n },\n 'required': ['username'],\n 'additionalProperties': False\n }\n\n def on_task_input(self, task, config):\n session = get_session(config['username'], config.get('password'))\n listed_series = {}\n if config.get('list'):\n url = urljoin(API_URL, 'users/%s/' % config['username'])\n if config['list'] in ['collection', 'watchlist', 'watched']:\n url = urljoin(url, '%s/shows' % config['list'])\n else:\n url = urljoin(url, 'lists/%s/items' % make_list_slug(config['list']))\n try:\n data = session.get(url).json()\n except RequestException as e:\n raise plugin.PluginError('Unable to get trakt list `%s`: %s' % (config['list'], e))\n if not data:\n log.warning('The list \"%s\" is empty.' 
% config['list'])\n return\n for item in data:\n if item['show'] is not None:\n if not item['show']['title']:\n # Seems we can get entries with a blank show title sometimes\n log.warning('Found trakt list show with no series name.')\n continue\n trakt_id = item['show']['ids']['trakt']\n listed_series[trakt_id] = {\n 'series_name': item['show']['title'],\n 'trakt_id': trakt_id,\n 'tvdb_id': item['show']['ids']['tvdb']}\n context = config['context']\n if context == 'collected':\n context = 'collection'\n entries = []\n for trakt_id, fields in listed_series.iteritems():\n url = get_api_url('shows', trakt_id, 'progress', context)\n try:\n data = session.get(url).json()\n except RequestException as e:\n raise plugin.PluginError('TODO: error message')\n if config['position'] == 'next' and data.get('next_episode'):\n # If the next episode is already in the trakt database, we'll get it here\n eps = data['next_episode']['season']\n epn = data['next_episode']['number']\n else:\n # If we need last ep, or next_episode was not provided, search for last ep\n for seas in reversed(data['seasons']):\n # Find the first season with collected/watched episodes\n if seas['completed'] > 0:\n eps = seas['number']\n # Pick the highest collected/watched episode\n epn = max(item['number'] for item in seas['episodes'] if item['completed'])\n # If we are in next episode mode, we have to increment this number\n if config['position'] == 'next':\n if seas['completed'] >= seas['aired']:\n # TODO: next_episode doesn't count unaired episodes right now, this will skip to next\n # season too early when there are episodes left to air this season.\n eps += 1\n epn = 1\n else:\n epn += 1\n break\n if eps and epn:\n entry = self.make_entry(fields, eps, epn)\n entries.append(entry)\n return entries\n\n def make_entry(self, fields, season, episode):\n entry = Entry()\n entry.update(fields)\n entry['series_season'] = season\n entry['series_episode'] = episode\n entry['series_id_type'] = 'ep'\n entry['series_id'] = 'S%02dE%02d' % (season, episode)\n entry['title'] = entry['series_name'] + ' ' + entry['series_id']\n entry['url'] = 'http://trakt.tv/shows/%s/seasons/%s/episodes/%s' % (fields['trakt_id'], season, episode)\n return entry\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(TraktEmit, 'trakt_emit', api_ver=2)\n", "path": "flexget/plugins/input/trakt_emit.py"}]} | 1,977 | 258 |
gh_patches_debug_918 | rasdani/github-patches | git_diff | vas3k__vas3k.club-260 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The check_PR action broke on new pull requests
Everything went wrong here after a couple of changes in the requirements and Dockerfiles: https://github.com/vas3k/vas3k.club/blob/master/.github/workflows/CI.yml
Because of this, all new pull requests come up red and can only be merged by a stern admin hand. This CI needs rethinking somehow. Who has ideas?
Essentially I care about the linters and that Docker comes up successfully with the new code. Nothing else yet.
</issue>
<code>
[start of utils/images.py]
1 import io
2 import logging
3 import os
4 from urllib.parse import urlparse
5
6 import requests
7 from PIL import Image
8 from django.conf import settings
9
10 log = logging.getLogger(__name__)
11
12
13 def upload_image_bytes(
14 filename, data, resize=(192, 192), convert_to=None, quality=None
15 ):
16 if not data:
17 return None
18
19 if resize:
20 try:
21 image = Image.open(data)
22 except Exception as ex:
23 log.warning(f"Bad image data: {ex}")
24 return None
25
26 image.thumbnail(resize)
27 saved_image = io.BytesIO()
28 saved_image.name = filename
29
30 try:
31 image.save(saved_image)
32 except OSError:
33 log.warning(f"Error saving image data: {ex}")
34 return None
35
36 data = saved_image.getvalue()
37
38 upload_params = {
39 "code": settings.MEDIA_UPLOAD_CODE
40 }
41
42 if convert_to:
43 upload_params["convert_to"] = convert_to
44
45 if quality:
46 upload_params["quality"] = quality
47
48 try:
49 uploaded = requests.post(
50 url=settings.MEDIA_UPLOAD_URL,
51 params=upload_params,
52 files={"media": (filename, data)},
53 )
54 except requests.exceptions.RequestException as ex:
55 log.error(f"Image upload error: {ex}")
56 return None
57
58 if 200 <= uploaded.status_code <= 299:
59 try:
60 response_data = uploaded.json()
61 except Exception as ex:
62 log.error(f"Image upload error: {ex} ({uploaded.content})")
63 return None
64
65 return response_data["uploaded"][0]
66
67 return None
68
69
70 def upload_image_from_url(url, resize=(192, 192), convert_to="jpg", quality=90):
71 if settings.DEBUG or not settings.MEDIA_UPLOAD_URL or not settings.MEDIA_UPLOAD_CODE:
72 return url
73
74 if not url:
75 return None
76
77 image_name = os.path.basename(urlparse(url).path)
78 if "." not in image_name:
79 image_name += ".jpg"
80
81 try:
82 image_data = io.BytesIO(requests.get(url).content)
83 except requests.exceptions.RequestException:
84 return None
85
86 return upload_image_bytes(image_name, image_data, resize=resize, convert_to=convert_to, quality=quality)
87
[end of utils/images.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/utils/images.py b/utils/images.py
--- a/utils/images.py
+++ b/utils/images.py
@@ -29,7 +29,7 @@
try:
image.save(saved_image)
- except OSError:
+ except OSError as ex:
log.warning(f"Error saving image data: {ex}")
return None
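The one-word change matters because `ex` was only ever bound inside the earlier `except Exception as ex` handler in the file above, and Python 3 unbinds such names when the handler ends, so reaching the `except OSError:` branch raised a NameError instead of logging. A standalone demonstration of that scoping rule:

```python
try:
    raise ValueError('boom')
except Exception as ex:
    pass  # `ex` is deleted again as soon as this block ends

try:
    print(ex)
except NameError as e:
    print(e)  # name 'ex' is not defined
```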
| {"golden_diff": "diff --git a/utils/images.py b/utils/images.py\n--- a/utils/images.py\n+++ b/utils/images.py\n@@ -29,7 +29,7 @@\n \n try:\n image.save(saved_image)\n- except OSError:\n+ except OSError as ex:\n log.warning(f\"Error saving image data: {ex}\")\n return None\n", "issue": "\u0421\u043b\u043e\u043c\u0430\u043b\u0441\u044f check_PR \u044d\u043a\u0448\u043d \u043d\u0430 \u043d\u043e\u0432\u044b\u0435 \u043f\u0443\u043b\u043b\u0440\u0435\u043a\u0432\u0435\u0441\u0442\u044b\n\u0412\u043e\u0442 \u0437\u0434\u0435\u0441\u044c \u0432\u0441\u0435 \u043f\u043e\u0448\u043b\u043e \u043d\u0435 \u0442\u0430\u043a \u043f\u043e\u0441\u043b\u0435 \u043f\u0430\u0440\u044b \u0438\u0437\u043c\u0435\u043d\u0435\u043d\u0438\u0439 \u0432 requirements \u0438 \u0434\u043e\u043a\u0435\u0440\u0444\u0430\u0439\u043b\u0430\u0445: https://github.com/vas3k/vas3k.club/blob/master/.github/workflows/CI.yml\r\n\r\n\u0418\u0437-\u0437\u0430 \u044d\u0442\u043e\u0433\u043e \u0432\u0441\u0435 \u043d\u043e\u0432\u044b\u0435 \u043f\u0443\u043b\u043b\u0440\u0435\u043a\u0432\u0435\u0441\u0442\u044b \u043a\u0440\u0430\u0441\u043d\u0435\u043d\u044c\u043a\u0438\u0435 \u0438 \u043c\u0435\u0440\u0436\u0438\u0442\u044c \u0438\u0445 \u043f\u0440\u0438\u0445\u043e\u0434\u0438\u0442\u0441\u044f \u0442\u043e\u043b\u044c\u043a\u043e \u0441\u0443\u0440\u043e\u0432\u043e\u0439 \u0430\u0434\u043c\u0438\u043d\u0441\u043a\u043e\u0439 \u0440\u0443\u043a\u043e\u0439. \u041d\u0430\u0434\u043e \u0431\u044b \u043f\u0435\u0440\u0435\u043e\u0441\u043c\u044b\u0441\u043b\u0438\u0442\u044c \u044d\u0442\u043e\u0442 CI \u043a\u0430\u043a-\u043d\u0438\u0431\u0443\u0434\u044c. \u0423 \u043a\u043e\u0433\u043e \u0435\u0441\u0442\u044c \u0438\u0434\u0435\u0438?\r\n\r\n\u041f\u043e \u0441\u0443\u0442\u0438 \u043c\u043d\u0435 \u0432\u0430\u0436\u043d\u044b \u043b\u0438\u043d\u0442\u0435\u0440\u044b \u0438 \u0447\u0442\u043e\u0431\u044b \u0434\u043e\u043a\u0435\u0440 \u0441 \u043d\u043e\u0432\u044b\u043c \u043a\u043e\u0434\u043e\u043c \u0443\u0441\u043f\u0435\u0448\u043d\u043e \u043f\u043e\u0434\u043d\u0438\u043c\u0430\u043b\u0441\u044f. 
\u041e\u0441\u0442\u0430\u043b\u044c\u043d\u043e\u0433\u043e \u043f\u043e\u043a\u0430 \u043d\u0435\u0442.\n", "before_files": [{"content": "import io\nimport logging\nimport os\nfrom urllib.parse import urlparse\n\nimport requests\nfrom PIL import Image\nfrom django.conf import settings\n\nlog = logging.getLogger(__name__)\n\n\ndef upload_image_bytes(\n filename, data, resize=(192, 192), convert_to=None, quality=None\n):\n if not data:\n return None\n\n if resize:\n try:\n image = Image.open(data)\n except Exception as ex:\n log.warning(f\"Bad image data: {ex}\")\n return None\n\n image.thumbnail(resize)\n saved_image = io.BytesIO()\n saved_image.name = filename\n\n try:\n image.save(saved_image)\n except OSError:\n log.warning(f\"Error saving image data: {ex}\")\n return None\n\n data = saved_image.getvalue()\n\n upload_params = {\n \"code\": settings.MEDIA_UPLOAD_CODE\n }\n\n if convert_to:\n upload_params[\"convert_to\"] = convert_to\n\n if quality:\n upload_params[\"quality\"] = quality\n\n try:\n uploaded = requests.post(\n url=settings.MEDIA_UPLOAD_URL,\n params=upload_params,\n files={\"media\": (filename, data)},\n )\n except requests.exceptions.RequestException as ex:\n log.error(f\"Image upload error: {ex}\")\n return None\n\n if 200 <= uploaded.status_code <= 299:\n try:\n response_data = uploaded.json()\n except Exception as ex:\n log.error(f\"Image upload error: {ex} ({uploaded.content})\")\n return None\n\n return response_data[\"uploaded\"][0]\n\n return None\n\n\ndef upload_image_from_url(url, resize=(192, 192), convert_to=\"jpg\", quality=90):\n if settings.DEBUG or not settings.MEDIA_UPLOAD_URL or not settings.MEDIA_UPLOAD_CODE:\n return url\n\n if not url:\n return None\n\n image_name = os.path.basename(urlparse(url).path)\n if \".\" not in image_name:\n image_name += \".jpg\"\n\n try:\n image_data = io.BytesIO(requests.get(url).content)\n except requests.exceptions.RequestException:\n return None\n\n return upload_image_bytes(image_name, image_data, resize=resize, convert_to=convert_to, quality=quality)\n", "path": "utils/images.py"}]} | 1,350 | 76 |
gh_patches_debug_3052 | rasdani/github-patches | git_diff | Lightning-Universe__lightning-flash-1486 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The type of `n_gram` is mislabeled as `bool`; it should be `int`.
## 🐛 Bug
In the Translation task, the type of `n_gram` is mislabeled as `bool`; it should be `int`.
### To Reproduce
<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->
```
flash translation from_hf_datasets --help
```
The error raised:
```
translation: error: Configuration check failed :: Parser key "model.n_gram": Expected a <class 'bool'> but got "4"
```
</issue>
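
A minimal sketch (an illustration added here, not part of the original issue) of why an annotation-driven CLI rejects the value: the parser validates against the `bool` annotation rather than the `int` default, so `4` fails the type check. The `translation_task` stand-in below is hypothetical; only the standard library is used.

```python
import typing

def translation_task(n_gram: bool = 4, smooth: bool = True):
    """Hypothetical stand-in for the real constructor signature."""

hints = typing.get_type_hints(translation_task)
default = translation_task.__defaults__[0]

# A CLI built from annotations trusts the hint, not the default value:
print(hints["n_gram"])                        # <class 'bool'>
print(type(default))                          # <class 'int'>
print(isinstance(default, hints["n_gram"]))   # False -> "Expected a <class 'bool'> but got 4"
```
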
<code>
[start of flash/text/seq2seq/translation/model.py]
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Any, Dict, Optional, Union
15
16 from torchmetrics import BLEUScore
17
18 from flash.core.utilities.imports import _TM_GREATER_EQUAL_0_7_0
19 from flash.core.utilities.types import LOSS_FN_TYPE, LR_SCHEDULER_TYPE, METRICS_TYPE, OPTIMIZER_TYPE
20 from flash.text.seq2seq.core.model import Seq2SeqTask
21
22
23 class TranslationTask(Seq2SeqTask):
24 """The ``TranslationTask`` is a :class:`~flash.Task` for Seq2Seq text translation. For more details, see
25 :ref:`translation`.
26
27 You can change the backbone to any translation model from `HuggingFace/transformers
28 <https://huggingface.co/models?filter=pytorch&pipeline_tag=translation>`__ using the ``backbone`` argument.
29
30 Args:
31 backbone: backbone model to use for the task.
32 max_source_length: The maximum length to pad / truncate input sequences to.
33 max_target_length: The maximum length to pad / truncate target sequences to.
34 padding: The type of padding to apply. One of: "longest" or ``True``, "max_length", "do_not_pad" or
35 ``False``.
36 loss_fn: Loss function for training.
37 optimizer: Optimizer to use for training.
38 lr_scheduler: The LR scheduler to use during training.
39         metrics: Metrics to compute for training and evaluation. Defaults to calculating the BLEU metric.
40 Changing this argument currently has no effect.
41 learning_rate: Learning rate to use for training, defaults to `1e-5`
42 num_beams: Number of beams to use in validation when generating predictions. Defaults to `4`
43 n_gram: Maximum n_grams to use in metric calculation. Defaults to `4`
44 smooth: Apply smoothing in BLEU calculation. Defaults to `True`
45 enable_ort: Enable Torch ONNX Runtime Optimization: https://onnxruntime.ai/docs/#onnx-runtime-for-training
46 """
47
48 def __init__(
49 self,
50 backbone: str = "t5-small",
51 tokenizer_kwargs: Optional[Dict[str, Any]] = None,
52 max_source_length: int = 128,
53 max_target_length: int = 128,
54 padding: Union[str, bool] = "max_length",
55 loss_fn: LOSS_FN_TYPE = None,
56 optimizer: OPTIMIZER_TYPE = "Adam",
57 lr_scheduler: LR_SCHEDULER_TYPE = None,
58 metrics: METRICS_TYPE = None,
59 learning_rate: Optional[float] = None,
60 num_beams: Optional[int] = 4,
61 n_gram: bool = 4,
62 smooth: bool = True,
63 enable_ort: bool = False,
64 ):
65 self.save_hyperparameters()
66 super().__init__(
67 backbone=backbone,
68 tokenizer_kwargs=tokenizer_kwargs,
69 max_source_length=max_source_length,
70 max_target_length=max_target_length,
71 padding=padding,
72 loss_fn=loss_fn,
73 optimizer=optimizer,
74 lr_scheduler=lr_scheduler,
75 metrics=metrics,
76 learning_rate=learning_rate,
77 num_beams=num_beams,
78 enable_ort=enable_ort,
79 )
80 self.bleu = BLEUScore(
81 n_gram=n_gram,
82 smooth=smooth,
83 )
84
85 @property
86 def task(self) -> str:
87 return "translation"
88
89 def compute_metrics(self, generated_tokens, batch, prefix):
90 reference_corpus = self.decode(batch["labels"])
91 # wrap targets in list as score expects a list of potential references
92 reference_corpus = [[reference] for reference in reference_corpus]
93
94 translate_corpus = self.decode(generated_tokens)
95 translate_corpus = [line for line in translate_corpus]
96
97 if _TM_GREATER_EQUAL_0_7_0:
98 result = self.bleu(translate_corpus, reference_corpus)
99 else:
100 result = self.bleu(reference_corpus, translate_corpus)
101 self.log(f"{prefix}_bleu_score", result, on_step=False, on_epoch=True, prog_bar=True)
102
[end of flash/text/seq2seq/translation/model.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flash/text/seq2seq/translation/model.py b/flash/text/seq2seq/translation/model.py
--- a/flash/text/seq2seq/translation/model.py
+++ b/flash/text/seq2seq/translation/model.py
@@ -58,7 +58,7 @@
metrics: METRICS_TYPE = None,
learning_rate: Optional[float] = None,
num_beams: Optional[int] = 4,
- n_gram: bool = 4,
+ n_gram: int = 4,
smooth: bool = True,
enable_ort: bool = False,
):
| {"golden_diff": "diff --git a/flash/text/seq2seq/translation/model.py b/flash/text/seq2seq/translation/model.py\n--- a/flash/text/seq2seq/translation/model.py\n+++ b/flash/text/seq2seq/translation/model.py\n@@ -58,7 +58,7 @@\n metrics: METRICS_TYPE = None,\n learning_rate: Optional[float] = None,\n num_beams: Optional[int] = 4,\n- n_gram: bool = 4,\n+ n_gram: int = 4,\n smooth: bool = True,\n enable_ort: bool = False,\n ):\n", "issue": "The type of `n_gram` is mislabeled as bool, which should be int type.\n## \ud83d\udc1b Bug\r\n\r\nIn Translation Task:\r\nThe type of `n_gram` is mislabeled as bool, which should be int type.\r\n\r\n### To Reproduce\r\n\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n```\r\nflash translation from_hf_datasets --help\r\n```\r\nThe error raised:\r\n```\r\ntranslation: error: Configuration check failed :: Parser key \"model.n_gram\": Expected a <class 'bool'> but got \"4\"\r\n```\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Dict, Optional, Union\n\nfrom torchmetrics import BLEUScore\n\nfrom flash.core.utilities.imports import _TM_GREATER_EQUAL_0_7_0\nfrom flash.core.utilities.types import LOSS_FN_TYPE, LR_SCHEDULER_TYPE, METRICS_TYPE, OPTIMIZER_TYPE\nfrom flash.text.seq2seq.core.model import Seq2SeqTask\n\n\nclass TranslationTask(Seq2SeqTask):\n \"\"\"The ``TranslationTask`` is a :class:`~flash.Task` for Seq2Seq text translation. For more details, see\n :ref:`translation`.\n\n You can change the backbone to any translation model from `HuggingFace/transformers\n <https://huggingface.co/models?filter=pytorch&pipeline_tag=translation>`__ using the ``backbone`` argument.\n\n Args:\n backbone: backbone model to use for the task.\n max_source_length: The maximum length to pad / truncate input sequences to.\n max_target_length: The maximum length to pad / truncate target sequences to.\n padding: The type of padding to apply. One of: \"longest\" or ``True``, \"max_length\", \"do_not_pad\" or\n ``False``.\n loss_fn: Loss function for training.\n optimizer: Optimizer to use for training.\n lr_scheduler: The LR scheduler to use during training.\n metrics: Metrics to compute for training and evaluation. Defauls to calculating the BLEU metric.\n Changing this argument currently has no effect.\n learning_rate: Learning rate to use for training, defaults to `1e-5`\n num_beams: Number of beams to use in validation when generating predictions. Defaults to `4`\n n_gram: Maximum n_grams to use in metric calculation. Defaults to `4`\n smooth: Apply smoothing in BLEU calculation. 
Defaults to `True`\n enable_ort: Enable Torch ONNX Runtime Optimization: https://onnxruntime.ai/docs/#onnx-runtime-for-training\n \"\"\"\n\n def __init__(\n self,\n backbone: str = \"t5-small\",\n tokenizer_kwargs: Optional[Dict[str, Any]] = None,\n max_source_length: int = 128,\n max_target_length: int = 128,\n padding: Union[str, bool] = \"max_length\",\n loss_fn: LOSS_FN_TYPE = None,\n optimizer: OPTIMIZER_TYPE = \"Adam\",\n lr_scheduler: LR_SCHEDULER_TYPE = None,\n metrics: METRICS_TYPE = None,\n learning_rate: Optional[float] = None,\n num_beams: Optional[int] = 4,\n n_gram: bool = 4,\n smooth: bool = True,\n enable_ort: bool = False,\n ):\n self.save_hyperparameters()\n super().__init__(\n backbone=backbone,\n tokenizer_kwargs=tokenizer_kwargs,\n max_source_length=max_source_length,\n max_target_length=max_target_length,\n padding=padding,\n loss_fn=loss_fn,\n optimizer=optimizer,\n lr_scheduler=lr_scheduler,\n metrics=metrics,\n learning_rate=learning_rate,\n num_beams=num_beams,\n enable_ort=enable_ort,\n )\n self.bleu = BLEUScore(\n n_gram=n_gram,\n smooth=smooth,\n )\n\n @property\n def task(self) -> str:\n return \"translation\"\n\n def compute_metrics(self, generated_tokens, batch, prefix):\n reference_corpus = self.decode(batch[\"labels\"])\n # wrap targets in list as score expects a list of potential references\n reference_corpus = [[reference] for reference in reference_corpus]\n\n translate_corpus = self.decode(generated_tokens)\n translate_corpus = [line for line in translate_corpus]\n\n if _TM_GREATER_EQUAL_0_7_0:\n result = self.bleu(translate_corpus, reference_corpus)\n else:\n result = self.bleu(reference_corpus, translate_corpus)\n self.log(f\"{prefix}_bleu_score\", result, on_step=False, on_epoch=True, prog_bar=True)\n", "path": "flash/text/seq2seq/translation/model.py"}]} | 1,863 | 140 |
gh_patches_debug_28402 | rasdani/github-patches | git_diff | dask__distributed-416 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Distributed.joblib code fails to affect sklearn
The comments in [this stackoverflow answer](http://stackoverflow.com/questions/38601026/easy-way-to-use-parallel-options-of-scikit-learn-functions-on-hpc/38814491#38814491) raise concerns about the effectiveness of `distributed.joblib` at parallelizing vanilla sklearn code. It appears that sklearn ships with its own version of Joblib, which the plugin registration in `distributed.joblib` does not affect.
It would be good to test sklearn functionality and, if necessary, add plugin registration to `sklearn.externals.joblib` in the same way we do to normal `joblib`.
</issue>
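
A quick way to confirm the two-copies problem described above (a sketch; it assumes a scikit-learn version from that era, which still vendored joblib under `sklearn.externals`):

```python
import joblib
import sklearn.externals.joblib as sk_joblib  # vendored copy inside older scikit-learn

# Two distinct module objects, each with its own backend registry:
print(joblib is sk_joblib)   # False
print(joblib.__file__)
print(sk_joblib.__file__)

# Registering a backend with one therefore has no effect on the other;
# register_parallel_backend() would need to be called on both.
```
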
<code>
[start of distributed/joblib.py]
1 from __future__ import print_function, division, absolute_import
2
3 from joblib._parallel_backends import ParallelBackendBase, AutoBatchingMixin
4 from joblib.parallel import register_parallel_backend
5 from tornado import gen
6
7 from .executor import Executor, _wait
8
9
10 class DistributedBackend(ParallelBackendBase, AutoBatchingMixin):
11 MIN_IDEAL_BATCH_DURATION = 0.2
12 MAX_IDEAL_BATCH_DURATION = 1.0
13
14 def __init__(self, scheduler_host='127.0.0.1:8786', loop=None):
15 self.executor = Executor(scheduler_host, loop=loop)
16 self.futures = set()
17
18 def configure(self, n_jobs=1, parallel=None, **backend_args):
19 return self.effective_n_jobs(n_jobs)
20
21 def effective_n_jobs(self, n_jobs=1):
22 return sum(self.executor.ncores().values())
23
24 def apply_async(self, func, *args, **kwargs):
25 callback = kwargs.pop('callback', None)
26 kwargs['pure'] = False
27 future = self.executor.submit(func, *args, **kwargs)
28 self.futures.add(future)
29
30 @gen.coroutine
31 def callback_wrapper():
32 result = yield _wait([future])
33 self.futures.remove(future)
34 callback(result) # gets called in separate thread
35
36 self.executor.loop.add_callback(callback_wrapper)
37
38 future.get = future.result # monkey patch to achieve AsyncResult API
39 return future
40
41 def abort_everything(self, ensure_ready=True):
42 # Tell the executor to cancel any task submitted via this instance
43 # as joblib.Parallel will never access those results.
44 self.executor.cancel(self.futures)
45 self.futures.clear()
46
47
48 register_parallel_backend('distributed', DistributedBackend)
49
[end of distributed/joblib.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/distributed/joblib.py b/distributed/joblib.py
--- a/distributed/joblib.py
+++ b/distributed/joblib.py
@@ -1,10 +1,36 @@
from __future__ import print_function, division, absolute_import
-from joblib._parallel_backends import ParallelBackendBase, AutoBatchingMixin
-from joblib.parallel import register_parallel_backend
+from distutils.version import LooseVersion
+
from tornado import gen
from .executor import Executor, _wait
+from .utils import ignoring
+
+
+# A user could have installed joblib, sklearn, both, or neither. Further, only
+# joblib >= 0.10.0 supports backends, so we also need to check for that. This
+# bit of logic is to ensure that we create and register the backend for all
+# viable installations of joblib.
+joblib = sk_joblib = None
+with ignoring(ImportError):
+ import joblib
+ if LooseVersion(joblib.__version__) < '0.10.0':
+ joblib = None
+with ignoring(ImportError):
+ import sklearn.externals.joblib as sk_joblib
+ if LooseVersion(sk_joblib.__version__) < '0.10.0':
+ sk_joblib = None
+
+if joblib:
+ from joblib._parallel_backends import (ParallelBackendBase,
+ AutoBatchingMixin)
+elif sk_joblib:
+ from sklearn.externals.joblib._parallel_backends import (
+ ParallelBackendBase, AutoBatchingMixin)
+else:
+ raise RuntimeError("Joblib backend requires either `joblib` >= '0.10.0' "
+ " or `sklearn` > '0.17.1'. Please install or upgrade")
class DistributedBackend(ParallelBackendBase, AutoBatchingMixin):
@@ -45,4 +71,8 @@
self.futures.clear()
-register_parallel_backend('distributed', DistributedBackend)
+# Register the backend with any available versions of joblib
+if joblib:
+ joblib.register_parallel_backend('distributed', DistributedBackend)
+if sk_joblib:
+ sk_joblib.register_parallel_backend('distributed', DistributedBackend)
| {"golden_diff": "diff --git a/distributed/joblib.py b/distributed/joblib.py\n--- a/distributed/joblib.py\n+++ b/distributed/joblib.py\n@@ -1,10 +1,36 @@\n from __future__ import print_function, division, absolute_import\n \n-from joblib._parallel_backends import ParallelBackendBase, AutoBatchingMixin\n-from joblib.parallel import register_parallel_backend\n+from distutils.version import LooseVersion\n+\n from tornado import gen\n \n from .executor import Executor, _wait\n+from .utils import ignoring\n+\n+\n+# A user could have installed joblib, sklearn, both, or neither. Further, only\n+# joblib >= 0.10.0 supports backends, so we also need to check for that. This\n+# bit of logic is to ensure that we create and register the backend for all\n+# viable installations of joblib.\n+joblib = sk_joblib = None\n+with ignoring(ImportError):\n+ import joblib\n+ if LooseVersion(joblib.__version__) < '0.10.0':\n+ joblib = None\n+with ignoring(ImportError):\n+ import sklearn.externals.joblib as sk_joblib\n+ if LooseVersion(sk_joblib.__version__) < '0.10.0':\n+ sk_joblib = None\n+\n+if joblib:\n+ from joblib._parallel_backends import (ParallelBackendBase,\n+ AutoBatchingMixin)\n+elif sk_joblib:\n+ from sklearn.externals.joblib._parallel_backends import (\n+ ParallelBackendBase, AutoBatchingMixin)\n+else:\n+ raise RuntimeError(\"Joblib backend requires either `joblib` >= '0.10.0' \"\n+ \" or `sklearn` > '0.17.1'. Please install or upgrade\")\n \n \n class DistributedBackend(ParallelBackendBase, AutoBatchingMixin):\n@@ -45,4 +71,8 @@\n self.futures.clear()\n \n \n-register_parallel_backend('distributed', DistributedBackend)\n+# Register the backend with any available versions of joblib\n+if joblib:\n+ joblib.register_parallel_backend('distributed', DistributedBackend)\n+if sk_joblib:\n+ sk_joblib.register_parallel_backend('distributed', DistributedBackend)\n", "issue": "Distributed.joblib code fails to affect sklearn\nThe comments in [this stackoverflow answer](http://stackoverflow.com/questions/38601026/easy-way-to-use-parallel-options-of-scikit-learn-functions-on-hpc/38814491#38814491) raise concerns about the effectiveness of `distributed.joblib` to parallelize vanilla sklearn code. 
It appears that sklearn ships with its own version of Joblib, which the plugin registration in `distributed.joblib` does not affect.\n\nIt would be good to test sklearn functionality and, if necessary, add plugin registration to `sklearn.externals.joblib` in the same way we do to normal `joblib`.\n\n", "before_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nfrom joblib._parallel_backends import ParallelBackendBase, AutoBatchingMixin\nfrom joblib.parallel import register_parallel_backend\nfrom tornado import gen\n\nfrom .executor import Executor, _wait\n\n\nclass DistributedBackend(ParallelBackendBase, AutoBatchingMixin):\n MIN_IDEAL_BATCH_DURATION = 0.2\n MAX_IDEAL_BATCH_DURATION = 1.0\n\n def __init__(self, scheduler_host='127.0.0.1:8786', loop=None):\n self.executor = Executor(scheduler_host, loop=loop)\n self.futures = set()\n\n def configure(self, n_jobs=1, parallel=None, **backend_args):\n return self.effective_n_jobs(n_jobs)\n\n def effective_n_jobs(self, n_jobs=1):\n return sum(self.executor.ncores().values())\n\n def apply_async(self, func, *args, **kwargs):\n callback = kwargs.pop('callback', None)\n kwargs['pure'] = False\n future = self.executor.submit(func, *args, **kwargs)\n self.futures.add(future)\n\n @gen.coroutine\n def callback_wrapper():\n result = yield _wait([future])\n self.futures.remove(future)\n callback(result) # gets called in separate thread\n\n self.executor.loop.add_callback(callback_wrapper)\n\n future.get = future.result # monkey patch to achieve AsyncResult API\n return future\n\n def abort_everything(self, ensure_ready=True):\n # Tell the executor to cancel any task submitted via this instance\n # as joblib.Parallel will never access those results.\n self.executor.cancel(self.futures)\n self.futures.clear()\n\n\nregister_parallel_backend('distributed', DistributedBackend)\n", "path": "distributed/joblib.py"}]} | 1,163 | 484 |
gh_patches_debug_60682 | rasdani/github-patches | git_diff | OCHA-DAP__hdx-ckan-2076 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Login page: change spacing on left panel
The spacing in the left panel is odd. Change to something like the below:

Note, this will stay in backlog for now as we may want to revise this page to align with the Frog design.
</issue>
<code>
[start of ckanext-hdx_theme/ckanext/hdx_theme/version.py]
1 hdx_version = 'v0.5.13'
2
[end of ckanext-hdx_theme/ckanext/hdx_theme/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py
--- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py
+++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py
@@ -1 +1 @@
-hdx_version = 'v0.5.13'
+hdx_version = 'v0.5.15'
| {"golden_diff": "diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n--- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n+++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n@@ -1 +1 @@\n-hdx_version = 'v0.5.13'\n+hdx_version = 'v0.5.15'\n", "issue": "Login page: change spacing on left panel \nThe spacing in the left panel is odd. Change to something like the below: \n\n\n\nNote, this will stay in backlog for now as we may want to revise this page to align with the Frog design.\n\n", "before_files": [{"content": "hdx_version = 'v0.5.13'\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py"}]} | 683 | 109 |
gh_patches_debug_10301 | rasdani/github-patches | git_diff | cisagov__manage.get.gov-1157 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update /availability API to pull from new registry
### Issue Description
The [current API](https://github.com/cisagov/getgov/blob/784cc0f618e056c262512d688e8e4316dd25c9e4/src/api/views.py#L14) consists of a second-hand pull of canonical data that the .gov program [publishes to GitHub](https://github.com/cisagov/dotgov-data/blob/main/current-full.csv). Change this implementation so that queries poll the new registry/Whois/RDAP.
### Acceptance Criteria
- [x] The checkDomain method is used to check that a domain is in the registry or not
and
- [x] The check is used at /availability
- [x] Tests are implemented and/or updated
- [ ] In the new domain application flow, where a user checks whether a domain is available for use, the flow should use this /availability endpoint (if it does not already), and tests should be updated to check that /availability and EPP are triggered on this page as well.
### Additional Context (optional)
Once done, the backend work of #476 and frontend work of #561 can be completed.
_Consider an add-on to defend the endpoint from bot spam (one possible shape is sketched after this issue block)._
### Implementation Notes
EPP has a `check` command that can be run with just the name of a given domain. This should be used as the method for checking the desired domain at this endpoint.
### Issue Links
Blocking #476 and #561
Blocked by #1028
</issue>
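
On the "defend the endpoint from bot spam" note, one possible shape is a small per-IP throttle, sketched below with Django's cache framework; the rate, window, and key format are assumptions for illustration, not part of the issue.

```python
from functools import wraps

from django.core.cache import cache
from django.http import JsonResponse


def throttle(max_calls=10, window_seconds=60):
    """Tiny per-IP rate limiter -- a sketch, not production-hardened."""
    def decorator(view):
        @wraps(view)
        def wrapped(request, *args, **kwargs):
            key = f"availability-throttle:{request.META.get('REMOTE_ADDR', '')}"
            calls = cache.get_or_set(key, 0, timeout=window_seconds)
            if calls >= max_calls:
                return JsonResponse(
                    {"available": False, "message": "Too many requests."},
                    status=429,
                )
            cache.incr(key)
            return view(request, *args, **kwargs)
        return wrapped
    return decorator
```

A decorated view would then sit between `@require_http_methods` and the handler, e.g. `@throttle(max_calls=10, window_seconds=60)` above `available()`.
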
<code>
[start of src/api/views.py]
1 """Internal API views"""
2 from django.apps import apps
3 from django.views.decorators.http import require_http_methods
4 from django.http import JsonResponse
5
6 from django.contrib.auth.decorators import login_required
7
8 import requests
9
10 from cachetools.func import ttl_cache
11
12
13 DOMAIN_FILE_URL = (
14 "https://raw.githubusercontent.com/cisagov/dotgov-data/main/current-full.csv"
15 )
16
17
18 DOMAIN_API_MESSAGES = {
19 "required": "Enter the .gov domain you want. Don’t include “www” or “.gov.”"
20 " For example, if you want www.city.gov, you would enter “city”"
21 " (without the quotes).",
22 "extra_dots": "Enter the .gov domain you want without any periods.",
23 "unavailable": "That domain isn’t available. Try entering another one."
24 " Contact us if you need help coming up with a domain.",
25 "invalid": "Enter a domain using only letters,"
26 " numbers, or hyphens (though we don't recommend using hyphens).",
27 "success": "That domain is available!",
28 }
29
30
31 # this file doesn't change that often, nor is it that big, so cache the result
32 # in memory for ten minutes
33 @ttl_cache(ttl=600)
34 def _domains():
35 """Return a list of the current .gov domains.
36
37 Fetch a file from DOMAIN_FILE_URL, parse the CSV for the domain,
38 lowercase everything and return the list.
39 """
40 DraftDomain = apps.get_model("registrar.DraftDomain")
41 # 5 second timeout
42 file_contents = requests.get(DOMAIN_FILE_URL, timeout=5).text
43 domains = set()
44 # skip the first line
45 for line in file_contents.splitlines()[1:]:
46 # get the domain before the first comma
47 domain = line.split(",", 1)[0]
48 # sanity-check the string we got from the file here
49 if DraftDomain.string_could_be_domain(domain):
50 # lowercase everything when we put it in domains
51 domains.add(domain.lower())
52 return domains
53
54
55 def in_domains(domain):
56 """Return true if the given domain is in the domains list.
57
58 The given domain is lowercased to match against the domains list. If the
59 given domain doesn't end with .gov, ".gov" is added when looking for
60 a match.
61 """
62 domain = domain.lower()
63 if domain.endswith(".gov"):
64 return domain.lower() in _domains()
65 else:
66 # domain search string doesn't end with .gov, add it on here
67 return (domain + ".gov") in _domains()
68
69
70 @require_http_methods(["GET"])
71 @login_required
72 def available(request, domain=""):
73 """Is a given domain available or not.
74
75 Response is a JSON dictionary with the key "available" and value true or
76 false.
77 """
78 DraftDomain = apps.get_model("registrar.DraftDomain")
79 # validate that the given domain could be a domain name and fail early if
80 # not.
81 if not (
82 DraftDomain.string_could_be_domain(domain)
83 or DraftDomain.string_could_be_domain(domain + ".gov")
84 ):
85 return JsonResponse(
86 {"available": False, "message": DOMAIN_API_MESSAGES["invalid"]}
87 )
88 # a domain is available if it is NOT in the list of current domains
89 if in_domains(domain):
90 return JsonResponse(
91 {"available": False, "message": DOMAIN_API_MESSAGES["unavailable"]}
92 )
93 else:
94 return JsonResponse(
95 {"available": True, "message": DOMAIN_API_MESSAGES["success"]}
96 )
97
[end of src/api/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/api/views.py b/src/api/views.py
--- a/src/api/views.py
+++ b/src/api/views.py
@@ -59,12 +59,12 @@
given domain doesn't end with .gov, ".gov" is added when looking for
a match.
"""
- domain = domain.lower()
+ Domain = apps.get_model("registrar.Domain")
if domain.endswith(".gov"):
- return domain.lower() in _domains()
+ return Domain.available(domain)
else:
# domain search string doesn't end with .gov, add it on here
- return (domain + ".gov") in _domains()
+ return Domain.available(domain + ".gov")
@require_http_methods(["GET"])
| {"golden_diff": "diff --git a/src/api/views.py b/src/api/views.py\n--- a/src/api/views.py\n+++ b/src/api/views.py\n@@ -59,12 +59,12 @@\n given domain doesn't end with .gov, \".gov\" is added when looking for\n a match.\n \"\"\"\n- domain = domain.lower()\n+ Domain = apps.get_model(\"registrar.Domain\")\n if domain.endswith(\".gov\"):\n- return domain.lower() in _domains()\n+ return Domain.available(domain)\n else:\n # domain search string doesn't end with .gov, add it on here\n- return (domain + \".gov\") in _domains()\n+ return Domain.available(domain + \".gov\")\n \n \n @require_http_methods([\"GET\"])\n", "issue": "Update /availability API to pull from new registry\n### Issue Description\r\n\r\nThe [current API](https://github.com/cisagov/getgov/blob/784cc0f618e056c262512d688e8e4316dd25c9e4/src/api/views.py#L14) consists of a second-hand pull of canonical data that the .gov program [publishes to GitHub](https://github.com/cisagov/dotgov-data/blob/main/current-full.csv). Change this implementation so that queries poll the new registry/Whois/RDAP.\r\n\r\n### Acceptance Criteria\r\n\r\n- [x] The checkDomain method is used to check that a domain is in the registry or not\r\n\r\nand\r\n\r\n- [x] The check is used at /availability \r\n- [x] Tests are implemented and/or updated\r\n- [ ] In the new domain application flow, where a user is checking if a domain is available for use, it should now use this /availability endpoint (if not already) and tests should be updated to check that /availability and epp is being triggered on this page as well. \r\n\r\n### Additional Context (optional)\r\n\r\nOnce done, the backend work of #476 and frontend work of #561 can be completed.\r\n\r\n_Consider add-on to defend the endpoint from bot spam._\r\n\r\n### Implementation Notes\r\n\r\nEpp has a check command that can be run with just the name of a given domain. This should be used as the method for checking the desired domain at this endpoint.\r\n\r\n### Issue Links\r\n\r\nBlocking #476 and #561\r\nBlocked by #1028 \n", "before_files": [{"content": "\"\"\"Internal API views\"\"\"\nfrom django.apps import apps\nfrom django.views.decorators.http import require_http_methods\nfrom django.http import JsonResponse\n\nfrom django.contrib.auth.decorators import login_required\n\nimport requests\n\nfrom cachetools.func import ttl_cache\n\n\nDOMAIN_FILE_URL = (\n \"https://raw.githubusercontent.com/cisagov/dotgov-data/main/current-full.csv\"\n)\n\n\nDOMAIN_API_MESSAGES = {\n \"required\": \"Enter the .gov domain you want. Don\u2019t include \u201cwww\u201d or \u201c.gov.\u201d\"\n \" For example, if you want www.city.gov, you would enter \u201ccity\u201d\"\n \" (without the quotes).\",\n \"extra_dots\": \"Enter the .gov domain you want without any periods.\",\n \"unavailable\": \"That domain isn\u2019t available. 
Try entering another one.\"\n \" Contact us if you need help coming up with a domain.\",\n \"invalid\": \"Enter a domain using only letters,\"\n \" numbers, or hyphens (though we don't recommend using hyphens).\",\n \"success\": \"That domain is available!\",\n}\n\n\n# this file doesn't change that often, nor is it that big, so cache the result\n# in memory for ten minutes\n@ttl_cache(ttl=600)\ndef _domains():\n \"\"\"Return a list of the current .gov domains.\n\n Fetch a file from DOMAIN_FILE_URL, parse the CSV for the domain,\n lowercase everything and return the list.\n \"\"\"\n DraftDomain = apps.get_model(\"registrar.DraftDomain\")\n # 5 second timeout\n file_contents = requests.get(DOMAIN_FILE_URL, timeout=5).text\n domains = set()\n # skip the first line\n for line in file_contents.splitlines()[1:]:\n # get the domain before the first comma\n domain = line.split(\",\", 1)[0]\n # sanity-check the string we got from the file here\n if DraftDomain.string_could_be_domain(domain):\n # lowercase everything when we put it in domains\n domains.add(domain.lower())\n return domains\n\n\ndef in_domains(domain):\n \"\"\"Return true if the given domain is in the domains list.\n\n The given domain is lowercased to match against the domains list. If the\n given domain doesn't end with .gov, \".gov\" is added when looking for\n a match.\n \"\"\"\n domain = domain.lower()\n if domain.endswith(\".gov\"):\n return domain.lower() in _domains()\n else:\n # domain search string doesn't end with .gov, add it on here\n return (domain + \".gov\") in _domains()\n\n\n@require_http_methods([\"GET\"])\n@login_required\ndef available(request, domain=\"\"):\n \"\"\"Is a given domain available or not.\n\n Response is a JSON dictionary with the key \"available\" and value true or\n false.\n \"\"\"\n DraftDomain = apps.get_model(\"registrar.DraftDomain\")\n # validate that the given domain could be a domain name and fail early if\n # not.\n if not (\n DraftDomain.string_could_be_domain(domain)\n or DraftDomain.string_could_be_domain(domain + \".gov\")\n ):\n return JsonResponse(\n {\"available\": False, \"message\": DOMAIN_API_MESSAGES[\"invalid\"]}\n )\n # a domain is available if it is NOT in the list of current domains\n if in_domains(domain):\n return JsonResponse(\n {\"available\": False, \"message\": DOMAIN_API_MESSAGES[\"unavailable\"]}\n )\n else:\n return JsonResponse(\n {\"available\": True, \"message\": DOMAIN_API_MESSAGES[\"success\"]}\n )\n", "path": "src/api/views.py"}]} | 1,836 | 162 |
gh_patches_debug_7477 | rasdani/github-patches | git_diff | zigpy__zha-device-handlers-680 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Device Support Request] LIVARNO LUX/LIDL Led Panel 60x60 (Tuya TS0502A)
[LIDL Service website](https://www.lidl-service.com/cps/rde/xchg/SID-3771F4F2-8A18D468/lsp/hs.xsl/product.html?id=5027306530&title=Smart+LED+Light+Panel&count=1)
**Describe the solution you'd like**
- [x] power control
- [x] brightness control
- [x] CCT control
- [x] remove color control

Maybe this is because the board is also color-capable:

**Device signature - this can be acquired by removing the device from ZHA and pairing it again from the add devices screen. Be sure to add the entire content of the log panel after pairing the device to a code block below this line.**
[Device signature and pairing log at my dev VM](https://pastebin.com/ifAkAXaF)
**Additional context**
https://zigbee.blakadder.com/Tuya_TS0502A.html
Touchlink resetable
</issue>
<code>
[start of zhaquirks/lidl/cct.py]
1 """Quirk for LIDL CCT bulb."""
2 from zigpy.profiles import zha
3 from zigpy.quirks import CustomCluster, CustomDevice
4 from zigpy.zcl.clusters.general import (
5 Basic,
6 GreenPowerProxy,
7 Groups,
8 Identify,
9 LevelControl,
10 OnOff,
11 Ota,
12 Scenes,
13 Time,
14 )
15 from zigpy.zcl.clusters.lighting import Color
16 from zigpy.zcl.clusters.lightlink import LightLink
17
18 from zhaquirks.const import (
19 DEVICE_TYPE,
20 ENDPOINTS,
21 INPUT_CLUSTERS,
22 MODELS_INFO,
23 OUTPUT_CLUSTERS,
24 PROFILE_ID,
25 )
26
27
28 class LidlCCTColorCluster(CustomCluster, Color):
29 """Lidl CCT Lighting custom cluster."""
30
31 # Remove RGB color wheel for CCT Lighting: only expose color temperature
32 # LIDL bulbs do not correctly report this attribute (comes back as None in Home Assistant)
33 _CONSTANT_ATTRIBUTES = {0x400A: 16}
34
35
36 class CCTLight(CustomDevice):
37 """Lidl CCT Lighting device."""
38
39 signature = {
40 MODELS_INFO: [("_TZ3000_49qchf10", "TS0502A"), ("_TZ3000_oborybow", "TS0502A")],
41 ENDPOINTS: {
42 1: {
43 # <SimpleDescriptor endpoint=1 profile=260 device_type=268
44 # device_version=1
45 # input_clusters=[0, 3, 4, 5, 6, 8, 768, 4096]
46 # output_clusters=[10, 25]
47 PROFILE_ID: zha.PROFILE_ID,
48 DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT,
49 INPUT_CLUSTERS: [
50 Basic.cluster_id,
51 Identify.cluster_id,
52 Groups.cluster_id,
53 Scenes.cluster_id,
54 OnOff.cluster_id,
55 LevelControl.cluster_id,
56 Color.cluster_id,
57 LightLink.cluster_id,
58 ],
59 OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
60 },
61 242: {
62 # <SimpleDescriptor endpoint=242 profile=41440 device_type=97
63 # device_version=0
64 # input_clusters=[]
65 # output_clusters=[33]
66 PROFILE_ID: 41440,
67 DEVICE_TYPE: 97,
68 INPUT_CLUSTERS: [],
69 OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],
70 },
71 },
72 }
73
74 replacement = {
75 ENDPOINTS: {
76 1: {
77 PROFILE_ID: zha.PROFILE_ID,
78 DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT,
79 INPUT_CLUSTERS: [
80 Basic.cluster_id,
81 Identify.cluster_id,
82 Groups.cluster_id,
83 Scenes.cluster_id,
84 OnOff.cluster_id,
85 LevelControl.cluster_id,
86 LidlCCTColorCluster,
87 LightLink.cluster_id,
88 ],
89 OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
90 },
91 242: {
92 PROFILE_ID: 41440,
93 DEVICE_TYPE: 97,
94 INPUT_CLUSTERS: [],
95 OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],
96 },
97 }
98 }
99
[end of zhaquirks/lidl/cct.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zhaquirks/lidl/cct.py b/zhaquirks/lidl/cct.py
--- a/zhaquirks/lidl/cct.py
+++ b/zhaquirks/lidl/cct.py
@@ -37,7 +37,12 @@
"""Lidl CCT Lighting device."""
signature = {
- MODELS_INFO: [("_TZ3000_49qchf10", "TS0502A"), ("_TZ3000_oborybow", "TS0502A")],
+ MODELS_INFO: [
+ ("_TZ3000_49qchf10", "TS0502A"),
+ ("_TZ3000_oborybow", "TS0502A"),
+ ("_TZ3000_9evm3otq", "TS0502A"),
+ ("_TZ3000_rylaozuc", "TS0502A"),
+ ],
ENDPOINTS: {
1: {
# <SimpleDescriptor endpoint=1 profile=260 device_type=268
| {"golden_diff": "diff --git a/zhaquirks/lidl/cct.py b/zhaquirks/lidl/cct.py\n--- a/zhaquirks/lidl/cct.py\n+++ b/zhaquirks/lidl/cct.py\n@@ -37,7 +37,12 @@\n \"\"\"Lidl CCT Lighting device.\"\"\"\n \n signature = {\n- MODELS_INFO: [(\"_TZ3000_49qchf10\", \"TS0502A\"), (\"_TZ3000_oborybow\", \"TS0502A\")],\n+ MODELS_INFO: [\n+ (\"_TZ3000_49qchf10\", \"TS0502A\"),\n+ (\"_TZ3000_oborybow\", \"TS0502A\"),\n+ (\"_TZ3000_9evm3otq\", \"TS0502A\"),\n+ (\"_TZ3000_rylaozuc\", \"TS0502A\"),\n+ ],\n ENDPOINTS: {\n 1: {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=268\n", "issue": "[Device Support Request] LIVARNO LUX/LIDL Led Panel 60x60 (Tuya TS0502A)\n[LIDL Service website](https://www.lidl-service.com/cps/rde/xchg/SID-3771F4F2-8A18D468/lsp/hs.xsl/product.html?id=5027306530&title=Smart+LED+Light+Panel&count=1)\r\n\r\n**Describe the solution you'd like**\r\n- [x] power control\r\n- [x] brightness control\r\n- [x] CCT control\r\n- [x] remove color control\r\n\r\n\r\n\r\nMaybee because the board is also color capable\r\n\r\n\r\n\r\n**Device signature - this can be acquired by removing the device from ZHA and pairing it again from the add devices screen. Be sure to add the entire content of the log panel after pairing the device to a code block below this line.**\r\n[Device signature and pairing log at my dev VM](https://pastebin.com/ifAkAXaF)\r\n\r\n\r\n**Additional context**\r\nhttps://zigbee.blakadder.com/Tuya_TS0502A.html\r\nTouchlink resetable\n", "before_files": [{"content": "\"\"\"Quirk for LIDL CCT bulb.\"\"\"\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomCluster, CustomDevice\nfrom zigpy.zcl.clusters.general import (\n Basic,\n GreenPowerProxy,\n Groups,\n Identify,\n LevelControl,\n OnOff,\n Ota,\n Scenes,\n Time,\n)\nfrom zigpy.zcl.clusters.lighting import Color\nfrom zigpy.zcl.clusters.lightlink import LightLink\n\nfrom zhaquirks.const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n)\n\n\nclass LidlCCTColorCluster(CustomCluster, Color):\n \"\"\"Lidl CCT Lighting custom cluster.\"\"\"\n\n # Remove RGB color wheel for CCT Lighting: only expose color temperature\n # LIDL bulbs do not correctly report this attribute (comes back as None in Home Assistant)\n _CONSTANT_ATTRIBUTES = {0x400A: 16}\n\n\nclass CCTLight(CustomDevice):\n \"\"\"Lidl CCT Lighting device.\"\"\"\n\n signature = {\n MODELS_INFO: [(\"_TZ3000_49qchf10\", \"TS0502A\"), (\"_TZ3000_oborybow\", \"TS0502A\")],\n ENDPOINTS: {\n 1: {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=268\n # device_version=1\n # input_clusters=[0, 3, 4, 5, 6, 8, 768, 4096]\n # output_clusters=[10, 25]\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Color.cluster_id,\n LightLink.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n },\n 242: {\n # <SimpleDescriptor endpoint=242 profile=41440 device_type=97\n # device_version=0\n # input_clusters=[]\n # output_clusters=[33]\n PROFILE_ID: 41440,\n DEVICE_TYPE: 97,\n INPUT_CLUSTERS: [],\n OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],\n },\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.COLOR_TEMPERATURE_LIGHT,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n OnOff.cluster_id,\n 
LevelControl.cluster_id,\n LidlCCTColorCluster,\n LightLink.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n },\n 242: {\n PROFILE_ID: 41440,\n DEVICE_TYPE: 97,\n INPUT_CLUSTERS: [],\n OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],\n },\n }\n }\n", "path": "zhaquirks/lidl/cct.py"}]} | 1,844 | 261 |
gh_patches_debug_57081 | rasdani/github-patches | git_diff | SeldonIO__MLServer-945 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
MLServer is incompatible with latest release of FastAPI
MLServer is incompatible with the [latest release of FastAPI](https://github.com/tiangolo/fastapi/releases/tag/0.89.0), and installing any version of MLServer will result in the following error. A temporary workaround was added in this [pull request](https://github.com/SeldonIO/MLServer/pull/934); however, I think this needs a more in-depth root-cause analysis.
```
2023-01-09 02:11:59,296 [mlserver] INFO - Using asyncio event-loop policy: uvloop
2023-01-09 02:11:59,301 [mlserver] WARNING - Model name 'node-1' is different than model's folder name '25-mlserver-example-single'.
Traceback (most recent call last):
File "/home/cc/miniconda3/envs/central-1/bin/mlserver", line 8, in <module>
sys.exit(main())
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/cli/main.py", line 79, in main
root()
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py", line 1130, in __call__
return self.main(*args, **kwargs)
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py", line 1055, in main
rv = self.invoke(ctx)
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py", line 1657, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py", line 1404, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py", line 760, in invoke
return __callback(*args, **kwargs)
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/cli/main.py", line 20, in wrapper
return asyncio.run(f(*args, **kwargs))
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "uvloop/loop.pyx", line 1517, in uvloop.loop.Loop.run_until_complete
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/cli/main.py", line 43, in start
server = MLServer(settings)
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/server.py", line 71, in __init__
self._rest_server = RESTServer(
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/rest/server.py", line 26, in __init__
self._app = create_app(
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/rest/app.py", line 43, in create_app
APIRoute(
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/fastapi/routing.py", line 400, in __init__
self.response_field = create_response_field(
File "/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/fastapi/utils.py", line 90, in create_response_field
raise fastapi.exceptions.FastAPIError(
fastapi.exceptions.FastAPIError: Invalid args for response field! Hint: check that <class 'starlette.responses.Response'> is a valid pydantic field type
```
</issue>
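
A minimal reproduction of the failure mode, as I understand it from the traceback (an assumption, not taken from the issue): FastAPI 0.89.0 began inferring a response model from return annotations and rejects plain `Response` annotations of the kind MLServer's routes use. The version pin in the fix below works around exactly that release.

```python
from fastapi import FastAPI
from starlette.responses import Response

app = FastAPI()


# Under fastapi==0.89.0 this raises at route-definition time:
#   FastAPIError: Invalid args for response field! Hint: check that
#   <class 'starlette.responses.Response'> is a valid pydantic field type
@app.get("/ping")
def ping() -> Response:
    return Response(content="pong")
```
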
<code>
[start of setup.py]
1 import os
2
3 from typing import Dict
4 from setuptools import setup, find_packages
5
6 ROOT_PATH = os.path.dirname(__file__)
7 PKG_NAME = "mlserver"
8 PKG_PATH = os.path.join(ROOT_PATH, PKG_NAME)
9
10
11 def _load_version() -> str:
12 version = ""
13 version_path = os.path.join(PKG_PATH, "version.py")
14 with open(version_path) as fp:
15 version_module: Dict[str, str] = {}
16 exec(fp.read(), version_module)
17 version = version_module["__version__"]
18
19 return version
20
21
22 def _load_description() -> str:
23 readme_path = os.path.join(ROOT_PATH, "README.md")
24 with open(readme_path) as fp:
25 return fp.read()
26
27
28 env_marker_cpython = (
29 "sys_platform != 'win32'"
30 " and (sys_platform != 'cygwin'"
31 " and platform_python_implementation != 'PyPy')"
32 )
33
34 setup(
35 name=PKG_NAME,
36 version=_load_version(),
37 url="https://github.com/SeldonIO/MLServer.git",
38 author="Seldon Technologies Ltd.",
39 author_email="[email protected]",
40 description="ML server",
41 packages=find_packages(exclude=["tests", "tests.*"]),
42 install_requires=[
43 "click",
44 "fastapi<=0.88.0",
45 "python-dotenv",
46 "grpcio",
47 "importlib-metadata;python_version<'3.8'",
48 "numpy",
49 "pandas",
50 "protobuf",
51 "uvicorn",
52 "starlette_exporter",
53 "py-grpc-prometheus",
54 "uvloop;" + env_marker_cpython,
55 "aiokafka",
56 "tritonclient[http]>=2.24",
57 "aiofiles",
58 "orjson",
59 ],
60 entry_points={"console_scripts": ["mlserver=mlserver.cli:main"]},
61 long_description=_load_description(),
62 long_description_content_type="text/markdown",
63 license="Apache 2.0",
64 )
65
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -41,7 +41,8 @@
packages=find_packages(exclude=["tests", "tests.*"]),
install_requires=[
"click",
- "fastapi<=0.88.0",
+ # 0.89.0: https://github.com/tiangolo/fastapi/issues/5861
+ "fastapi<=0.89.1, !=0.89.0",
"python-dotenv",
"grpcio",
"importlib-metadata;python_version<'3.8'",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -41,7 +41,8 @@\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n install_requires=[\n \"click\",\n- \"fastapi<=0.88.0\",\n+ # 0.89.0: https://github.com/tiangolo/fastapi/issues/5861\n+ \"fastapi<=0.89.1, !=0.89.0\",\n \"python-dotenv\",\n \"grpcio\",\n \"importlib-metadata;python_version<'3.8'\",\n", "issue": "MLServer is incompatible with latest release of FastAPI\nMLServer is incompatible with [latest release of FastAPI](https://github.com/tiangolo/fastapi/releases/tag/0.89.0), and installing any version of MLServer will result in the following error, temp workaround added in this [pull request](https://github.com/SeldonIO/MLServer/pull/934) however, I think this needs a more in-depth root-cause analysis.\r\n```\r\n2023-01-09 02:11:59,296 [mlserver] INFO - Using asyncio event-loop policy: uvloop\r\n2023-01-09 02:11:59,301 [mlserver] WARNING - Model name 'node-1' is different than model's folder name '25-mlserver-example-single'.\r\nTraceback (most recent call last):\r\n File \"/home/cc/miniconda3/envs/central-1/bin/mlserver\", line 8, in <module>\r\n sys.exit(main())\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/cli/main.py\", line 79, in main\r\n root()\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py\", line 1130, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py\", line 1055, in main\r\n rv = self.invoke(ctx)\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py\", line 1657, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py\", line 1404, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/click/core.py\", line 760, in invoke\r\n return __callback(*args, **kwargs)\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/cli/main.py\", line 20, in wrapper\r\n return asyncio.run(f(*args, **kwargs))\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/asyncio/runners.py\", line 44, in run\r\n return loop.run_until_complete(main)\r\n File \"uvloop/loop.pyx\", line 1517, in uvloop.loop.Loop.run_until_complete\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/cli/main.py\", line 43, in start\r\n server = MLServer(settings)\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/server.py\", line 71, in __init__\r\n self._rest_server = RESTServer(\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/rest/server.py\", line 26, in __init__\r\n self._app = create_app(\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/mlserver/rest/app.py\", line 43, in create_app\r\n APIRoute(\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/fastapi/routing.py\", line 400, in __init__\r\n self.response_field = create_response_field(\r\n File \"/home/cc/miniconda3/envs/central-1/lib/python3.9/site-packages/fastapi/utils.py\", line 90, in create_response_field\r\n raise fastapi.exceptions.FastAPIError(\r\nfastapi.exceptions.FastAPIError: Invalid args for response field! 
Hint: check that <class 'starlette.responses.Response'> is a valid pydantic field type\r\n```\n", "before_files": [{"content": "import os\n\nfrom typing import Dict\nfrom setuptools import setup, find_packages\n\nROOT_PATH = os.path.dirname(__file__)\nPKG_NAME = \"mlserver\"\nPKG_PATH = os.path.join(ROOT_PATH, PKG_NAME)\n\n\ndef _load_version() -> str:\n version = \"\"\n version_path = os.path.join(PKG_PATH, \"version.py\")\n with open(version_path) as fp:\n version_module: Dict[str, str] = {}\n exec(fp.read(), version_module)\n version = version_module[\"__version__\"]\n\n return version\n\n\ndef _load_description() -> str:\n readme_path = os.path.join(ROOT_PATH, \"README.md\")\n with open(readme_path) as fp:\n return fp.read()\n\n\nenv_marker_cpython = (\n \"sys_platform != 'win32'\"\n \" and (sys_platform != 'cygwin'\"\n \" and platform_python_implementation != 'PyPy')\"\n)\n\nsetup(\n name=PKG_NAME,\n version=_load_version(),\n url=\"https://github.com/SeldonIO/MLServer.git\",\n author=\"Seldon Technologies Ltd.\",\n author_email=\"[email protected]\",\n description=\"ML server\",\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n install_requires=[\n \"click\",\n \"fastapi<=0.88.0\",\n \"python-dotenv\",\n \"grpcio\",\n \"importlib-metadata;python_version<'3.8'\",\n \"numpy\",\n \"pandas\",\n \"protobuf\",\n \"uvicorn\",\n \"starlette_exporter\",\n \"py-grpc-prometheus\",\n \"uvloop;\" + env_marker_cpython,\n \"aiokafka\",\n \"tritonclient[http]>=2.24\",\n \"aiofiles\",\n \"orjson\",\n ],\n entry_points={\"console_scripts\": [\"mlserver=mlserver.cli:main\"]},\n long_description=_load_description(),\n long_description_content_type=\"text/markdown\",\n license=\"Apache 2.0\",\n)\n", "path": "setup.py"}]} | 2,011 | 139 |
gh_patches_debug_40195 | rasdani/github-patches | git_diff | scikit-hep__pyhf-1183 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
use strategy 0 for user-provided gradients in minuit
# Description
Since we have an exact gradient, we can disable the checks Minuit does.
cc @alexander-held
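
For illustration, a minimal sketch of the intended behaviour (my own example, assuming an iminuit version where the `grad=` keyword and the `strategy` attribute are available, as used elsewhere in this optimizer):

```
import iminuit


def fcn(x, y):
    # simple least-squares-like objective
    return (x - 2) ** 2 + (y - 3) ** 2


def grad(x, y):
    # exact analytic gradient of fcn
    return [2 * (x - 2), 2 * (y - 3)]


m = iminuit.Minuit(fcn, x=0.0, y=0.0, grad=grad)
m.strategy = 0  # gradient is exact, so skip Minuit's internal checks
m.migrad()
m.hesse()  # an explicit HESSE call is still needed for good error estimates
```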
</issue>
<code>
[start of src/pyhf/optimize/opt_minuit.py]
1 """Minuit Optimizer Class."""
2 from .. import default_backend, exceptions
3 from .mixins import OptimizerMixin
4 import scipy
5 import iminuit
6
7
8 class minuit_optimizer(OptimizerMixin):
9 """
10 Optimizer that uses iminuit.Minuit.migrad.
11 """
12
13 __slots__ = ['name', 'errordef', 'steps']
14
15 def __init__(self, *args, **kwargs):
16 """
17 Create MINUIT Optimizer.
18
19 .. note::
20
21 ``errordef`` should be 1.0 for a least-squares cost function and 0.5
22 for negative log-likelihood function. See page 37 of
23 http://hep.fi.infn.it/minuit.pdf. This parameter is sometimes
24 called ``UP`` in the ``MINUIT`` docs.
25
26
27 Args:
28 errordef (:obj:`float`): See minuit docs. Default is 1.0.
29 steps (:obj:`int`): Number of steps for the bounds. Default is 1000.
30 """
31 self.name = 'minuit'
32 self.errordef = kwargs.pop('errordef', 1)
33 self.steps = kwargs.pop('steps', 1000)
34 super().__init__(*args, **kwargs)
35
36 def _get_minimizer(
37 self, objective_and_grad, init_pars, init_bounds, fixed_vals=None, do_grad=False
38 ):
39
40 step_sizes = [(b[1] - b[0]) / float(self.steps) for b in init_bounds]
41 fixed_vals = fixed_vals or []
42 # Minuit wants True/False for each parameter
43 fixed_bools = [False] * len(init_pars)
44 for index, val in fixed_vals:
45 fixed_bools[index] = True
46 init_pars[index] = val
47 step_sizes[index] = 0.0
48
49 # Minuit requires jac=callable
50 if do_grad:
51 wrapped_objective = lambda pars: objective_and_grad(pars)[0]
52 jac = lambda pars: objective_and_grad(pars)[1]
53 else:
54 wrapped_objective = objective_and_grad
55 jac = None
56
57 kwargs = dict(
58 fcn=wrapped_objective,
59 grad=jac,
60 start=init_pars,
61 error=step_sizes,
62 limit=init_bounds,
63 fix=fixed_bools,
64 print_level=self.verbose,
65 errordef=self.errordef,
66 )
67 return iminuit.Minuit.from_array_func(**kwargs)
68
69 def _minimize(
70 self,
71 minimizer,
72 func,
73 x0,
74 do_grad=False,
75 bounds=None,
76 fixed_vals=None,
77 return_uncertainties=False,
78 options={},
79 ):
80
81 """
82 Same signature as :func:`scipy.optimize.minimize`.
83
84 Note: an additional `minuit` is injected into the fitresult to get the
85 underlying minimizer.
86
87 Minimizer Options:
88 maxiter (:obj:`int`): maximum number of iterations. Default is 100000.
89 return_uncertainties (:obj:`bool`): Return uncertainties on the fitted parameters. Default is off.
90
91 Returns:
92 fitresult (scipy.optimize.OptimizeResult): the fit result
93 """
94 maxiter = options.pop('maxiter', self.maxiter)
95 return_uncertainties = options.pop('return_uncertainties', False)
96 if options:
97 raise exceptions.Unsupported(
98 f"Unsupported options were passed in: {list(options.keys())}."
99 )
100
101 minimizer.migrad(ncall=maxiter)
102 # Following lines below come from:
103 # https://github.com/scikit-hep/iminuit/blob/22f6ed7146c1d1f3274309656d8c04461dde5ba3/src/iminuit/_minimize.py#L106-L125
104 message = "Optimization terminated successfully."
105 if not minimizer.valid:
106 message = "Optimization failed."
107 fmin = minimizer.fmin
108 if fmin.has_reached_call_limit:
109 message += " Call limit was reached."
110 if fmin.is_above_max_edm:
111 message += " Estimated distance to minimum too large."
112
113 n = len(x0)
114 hess_inv = default_backend.ones((n, n))
115 if minimizer.valid:
116 hess_inv = minimizer.np_covariance()
117
118 unc = None
119 if return_uncertainties:
120 unc = minimizer.np_errors()
121
122 return scipy.optimize.OptimizeResult(
123 x=minimizer.np_values(),
124 unc=unc,
125 success=minimizer.valid,
126 fun=minimizer.fval,
127 hess_inv=hess_inv,
128 message=message,
129 nfev=minimizer.ncalls,
130 njev=minimizer.ngrads,
131 minuit=minimizer,
132 )
133
[end of src/pyhf/optimize/opt_minuit.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pyhf/optimize/opt_minuit.py b/src/pyhf/optimize/opt_minuit.py
--- a/src/pyhf/optimize/opt_minuit.py
+++ b/src/pyhf/optimize/opt_minuit.py
@@ -10,7 +10,7 @@
Optimizer that uses iminuit.Minuit.migrad.
"""
- __slots__ = ['name', 'errordef', 'steps']
+ __slots__ = ['name', 'errordef', 'steps', 'strategy']
def __init__(self, *args, **kwargs):
"""
@@ -27,10 +27,12 @@
Args:
errordef (:obj:`float`): See minuit docs. Default is 1.0.
steps (:obj:`int`): Number of steps for the bounds. Default is 1000.
+ strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is None.
"""
self.name = 'minuit'
self.errordef = kwargs.pop('errordef', 1)
self.steps = kwargs.pop('steps', 1000)
+ self.strategy = kwargs.pop('strategy', None)
super().__init__(*args, **kwargs)
def _get_minimizer(
@@ -87,17 +89,24 @@
Minimizer Options:
maxiter (:obj:`int`): maximum number of iterations. Default is 100000.
return_uncertainties (:obj:`bool`): Return uncertainties on the fitted parameters. Default is off.
+ strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is to configure in response to `do_grad`.
Returns:
fitresult (scipy.optimize.OptimizeResult): the fit result
"""
maxiter = options.pop('maxiter', self.maxiter)
return_uncertainties = options.pop('return_uncertainties', False)
+ # 0: Fast, user-provided gradient
+ # 1: Default, no user-provided gradient
+ strategy = options.pop(
+ 'strategy', self.strategy if self.strategy else not do_grad
+ )
if options:
raise exceptions.Unsupported(
f"Unsupported options were passed in: {list(options.keys())}."
)
+ minimizer.strategy = strategy
minimizer.migrad(ncall=maxiter)
# Following lines below come from:
# https://github.com/scikit-hep/iminuit/blob/22f6ed7146c1d1f3274309656d8c04461dde5ba3/src/iminuit/_minimize.py#L106-L125
@@ -113,6 +122,8 @@
n = len(x0)
hess_inv = default_backend.ones((n, n))
if minimizer.valid:
+ # Extra call to hesse() after migrad() is always needed for good error estimates. If you pass a user-provided gradient to MINUIT, convergence is faster.
+ minimizer.hesse()
hess_inv = minimizer.np_covariance()
unc = None
| {"golden_diff": "diff --git a/src/pyhf/optimize/opt_minuit.py b/src/pyhf/optimize/opt_minuit.py\n--- a/src/pyhf/optimize/opt_minuit.py\n+++ b/src/pyhf/optimize/opt_minuit.py\n@@ -10,7 +10,7 @@\n Optimizer that uses iminuit.Minuit.migrad.\n \"\"\"\n \n- __slots__ = ['name', 'errordef', 'steps']\n+ __slots__ = ['name', 'errordef', 'steps', 'strategy']\n \n def __init__(self, *args, **kwargs):\n \"\"\"\n@@ -27,10 +27,12 @@\n Args:\n errordef (:obj:`float`): See minuit docs. Default is 1.0.\n steps (:obj:`int`): Number of steps for the bounds. Default is 1000.\n+ strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is None.\n \"\"\"\n self.name = 'minuit'\n self.errordef = kwargs.pop('errordef', 1)\n self.steps = kwargs.pop('steps', 1000)\n+ self.strategy = kwargs.pop('strategy', None)\n super().__init__(*args, **kwargs)\n \n def _get_minimizer(\n@@ -87,17 +89,24 @@\n Minimizer Options:\n maxiter (:obj:`int`): maximum number of iterations. Default is 100000.\n return_uncertainties (:obj:`bool`): Return uncertainties on the fitted parameters. Default is off.\n+ strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is to configure in response to `do_grad`.\n \n Returns:\n fitresult (scipy.optimize.OptimizeResult): the fit result\n \"\"\"\n maxiter = options.pop('maxiter', self.maxiter)\n return_uncertainties = options.pop('return_uncertainties', False)\n+ # 0: Fast, user-provided gradient\n+ # 1: Default, no user-provided gradient\n+ strategy = options.pop(\n+ 'strategy', self.strategy if self.strategy else not do_grad\n+ )\n if options:\n raise exceptions.Unsupported(\n f\"Unsupported options were passed in: {list(options.keys())}.\"\n )\n \n+ minimizer.strategy = strategy\n minimizer.migrad(ncall=maxiter)\n # Following lines below come from:\n # https://github.com/scikit-hep/iminuit/blob/22f6ed7146c1d1f3274309656d8c04461dde5ba3/src/iminuit/_minimize.py#L106-L125\n@@ -113,6 +122,8 @@\n n = len(x0)\n hess_inv = default_backend.ones((n, n))\n if minimizer.valid:\n+ # Extra call to hesse() after migrad() is always needed for good error estimates. If you pass a user-provided gradient to MINUIT, convergence is faster.\n+ minimizer.hesse()\n hess_inv = minimizer.np_covariance()\n \n unc = None\n", "issue": "use strategy 0 for user-provided gradients in minuit\n# Description\r\n\r\nsince we have exact gradient we can disable the checks minuit does\r\n\r\ncc @alexander-held \n", "before_files": [{"content": "\"\"\"Minuit Optimizer Class.\"\"\"\nfrom .. import default_backend, exceptions\nfrom .mixins import OptimizerMixin\nimport scipy\nimport iminuit\n\n\nclass minuit_optimizer(OptimizerMixin):\n \"\"\"\n Optimizer that uses iminuit.Minuit.migrad.\n \"\"\"\n\n __slots__ = ['name', 'errordef', 'steps']\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Create MINUIT Optimizer.\n\n .. note::\n\n ``errordef`` should be 1.0 for a least-squares cost function and 0.5\n for negative log-likelihood function. See page 37 of\n http://hep.fi.infn.it/minuit.pdf. This parameter is sometimes\n called ``UP`` in the ``MINUIT`` docs.\n\n\n Args:\n errordef (:obj:`float`): See minuit docs. Default is 1.0.\n steps (:obj:`int`): Number of steps for the bounds. 
Default is 1000.\n \"\"\"\n self.name = 'minuit'\n self.errordef = kwargs.pop('errordef', 1)\n self.steps = kwargs.pop('steps', 1000)\n super().__init__(*args, **kwargs)\n\n def _get_minimizer(\n self, objective_and_grad, init_pars, init_bounds, fixed_vals=None, do_grad=False\n ):\n\n step_sizes = [(b[1] - b[0]) / float(self.steps) for b in init_bounds]\n fixed_vals = fixed_vals or []\n # Minuit wants True/False for each parameter\n fixed_bools = [False] * len(init_pars)\n for index, val in fixed_vals:\n fixed_bools[index] = True\n init_pars[index] = val\n step_sizes[index] = 0.0\n\n # Minuit requires jac=callable\n if do_grad:\n wrapped_objective = lambda pars: objective_and_grad(pars)[0]\n jac = lambda pars: objective_and_grad(pars)[1]\n else:\n wrapped_objective = objective_and_grad\n jac = None\n\n kwargs = dict(\n fcn=wrapped_objective,\n grad=jac,\n start=init_pars,\n error=step_sizes,\n limit=init_bounds,\n fix=fixed_bools,\n print_level=self.verbose,\n errordef=self.errordef,\n )\n return iminuit.Minuit.from_array_func(**kwargs)\n\n def _minimize(\n self,\n minimizer,\n func,\n x0,\n do_grad=False,\n bounds=None,\n fixed_vals=None,\n return_uncertainties=False,\n options={},\n ):\n\n \"\"\"\n Same signature as :func:`scipy.optimize.minimize`.\n\n Note: an additional `minuit` is injected into the fitresult to get the\n underlying minimizer.\n\n Minimizer Options:\n maxiter (:obj:`int`): maximum number of iterations. Default is 100000.\n return_uncertainties (:obj:`bool`): Return uncertainties on the fitted parameters. Default is off.\n\n Returns:\n fitresult (scipy.optimize.OptimizeResult): the fit result\n \"\"\"\n maxiter = options.pop('maxiter', self.maxiter)\n return_uncertainties = options.pop('return_uncertainties', False)\n if options:\n raise exceptions.Unsupported(\n f\"Unsupported options were passed in: {list(options.keys())}.\"\n )\n\n minimizer.migrad(ncall=maxiter)\n # Following lines below come from:\n # https://github.com/scikit-hep/iminuit/blob/22f6ed7146c1d1f3274309656d8c04461dde5ba3/src/iminuit/_minimize.py#L106-L125\n message = \"Optimization terminated successfully.\"\n if not minimizer.valid:\n message = \"Optimization failed.\"\n fmin = minimizer.fmin\n if fmin.has_reached_call_limit:\n message += \" Call limit was reached.\"\n if fmin.is_above_max_edm:\n message += \" Estimated distance to minimum too large.\"\n\n n = len(x0)\n hess_inv = default_backend.ones((n, n))\n if minimizer.valid:\n hess_inv = minimizer.np_covariance()\n\n unc = None\n if return_uncertainties:\n unc = minimizer.np_errors()\n\n return scipy.optimize.OptimizeResult(\n x=minimizer.np_values(),\n unc=unc,\n success=minimizer.valid,\n fun=minimizer.fval,\n hess_inv=hess_inv,\n message=message,\n nfev=minimizer.ncalls,\n njev=minimizer.ngrads,\n minuit=minimizer,\n )\n", "path": "src/pyhf/optimize/opt_minuit.py"}]} | 1,924 | 707 |
gh_patches_debug_26193 | rasdani/github-patches | git_diff | python-discord__site-1165 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support rescheduling of offensive messages
When a message trips the filter on the bot it is removed after a period of time. During this period it is a record in the database.
When this deletion date is reached the bot will attempt to remove the message from Discord and remove the record from the offensive message table. We currently handle for the message being not found (deleted) but if another error occurs resulting in the message not being deleted we still continue to delete the database record, causing the message to be left around.
We should allow the bot to perform a PATCH request to the deleted message endpoint to update the delete time and reschedule if something has failed (for example, a hiccup on Discord's end).
However, we must also bear in mind that permanent rescheduling could potentially leave lingering records in our database if a case is discovered where a message which cannot be deleted is rescheduled repetitively.
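
For illustration, a sketch of how a client could use such a PATCH endpoint to reschedule a failed deletion (hypothetical host, token and retry delay; only the route shape follows the viewset documented below):

```
from datetime import datetime, timedelta, timezone

import requests

API_ROOT = "https://example.invalid/bot/offensive-messages"  # placeholder host
HEADERS = {"Authorization": "Token <api-token>"}  # placeholder token


def reschedule_deletion(message_id, delay_minutes=5):
    """Push the delete_date back so the bot retries the Discord deletion."""
    new_date = datetime.now(timezone.utc) + timedelta(minutes=delay_minutes)
    response = requests.patch(
        f"{API_ROOT}/{message_id}",
        json={"delete_date": new_date.isoformat()},
        headers=HEADERS,
        timeout=10,
    )
    response.raise_for_status()
```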
</issue>
<code>
[start of pydis_site/apps/api/viewsets/bot/offensive_message.py]
1 from rest_framework.mixins import (
2 CreateModelMixin,
3 DestroyModelMixin,
4 ListModelMixin
5 )
6 from rest_framework.viewsets import GenericViewSet
7
8 from pydis_site.apps.api.models.bot.offensive_message import OffensiveMessage
9 from pydis_site.apps.api.serializers import OffensiveMessageSerializer
10
11
12 class OffensiveMessageViewSet(
13 CreateModelMixin, ListModelMixin, DestroyModelMixin, GenericViewSet
14 ):
15 """
16 View providing CRUD access to offensive messages.
17
18 ## Routes
19 ### GET /bot/offensive-messages
20 Returns all offensive messages in the database.
21
22 #### Response format
23 >>> [
24 ... {
25 ... 'id': '631953598091100200',
26 ... 'channel_id': '291284109232308226',
27 ... 'delete_date': '2019-11-01T21:51:15.545000Z'
28 ... },
29 ... ...
30 ... ]
31
32 #### Status codes
33 - 200: returned on success
34
35 ### POST /bot/offensive-messages
36 Create a new offensive message object.
37
38 #### Request body
39 >>> {
40 ... 'id': int,
41 ... 'channel_id': int,
42 ... 'delete_date': datetime.datetime # ISO-8601-formatted date
43 ... }
44
45 #### Status codes
46 - 201: returned on success
47 - 400: if the body format is invalid
48
49 ### DELETE /bot/offensive-messages/<id:int>
50 Delete the offensive message object with the given `id`.
51
52 #### Status codes
53 - 204: returned on success
54 - 404: if a offensive message object with the given `id` does not exist
55
56 ## Authentication
57 Requires an API token.
58 """
59
60 serializer_class = OffensiveMessageSerializer
61 queryset = OffensiveMessage.objects.all()
62
[end of pydis_site/apps/api/viewsets/bot/offensive_message.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pydis_site/apps/api/viewsets/bot/offensive_message.py b/pydis_site/apps/api/viewsets/bot/offensive_message.py
--- a/pydis_site/apps/api/viewsets/bot/offensive_message.py
+++ b/pydis_site/apps/api/viewsets/bot/offensive_message.py
@@ -1,6 +1,7 @@
from rest_framework.mixins import (
CreateModelMixin,
DestroyModelMixin,
+ UpdateModelMixin,
ListModelMixin
)
from rest_framework.viewsets import GenericViewSet
@@ -10,7 +11,7 @@
class OffensiveMessageViewSet(
- CreateModelMixin, ListModelMixin, DestroyModelMixin, GenericViewSet
+ CreateModelMixin, ListModelMixin, UpdateModelMixin, DestroyModelMixin, GenericViewSet
):
"""
View providing CRUD access to offensive messages.
@@ -46,6 +47,16 @@
- 201: returned on success
- 400: if the body format is invalid
+ ### PATCH /bot/offensive-messages/<id:int>
+ Perform a partial update of the offensive message with the given `id`.
+ Intended to allow rescheduling the deletion date in case the bot's attempt
+ to delete the message failed due to another error than the message already
+ being deleted.
+
+ #### Status codes
+ - 200: returned on success
+ - 404: if a offensive message object with the given `id` does not exist
+
### DELETE /bot/offensive-messages/<id:int>
Delete the offensive message object with the given `id`.
| {"golden_diff": "diff --git a/pydis_site/apps/api/viewsets/bot/offensive_message.py b/pydis_site/apps/api/viewsets/bot/offensive_message.py\n--- a/pydis_site/apps/api/viewsets/bot/offensive_message.py\n+++ b/pydis_site/apps/api/viewsets/bot/offensive_message.py\n@@ -1,6 +1,7 @@\n from rest_framework.mixins import (\n CreateModelMixin,\n DestroyModelMixin,\n+ UpdateModelMixin,\n ListModelMixin\n )\n from rest_framework.viewsets import GenericViewSet\n@@ -10,7 +11,7 @@\n \n \n class OffensiveMessageViewSet(\n- CreateModelMixin, ListModelMixin, DestroyModelMixin, GenericViewSet\n+ CreateModelMixin, ListModelMixin, UpdateModelMixin, DestroyModelMixin, GenericViewSet\n ):\n \"\"\"\n View providing CRUD access to offensive messages.\n@@ -46,6 +47,16 @@\n - 201: returned on success\n - 400: if the body format is invalid\n \n+ ### PATCH /bot/offensive-messages/<id:int>\n+ Perform a partial update of the offensive message with the given `id`.\n+ Intended to allow rescheduling the deletion date in case the bot's attempt\n+ to delete the message failed due to another error than the message already\n+ being deleted.\n+\n+ #### Status codes\n+ - 200: returned on success\n+ - 404: if a offensive message object with the given `id` does not exist\n+\n ### DELETE /bot/offensive-messages/<id:int>\n Delete the offensive message object with the given `id`.\n", "issue": "Support rescheduling of offensive messages\nWhen a message trips the filter on the bot it is removed after a period of time. During this period it is a record in the database.\r\n\r\nWhen this deletion date is reached the bot will attempt to remove the message from Discord and remove the record from the offensive message table. We currently handle for the message being not found (deleted) but if another error occurs resulting in the message not being deleted we still continue to delete the database record, causing the message to be left around.\r\n\r\nWe should allow the bot to perform a PATCH request to the deleted message endpoint to update the delete time and reschedule if something has failed (for example, a hiccup on Discord's end).\r\n\r\nHowever, we must also bear in mind that permanent rescheduling could potentially leave lingering records in our database if a case is discovered where a message which cannot be deleted is rescheduled repetitively.\n", "before_files": [{"content": "from rest_framework.mixins import (\n CreateModelMixin,\n DestroyModelMixin,\n ListModelMixin\n)\nfrom rest_framework.viewsets import GenericViewSet\n\nfrom pydis_site.apps.api.models.bot.offensive_message import OffensiveMessage\nfrom pydis_site.apps.api.serializers import OffensiveMessageSerializer\n\n\nclass OffensiveMessageViewSet(\n CreateModelMixin, ListModelMixin, DestroyModelMixin, GenericViewSet\n):\n \"\"\"\n View providing CRUD access to offensive messages.\n\n ## Routes\n ### GET /bot/offensive-messages\n Returns all offensive messages in the database.\n\n #### Response format\n >>> [\n ... {\n ... 'id': '631953598091100200',\n ... 'channel_id': '291284109232308226',\n ... 'delete_date': '2019-11-01T21:51:15.545000Z'\n ... },\n ... ...\n ... ]\n\n #### Status codes\n - 200: returned on success\n\n ### POST /bot/offensive-messages\n Create a new offensive message object.\n\n #### Request body\n >>> {\n ... 'id': int,\n ... 'channel_id': int,\n ... 'delete_date': datetime.datetime # ISO-8601-formatted date\n ... 
}\n\n #### Status codes\n - 201: returned on success\n - 400: if the body format is invalid\n\n ### DELETE /bot/offensive-messages/<id:int>\n Delete the offensive message object with the given `id`.\n\n #### Status codes\n - 204: returned on success\n - 404: if a offensive message object with the given `id` does not exist\n\n ## Authentication\n Requires an API token.\n \"\"\"\n\n serializer_class = OffensiveMessageSerializer\n queryset = OffensiveMessage.objects.all()\n", "path": "pydis_site/apps/api/viewsets/bot/offensive_message.py"}]} | 1,291 | 354 |
gh_patches_debug_16293 | rasdani/github-patches | git_diff | nf-core__tools-1261 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Linting does not recognize README Nextflow minimum version mention in Quick Start
When running `nf-core lint` with a readme file that has the following in it:
```
## Quick Start
1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=21.04.0`)
```
A warning is triggered:
```
readme: README did not have a Nextflow minimum version mentioned in Quick Start section.
```
This warning should not be triggered, as the minimum Nextflow version is present in the README file.
Link to code location
https://github.com/nf-core/tools/blob/01291016652284bfba23a900399fa0155906a7c5/nf_core/lint/readme.py#L65-L66
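
For reference, the mismatch can be reproduced directly with the regex from `readme.py` (a small standalone check, not part of the codebase):

```
import re

nf_version_re = (
    r"1\.\s*Install\s*\[`Nextflow`\]\(https://nf-co.re/usage/installation\)"
    r"\s*\(`>=(\d*\.\d*\.\d*)`\)"
)
readme_line = (
    "1. Install [`Nextflow`]"
    "(https://www.nextflow.io/docs/latest/getstarted.html#installation) "
    "(`>=21.04.0`)"
)
print(re.search(nf_version_re, readme_line))  # None -> the warning fires
```

The regex hard-codes the old `https://nf-co.re/usage/installation` URL, so README files using the current Nextflow installation link never match.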
</issue>
<code>
[start of nf_core/lint/readme.py]
1 #!/usr/bin/env python
2
3 import os
4 import re
5
6
7 def readme(self):
8 """Repository ``README.md`` tests
9
10 The ``README.md`` files for a project are very important and must meet some requirements:
11
12 * Nextflow badge
13
14 * If no Nextflow badge is found, a warning is given
15 * If a badge is found but the version doesn't match the minimum version in the config file, the test fails
16 * Example badge code:
17
18 .. code-block:: md
19
20 [](https://www.nextflow.io/)
21
22 * Bioconda badge
23
24 * If your pipeline contains a file called ``environment.yml`` in the root directory, a bioconda badge is required
25 * Required badge code:
26
27 .. code-block:: md
28
29 [](https://bioconda.github.io/)
30
31 .. note:: These badges are a markdown image ```` *inside* a markdown link ``[markdown image](<link URL>)``, so a bit fiddly to write.
32 """
33 passed = []
34 warned = []
35 failed = []
36
37 with open(os.path.join(self.wf_path, "README.md"), "r") as fh:
38 content = fh.read()
39
40 # Check that there is a readme badge showing the minimum required version of Nextflow
41 # [](https://www.nextflow.io/)
42 # and that it has the correct version
43 nf_badge_re = r"\[!\[Nextflow\]\(https://img\.shields\.io/badge/nextflow%20DSL2-%E2%89%A5([\d\.]+)-23aa62\.svg\?labelColor=000000\)\]\(https://www\.nextflow\.io/\)"
44 match = re.search(nf_badge_re, content)
45 if match:
46 nf_badge_version = match.group(1).strip("'\"")
47 try:
48 assert nf_badge_version == self.minNextflowVersion
49 except (AssertionError, KeyError):
50 failed.append(
51 "README Nextflow minimum version badge does not match config. Badge: `{}`, Config: `{}`".format(
52 nf_badge_version, self.minNextflowVersion
53 )
54 )
55 else:
56 passed.append(
57 "README Nextflow minimum version badge matched config. Badge: `{}`, Config: `{}`".format(
58 nf_badge_version, self.minNextflowVersion
59 )
60 )
61 else:
62 warned.append("README did not have a Nextflow minimum version badge.")
63
64 # Check that the minimum version mentioned in the quick start section is consistent
65 # Looking for: "1. Install [`Nextflow`](https://nf-co.re/usage/installation) (`>=21.04.0`)"
66 nf_version_re = r"1\.\s*Install\s*\[`Nextflow`\]\(https://nf-co.re/usage/installation\)\s*\(`>=(\d*\.\d*\.\d*)`\)"
67 match = re.search(nf_version_re, content)
68 if match:
69 nf_quickstart_version = match.group(1)
70 try:
71 assert nf_quickstart_version == self.minNextflowVersion
72 except (AssertionError, KeyError):
73 failed.append(
74 f"README Nextflow minimium version in Quick Start section does not match config. README: `{nf_quickstart_version}`, Config `{self.minNextflowVersion}`"
75 )
76 else:
77 passed.append(
78 f"README Nextflow minimum version in Quick Start section matched config. README: `{nf_quickstart_version}`, Config: `{self.minNextflowVersion}`"
79 )
80 else:
81 warned.append("README did not have a Nextflow minimum version mentioned in Quick Start section.")
82
83 return {"passed": passed, "warned": warned, "failed": failed}
84
[end of nf_core/lint/readme.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nf_core/lint/readme.py b/nf_core/lint/readme.py
--- a/nf_core/lint/readme.py
+++ b/nf_core/lint/readme.py
@@ -62,8 +62,8 @@
warned.append("README did not have a Nextflow minimum version badge.")
# Check that the minimum version mentioned in the quick start section is consistent
- # Looking for: "1. Install [`Nextflow`](https://nf-co.re/usage/installation) (`>=21.04.0`)"
- nf_version_re = r"1\.\s*Install\s*\[`Nextflow`\]\(https://nf-co.re/usage/installation\)\s*\(`>=(\d*\.\d*\.\d*)`\)"
+ # Looking for: "1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=21.04.0`)"
+ nf_version_re = r"1\.\s*Install\s*\[`Nextflow`\]\(https://www.nextflow.io/docs/latest/getstarted.html#installation\)\s*\(`>=(\d*\.\d*\.\d*)`\)"
match = re.search(nf_version_re, content)
if match:
nf_quickstart_version = match.group(1)
| {"golden_diff": "diff --git a/nf_core/lint/readme.py b/nf_core/lint/readme.py\n--- a/nf_core/lint/readme.py\n+++ b/nf_core/lint/readme.py\n@@ -62,8 +62,8 @@\n warned.append(\"README did not have a Nextflow minimum version badge.\")\n \n # Check that the minimum version mentioned in the quick start section is consistent\n- # Looking for: \"1. Install [`Nextflow`](https://nf-co.re/usage/installation) (`>=21.04.0`)\"\n- nf_version_re = r\"1\\.\\s*Install\\s*\\[`Nextflow`\\]\\(https://nf-co.re/usage/installation\\)\\s*\\(`>=(\\d*\\.\\d*\\.\\d*)`\\)\"\n+ # Looking for: \"1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=21.04.0`)\"\n+ nf_version_re = r\"1\\.\\s*Install\\s*\\[`Nextflow`\\]\\(https://www.nextflow.io/docs/latest/getstarted.html#installation\\)\\s*\\(`>=(\\d*\\.\\d*\\.\\d*)`\\)\"\n match = re.search(nf_version_re, content)\n if match:\n nf_quickstart_version = match.group(1)\n", "issue": "Linting does not recognize README Nextflow minimum version mention in Quick Start\nWhen running `nf-core lint` with a readme file that has the following in it:\r\n\r\n```\r\n## Quick Start\r\n\r\n1. Install [`Nextflow`](https://www.nextflow.io/docs/latest/getstarted.html#installation) (`>=21.04.0`)\r\n```\r\n\r\nA warning is triggered:\r\n\r\n```\r\nreadme: README did not have a Nextflow minimum version mentioned in Quick Start section.\r\n```\r\n\r\nThis warning should not be triggering as the minimum nextflow version is in the readme file.\r\n\r\nLink to code location\r\n\r\nhttps://github.com/nf-core/tools/blob/01291016652284bfba23a900399fa0155906a7c5/nf_core/lint/readme.py#L65-L66\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport os\nimport re\n\n\ndef readme(self):\n \"\"\"Repository ``README.md`` tests\n\n The ``README.md`` files for a project are very important and must meet some requirements:\n\n * Nextflow badge\n\n * If no Nextflow badge is found, a warning is given\n * If a badge is found but the version doesn't match the minimum version in the config file, the test fails\n * Example badge code:\n\n .. code-block:: md\n\n [](https://www.nextflow.io/)\n\n * Bioconda badge\n\n * If your pipeline contains a file called ``environment.yml`` in the root directory, a bioconda badge is required\n * Required badge code:\n\n .. code-block:: md\n\n [](https://bioconda.github.io/)\n\n .. note:: These badges are a markdown image ```` *inside* a markdown link ``[markdown image](<link URL>)``, so a bit fiddly to write.\n \"\"\"\n passed = []\n warned = []\n failed = []\n\n with open(os.path.join(self.wf_path, \"README.md\"), \"r\") as fh:\n content = fh.read()\n\n # Check that there is a readme badge showing the minimum required version of Nextflow\n # [](https://www.nextflow.io/)\n # and that it has the correct version\n nf_badge_re = r\"\\[!\\[Nextflow\\]\\(https://img\\.shields\\.io/badge/nextflow%20DSL2-%E2%89%A5([\\d\\.]+)-23aa62\\.svg\\?labelColor=000000\\)\\]\\(https://www\\.nextflow\\.io/\\)\"\n match = re.search(nf_badge_re, content)\n if match:\n nf_badge_version = match.group(1).strip(\"'\\\"\")\n try:\n assert nf_badge_version == self.minNextflowVersion\n except (AssertionError, KeyError):\n failed.append(\n \"README Nextflow minimum version badge does not match config. Badge: `{}`, Config: `{}`\".format(\n nf_badge_version, self.minNextflowVersion\n )\n )\n else:\n passed.append(\n \"README Nextflow minimum version badge matched config. 
Badge: `{}`, Config: `{}`\".format(\n nf_badge_version, self.minNextflowVersion\n )\n )\n else:\n warned.append(\"README did not have a Nextflow minimum version badge.\")\n\n # Check that the minimum version mentioned in the quick start section is consistent\n # Looking for: \"1. Install [`Nextflow`](https://nf-co.re/usage/installation) (`>=21.04.0`)\"\n nf_version_re = r\"1\\.\\s*Install\\s*\\[`Nextflow`\\]\\(https://nf-co.re/usage/installation\\)\\s*\\(`>=(\\d*\\.\\d*\\.\\d*)`\\)\"\n match = re.search(nf_version_re, content)\n if match:\n nf_quickstart_version = match.group(1)\n try:\n assert nf_quickstart_version == self.minNextflowVersion\n except (AssertionError, KeyError):\n failed.append(\n f\"README Nextflow minimium version in Quick Start section does not match config. README: `{nf_quickstart_version}`, Config `{self.minNextflowVersion}`\"\n )\n else:\n passed.append(\n f\"README Nextflow minimum version in Quick Start section matched config. README: `{nf_quickstart_version}`, Config: `{self.minNextflowVersion}`\"\n )\n else:\n warned.append(\"README did not have a Nextflow minimum version mentioned in Quick Start section.\")\n\n return {\"passed\": passed, \"warned\": warned, \"failed\": failed}\n", "path": "nf_core/lint/readme.py"}]} | 1,824 | 292 |
gh_patches_debug_50355 | rasdani/github-patches | git_diff | pypi__warehouse-6747 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add Brazilian Portuguese to localization footer
https://hosted.weblate.org/projects/pypa/warehouse/#translations says that we're now at 100% translated for Brazilian Portuguese. Therefore, let's insert the footer of available locales/translations per #6624, and add Brazilian Portuguese.
@yeraydiazdiaz @nlhkabu can either of you do this? Thanks.
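
For reference, a minimal sketch of the change in `warehouse/i18n/__init__.py` (locale code `pt_BR` assumed from the Weblate project):

```
KNOWN_LOCALES = {
    "en": "English",
    "pt_BR": "Portuguese (Brazil)",  # now 100% translated on Weblate
}
```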
</issue>
<code>
[start of warehouse/i18n/__init__.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 from babel.core import Locale
14 from pyramid.i18n import TranslationStringFactory, default_locale_negotiator
15 from pyramid.threadlocal import get_current_request
16
17 KNOWN_LOCALES = {"en": "English"}
18
19 LOCALE_ATTR = "_LOCALE_"
20
21 _translation_factory = TranslationStringFactory("messages")
22
23
24 class LazyString:
25 def __init__(self, fn, *args, **kwargs):
26 self.fn = fn
27 self.args = args
28 self.mapping = kwargs.get("mapping", {})
29 self.kwargs = kwargs
30
31 def __json__(self, request):
32 return str(self)
33
34 def __mod__(self, new_mapping):
35 mapping = self.mapping.copy()
36 mapping.update(new_mapping)
37 return LazyString(self.fn, *self.args, mapping=new_mapping, **self.kwargs)
38
39 def __str__(self):
40 return self.fn(*self.args, **self.kwargs)
41
42
43 def _locale(request):
44 """
45 Computes a babel.core:Locale() object for this request.
46 """
47 return Locale.parse(request.locale_name, sep="_")
48
49
50 def _negotiate_locale(request):
51 locale_name = getattr(request, LOCALE_ATTR, None)
52 if locale_name is not None:
53 return locale_name
54
55 locale_name = request.params.get(LOCALE_ATTR)
56 if locale_name is not None:
57 return locale_name
58
59 locale_name = request.cookies.get(LOCALE_ATTR)
60 if locale_name is not None:
61 return locale_name
62
63 if not request.accept_language:
64 return default_locale_negotiator(request)
65
66 return request.accept_language.best_match(
67 tuple(KNOWN_LOCALES.keys()), default_match=default_locale_negotiator(request)
68 )
69
70
71 def localize(message, **kwargs):
72 def _localize(message, **kwargs):
73 request = get_current_request()
74 return request.localizer.translate(_translation_factory(message, **kwargs))
75
76 return LazyString(_localize, message, **kwargs)
77
78
79 def includeme(config):
80 # Add the request attributes
81 config.add_request_method(_locale, name="locale", reify=True)
82
83 # Register our translation directory.
84 config.add_translation_dirs("warehouse:locale/")
85
86 config.set_locale_negotiator(_negotiate_locale)
87
88 # Register our i18n/l10n filters for Jinja2
89 filters = config.get_settings().setdefault("jinja2.filters", {})
90 filters.setdefault("format_date", "warehouse.i18n.filters:format_date")
91 filters.setdefault("format_datetime", "warehouse.i18n.filters:format_datetime")
92 filters.setdefault(
93 "format_rfc822_datetime", "warehouse.i18n.filters:format_rfc822_datetime"
94 )
95 filters.setdefault("format_number", "warehouse.i18n.filters:format_number")
96
97 jglobals = config.get_settings().setdefault("jinja2.globals", {})
98 jglobals.setdefault("KNOWN_LOCALES", "warehouse.i18n:KNOWN_LOCALES")
99
[end of warehouse/i18n/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/warehouse/i18n/__init__.py b/warehouse/i18n/__init__.py
--- a/warehouse/i18n/__init__.py
+++ b/warehouse/i18n/__init__.py
@@ -14,7 +14,7 @@
from pyramid.i18n import TranslationStringFactory, default_locale_negotiator
from pyramid.threadlocal import get_current_request
-KNOWN_LOCALES = {"en": "English"}
+KNOWN_LOCALES = {"en": "English", "pt_BR": "Portuguese (Brazil)"}
LOCALE_ATTR = "_LOCALE_"
| {"golden_diff": "diff --git a/warehouse/i18n/__init__.py b/warehouse/i18n/__init__.py\n--- a/warehouse/i18n/__init__.py\n+++ b/warehouse/i18n/__init__.py\n@@ -14,7 +14,7 @@\n from pyramid.i18n import TranslationStringFactory, default_locale_negotiator\n from pyramid.threadlocal import get_current_request\n \n-KNOWN_LOCALES = {\"en\": \"English\"}\n+KNOWN_LOCALES = {\"en\": \"English\", \"pt_BR\": \"Portuguese (Brazil)\"}\n \n LOCALE_ATTR = \"_LOCALE_\"\n", "issue": "Add Brazilian Portugese to localization footer\nhttps://hosted.weblate.org/projects/pypa/warehouse/#translations says that we're now at 100% translated for Brazilian Portugese. Therefore, let's insert the footer of available locales/translations per #6624, and add Brazilian Portugese.\r\n\r\n@yeraydiazdiaz @nlhkabu can either of you do this? Thanks.\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom babel.core import Locale\nfrom pyramid.i18n import TranslationStringFactory, default_locale_negotiator\nfrom pyramid.threadlocal import get_current_request\n\nKNOWN_LOCALES = {\"en\": \"English\"}\n\nLOCALE_ATTR = \"_LOCALE_\"\n\n_translation_factory = TranslationStringFactory(\"messages\")\n\n\nclass LazyString:\n def __init__(self, fn, *args, **kwargs):\n self.fn = fn\n self.args = args\n self.mapping = kwargs.get(\"mapping\", {})\n self.kwargs = kwargs\n\n def __json__(self, request):\n return str(self)\n\n def __mod__(self, new_mapping):\n mapping = self.mapping.copy()\n mapping.update(new_mapping)\n return LazyString(self.fn, *self.args, mapping=new_mapping, **self.kwargs)\n\n def __str__(self):\n return self.fn(*self.args, **self.kwargs)\n\n\ndef _locale(request):\n \"\"\"\n Computes a babel.core:Locale() object for this request.\n \"\"\"\n return Locale.parse(request.locale_name, sep=\"_\")\n\n\ndef _negotiate_locale(request):\n locale_name = getattr(request, LOCALE_ATTR, None)\n if locale_name is not None:\n return locale_name\n\n locale_name = request.params.get(LOCALE_ATTR)\n if locale_name is not None:\n return locale_name\n\n locale_name = request.cookies.get(LOCALE_ATTR)\n if locale_name is not None:\n return locale_name\n\n if not request.accept_language:\n return default_locale_negotiator(request)\n\n return request.accept_language.best_match(\n tuple(KNOWN_LOCALES.keys()), default_match=default_locale_negotiator(request)\n )\n\n\ndef localize(message, **kwargs):\n def _localize(message, **kwargs):\n request = get_current_request()\n return request.localizer.translate(_translation_factory(message, **kwargs))\n\n return LazyString(_localize, message, **kwargs)\n\n\ndef includeme(config):\n # Add the request attributes\n config.add_request_method(_locale, name=\"locale\", reify=True)\n\n # Register our translation directory.\n config.add_translation_dirs(\"warehouse:locale/\")\n\n config.set_locale_negotiator(_negotiate_locale)\n\n # Register our i18n/l10n filters for Jinja2\n filters = config.get_settings().setdefault(\"jinja2.filters\", {})\n filters.setdefault(\"format_date\", 
\"warehouse.i18n.filters:format_date\")\n filters.setdefault(\"format_datetime\", \"warehouse.i18n.filters:format_datetime\")\n filters.setdefault(\n \"format_rfc822_datetime\", \"warehouse.i18n.filters:format_rfc822_datetime\"\n )\n filters.setdefault(\"format_number\", \"warehouse.i18n.filters:format_number\")\n\n jglobals = config.get_settings().setdefault(\"jinja2.globals\", {})\n jglobals.setdefault(\"KNOWN_LOCALES\", \"warehouse.i18n:KNOWN_LOCALES\")\n", "path": "warehouse/i18n/__init__.py"}]} | 1,580 | 138 |
gh_patches_debug_15882 | rasdani/github-patches | git_diff | beeware__toga-850 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Table rows not rendering correctly in `update_data` winforms
## Expected Behavior
View text in the rows
## Current Behavior
Rows are created but text is not displayed.

When I insert a new row (with the insert button) it works OK:

I found that a change was made in `update_data` (https://github.com/beeware/toga/commit/cb326e79ea1884f9e71fadfb1d7daf0688e78753) and `update_data` uses a different ListViewItem creation than the `insert` method, which is working OK.
The specific line is the change made from this one:
```
item._impl = WinForms.ListViewItem([
str(getattr(item, attr)) for attr in self.interface._accessors
])
```
to this:
```
item._impl = WinForms.ListViewItem(self.row_data(item))
```
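
The difference is visible with plain Python, since `row_data()` prepends the item object itself to the cell strings (my reading of the code below; illustrative snippet, not tied to WinForms):

```
class Row:
    name = "spam"
    value = "42"


item = Row()
accessors = ("name", "value")

# insert(): one plain string per column -> renders fine
insert_cells = [str(getattr(item, attr)) for attr in accessors]

# update_data() via row_data(): the item object is prepended, so the
# ListViewItem receives an extra leading element and the columns shift
row_data_cells = [item] + [str(getattr(item, attr)) for attr in accessors]

print(insert_cells)    # ['spam', '42']
print(row_data_cells)  # [<__main__.Row object ...>, 'spam', '42']
```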
## Steps to reproduce
1. Open example and that's it
## Your Environment
* Python Version (list the specific version number)
3.7.7
* Operating System and Version (select from the following and list the specific version number; if your OS is not listed, list that as well)
- [ ] macOS - version:
- [ ] Linux - distro: - version:
- [x] Windows - version: 10 Pro
- [ ] Other - name: - version:
* Toga Version (list the specific version number or git hash)
Master branch of toga.
* Toga Target (the type of app you are trying to generate)
- [ ] android
- [ ] cocoa
- [ ] django
- [ ] gtk
- [ ] iOS
- [ ] tvOS
- [ ] watchOS
- [x] winforms
- [ ] win32
- [ ] Other (please specify)
</issue>
<code>
[start of src/winforms/toga_winforms/widgets/table.py]
1 from travertino.size import at_least
2
3 from toga_winforms.libs import WinForms
4
5 from .base import Widget
6
7
8 class Table(Widget):
9 def create(self):
10 self._container = self
11 self.native = WinForms.ListView()
12 self.native.View = WinForms.View.Details
13
14 dataColumn = []
15 for heading in self.interface.headings:
16 col = WinForms.ColumnHeader()
17 col.Text = heading
18 dataColumn.append(col)
19
20 self.native.FullRowSelect = True
21 self.native.Multiselect = self.interface.multiple_select
22 self.native.DoubleBuffered = True
23 self.native.Columns.AddRange(dataColumn)
24
25 def change_source(self, source):
26 self.update_data()
27
28 def row_data(self, item):
29 # TODO: Winforms can't support icons in tree cells; so, if the data source
30 # specifies an icon, strip it when converting to row data.
31 def strip_icon(item, attr):
32 val = getattr(item, attr)
33 if isinstance(val, tuple):
34 return str(val[1])
35 return str(val)
36
37 return [item] + [
38 strip_icon(item, attr)
39 for attr in self.interface._accessors
40 ]
41
42 def update_data(self):
43 self.native.BeginUpdate()
44 self.native.Items.Clear()
45 items = []
46 for item in self.interface.data:
47 item._impl = WinForms.ListViewItem(self.row_data(item))
48 items.append(item._impl)
49 self.native.Items.AddRange(items)
50 self.native.EndUpdate()
51
52 def insert(self, index, item):
53 self.native.BeginUpdate()
54 item._impl = WinForms.ListViewItem([
55 str(getattr(item, attr)) for attr in self.interface._accessors
56 ])
57 self.native.Items.Insert(index, item._impl)
58 self.native.EndUpdate()
59
60 def change(self, item):
61 self.interface.factory.not_implemented('Table.change()')
62
63 def remove(self, item):
64 self.update_data()
65
66 def clear(self):
67 self.native.Items.Clear()
68
69 def set_on_select(self, handler):
70 self.interface.factory.not_implemented('Table.set_on_select()')
71
72 def scroll_to_row(self, row):
73 self.native.EnsureVisible(row)
74 self.interface.factory.not_implemented('Table.scroll_to_row()')
75
76 def rehint(self):
77 self.interface.intrinsic.width = at_least(self.interface.MIN_WIDTH)
78 self.interface.intrinsic.height = at_least(self.interface.MIN_HEIGHT)
79
[end of src/winforms/toga_winforms/widgets/table.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/winforms/toga_winforms/widgets/table.py b/src/winforms/toga_winforms/widgets/table.py
--- a/src/winforms/toga_winforms/widgets/table.py
+++ b/src/winforms/toga_winforms/widgets/table.py
@@ -34,7 +34,7 @@
return str(val[1])
return str(val)
- return [item] + [
+ return [
strip_icon(item, attr)
for attr in self.interface._accessors
]
@@ -51,9 +51,7 @@
def insert(self, index, item):
self.native.BeginUpdate()
- item._impl = WinForms.ListViewItem([
- str(getattr(item, attr)) for attr in self.interface._accessors
- ])
+ item._impl = WinForms.ListViewItem(self.row_data(item))
self.native.Items.Insert(index, item._impl)
self.native.EndUpdate()
| {"golden_diff": "diff --git a/src/winforms/toga_winforms/widgets/table.py b/src/winforms/toga_winforms/widgets/table.py\n--- a/src/winforms/toga_winforms/widgets/table.py\n+++ b/src/winforms/toga_winforms/widgets/table.py\n@@ -34,7 +34,7 @@\n return str(val[1])\n return str(val)\n \n- return [item] + [\n+ return [\n strip_icon(item, attr)\n for attr in self.interface._accessors\n ]\n@@ -51,9 +51,7 @@\n \n def insert(self, index, item):\n self.native.BeginUpdate()\n- item._impl = WinForms.ListViewItem([\n- str(getattr(item, attr)) for attr in self.interface._accessors\n- ])\n+ item._impl = WinForms.ListViewItem(self.row_data(item))\n self.native.Items.Insert(index, item._impl)\n self.native.EndUpdate()\n", "issue": "Table rows not rendering correctly in `update_data` winforms\n## Expected Behavior\r\nView text in the rows\r\n\r\n## Current Behavior\r\nRows are created but text is not displayed.\r\n\r\nWhen I insert a new row (with insert button) is working ok:\r\n\r\n\r\nIn found that a change was made in `update_data` ( https://github.com/beeware/toga/commit/cb326e79ea1884f9e71fadfb1d7daf0688e78753) and `update_data` use a different ListViewItem creation than `insert` method which is working ok.\r\n\r\nThe specific line is the change made from this one:\r\n```\r\nitem._impl = WinForms.ListViewItem([\r\n str(getattr(item, attr)) for attr in self.interface._accessors\r\n ])\r\n```\r\nto this:\r\n```\r\nitem._impl = WinForms.ListViewItem(self.row_data(item))\r\n```\r\n\r\n## Steps to reproduce\r\n1. Open example and that's it\r\n\r\n## Your Environment\r\n\r\n* Python Version (list the specific version number)\r\n3.7.7\r\n\r\n* Operating System and Version (select from the following and list the specific version number; if your OS is not listed, list that as well)\r\n\r\n - [ ] macOS - version:\r\n - [ ] Linux - distro: - version:\r\n - [x] Windows - version: 10 Pro\r\n - [ ] Other - name: - version:\r\n\r\n* Toga Version (list the specific version number or git hash)\r\nMaster branch of toga.\r\n\r\n* Toga Target (the type of app you are trying to generate)\r\n\r\n - [ ] android\r\n - [ ] cocoa\r\n - [ ] django\r\n - [ ] gtk\r\n - [ ] iOS\r\n - [ ] tvOS\r\n - [ ] watchOS\r\n - [x] winforms\r\n - [ ] win32\r\n - [ ] Other (please specify)\r\n\n", "before_files": [{"content": "from travertino.size import at_least\n\nfrom toga_winforms.libs import WinForms\n\nfrom .base import Widget\n\n\nclass Table(Widget):\n def create(self):\n self._container = self\n self.native = WinForms.ListView()\n self.native.View = WinForms.View.Details\n\n dataColumn = []\n for heading in self.interface.headings:\n col = WinForms.ColumnHeader()\n col.Text = heading\n dataColumn.append(col)\n\n self.native.FullRowSelect = True\n self.native.Multiselect = self.interface.multiple_select\n self.native.DoubleBuffered = True\n self.native.Columns.AddRange(dataColumn)\n\n def change_source(self, source):\n self.update_data()\n\n def row_data(self, item):\n # TODO: Winforms can't support icons in tree cells; so, if the data source\n # specifies an icon, strip it when converting to row data.\n def strip_icon(item, attr):\n val = getattr(item, attr)\n if isinstance(val, tuple):\n return str(val[1])\n return str(val)\n\n return [item] + [\n strip_icon(item, attr)\n for attr in self.interface._accessors\n ]\n\n def update_data(self):\n self.native.BeginUpdate()\n self.native.Items.Clear()\n items = []\n for item in self.interface.data:\n item._impl = WinForms.ListViewItem(self.row_data(item))\n items.append(item._impl)\n 
self.native.Items.AddRange(items)\n self.native.EndUpdate()\n\n def insert(self, index, item):\n self.native.BeginUpdate()\n item._impl = WinForms.ListViewItem([\n str(getattr(item, attr)) for attr in self.interface._accessors\n ])\n self.native.Items.Insert(index, item._impl)\n self.native.EndUpdate()\n\n def change(self, item):\n self.interface.factory.not_implemented('Table.change()')\n\n def remove(self, item):\n self.update_data()\n\n def clear(self):\n self.native.Items.Clear()\n\n def set_on_select(self, handler):\n self.interface.factory.not_implemented('Table.set_on_select()')\n\n def scroll_to_row(self, row):\n self.native.EnsureVisible(row)\n self.interface.factory.not_implemented('Table.scroll_to_row()')\n\n def rehint(self):\n self.interface.intrinsic.width = at_least(self.interface.MIN_WIDTH)\n self.interface.intrinsic.height = at_least(self.interface.MIN_HEIGHT)\n", "path": "src/winforms/toga_winforms/widgets/table.py"}]} | 1,743 | 201 |
gh_patches_debug_9260 | rasdani/github-patches | git_diff | translate__pootle-6524 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Changing source (with xliff) can create submissions with no submitter
# Steps to reproduce:
- change the source of an existing unit in the file
- run update_stores
# Results
- submissions have no submitter
- stuff breaks
## Expected result:
- nothing breaks
This only happens with XLIFF, as far as I can tell, as I don't think it's possible to change the source of units in this way with PO.
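
A minimal sketch of the suspect logic (field names and the `UNTRANSLATED` check copied from `receivers.py` below; the constant's value and the exact trigger path are my assumptions):

```
UNTRANSLATED = 0  # assumed value of the state constant


class FakeUnitChange:
    submitted_by = "some-user"
    submitted_on = "2017-01-01"


def handle_unit_pre_change(unit_state, unit_change):
    # mirrors the pre_save receiver on UnitChange
    if unit_state == UNTRANSLATED:
        unit_change.submitted_by = None
        unit_change.submitted_on = None


change = FakeUnitChange()
handle_unit_pre_change(UNTRANSLATED, change)
print(change.submitted_by)  # None -> a submission with no submitter
```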
</issue>
<code>
[start of pootle/apps/pootle_store/receivers.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 from hashlib import md5
10
11 from django.db.models.signals import post_save, pre_save
12 from django.dispatch import receiver
13 from django.utils.encoding import force_bytes
14
15 from pootle.core.delegate import lifecycle, uniqueid
16 from pootle.core.models import Revision
17 from pootle.core.signals import update_checks, update_data
18
19 from .constants import FUZZY, TRANSLATED, UNTRANSLATED
20 from .models import Suggestion, Unit, UnitChange, UnitSource
21
22
23 @receiver(post_save, sender=Suggestion)
24 def handle_suggestion_added(**kwargs):
25 created = kwargs.get("created")
26 if not created:
27 return
28 store = kwargs["instance"].unit.store
29 update_data.send(store.__class__, instance=store)
30
31
32 @receiver(post_save, sender=Suggestion)
33 def handle_suggestion_accepted(**kwargs):
34 created = kwargs.get("created")
35 suggestion = kwargs["instance"]
36 if created or not suggestion.is_accepted:
37 return
38 update_data.send(
39 suggestion.unit.store.__class__,
40 instance=suggestion.unit.store)
41
42
43 @receiver(pre_save, sender=UnitSource)
44 def handle_unit_source_pre_save(**kwargs):
45 unit_source = kwargs["instance"]
46 created = not unit_source.pk
47 unit = unit_source.unit
48 if created:
49 unit_source.creation_revision = unit.revision
50 if created or unit.source_updated:
51 unit_source.source_hash = md5(force_bytes(unit.source_f)).hexdigest()
52 unit_source.source_length = len(unit.source_f)
53 unit_source.source_wordcount = max(
54 1, (unit.counter.count_words(unit.source_f.strings) or 0))
55
56
57 @receiver(pre_save, sender=Unit)
58 def handle_unit_pre_save(**kwargs):
59 unit = kwargs["instance"]
60 auto_translated = False
61
62 if unit.source_updated:
63 # update source related fields
64 wc = unit.counter.count_words(unit.source_f.strings)
65 if not wc and not bool(filter(None, unit.target_f.strings)):
66 # auto-translate untranslated strings
67 unit.target = unit.source
68 unit.state = FUZZY
69 auto_translated = True
70 if unit.target_updated:
71 # update target related fields
72 unit.target_wordcount = unit.counter.count_words(
73 unit.target_f.strings)
74 unit.target_length = len(unit.target_f)
75 if filter(None, unit.target_f.strings):
76 if unit.state == UNTRANSLATED:
77 unit.state = TRANSLATED
78 else:
79 # if it was TRANSLATED then set to UNTRANSLATED
80 if unit.state > FUZZY:
81 unit.state = UNTRANSLATED
82
83 # Updating unit from the .po file set its revision property to
84 # a new value (the same for all units during its store updated)
85 # since that change doesn't require further sync but note that
86 # auto_translated units require further sync
87 update_revision = (
88 unit.revision is None
89 or (not unit.revision_updated
90 and (unit.updated and not auto_translated)))
91 if update_revision:
92 unit.revision = Revision.incr()
93
94 if unit.index is None:
95 unit.index = unit.store.max_index() + 1
96 unitid = uniqueid.get(unit.__class__)(unit)
97 if unitid.changed:
98 unit.setid(unitid.getid())
99
100
101 @receiver(pre_save, sender=UnitChange)
102 def handle_unit_pre_change(**kwargs):
103 unit_change = kwargs["instance"]
104 unit = unit_change.unit
105 if unit.state == UNTRANSLATED:
106 # clear reviewer and translator data if translation
107 # has been deleted
108 unit_change.submitted_by = None
109 unit_change.submitted_on = None
110
111
112 @receiver(post_save, sender=UnitChange)
113 def handle_unit_change(**kwargs):
114 unit_change = kwargs["instance"]
115 unit = unit_change.unit
116 created = not unit._frozen.pk
117
118 if not created:
119 lifecycle.get(Unit)(unit).change()
120 if not unit.source_updated and not unit.target_updated:
121 return
122 new_untranslated = (created and unit.state == UNTRANSLATED)
123 if not new_untranslated:
124 update_checks.send(unit.__class__, instance=unit)
125 if unit.istranslated():
126 unit.update_tmserver()
127
[end of pootle/apps/pootle_store/receivers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/apps/pootle_store/receivers.py b/pootle/apps/pootle_store/receivers.py
--- a/pootle/apps/pootle_store/receivers.py
+++ b/pootle/apps/pootle_store/receivers.py
@@ -98,17 +98,6 @@
unit.setid(unitid.getid())
-@receiver(pre_save, sender=UnitChange)
-def handle_unit_pre_change(**kwargs):
- unit_change = kwargs["instance"]
- unit = unit_change.unit
- if unit.state == UNTRANSLATED:
- # clear reviewer and translator data if translation
- # has been deleted
- unit_change.submitted_by = None
- unit_change.submitted_on = None
-
-
@receiver(post_save, sender=UnitChange)
def handle_unit_change(**kwargs):
unit_change = kwargs["instance"]
| {"golden_diff": "diff --git a/pootle/apps/pootle_store/receivers.py b/pootle/apps/pootle_store/receivers.py\n--- a/pootle/apps/pootle_store/receivers.py\n+++ b/pootle/apps/pootle_store/receivers.py\n@@ -98,17 +98,6 @@\n unit.setid(unitid.getid())\n \n \n-@receiver(pre_save, sender=UnitChange)\n-def handle_unit_pre_change(**kwargs):\n- unit_change = kwargs[\"instance\"]\n- unit = unit_change.unit\n- if unit.state == UNTRANSLATED:\n- # clear reviewer and translator data if translation\n- # has been deleted\n- unit_change.submitted_by = None\n- unit_change.submitted_on = None\n-\n-\n @receiver(post_save, sender=UnitChange)\n def handle_unit_change(**kwargs):\n unit_change = kwargs[\"instance\"]\n", "issue": "Changing source (with xliff) can create submissions with no submitter\n# Steps to reproduce:\r\n\r\n- change source in file of existing unit\r\n- run update_stores\r\n\r\n# Results\r\n\r\n- submissions have no submitter\r\n- stuff breaks\r\n\r\n## Expected result:\r\n\r\n- nothing breaks\r\n\r\n\r\nthis only happens in xliff afaict - as i dont think its possible to change source of units in this way with po\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom hashlib import md5\n\nfrom django.db.models.signals import post_save, pre_save\nfrom django.dispatch import receiver\nfrom django.utils.encoding import force_bytes\n\nfrom pootle.core.delegate import lifecycle, uniqueid\nfrom pootle.core.models import Revision\nfrom pootle.core.signals import update_checks, update_data\n\nfrom .constants import FUZZY, TRANSLATED, UNTRANSLATED\nfrom .models import Suggestion, Unit, UnitChange, UnitSource\n\n\n@receiver(post_save, sender=Suggestion)\ndef handle_suggestion_added(**kwargs):\n created = kwargs.get(\"created\")\n if not created:\n return\n store = kwargs[\"instance\"].unit.store\n update_data.send(store.__class__, instance=store)\n\n\n@receiver(post_save, sender=Suggestion)\ndef handle_suggestion_accepted(**kwargs):\n created = kwargs.get(\"created\")\n suggestion = kwargs[\"instance\"]\n if created or not suggestion.is_accepted:\n return\n update_data.send(\n suggestion.unit.store.__class__,\n instance=suggestion.unit.store)\n\n\n@receiver(pre_save, sender=UnitSource)\ndef handle_unit_source_pre_save(**kwargs):\n unit_source = kwargs[\"instance\"]\n created = not unit_source.pk\n unit = unit_source.unit\n if created:\n unit_source.creation_revision = unit.revision\n if created or unit.source_updated:\n unit_source.source_hash = md5(force_bytes(unit.source_f)).hexdigest()\n unit_source.source_length = len(unit.source_f)\n unit_source.source_wordcount = max(\n 1, (unit.counter.count_words(unit.source_f.strings) or 0))\n\n\n@receiver(pre_save, sender=Unit)\ndef handle_unit_pre_save(**kwargs):\n unit = kwargs[\"instance\"]\n auto_translated = False\n\n if unit.source_updated:\n # update source related fields\n wc = unit.counter.count_words(unit.source_f.strings)\n if not wc and not bool(filter(None, unit.target_f.strings)):\n # auto-translate untranslated strings\n unit.target = unit.source\n unit.state = FUZZY\n auto_translated = True\n if unit.target_updated:\n # update target related fields\n unit.target_wordcount = unit.counter.count_words(\n unit.target_f.strings)\n unit.target_length = len(unit.target_f)\n if 
filter(None, unit.target_f.strings):\n if unit.state == UNTRANSLATED:\n unit.state = TRANSLATED\n else:\n # if it was TRANSLATED then set to UNTRANSLATED\n if unit.state > FUZZY:\n unit.state = UNTRANSLATED\n\n # Updating unit from the .po file set its revision property to\n # a new value (the same for all units during its store updated)\n # since that change doesn't require further sync but note that\n # auto_translated units require further sync\n update_revision = (\n unit.revision is None\n or (not unit.revision_updated\n and (unit.updated and not auto_translated)))\n if update_revision:\n unit.revision = Revision.incr()\n\n if unit.index is None:\n unit.index = unit.store.max_index() + 1\n unitid = uniqueid.get(unit.__class__)(unit)\n if unitid.changed:\n unit.setid(unitid.getid())\n\n\n@receiver(pre_save, sender=UnitChange)\ndef handle_unit_pre_change(**kwargs):\n unit_change = kwargs[\"instance\"]\n unit = unit_change.unit\n if unit.state == UNTRANSLATED:\n # clear reviewer and translator data if translation\n # has been deleted\n unit_change.submitted_by = None\n unit_change.submitted_on = None\n\n\n@receiver(post_save, sender=UnitChange)\ndef handle_unit_change(**kwargs):\n unit_change = kwargs[\"instance\"]\n unit = unit_change.unit\n created = not unit._frozen.pk\n\n if not created:\n lifecycle.get(Unit)(unit).change()\n if not unit.source_updated and not unit.target_updated:\n return\n new_untranslated = (created and unit.state == UNTRANSLATED)\n if not new_untranslated:\n update_checks.send(unit.__class__, instance=unit)\n if unit.istranslated():\n unit.update_tmserver()\n", "path": "pootle/apps/pootle_store/receivers.py"}]} | 1,883 | 196 |
gh_patches_debug_6221 | rasdani/github-patches | git_diff | scikit-hep__pyhf-837 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bump jsonschema to v3.2.0+ to support draft 6
Currently on alpha release 3.0.x but can bump to 3.2.0 which was released.
</issue>
<code>
[start of setup.py]
1 from setuptools import setup, find_packages
2 from pathlib import Path
3
4 this_directory = Path(__file__).parent.resolve()
5 with open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:
6 long_description = readme_rst.read()
7
8 extras_require = {
9 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],
10 'torch': ['torch~=1.2'],
11 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],
12 'xmlio': ['uproot'],
13 'minuit': ['iminuit'],
14 }
15 extras_require['backends'] = sorted(
16 set(
17 extras_require['tensorflow']
18 + extras_require['torch']
19 + extras_require['jax']
20 + extras_require['minuit']
21 )
22 )
23 extras_require['contrib'] = sorted(set(['matplotlib']))
24
25 extras_require['test'] = sorted(
26 set(
27 extras_require['backends']
28 + extras_require['xmlio']
29 + extras_require['contrib']
30 + [
31 'pyflakes',
32 'pytest~=3.5',
33 'pytest-cov>=2.5.1',
34 'pytest-mock',
35 'pytest-benchmark[histogram]',
36 'pytest-console-scripts',
37 'pytest-mpl',
38 'pydocstyle',
39 'coverage>=4.0', # coveralls
40 'papermill~=1.0',
41 'nteract-scrapbook~=0.2',
42 'check-manifest',
43 'jupyter',
44 'uproot~=3.3',
45 'graphviz',
46 'jsonpatch',
47 'black',
48 ]
49 )
50 )
51 extras_require['docs'] = sorted(
52 set(
53 [
54 'sphinx',
55 'sphinxcontrib-bibtex',
56 'sphinx-click',
57 'sphinx_rtd_theme',
58 'nbsphinx',
59 'ipywidgets',
60 'sphinx-issues',
61 'sphinx-copybutton>0.2.9',
62 ]
63 )
64 )
65 extras_require['develop'] = sorted(
66 set(
67 extras_require['docs']
68 + extras_require['test']
69 + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine']
70 )
71 )
72 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
73
74
75 setup(
76 name='pyhf',
77 version='0.4.1',
78 description='(partial) pure python histfactory implementation',
79 long_description=long_description,
80 long_description_content_type='text/x-rst',
81 url='https://github.com/scikit-hep/pyhf',
82 author='Lukas Heinrich, Matthew Feickert, Giordon Stark',
83 author_email='[email protected], [email protected], [email protected]',
84 license='Apache',
85 keywords='physics fitting numpy scipy tensorflow pytorch',
86 classifiers=[
87 "Programming Language :: Python :: 3",
88 "Programming Language :: Python :: 3.6",
89 "Programming Language :: Python :: 3.7",
90 ],
91 package_dir={'': 'src'},
92 packages=find_packages(where='src'),
93 include_package_data=True,
94 python_requires=">=3.6",
95 install_requires=[
96 'scipy', # requires numpy, which is required by pyhf and tensorflow
97 'click>=6.0', # for console scripts,
98 'tqdm', # for readxml
99 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6
100 'jsonpatch',
101 'pyyaml', # for parsing CLI equal-delimited options
102 ],
103 extras_require=extras_require,
104 entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},
105 dependency_links=[],
106 use_scm_version=lambda: {'local_scheme': lambda version: ''},
107 )
108
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -96,7 +96,7 @@
'scipy', # requires numpy, which is required by pyhf and tensorflow
'click>=6.0', # for console scripts,
'tqdm', # for readxml
- 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6
+ 'jsonschema>=3.2.0', # for utils
'jsonpatch',
'pyyaml', # for parsing CLI equal-delimited options
],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -96,7 +96,7 @@\n 'scipy', # requires numpy, which is required by pyhf and tensorflow\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n- 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n+ 'jsonschema>=3.2.0', # for utils\n 'jsonpatch',\n 'pyyaml', # for parsing CLI equal-delimited options\n ],\n", "issue": "bump jsonschema to v3.2.0+ to support draft 6\nCurrently on alpha release 3.0.x but can bump to 3.2.0 which was released.\n", "before_files": [{"content": "from setuptools import setup, find_packages\nfrom pathlib import Path\n\nthis_directory = Path(__file__).parent.resolve()\nwith open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:\n long_description = readme_rst.read()\n\nextras_require = {\n 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=1.0',\n 'nteract-scrapbook~=0.2',\n 'check-manifest',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n 'black',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n name='pyhf',\n version='0.4.1',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/x-rst',\n url='https://github.com/scikit-hep/pyhf',\n author='Lukas Heinrich, Matthew Feickert, Giordon Stark',\n author_email='[email protected], [email protected], [email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch',\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n package_dir={'': 'src'},\n packages=find_packages(where='src'),\n include_package_data=True,\n python_requires=\">=3.6\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf and tensorflow\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n 'jsonpatch',\n 'pyyaml', # for parsing CLI equal-delimited options\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},\n dependency_links=[],\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]} | 1,642 | 144 |
gh_patches_debug_925 | rasdani/github-patches | git_diff | dynamiqs__dynamiqs-196 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
implement a ver() method
As a user if I want to make sure my setup is up to date with the latest version, I want to be able to call dq.ver() to know which version I am running
</issue>
<code>
[start of dynamiqs/__init__.py]
1 from .mesolve import mesolve
2 from .sesolve import sesolve
3 from .smesolve import smesolve
4 from .utils import *
5
[end of dynamiqs/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dynamiqs/__init__.py b/dynamiqs/__init__.py
--- a/dynamiqs/__init__.py
+++ b/dynamiqs/__init__.py
@@ -1,4 +1,9 @@
+from importlib.metadata import version
+
from .mesolve import mesolve
from .sesolve import sesolve
from .smesolve import smesolve
from .utils import *
+
+# get version from pyproject.toml
+__version__ = version(__package__)
| {"golden_diff": "diff --git a/dynamiqs/__init__.py b/dynamiqs/__init__.py\n--- a/dynamiqs/__init__.py\n+++ b/dynamiqs/__init__.py\n@@ -1,4 +1,9 @@\n+from importlib.metadata import version\n+\n from .mesolve import mesolve\n from .sesolve import sesolve\n from .smesolve import smesolve\n from .utils import *\n+\n+# get version from pyproject.toml\n+__version__ = version(__package__)\n", "issue": "implement a ver() method\nAs a user if I want to make sure my setup is up to date with the latest version, I want to be able to call dq.ver() to know which version I am running\n", "before_files": [{"content": "from .mesolve import mesolve\nfrom .sesolve import sesolve\nfrom .smesolve import smesolve\nfrom .utils import *\n", "path": "dynamiqs/__init__.py"}]} | 620 | 114 |
gh_patches_debug_132 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-3433 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ci - failure around mismatched versions of tabulate
Per current ci failures we're getting 0.8.3 of tabulate installed even though azure-cli-core calls out a pin to under 0.8.2.
This mirrors the issue we had with fakeredis, where it properly declared a dependency for six == 0.12.0 and we picked up the version pin in requirements.txt.
digging around a bit more, pip released a new 19 release series in the last 72hrs, that i'm currently examining for regressions that allowed for installs that ignore package dependencies, when given requirements.
</issue>
<code>
[start of setup.py]
1 import os
2 from io import open
3 from setuptools import setup, find_packages
4
5
6 def read(fname):
7 return open(os.path.join(os.path.dirname(__file__), fname), encoding='utf-8').read()
8
9
10 setup(
11 name="c7n",
12 version='0.8.33.1',
13 description="Cloud Custodian - Policy Rules Engine",
14 long_description=read('README.rst'),
15 classifiers=[
16 "Topic :: System :: Systems Administration",
17 "Topic :: System :: Distributed Computing"
18 ],
19 url="https://github.com/capitalone/cloud-custodian",
20 license="Apache-2.0",
21 packages=find_packages(),
22 entry_points={
23 'console_scripts': [
24 'custodian = c7n.cli:main']},
25 install_requires=[
26 "boto3>=1.9.62",
27 "botocore>=1.12.62",
28 "python-dateutil>=2.6,<3.0.0",
29 "pyyaml",
30 "jsonschema",
31 "jsonpatch>=1.21",
32 "argcomplete",
33 "tabulate"
34 ],
35 )
36
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -30,6 +30,6 @@
"jsonschema",
"jsonpatch>=1.21",
"argcomplete",
- "tabulate"
+ "tabulate==0.8.2"
],
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -30,6 +30,6 @@\n \"jsonschema\",\n \"jsonpatch>=1.21\",\n \"argcomplete\",\n- \"tabulate\"\n+ \"tabulate==0.8.2\"\n ],\n )\n", "issue": "ci - failure around mismatched versions of tabulate\nPer current ci failures we're getting 0.8.3 of tabulate installed even though azure-cli-core calls out a pin to under 0.8.2.\r\n\r\nThis mirrors the issue we had with fakeredis, where it properly declared a dependency for six == 0.12.0 and we picked up the version pin in requirements.txt.\r\n\r\ndigging around a bit more, pip released a new 19 release series in the last 72hrs, that i'm currently examining for regressions that allowed for installs that ignore package dependencies, when given requirements.\r\n\n", "before_files": [{"content": "import os\nfrom io import open\nfrom setuptools import setup, find_packages\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname), encoding='utf-8').read()\n\n\nsetup(\n name=\"c7n\",\n version='0.8.33.1',\n description=\"Cloud Custodian - Policy Rules Engine\",\n long_description=read('README.rst'),\n classifiers=[\n \"Topic :: System :: Systems Administration\",\n \"Topic :: System :: Distributed Computing\"\n ],\n url=\"https://github.com/capitalone/cloud-custodian\",\n license=\"Apache-2.0\",\n packages=find_packages(),\n entry_points={\n 'console_scripts': [\n 'custodian = c7n.cli:main']},\n install_requires=[\n \"boto3>=1.9.62\",\n \"botocore>=1.12.62\",\n \"python-dateutil>=2.6,<3.0.0\",\n \"pyyaml\",\n \"jsonschema\",\n \"jsonpatch>=1.21\",\n \"argcomplete\",\n \"tabulate\"\n ],\n)\n", "path": "setup.py"}]} | 965 | 74 |
gh_patches_debug_26997 | rasdani/github-patches | git_diff | mdn__kuma-6098 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
search_phase_execution_exception in ES on huuuge ?page params
https://sentry.prod.mozaws.net/operations/mdn-prod/issues/6620806/
```
TransportError: TransportError(500, u'search_phase_execution_exception', u'Result window is too large, from + size must be less than or equal to: [10000] but was [33010]. See the scroll api for a more efficient way to request large data sets. This limit can be set by changing the [index.max_result_window] index level setting.')
(24 additional frame(s) were not displayed)
...
File "rest_framework/views.py", line 492, in dispatch
response = handler(request, *args, **kwargs)
File "rest_framework/generics.py", line 201, in get
return self.list(request, *args, **kwargs)
File "rest_framework/mixins.py", line 42, in list
page = self.paginate_queryset(queryset)
File "rest_framework/generics.py", line 173, in paginate_queryset
return self.paginator.paginate_queryset(queryset, self.request, view=self)
File "rest_framework/pagination.py", line 204, in paginate_queryset
self.page = paginator.page(page_number)
TransportError: TransportError(500, u'search_phase_execution_exception', u'Result window is too large, from + size must be less than or equal to: [10000] but was [33010]. See the scroll api for a more efficient way to request large data sets. This limit can be set by changing the [index.max_result_window] index level setting.')
```
</issue>
<code>
[start of kuma/search/paginator.py]
1 from django.core.paginator import EmptyPage, Page, PageNotAnInteger, Paginator
2 from django.utils.functional import cached_property
3
4
5 class SearchPaginator(Paginator):
6 """
7 A better paginator for search results
8
9 The normal Paginator does a .count() query and then a slice. Since ES
10 results contain the total number of results, we can take an optimistic
11 slice and then adjust the count.
12 """
13
14 def __init__(self, *args, **kwargs):
15 super(SearchPaginator, self).__init__(*args, **kwargs)
16 self._result_total = None
17
18 def validate_number(self, number):
19 """
20 Validates the given 1-based page number.
21
22 This class overrides the default behavior and ignores the upper bound.
23 """
24 try:
25 number = int(number)
26 except (TypeError, ValueError):
27 raise PageNotAnInteger('That page number is not an integer')
28 if number < 1:
29 raise EmptyPage('That page number is less than 1')
30 return number
31
32 def page(self, number):
33 """
34 Returns a page object.
35
36 This class overrides the default behavior and ignores "orphans" and
37 assigns the count from the ES result to the Paginator.
38 """
39 number = self.validate_number(number)
40 bottom = (number - 1) * self.per_page
41 top = bottom + self.per_page
42
43 # Force the search to evaluate and then attach the count. We want to
44 # avoid an extra useless query even if there are no results, so we
45 # directly fetch the count from hits.
46 result = self.object_list[bottom:top].execute()
47 page = Page(result.hits, number, self)
48 # Set the count to the results after post_filter
49 self._result_total = result.hits.total
50 # Also store the aggregations, if any.
51 page.aggregations = getattr(result, 'aggregations', None)
52
53 # Now that we have the count validate that the page number isn't higher
54 # than the possible number of pages and adjust accordingly.
55 if number > self.num_pages:
56 if number == 1 and self.allow_empty_first_page:
57 pass
58 else:
59 raise EmptyPage('That page contains no results')
60 return page
61
62 @cached_property
63 def count(self):
64 """
65 Returns the total number of results.
66
67 Paginator's count property will call .count() on the search object,
68 which returns results before the pre_filter. This will result in a
69 count that is too high. Instead, use 'total' from the results,
70 executing if needed.
71 """
72 if self._result_total is not None:
73 return self._result_total
74 return self.object_list.execute().hits.total
75
[end of kuma/search/paginator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kuma/search/paginator.py b/kuma/search/paginator.py
--- a/kuma/search/paginator.py
+++ b/kuma/search/paginator.py
@@ -1,4 +1,5 @@
-from django.core.paginator import EmptyPage, Page, PageNotAnInteger, Paginator
+from django.core.paginator import (
+ EmptyPage, InvalidPage, Page, PageNotAnInteger, Paginator)
from django.utils.functional import cached_property
@@ -19,7 +20,7 @@
"""
Validates the given 1-based page number.
- This class overrides the default behavior and ignores the upper bound.
+ We also check that the number isn't too large.
"""
try:
number = int(number)
@@ -27,6 +28,19 @@
raise PageNotAnInteger('That page number is not an integer')
if number < 1:
raise EmptyPage('That page number is less than 1')
+
+ if number >= 1000:
+ # Anything >=1,000 will result in a hard error in
+ # Elasticsearch which would happen before we even get a chance
+ # to validate that the range is too big. The error you would
+ # get from Elasticsearch 6.x is something like this:
+ #
+ # Result window is too large, from + size must be less
+ # than or equal to: [10000] but was [11000].
+ #
+ # See https://github.com/mdn/kuma/issues/6092
+ raise InvalidPage('Page number too large')
+
return number
def page(self, number):
| {"golden_diff": "diff --git a/kuma/search/paginator.py b/kuma/search/paginator.py\n--- a/kuma/search/paginator.py\n+++ b/kuma/search/paginator.py\n@@ -1,4 +1,5 @@\n-from django.core.paginator import EmptyPage, Page, PageNotAnInteger, Paginator\n+from django.core.paginator import (\n+ EmptyPage, InvalidPage, Page, PageNotAnInteger, Paginator)\n from django.utils.functional import cached_property\n \n \n@@ -19,7 +20,7 @@\n \"\"\"\n Validates the given 1-based page number.\n \n- This class overrides the default behavior and ignores the upper bound.\n+ We also check that the number isn't too large.\n \"\"\"\n try:\n number = int(number)\n@@ -27,6 +28,19 @@\n raise PageNotAnInteger('That page number is not an integer')\n if number < 1:\n raise EmptyPage('That page number is less than 1')\n+\n+ if number >= 1000:\n+ # Anything >=1,000 will result in a hard error in\n+ # Elasticsearch which would happen before we even get a chance\n+ # to validate that the range is too big. The error you would\n+ # get from Elasticsearch 6.x is something like this:\n+ #\n+ # Result window is too large, from + size must be less\n+ # than or equal to: [10000] but was [11000].\n+ #\n+ # See https://github.com/mdn/kuma/issues/6092\n+ raise InvalidPage('Page number too large')\n+\n return number\n \n def page(self, number):\n", "issue": "search_phase_execution_exception in ES on huuuge ?page params\nhttps://sentry.prod.mozaws.net/operations/mdn-prod/issues/6620806/\n\n```\nTransportError: TransportError(500, u'search_phase_execution_exception', u'Result window is too large, from + size must be less than or equal to: [10000] but was [33010]. See the scroll api for a more efficient way to request large data sets. This limit can be set by changing the [index.max_result_window] index level setting.')\n(24 additional frame(s) were not displayed)\n...\n File \"rest_framework/views.py\", line 492, in dispatch\n response = handler(request, *args, **kwargs)\n File \"rest_framework/generics.py\", line 201, in get\n return self.list(request, *args, **kwargs)\n File \"rest_framework/mixins.py\", line 42, in list\n page = self.paginate_queryset(queryset)\n File \"rest_framework/generics.py\", line 173, in paginate_queryset\n return self.paginator.paginate_queryset(queryset, self.request, view=self)\n File \"rest_framework/pagination.py\", line 204, in paginate_queryset\n self.page = paginator.page(page_number)\n\nTransportError: TransportError(500, u'search_phase_execution_exception', u'Result window is too large, from + size must be less than or equal to: [10000] but was [33010]. See the scroll api for a more efficient way to request large data sets. This limit can be set by changing the [index.max_result_window] index level setting.')\n```\n", "before_files": [{"content": "from django.core.paginator import EmptyPage, Page, PageNotAnInteger, Paginator\nfrom django.utils.functional import cached_property\n\n\nclass SearchPaginator(Paginator):\n \"\"\"\n A better paginator for search results\n\n The normal Paginator does a .count() query and then a slice. 
Since ES\n results contain the total number of results, we can take an optimistic\n slice and then adjust the count.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(SearchPaginator, self).__init__(*args, **kwargs)\n self._result_total = None\n\n def validate_number(self, number):\n \"\"\"\n Validates the given 1-based page number.\n\n This class overrides the default behavior and ignores the upper bound.\n \"\"\"\n try:\n number = int(number)\n except (TypeError, ValueError):\n raise PageNotAnInteger('That page number is not an integer')\n if number < 1:\n raise EmptyPage('That page number is less than 1')\n return number\n\n def page(self, number):\n \"\"\"\n Returns a page object.\n\n This class overrides the default behavior and ignores \"orphans\" and\n assigns the count from the ES result to the Paginator.\n \"\"\"\n number = self.validate_number(number)\n bottom = (number - 1) * self.per_page\n top = bottom + self.per_page\n\n # Force the search to evaluate and then attach the count. We want to\n # avoid an extra useless query even if there are no results, so we\n # directly fetch the count from hits.\n result = self.object_list[bottom:top].execute()\n page = Page(result.hits, number, self)\n # Set the count to the results after post_filter\n self._result_total = result.hits.total\n # Also store the aggregations, if any.\n page.aggregations = getattr(result, 'aggregations', None)\n\n # Now that we have the count validate that the page number isn't higher\n # than the possible number of pages and adjust accordingly.\n if number > self.num_pages:\n if number == 1 and self.allow_empty_first_page:\n pass\n else:\n raise EmptyPage('That page contains no results')\n return page\n\n @cached_property\n def count(self):\n \"\"\"\n Returns the total number of results.\n\n Paginator's count property will call .count() on the search object,\n which returns results before the pre_filter. This will result in a\n count that is too high. Instead, use 'total' from the results,\n executing if needed.\n \"\"\"\n if self._result_total is not None:\n return self._result_total\n return self.object_list.execute().hits.total\n", "path": "kuma/search/paginator.py"}]} | 1,643 | 374 |
gh_patches_debug_3331 | rasdani/github-patches | git_diff | fidals__shopelectro-885 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Stale import db issue
PO says the last import was at `<yml_catalog date="2019-05-23 00:38">`
Check if import db can autolaunch.
Import db is called as catalog update command
</issue>
<code>
[start of shopelectro/management/commands/_update_catalog/update_pack.py]
1 """
2 Update Product.in_pack and prices.
3
4 The update_catalog command always resets product prices to per unit format, so:
5 1. Parse in pack quantity from Tag.name and save it to Product.in_pack
6 2. Multiply product prices by in_pack value and save.
7 """
8 import logging
9
10 from django.db import models, transaction
11
12 from shopelectro.models import TagQuerySet, TagGroup
13
14 logger = logging.getLogger(__name__)
15 PRICES = ['price', 'purchase_price', 'wholesale_small', 'wholesale_medium', 'wholesale_large']
16
17
18 def update_in_packs(packs: TagQuerySet):
19 """Parse and save in pack quantity values."""
20 # @todo #859:60m Implement update_pack and render prices properly.
21
22
23 def update_prices(packs: TagQuerySet):
24 """Multiply product prices on in pack quantity."""
25 fields_to_update = {}
26 for price in PRICES:
27 fields_to_update[price] = models.F(price) * models.F('in_pack')
28
29 with transaction.atomic():
30 packs.products().update(**fields_to_update)
31
32
33 def main(*args, kwargs):
34 uuid = 'ae30f766-0bb8-11e6-80ea-02d2cc20e118'
35 pack_group = TagGroup.objects.filter(uuid=uuid).first()
36 if not pack_group:
37 logger.error(f'Couldn\'t find "Упаковка" tag group with uuid = "{uuid}".')
38 return
39
40 return
41
42 packs = pack_group.tags.all().prefetch_related('products')
43 update_in_packs(packs)
44 update_prices(packs)
45
[end of shopelectro/management/commands/_update_catalog/update_pack.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/shopelectro/management/commands/_update_catalog/update_pack.py b/shopelectro/management/commands/_update_catalog/update_pack.py
--- a/shopelectro/management/commands/_update_catalog/update_pack.py
+++ b/shopelectro/management/commands/_update_catalog/update_pack.py
@@ -30,7 +30,7 @@
packs.products().update(**fields_to_update)
-def main(*args, kwargs):
+def main(*args, **kwargs):
uuid = 'ae30f766-0bb8-11e6-80ea-02d2cc20e118'
pack_group = TagGroup.objects.filter(uuid=uuid).first()
if not pack_group:
| {"golden_diff": "diff --git a/shopelectro/management/commands/_update_catalog/update_pack.py b/shopelectro/management/commands/_update_catalog/update_pack.py\n--- a/shopelectro/management/commands/_update_catalog/update_pack.py\n+++ b/shopelectro/management/commands/_update_catalog/update_pack.py\n@@ -30,7 +30,7 @@\n packs.products().update(**fields_to_update)\n \n \n-def main(*args, kwargs):\n+def main(*args, **kwargs):\n uuid = 'ae30f766-0bb8-11e6-80ea-02d2cc20e118'\n pack_group = TagGroup.objects.filter(uuid=uuid).first()\n if not pack_group:\n", "issue": "Stale import db issue\nPO says the last import was at `<yml_catalog date=\"2019-05-23 00:38\">`\r\nCheck if import db can autolaunch.\r\n\r\nImport db is called as catalog update command\r\n\n", "before_files": [{"content": "\"\"\"\nUpdate Product.in_pack and prices.\n\nThe update_catalog command always resets product prices to per unit format, so:\n1. Parse in pack quantity from Tag.name and save it to Product.in_pack\n2. Multiply product prices by in_pack value and save.\n\"\"\"\nimport logging\n\nfrom django.db import models, transaction\n\nfrom shopelectro.models import TagQuerySet, TagGroup\n\nlogger = logging.getLogger(__name__)\nPRICES = ['price', 'purchase_price', 'wholesale_small', 'wholesale_medium', 'wholesale_large']\n\n\ndef update_in_packs(packs: TagQuerySet):\n \"\"\"Parse and save in pack quantity values.\"\"\"\n # @todo #859:60m Implement update_pack and render prices properly.\n\n\ndef update_prices(packs: TagQuerySet):\n \"\"\"Multiply product prices on in pack quantity.\"\"\"\n fields_to_update = {}\n for price in PRICES:\n fields_to_update[price] = models.F(price) * models.F('in_pack')\n\n with transaction.atomic():\n packs.products().update(**fields_to_update)\n\n\ndef main(*args, kwargs):\n uuid = 'ae30f766-0bb8-11e6-80ea-02d2cc20e118'\n pack_group = TagGroup.objects.filter(uuid=uuid).first()\n if not pack_group:\n logger.error(f'Couldn\\'t find \"\u0423\u043f\u0430\u043a\u043e\u0432\u043a\u0430\" tag group with uuid = \"{uuid}\".')\n return\n\n return\n\n packs = pack_group.tags.all().prefetch_related('products')\n update_in_packs(packs)\n update_prices(packs)\n", "path": "shopelectro/management/commands/_update_catalog/update_pack.py"}]} | 1,048 | 165 |
gh_patches_debug_4833 | rasdani/github-patches | git_diff | Theano__Theano-146 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Get rid of warning when using Scipy 0.10.x
WARNING: scipy version = 0.10.0b2. We request version >=0.7.0 for the sparse code as it has bugs fixed in the sparse matrix code.
</issue>
<code>
[start of theano/sparse/__init__.py]
1 import sys
2 try:
3 import scipy
4 enable_sparse = scipy.__version__ >= '0.7'
5 if not enable_sparse:
6 sys.stderr.write("WARNING: scipy version = %s."
7 " We request version >=0.7.0 for the sparse code as it has"
8 " bugs fixed in the sparse matrix code.\n" % scipy.__version__)
9 except ImportError:
10 enable_sparse = False
11 sys.stderr.write("WARNING: scipy can't be imported."
12 " We disable the sparse matrix code.")
13
14 if enable_sparse:
15 from basic import *
16 import sharedvar
17 from sharedvar import sparse_constructor as shared
18
19
[end of theano/sparse/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/theano/sparse/__init__.py b/theano/sparse/__init__.py
--- a/theano/sparse/__init__.py
+++ b/theano/sparse/__init__.py
@@ -1,7 +1,10 @@
+from pkg_resources import parse_version as V
import sys
+
try:
import scipy
- enable_sparse = scipy.__version__ >= '0.7'
+ enable_sparse = V(scipy.__version__) >= V('0.7')
+
if not enable_sparse:
sys.stderr.write("WARNING: scipy version = %s."
" We request version >=0.7.0 for the sparse code as it has"
| {"golden_diff": "diff --git a/theano/sparse/__init__.py b/theano/sparse/__init__.py\n--- a/theano/sparse/__init__.py\n+++ b/theano/sparse/__init__.py\n@@ -1,7 +1,10 @@\n+from pkg_resources import parse_version as V\n import sys\n+\n try:\n import scipy\n- enable_sparse = scipy.__version__ >= '0.7'\n+ enable_sparse = V(scipy.__version__) >= V('0.7')\n+\n if not enable_sparse:\n sys.stderr.write(\"WARNING: scipy version = %s.\"\n \" We request version >=0.7.0 for the sparse code as it has\"\n", "issue": "Get rid of warning when using Scipy 0.10.x\nWARNING: scipy version = 0.10.0b2. We request version >=0.7.0 for the sparse code as it has bugs fixed in the sparse matrix code.\n\n", "before_files": [{"content": "import sys\ntry:\n import scipy\n enable_sparse = scipy.__version__ >= '0.7'\n if not enable_sparse:\n sys.stderr.write(\"WARNING: scipy version = %s.\"\n \" We request version >=0.7.0 for the sparse code as it has\"\n \" bugs fixed in the sparse matrix code.\\n\" % scipy.__version__)\nexcept ImportError:\n enable_sparse = False\n sys.stderr.write(\"WARNING: scipy can't be imported.\"\n \" We disable the sparse matrix code.\")\n\nif enable_sparse:\n from basic import *\n import sharedvar\n from sharedvar import sparse_constructor as shared\n\n", "path": "theano/sparse/__init__.py"}]} | 758 | 148 |
gh_patches_debug_1163 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-2712 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Document that RTD uses `rel` branch for production
Hi, i'd like to add a new builder for doxygen documentation (but native, not with breath). Since there are a lot of branches like real/relcorp which a far ahead of master, i'd like to know, which branch to choose for development.
Thanks in advance!
Oli
</issue>
<code>
[start of docs/conf.py]
1 # -*- coding: utf-8 -*-
2 #
3 import os
4 import sys
5
6 from recommonmark.parser import CommonMarkParser
7
8 sys.path.insert(0, os.path.abspath('..'))
9 sys.path.append(os.path.dirname(__file__))
10 os.environ.setdefault("DJANGO_SETTINGS_MODULE", "readthedocs.settings.dev")
11
12 from django.conf import settings
13
14 import django
15 django.setup()
16
17
18 sys.path.append(os.path.abspath('_ext'))
19 extensions = [
20 'sphinx.ext.autodoc',
21 'sphinx.ext.intersphinx',
22 'sphinxcontrib.httpdomain',
23 'djangodocs',
24 'doc_extensions',
25 ]
26 templates_path = ['_templates']
27
28 source_suffix = ['.rst', '.md']
29 source_parsers = {
30 '.md': CommonMarkParser,
31 }
32
33 master_doc = 'index'
34 project = u'Read The Docs'
35 copyright = u'2010, Eric Holscher, Charlie Leifer, Bobby Grace'
36 version = '1.0'
37 release = '1.0'
38 exclude_patterns = ['_build']
39 default_role = 'obj'
40 pygments_style = 'sphinx'
41 intersphinx_mapping = {
42 'python': ('http://python.readthedocs.io/en/latest/', None),
43 'django': ('http://django.readthedocs.io/en/1.8.x/', None),
44 'sphinx': ('http://sphinx.readthedocs.io/en/latest/', None),
45 }
46 # This doesn't exist since we aren't shipping any static files ourselves.
47 #html_static_path = ['_static']
48 htmlhelp_basename = 'ReadTheDocsdoc'
49 latex_documents = [
50 ('index', 'ReadTheDocs.tex', u'Read The Docs Documentation',
51 u'Eric Holscher, Charlie Leifer, Bobby Grace', 'manual'),
52 ]
53 man_pages = [
54 ('index', 'read-the-docs', u'Read The Docs Documentation',
55 [u'Eric Holscher, Charlie Leifer, Bobby Grace'], 1)
56 ]
57
58 exclude_patterns = [
59 # 'api' # needed for ``make gettext`` to not die.
60 ]
61
62 language = 'en'
63
64 locale_dirs = [
65 'locale/',
66 ]
67 gettext_compact = False
68
69
70 on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
71 if not on_rtd: # only import and set the theme if we're building docs locally
72 import sphinx_rtd_theme
73 html_theme = 'sphinx_rtd_theme'
74 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
75
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -32,7 +32,7 @@
master_doc = 'index'
project = u'Read The Docs'
-copyright = u'2010, Eric Holscher, Charlie Leifer, Bobby Grace'
+copyright = u'2010-2017, Read the Docs, Inc & contributors'
version = '1.0'
release = '1.0'
exclude_patterns = ['_build']
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -32,7 +32,7 @@\n \n master_doc = 'index'\n project = u'Read The Docs'\n-copyright = u'2010, Eric Holscher, Charlie Leifer, Bobby Grace'\n+copyright = u'2010-2017, Read the Docs, Inc & contributors'\n version = '1.0'\n release = '1.0'\n exclude_patterns = ['_build']\n", "issue": "Document that RTD uses `rel` branch for production\nHi, i'd like to add a new builder for doxygen documentation (but native, not with breath). Since there are a lot of branches like real/relcorp which a far ahead of master, i'd like to know, which branch to choose for development.\r\n\r\nThanks in advance!\r\nOli\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\nimport os\nimport sys\n\nfrom recommonmark.parser import CommonMarkParser\n\nsys.path.insert(0, os.path.abspath('..'))\nsys.path.append(os.path.dirname(__file__))\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"readthedocs.settings.dev\")\n\nfrom django.conf import settings\n\nimport django\ndjango.setup()\n\n\nsys.path.append(os.path.abspath('_ext'))\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'sphinxcontrib.httpdomain',\n 'djangodocs',\n 'doc_extensions',\n]\ntemplates_path = ['_templates']\n\nsource_suffix = ['.rst', '.md']\nsource_parsers = {\n '.md': CommonMarkParser,\n}\n\nmaster_doc = 'index'\nproject = u'Read The Docs'\ncopyright = u'2010, Eric Holscher, Charlie Leifer, Bobby Grace'\nversion = '1.0'\nrelease = '1.0'\nexclude_patterns = ['_build']\ndefault_role = 'obj'\npygments_style = 'sphinx'\nintersphinx_mapping = {\n 'python': ('http://python.readthedocs.io/en/latest/', None),\n 'django': ('http://django.readthedocs.io/en/1.8.x/', None),\n 'sphinx': ('http://sphinx.readthedocs.io/en/latest/', None),\n}\n# This doesn't exist since we aren't shipping any static files ourselves.\n#html_static_path = ['_static']\nhtmlhelp_basename = 'ReadTheDocsdoc'\nlatex_documents = [\n ('index', 'ReadTheDocs.tex', u'Read The Docs Documentation',\n u'Eric Holscher, Charlie Leifer, Bobby Grace', 'manual'),\n]\nman_pages = [\n ('index', 'read-the-docs', u'Read The Docs Documentation',\n [u'Eric Holscher, Charlie Leifer, Bobby Grace'], 1)\n]\n\nexclude_patterns = [\n # 'api' # needed for ``make gettext`` to not die.\n]\n\nlanguage = 'en'\n\nlocale_dirs = [\n 'locale/',\n]\ngettext_compact = False\n\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\nif not on_rtd: # only import and set the theme if we're building docs locally\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n", "path": "docs/conf.py"}]} | 1,280 | 118 |
gh_patches_debug_5807 | rasdani/github-patches | git_diff | scikit-hep__awkward-970 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Doubly jagged workaround broken in awkward 1.4.0rc2
There was a quick fix (made for KM3NeT data 😄) for doubly jagged arrays which were discussed here https://github.com/scikit-hep/uproot4/issues/90 and the it uses the `akward._io` submodule which is not accessible anymore directly (`AttributeError`) in `1.4.0rc2`.
See here:
https://github.com/scikit-hep/awkward-1.0/blob/main/src/awkward/_connect/_uproot.py#L35
I am not sure what the desired fix is, to import `_io` in `_connect/_uproot.py` or if this fix is now covered by further developments, so I thought I ask first before I do a PR 😉
Here is the full MWE (needs `pip install km3net-testdata`, I am not sure if this test
```python
>>> import uproot
>>> uproot.__version__
'4.0.7'
>>> import awkward as ak
>>> ak.__version__
'1.4.0rc2'
>>> from km3net_testdata import data_path
>>> f = uproot.open(data_path("offline/mcv5.11r2.gsg_muonCChigherE-CC_50-5000GeV.km3_AAv1.jterbr00004695.jchain.aanet.498.root"))
>>> f["E/Evt/trks/trks.rec_stages"].array()
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-18-80472509fad7> in <module>
----> 1 f["E/Evt/trks/trks.rec_stages"].array()
~/Dev/km3io/venv/lib/python3.9/site-packages/uproot/behaviors/TBranch.py in array(self, interpretation, entry_start, entry_stop, decompression_executor, interpretation_executor, array_cache, library)
2076 ranges_or_baskets.append((branch, basket_num, range_or_basket))
2077
-> 2078 _ranges_or_baskets_to_arrays(
2079 self,
2080 ranges_or_baskets,
~/Dev/km3io/venv/lib/python3.9/site-packages/uproot/behaviors/TBranch.py in _ranges_or_baskets_to_arrays(hasbranches, ranges_or_baskets, branchid_interpretation, entry_start, entry_stop, decompression_executor, interpretation_executor, library, arrays, update_ranges_or_baskets)
3476
3477 elif isinstance(obj, tuple) and len(obj) == 3:
-> 3478 uproot.source.futures.delayed_raise(*obj)
3479
3480 else:
~/Dev/km3io/venv/lib/python3.9/site-packages/uproot/source/futures.py in delayed_raise(exception_class, exception_value, traceback)
44 exec("raise exception_class, exception_value, traceback")
45 else:
---> 46 raise exception_value.with_traceback(traceback)
47
48
~/Dev/km3io/venv/lib/python3.9/site-packages/uproot/behaviors/TBranch.py in basket_to_array(basket)
3420 basket_arrays = branchid_arrays[branch.cache_key]
3421
-> 3422 basket_arrays[basket.basket_num] = interpretation.basket_array(
3423 basket.data,
3424 basket.byte_offsets,
~/Dev/km3io/venv/lib/python3.9/site-packages/uproot/interpretation/objects.py in basket_array(self, data, byte_offsets, basket, branch, context, cursor_offset, library)
151 "cursor_offset": cursor_offset,
152 }
--> 153 output = awkward._connect._uproot.basket_array(\r\n 154 form, data, byte_offsets, extra\r\n 155 )\r\n\r\n~/Dev/km3io/venv/lib/python3.9/site-packages/awkward/_connect/_uproot.py in basket_array(form, data, byte_offsets, extra)\r\n 36 # FIXME: uproot_issue_90 is just a placeholder, to show how it would be done\r\n 37\r\n---> 38 return ak._io.uproot_issue_90(\r\n 39 form,\r\n 40 ak.layout.NumpyArray(data),\r\n\r\nAttributeError: module 'awkward' has no attribute '_io'\r\n```\n", "before_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nfrom __future__ import absolute_import\n\nimport json\n\n# don't import awkward._connect._uproot in awkward/__init__.py!\nimport uproot\n\nimport awkward as ak\n\n\ndef can_optimize(interpretation, form):\n    if isinstance(interpretation, uproot.interpretation.objects.AsObjects):\n        jsonform = json.loads(form.tojson(verbose=True))\n        if (\n            jsonform[\"class\"] == \"ListOffsetArray64\"\n            and jsonform[\"parameters\"].get(\"uproot\")\n            == {\"as\": \"array\", \"header\": True, \"speedbump\": False}\n            and jsonform[\"content\"][\"class\"] == \"ListOffsetArray64\"\n            and jsonform[\"content\"][\"parameters\"].get(\"uproot\")\n            == {\"as\": \"vector\", \"header\": False}\n            and jsonform[\"content\"][\"content\"][\"class\"] == \"NumpyArray\"\n            and jsonform[\"content\"][\"content\"][\"inner_shape\"] == []\n            and (\n                jsonform[\"content\"][\"content\"].get(\"primitive\") == \"float64\"\n                or jsonform[\"content\"][\"content\"].get(\"primitive\") == \"int32\"\n            )\n        ):\n            return True\n\n    return False\n\n\ndef basket_array(form, data, byte_offsets, extra):\n    # FIXME: uproot_issue_90 is just a placeholder, to show how it would be done\n\n    return ak._io.uproot_issue_90(\n        form,\n        ak.layout.NumpyArray(data),\n        ak.layout.Index32(byte_offsets),\n    )\n", "path": "src/awkward/_connect/_uproot.py"}]} 2,049 164
</issue>
<code>
[start of src/awkward/_connect/_uproot.py]
1 # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
2
3 from __future__ import absolute_import
4
5 import json
6
7 # don't import awkward._connect._uproot in awkward/__init__.py!
8 import uproot
9
10 import awkward as ak
11
12
13 def can_optimize(interpretation, form):
14 if isinstance(interpretation, uproot.interpretation.objects.AsObjects):
15 jsonform = json.loads(form.tojson(verbose=True))
16 if (
17 jsonform["class"] == "ListOffsetArray64"
18 and jsonform["parameters"].get("uproot")
19 == {"as": "array", "header": True, "speedbump": False}
20 and jsonform["content"]["class"] == "ListOffsetArray64"
21 and jsonform["content"]["parameters"].get("uproot")
22 == {"as": "vector", "header": False}
23 and jsonform["content"]["content"]["class"] == "NumpyArray"
24 and jsonform["content"]["content"]["inner_shape"] == []
25 and (
26 jsonform["content"]["content"].get("primitive") == "float64"
27 or jsonform["content"]["content"].get("primitive") == "int32"
28 )
29 ):
30 return True
31
32 return False
33
34
35 def basket_array(form, data, byte_offsets, extra):
36 # FIXME: uproot_issue_90 is just a placeholder, to show how it would be done
37
38 return ak._io.uproot_issue_90(
39 form,
40 ak.layout.NumpyArray(data),
41 ak.layout.Index32(byte_offsets),
42 )
43
[end of src/awkward/_connect/_uproot.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/awkward/_connect/_uproot.py b/src/awkward/_connect/_uproot.py
--- a/src/awkward/_connect/_uproot.py
+++ b/src/awkward/_connect/_uproot.py
@@ -33,9 +33,11 @@
def basket_array(form, data, byte_offsets, extra):
+ import awkward._io
+
# FIXME: uproot_issue_90 is just a placeholder, to show how it would be done
- return ak._io.uproot_issue_90(
+ return awkward._io.uproot_issue_90(
form,
ak.layout.NumpyArray(data),
ak.layout.Index32(byte_offsets),
| {"golden_diff": "diff --git a/src/awkward/_connect/_uproot.py b/src/awkward/_connect/_uproot.py\n--- a/src/awkward/_connect/_uproot.py\n+++ b/src/awkward/_connect/_uproot.py\n@@ -33,9 +33,11 @@\n \n \n def basket_array(form, data, byte_offsets, extra):\n+ import awkward._io\n+\n # FIXME: uproot_issue_90 is just a placeholder, to show how it would be done\n \n- return ak._io.uproot_issue_90(\n+ return awkward._io.uproot_issue_90(\n form,\n ak.layout.NumpyArray(data),\n ak.layout.Index32(byte_offsets),\n", "issue": "Doubly jagged workaround broken in awkward 1.4.0rc2\nThere was a quick fix (made for KM3NeT data \ud83d\ude04) for doubly jagged arrays which were discussed here https://github.com/scikit-hep/uproot4/issues/90 and the it uses the `akward._io` submodule which is not accessible anymore directly (`AttributeError`) in `1.4.0rc2`.\r\n\r\nSee here:\r\n\r\nhttps://github.com/scikit-hep/awkward-1.0/blob/main/src/awkward/_connect/_uproot.py#L35\r\n\r\nI am not sure what the desired fix is, to import `_io` in `_connect/_uproot.py` or if this fix is now covered by further developments, so I thought I ask first before I do a PR \ud83d\ude09 \r\n\r\nHere is the full MWE (needs `pip install km3net-testdata`, I am not sure if this test\r\n\r\n```python\r\n>>> import uproot\r\n\r\n>>> uproot.__version__\r\n'4.0.7'\r\n\r\n>>> import awkward as ak\r\n\r\n>>> ak.__version__\r\n'1.4.0rc2'\r\n\r\n>>> from km3net_testdata import data_path\r\n\r\n>>> f = uproot.open(data_path(\"offline/mcv5.11r2.gsg_muonCChigherE-CC_50-5000GeV.km3_AAv1.jterbr00004695.jchain.aanet.498.root\"))\r\n\r\n>>> f[\"E/Evt/trks/trks.rec_stages\"].array()\r\n---------------------------------------------------------------------------\r\nAttributeError Traceback (most recent call last)\r\n<ipython-input-18-80472509fad7> in <module>\r\n----> 1 f[\"E/Evt/trks/trks.rec_stages\"].array()\r\n\r\n~/Dev/km3io/venv/lib/python3.9/site-packages/uproot/behaviors/TBranch.py in array(self, interpretation, entry_start, entry_stop, decompression_executor, interpretation_executor, array_cache, library)\r\n 2076 ranges_or_baskets.append((branch, basket_num, range_or_basket))\r\n 2077\r\n-> 2078 _ranges_or_baskets_to_arrays(\r\n 2079 self,\r\n 2080 ranges_or_baskets,\r\n\r\n~/Dev/km3io/venv/lib/python3.9/site-packages/uproot/behaviors/TBranch.py in _ranges_or_baskets_to_arrays(hasbranches, ranges_or_baskets, branchid_interpretation, entry_start, entry_stop, decompression_executor, interpretation_executor, library, arrays, update_ranges_or_baskets)\r\n 3476\r\n 3477 elif isinstance(obj, tuple) and len(obj) == 3:\r\n-> 3478 uproot.source.futures.delayed_raise(*obj)\r\n 3479\r\n 3480 else:\r\n\r\n~/Dev/km3io/venv/lib/python3.9/site-packages/uproot/source/futures.py in delayed_raise(exception_class, exception_value, traceback)\r\n 44 exec(\"raise exception_class, exception_value, traceback\")\r\n 45 else:\r\n---> 46 raise exception_value.with_traceback(traceback)\r\n 47\r\n 48\r\n\r\n~/Dev/km3io/venv/lib/python3.9/site-packages/uproot/behaviors/TBranch.py in basket_to_array(basket)\r\n 3420 basket_arrays = branchid_arrays[branch.cache_key]\r\n 3421\r\n-> 3422 basket_arrays[basket.basket_num] = interpretation.basket_array(\r\n 3423 basket.data,\r\n 3424 basket.byte_offsets,\r\n\r\n~/Dev/km3io/venv/lib/python3.9/site-packages/uproot/interpretation/objects.py in basket_array(self, data, byte_offsets, basket, branch, context, cursor_offset, library)\r\n 151 \"cursor_offset\": cursor_offset,\r\n 152 }\r\n--> 153 output = 
awkward._connect._uproot.basket_array(\r\n 154 form, data, byte_offsets, extra\r\n 155 )\r\n\r\n~/Dev/km3io/venv/lib/python3.9/site-packages/awkward/_connect/_uproot.py in basket_array(form, data, byte_offsets, extra)\r\n 36 # FIXME: uproot_issue_90 is just a placeholder, to show how it would be done\r\n 37\r\n---> 38 return ak._io.uproot_issue_90(\r\n 39 form,\r\n 40 ak.layout.NumpyArray(data),\r\n\r\nAttributeError: module 'awkward' has no attribute '_io'\r\n```\r\n\r\n\r\n\n", "before_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nfrom __future__ import absolute_import\n\nimport json\n\n# don't import awkward._connect._uproot in awkward/__init__.py!\nimport uproot\n\nimport awkward as ak\n\n\ndef can_optimize(interpretation, form):\n if isinstance(interpretation, uproot.interpretation.objects.AsObjects):\n jsonform = json.loads(form.tojson(verbose=True))\n if (\n jsonform[\"class\"] == \"ListOffsetArray64\"\n and jsonform[\"parameters\"].get(\"uproot\")\n == {\"as\": \"array\", \"header\": True, \"speedbump\": False}\n and jsonform[\"content\"][\"class\"] == \"ListOffsetArray64\"\n and jsonform[\"content\"][\"parameters\"].get(\"uproot\")\n == {\"as\": \"vector\", \"header\": False}\n and jsonform[\"content\"][\"content\"][\"class\"] == \"NumpyArray\"\n and jsonform[\"content\"][\"content\"][\"inner_shape\"] == []\n and (\n jsonform[\"content\"][\"content\"].get(\"primitive\") == \"float64\"\n or jsonform[\"content\"][\"content\"].get(\"primitive\") == \"int32\"\n )\n ):\n return True\n\n return False\n\n\ndef basket_array(form, data, byte_offsets, extra):\n # FIXME: uproot_issue_90 is just a placeholder, to show how it would be done\n\n return ak._io.uproot_issue_90(\n form,\n ak.layout.NumpyArray(data),\n ak.layout.Index32(byte_offsets),\n )\n", "path": "src/awkward/_connect/_uproot.py"}]} | 2,021 | 156 |
gh_patches_debug_30613 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-2729 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
404 errors not raised on certain pages
Some pages should raise a 404 error instead of displaying the page content.
I discovered this bug while trying to fix broken links on the terms-of-use (CGU) page: a forgotten mailto currently produces a link to http://zestedesavoir.com/pages/cgu/[email protected], which displays the CGU page even though a 404 error should be raised. Whatever is appended to the URL, it still works, and this affects several pages...
A `$` is missing at the end of some regular expressions in zds/pages/urls.py; a minimal sketch of the effect follows.
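For illustration only (a standalone sketch, not code from the repository): without a trailing `$`, a URL regex matches any path that merely starts with the pattern, so extra trailing segments are silently accepted.

```python
import re

loose = re.compile(r"^cgu/")    # unanchored, as in the current urls.py
strict = re.compile(r"^cgu/$")  # anchored variant

print(bool(loose.match("cgu/[email protected]")))   # True  -> CGU page served
print(bool(strict.match("cgu/[email protected]")))  # False -> falls through to a 404
```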
Moreover, the urls.py file imports pages.views but never uses it.
So I am wondering whether I should remove the views import (line 5) or instead use, for example, `views.about` rather than `'zds.pages.views.about'`? (Both styles are sketched right after this paragraph.)
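A hypothetical side-by-side of the two styles, assuming the old-style Django URL API used here (dotted-string view references were only removed in Django 1.10):

```python
from django.conf.urls import url
from zds.pages import views

# Either form resolves to the same view; if the dotted-string form is kept,
# the now-unused "views" import can simply be dropped.
by_string = url(r'^apropos/$', 'zds.pages.views.about')
by_callable = url(r'^apropos/$', views.about)
```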
I am happy to take care of submitting a PR to fix these bugs :)
EDIT: I cannot assign myself to this issue (as suggested by CONTRIBUTING.md); a permissions problem?
</issue>
<code>
[start of zds/pages/urls.py]
1 # coding: utf-8
2
3 from django.conf.urls import patterns, url
4
5 from . import views
6
7
8 urlpatterns = patterns('',
9
10 url(r'^apropos/$', 'zds.pages.views.about'),
11 url(r'^association/$', 'zds.pages.views.association'),
12 url(r'^contact/', 'zds.pages.views.contact'),
13 url(r'^cgu/', 'zds.pages.views.eula'),
14 url(r'^alertes/', 'zds.pages.views.alerts'),
15 url(r'^cookies/', 'zds.pages.views.cookies'),
16 url(r'^association/inscription/$', 'zds.pages.views.assoc_subscribe'),
17
18 url(r'^$', 'zds.pages.views.index'),
19 )
20
[end of zds/pages/urls.py]
[start of zds/search/urls.py]
1 # coding: utf-8
2
3 from django.conf.urls import patterns, url
4
5 from . import views
6 from haystack.views import search_view_factory
7 from zds.search.views import CustomSearchView
8 from zds.search.forms import CustomSearchForm
9
10 urlpatterns = patterns('haystack.views',
11 url(r'^$', search_view_factory(
12 view_class=CustomSearchView,
13 template='search/search.html',
14 form_class=CustomSearchForm
15 ), name='haystack_search'))
16
17 urlpatterns += patterns('',
18 url(r'^opensearch.xml', 'zds.search.views.opensearch')
19 )
20
[end of zds/search/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zds/pages/urls.py b/zds/pages/urls.py
--- a/zds/pages/urls.py
+++ b/zds/pages/urls.py
@@ -2,17 +2,15 @@
from django.conf.urls import patterns, url
-from . import views
-
urlpatterns = patterns('',
url(r'^apropos/$', 'zds.pages.views.about'),
url(r'^association/$', 'zds.pages.views.association'),
- url(r'^contact/', 'zds.pages.views.contact'),
- url(r'^cgu/', 'zds.pages.views.eula'),
- url(r'^alertes/', 'zds.pages.views.alerts'),
- url(r'^cookies/', 'zds.pages.views.cookies'),
+ url(r'^contact/$', 'zds.pages.views.contact'),
+ url(r'^cgu/$', 'zds.pages.views.eula'),
+ url(r'^alertes/$', 'zds.pages.views.alerts'),
+ url(r'^cookies/$', 'zds.pages.views.cookies'),
url(r'^association/inscription/$', 'zds.pages.views.assoc_subscribe'),
url(r'^$', 'zds.pages.views.index'),
diff --git a/zds/search/urls.py b/zds/search/urls.py
--- a/zds/search/urls.py
+++ b/zds/search/urls.py
@@ -2,7 +2,6 @@
from django.conf.urls import patterns, url
-from . import views
from haystack.views import search_view_factory
from zds.search.views import CustomSearchView
from zds.search.forms import CustomSearchForm
@@ -15,5 +14,5 @@
), name='haystack_search'))
urlpatterns += patterns('',
- url(r'^opensearch.xml', 'zds.search.views.opensearch')
+ url(r'^opensearch\.xml$', 'zds.search.views.opensearch')
)
| {"golden_diff": "diff --git a/zds/pages/urls.py b/zds/pages/urls.py\n--- a/zds/pages/urls.py\n+++ b/zds/pages/urls.py\n@@ -2,17 +2,15 @@\n \n from django.conf.urls import patterns, url\n \n-from . import views\n-\n \n urlpatterns = patterns('',\n \n url(r'^apropos/$', 'zds.pages.views.about'),\n url(r'^association/$', 'zds.pages.views.association'),\n- url(r'^contact/', 'zds.pages.views.contact'),\n- url(r'^cgu/', 'zds.pages.views.eula'),\n- url(r'^alertes/', 'zds.pages.views.alerts'),\n- url(r'^cookies/', 'zds.pages.views.cookies'),\n+ url(r'^contact/$', 'zds.pages.views.contact'),\n+ url(r'^cgu/$', 'zds.pages.views.eula'),\n+ url(r'^alertes/$', 'zds.pages.views.alerts'),\n+ url(r'^cookies/$', 'zds.pages.views.cookies'),\n url(r'^association/inscription/$', 'zds.pages.views.assoc_subscribe'),\n \n url(r'^$', 'zds.pages.views.index'),\ndiff --git a/zds/search/urls.py b/zds/search/urls.py\n--- a/zds/search/urls.py\n+++ b/zds/search/urls.py\n@@ -2,7 +2,6 @@\n \n from django.conf.urls import patterns, url\n \n-from . import views\n from haystack.views import search_view_factory\n from zds.search.views import CustomSearchView\n from zds.search.forms import CustomSearchForm\n@@ -15,5 +14,5 @@\n ), name='haystack_search'))\n \n urlpatterns += patterns('',\n- url(r'^opensearch.xml', 'zds.search.views.opensearch')\n+ url(r'^opensearch\\.xml$', 'zds.search.views.opensearch')\n )\n", "issue": "Erreurs 404 non g\u00e9n\u00e9r\u00e9es sur certaines pages\nCertaines pages devraient g\u00e9n\u00e9r\u00e9es des erreurs 404 au lieu d'afficher le contenu des pages.\nJ'ai d\u00e9couvert ce bug en voulant corriger des erreurs dans les liens sur la page des CGU, un oubli de mailto produit actuellement un lien vers http://zestedesavoir.com/pages/cgu/[email protected] qui affiche la page des CGU. Or une erreur 404 devrait \u00eatre g\u00e9n\u00e9r\u00e9e. Peu importe la suite de l'URL cela fonctionne et ce pour plusieurs pages...\nIl manque un `$` \u00e0 la fin de certaines expressions r\u00e9guli\u00e8res dans zds/pages/urls.py.\n\nDe plus le fichier urls.py importe pages.views mais ne l'utilise \u00e0 aucun moment.\nDu coup je me demande si je supprime l'import de views (ligne 5) ou bien si j'utilise par exemple `views.about` au lieu de `'zds.pages.views.about'` ?\n\nJe veux bien bien me charger de faire une PR pour corriger ces bugs :)\n\nEDIT: Je n'arrive pas \u00e0 m'assigner \u00e0 cette issue (comme sugg\u00e9r\u00e9 par CONTRIBUTING.md), probl\u00e8mes de droits ?\n\n", "before_files": [{"content": "# coding: utf-8\n\nfrom django.conf.urls import patterns, url\n\nfrom . import views\n\n\nurlpatterns = patterns('',\n\n url(r'^apropos/$', 'zds.pages.views.about'),\n url(r'^association/$', 'zds.pages.views.association'),\n url(r'^contact/', 'zds.pages.views.contact'),\n url(r'^cgu/', 'zds.pages.views.eula'),\n url(r'^alertes/', 'zds.pages.views.alerts'),\n url(r'^cookies/', 'zds.pages.views.cookies'),\n url(r'^association/inscription/$', 'zds.pages.views.assoc_subscribe'),\n\n url(r'^$', 'zds.pages.views.index'),\n )\n", "path": "zds/pages/urls.py"}, {"content": "# coding: utf-8\n\nfrom django.conf.urls import patterns, url\n\nfrom . 
import views\nfrom haystack.views import search_view_factory\nfrom zds.search.views import CustomSearchView\nfrom zds.search.forms import CustomSearchForm\n\nurlpatterns = patterns('haystack.views',\n url(r'^$', search_view_factory(\n view_class=CustomSearchView,\n template='search/search.html',\n form_class=CustomSearchForm\n ), name='haystack_search'))\n\nurlpatterns += patterns('',\n url(r'^opensearch.xml', 'zds.search.views.opensearch')\n )\n", "path": "zds/search/urls.py"}]} | 1,166 | 406 |
gh_patches_debug_912 | rasdani/github-patches | git_diff | AUTOMATIC1111__stable-diffusion-webui-60 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
FileNotFoundError after new update
Getting a FileNotFoundError: [WinError 3] The system cannot find the path specified: 'C:\\Users\\admin\\stable-diffusion-webui\\scripts' after the new update.
I am not great with the coding side of things; I was using it just fine yesterday, but I had downloaded the repo instead of using git clone. For easier updates I started a new installation by git-cloning into my user folder; the installation went well, but I ran into this while launching through webui.py.
Python 3.10.6
venv C:\Users\admin\stable-diffusion-webui\venv\Scripts\Python.exe
Launching webui.py...
Loading model from C:\Users\admin\stable-diffusion-webui\model.ckpt
Global Step: 470000
LatentDiffusion: Running in eps-prediction mode
DiffusionWrapper has 859.52 M params.
making attention of type 'vanilla' with 512 in_channels
Working with z of shape (1, 4, 32, 32) = 4096 dimensions.
making attention of type 'vanilla' with 512 in_channels
Traceback (most recent call last):
File "C:\Users\admin\stable-diffusion-webui\webui.py", line 135, in <module>
modules.scripts.load_scripts(os.path.join(script_path, "scripts"))
File "C:\Users\admin\stable-diffusion-webui\modules\scripts.py", line 32, in load_scripts
for filename in os.listdir(basedir):
FileNotFoundError: [WinError 3] The system cannot find the path specified: 'C:\\Users\\admin\\stable-diffusion-webui\\scripts'
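A minimal defensive guard along these lines avoids the crash when the `scripts` directory is absent (an illustrative sketch only; the project's actual fix may differ):

```python
import os

def load_scripts(basedir):
    # Tolerate a missing scripts directory instead of raising FileNotFoundError.
    if not os.path.isdir(basedir):
        return
    for filename in os.listdir(basedir):
        path = os.path.join(basedir, filename)
        print("would load", path)
```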
</issue>
<code>
[start of modules/scripts.py]
1 import os
2 import sys
3 import traceback
4
5 import modules.ui as ui
6 import gradio as gr
7
8 from modules.processing import StableDiffusionProcessing
9
10 class Script:
11 filename = None
12 args_from = None
13 args_to = None
14
15 def title(self):
16 raise NotImplementedError()
17
18 def ui(self, is_img2img):
19 pass
20
21 def run(self, *args):
22 raise NotImplementedError()
23
24 def describe(self):
25 return ""
26
27
28 scripts = []
29
30
31 def load_scripts(basedir):
32 for filename in os.listdir(basedir):
33 path = os.path.join(basedir, filename)
34
35 if not os.path.isfile(path):
36 continue
37
38 with open(path, "r", encoding="utf8") as file:
39 text = file.read()
40
41 from types import ModuleType
42 compiled = compile(text, path, 'exec')
43 module = ModuleType(filename)
44 exec(compiled, module.__dict__)
45
46 for key, script_class in module.__dict__.items():
47 if type(script_class) == type and issubclass(script_class, Script):
48 obj = script_class()
49 obj.filename = path
50
51 scripts.append(obj)
52
53
54 def wrap_call(func, filename, funcname, *args, default=None, **kwargs):
55 try:
56 res = func(*args, **kwargs)
57 return res
58 except Exception:
59 print(f"Error calling: {filename}/{funcname}", file=sys.stderr)
60 print(traceback.format_exc(), file=sys.stderr)
61
62 return default
63
64
65 def setup_ui(is_img2img):
66 titles = [wrap_call(script.title, script.filename, "title") or f"{script.filename} [error]" for script in scripts]
67
68 dropdown = gr.Dropdown(label="Script", choices=["None"] + titles, value="None", type="index")
69
70 inputs = [dropdown]
71
72 for script in scripts:
73 script.args_from = len(inputs)
74 controls = script.ui(is_img2img)
75
76 for control in controls:
77 control.visible = False
78
79 inputs += controls
80 script.args_to = len(inputs)
81
82 def select_script(index):
83 if index > 0:
84 script = scripts[index-1]
85 args_from = script.args_from
86 args_to = script.args_to
87 else:
88 args_from = 0
89 args_to = 0
90
91 return [ui.gr_show(True if i == 0 else args_from <= i < args_to) for i in range(len(inputs))]
92
93 dropdown.change(
94 fn=select_script,
95 inputs=[dropdown],
96 outputs=inputs
97 )
98
99 return inputs
100
101
102 def run(p: StableDiffusionProcessing, *args):
103 script_index = args[0] - 1
104
105 if script_index < 0 or script_index >= len(scripts):
106 return None
107
108 script = scripts[script_index]
109
110 script_args = args[script.args_from:script.args_to]
111 processed = script.run(p, *script_args)
112
113 return processed
114
[end of modules/scripts.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/modules/scripts.py b/modules/scripts.py
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -29,6 +29,9 @@
def load_scripts(basedir):
+ if not os.path.exists(basedir):
+ return
+
for filename in os.listdir(basedir):
path = os.path.join(basedir, filename)
| {"golden_diff": "diff --git a/modules/scripts.py b/modules/scripts.py\n--- a/modules/scripts.py\n+++ b/modules/scripts.py\n@@ -29,6 +29,9 @@\n \r\n \r\n def load_scripts(basedir):\r\n+ if not os.path.exists(basedir):\r\n+ return\r\n+\r\n for filename in os.listdir(basedir):\r\n path = os.path.join(basedir, filename)\n", "issue": "FileNotFoundError after new update\nGetting a FileNotFoundError: [WinError 3] The system cannot find the path specified: 'C:\\\\Users\\\\admin\\\\stable-diffusion-webui\\\\scripts' after the new update. \r\n\r\nNot exactly good at all the coding stuff, using it just fine yesterday but I downloaded the repo instead of git clone, for the sake of easier update I started a new installation by git cloning into user folder and the installation went well but ran into this while launching through webui.py.\r\n\r\nPython 3.10.6\r\nvenv C:\\Users\\admin\\stable-diffusion-webui\\venv\\Scripts\\Python.exe\r\nLaunching webui.py...\r\nLoading model from C:\\Users\\admin\\stable-diffusion-webui\\model.ckpt\r\nGlobal Step: 470000\r\nLatentDiffusion: Running in eps-prediction mode\r\nDiffusionWrapper has 859.52 M params.\r\nmaking attention of type 'vanilla' with 512 in_channels\r\nWorking with z of shape (1, 4, 32, 32) = 4096 dimensions.\r\nmaking attention of type 'vanilla' with 512 in_channels\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\admin\\stable-diffusion-webui\\webui.py\", line 135, in <module>\r\n modules.scripts.load_scripts(os.path.join(script_path, \"scripts\"))\r\n File \"C:\\Users\\admin\\stable-diffusion-webui\\modules\\scripts.py\", line 32, in load_scripts\r\n for filename in os.listdir(basedir):\r\nFileNotFoundError: [WinError 3] The system cannot find the path specified: 'C:\\\\Users\\\\admin\\\\stable-diffusion-webui\\\\scripts'\n", "before_files": [{"content": "import os\r\nimport sys\r\nimport traceback\r\n\r\nimport modules.ui as ui\r\nimport gradio as gr\r\n\r\nfrom modules.processing import StableDiffusionProcessing\r\n\r\nclass Script:\r\n filename = None\r\n args_from = None\r\n args_to = None\r\n\r\n def title(self):\r\n raise NotImplementedError()\r\n\r\n def ui(self, is_img2img):\r\n pass\r\n\r\n def run(self, *args):\r\n raise NotImplementedError()\r\n\r\n def describe(self):\r\n return \"\"\r\n\r\n\r\nscripts = []\r\n\r\n\r\ndef load_scripts(basedir):\r\n for filename in os.listdir(basedir):\r\n path = os.path.join(basedir, filename)\r\n\r\n if not os.path.isfile(path):\r\n continue\r\n\r\n with open(path, \"r\", encoding=\"utf8\") as file:\r\n text = file.read()\r\n\r\n from types import ModuleType\r\n compiled = compile(text, path, 'exec')\r\n module = ModuleType(filename)\r\n exec(compiled, module.__dict__)\r\n\r\n for key, script_class in module.__dict__.items():\r\n if type(script_class) == type and issubclass(script_class, Script):\r\n obj = script_class()\r\n obj.filename = path\r\n\r\n scripts.append(obj)\r\n\r\n\r\ndef wrap_call(func, filename, funcname, *args, default=None, **kwargs):\r\n try:\r\n res = func(*args, **kwargs)\r\n return res\r\n except Exception:\r\n print(f\"Error calling: {filename}/{funcname}\", file=sys.stderr)\r\n print(traceback.format_exc(), file=sys.stderr)\r\n\r\n return default\r\n\r\n\r\ndef setup_ui(is_img2img):\r\n titles = [wrap_call(script.title, script.filename, \"title\") or f\"{script.filename} [error]\" for script in scripts]\r\n\r\n dropdown = gr.Dropdown(label=\"Script\", choices=[\"None\"] + titles, value=\"None\", type=\"index\")\r\n\r\n inputs = [dropdown]\r\n\r\n for script in 
scripts:\r\n script.args_from = len(inputs)\r\n controls = script.ui(is_img2img)\r\n\r\n for control in controls:\r\n control.visible = False\r\n\r\n inputs += controls\r\n script.args_to = len(inputs)\r\n\r\n def select_script(index):\r\n if index > 0:\r\n script = scripts[index-1]\r\n args_from = script.args_from\r\n args_to = script.args_to\r\n else:\r\n args_from = 0\r\n args_to = 0\r\n\r\n return [ui.gr_show(True if i == 0 else args_from <= i < args_to) for i in range(len(inputs))]\r\n\r\n dropdown.change(\r\n fn=select_script,\r\n inputs=[dropdown],\r\n outputs=inputs\r\n )\r\n\r\n return inputs\r\n\r\n\r\ndef run(p: StableDiffusionProcessing, *args):\r\n script_index = args[0] - 1\r\n\r\n if script_index < 0 or script_index >= len(scripts):\r\n return None\r\n\r\n script = scripts[script_index]\r\n\r\n script_args = args[script.args_from:script.args_to]\r\n processed = script.run(p, *script_args)\r\n\r\n return processed\r\n", "path": "modules/scripts.py"}]} | 1,795 | 84 |
gh_patches_debug_12710 | rasdani/github-patches | git_diff | autorope__donkeycar-273 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support WIFI network that does not have internet access
REF: https://github.com/wroscoe/donkey/blob/dev/donkeycar/util/web.py
The system determines its IP address by opening a socket connection to 8.8.8.8.
This approach fails when the WIFI network does not have internet access.
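One possible fallback (an untested sketch, not the project's confirmed fix) is to catch the connection error and return the loopback address:

```python
import socket

def get_ip_address(default="127.0.0.1"):
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # UDP connect() sends no packets; it only selects a route, but it
        # still raises OSError when no route to 8.8.8.8 exists.
        s.connect(("8.8.8.8", 53))
        return s.getsockname()[0]
    except OSError:
        return default
    finally:
        s.close()
```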
</issue>
<code>
[start of donkeycar/util/web.py]
1 import socket
2
3 def get_ip_address():
4 ip = ([l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")][:1],
5 [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in
6 [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) if l][0][0])
7 return ip
8
[end of donkeycar/util/web.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/donkeycar/util/web.py b/donkeycar/util/web.py
--- a/donkeycar/util/web.py
+++ b/donkeycar/util/web.py
@@ -1,7 +1,10 @@
import socket
def get_ip_address():
- ip = ([l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")][:1],
- [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in
- [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) if l][0][0])
- return ip
+ try:
+ ip = ([l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")][:1],
+ [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in
+ [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) if l][0][0])
+ return ip
+ except OSError: #occurs when cannot connect to '8.8.8.8'
+ return "127.0.0.1" #loopback
\ No newline at end of file
| {"golden_diff": "diff --git a/donkeycar/util/web.py b/donkeycar/util/web.py\n--- a/donkeycar/util/web.py\n+++ b/donkeycar/util/web.py\n@@ -1,7 +1,10 @@\n import socket\n \n def get_ip_address():\n- ip = ([l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith(\"127.\")][:1],\n- [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in\n- [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) if l][0][0])\n- return ip\n+ try:\n+ ip = ([l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith(\"127.\")][:1],\n+ [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in\n+ [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) if l][0][0])\n+ return ip\n+ except OSError: #occurs when cannot connect to '8.8.8.8' \n+ return \"127.0.0.1\" #loopback\n\\ No newline at end of file\n", "issue": "Support WIFI network that does not have internet access\nREF: https://github.com/wroscoe/donkey/blob/dev/donkeycar/util/web.py\r\n\r\nThe system determines its IP address using a ping to 8.8.8.8\r\nThis approach fails when the WIFI network does not have internet access.\r\n\r\n\r\n\n", "before_files": [{"content": "import socket\n\ndef get_ip_address():\n ip = ([l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith(\"127.\")][:1],\n [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in\n [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) if l][0][0])\n return ip\n", "path": "donkeycar/util/web.py"}]} | 717 | 312 |
gh_patches_debug_26766 | rasdani/github-patches | git_diff | modin-project__modin-1045 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Capitalization of "S" in "S3://" results in inconsistent behaviors when reading from S3 path
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Linux
- **Modin installed from (source or binary)**: binary
- **Modin version**: 0.7.0
- **Python version**: 3.6.8
- **Exact command to reproduce**:
### Describe the problem
Reading data from an S3 path, e.g. with `read_csv` or `read_json`, behaves differently depending on the capitalization of the "S" in the path. See the code example below; a sketch of a case-insensitive match follows it.
### Source code / logs
```
import pandas as pd
import ray
import modin.pandas as mpd
filepath = "s3://my-bucket/data/traffic.json"
filepath_2 = "s3://my-bucket/data/BikeSharingDaily.csv"
filepath_3 = "S3://my-bucket/data/BikeSharingDaily.csv"
# working
df_native = pd.read_json(filepath, lines=True)
df_native_2 = pd.read_csv(filepath_2)
# not working (FileNotFoundError: [Errno 2] No such file or directory: 's3://my-bucket/data/traffic.json')
df_modin = mpd.read_json(filepath, lines=True)
# working (but it prints, defaulting to pandas implementation)
df_modin_2 = mpd.read_csv(filepath_2)
# working (no additional print)
df_modin_3 = mpd.read_csv(filepath_3)
```
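A case-insensitive match on the scheme would treat both spellings alike; a standalone sketch of the idea (one possible direction, not necessarily Modin's chosen fix):

```python
import re

S3_ADDRESS_REGEX = re.compile(r"s3://(.*?)/(.*)", re.IGNORECASE)

for path in ("s3://my-bucket/data/x.csv", "S3://my-bucket/data/x.csv"):
    print(bool(S3_ADDRESS_REGEX.search(path)))  # True, True
```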
</issue>
<code>
[start of modin/engines/base/io/file_reader.py]
1 import os
2 import re
3
4 S3_ADDRESS_REGEX = re.compile("s3://(.*?)/(.*)")
5 NOT_IMPLEMENTED_MESSAGE = "Implement in children classes!"
6
7
8 class FileReader:
9 frame_cls = None
10 frame_partition_cls = None
11 query_compiler_cls = None
12
13 @classmethod
14 def get_path(cls, file_path):
15 if S3_ADDRESS_REGEX.search(file_path):
16 return file_path
17 else:
18 return os.path.abspath(file_path)
19
20 @classmethod
21 def file_open(cls, file_path, mode="rb", compression="infer"):
22 if isinstance(file_path, str):
23 match = S3_ADDRESS_REGEX.search(file_path)
24 if match:
25 import s3fs as S3FS
26 from botocore.exceptions import NoCredentialsError
27
28 s3fs = S3FS.S3FileSystem(anon=False)
29 try:
30 return s3fs.open(file_path)
31 except NoCredentialsError:
32 s3fs = S3FS.S3FileSystem(anon=True)
33 return s3fs.open(file_path)
34 elif compression == "gzip":
35 import gzip
36
37 return gzip.open(file_path, mode=mode)
38 elif compression == "bz2":
39 import bz2
40
41 return bz2.BZ2File(file_path, mode=mode)
42 elif compression == "xz":
43 import lzma
44
45 return lzma.LZMAFile(file_path, mode=mode)
46 elif compression == "zip":
47 import zipfile
48
49 zf = zipfile.ZipFile(file_path, mode=mode.replace("b", ""))
50 if zf.mode == "w":
51 return zf
52 elif zf.mode == "r":
53 zip_names = zf.namelist()
54 if len(zip_names) == 1:
55 f = zf.open(zip_names.pop())
56 return f
57 elif len(zip_names) == 0:
58 raise ValueError(
59 "Zero files found in ZIP file {}".format(file_path)
60 )
61 else:
62 raise ValueError(
63 "Multiple files found in ZIP file."
64 " Only one file per ZIP: {}".format(zip_names)
65 )
66
67 return open(file_path, mode=mode)
68
69 @classmethod
70 def file_size(cls, f):
71 cur_pos = f.tell()
72 f.seek(0, os.SEEK_END)
73 size = f.tell()
74 f.seek(cur_pos, os.SEEK_SET)
75 return size
76
77 @classmethod
78 def file_exists(cls, file_path):
79 if isinstance(file_path, str):
80 match = S3_ADDRESS_REGEX.search(file_path)
81 if match:
82 import s3fs as S3FS
83 from botocore.exceptions import NoCredentialsError
84
85 s3fs = S3FS.S3FileSystem(anon=False)
86 exists = False
87 try:
88 exists = s3fs.exists(file_path) or exists
89 except NoCredentialsError:
90 pass
91 s3fs = S3FS.S3FileSystem(anon=True)
92 return exists or s3fs.exists(file_path)
93 return os.path.exists(file_path)
94
95 @classmethod
96 def deploy(cls, func, args, num_return_vals):
97 raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)
98
99 def parse(self, func, args, num_return_vals):
100 raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)
101
102 @classmethod
103 def materialize(cls, obj_id):
104 raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)
105
[end of modin/engines/base/io/file_reader.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/modin/engines/base/io/file_reader.py b/modin/engines/base/io/file_reader.py
--- a/modin/engines/base/io/file_reader.py
+++ b/modin/engines/base/io/file_reader.py
@@ -1,7 +1,7 @@
import os
import re
-S3_ADDRESS_REGEX = re.compile("s3://(.*?)/(.*)")
+S3_ADDRESS_REGEX = re.compile("[sS]3://(.*?)/(.*)")
NOT_IMPLEMENTED_MESSAGE = "Implement in children classes!"
@@ -21,7 +21,9 @@
def file_open(cls, file_path, mode="rb", compression="infer"):
if isinstance(file_path, str):
match = S3_ADDRESS_REGEX.search(file_path)
- if match:
+ if match is not None:
+ if file_path[0] == "S":
+ file_path = "{}{}".format("s", file_path[1:])
import s3fs as S3FS
from botocore.exceptions import NoCredentialsError
@@ -78,7 +80,9 @@
def file_exists(cls, file_path):
if isinstance(file_path, str):
match = S3_ADDRESS_REGEX.search(file_path)
- if match:
+ if match is not None:
+ if file_path[0] == "S":
+ file_path = "{}{}".format("s", file_path[1:])
import s3fs as S3FS
from botocore.exceptions import NoCredentialsError
| {"golden_diff": "diff --git a/modin/engines/base/io/file_reader.py b/modin/engines/base/io/file_reader.py\n--- a/modin/engines/base/io/file_reader.py\n+++ b/modin/engines/base/io/file_reader.py\n@@ -1,7 +1,7 @@\n import os\n import re\n \n-S3_ADDRESS_REGEX = re.compile(\"s3://(.*?)/(.*)\")\n+S3_ADDRESS_REGEX = re.compile(\"[sS]3://(.*?)/(.*)\")\n NOT_IMPLEMENTED_MESSAGE = \"Implement in children classes!\"\n \n \n@@ -21,7 +21,9 @@\n def file_open(cls, file_path, mode=\"rb\", compression=\"infer\"):\n if isinstance(file_path, str):\n match = S3_ADDRESS_REGEX.search(file_path)\n- if match:\n+ if match is not None:\n+ if file_path[0] == \"S\":\n+ file_path = \"{}{}\".format(\"s\", file_path[1:])\n import s3fs as S3FS\n from botocore.exceptions import NoCredentialsError\n \n@@ -78,7 +80,9 @@\n def file_exists(cls, file_path):\n if isinstance(file_path, str):\n match = S3_ADDRESS_REGEX.search(file_path)\n- if match:\n+ if match is not None:\n+ if file_path[0] == \"S\":\n+ file_path = \"{}{}\".format(\"s\", file_path[1:])\n import s3fs as S3FS\n from botocore.exceptions import NoCredentialsError\n", "issue": "Capitalization of \"S\" in \"S3://\" results in inconsistent behaviors when reading from S3 path\n<!--\r\nGeneral questions should be asked on the mailing list [email protected].\r\n\r\nBefore submitting an issue, please fill out the following form.\r\n-->\r\n\r\n### System information\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Linux\r\n- **Modin installed from (source or binary)**: binary\r\n- **Modin version**: 0.7.0\r\n- **Python version**: 3.6.8\r\n- **Exact command to reproduce**: \r\n\r\n<!--\r\nYou can obtain the Modin version with\r\n\r\npython -c \"import modin; print(modin.__version__)\"\r\n-->\r\n\r\n### Describe the problem\r\n<!-- Describe the problem clearly here. -->\r\nReading data from a S3 path, e.g. `read_csv`, `read_json`, behaves differently based on the capitalization of \"S\" in the path. See below code example.\r\n\r\n### Source code / logs\r\n<!-- Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached. Try to provide a reproducible test case that is the bare minimum necessary to generate the problem. 
-->\r\n```\r\nimport pandas as pd\r\nimport ray\r\nimport modin.pandas as mpd\r\n\r\nfilepath = \"s3://my-bucket/data/traffic.json\"\r\nfilepath_2 = \"s3://my-bucket/data/BikeSharingDaily.csv\"\r\nfilepath_3 = \"S3://my-bucket/data/BikeSharingDaily.csv\"\r\n\r\n# working\r\ndf_native = pd.read_json(filepath, lines=True)\r\ndf_native_2 = pd.read_csv(filepath_2)\r\n\r\n# not working (FileNotFoundError: [Errno 2] No such file or directory: 's3://my-bucket/data/traffic.json')\r\ndf_modin = mpd.read_json(filepath, lines=True)\r\n\r\n# working (but it prints, defaulting to pandas implementation)\r\ndf_modin_2 = mpd.read_csv(filepath_2)\r\n\r\n# working (no additional print)\r\ndf_modin_3 = mpd.read_csv(filepath_3)\r\n```\n", "before_files": [{"content": "import os\nimport re\n\nS3_ADDRESS_REGEX = re.compile(\"s3://(.*?)/(.*)\")\nNOT_IMPLEMENTED_MESSAGE = \"Implement in children classes!\"\n\n\nclass FileReader:\n frame_cls = None\n frame_partition_cls = None\n query_compiler_cls = None\n\n @classmethod\n def get_path(cls, file_path):\n if S3_ADDRESS_REGEX.search(file_path):\n return file_path\n else:\n return os.path.abspath(file_path)\n\n @classmethod\n def file_open(cls, file_path, mode=\"rb\", compression=\"infer\"):\n if isinstance(file_path, str):\n match = S3_ADDRESS_REGEX.search(file_path)\n if match:\n import s3fs as S3FS\n from botocore.exceptions import NoCredentialsError\n\n s3fs = S3FS.S3FileSystem(anon=False)\n try:\n return s3fs.open(file_path)\n except NoCredentialsError:\n s3fs = S3FS.S3FileSystem(anon=True)\n return s3fs.open(file_path)\n elif compression == \"gzip\":\n import gzip\n\n return gzip.open(file_path, mode=mode)\n elif compression == \"bz2\":\n import bz2\n\n return bz2.BZ2File(file_path, mode=mode)\n elif compression == \"xz\":\n import lzma\n\n return lzma.LZMAFile(file_path, mode=mode)\n elif compression == \"zip\":\n import zipfile\n\n zf = zipfile.ZipFile(file_path, mode=mode.replace(\"b\", \"\"))\n if zf.mode == \"w\":\n return zf\n elif zf.mode == \"r\":\n zip_names = zf.namelist()\n if len(zip_names) == 1:\n f = zf.open(zip_names.pop())\n return f\n elif len(zip_names) == 0:\n raise ValueError(\n \"Zero files found in ZIP file {}\".format(file_path)\n )\n else:\n raise ValueError(\n \"Multiple files found in ZIP file.\"\n \" Only one file per ZIP: {}\".format(zip_names)\n )\n\n return open(file_path, mode=mode)\n\n @classmethod\n def file_size(cls, f):\n cur_pos = f.tell()\n f.seek(0, os.SEEK_END)\n size = f.tell()\n f.seek(cur_pos, os.SEEK_SET)\n return size\n\n @classmethod\n def file_exists(cls, file_path):\n if isinstance(file_path, str):\n match = S3_ADDRESS_REGEX.search(file_path)\n if match:\n import s3fs as S3FS\n from botocore.exceptions import NoCredentialsError\n\n s3fs = S3FS.S3FileSystem(anon=False)\n exists = False\n try:\n exists = s3fs.exists(file_path) or exists\n except NoCredentialsError:\n pass\n s3fs = S3FS.S3FileSystem(anon=True)\n return exists or s3fs.exists(file_path)\n return os.path.exists(file_path)\n\n @classmethod\n def deploy(cls, func, args, num_return_vals):\n raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)\n\n def parse(self, func, args, num_return_vals):\n raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)\n\n @classmethod\n def materialize(cls, obj_id):\n raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)\n", "path": "modin/engines/base/io/file_reader.py"}]} | 1,932 | 331 |
gh_patches_debug_35876 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-127 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add testing against each PyTorch feature version
## 🚀 Feature
Add a conda setup for testing against all PyTorch feature releases such as 1.4, 1.5, 1.6, ...
### Motivation
Have better visibility into cases where some functions are not supported in older PyTorch versions.
### Pitch
### Alternatives
Use a CI action with a conda setup; there is probably no need to pull a large Docker image.
### Additional context
Take inspiration from the past conda matrix in PyTorch Lightning; a version-gating sketch for the test suite follows.
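Independent of the CI matrix itself, per-version gating inside the tests usually looks something like this (an illustrative sketch; the marker name is invented here):

```python
from distutils.version import LooseVersion

import pytest
import torch

_TORCH_LOWER_1_6 = LooseVersion(torch.__version__) < LooseVersion("1.6.0")

requires_torch_1_6 = pytest.mark.skipif(
    _TORCH_LOWER_1_6, reason="test requires torch>=1.6"
)

@requires_torch_1_6
def test_uses_new_op():
    assert hasattr(torch, "logcumsumexp")  # logcumsumexp was added in torch 1.6
```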
</issue>
<code>
[start of integrations/lightning_models.py]
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import torch
15 from pytorch_lightning import LightningModule
16 from torch.utils.data import Dataset
17
18
19 class RandomDictStringDataset(Dataset):
20
21 def __init__(self, size, length):
22 self.len = length
23 self.data = torch.randn(length, size)
24
25 def __getitem__(self, index):
26 return {"id": str(index), "x": self.data[index]}
27
28 def __len__(self):
29 return self.len
30
31
32 class RandomDataset(Dataset):
33
34 def __init__(self, size, length):
35 self.len = length
36 self.data = torch.randn(length, size)
37
38 def __getitem__(self, index):
39 return self.data[index]
40
41 def __len__(self):
42 return self.len
43
44
45 class BoringModel(LightningModule):
46
47 def __init__(self):
48 """
49 Testing PL Module
50
51 Use as follows:
52 - subclass
53 - modify the behavior for what you want
54
55 class TestModel(BaseTestModel):
56 def training_step(...):
57 # do your own thing
58
59 or:
60
61 model = BaseTestModel()
62 model.training_epoch_end = None
63
64 """
65 super().__init__()
66 self.layer = torch.nn.Linear(32, 2)
67
68 def forward(self, x):
69 return self.layer(x)
70
71 @staticmethod
72 def loss(_, prediction):
73 # An arbitrary loss to have a loss that updates the model weights during `Trainer.fit` calls
74 return torch.nn.functional.mse_loss(prediction, torch.ones_like(prediction))
75
76 def step(self, x):
77 x = self(x)
78 out = torch.nn.functional.mse_loss(x, torch.ones_like(x))
79 return out
80
81 def training_step(self, batch, batch_idx):
82 output = self.layer(batch)
83 loss = self.loss(batch, output)
84 return {"loss": loss}
85
86 def training_step_end(self, training_step_outputs):
87 return training_step_outputs
88
89 def training_epoch_end(self, outputs) -> None:
90 torch.stack([x["loss"] for x in outputs]).mean()
91
92 def validation_step(self, batch, batch_idx):
93 output = self.layer(batch)
94 loss = self.loss(batch, output)
95 return {"x": loss}
96
97 def validation_epoch_end(self, outputs) -> None:
98 torch.stack([x['x'] for x in outputs]).mean()
99
100 def test_step(self, batch, batch_idx):
101 output = self.layer(batch)
102 loss = self.loss(batch, output)
103 return {"y": loss}
104
105 def test_epoch_end(self, outputs) -> None:
106 torch.stack([x["y"] for x in outputs]).mean()
107
108 def configure_optimizers(self):
109 optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
110 lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
111 return [optimizer], [lr_scheduler]
112
113 def train_dataloader(self):
114 return torch.utils.data.DataLoader(RandomDataset(32, 64))
115
116 def val_dataloader(self):
117 return torch.utils.data.DataLoader(RandomDataset(32, 64))
118
119 def test_dataloader(self):
120 return torch.utils.data.DataLoader(RandomDataset(32, 64))
121
[end of integrations/lightning_models.py]
[start of torchmetrics/utilities/imports.py]
1 from distutils.version import LooseVersion
2
3 import torch
4
5 _TORCH_LOWER_1_4 = LooseVersion(torch.__version__) < LooseVersion("1.4.0")
6 _TORCH_LOWER_1_5 = LooseVersion(torch.__version__) < LooseVersion("1.5.0")
7 _TORCH_LOWER_1_6 = LooseVersion(torch.__version__) < LooseVersion("1.6.0")
8
[end of torchmetrics/utilities/imports.py]
[start of integrations/__init__.py]
[end of integrations/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/integrations/__init__.py b/integrations/__init__.py
--- a/integrations/__init__.py
+++ b/integrations/__init__.py
@@ -0,0 +1,3 @@
+from torchmetrics.utilities.imports import _module_available
+
+_PL_AVAILABLE = _module_available('pytorch_lightning')
diff --git a/integrations/lightning_models.py b/integrations/lightning_models.py
--- a/integrations/lightning_models.py
+++ b/integrations/lightning_models.py
@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+
import torch
from pytorch_lightning import LightningModule
from torch.utils.data import Dataset
diff --git a/torchmetrics/utilities/imports.py b/torchmetrics/utilities/imports.py
--- a/torchmetrics/utilities/imports.py
+++ b/torchmetrics/utilities/imports.py
@@ -1,6 +1,64 @@
+# Copyright The PyTorch Lightning team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
from distutils.version import LooseVersion
+from importlib import import_module
+from importlib.util import find_spec
import torch
+from pkg_resources import DistributionNotFound
+
+
+def _module_available(module_path: str) -> bool:
+ """
+ Check if a path is available in your environment
+
+ >>> _module_available('os')
+ True
+ >>> _module_available('bla.bla')
+ False
+ """
+ try:
+ return find_spec(module_path) is not None
+ except AttributeError:
+ # Python 3.6
+ return False
+ except ModuleNotFoundError:
+ # Python 3.7+
+ return False
+
+
+def _compare_version(package: str, op, version) -> bool:
+ """
+ Compare package version with some requirements
+
+ >>> import operator
+ >>> _compare_version("torch", operator.ge, "0.1")
+ True
+ """
+ try:
+ pkg = import_module(package)
+ except (ModuleNotFoundError, DistributionNotFound):
+ return False
+ try:
+ pkg_version = LooseVersion(pkg.__version__)
+ except AttributeError:
+ return False
+ if not (hasattr(pkg_version, "vstring") and hasattr(pkg_version, "version")):
+ # this is mock by sphinx, so it shall return True ro generate all summaries
+ return True
+ return op(pkg_version, LooseVersion(version))
+
_TORCH_LOWER_1_4 = LooseVersion(torch.__version__) < LooseVersion("1.4.0")
_TORCH_LOWER_1_5 = LooseVersion(torch.__version__) < LooseVersion("1.5.0")
| {"golden_diff": "diff --git a/integrations/__init__.py b/integrations/__init__.py\n--- a/integrations/__init__.py\n+++ b/integrations/__init__.py\n@@ -0,0 +1,3 @@\n+from torchmetrics.utilities.imports import _module_available\n+\n+_PL_AVAILABLE = _module_available('pytorch_lightning')\ndiff --git a/integrations/lightning_models.py b/integrations/lightning_models.py\n--- a/integrations/lightning_models.py\n+++ b/integrations/lightning_models.py\n@@ -11,6 +11,7 @@\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n+\n import torch\n from pytorch_lightning import LightningModule\n from torch.utils.data import Dataset\ndiff --git a/torchmetrics/utilities/imports.py b/torchmetrics/utilities/imports.py\n--- a/torchmetrics/utilities/imports.py\n+++ b/torchmetrics/utilities/imports.py\n@@ -1,6 +1,64 @@\n+# Copyright The PyTorch Lightning team.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n from distutils.version import LooseVersion\n+from importlib import import_module\n+from importlib.util import find_spec\n \n import torch\n+from pkg_resources import DistributionNotFound\n+\n+\n+def _module_available(module_path: str) -> bool:\n+ \"\"\"\n+ Check if a path is available in your environment\n+\n+ >>> _module_available('os')\n+ True\n+ >>> _module_available('bla.bla')\n+ False\n+ \"\"\"\n+ try:\n+ return find_spec(module_path) is not None\n+ except AttributeError:\n+ # Python 3.6\n+ return False\n+ except ModuleNotFoundError:\n+ # Python 3.7+\n+ return False\n+\n+\n+def _compare_version(package: str, op, version) -> bool:\n+ \"\"\"\n+ Compare package version with some requirements\n+\n+ >>> import operator\n+ >>> _compare_version(\"torch\", operator.ge, \"0.1\")\n+ True\n+ \"\"\"\n+ try:\n+ pkg = import_module(package)\n+ except (ModuleNotFoundError, DistributionNotFound):\n+ return False\n+ try:\n+ pkg_version = LooseVersion(pkg.__version__)\n+ except AttributeError:\n+ return False\n+ if not (hasattr(pkg_version, \"vstring\") and hasattr(pkg_version, \"version\")):\n+ # this is mock by sphinx, so it shall return True ro generate all summaries\n+ return True\n+ return op(pkg_version, LooseVersion(version))\n+\n \n _TORCH_LOWER_1_4 = LooseVersion(torch.__version__) < LooseVersion(\"1.4.0\")\n _TORCH_LOWER_1_5 = LooseVersion(torch.__version__) < LooseVersion(\"1.5.0\")\n", "issue": "Add testing agains each feat PT version\n## \ud83d\ude80 Feature\r\n\r\nAdd a conda setup for testing against all PyTorch feature releases such as 1.4, 1.5, 1.6, ...\r\n\r\n### Motivation\r\n\r\nhave better validation if some functions are not supported in old PT versions\r\n\r\n### Pitch\r\n\r\n<!-- A clear and concise description of what you want to happen. 
-->\r\n\r\n### Alternatives\r\n\r\nuse CI action with conda setup, probably no need for pull large docker image\r\n\r\n### Additional context\r\n\r\ntake inspiration from past Conda matrix in PL\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport torch\nfrom pytorch_lightning import LightningModule\nfrom torch.utils.data import Dataset\n\n\nclass RandomDictStringDataset(Dataset):\n\n def __init__(self, size, length):\n self.len = length\n self.data = torch.randn(length, size)\n\n def __getitem__(self, index):\n return {\"id\": str(index), \"x\": self.data[index]}\n\n def __len__(self):\n return self.len\n\n\nclass RandomDataset(Dataset):\n\n def __init__(self, size, length):\n self.len = length\n self.data = torch.randn(length, size)\n\n def __getitem__(self, index):\n return self.data[index]\n\n def __len__(self):\n return self.len\n\n\nclass BoringModel(LightningModule):\n\n def __init__(self):\n \"\"\"\n Testing PL Module\n\n Use as follows:\n - subclass\n - modify the behavior for what you want\n\n class TestModel(BaseTestModel):\n def training_step(...):\n # do your own thing\n\n or:\n\n model = BaseTestModel()\n model.training_epoch_end = None\n\n \"\"\"\n super().__init__()\n self.layer = torch.nn.Linear(32, 2)\n\n def forward(self, x):\n return self.layer(x)\n\n @staticmethod\n def loss(_, prediction):\n # An arbitrary loss to have a loss that updates the model weights during `Trainer.fit` calls\n return torch.nn.functional.mse_loss(prediction, torch.ones_like(prediction))\n\n def step(self, x):\n x = self(x)\n out = torch.nn.functional.mse_loss(x, torch.ones_like(x))\n return out\n\n def training_step(self, batch, batch_idx):\n output = self.layer(batch)\n loss = self.loss(batch, output)\n return {\"loss\": loss}\n\n def training_step_end(self, training_step_outputs):\n return training_step_outputs\n\n def training_epoch_end(self, outputs) -> None:\n torch.stack([x[\"loss\"] for x in outputs]).mean()\n\n def validation_step(self, batch, batch_idx):\n output = self.layer(batch)\n loss = self.loss(batch, output)\n return {\"x\": loss}\n\n def validation_epoch_end(self, outputs) -> None:\n torch.stack([x['x'] for x in outputs]).mean()\n\n def test_step(self, batch, batch_idx):\n output = self.layer(batch)\n loss = self.loss(batch, output)\n return {\"y\": loss}\n\n def test_epoch_end(self, outputs) -> None:\n torch.stack([x[\"y\"] for x in outputs]).mean()\n\n def configure_optimizers(self):\n optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)\n return [optimizer], [lr_scheduler]\n\n def train_dataloader(self):\n return torch.utils.data.DataLoader(RandomDataset(32, 64))\n\n def val_dataloader(self):\n return torch.utils.data.DataLoader(RandomDataset(32, 64))\n\n def test_dataloader(self):\n return torch.utils.data.DataLoader(RandomDataset(32, 64))\n", "path": "integrations/lightning_models.py"}, {"content": "from 
distutils.version import LooseVersion\n\nimport torch\n\n_TORCH_LOWER_1_4 = LooseVersion(torch.__version__) < LooseVersion(\"1.4.0\")\n_TORCH_LOWER_1_5 = LooseVersion(torch.__version__) < LooseVersion(\"1.5.0\")\n_TORCH_LOWER_1_6 = LooseVersion(torch.__version__) < LooseVersion(\"1.6.0\")\n", "path": "torchmetrics/utilities/imports.py"}, {"content": "", "path": "integrations/__init__.py"}]} | 1,876 | 745 |
gh_patches_debug_14606 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-5301 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Version comparison has lost its sidebar
## Steps to reproduce
- take a tutorial with several edits
- click on "compare versions"
- select two versions

**Observed behavior**
The sidebar does not appear on the version comparison page.
**Expected behavior**
The sidebar is there.
</issue>
<code>
[start of zds/utils/templatetags/htmldiff.py]
1 from difflib import HtmlDiff
2 from django import template
3 from django.utils.html import format_html
4 from django.utils.safestring import mark_safe
5 from django.utils.translation import ugettext_lazy as _
6
7
8 register = template.Library()
9
10
11 @register.simple_tag
12 def htmldiff(string1, string2):
13
14 try:
15 txt1 = string1.decode('utf-8').splitlines()
16 # string1 is an empty SafeText from template
17 except AttributeError:
18 txt1 = string1.splitlines()
19
20 try:
21 txt2 = string2.decode('utf-8').splitlines()
22 except AttributeError:
23 txt2 = string2.splitlines()
24
25 diff = HtmlDiff(tabsize=4, wrapcolumn=80)
26 result = diff.make_table(txt1, txt2, context=True, numlines=2)
27
28 if 'No Differences Found' in result:
29 return format_html('<p>{}</p>', _('Pas de changements.'))
30 else:
31 return format_html('<div class="diff_delta">{}</div>', mark_safe(result))
32
[end of zds/utils/templatetags/htmldiff.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

diff --git a/zds/utils/templatetags/htmldiff.py b/zds/utils/templatetags/htmldiff.py
--- a/zds/utils/templatetags/htmldiff.py
+++ b/zds/utils/templatetags/htmldiff.py
@@ -22,10 +22,12 @@
except AttributeError:
txt2 = string2.splitlines()
- diff = HtmlDiff(tabsize=4, wrapcolumn=80)
+ diff = HtmlDiff(tabsize=4)
result = diff.make_table(txt1, txt2, context=True, numlines=2)
if 'No Differences Found' in result:
return format_html('<p>{}</p>', _('Pas de changements.'))
else:
- return format_html('<div class="diff_delta">{}</div>', mark_safe(result))
+ # the diff.make_table() replaces all spaces by non-breakable ones, which prevent line breaks:
+ r = mark_safe(result.replace('<td nowrap="nowrap">', '<td>').replace(' ', ' '))
+ return format_html('<div class="diff_delta">{}</div>', r)
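
For context, the root cause is easy to demonstrate with the standard library alone. A minimal sketch (exact markup may vary across Python versions): `difflib.HtmlDiff` pads cells with non-breaking spaces and `nowrap` attributes, which is what prevented line wrapping and broke the comparison page layout. The fix drops `wrapcolumn=80` (difflib's own wrapping) and restores normal, breakable spaces instead.

```python
from difflib import HtmlDiff

# difflib emits nowrap cells and non-breaking spaces in its HTML table:
table = HtmlDiff(tabsize=4).make_table(['some text'], ['some text changed'],
                                       context=True, numlines=2)
assert 'nowrap' in table and '&nbsp;' in table

# the fix post-processes the table so normal line breaking works again:
cleaned = table.replace('<td nowrap="nowrap">', '<td>').replace('&nbsp;', ' ')
```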
| {"golden_diff": "diff --git a/zds/utils/templatetags/htmldiff.py b/zds/utils/templatetags/htmldiff.py\n--- a/zds/utils/templatetags/htmldiff.py\n+++ b/zds/utils/templatetags/htmldiff.py\n@@ -22,10 +22,12 @@\n except AttributeError:\n txt2 = string2.splitlines()\n \n- diff = HtmlDiff(tabsize=4, wrapcolumn=80)\n+ diff = HtmlDiff(tabsize=4)\n result = diff.make_table(txt1, txt2, context=True, numlines=2)\n \n if 'No Differences Found' in result:\n return format_html('<p>{}</p>', _('Pas de changements.'))\n else:\n- return format_html('<div class=\"diff_delta\">{}</div>', mark_safe(result))\n+ # the diff.make_table() replaces all spaces by non-breakable ones, which prevent line breaks:\n+ r = mark_safe(result.replace('<td nowrap=\"nowrap\">', '<td>').replace(' ', ' '))\n+ return format_html('<div class=\"diff_delta\">{}</div>', r)\n", "issue": "La comparaison des versions a perdu sa sidebar\n## \u00c9tapes pour reproduire \r\n\r\n- prenez un tuto avec plusieurs \u00e9ditions\r\n- cliquer sur \"comparer les versions\"\r\n- s\u00e9lectionner deux versions\r\n\r\n**Comportement observ\u00e9**\r\n\r\nla sidebar n'appara\u00eet pas dans la page de comparaison des versions\r\n\r\n**Comportement d\u00e9sir\u00e9**\r\n\r\nLa sidebar est l\u00e0.\n", "before_files": [{"content": "from difflib import HtmlDiff\nfrom django import template\nfrom django.utils.html import format_html\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import ugettext_lazy as _\n\n\nregister = template.Library()\n\n\[email protected]_tag\ndef htmldiff(string1, string2):\n\n try:\n txt1 = string1.decode('utf-8').splitlines()\n # string1 is an empty SafeText from template\n except AttributeError:\n txt1 = string1.splitlines()\n\n try:\n txt2 = string2.decode('utf-8').splitlines()\n except AttributeError:\n txt2 = string2.splitlines()\n\n diff = HtmlDiff(tabsize=4, wrapcolumn=80)\n result = diff.make_table(txt1, txt2, context=True, numlines=2)\n\n if 'No Differences Found' in result:\n return format_html('<p>{}</p>', _('Pas de changements.'))\n else:\n return format_html('<div class=\"diff_delta\">{}</div>', mark_safe(result))\n", "path": "zds/utils/templatetags/htmldiff.py"}]} | 919 | 253 |
gh_patches_debug_3292 | rasdani/github-patches | git_diff | getmoto__moto-2305 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
API Gateway generates invalid IDs
Generated IDs in API Gateway resources look like `A-Z601A-Z47201`: they contain the literal string `A-Z` instead of randomly chosen letters.
</issue>
<code>
[start of moto/apigateway/utils.py]
1 from __future__ import unicode_literals
2 import six
3 import random
4
5
6 def create_id():
7 size = 10
8 chars = list(range(10)) + ['A-Z']
9 return ''.join(six.text_type(random.choice(chars)) for x in range(size))
10
[end of moto/apigateway/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

diff --git a/moto/apigateway/utils.py b/moto/apigateway/utils.py
--- a/moto/apigateway/utils.py
+++ b/moto/apigateway/utils.py
@@ -1,9 +1,10 @@
from __future__ import unicode_literals
import six
import random
+import string
def create_id():
size = 10
- chars = list(range(10)) + ['A-Z']
+ chars = list(range(10)) + list(string.ascii_lowercase)
return ''.join(six.text_type(random.choice(chars)) for x in range(size))
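
The bug reproduces in two lines: `'A-Z'` is just one more element of the choice list, so `random.choice` occasionally returns the whole three-character string.

```python
import random
import string

buggy_chars = list(range(10)) + ['A-Z']                      # 11 choices total
fixed_chars = list(range(10)) + list(string.ascii_lowercase)

print(''.join(str(random.choice(buggy_chars)) for _ in range(10)))
# e.g. 'A-Z601A-Z47201': the literal string 'A-Z' leaks into the ID
print(''.join(str(random.choice(fixed_chars)) for _ in range(10)))
# e.g. '3k80qv1d27': digits and lowercase letters, as intended
```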
| {"golden_diff": "diff --git a/moto/apigateway/utils.py b/moto/apigateway/utils.py\n--- a/moto/apigateway/utils.py\n+++ b/moto/apigateway/utils.py\n@@ -1,9 +1,10 @@\n from __future__ import unicode_literals\n import six\n import random\n+import string\n \n \n def create_id():\n size = 10\n- chars = list(range(10)) + ['A-Z']\n+ chars = list(range(10)) + list(string.ascii_lowercase)\n return ''.join(six.text_type(random.choice(chars)) for x in range(size))\n", "issue": "API Gateway generates invalid IDs\nGenerated IDs in API Gateway resources look like `A-Z601A-Z47201`. They contain `A-Z` instead of letters.\n", "before_files": [{"content": "from __future__ import unicode_literals\nimport six\nimport random\n\n\ndef create_id():\n size = 10\n chars = list(range(10)) + ['A-Z']\n return ''.join(six.text_type(random.choice(chars)) for x in range(size))\n", "path": "moto/apigateway/utils.py"}]} | 646 | 131 |
gh_patches_debug_28024 | rasdani/github-patches | git_diff | google__flax-1311 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
threading.Condition.notifyAll has been deprecated in favour of notify_all in Python 3.10
### Problem you have encountered:
`threading.Condition.notifyAll` has been deprecated in favour of `notify_all` in Python 3.10. Ref: python/cpython#25174
### What you expected to happen:
Use `notify_all` in the places below:
```
rg -t py -w 'currentThread|notifyAll|activeCount|isDaemon|setDaemon'
flax/training/prefetch_iterator.py
58: self._cond.notifyAll()
68: self._cond.notifyAll()
80: self._cond.notifyAll()
88: self._cond.notifyAll()
```
</issue>
<code>
[start of flax/training/prefetch_iterator.py]
1 # Copyright 2021 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Utility for constructing an iterator which prefetches data asynchronously.
16 """
17
18 import threading
19 import warnings
20
21
22 class PrefetchIterator:
23 """Wraps an iterator to provide async prefetching.
24
25 DEPRECATION WARNING:
26 TensorFlow datasets no longer require manual prefetching.
27
28 Previously this class was used to make data loading using TensorFlow datasets
29 more efficient. Now TF data handles prefetching with NumPy iterators
30 correctly.
31
32 Example::
33
34 tf_iter = dataset.as_numpy_iterator() # only loads data while calling next
35 tf_iter = PrefetchIterator(tf_iter) # prefetches data in the background
36
37 """
38
39 def __init__(self, data_iter, buffer_size=1):
40 """Construct a PrefetchIterator.
41
42 Args:
43 data_iter: the Iterator that should be prefetched.
44 buffer_size: how many items to prefetch (default: 1).
45 """
46 warnings.warn('PrefetchIterator is deprecated. Use the standard `tf.data`'
47 ' prefetch method instead', DeprecationWarning)
48
49 self._data_iter = data_iter
50 self.buffer_size = buffer_size
51 self._cond = threading.Condition()
52 self._buffer = []
53 self._active = True
54 self._thread = threading.Thread(target=self._prefetch_loop, daemon=True)
55 self._thread.start()
56 self._error = None
57
58 def __iter__(self):
59 return self
60
61 def __next__(self):
62 with self._cond:
63 self._cond.wait_for(lambda: self._buffer or not self._active)
64 if self._buffer:
65 item = self._buffer.pop(0)
66 self._cond.notifyAll()
67 return item
68 if self._error:
69 raise self._error # pylint: disable=raising-bad-type
70 assert not self._active
71 raise StopIteration()
72
73 def close(self):
74 with self._cond:
75 self._active = False
76 self._cond.notifyAll()
77
78 def _prefetch_loop(self):
79 """Prefetch loop that prefetches a tf dataset."""
80 def _predicate():
81 return len(self._buffer) < self.buffer_size or not self._active
82
83 while True:
84 try:
85 item = next(self._data_iter)
86 with self._cond:
87 self._buffer.append(item)
88 self._cond.notifyAll()
89 self._cond.wait_for(_predicate)
90 if not self._active:
91 return
92 except Exception as e: # pylint: disable=broad-except
93 with self._cond:
94 self._error = e
95 self._active = False
96 self._cond.notifyAll()
97 return
98
[end of flax/training/prefetch_iterator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

diff --git a/flax/training/prefetch_iterator.py b/flax/training/prefetch_iterator.py
--- a/flax/training/prefetch_iterator.py
+++ b/flax/training/prefetch_iterator.py
@@ -55,7 +55,7 @@
self._cond.wait_for(lambda: self._buffer or not self._active)
if self._buffer:
item = self._buffer.pop(0)
- self._cond.notifyAll()
+ self._cond.notify_all()
return item
if self._error:
raise self._error # pylint: disable=raising-bad-type
@@ -65,7 +65,7 @@
def close(self):
with self._cond:
self._active = False
- self._cond.notifyAll()
+ self._cond.notify_all()
def _prefetch_loop(self):
"""Prefetch loop that prefetches a tf dataset."""
@@ -77,7 +77,7 @@
item = next(self._data_iter)
with self._cond:
self._buffer.append(item)
- self._cond.notifyAll()
+ self._cond.notify_all()
self._cond.wait_for(_predicate)
if not self._active:
return
@@ -85,5 +85,5 @@
with self._cond:
self._error = e
self._active = False
- self._cond.notifyAll()
+ self._cond.notify_all()
return
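
The rename is mechanical: `notify_all` has existed as the preferred spelling since Python 2.6, so only the camelCase alias changes. A minimal check (the alias still works, but warns on 3.10+):

```python
import threading

cond = threading.Condition()
with cond:
    cond.notify_all()   # preferred spelling, available since Python 2.6
    cond.notifyAll()    # deprecated alias; DeprecationWarning on Python 3.10+
```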
| {"golden_diff": "diff --git a/flax/training/prefetch_iterator.py b/flax/training/prefetch_iterator.py\n--- a/flax/training/prefetch_iterator.py\n+++ b/flax/training/prefetch_iterator.py\n@@ -55,7 +55,7 @@\n self._cond.wait_for(lambda: self._buffer or not self._active)\n if self._buffer:\n item = self._buffer.pop(0)\n- self._cond.notifyAll()\n+ self._cond.notify_all()\n return item\n if self._error:\n raise self._error # pylint: disable=raising-bad-type\n@@ -65,7 +65,7 @@\n def close(self):\n with self._cond:\n self._active = False\n- self._cond.notifyAll()\n+ self._cond.notify_all()\n \n def _prefetch_loop(self):\n \"\"\"Prefetch loop that prefetches a tf dataset.\"\"\"\n@@ -77,7 +77,7 @@\n item = next(self._data_iter)\n with self._cond:\n self._buffer.append(item)\n- self._cond.notifyAll()\n+ self._cond.notify_all()\n self._cond.wait_for(_predicate)\n if not self._active:\n return\n@@ -85,5 +85,5 @@\n with self._cond:\n self._error = e\n self._active = False\n- self._cond.notifyAll()\n+ self._cond.notify_all()\n return\n", "issue": " threading.Condition.notifyAll has been deprecated in favour of notify_all in Python 3.10\n### Problem you have encountered:\r\n\r\n `threading.Condition.notifyAll` has been deprecated in favour of `notify_all` in Python 3.10. Ref : python/cpython#25174\r\n\r\n### What you expected to happen:\r\n\r\nuse `notify_all` in below places.\r\n\r\n```\r\nrg -t py -w 'currentThread|notifyAll|activeCount|isDaemon|setDaemon' \r\nflax/training/prefetch_iterator.py\r\n58: self._cond.notifyAll()\r\n68: self._cond.notifyAll()\r\n80: self._cond.notifyAll()\r\n88: self._cond.notifyAll()\r\n```\n", "before_files": [{"content": "# Copyright 2021 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utility for constructing an iterator which prefetches data asynchronously.\n\"\"\"\n\nimport threading\nimport warnings\n\n\nclass PrefetchIterator:\n \"\"\"Wraps an iterator to provide async prefetching.\n\n DEPRECATION WARNING:\n TensorFlow datasets no longer require manual prefetching.\n\n Previously this class was used to make data loading using TensorFlow datasets\n more efficient. Now TF data handles prefetching with NumPy iterators\n correctly.\n\n Example::\n\n tf_iter = dataset.as_numpy_iterator() # only loads data while calling next\n tf_iter = PrefetchIterator(tf_iter) # prefetches data in the background\n\n \"\"\"\n\n def __init__(self, data_iter, buffer_size=1):\n \"\"\"Construct a PrefetchIterator.\n\n Args:\n data_iter: the Iterator that should be prefetched.\n buffer_size: how many items to prefetch (default: 1).\n \"\"\"\n warnings.warn('PrefetchIterator is deprecated. 
Use the standard `tf.data`'\n ' prefetch method instead', DeprecationWarning)\n\n self._data_iter = data_iter\n self.buffer_size = buffer_size\n self._cond = threading.Condition()\n self._buffer = []\n self._active = True\n self._thread = threading.Thread(target=self._prefetch_loop, daemon=True)\n self._thread.start()\n self._error = None\n\n def __iter__(self):\n return self\n\n def __next__(self):\n with self._cond:\n self._cond.wait_for(lambda: self._buffer or not self._active)\n if self._buffer:\n item = self._buffer.pop(0)\n self._cond.notifyAll()\n return item\n if self._error:\n raise self._error # pylint: disable=raising-bad-type\n assert not self._active\n raise StopIteration()\n\n def close(self):\n with self._cond:\n self._active = False\n self._cond.notifyAll()\n\n def _prefetch_loop(self):\n \"\"\"Prefetch loop that prefetches a tf dataset.\"\"\"\n def _predicate():\n return len(self._buffer) < self.buffer_size or not self._active\n\n while True:\n try:\n item = next(self._data_iter)\n with self._cond:\n self._buffer.append(item)\n self._cond.notifyAll()\n self._cond.wait_for(_predicate)\n if not self._active:\n return\n except Exception as e: # pylint: disable=broad-except\n with self._cond:\n self._error = e\n self._active = False\n self._cond.notifyAll()\n return\n", "path": "flax/training/prefetch_iterator.py"}]} | 1,584 | 323 |
gh_patches_debug_11730 | rasdani/github-patches | git_diff | ckan__ckan-7077 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Search indexing logic called twice after update or create dataset
**CKAN version**
2.10
**Describe the bug**
When updating or creating a dataset, we are indexing the dataset twice in a row, i.e. the [`index_package()`](https://github.com/ckan/ckan/blob/9f1b5cfaff8c135b589e2ea0275f1286c2e02711/ckan/lib/search/index.py#L108) function gets called twice during the same operation (and of course any `IPackageController.before_index()` hook gets called twice as well).

The root cause is the obscure code run in the [`DomainObjectModificationExtension`](https://github.com/ckan/ckan/blob/9f1b5cfaff8c135b589e2ea0275f1286c2e02711/ckan/model/modification.py#L27), which considers the same Package object both new and changed at the same time, and fires two separate notification events.
</issue>
<code>
[start of ckan/model/modification.py]
1 # encoding: utf-8
2
3 import logging
4 from typing import Any
5
6 from ckan.lib.search import SearchIndexError
7
8 import ckan.plugins as plugins
9 import ckan.model as model
10
11 log = logging.getLogger(__name__)
12
13 __all__ = ['DomainObjectModificationExtension']
14
15
16 class DomainObjectModificationExtension(plugins.SingletonPlugin):
17 """
18 Notify observers about domain object modifications before commit.
19
20 Observers are other plugins implementing the IDomainObjectModification
21 interface.
22 """
23
24 def before_commit(self, session: Any):
25 self.notify_observers(session, self.notify)
26
27 def notify_observers(self, session: Any, method: Any):
28 session.flush()
29 if not hasattr(session, '_object_cache'):
30 return
31
32 obj_cache = session._object_cache
33 new = obj_cache['new']
34 changed = obj_cache['changed']
35 deleted = obj_cache['deleted']
36
37 for obj in set(new):
38 if isinstance(obj, (model.Package, model.Resource)):
39 method(obj, model.DomainObjectOperation.new)
40 for obj in set(deleted):
41 if isinstance(obj, (model.Package, model.Resource)):
42 method(obj, model.DomainObjectOperation.deleted)
43 for obj in set(changed):
44 if isinstance(obj, model.Resource):
45 method(obj, model.DomainObjectOperation.changed)
46 if getattr(obj, 'url_changed', False):
47 for item in plugins.PluginImplementations(plugins.IResourceUrlChange):
48 item.notify(obj)
49
50 changed_pkgs = set(obj for obj in changed
51 if isinstance(obj, model.Package))
52
53 for obj in new | changed | deleted:
54 if not isinstance(obj, model.Package):
55 try:
56 changed_pkgs.update(obj.related_packages())
57 except AttributeError:
58 continue
59
60 for obj in changed_pkgs:
61 method(obj, model.DomainObjectOperation.changed)
62
63 def notify(self, entity: Any, operation: Any):
64 for observer in plugins.PluginImplementations(
65 plugins.IDomainObjectModification):
66 try:
67 observer.notify(entity, operation)
68 except SearchIndexError as search_error:
69 log.exception(search_error)
70 # Reraise, since it's pretty crucial to ckan if it can't index
71 # a dataset
72 raise
73 except Exception as ex:
74 log.exception(ex)
75 # Don't reraise other exceptions since they are generally of
76 # secondary importance so shouldn't disrupt the commit.
77
[end of ckan/model/modification.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

diff --git a/ckan/model/modification.py b/ckan/model/modification.py
--- a/ckan/model/modification.py
+++ b/ckan/model/modification.py
@@ -47,8 +47,12 @@
for item in plugins.PluginImplementations(plugins.IResourceUrlChange):
item.notify(obj)
- changed_pkgs = set(obj for obj in changed
- if isinstance(obj, model.Package))
+
+ changed_pkgs = set()
+ new_pkg_ids = [obj.id for obj in new if isinstance(obj, model.Package)]
+ for obj in changed:
+ if isinstance(obj, model.Package) and obj.id not in new_pkg_ids:
+ changed_pkgs.add(obj)
for obj in new | changed | deleted:
if not isinstance(obj, model.Package):
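
The shape of the fix is easiest to see with toy objects (a hypothetical sketch, not CKAN code): anything reported as new is filtered out of the changed set, so each package is notified, and therefore indexed, exactly once.

```python
class Pkg:
    def __init__(self, id):
        self.id = id

pkg_a, pkg_b = Pkg('a'), Pkg('b')
new = {pkg_a}             # freshly created package
changed = {pkg_a, pkg_b}  # SQLAlchemy may report the new object as changed too

new_pkg_ids = [obj.id for obj in new]
changed_pkgs = {obj for obj in changed if obj.id not in new_pkg_ids}
assert changed_pkgs == {pkg_b}  # pkg_a is notified once, via the "new" path
```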
| {"golden_diff": "diff --git a/ckan/model/modification.py b/ckan/model/modification.py\n--- a/ckan/model/modification.py\n+++ b/ckan/model/modification.py\n@@ -47,8 +47,12 @@\n for item in plugins.PluginImplementations(plugins.IResourceUrlChange):\n item.notify(obj)\n \n- changed_pkgs = set(obj for obj in changed\n- if isinstance(obj, model.Package))\n+\n+ changed_pkgs = set()\n+ new_pkg_ids = [obj.id for obj in new if isinstance(obj, model.Package)]\n+ for obj in changed:\n+ if isinstance(obj, model.Package) and obj.id not in new_pkg_ids:\n+ changed_pkgs.add(obj)\n \n for obj in new | changed | deleted:\n if not isinstance(obj, model.Package):\n", "issue": "Search indexing logic called twice after update or create dataset\n**CKAN version**\r\n2.10\r\n\r\n\r\n**Describe the bug**\r\n\r\nWhen updating or creating a dataset, we are indexing the dataset twice in a row, ie the [`index_package()`](https://github.com/ckan/ckan/blob/9f1b5cfaff8c135b589e2ea0275f1286c2e02711/ckan/lib/search/index.py#L108) function gets called twice during the same operation (and of course any `IPackageController.before_index()` hook gets called twice as well.\r\n\r\nThe root cause is the the obscure code run in the [`DomainObjectModificationExtension`](https://github.com/ckan/ckan/blob/9f1b5cfaff8c135b589e2ea0275f1286c2e02711/ckan/model/modification.py#L27), which considers the same Package object both new and changed at the same time, and fires two separate notification events.\n", "before_files": [{"content": "# encoding: utf-8\n\nimport logging\nfrom typing import Any\n\nfrom ckan.lib.search import SearchIndexError\n\nimport ckan.plugins as plugins\nimport ckan.model as model\n\nlog = logging.getLogger(__name__)\n\n__all__ = ['DomainObjectModificationExtension']\n\n\nclass DomainObjectModificationExtension(plugins.SingletonPlugin):\n \"\"\"\n Notify observers about domain object modifications before commit.\n\n Observers are other plugins implementing the IDomainObjectModification\n interface.\n \"\"\"\n\n def before_commit(self, session: Any):\n self.notify_observers(session, self.notify)\n\n def notify_observers(self, session: Any, method: Any):\n session.flush()\n if not hasattr(session, '_object_cache'):\n return\n\n obj_cache = session._object_cache\n new = obj_cache['new']\n changed = obj_cache['changed']\n deleted = obj_cache['deleted']\n\n for obj in set(new):\n if isinstance(obj, (model.Package, model.Resource)):\n method(obj, model.DomainObjectOperation.new)\n for obj in set(deleted):\n if isinstance(obj, (model.Package, model.Resource)):\n method(obj, model.DomainObjectOperation.deleted)\n for obj in set(changed):\n if isinstance(obj, model.Resource):\n method(obj, model.DomainObjectOperation.changed)\n if getattr(obj, 'url_changed', False):\n for item in plugins.PluginImplementations(plugins.IResourceUrlChange):\n item.notify(obj)\n\n changed_pkgs = set(obj for obj in changed\n if isinstance(obj, model.Package))\n\n for obj in new | changed | deleted:\n if not isinstance(obj, model.Package):\n try:\n changed_pkgs.update(obj.related_packages())\n except AttributeError:\n continue\n\n for obj in changed_pkgs:\n method(obj, model.DomainObjectOperation.changed)\n\n def notify(self, entity: Any, operation: Any):\n for observer in plugins.PluginImplementations(\n plugins.IDomainObjectModification):\n try:\n observer.notify(entity, operation)\n except SearchIndexError as search_error:\n log.exception(search_error)\n # Reraise, since it's pretty crucial to ckan if it can't index\n # a dataset\n raise\n 
except Exception as ex:\n log.exception(ex)\n # Don't reraise other exceptions since they are generally of\n # secondary importance so shouldn't disrupt the commit.\n", "path": "ckan/model/modification.py"}]} | 1,429 | 179 |
gh_patches_debug_50780 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-3745 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Explicitly depend on setuptools
Context: #3295
We should explicitly depend on a minimum version of setuptools to get around problems installing our packages if setuptools is too old.
</issue>
<code>
[start of core/setup.py]
1 # Copyright 2016 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16
17 from setuptools import find_packages
18 from setuptools import setup
19
20
21 PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))
22
23 with open(os.path.join(PACKAGE_ROOT, 'README.rst')) as file_obj:
24 README = file_obj.read()
25
26 # NOTE: This is duplicated throughout and we should try to
27 # consolidate.
28 SETUP_BASE = {
29 'author': 'Google Cloud Platform',
30 'author_email': '[email protected]',
31 'scripts': [],
32 'url': 'https://github.com/GoogleCloudPlatform/google-cloud-python',
33 'license': 'Apache 2.0',
34 'platforms': 'Posix; MacOS X; Windows',
35 'include_package_data': True,
36 'zip_safe': False,
37 'classifiers': [
38 'Development Status :: 4 - Beta',
39 'Intended Audience :: Developers',
40 'License :: OSI Approved :: Apache Software License',
41 'Operating System :: OS Independent',
42 'Programming Language :: Python :: 2',
43 'Programming Language :: Python :: 2.7',
44 'Programming Language :: Python :: 3',
45 'Programming Language :: Python :: 3.4',
46 'Programming Language :: Python :: 3.5',
47 'Programming Language :: Python :: 3.6',
48 'Topic :: Internet',
49 ],
50 }
51
52
53 REQUIREMENTS = [
54 'googleapis-common-protos >= 1.3.4',
55 'protobuf >= 3.0.0',
56 'google-auth >= 0.4.0, < 2.0.0dev',
57 'requests >= 2.4.0, < 3.0.0dev',
58 'six',
59 'tenacity >= 4.0.0, <5.0.0dev'
60 ]
61
62 setup(
63 name='google-cloud-core',
64 version='0.26.0',
65 description='API Client library for Google Cloud: Core Helpers',
66 long_description=README,
67 namespace_packages=[
68 'google',
69 'google.cloud',
70 'google.api',
71 ],
72 packages=find_packages(exclude=('tests*',)),
73 install_requires=REQUIREMENTS,
74 **SETUP_BASE
75 )
76
[end of core/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

diff --git a/core/setup.py b/core/setup.py
--- a/core/setup.py
+++ b/core/setup.py
@@ -55,6 +55,7 @@
'protobuf >= 3.0.0',
'google-auth >= 0.4.0, < 2.0.0dev',
'requests >= 2.4.0, < 3.0.0dev',
+ 'setuptools >= 34.0.0',
'six',
'tenacity >= 4.0.0, <5.0.0dev'
]
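
The pin itself is the whole fix. If you need to verify that an environment already meets the new floor before installing, a rough sketch using `pkg_resources` (which ships with setuptools):

```python
import pkg_resources

installed = pkg_resources.get_distribution('setuptools').parsed_version
if installed < pkg_resources.parse_version('34.0.0'):
    raise RuntimeError(f'setuptools {installed} is too old; '
                       'run: pip install -U setuptools')
```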
| {"golden_diff": "diff --git a/core/setup.py b/core/setup.py\n--- a/core/setup.py\n+++ b/core/setup.py\n@@ -55,6 +55,7 @@\n 'protobuf >= 3.0.0',\n 'google-auth >= 0.4.0, < 2.0.0dev',\n 'requests >= 2.4.0, < 3.0.0dev',\n+ 'setuptools >= 34.0.0',\n 'six',\n 'tenacity >= 4.0.0, <5.0.0dev'\n ]\n", "issue": "Explicitly depend on setuptools\nContext: #3295\r\n\r\nWe should explicitly depend on a minimum version of setuptools to get around problems installing our packages if setuptools is too old.\n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\nPACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(PACKAGE_ROOT, 'README.rst')) as file_obj:\n README = file_obj.read()\n\n# NOTE: This is duplicated throughout and we should try to\n# consolidate.\nSETUP_BASE = {\n 'author': 'Google Cloud Platform',\n 'author_email': '[email protected]',\n 'scripts': [],\n 'url': 'https://github.com/GoogleCloudPlatform/google-cloud-python',\n 'license': 'Apache 2.0',\n 'platforms': 'Posix; MacOS X; Windows',\n 'include_package_data': True,\n 'zip_safe': False,\n 'classifiers': [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Internet',\n ],\n}\n\n\nREQUIREMENTS = [\n 'googleapis-common-protos >= 1.3.4',\n 'protobuf >= 3.0.0',\n 'google-auth >= 0.4.0, < 2.0.0dev',\n 'requests >= 2.4.0, < 3.0.0dev',\n 'six',\n 'tenacity >= 4.0.0, <5.0.0dev'\n]\n\nsetup(\n name='google-cloud-core',\n version='0.26.0',\n description='API Client library for Google Cloud: Core Helpers',\n long_description=README,\n namespace_packages=[\n 'google',\n 'google.cloud',\n 'google.api',\n ],\n packages=find_packages(exclude=('tests*',)),\n install_requires=REQUIREMENTS,\n **SETUP_BASE\n)\n", "path": "core/setup.py"}]} | 1,309 | 128 |
gh_patches_debug_28051 | rasdani/github-patches | git_diff | cupy__cupy-5226 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove cupyx.allow_synchronize?
`cupyx.allow_synchronize` (#2808) was introduced originally for both unit tests and users who want to notice device synchronization in their own code.
Application in unit tests was dismissed (#2893) because many more tests were synchronous than I had expected.
Now I doubt the usefulness of this feature for users as well.
It's fundamentally impossible to eliminate false positives and/or false negatives.
If we took a policy for zero false positives, there would be too many false negatives which I think would make this feature useless.
For example, the documentation of [cudaMemcpy](https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__MEMORY.html#group__CUDART__MEMORY_1gc263dbe6574220cc776b45438fc351e8) says *"This function exhibits synchronous behavior for most use cases"*. If we took this policy, we wouldn't be able to consider this function synchronous, because no condition is mentioned that would make this function asynchronous.
If we took a policy to allow some false positives, this feature wouldn't be used to detect unexpected synchronization in users' code.
</issue>
<code>
[start of cupy/_core/syncdetect.py]
1 import contextlib
2 import threading
3
4 from cupy import _util
5
6
7 _thread_local = threading.local()
8
9
10 class DeviceSynchronized(RuntimeError):
11 """Raised when device synchronization is detected while disallowed.
12
13 .. seealso:: :func:`cupyx.allow_synchronize`
14
15 """
16
17 def __init__(self, message=None):
18 if message is None:
19 message = 'Device synchronization was detected while disallowed.'
20 super().__init__(message)
21
22
23 def _is_allowed():
24 # Returns whether device synchronization is allowed in the current thread.
25 try:
26 return _thread_local.allowed
27 except AttributeError:
28 _thread_local.allowed = True
29 return True
30
31
32 def _declare_synchronize():
33 # Raises DeviceSynchronized if device synchronization is disallowed in
34 # the current thread.
35 if not _is_allowed():
36 raise DeviceSynchronized()
37
38
39 @contextlib.contextmanager
40 def allow_synchronize(allow):
41 """Allows or disallows device synchronization temporarily in the current \
42 thread.
43
44 If device synchronization is detected, :class:`cupyx.DeviceSynchronized`
45 will be raised.
46
47 Note that there can be false negatives and positives.
48 Device synchronization outside CuPy will not be detected.
49 """
50 _util.experimental('cupyx.allow_synchronize')
51 old = _is_allowed()
52 _thread_local.allowed = allow
53 try:
54 yield
55 finally:
56 _thread_local.allowed = old
57
[end of cupy/_core/syncdetect.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

diff --git a/cupy/_core/syncdetect.py b/cupy/_core/syncdetect.py
--- a/cupy/_core/syncdetect.py
+++ b/cupy/_core/syncdetect.py
@@ -1,7 +1,6 @@
import contextlib
import threading
-
-from cupy import _util
+import warnings
_thread_local = threading.local()
@@ -10,6 +9,11 @@
class DeviceSynchronized(RuntimeError):
"""Raised when device synchronization is detected while disallowed.
+ .. warning::
+
+ This API has been deprecated in CuPy v10 and will be removed in future
+ releases.
+
.. seealso:: :func:`cupyx.allow_synchronize`
"""
@@ -41,13 +45,21 @@
"""Allows or disallows device synchronization temporarily in the current \
thread.
+ .. warning::
+
+ This API has been deprecated in CuPy v10 and will be removed in future
+ releases.
+
If device synchronization is detected, :class:`cupyx.DeviceSynchronized`
will be raised.
Note that there can be false negatives and positives.
Device synchronization outside CuPy will not be detected.
"""
- _util.experimental('cupyx.allow_synchronize')
+ warnings.warn(
+ 'cupyx.allow_synchronize will be removed in future releases as it '
+ 'is not possible to reliably detect synchronizations.')
+
old = _is_allowed()
_thread_local.allowed = allow
try:
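
For reference, this is what the feature looks like from user code, and where the caveats above bite. Whether a given call is flagged depends entirely on which CuPy code paths happen to be instrumented (illustrative sketch only):

```python
import cupy
import cupyx

a = cupy.arange(3)
with cupyx.allow_synchronize(False):
    a.get()  # device-to-host copy synchronizes; expected to raise
             # cupyx.DeviceSynchronized, but only because this path
             # happens to be instrumented. Synchronization performed
             # outside CuPy is invisible to the detector entirely.
```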
| {"golden_diff": "diff --git a/cupy/_core/syncdetect.py b/cupy/_core/syncdetect.py\n--- a/cupy/_core/syncdetect.py\n+++ b/cupy/_core/syncdetect.py\n@@ -1,7 +1,6 @@\n import contextlib\n import threading\n-\n-from cupy import _util\n+import warnings\n \n \n _thread_local = threading.local()\n@@ -10,6 +9,11 @@\n class DeviceSynchronized(RuntimeError):\n \"\"\"Raised when device synchronization is detected while disallowed.\n \n+ .. warning::\n+\n+ This API has been deprecated in CuPy v10 and will be removed in future\n+ releases.\n+\n .. seealso:: :func:`cupyx.allow_synchronize`\n \n \"\"\"\n@@ -41,13 +45,21 @@\n \"\"\"Allows or disallows device synchronization temporarily in the current \\\n thread.\n \n+ .. warning::\n+\n+ This API has been deprecated in CuPy v10 and will be removed in future\n+ releases.\n+\n If device synchronization is detected, :class:`cupyx.DeviceSynchronized`\n will be raised.\n \n Note that there can be false negatives and positives.\n Device synchronization outside CuPy will not be detected.\n \"\"\"\n- _util.experimental('cupyx.allow_synchronize')\n+ warnings.warn(\n+ 'cupyx.allow_synchronize will be removed in future releases as it '\n+ 'is not possible to reliably detect synchronizations.')\n+\n old = _is_allowed()\n _thread_local.allowed = allow\n try:\n", "issue": "Remove cupyx.allow_synchronize?\n`cupyx.allow_synchronize` (#2808) was introduced originally for both unit tests and users who want to notice device synchronization in their own code.\r\n\r\nApplication in uint tests was dissmissed (#2893) because much more tests were synchronous than I had expected. \r\n\r\nNow I doubt the usefulness of this feature for users as well.\r\nIt's fundamentally impossible to eliminate false positives and/or false negatives.\r\n\r\nIf we took a policy for zero false positives, there would be too many false negatives which I think would make this feature useless.\r\nFor example, the documentation of [cudaMemcpy](https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__MEMORY.html#group__CUDART__MEMORY_1gc263dbe6574220cc776b45438fc351e8) says *\"This function exhibits synchronous behavior for most use cases\"*. If we took this policy, we wouldn't be able to consider this function synchronous, because no condition is mentioned that would make this function asynchronous.\r\n\r\nIf we took a policy to allow some false positives, this feature wouldn't be used to detect unexpected synchronization in users' code.\n", "before_files": [{"content": "import contextlib\nimport threading\n\nfrom cupy import _util\n\n\n_thread_local = threading.local()\n\n\nclass DeviceSynchronized(RuntimeError):\n \"\"\"Raised when device synchronization is detected while disallowed.\n\n .. 
seealso:: :func:`cupyx.allow_synchronize`\n\n \"\"\"\n\n def __init__(self, message=None):\n if message is None:\n message = 'Device synchronization was detected while disallowed.'\n super().__init__(message)\n\n\ndef _is_allowed():\n # Returns whether device synchronization is allowed in the current thread.\n try:\n return _thread_local.allowed\n except AttributeError:\n _thread_local.allowed = True\n return True\n\n\ndef _declare_synchronize():\n # Raises DeviceSynchronized if device synchronization is disallowed in\n # the current thread.\n if not _is_allowed():\n raise DeviceSynchronized()\n\n\[email protected]\ndef allow_synchronize(allow):\n \"\"\"Allows or disallows device synchronization temporarily in the current \\\nthread.\n\n If device synchronization is detected, :class:`cupyx.DeviceSynchronized`\n will be raised.\n\n Note that there can be false negatives and positives.\n Device synchronization outside CuPy will not be detected.\n \"\"\"\n _util.experimental('cupyx.allow_synchronize')\n old = _is_allowed()\n _thread_local.allowed = allow\n try:\n yield\n finally:\n _thread_local.allowed = old\n", "path": "cupy/_core/syncdetect.py"}]} | 1,208 | 334 |
gh_patches_debug_27897 | rasdani/github-patches | git_diff | freqtrade__freqtrade-4144 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Possibly slightly wrong informative pair merging
When merging a longer timeframe into a shorter one, the timedelta is added to the date:
https://github.com/freqtrade/freqtrade/blob/f320cb0d7a12e064018967cf049dd6719fff8ddb/freqtrade/strategy/strategy_helper.py#L35

which is then merged onto the shorter timeframe on these dates:
https://github.com/freqtrade/freqtrade/blob/f320cb0d7a12e064018967cf049dd6719fff8ddb/freqtrade/strategy/strategy_helper.py#L42-L43

So let's say we are merging a `1h` timeframe into a `5m` timeframe, and there is a `True` signal at `00:00` on the `1h` timeseries.
With this merge, the signal will also appear at `00:00` in the `5m` timeseries. However, the `00:00` candle of the `5m` timeframe is only received at `00:05`, five minutes later than the time you actually received the `1h` candle; the signal should instead land on the `5m` candle dated `23:55`.
So after merging, either the values should be shifted backward (`shift(-1)`), or the merging dates should be reduced by one unit of the shorter timeframe's timedelta:
```python
informative['date_merge'] = informative["date"] + pd.to_timedelta(minutes_inf, 'm') - pd.to_timedelta(minutes, 'm')
```
</issue>
<code>
[start of freqtrade/strategy/strategy_helper.py]
1 import pandas as pd
2
3 from freqtrade.exchange import timeframe_to_minutes
4
5
6 def merge_informative_pair(dataframe: pd.DataFrame, informative: pd.DataFrame,
7 timeframe: str, timeframe_inf: str, ffill: bool = True) -> pd.DataFrame:
8 """
9 Correctly merge informative samples to the original dataframe, avoiding lookahead bias.
10
11 Since dates are candle open dates, merging a 15m candle that starts at 15:00, and a
12 1h candle that starts at 15:00 will result in all candles to know the close at 16:00
13 which they should not know.
14
15 Moves the date of the informative pair by 1 time interval forward.
16 This way, the 14:00 1h candle is merged to 15:00 15m candle, since the 14:00 1h candle is the
17 last candle that's closed at 15:00, 15:15, 15:30 or 15:45.
18
19 Assuming inf_tf = '1d' - then the resulting columns will be:
20 date_1d, open_1d, high_1d, low_1d, close_1d, rsi_1d
21
22 :param dataframe: Original dataframe
23 :param informative: Informative pair, most likely loaded via dp.get_pair_dataframe
24 :param timeframe: Timeframe of the original pair sample.
25 :param timeframe_inf: Timeframe of the informative pair sample.
26 :param ffill: Forwardfill missing values - optional but usually required
27 """
28
29 minutes_inf = timeframe_to_minutes(timeframe_inf)
30 minutes = timeframe_to_minutes(timeframe)
31 if minutes >= minutes_inf:
32 # No need to forwardshift if the timeframes are identical
33 informative['date_merge'] = informative["date"]
34 else:
35 informative['date_merge'] = informative["date"] + pd.to_timedelta(minutes_inf, 'm')
36
37 # Rename columns to be unique
38 informative.columns = [f"{col}_{timeframe_inf}" for col in informative.columns]
39
40 # Combine the 2 dataframes
41 # all indicators on the informative sample MUST be calculated before this point
42 dataframe = pd.merge(dataframe, informative, left_on='date',
43 right_on=f'date_merge_{timeframe_inf}', how='left')
44 dataframe = dataframe.drop(f'date_merge_{timeframe_inf}', axis=1)
45
46 if ffill:
47 dataframe = dataframe.ffill()
48
49 return dataframe
50
[end of freqtrade/strategy/strategy_helper.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

diff --git a/freqtrade/strategy/strategy_helper.py b/freqtrade/strategy/strategy_helper.py
--- a/freqtrade/strategy/strategy_helper.py
+++ b/freqtrade/strategy/strategy_helper.py
@@ -24,15 +24,24 @@
:param timeframe: Timeframe of the original pair sample.
:param timeframe_inf: Timeframe of the informative pair sample.
:param ffill: Forwardfill missing values - optional but usually required
+ :return: Merged dataframe
+ :raise: ValueError if the secondary timeframe is shorter than the dataframe timeframe
"""
minutes_inf = timeframe_to_minutes(timeframe_inf)
minutes = timeframe_to_minutes(timeframe)
- if minutes >= minutes_inf:
+ if minutes == minutes_inf:
# No need to forwardshift if the timeframes are identical
informative['date_merge'] = informative["date"]
+ elif minutes < minutes_inf:
+ # Subtract "small" timeframe so merging is not delayed by 1 small candle
+ # Detailed explanation in https://github.com/freqtrade/freqtrade/issues/4073
+ informative['date_merge'] = (
+ informative["date"] + pd.to_timedelta(minutes_inf, 'm') - pd.to_timedelta(minutes, 'm')
+ )
else:
- informative['date_merge'] = informative["date"] + pd.to_timedelta(minutes_inf, 'm')
+ raise ValueError("Tried to merge a faster timeframe to a slower timeframe."
+ "This would create new rows, and can throw off your regular indicators.")
# Rename columns to be unique
informative.columns = [f"{col}_{timeframe_inf}" for col in informative.columns]
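
The timestamp arithmetic is worth spelling out. Dates are candle opens, so for a 1h candle merged onto a 5m frame (a sketch of the corrected offset):

```python
import pandas as pd

open_1h = pd.Timestamp('2021-01-01 23:00')     # this candle closes at 00:00
naive = open_1h + pd.Timedelta(minutes=60)     # 00:00 -> data seen at 00:05
fixed = naive - pd.Timedelta(minutes=5)        # 23:55 -> closes at 00:00
# the 23:55 5m candle is the first one whose close coincides with the
# moment the 1h candle's data actually becomes available
```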
| {"golden_diff": "diff --git a/freqtrade/strategy/strategy_helper.py b/freqtrade/strategy/strategy_helper.py\n--- a/freqtrade/strategy/strategy_helper.py\n+++ b/freqtrade/strategy/strategy_helper.py\n@@ -24,15 +24,24 @@\n :param timeframe: Timeframe of the original pair sample.\n :param timeframe_inf: Timeframe of the informative pair sample.\n :param ffill: Forwardfill missing values - optional but usually required\n+ :return: Merged dataframe\n+ :raise: ValueError if the secondary timeframe is shorter than the dataframe timeframe\n \"\"\"\n \n minutes_inf = timeframe_to_minutes(timeframe_inf)\n minutes = timeframe_to_minutes(timeframe)\n- if minutes >= minutes_inf:\n+ if minutes == minutes_inf:\n # No need to forwardshift if the timeframes are identical\n informative['date_merge'] = informative[\"date\"]\n+ elif minutes < minutes_inf:\n+ # Subtract \"small\" timeframe so merging is not delayed by 1 small candle\n+ # Detailed explanation in https://github.com/freqtrade/freqtrade/issues/4073\n+ informative['date_merge'] = (\n+ informative[\"date\"] + pd.to_timedelta(minutes_inf, 'm') - pd.to_timedelta(minutes, 'm')\n+ )\n else:\n- informative['date_merge'] = informative[\"date\"] + pd.to_timedelta(minutes_inf, 'm')\n+ raise ValueError(\"Tried to merge a faster timeframe to a slower timeframe.\"\n+ \"This would create new rows, and can throw off your regular indicators.\")\n \n # Rename columns to be unique\n informative.columns = [f\"{col}_{timeframe_inf}\" for col in informative.columns]\n", "issue": "Possibly slightly wrong informative pair merging\nWhen merging longer timeframe to a shorter one, the timedelta is added to the date:\r\nhttps://github.com/freqtrade/freqtrade/blob/f320cb0d7a12e064018967cf049dd6719fff8ddb/freqtrade/strategy/strategy_helper.py#L35\r\n\r\nthen it merges to the shorter timeframe with these dates..\r\nhttps://github.com/freqtrade/freqtrade/blob/f320cb0d7a12e064018967cf049dd6719fff8ddb/freqtrade/strategy/strategy_helper.py#L42-L43\r\n\r\nSo lets say we are merging a `1h` timeframe to a `5m` timeframe and there is a signal `True` at `00:00` on the `1h` timeseries. \r\nWith this merge, the signal will appear in the `5m` timeseries also at `00:00`. However the `00:00` candle for the `5m` timeframe is received at `00:05`, that is five minutes later than the time you actually received the `1h` candle, which should have been received at the candle with date `11:55`. 
\r\nSo after merging, the values should be shifted backward (`shift(-1)`)..or the merging dates should be reduced by one unit of timedelta of the shorter timeframe..\r\n```python\r\ninformative['date_merge'] = informative[\"date\"] + pd.to_timedelta(minutes_inf, 'm') - pd.to_timedelta(minutes, 'm')\r\n```\n", "before_files": [{"content": "import pandas as pd\n\nfrom freqtrade.exchange import timeframe_to_minutes\n\n\ndef merge_informative_pair(dataframe: pd.DataFrame, informative: pd.DataFrame,\n timeframe: str, timeframe_inf: str, ffill: bool = True) -> pd.DataFrame:\n \"\"\"\n Correctly merge informative samples to the original dataframe, avoiding lookahead bias.\n\n Since dates are candle open dates, merging a 15m candle that starts at 15:00, and a\n 1h candle that starts at 15:00 will result in all candles to know the close at 16:00\n which they should not know.\n\n Moves the date of the informative pair by 1 time interval forward.\n This way, the 14:00 1h candle is merged to 15:00 15m candle, since the 14:00 1h candle is the\n last candle that's closed at 15:00, 15:15, 15:30 or 15:45.\n\n Assuming inf_tf = '1d' - then the resulting columns will be:\n date_1d, open_1d, high_1d, low_1d, close_1d, rsi_1d\n\n :param dataframe: Original dataframe\n :param informative: Informative pair, most likely loaded via dp.get_pair_dataframe\n :param timeframe: Timeframe of the original pair sample.\n :param timeframe_inf: Timeframe of the informative pair sample.\n :param ffill: Forwardfill missing values - optional but usually required\n \"\"\"\n\n minutes_inf = timeframe_to_minutes(timeframe_inf)\n minutes = timeframe_to_minutes(timeframe)\n if minutes >= minutes_inf:\n # No need to forwardshift if the timeframes are identical\n informative['date_merge'] = informative[\"date\"]\n else:\n informative['date_merge'] = informative[\"date\"] + pd.to_timedelta(minutes_inf, 'm')\n\n # Rename columns to be unique\n informative.columns = [f\"{col}_{timeframe_inf}\" for col in informative.columns]\n\n # Combine the 2 dataframes\n # all indicators on the informative sample MUST be calculated before this point\n dataframe = pd.merge(dataframe, informative, left_on='date',\n right_on=f'date_merge_{timeframe_inf}', how='left')\n dataframe = dataframe.drop(f'date_merge_{timeframe_inf}', axis=1)\n\n if ffill:\n dataframe = dataframe.ffill()\n\n return dataframe\n", "path": "freqtrade/strategy/strategy_helper.py"}]} | 1,549 | 373 |
gh_patches_debug_13394 | rasdani/github-patches | git_diff | ManimCommunity__manim-1923 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Command line help text is cut off
## Enhancement proposal
When running `manim --help`, the following output is returned:
```
Manim Community v0.9.0
Usage: manim [OPTIONS] COMMAND [ARGS]...
Animation engine for explanatory math videos
Options:
--version Show version and exit.
--help Show this message and exit.
Commands:
render* Render SCENE(S) from the input FILE.
cfg Manages Manim configuration files.
init Sets up a project in current working directory with default...
new Create a new project or insert a new scene.
plugins Manages Manim plugins.
Made with <3 by Manim Community developers.
```
As you can see, the help text for `init` is cut off, and does not provide sufficient information about what that command does.
## Additional comments
<!-- Add further context that you think might be relevant. -->
</issue>
<code>
[start of manim/cli/init/commands.py]
1 """Manim's init subcommand.
2
3 Manim's init subcommand is accessed in the command-line interface via ``manim
4 init``. Here you can specify options, subcommands, and subgroups for the init
5 group.
6
7 """
8 from pathlib import Path
9
10 import click
11
12 from ...constants import CONTEXT_SETTINGS, EPILOG
13 from ...utils.file_ops import copy_template_files
14
15
16 @click.command(
17 context_settings=CONTEXT_SETTINGS,
18 epilog=EPILOG,
19 )
20 def init():
21 """Sets up a project in current working directory with default settings.
22
23 It copies files from templates directory and pastes them in the current working dir.
24
25 The new project is set up with default settings.
26 """
27 cfg = Path("manim.cfg")
28 if cfg.exists():
29 raise FileExistsError(f"\t{cfg} exists\n")
30 else:
31 copy_template_files()
32
[end of manim/cli/init/commands.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/manim/cli/init/commands.py b/manim/cli/init/commands.py
--- a/manim/cli/init/commands.py
+++ b/manim/cli/init/commands.py
@@ -16,13 +16,14 @@
@click.command(
context_settings=CONTEXT_SETTINGS,
epilog=EPILOG,
+ short_help="""Sets up a new project in current working directory with default settings.\n
+It copies files from templates directory and pastes them in the current working dir.
+""",
)
def init():
- """Sets up a project in current working directory with default settings.
+ """Sets up a new project in current working directory with default settings.
It copies files from templates directory and pastes them in the current working dir.
-
- The new project is set up with default settings.
"""
cfg = Path("manim.cfg")
if cfg.exists():
| {"golden_diff": "diff --git a/manim/cli/init/commands.py b/manim/cli/init/commands.py\n--- a/manim/cli/init/commands.py\n+++ b/manim/cli/init/commands.py\n@@ -16,13 +16,14 @@\n @click.command(\n context_settings=CONTEXT_SETTINGS,\n epilog=EPILOG,\n+ short_help=\"\"\"Sets up a new project in current working directory with default settings.\\n\n+It copies files from templates directory and pastes them in the current working dir.\n+\"\"\",\n )\n def init():\n- \"\"\"Sets up a project in current working directory with default settings.\n+ \"\"\"Sets up a new project in current working directory with default settings.\n \n It copies files from templates directory and pastes them in the current working dir.\n-\n- The new project is set up with default settings.\n \"\"\"\n cfg = Path(\"manim.cfg\")\n if cfg.exists():\n", "issue": "Command line help text is cut off\n## Enhancement proposal\r\nWhen running `manim --help`, the following output is returned:\r\n\r\n```\r\nManim Community v0.9.0\r\n\r\nUsage: manim [OPTIONS] COMMAND [ARGS]...\r\n\r\n Animation engine for explanatory math videos\r\n\r\nOptions:\r\n --version Show version and exit.\r\n --help Show this message and exit.\r\n\r\nCommands:\r\n render* Render SCENE(S) from the input FILE.\r\n cfg Manages Manim configuration files.\r\n init Sets up a project in current working directory with default...\r\n new Create a new project or insert a new scene.\r\n plugins Manages Manim plugins.\r\n\r\n Made with <3 by Manim Community developers.\r\n```\r\n\r\nAs you can see, the help text for `init` is cut off, and does not provide sufficient information about what that command does.\r\n## Additional comments\r\n<!-- Add further context that you think might be relevant. -->\r\n\n", "before_files": [{"content": "\"\"\"Manim's init subcommand.\n\nManim's init subcommand is accessed in the command-line interface via ``manim\ninit``. Here you can specify options, subcommands, and subgroups for the init\ngroup.\n\n\"\"\"\nfrom pathlib import Path\n\nimport click\n\nfrom ...constants import CONTEXT_SETTINGS, EPILOG\nfrom ...utils.file_ops import copy_template_files\n\n\[email protected](\n context_settings=CONTEXT_SETTINGS,\n epilog=EPILOG,\n)\ndef init():\n \"\"\"Sets up a project in current working directory with default settings.\n\n It copies files from templates directory and pastes them in the current working dir.\n\n The new project is set up with default settings.\n \"\"\"\n cfg = Path(\"manim.cfg\")\n if cfg.exists():\n raise FileExistsError(f\"\\t{cfg} exists\\n\")\n else:\n copy_template_files()\n", "path": "manim/cli/init/commands.py"}]} | 983 | 194 |
gh_patches_debug_16755 | rasdani/github-patches | git_diff | scrapy__scrapy-5299 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Tests failing with new Sybil
Sybil 3.0.0 was just released, and at least the following change breaks our tests: `CodeBlockParser has been renamed to PythonCodeBlockParser`
```python-traceback
docs/conftest.py:24: in <module>
CodeBlockParser(future_imports=['print_function']),
E TypeError: __init__() got an unexpected keyword argument 'future_imports'
```
</issue>
<code>
[start of docs/conftest.py]
1 import os
2 from doctest import ELLIPSIS, NORMALIZE_WHITESPACE
3
4 from scrapy.http.response.html import HtmlResponse
5 from sybil import Sybil
6 from sybil.parsers.codeblock import CodeBlockParser
7 from sybil.parsers.doctest import DocTestParser
8 from sybil.parsers.skip import skip
9
10
11 def load_response(url, filename):
12 input_path = os.path.join(os.path.dirname(__file__), '_tests', filename)
13 with open(input_path, 'rb') as input_file:
14 return HtmlResponse(url, body=input_file.read())
15
16
17 def setup(namespace):
18 namespace['load_response'] = load_response
19
20
21 pytest_collect_file = Sybil(
22 parsers=[
23 DocTestParser(optionflags=ELLIPSIS | NORMALIZE_WHITESPACE),
24 CodeBlockParser(future_imports=['print_function']),
25 skip,
26 ],
27 pattern='*.rst',
28 setup=setup,
29 ).pytest()
30
[end of docs/conftest.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conftest.py b/docs/conftest.py
--- a/docs/conftest.py
+++ b/docs/conftest.py
@@ -3,7 +3,11 @@
from scrapy.http.response.html import HtmlResponse
from sybil import Sybil
-from sybil.parsers.codeblock import CodeBlockParser
+try:
+ # >2.0.1
+ from sybil.parsers.codeblock import PythonCodeBlockParser
+except ImportError:
+ from sybil.parsers.codeblock import CodeBlockParser as PythonCodeBlockParser
from sybil.parsers.doctest import DocTestParser
from sybil.parsers.skip import skip
@@ -21,7 +25,7 @@
pytest_collect_file = Sybil(
parsers=[
DocTestParser(optionflags=ELLIPSIS | NORMALIZE_WHITESPACE),
- CodeBlockParser(future_imports=['print_function']),
+ PythonCodeBlockParser(future_imports=['print_function']),
skip,
],
pattern='*.rst',
| {"golden_diff": "diff --git a/docs/conftest.py b/docs/conftest.py\n--- a/docs/conftest.py\n+++ b/docs/conftest.py\n@@ -3,7 +3,11 @@\n \n from scrapy.http.response.html import HtmlResponse\n from sybil import Sybil\n-from sybil.parsers.codeblock import CodeBlockParser\n+try:\n+ # >2.0.1\n+ from sybil.parsers.codeblock import PythonCodeBlockParser\n+except ImportError:\n+ from sybil.parsers.codeblock import CodeBlockParser as PythonCodeBlockParser\n from sybil.parsers.doctest import DocTestParser\n from sybil.parsers.skip import skip\n \n@@ -21,7 +25,7 @@\n pytest_collect_file = Sybil(\n parsers=[\n DocTestParser(optionflags=ELLIPSIS | NORMALIZE_WHITESPACE),\n- CodeBlockParser(future_imports=['print_function']),\n+ PythonCodeBlockParser(future_imports=['print_function']),\n skip,\n ],\n pattern='*.rst',\n", "issue": "Tests failing with new Sybil\nSybil 3.0.0 was just released, and at least the following change breaks our tests: `CodeBlockParser has been renamed to PythonCodeBlockParser`\r\n\r\n```python-traceback\r\ndocs/conftest.py:24: in <module>\r\n CodeBlockParser(future_imports=['print_function']),\r\nE TypeError: __init__() got an unexpected keyword argument 'future_imports'\r\n```\n", "before_files": [{"content": "import os\nfrom doctest import ELLIPSIS, NORMALIZE_WHITESPACE\n\nfrom scrapy.http.response.html import HtmlResponse\nfrom sybil import Sybil\nfrom sybil.parsers.codeblock import CodeBlockParser\nfrom sybil.parsers.doctest import DocTestParser\nfrom sybil.parsers.skip import skip\n\n\ndef load_response(url, filename):\n input_path = os.path.join(os.path.dirname(__file__), '_tests', filename)\n with open(input_path, 'rb') as input_file:\n return HtmlResponse(url, body=input_file.read())\n\n\ndef setup(namespace):\n namespace['load_response'] = load_response\n\n\npytest_collect_file = Sybil(\n parsers=[\n DocTestParser(optionflags=ELLIPSIS | NORMALIZE_WHITESPACE),\n CodeBlockParser(future_imports=['print_function']),\n skip,\n ],\n pattern='*.rst',\n setup=setup,\n).pytest()\n", "path": "docs/conftest.py"}]} | 873 | 217 |
gh_patches_debug_4388 | rasdani/github-patches | git_diff | freqtrade__freqtrade-2197 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Build number in -dev versioning
Since we do not have a build number in the -dev version, it can be quite hard to tell which particular version of freqtrade a user is running (especially with the conda package). So if we change the user interface, I guess it is better for a while to make those changes together with a version change on both develop and master...

Is it possible to introduce the build number into the -dev versioning, which will change with every merge, as ccxt employs, for example?

It's not necessary for master (2019-8 is completely enough), but for develop, which changes every day, this would be useful, since `freqtrade -V` would then give the exact version info (even when a user has the current codebase fetched from GitHub into a directory but has installed an outdated version with conda, which is used instead of the latest develop from the directory; that is the confusing use case where it is complicated to find out the actual version of freqtrade running at the user's site...)

It does not need to be an increasing number; it can be a short commit id (of the merge into develop), for example...
</issue>
<code>
[start of freqtrade/__init__.py]
1 """ FreqTrade bot """
2 __version__ = '2019.7-dev'
3
4
5 class DependencyException(Exception):
6 """
7 Indicates that an assumed dependency is not met.
8 This could happen when there is currently not enough money on the account.
9 """
10
11
12 class OperationalException(Exception):
13 """
14 Requires manual intervention and will usually stop the bot.
15 This happens when an exchange returns an unexpected error during runtime
16 or given configuration is invalid.
17 """
18
19
20 class InvalidOrderException(Exception):
21 """
22 This is returned when the order is not valid. Example:
23 If stoploss on exchange order is hit, then trying to cancel the order
24 should return this exception.
25 """
26
27
28 class TemporaryError(Exception):
29 """
30 Temporary network or exchange related error.
31 This could happen when an exchange is congested, unavailable, or the user
32 has networking problems. Usually resolves itself after a time.
33 """
34
[end of freqtrade/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/freqtrade/__init__.py b/freqtrade/__init__.py
--- a/freqtrade/__init__.py
+++ b/freqtrade/__init__.py
@@ -1,5 +1,16 @@
""" FreqTrade bot """
-__version__ = '2019.7-dev'
+__version__ = 'develop'
+
+if __version__ == 'develop':
+
+ try:
+ import subprocess
+ __version__ = 'develop-' + subprocess.check_output(
+ ['git', 'log', '--format="%h"', '-n 1'],
+ stderr=subprocess.DEVNULL).decode("utf-8").rstrip().strip('"')
+ except Exception:
+ # git not available, ignore
+ pass
class DependencyException(Exception):
| {"golden_diff": "diff --git a/freqtrade/__init__.py b/freqtrade/__init__.py\n--- a/freqtrade/__init__.py\n+++ b/freqtrade/__init__.py\n@@ -1,5 +1,16 @@\n \"\"\" FreqTrade bot \"\"\"\n-__version__ = '2019.7-dev'\n+__version__ = 'develop'\n+\n+if __version__ == 'develop':\n+\n+ try:\n+ import subprocess\n+ __version__ = 'develop-' + subprocess.check_output(\n+ ['git', 'log', '--format=\"%h\"', '-n 1'],\n+ stderr=subprocess.DEVNULL).decode(\"utf-8\").rstrip().strip('\"')\n+ except Exception:\n+ # git not available, ignore\n+ pass\n \n \n class DependencyException(Exception):\n", "issue": "Build number in -dev versioning\nSince we do not have a build number in the -dev version, it may be quite complex to understand which particular version of freqtrade the user runs (especially with that your conda). So if we change user interface I guess it's better for a while to make those changes along with changing the version of both develop and master...\r\n\r\nIs it possible to introduce the build number into the -dev versioning, which will change with every merge, as ccxt employs, for example?\r\n\r\nIt's not necessary for master (2019-8 is completely enough), but for develop changing every day this may be useful since `freqtrade -V` will give particular version info (even when a user has current codebase from github fetched in a dir, but installed an outdated version with that your conda, which is used instead of latest develop from the dir; that's the confusing usecase when it's complicated to find out the actual version of freqtrade that runs at the user site...)\r\n\r\nIt's not necessary to be an increasing number, it can be a short commit id (merged into develop), for example...\r\n\n", "before_files": [{"content": "\"\"\" FreqTrade bot \"\"\"\n__version__ = '2019.7-dev'\n\n\nclass DependencyException(Exception):\n \"\"\"\n Indicates that an assumed dependency is not met.\n This could happen when there is currently not enough money on the account.\n \"\"\"\n\n\nclass OperationalException(Exception):\n \"\"\"\n Requires manual intervention and will usually stop the bot.\n This happens when an exchange returns an unexpected error during runtime\n or given configuration is invalid.\n \"\"\"\n\n\nclass InvalidOrderException(Exception):\n \"\"\"\n This is returned when the order is not valid. Example:\n If stoploss on exchange order is hit, then trying to cancel the order\n should return this exception.\n \"\"\"\n\n\nclass TemporaryError(Exception):\n \"\"\"\n Temporary network or exchange related error.\n This could happen when an exchange is congested, unavailable, or the user\n has networking problems. Usually resolves itself after a time.\n \"\"\"\n", "path": "freqtrade/__init__.py"}]} | 1,028 | 175 |
gh_patches_debug_22409 | rasdani/github-patches | git_diff | xonsh__xonsh-1551 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`xonsh.completers.pip` explodes if `pip` is not on PATH
On my Windows installation, Python is not on PATH (because multiple Python madness), and therefore neither is pip. However, the pip completer [expects pip to be on the path](https://github.com/xonsh/xonsh/blob/master/xonsh/completers/pip.py#L14).
This causes the completer to blow up with a `FileNotFoundError` when it tries to complete.
</issue>
<code>
[start of xonsh/completers/pip.py]
1 import re
2 import subprocess
3
4 import xonsh.lazyasd as xl
5
6 PIP_RE = xl.LazyObject(lambda: re.compile("pip(?:\d|\.)*"),
7 globals(), 'PIP_RE')
8 PIP_LIST_RE = xl.LazyObject(lambda: re.compile("pip(?:\d|\.)* (?:uninstall|show)"),
9 globals(), 'PIP_LIST_RE')
10
11
12 @xl.lazyobject
13 def ALL_COMMANDS():
14 help_text = str(subprocess.check_output(['pip', '--help'],
15 stderr=subprocess.DEVNULL))
16 commands = re.findall(" (\w+) ", help_text)
17 return [c for c in commands if c not in ['completion', 'help']]
18
19
20 def complete_pip(prefix, line, begidx, endidx, ctx):
21 """Completes python's package manager pip"""
22 line_len = len(line.split())
23 if (line_len > 3) or (line_len > 2 and line.endswith(' ')) or \
24 (not PIP_RE.search(line)):
25 return
26 if PIP_LIST_RE.search(line):
27 items = subprocess.check_output(['pip', 'list'], stderr=subprocess.DEVNULL)
28 items = items.decode('utf-8').splitlines()
29 return set(i.split()[0] for i in items)
30
31 if (line_len > 1 and line.endswith(' ')) or line_len > 2:
32 # "pip show " -> no complete (note space)
33 return
34 if prefix not in ALL_COMMANDS:
35 suggestions = [c for c in ALL_COMMANDS if c.startswith(prefix)]
36 if suggestions:
37 return suggestions, len(prefix)
38 return ALL_COMMANDS, len(prefix)
39
[end of xonsh/completers/pip.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/xonsh/completers/pip.py b/xonsh/completers/pip.py
--- a/xonsh/completers/pip.py
+++ b/xonsh/completers/pip.py
@@ -11,8 +11,11 @@
@xl.lazyobject
def ALL_COMMANDS():
- help_text = str(subprocess.check_output(['pip', '--help'],
- stderr=subprocess.DEVNULL))
+ try:
+ help_text = str(subprocess.check_output(['pip', '--help'],
+ stderr=subprocess.DEVNULL))
+ except FileNotFoundError:
+ return []
commands = re.findall(" (\w+) ", help_text)
return [c for c in commands if c not in ['completion', 'help']]
@@ -24,7 +27,11 @@
(not PIP_RE.search(line)):
return
if PIP_LIST_RE.search(line):
- items = subprocess.check_output(['pip', 'list'], stderr=subprocess.DEVNULL)
+ try:
+ items = subprocess.check_output(['pip', 'list'],
+ stderr=subprocess.DEVNULL)
+ except FileNotFoundError:
+ return set()
items = items.decode('utf-8').splitlines()
return set(i.split()[0] for i in items)
| {"golden_diff": "diff --git a/xonsh/completers/pip.py b/xonsh/completers/pip.py\n--- a/xonsh/completers/pip.py\n+++ b/xonsh/completers/pip.py\n@@ -11,8 +11,11 @@\n \n @xl.lazyobject\n def ALL_COMMANDS():\n- help_text = str(subprocess.check_output(['pip', '--help'],\n- stderr=subprocess.DEVNULL))\n+ try:\n+ help_text = str(subprocess.check_output(['pip', '--help'],\n+ stderr=subprocess.DEVNULL))\n+ except FileNotFoundError:\n+ return []\n commands = re.findall(\" (\\w+) \", help_text)\n return [c for c in commands if c not in ['completion', 'help']]\n \n@@ -24,7 +27,11 @@\n (not PIP_RE.search(line)):\n return\n if PIP_LIST_RE.search(line):\n- items = subprocess.check_output(['pip', 'list'], stderr=subprocess.DEVNULL)\n+ try:\n+ items = subprocess.check_output(['pip', 'list'],\n+ stderr=subprocess.DEVNULL)\n+ except FileNotFoundError:\n+ return set()\n items = items.decode('utf-8').splitlines()\n return set(i.split()[0] for i in items)\n", "issue": "`xonsh.completers.pip` explodes if `pip` is not on PATH\nOn my Windows installation, Python is not on PATH (because multiple Python madness), and therefore neither is pip. However, the pip completer [expects pip to be on the path](https://github.com/xonsh/xonsh/blob/master/xonsh/completers/pip.py#L14).\n\nThis causes the completer to blow up with a `FileNotFoundError` when it tries to complete.\n\n", "before_files": [{"content": "import re\nimport subprocess\n\nimport xonsh.lazyasd as xl\n\nPIP_RE = xl.LazyObject(lambda: re.compile(\"pip(?:\\d|\\.)*\"),\n globals(), 'PIP_RE')\nPIP_LIST_RE = xl.LazyObject(lambda: re.compile(\"pip(?:\\d|\\.)* (?:uninstall|show)\"),\n globals(), 'PIP_LIST_RE')\n\n\[email protected]\ndef ALL_COMMANDS():\n help_text = str(subprocess.check_output(['pip', '--help'],\n stderr=subprocess.DEVNULL))\n commands = re.findall(\" (\\w+) \", help_text)\n return [c for c in commands if c not in ['completion', 'help']]\n\n\ndef complete_pip(prefix, line, begidx, endidx, ctx):\n \"\"\"Completes python's package manager pip\"\"\"\n line_len = len(line.split())\n if (line_len > 3) or (line_len > 2 and line.endswith(' ')) or \\\n (not PIP_RE.search(line)):\n return\n if PIP_LIST_RE.search(line):\n items = subprocess.check_output(['pip', 'list'], stderr=subprocess.DEVNULL)\n items = items.decode('utf-8').splitlines()\n return set(i.split()[0] for i in items)\n\n if (line_len > 1 and line.endswith(' ')) or line_len > 2:\n # \"pip show \" -> no complete (note space)\n return\n if prefix not in ALL_COMMANDS:\n suggestions = [c for c in ALL_COMMANDS if c.startswith(prefix)]\n if suggestions:\n return suggestions, len(prefix)\n return ALL_COMMANDS, len(prefix)\n", "path": "xonsh/completers/pip.py"}]} | 1,072 | 282 |
gh_patches_debug_28331 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-4113 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
testing 5024: missing location label
**URL:** https://meinberlin-dev.liqd.net/projekte/burgerhaushalt-spandau/?mode=list
**user:** any
**expected behaviour:**
**behaviour:** the location label ("Bezeichnung des markierten Ortes", German for "label of the marked place") is missing
**important screensize:**
**device & browser:**
**Comment/Question:** maybe we need a smaller character limit here? It is at 255 now; I wonder if something like 50 would be enough for something displayed as a tag, or whether longer labels should be cut off with "..." instead.
old list
<img width="446" alt="Bildschirmfoto 2021-12-21 um 16 35 27" src="https://user-images.githubusercontent.com/35491681/146956690-789f6d02-372c-4877-a4c9-c539b5fc90c3.png">
new list
<img width="446" alt="Bildschirmfoto 2021-12-21 um 16 34 09" src="https://user-images.githubusercontent.com/35491681/146956491-2472f9f2-e90d-4975-88a8-fbe1a7012657.png">
old list with long label
<img width="656" alt="Bildschirmfoto 2021-12-21 um 16 36 09" src="https://user-images.githubusercontent.com/35491681/146956804-ced5b4b8-0da8-42fc-a17c-901fc86efe9b.png">
</issue>
<code>
[start of meinberlin/apps/budgeting/serializers.py]
1 from django.contrib.contenttypes.models import ContentType
2 from rest_framework import serializers
3
4 from adhocracy4.categories.models import Category
5 from meinberlin.apps.votes.models import TokenVote
6
7 from .models import Proposal
8
9
10 class CategoryField(serializers.Field):
11
12 def to_internal_value(self, category):
13 if category:
14 return Category.objects.get(pk=category)
15 else:
16 return None
17
18 def to_representation(self, category):
19 return {'id': category.pk, 'name': category.name}
20
21
22 class ProposalSerializer(serializers.ModelSerializer):
23
24 creator = serializers.SerializerMethodField()
25 comment_count = serializers.SerializerMethodField()
26 positive_rating_count = serializers.SerializerMethodField()
27 negative_rating_count = serializers.SerializerMethodField()
28 category = CategoryField()
29 url = serializers.SerializerMethodField()
30 moderator_feedback = serializers.SerializerMethodField()
31 session_token_voted = serializers.SerializerMethodField()
32
33 class Meta:
34 model = Proposal
35 fields = ('budget', 'category', 'comment_count', 'created', 'modified',
36 'creator', 'is_archived', 'name', 'negative_rating_count',
37 'positive_rating_count', 'url', 'pk', 'moderator_feedback',
38 'session_token_voted')
39 read_only_fields = ('budget', 'category', 'comment_count', 'created',
40 'modified', 'creator', 'is_archived', 'name',
41 'negative_rating_count', 'positive_rating_count',
42 'url', 'pk', 'moderator_feedback',
43 'session_token_voted')
44
45 def get_creator(self, proposal):
46 return proposal.creator.username
47
48 def get_comment_count(self, proposal):
49 if hasattr(proposal, 'comment_count'):
50 return proposal.comment_count
51 else:
52 return 0
53
54 def get_positive_rating_count(self, proposal):
55 if hasattr(proposal, 'positive_rating_count'):
56 return proposal.positive_rating_count
57 else:
58 return 0
59
60 def get_negative_rating_count(self, proposal):
61 if hasattr(proposal, 'negative_rating_count'):
62 return proposal.negative_rating_count
63 else:
64 return 0
65
66 def get_url(self, proposal):
67 return proposal.get_absolute_url()
68
69 def get_moderator_feedback(self, proposal):
70 if hasattr(proposal, 'moderator_feedback'):
71 return (proposal.moderator_feedback,
72 proposal.get_moderator_feedback_display())
73 else:
74 return None
75
76 def get_session_token_voted(self, proposal):
77 """Serialize if proposal has been voted.
78
79 Returns bool that indicates whether the proposal has
80 been voted with the token in the current session
81 """
82 if 'request' in self.context:
83 if 'voting_token' in self.context['request'].session:
84 vote = TokenVote.objects.filter(
85 token__pk=self.context['request'].session['voting_token'],
86 content_type=ContentType.objects.get_for_model(
87 proposal.__class__),
88 object_pk=proposal.pk
89 )
90 if vote.exists():
91 return True
92
93 return False
94
[end of meinberlin/apps/budgeting/serializers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/meinberlin/apps/budgeting/serializers.py b/meinberlin/apps/budgeting/serializers.py
--- a/meinberlin/apps/budgeting/serializers.py
+++ b/meinberlin/apps/budgeting/serializers.py
@@ -35,12 +35,12 @@
fields = ('budget', 'category', 'comment_count', 'created', 'modified',
'creator', 'is_archived', 'name', 'negative_rating_count',
'positive_rating_count', 'url', 'pk', 'moderator_feedback',
- 'session_token_voted')
+ 'point_label', 'session_token_voted')
read_only_fields = ('budget', 'category', 'comment_count', 'created',
'modified', 'creator', 'is_archived', 'name',
'negative_rating_count', 'positive_rating_count',
'url', 'pk', 'moderator_feedback',
- 'session_token_voted')
+ 'point_label', 'session_token_voted')
def get_creator(self, proposal):
return proposal.creator.username
@@ -73,6 +73,12 @@
else:
return None
+ def get_point_label(self, proposal):
+ if hasattr(proposal, 'point_label'):
+ return (proposal.point_label)
+ else:
+ return None
+
def get_session_token_voted(self, proposal):
"""Serialize if proposal has been voted.
| {"golden_diff": "diff --git a/meinberlin/apps/budgeting/serializers.py b/meinberlin/apps/budgeting/serializers.py\n--- a/meinberlin/apps/budgeting/serializers.py\n+++ b/meinberlin/apps/budgeting/serializers.py\n@@ -35,12 +35,12 @@\n fields = ('budget', 'category', 'comment_count', 'created', 'modified',\n 'creator', 'is_archived', 'name', 'negative_rating_count',\n 'positive_rating_count', 'url', 'pk', 'moderator_feedback',\n- 'session_token_voted')\n+ 'point_label', 'session_token_voted')\n read_only_fields = ('budget', 'category', 'comment_count', 'created',\n 'modified', 'creator', 'is_archived', 'name',\n 'negative_rating_count', 'positive_rating_count',\n 'url', 'pk', 'moderator_feedback',\n- 'session_token_voted')\n+ 'point_label', 'session_token_voted')\n \n def get_creator(self, proposal):\n return proposal.creator.username\n@@ -73,6 +73,12 @@\n else:\n return None\n \n+ def get_point_label(self, proposal):\n+ if hasattr(proposal, 'point_label'):\n+ return (proposal.point_label)\n+ else:\n+ return None\n+\n def get_session_token_voted(self, proposal):\n \"\"\"Serialize if proposal has been voted.\n", "issue": "testing 5024: missing location label\n**URL:** https://meinberlin-dev.liqd.net/projekte/burgerhaushalt-spandau/?mode=list\r\n**user:** any\r\n**expected behaviour:** \r\n**behaviour:** location label (Bezeichnung des markierten Ortes) is missing\r\n**important screensize:**\r\n**device & browser:** \r\n**Comment/Question:** maybe we need a smaller char restriction here? it's at 255 now, I wonder if something like 50 should be enough for something displayed as a tag? or continue with ... for longer words?\r\n\r\nold list\r\n<img width=\"446\" alt=\"Bildschirmfoto 2021-12-21 um 16 35 27\" src=\"https://user-images.githubusercontent.com/35491681/146956690-789f6d02-372c-4877-a4c9-c539b5fc90c3.png\">\r\n\r\n\r\nnew list\r\n<img width=\"446\" alt=\"Bildschirmfoto 2021-12-21 um 16 34 09\" src=\"https://user-images.githubusercontent.com/35491681/146956491-2472f9f2-e90d-4975-88a8-fbe1a7012657.png\">\r\n\r\nold list with long label\r\n<img width=\"656\" alt=\"Bildschirmfoto 2021-12-21 um 16 36 09\" src=\"https://user-images.githubusercontent.com/35491681/146956804-ced5b4b8-0da8-42fc-a17c-901fc86efe9b.png\">\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from django.contrib.contenttypes.models import ContentType\nfrom rest_framework import serializers\n\nfrom adhocracy4.categories.models import Category\nfrom meinberlin.apps.votes.models import TokenVote\n\nfrom .models import Proposal\n\n\nclass CategoryField(serializers.Field):\n\n def to_internal_value(self, category):\n if category:\n return Category.objects.get(pk=category)\n else:\n return None\n\n def to_representation(self, category):\n return {'id': category.pk, 'name': category.name}\n\n\nclass ProposalSerializer(serializers.ModelSerializer):\n\n creator = serializers.SerializerMethodField()\n comment_count = serializers.SerializerMethodField()\n positive_rating_count = serializers.SerializerMethodField()\n negative_rating_count = serializers.SerializerMethodField()\n category = CategoryField()\n url = serializers.SerializerMethodField()\n moderator_feedback = serializers.SerializerMethodField()\n session_token_voted = serializers.SerializerMethodField()\n\n class Meta:\n model = Proposal\n fields = ('budget', 'category', 'comment_count', 'created', 'modified',\n 'creator', 'is_archived', 'name', 'negative_rating_count',\n 'positive_rating_count', 'url', 'pk', 'moderator_feedback',\n 'session_token_voted')\n 
read_only_fields = ('budget', 'category', 'comment_count', 'created',\n 'modified', 'creator', 'is_archived', 'name',\n 'negative_rating_count', 'positive_rating_count',\n 'url', 'pk', 'moderator_feedback',\n 'session_token_voted')\n\n def get_creator(self, proposal):\n return proposal.creator.username\n\n def get_comment_count(self, proposal):\n if hasattr(proposal, 'comment_count'):\n return proposal.comment_count\n else:\n return 0\n\n def get_positive_rating_count(self, proposal):\n if hasattr(proposal, 'positive_rating_count'):\n return proposal.positive_rating_count\n else:\n return 0\n\n def get_negative_rating_count(self, proposal):\n if hasattr(proposal, 'negative_rating_count'):\n return proposal.negative_rating_count\n else:\n return 0\n\n def get_url(self, proposal):\n return proposal.get_absolute_url()\n\n def get_moderator_feedback(self, proposal):\n if hasattr(proposal, 'moderator_feedback'):\n return (proposal.moderator_feedback,\n proposal.get_moderator_feedback_display())\n else:\n return None\n\n def get_session_token_voted(self, proposal):\n \"\"\"Serialize if proposal has been voted.\n\n Returns bool that indicates whether the proposal has\n been voted with the token in the current session\n \"\"\"\n if 'request' in self.context:\n if 'voting_token' in self.context['request'].session:\n vote = TokenVote.objects.filter(\n token__pk=self.context['request'].session['voting_token'],\n content_type=ContentType.objects.get_for_model(\n proposal.__class__),\n object_pk=proposal.pk\n )\n if vote.exists():\n return True\n\n return False\n", "path": "meinberlin/apps/budgeting/serializers.py"}]} | 1,804 | 318 |
gh_patches_debug_56812 | rasdani/github-patches | git_diff | microsoft__knossos-ksc-1027 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: Segmentation fault in sqrl_pytorch-PyTorch CUDA
Just saw this while working on something else. I haven't done a lot to debug it, but note that it's in copydown, on a fairly innocuous operation (aten::sum(Tensor 2) -> Float), so it might be something to do with KS_ALLOCATOR not being defined?
Or it could just be an out-of-memory condition that is not being caught?

</issue>
<code>
[start of examples/dl-capsule/sqrl.py]
1 import torch
2 import ksc.torch_frontend as knossos
3
4 # run-bench: Knossos source, and "nice" PyTorch implementation
5 # BEGINDOC
6 @knossos.register
7 def sqrl(x: torch.Tensor):
8 """
9 sqrl: Squared Leaky Relu
10 Like a capsule from /Stuck in a Rut/
11 Typically x is a 4x4 tensor, possibly
12 packed in a 4n x 4m array
13 """
14 y = torch.sum(x)
15 if y < 0.0:
16 t = -0.125 * x
17 else:
18 t = 1 / 2 * x ** 2
19 return torch.mean(torch.sin(t) * t)
20
21
22 # ENDDOC
23
24 # run-bench: PyTorch "fast" implementation
25 def sqrl_pytorch(x: torch.Tensor):
26 return sqrl(x)
27
28
29 # run-bench: PyTorch "nice" implementation
30 def sqrl_pytorch_nice(x: torch.Tensor):
31 return sqrl(x)
32
33
34 # run-bench: Define a range of values at which to call the methods
35 def sqrl_bench_configs():
36 yield torch.randn((4, 4))
37 yield torch.randn((16, 16))
38
39
40 #################################
41 #
42 # vsqrl - vectorized sqrl
43 #
44
45 vsqrl = knossos.vmap(sqrl)
46
47
48 # run-bench: Define a range of values at which to call the methods
49 def vsqrl_bench_configs():
50 yield torch.randn((10, 4, 4))
51 yield torch.randn((1000, 4, 4))
52 yield torch.randn((1000, 16, 16))
53
[end of examples/dl-capsule/sqrl.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/dl-capsule/sqrl.py b/examples/dl-capsule/sqrl.py
--- a/examples/dl-capsule/sqrl.py
+++ b/examples/dl-capsule/sqrl.py
@@ -23,12 +23,12 @@
# run-bench: PyTorch "fast" implementation
def sqrl_pytorch(x: torch.Tensor):
- return sqrl(x)
+ return sqrl.raw_f(x)
# run-bench: PyTorch "nice" implementation
def sqrl_pytorch_nice(x: torch.Tensor):
- return sqrl(x)
+ return sqrl.raw_f(x)
# run-bench: Define a range of values at which to call the methods
| {"golden_diff": "diff --git a/examples/dl-capsule/sqrl.py b/examples/dl-capsule/sqrl.py\n--- a/examples/dl-capsule/sqrl.py\n+++ b/examples/dl-capsule/sqrl.py\n@@ -23,12 +23,12 @@\n \n # run-bench: PyTorch \"fast\" implementation\n def sqrl_pytorch(x: torch.Tensor):\n- return sqrl(x)\n+ return sqrl.raw_f(x)\n \n \n # run-bench: PyTorch \"nice\" implementation\n def sqrl_pytorch_nice(x: torch.Tensor):\n- return sqrl(x)\n+ return sqrl.raw_f(x)\n \n \n # run-bench: Define a range of values at which to call the methods\n", "issue": "Bug: Segmentation fault in sqrl_pytorch-PyTorch CUDA\nJust saw this while working on something else. I haven't done a lot to debug it, but note that it's in copydown, on a fairly innocuous operation (aten::sum(Tensor 2) -> Float), so might be something to do with KS_ALLOCATOR not being defined?\r\nOr could just be out of memory not caught?\r\n\r\n\n", "before_files": [{"content": "import torch\nimport ksc.torch_frontend as knossos\n\n# run-bench: Knossos source, and \"nice\" PyTorch implementation\n# BEGINDOC\[email protected]\ndef sqrl(x: torch.Tensor):\n \"\"\"\n sqrl: Squared Leaky Relu\n Like a capsule from /Stuck in a Rut/\n Typically x is a 4x4 tensor, possibly\n packed in a 4n x 4m array\n \"\"\"\n y = torch.sum(x)\n if y < 0.0:\n t = -0.125 * x\n else:\n t = 1 / 2 * x ** 2\n return torch.mean(torch.sin(t) * t)\n\n\n# ENDDOC\n\n# run-bench: PyTorch \"fast\" implementation\ndef sqrl_pytorch(x: torch.Tensor):\n return sqrl(x)\n\n\n# run-bench: PyTorch \"nice\" implementation\ndef sqrl_pytorch_nice(x: torch.Tensor):\n return sqrl(x)\n\n\n# run-bench: Define a range of values at which to call the methods\ndef sqrl_bench_configs():\n yield torch.randn((4, 4))\n yield torch.randn((16, 16))\n\n\n#################################\n#\n# vsqrl - vectorized sqrl\n#\n\nvsqrl = knossos.vmap(sqrl)\n\n\n# run-bench: Define a range of values at which to call the methods\ndef vsqrl_bench_configs():\n yield torch.randn((10, 4, 4))\n yield torch.randn((1000, 4, 4))\n yield torch.randn((1000, 16, 16))\n", "path": "examples/dl-capsule/sqrl.py"}]} | 1,178 | 167 |
gh_patches_debug_31908 | rasdani/github-patches | git_diff | rucio__rucio-5322 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add add-exception command in the CLI
Motivation
----------
A CLI command to add a new exception is missing and needs to be added.
</issue>
<code>
[start of lib/rucio/client/lifetimeclient.py]
1 # Copyright 2017-2018 CERN for the benefit of the ATLAS collaboration.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15 # Authors:
16 # - Cedric Serfon <[email protected]>, 2017
17 # - Vincent Garonne <[email protected]>, 2018
18 # - Martin Barisits <[email protected]>, 2018
19 # - Andrew Lister <[email protected]>, 2019
20
21 from __future__ import print_function
22
23 from json import loads
24 from requests.status_codes import codes
25
26 from rucio.client.baseclient import BaseClient
27 from rucio.client.baseclient import choice
28 from rucio.common.utils import build_url, render_json
29
30
31 class LifetimeClient(BaseClient):
32
33 """Lifetime client class for working with Lifetime Model exceptions"""
34
35 LIFETIME_BASEURL = 'lifetime_exceptions'
36
37 def list_exceptions(self, exception_id=None, states=None):
38 """
39 List exceptions to Lifetime Model.
40
41 :param id: The id of the exception
42 :param states: The states to filter
43 """
44
45 path = self.LIFETIME_BASEURL + '/'
46 params = {}
47 if exception_id:
48 params['exception_id'] = exception_id
49 if states:
50 params['states'] = exception_id
51 url = build_url(choice(self.list_hosts), path=path, params=params)
52
53 result = self._send_request(url)
54 if result.status_code == codes.ok:
55 lifetime_exceptions = self._load_json_data(result)
56 return lifetime_exceptions
57 else:
58 exc_cls, exc_msg = self._get_exception(headers=result.headers, status_code=result.status_code)
59 raise exc_cls(exc_msg)
60
61 def add_exception(self, dids, account, pattern, comments, expires_at):
62 """
63 Add exceptions to Lifetime Model.
64
65 :param dids: The list of dids
66 :param account: The account of the requester.
67 :param pattern: The account.
68 :param comments: The comments associated to the exception.
69 :param expires_at: The expiration date of the exception.
70
71 returns: The id of the exception.
72 """
73 path = self.LIFETIME_BASEURL + '/'
74 url = build_url(choice(self.list_hosts), path=path)
75 data = {'dids': dids, 'account': account, 'pattern': pattern, 'comments': comments, 'expires_at': expires_at}
76 print(render_json(**data))
77 result = self._send_request(url, type_='POST', data=render_json(**data))
78 print(result.text)
79 if result.status_code == codes.created:
80 return loads(result.text)
81 exc_cls, exc_msg = self._get_exception(headers=result.headers, status_code=result.status_code, data=result.content)
82 raise exc_cls(exc_msg)
83
[end of lib/rucio/client/lifetimeclient.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/rucio/client/lifetimeclient.py b/lib/rucio/client/lifetimeclient.py
--- a/lib/rucio/client/lifetimeclient.py
+++ b/lib/rucio/client/lifetimeclient.py
@@ -1,4 +1,5 @@
-# Copyright 2017-2018 CERN for the benefit of the ATLAS collaboration.
+# -*- coding: utf-8 -*-
+# Copyright 2017-2022 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,10 +14,13 @@
# limitations under the License.
#
# Authors:
-# - Cedric Serfon <[email protected]>, 2017
-# - Vincent Garonne <[email protected]>, 2018
+# - Cedric Serfon <[email protected]>, 2017-2022
+# - Vincent Garonne <[email protected]>, 2018
+# - Joaquín Bogado <[email protected]>, 2018
# - Martin Barisits <[email protected]>, 2018
# - Andrew Lister <[email protected]>, 2019
+# - David Población Criado <[email protected]>, 2021
+# - Igor Mandrichenko <[email protected]>, 2021
from __future__ import print_function
@@ -73,9 +77,7 @@
path = self.LIFETIME_BASEURL + '/'
url = build_url(choice(self.list_hosts), path=path)
data = {'dids': dids, 'account': account, 'pattern': pattern, 'comments': comments, 'expires_at': expires_at}
- print(render_json(**data))
result = self._send_request(url, type_='POST', data=render_json(**data))
- print(result.text)
if result.status_code == codes.created:
return loads(result.text)
exc_cls, exc_msg = self._get_exception(headers=result.headers, status_code=result.status_code, data=result.content)
| {"golden_diff": "diff --git a/lib/rucio/client/lifetimeclient.py b/lib/rucio/client/lifetimeclient.py\n--- a/lib/rucio/client/lifetimeclient.py\n+++ b/lib/rucio/client/lifetimeclient.py\n@@ -1,4 +1,5 @@\n-# Copyright 2017-2018 CERN for the benefit of the ATLAS collaboration.\n+# -*- coding: utf-8 -*-\n+# Copyright 2017-2022 CERN\n #\n # Licensed under the Apache License, Version 2.0 (the \"License\");\n # you may not use this file except in compliance with the License.\n@@ -13,10 +14,13 @@\n # limitations under the License.\n #\n # Authors:\n-# - Cedric Serfon <[email protected]>, 2017\n-# - Vincent Garonne <[email protected]>, 2018\n+# - Cedric Serfon <[email protected]>, 2017-2022\n+# - Vincent Garonne <[email protected]>, 2018\n+# - Joaqu\u00edn Bogado <[email protected]>, 2018\n # - Martin Barisits <[email protected]>, 2018\n # - Andrew Lister <[email protected]>, 2019\n+# - David Poblaci\u00f3n Criado <[email protected]>, 2021\n+# - Igor Mandrichenko <[email protected]>, 2021\n \n from __future__ import print_function\n \n@@ -73,9 +77,7 @@\n path = self.LIFETIME_BASEURL + '/'\n url = build_url(choice(self.list_hosts), path=path)\n data = {'dids': dids, 'account': account, 'pattern': pattern, 'comments': comments, 'expires_at': expires_at}\n- print(render_json(**data))\n result = self._send_request(url, type_='POST', data=render_json(**data))\n- print(result.text)\n if result.status_code == codes.created:\n return loads(result.text)\n exc_cls, exc_msg = self._get_exception(headers=result.headers, status_code=result.status_code, data=result.content)\n", "issue": "Add add-exception command in the CLI\nMotivation\r\n----------\r\nA CLI command to add a new exception is missing and need to be added\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2017-2018 CERN for the benefit of the ATLAS collaboration.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Authors:\n# - Cedric Serfon <[email protected]>, 2017\n# - Vincent Garonne <[email protected]>, 2018\n# - Martin Barisits <[email protected]>, 2018\n# - Andrew Lister <[email protected]>, 2019\n\nfrom __future__ import print_function\n\nfrom json import loads\nfrom requests.status_codes import codes\n\nfrom rucio.client.baseclient import BaseClient\nfrom rucio.client.baseclient import choice\nfrom rucio.common.utils import build_url, render_json\n\n\nclass LifetimeClient(BaseClient):\n\n \"\"\"Lifetime client class for working with Lifetime Model exceptions\"\"\"\n\n LIFETIME_BASEURL = 'lifetime_exceptions'\n\n def list_exceptions(self, exception_id=None, states=None):\n \"\"\"\n List exceptions to Lifetime Model.\n\n :param id: The id of the exception\n :param states: The states to filter\n \"\"\"\n\n path = self.LIFETIME_BASEURL + '/'\n params = {}\n if exception_id:\n params['exception_id'] = exception_id\n if states:\n params['states'] = exception_id\n url = build_url(choice(self.list_hosts), path=path, params=params)\n\n result = self._send_request(url)\n if result.status_code == codes.ok:\n lifetime_exceptions = 
self._load_json_data(result)\n return lifetime_exceptions\n else:\n exc_cls, exc_msg = self._get_exception(headers=result.headers, status_code=result.status_code)\n raise exc_cls(exc_msg)\n\n def add_exception(self, dids, account, pattern, comments, expires_at):\n \"\"\"\n Add exceptions to Lifetime Model.\n\n :param dids: The list of dids\n :param account: The account of the requester.\n :param pattern: The account.\n :param comments: The comments associated to the exception.\n :param expires_at: The expiration date of the exception.\n\n returns: The id of the exception.\n \"\"\"\n path = self.LIFETIME_BASEURL + '/'\n url = build_url(choice(self.list_hosts), path=path)\n data = {'dids': dids, 'account': account, 'pattern': pattern, 'comments': comments, 'expires_at': expires_at}\n print(render_json(**data))\n result = self._send_request(url, type_='POST', data=render_json(**data))\n print(result.text)\n if result.status_code == codes.created:\n return loads(result.text)\n exc_cls, exc_msg = self._get_exception(headers=result.headers, status_code=result.status_code, data=result.content)\n raise exc_cls(exc_msg)\n", "path": "lib/rucio/client/lifetimeclient.py"}]} | 1,465 | 514 |
gh_patches_debug_4120 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-2709 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
</issue>
<code>
[start of colossalai/cli/launcher/__init__.py]
1 import click
2 from .run import launch_multi_processes
3 from colossalai.context import Config
4
5
6 @click.command(help="Launch distributed training on a single node or multiple nodes",
7 context_settings=dict(ignore_unknown_options=True))
8 @click.option("-H",
9 "-host",
10 "--host",
11 type=str,
12 default=None,
13 help="the list of hostnames to launch in the format <host1>,<host2>")
14 @click.option(
15 "--hostfile",
16 type=str,
17 default=None,
18 help="Hostfile path that defines the device pool available to the job, each line in the file is a hostname")
19 @click.option("--include",
20 type=str,
21 default=None,
22 help="Specify computing devices to use during execution. String format is <host1>,<host2>,"
23 " only effective when used with --hostfile.")
24 @click.option(
25 "--exclude",
26 type=str,
27 default=None,
28 help=
29 "Specify computing devices to NOT use during execution. Mutually exclusive with --include. Formatting is the same as --includ,"
30 " only effective when used with --hostfile.")
31 @click.option("--num_nodes",
32 type=int,
33 default=-1,
34 help="Total number of worker nodes to use, only effective when used with --hostfile.")
35 @click.option("--nproc_per_node", type=int, default=None, help="Number of GPUs to use on each node.")
36 @click.option("--master_port",
37 type=int,
38 default=29500,
39 help="(optional) Port used by PyTorch distributed for communication during distributed training.")
40 @click.option("--master_addr",
41 type=str,
42 default="127.0.0.1",
43 help="(optional) IP address of node 0, will be inferred via 'hostname -I' if not specified.")
44 @click.option(
45 "--extra_launch_args",
46 type=str,
47 default=None,
48 help=
49 "Set additional torch distributed launcher arguments such as --standalone. The format is --extra_launch_args arg1=1,arg2=2. "
50 "This will be converted to --arg1=1 --arg2=2 during execution")
51 @click.option("--ssh-port", type=int, default=None, help="(optional) the port used for ssh connection")
52 @click.argument("user_script", type=str)
53 @click.argument('user_args', nargs=-1)
54 def run(host: str, hostfile: str, num_nodes: int, nproc_per_node: int, include: str, exclude: str, master_addr: str,
55 master_port: int, extra_launch_args: str, ssh_port: int, user_script: str, user_args: str) -> None:
56 """
57 To launch multiple processes on a single node or multiple nodes via command line.
58
59 Usage::
60 # run with 4 GPUs on the current node use default port 29500
61 colossalai run --nprocs_per_node 4 train.py
62
63 # run with 2 GPUs on the current node at port 29550
64 colossalai run --nprocs_per_node 4 --master_port 29550 train.py
65
66 # run on two nodes
67 colossalai run --host <host1>,<host2> --master_addr host1 --nprocs_per_node 4 train.py
68
69 # run with hostfile
70 colossalai run --hostfile <file_path> --master_addr <host> --nprocs_per_node 4 train.py
71
72 # run with hostfile with only included hosts
73 colossalai run --hostfile <file_path> --master_addr host1 --include host1,host2 --nprocs_per_node 4 train.py
74
75 # run with hostfile excluding the hosts selected
76 colossalai run --hostfile <file_path> --master_addr host1 --exclude host2 --nprocs_per_node 4 train.py
77 """
78 if not user_script.endswith('.py'):
79 click.echo(f'Error: invalid Python file {user_script}. Did you use a wrong option? Try colossalai run --help')
80 exit()
81
82 args_dict = locals()
83 args = Config(args_dict)
84 args.user_args = list(args.user_args)
85 launch_multi_processes(args)
86
[end of colossalai/cli/launcher/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/colossalai/cli/launcher/__init__.py b/colossalai/cli/launcher/__init__.py
--- a/colossalai/cli/launcher/__init__.py
+++ b/colossalai/cli/launcher/__init__.py
@@ -1,7 +1,9 @@
import click
-from .run import launch_multi_processes
+
from colossalai.context import Config
+from .run import launch_multi_processes
+
@click.command(help="Launch distributed training on a single node or multiple nodes",
context_settings=dict(ignore_unknown_options=True))
| {"golden_diff": "diff --git a/colossalai/cli/launcher/__init__.py b/colossalai/cli/launcher/__init__.py\n--- a/colossalai/cli/launcher/__init__.py\n+++ b/colossalai/cli/launcher/__init__.py\n@@ -1,7 +1,9 @@\n import click\n-from .run import launch_multi_processes\n+\n from colossalai.context import Config\n \n+from .run import launch_multi_processes\n+\n \n @click.command(help=\"Launch distributed training on a single node or multiple nodes\",\n context_settings=dict(ignore_unknown_options=True))\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import click\nfrom .run import launch_multi_processes\nfrom colossalai.context import Config\n\n\[email protected](help=\"Launch distributed training on a single node or multiple nodes\",\n context_settings=dict(ignore_unknown_options=True))\[email protected](\"-H\",\n \"-host\",\n \"--host\",\n type=str,\n default=None,\n help=\"the list of hostnames to launch in the format <host1>,<host2>\")\[email protected](\n \"--hostfile\",\n type=str,\n default=None,\n help=\"Hostfile path that defines the device pool available to the job, each line in the file is a hostname\")\[email protected](\"--include\",\n type=str,\n default=None,\n help=\"Specify computing devices to use during execution. String format is <host1>,<host2>,\"\n \" only effective when used with --hostfile.\")\[email protected](\n \"--exclude\",\n type=str,\n default=None,\n help=\n \"Specify computing devices to NOT use during execution. Mutually exclusive with --include. Formatting is the same as --includ,\"\n \" only effective when used with --hostfile.\")\[email protected](\"--num_nodes\",\n type=int,\n default=-1,\n help=\"Total number of worker nodes to use, only effective when used with --hostfile.\")\[email protected](\"--nproc_per_node\", type=int, default=None, help=\"Number of GPUs to use on each node.\")\[email protected](\"--master_port\",\n type=int,\n default=29500,\n help=\"(optional) Port used by PyTorch distributed for communication during distributed training.\")\[email protected](\"--master_addr\",\n type=str,\n default=\"127.0.0.1\",\n help=\"(optional) IP address of node 0, will be inferred via 'hostname -I' if not specified.\")\[email protected](\n \"--extra_launch_args\",\n type=str,\n default=None,\n help=\n \"Set additional torch distributed launcher arguments such as --standalone. The format is --extra_launch_args arg1=1,arg2=2. 
\"\n \"This will be converted to --arg1=1 --arg2=2 during execution\")\[email protected](\"--ssh-port\", type=int, default=None, help=\"(optional) the port used for ssh connection\")\[email protected](\"user_script\", type=str)\[email protected]('user_args', nargs=-1)\ndef run(host: str, hostfile: str, num_nodes: int, nproc_per_node: int, include: str, exclude: str, master_addr: str,\n master_port: int, extra_launch_args: str, ssh_port: int, user_script: str, user_args: str) -> None:\n \"\"\"\n To launch multiple processes on a single node or multiple nodes via command line.\n\n Usage::\n # run with 4 GPUs on the current node use default port 29500\n colossalai run --nprocs_per_node 4 train.py\n\n # run with 2 GPUs on the current node at port 29550\n colossalai run --nprocs_per_node 4 --master_port 29550 train.py\n\n # run on two nodes\n colossalai run --host <host1>,<host2> --master_addr host1 --nprocs_per_node 4 train.py\n\n # run with hostfile\n colossalai run --hostfile <file_path> --master_addr <host> --nprocs_per_node 4 train.py\n\n # run with hostfile with only included hosts\n colossalai run --hostfile <file_path> --master_addr host1 --include host1,host2 --nprocs_per_node 4 train.py\n\n # run with hostfile excluding the hosts selected\n colossalai run --hostfile <file_path> --master_addr host1 --exclude host2 --nprocs_per_node 4 train.py\n \"\"\"\n if not user_script.endswith('.py'):\n click.echo(f'Error: invalid Python file {user_script}. Did you use a wrong option? Try colossalai run --help')\n exit()\n\n args_dict = locals()\n args = Config(args_dict)\n args.user_args = list(args.user_args)\n launch_multi_processes(args)\n", "path": "colossalai/cli/launcher/__init__.py"}]} | 1,634 | 122 |
gh_patches_debug_12616 | rasdani/github-patches | git_diff | mne-tools__mne-python-9042 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
use bibtex in plot_evoked_whitening.py
convert references in `examples/visualization/plot_evoked_whitening.py` to use footcite / footbibliography
</issue>
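For illustration, a minimal sketch of the requested conversion, consistent with the diff later in this entry; the BibTeX key assumes a matching entry already exists in MNE's shared `references.bib`:

```python
# Docstring before the change (numbered citation defined inline):
#
#   Covariance estimation and diagnostic plots are based on [1]_.
#
#   References
#   ----------
#   .. [1] Engemann D. and Gramfort A. (2015) Automated model selection in
#      covariance estimation and spatial whitening of MEG and EEG signals,
#      vol. 108, 328-342, NeuroImage.
#
# Docstring after the change (entry resolved from the .bib file at build time):
#
#   Covariance estimation and diagnostic plots are based on
#   :footcite:`EngemannGramfort2015`.
#
#   References
#   ----------
#   .. footbibliography::
```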
<code>
[start of examples/visualization/plot_evoked_whitening.py]
1 """
2 =============================================
3 Whitening evoked data with a noise covariance
4 =============================================
5
6 Evoked data are loaded and then whitened using a given noise covariance
7 matrix. It's an excellent quality check to see if baseline signals match
8 the assumption of Gaussian white noise during the baseline period.
9
10 Covariance estimation and diagnostic plots are based on [1]_.
11
12 References
13 ----------
14 .. [1] Engemann D. and Gramfort A. (2015) Automated model selection in
15 covariance estimation and spatial whitening of MEG and EEG signals, vol.
16 108, 328-342, NeuroImage.
17
18 """
19 # Authors: Alexandre Gramfort <[email protected]>
20 # Denis A. Engemann <[email protected]>
21 #
22 # License: BSD (3-clause)
23
24 import mne
25
26 from mne import io
27 from mne.datasets import sample
28 from mne.cov import compute_covariance
29
30 print(__doc__)
31
32 ###############################################################################
33 # Set parameters
34
35 data_path = sample.data_path()
36 raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
37 event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
38
39 raw = io.read_raw_fif(raw_fname, preload=True)
40 raw.filter(1, 40, n_jobs=1, fir_design='firwin')
41 raw.info['bads'] += ['MEG 2443'] # bads + 1 more
42 events = mne.read_events(event_fname)
43
44 # let's look at rare events, button presses
45 event_id, tmin, tmax = 2, -0.2, 0.5
46 reject = dict(mag=4e-12, grad=4000e-13, eeg=80e-6)
47
48 epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=('meg', 'eeg'),
49 baseline=None, reject=reject, preload=True)
50
51 # Uncomment next line to use fewer samples and study regularization effects
52 # epochs = epochs[:20] # For your data, use as many samples as you can!
53
54 ###############################################################################
55 # Compute covariance using automated regularization
56 method_params = dict(diagonal_fixed=dict(mag=0.01, grad=0.01, eeg=0.01))
57 noise_covs = compute_covariance(epochs, tmin=None, tmax=0, method='auto',
58 return_estimators=True, verbose=True, n_jobs=1,
59 projs=None, rank=None,
60 method_params=method_params)
61
62 # With "return_estimator=True" all estimated covariances sorted
63 # by log-likelihood are returned.
64
65 print('Covariance estimates sorted from best to worst')
66 for c in noise_covs:
67 print("%s : %s" % (c['method'], c['loglik']))
68
69 ###############################################################################
70 # Show the evoked data:
71
72 evoked = epochs.average()
73
74 evoked.plot(time_unit='s') # plot evoked response
75
76 ###############################################################################
77 # We can then show whitening for our various noise covariance estimates.
78 #
79 # Here we should look to see if baseline signals match the
80 # assumption of Gaussian white noise. we expect values centered at
81 # 0 within 2 standard deviations for 95% of the time points.
82 #
83 # For the Global field power we expect a value of 1.
84
85 evoked.plot_white(noise_covs, time_unit='s')
86
[end of examples/visualization/plot_evoked_whitening.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/visualization/plot_evoked_whitening.py b/examples/visualization/plot_evoked_whitening.py
--- a/examples/visualization/plot_evoked_whitening.py
+++ b/examples/visualization/plot_evoked_whitening.py
@@ -7,13 +7,12 @@
matrix. It's an excellent quality check to see if baseline signals match
the assumption of Gaussian white noise during the baseline period.
-Covariance estimation and diagnostic plots are based on [1]_.
+Covariance estimation and diagnostic plots are based on
+:footcite:`EngemannGramfort2015`.
References
----------
-.. [1] Engemann D. and Gramfort A. (2015) Automated model selection in
- covariance estimation and spatial whitening of MEG and EEG signals, vol.
- 108, 328-342, NeuroImage.
+.. footbibliography::
"""
# Authors: Alexandre Gramfort <[email protected]>
| {"golden_diff": "diff --git a/examples/visualization/plot_evoked_whitening.py b/examples/visualization/plot_evoked_whitening.py\n--- a/examples/visualization/plot_evoked_whitening.py\n+++ b/examples/visualization/plot_evoked_whitening.py\n@@ -7,13 +7,12 @@\n matrix. It's an excellent quality check to see if baseline signals match\n the assumption of Gaussian white noise during the baseline period.\n \n-Covariance estimation and diagnostic plots are based on [1]_.\n+Covariance estimation and diagnostic plots are based on\n+:footcite:`EngemannGramfort2015`.\n \n References\n ----------\n-.. [1] Engemann D. and Gramfort A. (2015) Automated model selection in\n- covariance estimation and spatial whitening of MEG and EEG signals, vol.\n- 108, 328-342, NeuroImage.\n+.. footbibliography::\n \n \"\"\"\n # Authors: Alexandre Gramfort <[email protected]>\n", "issue": "use bibtex in plot_evoked_whitening.py\nconvert references in `examples/visualization/plot_evoked_whitening.py` to use footcite / footbibliography\r\n\n", "before_files": [{"content": "\"\"\"\n=============================================\nWhitening evoked data with a noise covariance\n=============================================\n\nEvoked data are loaded and then whitened using a given noise covariance\nmatrix. It's an excellent quality check to see if baseline signals match\nthe assumption of Gaussian white noise during the baseline period.\n\nCovariance estimation and diagnostic plots are based on [1]_.\n\nReferences\n----------\n.. [1] Engemann D. and Gramfort A. (2015) Automated model selection in\n covariance estimation and spatial whitening of MEG and EEG signals, vol.\n 108, 328-342, NeuroImage.\n\n\"\"\"\n# Authors: Alexandre Gramfort <[email protected]>\n# Denis A. Engemann <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport mne\n\nfrom mne import io\nfrom mne.datasets import sample\nfrom mne.cov import compute_covariance\n\nprint(__doc__)\n\n###############################################################################\n# Set parameters\n\ndata_path = sample.data_path()\nraw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'\nevent_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'\n\nraw = io.read_raw_fif(raw_fname, preload=True)\nraw.filter(1, 40, n_jobs=1, fir_design='firwin')\nraw.info['bads'] += ['MEG 2443'] # bads + 1 more\nevents = mne.read_events(event_fname)\n\n# let's look at rare events, button presses\nevent_id, tmin, tmax = 2, -0.2, 0.5\nreject = dict(mag=4e-12, grad=4000e-13, eeg=80e-6)\n\nepochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=('meg', 'eeg'),\n baseline=None, reject=reject, preload=True)\n\n# Uncomment next line to use fewer samples and study regularization effects\n# epochs = epochs[:20] # For your data, use as many samples as you can!\n\n###############################################################################\n# Compute covariance using automated regularization\nmethod_params = dict(diagonal_fixed=dict(mag=0.01, grad=0.01, eeg=0.01))\nnoise_covs = compute_covariance(epochs, tmin=None, tmax=0, method='auto',\n return_estimators=True, verbose=True, n_jobs=1,\n projs=None, rank=None,\n method_params=method_params)\n\n# With \"return_estimator=True\" all estimated covariances sorted\n# by log-likelihood are returned.\n\nprint('Covariance estimates sorted from best to worst')\nfor c in noise_covs:\n print(\"%s : %s\" % (c['method'], c['loglik']))\n\n###############################################################################\n# Show the 
evoked data:\n\nevoked = epochs.average()\n\nevoked.plot(time_unit='s') # plot evoked response\n\n###############################################################################\n# We can then show whitening for our various noise covariance estimates.\n#\n# Here we should look to see if baseline signals match the\n# assumption of Gaussian white noise. we expect values centered at\n# 0 within 2 standard deviations for 95% of the time points.\n#\n# For the Global field power we expect a value of 1.\n\nevoked.plot_white(noise_covs, time_unit='s')\n", "path": "examples/visualization/plot_evoked_whitening.py"}]} | 1,518 | 226 |
gh_patches_debug_15369 | rasdani/github-patches | git_diff | ibis-project__ibis-3044 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bug: isolated dask backend tests fail due to removed imports
For some reason lines 6 and 8 here: https://github.com/ibis-project/ibis/commit/a1262410310bb4d638a73e1cdfbe93c2b4089905#diff-96d84d9b6e9e84a2be7a046dc9853df1ca5fc6e894307339b02cd61e666c0149L6-L8
were removed.
This causes dask tests to fail when they are run in isolation from other tests that (transitively) import from the pandas backend.
This is both a CI bug and a bug in the code, since we're not testing backends independently. Perhaps unsurprisingly, I discovered the bug in #2937, which fixes the CI part of this problem.
</issue>
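For illustration, a minimal sketch of the kind of fix the issue implies: re-importing the pandas execution module purely for its registration side effects (module path taken from the eventual fix).

```python
# In ibis/backends/dask/__init__.py: importing the pandas execution module
# registers its dispatched execute_node implementations, which the dask
# backend later overrides. The name itself is unused, hence the noqa.
import ibis.backends.pandas.execution  # noqa: F401
```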
<code>
[start of ibis/backends/dask/__init__.py]
1 from typing import Mapping
2
3 import dask
4 import dask.dataframe as dd
5 import pandas as pd
6 import toolz
7 from dask.base import DaskMethodsMixin
8
9 import ibis.common.exceptions as com
10 import ibis.config
11 import ibis.expr.schema as sch
12 import ibis.expr.types as ir
13 from ibis.backends.pandas import BasePandasBackend
14
15 from .client import DaskDatabase, DaskTable, ibis_schema_to_dask
16 from .core import execute_and_reset
17
18 # Make sure that the pandas backend is loaded, dispatching has been
19 # executed, and options have been loaded
20 ibis.pandas
21
22
23 class Backend(BasePandasBackend):
24 name = 'dask'
25 database_class = DaskDatabase
26 table_class = DaskTable
27
28 def connect(self, dictionary):
29 # register dispatchers
30 from . import udf # noqa: F401
31
32 return super().connect(dictionary)
33
34 @property
35 def version(self):
36 return dask.__version__
37
38 def execute(
39 self,
40 query: ir.Expr,
41 params: Mapping[ir.Expr, object] = None,
42 limit: str = 'default',
43 **kwargs,
44 ):
45 if limit != 'default':
46 raise ValueError(
47 'limit parameter to execute is not yet implemented in the '
48 'dask backend'
49 )
50
51 if not isinstance(query, ir.Expr):
52 raise TypeError(
53 "`query` has type {!r}, expected ibis.expr.types.Expr".format(
54 type(query).__name__
55 )
56 )
57
58 result = self.compile(query, params, **kwargs)
59 if isinstance(result, DaskMethodsMixin):
60 return result.compute()
61 else:
62 return result
63
64 def compile(
65 self, query: ir.Expr, params: Mapping[ir.Expr, object] = None, **kwargs
66 ):
67 """Compile `expr`.
68
69 Notes
70 -----
71 For the dask backend returns a dask graph that you can run ``.compute``
72 on to get a pandas object.
73
74 """
75 return execute_and_reset(query, params=params, **kwargs)
76
77 def create_table(
78 self,
79 table_name: str,
80 obj: dd.DataFrame = None,
81 schema: sch.Schema = None,
82 ):
83 """Create a table."""
84 if obj is not None:
85 df = obj
86 elif schema is not None:
87 dtypes = ibis_schema_to_dask(schema)
88 df = schema.apply_to(
89 dd.from_pandas(
90 pd.DataFrame(columns=list(map(toolz.first, dtypes))),
91 npartitions=1,
92 )
93 )
94 else:
95 raise com.IbisError('Must pass expr or schema')
96
97 self.dictionary[table_name] = df
98
[end of ibis/backends/dask/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ibis/backends/dask/__init__.py b/ibis/backends/dask/__init__.py
--- a/ibis/backends/dask/__init__.py
+++ b/ibis/backends/dask/__init__.py
@@ -6,6 +6,9 @@
import toolz
from dask.base import DaskMethodsMixin
+# import the pandas execution module to register dispatched implementations of
+# execute_node that the dask backend will later override
+import ibis.backends.pandas.execution # noqa: F401
import ibis.common.exceptions as com
import ibis.config
import ibis.expr.schema as sch
@@ -15,8 +18,7 @@
from .client import DaskDatabase, DaskTable, ibis_schema_to_dask
from .core import execute_and_reset
-# Make sure that the pandas backend is loaded, dispatching has been
-# executed, and options have been loaded
+# Make sure that the pandas backend options have been loaded
ibis.pandas
| {"golden_diff": "diff --git a/ibis/backends/dask/__init__.py b/ibis/backends/dask/__init__.py\n--- a/ibis/backends/dask/__init__.py\n+++ b/ibis/backends/dask/__init__.py\n@@ -6,6 +6,9 @@\n import toolz\n from dask.base import DaskMethodsMixin\n \n+# import the pandas execution module to register dispatched implementations of\n+# execute_node that the dask backend will later override\n+import ibis.backends.pandas.execution # noqa: F401\n import ibis.common.exceptions as com\n import ibis.config\n import ibis.expr.schema as sch\n@@ -15,8 +18,7 @@\n from .client import DaskDatabase, DaskTable, ibis_schema_to_dask\n from .core import execute_and_reset\n \n-# Make sure that the pandas backend is loaded, dispatching has been\n-# executed, and options have been loaded\n+# Make sure that the pandas backend options have been loaded\n ibis.pandas\n", "issue": "bug: isolated dask backend tests fail due to removed imports\nFor some reason lines 6 and 8 here: https://github.com/ibis-project/ibis/commit/a1262410310bb4d638a73e1cdfbe93c2b4089905#diff-96d84d9b6e9e84a2be7a046dc9853df1ca5fc6e894307339b02cd61e666c0149L6-L8\r\n\r\nwere removed.\r\n\r\nThis causes dasks tests to fail when they are run in isolation from other tests that (transitively) import from the pandas backend.\r\n\r\nThis is both a ci bug and a bug in the code, since we're not testing backends independently. Perhaps unsurprisingly I discovered the bug in #2937, which fixes the CI part of this problem.\n", "before_files": [{"content": "from typing import Mapping\n\nimport dask\nimport dask.dataframe as dd\nimport pandas as pd\nimport toolz\nfrom dask.base import DaskMethodsMixin\n\nimport ibis.common.exceptions as com\nimport ibis.config\nimport ibis.expr.schema as sch\nimport ibis.expr.types as ir\nfrom ibis.backends.pandas import BasePandasBackend\n\nfrom .client import DaskDatabase, DaskTable, ibis_schema_to_dask\nfrom .core import execute_and_reset\n\n# Make sure that the pandas backend is loaded, dispatching has been\n# executed, and options have been loaded\nibis.pandas\n\n\nclass Backend(BasePandasBackend):\n name = 'dask'\n database_class = DaskDatabase\n table_class = DaskTable\n\n def connect(self, dictionary):\n # register dispatchers\n from . 
import udf # noqa: F401\n\n return super().connect(dictionary)\n\n @property\n def version(self):\n return dask.__version__\n\n def execute(\n self,\n query: ir.Expr,\n params: Mapping[ir.Expr, object] = None,\n limit: str = 'default',\n **kwargs,\n ):\n if limit != 'default':\n raise ValueError(\n 'limit parameter to execute is not yet implemented in the '\n 'dask backend'\n )\n\n if not isinstance(query, ir.Expr):\n raise TypeError(\n \"`query` has type {!r}, expected ibis.expr.types.Expr\".format(\n type(query).__name__\n )\n )\n\n result = self.compile(query, params, **kwargs)\n if isinstance(result, DaskMethodsMixin):\n return result.compute()\n else:\n return result\n\n def compile(\n self, query: ir.Expr, params: Mapping[ir.Expr, object] = None, **kwargs\n ):\n \"\"\"Compile `expr`.\n\n Notes\n -----\n For the dask backend returns a dask graph that you can run ``.compute``\n on to get a pandas object.\n\n \"\"\"\n return execute_and_reset(query, params=params, **kwargs)\n\n def create_table(\n self,\n table_name: str,\n obj: dd.DataFrame = None,\n schema: sch.Schema = None,\n ):\n \"\"\"Create a table.\"\"\"\n if obj is not None:\n df = obj\n elif schema is not None:\n dtypes = ibis_schema_to_dask(schema)\n df = schema.apply_to(\n dd.from_pandas(\n pd.DataFrame(columns=list(map(toolz.first, dtypes))),\n npartitions=1,\n )\n )\n else:\n raise com.IbisError('Must pass expr or schema')\n\n self.dictionary[table_name] = df\n", "path": "ibis/backends/dask/__init__.py"}]} | 1,539 | 225 |
gh_patches_debug_6667 | rasdani/github-patches | git_diff | pyjanitor-devs__pyjanitor-633 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Version on GitHub different from version on PyPI
# Brief Description of Fix
<!-- Please describe the fix in terms of a "before" and "after". In other words, what's not so good about the current docs
page, and what you would like to see it become.
Example starter wording is provided. -->
Currently, the version in the repo is "0.19.0", whereas it's "0.20.0" on PyPI.
I would like to propose a change, such that the version is updated here.
# Relevant Context
<!-- Please put here, in bullet points, links to the relevant docs page. A few starting template points are available
to get you started. -->
- [Link to PyPI](https://pypi.org/project/pyjanitor/)
</issue>
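For illustration, the fix amounts to bumping the stale version string in both places so the repository matches PyPI; a minimal sketch:

```python
# janitor/__init__.py
__version__ = "0.20.0"

# setup.py, inside the setup(...) call:
#     version="0.20.0",
```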
<code>
[start of setup.py]
1 import re
2 from pathlib import Path
3
4 from setuptools import setup
5
6
7 def requirements():
8 with open("requirements.txt", "r+") as f:
9 return f.read()
10
11
12 def generate_long_description() -> str:
13 """
14 Extra chunks from README for PyPI description.
15
16 Target chunks must be contained within `.. pypi-doc` pair comments,
17 so there must be an even number of comments in README.
18
19 :returns: Extracted description from README
20
21 """
22 # Read the contents of README file
23 this_directory = Path(__file__).parent
24 with open(this_directory / "README.rst", encoding="utf-8") as f:
25 readme = f.read()
26
27 # Find pypi-doc comments in README
28 indices = [m.start() for m in re.finditer(".. pypi-doc", readme)]
29 if len(indices) % 2 != 0:
30 raise Exception("Odd number of `.. pypi-doc` comments in README")
31
32 # Loop through pairs of comments and save text between pairs
33 long_description = ""
34 for i in range(0, len(indices), 2):
35 start_index = indices[i] + 11
36 end_index = indices[i + 1]
37 long_description += readme[start_index:end_index]
38 return long_description
39
40
41 extra_spark = ["pyspark"]
42 extra_biology = ["biopython"]
43 extra_chemistry = ["rdkit"]
44 extra_engineering = ["unyt"]
45 extra_all = extra_biology + extra_engineering + extra_spark
46
47 setup(
48 name="pyjanitor",
49 version="0.19.0",
50 description="Tools for cleaning pandas DataFrames",
51 author="Eric J. Ma",
52 author_email="[email protected]",
53 url="https://github.com/ericmjl/pyjanitor",
54 packages=["janitor"],
55 install_requires=requirements(),
56 extras_require={
57 "all": extra_all,
58 "biology": extra_biology,
59 # "chemistry": extra_chemistry, should be inserted once rdkit
60 # fixes https://github.com/rdkit/rdkit/issues/1812
61 "engineering": extra_engineering,
62 "spark": extra_spark,
63 },
64 python_requires=">=3.6",
65 long_description=generate_long_description(),
66 long_description_content_type="text/x-rst",
67 )
68
[end of setup.py]
[start of janitor/__init__.py]
1 try:
2 import janitor.xarray
3 except ImportError:
4 pass
5
6 from .functions import * # noqa: F403, F401
7 from .math import *
8 from .ml import get_features_targets as _get_features_targets
9 from .utils import refactored_function
10
11 # from .dataframe import JanitorDataFrame as DataFrame # noqa: F401
12 # from .dataframe import JanitorSeries as Series # noqa: F401
13
14
15 @refactored_function(
16 "get_features_targets() has moved. Please use ml.get_features_targets()."
17 )
18 def get_features_targets(*args, **kwargs):
19 return _get_features_targets(*args, **kwargs)
20
21
22 __version__ = "0.19.0"
23
[end of janitor/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/janitor/__init__.py b/janitor/__init__.py
--- a/janitor/__init__.py
+++ b/janitor/__init__.py
@@ -19,4 +19,4 @@
return _get_features_targets(*args, **kwargs)
-__version__ = "0.19.0"
+__version__ = "0.20.0"
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -46,7 +46,7 @@
setup(
name="pyjanitor",
- version="0.19.0",
+ version="0.20.0",
description="Tools for cleaning pandas DataFrames",
author="Eric J. Ma",
author_email="[email protected]",
| {"golden_diff": "diff --git a/janitor/__init__.py b/janitor/__init__.py\n--- a/janitor/__init__.py\n+++ b/janitor/__init__.py\n@@ -19,4 +19,4 @@\n return _get_features_targets(*args, **kwargs)\n \n \n-__version__ = \"0.19.0\"\n+__version__ = \"0.20.0\"\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -46,7 +46,7 @@\n \n setup(\n name=\"pyjanitor\",\n- version=\"0.19.0\",\n+ version=\"0.20.0\",\n description=\"Tools for cleaning pandas DataFrames\",\n author=\"Eric J. Ma\",\n author_email=\"[email protected]\",\n", "issue": "Version on GitHub different from version on PyPI\n# Brief Description of Fix\r\n\r\n<!-- Please describe the fix in terms of a \"before\" and \"after\". In other words, what's not so good about the current docs\r\npage, and what you would like to see it become.\r\n\r\nExample starter wording is provided. -->\r\n\r\nCurrently, the version in the repo is \"0.19.0\", whereas it's \"0.20.0\" on PyPI.\r\n\r\nI would like to propose a change, such that the version is updated here.\r\n\r\n# Relevant Context\r\n\r\n<!-- Please put here, in bullet points, links to the relevant docs page. A few starting template points are available\r\nto get you started. -->\r\n\r\n- [Link to PyPI](https://pypi.org/project/pyjanitor/)\r\n\n", "before_files": [{"content": "import re\nfrom pathlib import Path\n\nfrom setuptools import setup\n\n\ndef requirements():\n with open(\"requirements.txt\", \"r+\") as f:\n return f.read()\n\n\ndef generate_long_description() -> str:\n \"\"\"\n Extra chunks from README for PyPI description.\n\n Target chunks must be contained within `.. pypi-doc` pair comments,\n so there must be an even number of comments in README.\n\n :returns: Extracted description from README\n\n \"\"\"\n # Read the contents of README file\n this_directory = Path(__file__).parent\n with open(this_directory / \"README.rst\", encoding=\"utf-8\") as f:\n readme = f.read()\n\n # Find pypi-doc comments in README\n indices = [m.start() for m in re.finditer(\".. pypi-doc\", readme)]\n if len(indices) % 2 != 0:\n raise Exception(\"Odd number of `.. pypi-doc` comments in README\")\n\n # Loop through pairs of comments and save text between pairs\n long_description = \"\"\n for i in range(0, len(indices), 2):\n start_index = indices[i] + 11\n end_index = indices[i + 1]\n long_description += readme[start_index:end_index]\n return long_description\n\n\nextra_spark = [\"pyspark\"]\nextra_biology = [\"biopython\"]\nextra_chemistry = [\"rdkit\"]\nextra_engineering = [\"unyt\"]\nextra_all = extra_biology + extra_engineering + extra_spark\n\nsetup(\n name=\"pyjanitor\",\n version=\"0.19.0\",\n description=\"Tools for cleaning pandas DataFrames\",\n author=\"Eric J. 
Ma\",\n author_email=\"[email protected]\",\n url=\"https://github.com/ericmjl/pyjanitor\",\n packages=[\"janitor\"],\n install_requires=requirements(),\n extras_require={\n \"all\": extra_all,\n \"biology\": extra_biology,\n # \"chemistry\": extra_chemistry, should be inserted once rdkit\n # fixes https://github.com/rdkit/rdkit/issues/1812\n \"engineering\": extra_engineering,\n \"spark\": extra_spark,\n },\n python_requires=\">=3.6\",\n long_description=generate_long_description(),\n long_description_content_type=\"text/x-rst\",\n)\n", "path": "setup.py"}, {"content": "try:\n import janitor.xarray\nexcept ImportError:\n pass\n\nfrom .functions import * # noqa: F403, F401\nfrom .math import *\nfrom .ml import get_features_targets as _get_features_targets\nfrom .utils import refactored_function\n\n# from .dataframe import JanitorDataFrame as DataFrame # noqa: F401\n# from .dataframe import JanitorSeries as Series # noqa: F401\n\n\n@refactored_function(\n \"get_features_targets() has moved. Please use ml.get_features_targets().\"\n)\ndef get_features_targets(*args, **kwargs):\n return _get_features_targets(*args, **kwargs)\n\n\n__version__ = \"0.19.0\"\n", "path": "janitor/__init__.py"}]} | 1,551 | 185 |
gh_patches_debug_33758 | rasdani/github-patches | git_diff | kedro-org__kedro-2587 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update default suggestions in `settings.py` to ones that work
## Description
Update docs and default suggestions in `settings.py`, because currently some of those suggestions don't actually work.
Currently, the `BaseSessionStore` is the default session store. The other possible stores a user can use are the `ShelveStore` and the `SQLiteStore` (currently part of viz).
The `ShelveStore` is the default suggestion to override the default in `settings.py`, but when users are using some sort of multiprocessing this store type will not work. See: https://github.com/kedro-org/kedro/issues/1442
Also look at the other default suggestions and verify that they make sense.
(Later consideration, but not part of this work)
If we move the `SQLiteStore` from viz to kedro core, we could add that as the default suggestion instead.
</issue>
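For illustration, a sketch of a safer default suggestion for `settings.py`, matching the eventual fix; `BaseSessionStore` is the actual default and, unlike `ShelveStore`, does not break under multiprocessing (see #1442):

```python
# Class that manages storing KedroSession data.
# from kedro.framework.session.store import BaseSessionStore
# SESSION_STORE_CLASS = BaseSessionStore
# Keyword arguments to pass to the `SESSION_STORE_CLASS` constructor.
# SESSION_STORE_ARGS = {
#     "path": "./sessions"
# }
```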
<code>
[start of kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py]
1 """Project settings. There is no need to edit this file unless you want to change values
2 from the Kedro defaults. For further information, including these default values, see
3 https://kedro.readthedocs.io/en/stable/kedro_project_setup/settings.html."""
4
5 # Instantiated project hooks.
6 # from {{cookiecutter.python_package}}.hooks import ProjectHooks
7 # HOOKS = (ProjectHooks(),)
8
9 # Installed plugins for which to disable hook auto-registration.
10 # DISABLE_HOOKS_FOR_PLUGINS = ("kedro-viz",)
11
12 # Class that manages storing KedroSession data.
13 # from kedro.framework.session.shelvestore import ShelveStore
14 # SESSION_STORE_CLASS = ShelveStore
15 # Keyword arguments to pass to the `SESSION_STORE_CLASS` constructor.
16 # SESSION_STORE_ARGS = {
17 # "path": "./sessions"
18 # }
19
20 # Class that manages Kedro's library components.
21 # from kedro.framework.context import KedroContext
22 # CONTEXT_CLASS = KedroContext
23
24 # Directory that holds configuration.
25 # CONF_SOURCE = "conf"
26
27 # Class that manages how configuration is loaded.
28 # CONFIG_LOADER_CLASS = ConfigLoader
29 # Keyword arguments to pass to the `CONFIG_LOADER_CLASS` constructor.
30 # CONFIG_LOADER_ARGS = {
31 # "config_patterns": {
32 # "spark" : ["spark*/"],
33 # "parameters": ["parameters*", "parameters*/**", "**/parameters*"],
34 # }
35 # }
36
37 # Class that manages the Data Catalog.
38 # from kedro.io import DataCatalog
39 # DATA_CATALOG_CLASS = DataCatalog
40
[end of kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py b/kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py
--- a/kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py
+++ b/kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py
@@ -3,6 +3,7 @@
https://kedro.readthedocs.io/en/stable/kedro_project_setup/settings.html."""
# Instantiated project hooks.
+# For example, after creating a hooks.py and defining a ProjectHooks class there, do
# from {{cookiecutter.python_package}}.hooks import ProjectHooks
# HOOKS = (ProjectHooks(),)
@@ -10,22 +11,19 @@
# DISABLE_HOOKS_FOR_PLUGINS = ("kedro-viz",)
# Class that manages storing KedroSession data.
-# from kedro.framework.session.shelvestore import ShelveStore
-# SESSION_STORE_CLASS = ShelveStore
+# from kedro.framework.session.store import BaseSessionStore
+# SESSION_STORE_CLASS = BaseSessionStore
# Keyword arguments to pass to the `SESSION_STORE_CLASS` constructor.
# SESSION_STORE_ARGS = {
# "path": "./sessions"
# }
-# Class that manages Kedro's library components.
-# from kedro.framework.context import KedroContext
-# CONTEXT_CLASS = KedroContext
-
# Directory that holds configuration.
# CONF_SOURCE = "conf"
# Class that manages how configuration is loaded.
-# CONFIG_LOADER_CLASS = ConfigLoader
+# from kedro.config import OmegaConfigLoader
+# CONFIG_LOADER_CLASS = OmegaConfigLoader
# Keyword arguments to pass to the `CONFIG_LOADER_CLASS` constructor.
# CONFIG_LOADER_ARGS = {
# "config_patterns": {
@@ -34,6 +32,10 @@
# }
# }
+# Class that manages Kedro's library components.
+# from kedro.framework.context import KedroContext
+# CONTEXT_CLASS = KedroContext
+
# Class that manages the Data Catalog.
# from kedro.io import DataCatalog
# DATA_CATALOG_CLASS = DataCatalog
| {"golden_diff": "diff --git a/kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py b/kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py\n--- a/kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py\t\n+++ b/kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py\t\n@@ -3,6 +3,7 @@\n https://kedro.readthedocs.io/en/stable/kedro_project_setup/settings.html.\"\"\"\n \n # Instantiated project hooks.\n+# For example, after creating a hooks.py and defining a ProjectHooks class there, do\n # from {{cookiecutter.python_package}}.hooks import ProjectHooks\n # HOOKS = (ProjectHooks(),)\n \n@@ -10,22 +11,19 @@\n # DISABLE_HOOKS_FOR_PLUGINS = (\"kedro-viz\",)\n \n # Class that manages storing KedroSession data.\n-# from kedro.framework.session.shelvestore import ShelveStore\n-# SESSION_STORE_CLASS = ShelveStore\n+# from kedro.framework.session.store import BaseSessionStore\n+# SESSION_STORE_CLASS = BaseSessionStore\n # Keyword arguments to pass to the `SESSION_STORE_CLASS` constructor.\n # SESSION_STORE_ARGS = {\n # \"path\": \"./sessions\"\n # }\n \n-# Class that manages Kedro's library components.\n-# from kedro.framework.context import KedroContext\n-# CONTEXT_CLASS = KedroContext\n-\n # Directory that holds configuration.\n # CONF_SOURCE = \"conf\"\n \n # Class that manages how configuration is loaded.\n-# CONFIG_LOADER_CLASS = ConfigLoader\n+# from kedro.config import OmegaConfigLoader\n+# CONFIG_LOADER_CLASS = OmegaConfigLoader\n # Keyword arguments to pass to the `CONFIG_LOADER_CLASS` constructor.\n # CONFIG_LOADER_ARGS = {\n # \"config_patterns\": {\n@@ -34,6 +32,10 @@\n # }\n # }\n \n+# Class that manages Kedro's library components.\n+# from kedro.framework.context import KedroContext\n+# CONTEXT_CLASS = KedroContext\n+\n # Class that manages the Data Catalog.\n # from kedro.io import DataCatalog\n # DATA_CATALOG_CLASS = DataCatalog\n", "issue": "Update default suggestions in `settings.py` to ones that work\n## Description\r\nUpdate docs and default suggestions in `settings.py`, because currently some of those suggestions don't actually work. \r\n\r\nCurrently, the `BaseSessionStore` is the default session store. The other possible stores a user can use are the `ShelveStore` and the `SQLiteStore` (currently part of viz).\r\n\r\nThe `ShelveStore` is the default suggestion to override the default in `settings.py`, but when users are using some sort of multiprocessing this store type will not work. See: https://github.com/kedro-org/kedro/issues/1442\r\n\r\nAlso look at the other default suggestions and verify that they make sense. \r\n\r\n(Later consideration, but not part of this work)\r\nIf we move the `SQLiteStore` from viz to kedro core, we could add that as the default suggestion instead. \r\n\n", "before_files": [{"content": "\"\"\"Project settings. There is no need to edit this file unless you want to change values\nfrom the Kedro defaults. 
For further information, including these default values, see\nhttps://kedro.readthedocs.io/en/stable/kedro_project_setup/settings.html.\"\"\"\n\n# Instantiated project hooks.\n# from {{cookiecutter.python_package}}.hooks import ProjectHooks\n# HOOKS = (ProjectHooks(),)\n\n# Installed plugins for which to disable hook auto-registration.\n# DISABLE_HOOKS_FOR_PLUGINS = (\"kedro-viz\",)\n\n# Class that manages storing KedroSession data.\n# from kedro.framework.session.shelvestore import ShelveStore\n# SESSION_STORE_CLASS = ShelveStore\n# Keyword arguments to pass to the `SESSION_STORE_CLASS` constructor.\n# SESSION_STORE_ARGS = {\n# \"path\": \"./sessions\"\n# }\n\n# Class that manages Kedro's library components.\n# from kedro.framework.context import KedroContext\n# CONTEXT_CLASS = KedroContext\n\n# Directory that holds configuration.\n# CONF_SOURCE = \"conf\"\n\n# Class that manages how configuration is loaded.\n# CONFIG_LOADER_CLASS = ConfigLoader\n# Keyword arguments to pass to the `CONFIG_LOADER_CLASS` constructor.\n# CONFIG_LOADER_ARGS = {\n# \"config_patterns\": {\n# \"spark\" : [\"spark*/\"],\n# \"parameters\": [\"parameters*\", \"parameters*/**\", \"**/parameters*\"],\n# }\n# }\n\n# Class that manages the Data Catalog.\n# from kedro.io import DataCatalog\n# DATA_CATALOG_CLASS = DataCatalog\n", "path": "kedro/templates/project/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/settings.py"}]} | 1,164 | 497 |
gh_patches_debug_51452 | rasdani/github-patches | git_diff | lutris__lutris-389 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Create desktop/application menu shortcut writes a bad .desktop file
File contents:
```
[Desktop Entry]
Type=Application
Name=%s
Icon=%s
Exec=lutris lutris:%s
Categories=Game
```
**How to reproduce**
Right click a game and select Create desktop shortcut.
Navigate to ~/Desktop
You see a file named `gameslug-id.desktop`, but it contains what's shown above. A file manager displays the launcher's Name field (normally the game title) instead of the filename, so the entry appears as `%s` there.
**Lutris debug output**
```
[system]:Executing which xdg-user-dir
```
Operating system: Arch Linux
</issue>
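For illustration, the root cause is mixing printf-style `%s` placeholders with `str.format()`, which only substitutes `{}` fields and leaves `%s` untouched; a minimal runnable sketch:

```python
print("Name=%s".format("My Game"))  # -> Name=%s      (the bug)
print("Name={}".format("My Game"))  # -> Name=My Game (the fix)
```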
<code>
[start of lutris/shortcuts.py]
1 """Desktop file creator."""
2 import os
3 import stat
4 import shutil
5 import subprocess
6
7 from textwrap import dedent
8 from xdg import BaseDirectory
9 from gi.repository import GLib
10
11 from lutris.util import system
12 from lutris.util.log import logger
13 from lutris.settings import CACHE_DIR
14
15
16 def get_xdg_basename(game_slug, game_id, legacy=False):
17 if legacy:
18 filename = "{}.desktop".format(game_slug)
19 else:
20 filename = "{}-{}.desktop".format(game_slug, game_id)
21 return filename
22
23
24 def create_launcher(game_slug, game_id, game_name, desktop=False, menu=False):
25 """Create a .desktop file."""
26 desktop_dir = (
27 GLib.get_user_special_dir(GLib.UserDirectory.DIRECTORY_DESKTOP)
28 )
29 launcher_content = dedent(
30 """
31 [Desktop Entry]
32 Type=Application
33 Name=%s
34 Icon=%s
35 Exec=lutris lutris:%s
36 Categories=Game
37 """.format(game_name, 'lutris_{}'.format(game_slug), game_id)
38 )
39
40 launcher_filename = get_xdg_basename(game_slug, game_id, legacy=False)
41 tmp_launcher_path = os.path.join(CACHE_DIR, launcher_filename)
42 tmp_launcher = open(tmp_launcher_path, "w")
43 tmp_launcher.write(launcher_content)
44 tmp_launcher.close()
45 os.chmod(tmp_launcher_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC |
46 stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP)
47
48 if desktop:
49 shutil.copy(tmp_launcher_path,
50 os.path.join(desktop_dir, launcher_filename))
51 if menu:
52 menu_path = os.path.join(GLib.get_user_data_dir(), 'applications')
53 shutil.copy(tmp_launcher_path,
54 os.path.join(menu_path, launcher_filename))
55 os.remove(tmp_launcher_path)
56
57
58 def get_launcher_path(game_slug, game_id):
59 """Return the path of a XDG game launcher.
60 When legacy is set, it will return the old path with only the slug,
61 otherwise it will return the path with slug + id
62 """
63 xdg_executable = 'xdg-user-dir'
64 if not system.find_executable(xdg_executable):
65 logger.error("%s not found", xdg_executable)
66 return
67 desktop_dir = subprocess.Popen([xdg_executable, 'DESKTOP'],
68 stdout=subprocess.PIPE).communicate()[0]
69 desktop_dir = str(desktop_dir).strip()
70
71 legacy_launcher_path = os.path.join(
72 desktop_dir, get_xdg_basename(game_slug, game_id, legacy=True)
73 )
74 # First check if legacy path exists, for backward compatibility
75 if system.path_exists(legacy_launcher_path):
76 return legacy_launcher_path
77 # Otherwise return new path, whether it exists or not
78 return os.path.join(
79 desktop_dir, get_xdg_basename(game_slug, game_id, legacy=False)
80 )
81
82
83 def get_menu_launcher_path(game_slug, game_id):
84 """Return the path to a XDG menu launcher, prioritizing legacy paths if
85 they exist
86 """
87 menu_dir = os.path.join(BaseDirectory.xdg_data_home, 'applications')
88 menu_path = os.path.join(
89 menu_dir, get_xdg_basename(game_slug, game_id, legacy=True)
90 )
91 if system.path_exists(menu_path):
92 return menu_path
93 return os.path.join(
94 menu_dir, get_xdg_basename(game_slug, game_id, legacy=False)
95 )
96
97
98 def desktop_launcher_exists(game_slug, game_id):
99 return system.path_exists(get_launcher_path(game_slug, game_id))
100
101
102 def menu_launcher_exists(game_slug, game_id):
103 return system.path_exists(get_menu_launcher_path(game_slug, game_id))
104
105
106 def remove_launcher(game_slug, game_id, desktop=False, menu=False):
107 """Remove existing .desktop file."""
108 if desktop:
109 launcher_path = get_launcher_path(game_slug, game_id)
110 if system.path_exists(launcher_path):
111 os.remove(launcher_path)
112
113 if menu:
114 menu_path = get_menu_launcher_path(game_slug, game_id)
115 if system.path_exists(menu_path):
116 os.remove(menu_path)
117
[end of lutris/shortcuts.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lutris/shortcuts.py b/lutris/shortcuts.py
--- a/lutris/shortcuts.py
+++ b/lutris/shortcuts.py
@@ -30,9 +30,9 @@
"""
[Desktop Entry]
Type=Application
- Name=%s
- Icon=%s
- Exec=lutris lutris:%s
+ Name={}
+ Icon={}
+ Exec=lutris lutris:{}
Categories=Game
""".format(game_name, 'lutris_{}'.format(game_slug), game_id)
)
| {"golden_diff": "diff --git a/lutris/shortcuts.py b/lutris/shortcuts.py\n--- a/lutris/shortcuts.py\n+++ b/lutris/shortcuts.py\n@@ -30,9 +30,9 @@\n \"\"\"\n [Desktop Entry]\n Type=Application\n- Name=%s\n- Icon=%s\n- Exec=lutris lutris:%s\n+ Name={}\n+ Icon={}\n+ Exec=lutris lutris:{}\n Categories=Game\n \"\"\".format(game_name, 'lutris_{}'.format(game_slug), game_id)\n )\n", "issue": "Create desktop/application menu shortcut writes a bad .desktop file\nFile contents:\n\n```\n[Desktop Entry]\nType=Application\nName=%s\nIcon=%s\nExec=lutris lutris:%s\nCategories=Game\n```\n\n**How to reproduce**\nRight click a game and select Create desktop shortcut.\nNavigate to ~/Desktop\nYou see a file with name `gameslug-id.desktop` but it contains what's above. If you're in a file manager you see the game title instead of the filename, so it appears as `%s` there.\n\n**Lutris debug output**\n\n```\n[system]:Executing which xdg-user-dir\n```\n\nOperating system: Arch Linux\n\n", "before_files": [{"content": "\"\"\"Desktop file creator.\"\"\"\nimport os\nimport stat\nimport shutil\nimport subprocess\n\nfrom textwrap import dedent\nfrom xdg import BaseDirectory\nfrom gi.repository import GLib\n\nfrom lutris.util import system\nfrom lutris.util.log import logger\nfrom lutris.settings import CACHE_DIR\n\n\ndef get_xdg_basename(game_slug, game_id, legacy=False):\n if legacy:\n filename = \"{}.desktop\".format(game_slug)\n else:\n filename = \"{}-{}.desktop\".format(game_slug, game_id)\n return filename\n\n\ndef create_launcher(game_slug, game_id, game_name, desktop=False, menu=False):\n \"\"\"Create a .desktop file.\"\"\"\n desktop_dir = (\n GLib.get_user_special_dir(GLib.UserDirectory.DIRECTORY_DESKTOP)\n )\n launcher_content = dedent(\n \"\"\"\n [Desktop Entry]\n Type=Application\n Name=%s\n Icon=%s\n Exec=lutris lutris:%s\n Categories=Game\n \"\"\".format(game_name, 'lutris_{}'.format(game_slug), game_id)\n )\n\n launcher_filename = get_xdg_basename(game_slug, game_id, legacy=False)\n tmp_launcher_path = os.path.join(CACHE_DIR, launcher_filename)\n tmp_launcher = open(tmp_launcher_path, \"w\")\n tmp_launcher.write(launcher_content)\n tmp_launcher.close()\n os.chmod(tmp_launcher_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC |\n stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP)\n\n if desktop:\n shutil.copy(tmp_launcher_path,\n os.path.join(desktop_dir, launcher_filename))\n if menu:\n menu_path = os.path.join(GLib.get_user_data_dir(), 'applications')\n shutil.copy(tmp_launcher_path,\n os.path.join(menu_path, launcher_filename))\n os.remove(tmp_launcher_path)\n\n\ndef get_launcher_path(game_slug, game_id):\n \"\"\"Return the path of a XDG game launcher.\n When legacy is set, it will return the old path with only the slug,\n otherwise it will return the path with slug + id\n \"\"\"\n xdg_executable = 'xdg-user-dir'\n if not system.find_executable(xdg_executable):\n logger.error(\"%s not found\", xdg_executable)\n return\n desktop_dir = subprocess.Popen([xdg_executable, 'DESKTOP'],\n stdout=subprocess.PIPE).communicate()[0]\n desktop_dir = str(desktop_dir).strip()\n\n legacy_launcher_path = os.path.join(\n desktop_dir, get_xdg_basename(game_slug, game_id, legacy=True)\n )\n # First check if legacy path exists, for backward compatibility\n if system.path_exists(legacy_launcher_path):\n return legacy_launcher_path\n # Otherwise return new path, whether it exists or not\n return os.path.join(\n desktop_dir, get_xdg_basename(game_slug, game_id, legacy=False)\n )\n\n\ndef get_menu_launcher_path(game_slug, 
game_id):\n \"\"\"Return the path to a XDG menu launcher, prioritizing legacy paths if\n they exist\n \"\"\"\n menu_dir = os.path.join(BaseDirectory.xdg_data_home, 'applications')\n menu_path = os.path.join(\n menu_dir, get_xdg_basename(game_slug, game_id, legacy=True)\n )\n if system.path_exists(menu_path):\n return menu_path\n return os.path.join(\n menu_dir, get_xdg_basename(game_slug, game_id, legacy=False)\n )\n\n\ndef desktop_launcher_exists(game_slug, game_id):\n return system.path_exists(get_launcher_path(game_slug, game_id))\n\n\ndef menu_launcher_exists(game_slug, game_id):\n return system.path_exists(get_menu_launcher_path(game_slug, game_id))\n\n\ndef remove_launcher(game_slug, game_id, desktop=False, menu=False):\n \"\"\"Remove existing .desktop file.\"\"\"\n if desktop:\n launcher_path = get_launcher_path(game_slug, game_id)\n if system.path_exists(launcher_path):\n os.remove(launcher_path)\n\n if menu:\n menu_path = get_menu_launcher_path(game_slug, game_id)\n if system.path_exists(menu_path):\n os.remove(menu_path)\n", "path": "lutris/shortcuts.py"}]} | 1,815 | 131 |
gh_patches_debug_4426 | rasdani/github-patches | git_diff | OpenNMT__OpenNMT-tf-569 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error while running the exported model
Hi,
I was trying to run the example given at https://github.com/OpenNMT/OpenNMT-tf/tree/master/examples/serving/python.
I am getting the following error.
> Source: I am going.
Traceback (most recent call last):
File "ende_client.py", line 66, in <module>
main()
File "ende_client.py", line 60, in main
output = translator.translate([text])
File "ende_client.py", line 22, in translate
return self._postprocess(outputs)
File "ende_client.py", line 47, in _postprocess
texts.append(self._tokenizer.detokenize(tokens))
TypeError: detokenize(): incompatible function arguments. The following argument types are supported:
1. (self: pyonmttok.Tokenizer, tokens: list, features: object = None) -> str
> Invoked with: <pyonmttok.Tokenizer object at 0x147d10d0d538>, array([b'\xe2\x96\x81Ich', b'\xe2\x96\x81gehe', b'.'], dtype=object)
> WARNING:tensorflow:Unresolved object in checkpoint: (root).examples_inputter.features_inputter.ids_to_tokens._initializer
> WARNING:tensorflow:Unresolved object in checkpoint: (root).examples_inputter.labels_inputter.ids_to_tokens._initializer
> WARNING:tensorflow:A checkpoint was restored (e.g. tf.train.Checkpoint.restore or tf.keras.Model.load_weights) but not all checkpointed values were used. See above for specific issues. Use expect_partial() on the load status object, e.g. tf.train.Checkpoint.restore(...).expect_partial(), to silence these warnings, or use assert_consumed() to make the check explicit. See https://www.tensorflow.org/alpha/guide/checkpoints#loading_mechanics for details.
>
I have the updated version of pyonmttok.
Thanks,
Sriram
</issue>
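For illustration, `detokenize()` accepts a plain Python list but not a numpy array, so the fix is a `.tolist()` conversion; a minimal sketch reproducing the traceback's input:

```python
import numpy as np

# The exact array from the traceback; pyonmttok rejects it as-is.
arr = np.array([b"\xe2\x96\x81Ich", b"\xe2\x96\x81gehe", b"."], dtype=object)
tokens = arr.tolist()  # a plain list is acceptable to detokenize()

# In EnDeTranslator._postprocess the corresponding one-line change is:
#     tokens = tokens[0][:length[0]].tolist()
```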
<code>
[start of examples/serving/python/ende_client.py]
1 import argparse
2 import os
3
4 import tensorflow as tf
5 import tensorflow_addons as tfa # Register TensorFlow Addons kernels.
6
7 import pyonmttok
8
9
10 class EnDeTranslator(object):
11
12 def __init__(self, export_dir):
13 imported = tf.saved_model.load(export_dir)
14 self._translate_fn = imported.signatures["serving_default"]
15 sp_model_path = os.path.join(export_dir, "assets.extra", "wmtende.model")
16 self._tokenizer = pyonmttok.Tokenizer("none", sp_model_path=sp_model_path)
17
18 def translate(self, texts):
19 """Translates a batch of texts."""
20 inputs = self._preprocess(texts)
21 outputs = self._translate_fn(**inputs)
22 return self._postprocess(outputs)
23
24 def _preprocess(self, texts):
25 all_tokens = []
26 lengths = []
27 max_length = 0
28 for text in texts:
29 tokens, _ = self._tokenizer.tokenize(text)
30 length = len(tokens)
31 all_tokens.append(tokens)
32 lengths.append(length)
33 max_length = max(max_length, length)
34 for tokens, length in zip(all_tokens, lengths):
35 if length < max_length:
36 tokens += [""] * (max_length - length)
37
38 inputs = {
39 "tokens": tf.constant(all_tokens, dtype=tf.string),
40 "length": tf.constant(lengths, dtype=tf.int32)}
41 return inputs
42
43 def _postprocess(self, outputs):
44 texts = []
45 for tokens, length in zip(outputs["tokens"].numpy(), outputs["length"].numpy()):
46 tokens = tokens[0][:length[0]]
47 texts.append(self._tokenizer.detokenize(tokens))
48 return texts
49
50
51 def main():
52 parser = argparse.ArgumentParser(description="Translation client example")
53 parser.add_argument("export_dir", help="Saved model directory")
54 args = parser.parse_args()
55
56 translator = EnDeTranslator(args.export_dir)
57
58 while True:
59 text = input("Source: ")
60 output = translator.translate([text])
61 print("Target: %s" % output[0])
62 print("")
63
64
65 if __name__ == "__main__":
66 main()
67
[end of examples/serving/python/ende_client.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/serving/python/ende_client.py b/examples/serving/python/ende_client.py
--- a/examples/serving/python/ende_client.py
+++ b/examples/serving/python/ende_client.py
@@ -43,7 +43,7 @@
def _postprocess(self, outputs):
texts = []
for tokens, length in zip(outputs["tokens"].numpy(), outputs["length"].numpy()):
- tokens = tokens[0][:length[0]]
+ tokens = tokens[0][:length[0]].tolist()
texts.append(self._tokenizer.detokenize(tokens))
return texts
| {"golden_diff": "diff --git a/examples/serving/python/ende_client.py b/examples/serving/python/ende_client.py\n--- a/examples/serving/python/ende_client.py\n+++ b/examples/serving/python/ende_client.py\n@@ -43,7 +43,7 @@\n def _postprocess(self, outputs):\n texts = []\n for tokens, length in zip(outputs[\"tokens\"].numpy(), outputs[\"length\"].numpy()):\n- tokens = tokens[0][:length[0]]\n+ tokens = tokens[0][:length[0]].tolist()\n texts.append(self._tokenizer.detokenize(tokens))\n return texts\n", "issue": "Error while running the exported model \nHi,\r\n\r\nI was trying to run the example given [https://github.com/OpenNMT/OpenNMT-tf/tree/master/examples/serving/python](url).\r\n\r\nI am getting the following error.\r\n\r\n> Source: I am going.\r\nTraceback (most recent call last):\r\n File \"ende_client.py\", line 66, in <module>\r\n main()\r\n File \"ende_client.py\", line 60, in main\r\n output = translator.translate([text])\r\n File \"ende_client.py\", line 22, in translate\r\n return self._postprocess(outputs)\r\n File \"ende_client.py\", line 47, in _postprocess\r\n texts.append(self._tokenizer.detokenize(tokens))\r\nTypeError: detokenize(): incompatible function arguments. The following argument types are supported:\r\n 1. (self: pyonmttok.Tokenizer, tokens: list, features: object = None) -> str\r\n\r\n> Invoked with: <pyonmttok.Tokenizer object at 0x147d10d0d538>, array([b'\\xe2\\x96\\x81Ich', b'\\xe2\\x96\\x81gehe', b'.'], dtype=object)\r\n> WARNING:tensorflow:Unresolved object in checkpoint: (root).examples_inputter.features_inputter.ids_to_tokens._initializer\r\n> WARNING:tensorflow:Unresolved object in checkpoint: (root).examples_inputter.labels_inputter.ids_to_tokens._initializer\r\n> WARNING:tensorflow:A checkpoint was restored (e.g. tf.train.Checkpoint.restore or tf.keras.Model.load_weights) but not all checkpointed values were used. See above for specific issues. Use expect_partial() on the load status object, e.g. tf.train.Checkpoint.restore(...).expect_partial(), to silence these warnings, or use assert_consumed() to make the check explicit. 
See https://www.tensorflow.org/alpha/guide/checkpoints#loading_mechanics for details.\r\n> \r\n\r\nI have the updated version of pyonmttok.\r\n\r\nThanks,\r\nSriram\n", "before_files": [{"content": "import argparse\nimport os\n\nimport tensorflow as tf\nimport tensorflow_addons as tfa # Register TensorFlow Addons kernels.\n\nimport pyonmttok\n\n\nclass EnDeTranslator(object):\n\n def __init__(self, export_dir):\n imported = tf.saved_model.load(export_dir)\n self._translate_fn = imported.signatures[\"serving_default\"]\n sp_model_path = os.path.join(export_dir, \"assets.extra\", \"wmtende.model\")\n self._tokenizer = pyonmttok.Tokenizer(\"none\", sp_model_path=sp_model_path)\n\n def translate(self, texts):\n \"\"\"Translates a batch of texts.\"\"\"\n inputs = self._preprocess(texts)\n outputs = self._translate_fn(**inputs)\n return self._postprocess(outputs)\n\n def _preprocess(self, texts):\n all_tokens = []\n lengths = []\n max_length = 0\n for text in texts:\n tokens, _ = self._tokenizer.tokenize(text)\n length = len(tokens)\n all_tokens.append(tokens)\n lengths.append(length)\n max_length = max(max_length, length)\n for tokens, length in zip(all_tokens, lengths):\n if length < max_length:\n tokens += [\"\"] * (max_length - length)\n\n inputs = {\n \"tokens\": tf.constant(all_tokens, dtype=tf.string),\n \"length\": tf.constant(lengths, dtype=tf.int32)}\n return inputs\n\n def _postprocess(self, outputs):\n texts = []\n for tokens, length in zip(outputs[\"tokens\"].numpy(), outputs[\"length\"].numpy()):\n tokens = tokens[0][:length[0]]\n texts.append(self._tokenizer.detokenize(tokens))\n return texts\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Translation client example\")\n parser.add_argument(\"export_dir\", help=\"Saved model directory\")\n args = parser.parse_args()\n\n translator = EnDeTranslator(args.export_dir)\n\n while True:\n text = input(\"Source: \")\n output = translator.translate([text])\n print(\"Target: %s\" % output[0])\n print(\"\")\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "examples/serving/python/ende_client.py"}]} | 1,564 | 130 |
gh_patches_debug_22338 | rasdani/github-patches | git_diff | Kinto__kinto-554 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
id and last_modified should be stripped before validating the JSON schema
Otherwise it obliges everyone to add `id` and `last_modified` to their JSON schema or use `additionalProperties: true`.
- http://spacetelescope.github.io/understanding-json-schema/reference/object.html#properties
- See #256
- See #548
``` diff
try:
- jsonschema.validate(new, schema)
+ stripped = copy.deepcopy(new)
+ stripped.pop(self.model.id_field, None)
+ stripped.pop(self.model.modified_field, None)
+ jsonschema.validate(stripped, schema)
```
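
Putting the snippet above in context, a minimal sketch of the `process_record` method with the stripping applied (hypothetical illustration — the field names come from cliquet's resource model, and `copy`/`jsonschema` are assumed to be imported at module top):

```python
import copy  # assumed at module top

def process_record(self, new, old=None):  # method on the Record resource
    new = super(Record, self).process_record(new, old)
    schema = self._collection.get('schema')
    if not schema:
        return new
    # Validate a copy so the stored record keeps its server-managed fields.
    stripped = copy.deepcopy(new)
    stripped.pop(self.model.id_field, None)        # typically 'id'
    stripped.pop(self.model.modified_field, None)  # typically 'last_modified'
    jsonschema.validate(stripped, schema)
    return new
```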
</issue>
<code>
[start of kinto/views/records.py]
1 import jsonschema
2 from cliquet import resource
3 from cliquet.errors import raise_invalid
4 from jsonschema import exceptions as jsonschema_exceptions
5 from pyramid.security import Authenticated
6 from pyramid.settings import asbool
7
8 from kinto.views import object_exists_or_404
9
10
11 class RecordSchema(resource.ResourceSchema):
12 class Options:
13 preserve_unknown = True
14
15
16 _parent_path = '/buckets/{{bucket_id}}/collections/{{collection_id}}'
17
18
19 @resource.register(name='record',
20 collection_path=_parent_path + '/records',
21 record_path=_parent_path + '/records/{{id}}')
22 class Record(resource.ShareableResource):
23
24 mapping = RecordSchema()
25 schema_field = 'schema'
26
27 def __init__(self, *args, **kwargs):
28 super(Record, self).__init__(*args, **kwargs)
29
30 # Check if already fetched before (in batch).
31 collections = self.request.bound_data.setdefault('collections', {})
32 collection_uri = self.get_parent_id(self.request)
33 if collection_uri not in collections:
34 # Unknown yet, fetch from storage.
35 collection_parent_id = '/buckets/%s' % self.bucket_id
36 collection = object_exists_or_404(self.request,
37 collection_id='collection',
38 parent_id=collection_parent_id,
39 object_id=self.collection_id)
40 collections[collection_uri] = collection
41
42 self._collection = collections[collection_uri]
43
44 def get_parent_id(self, request):
45 self.bucket_id = request.matchdict['bucket_id']
46 self.collection_id = request.matchdict['collection_id']
47 return '/buckets/%s/collections/%s' % (self.bucket_id,
48 self.collection_id)
49
50 def is_known_field(self, field_name):
51 """Without schema, any field is considered as known."""
52 return True
53
54 def process_record(self, new, old=None):
55 """Validate records against collection schema, if any."""
56 new = super(Record, self).process_record(new, old)
57
58 schema = self._collection.get('schema')
59 settings = self.request.registry.settings
60 schema_validation = 'experimental_collection_schema_validation'
61 if not schema or not asbool(settings.get(schema_validation)):
62 return new
63
64 collection_timestamp = self._collection[self.model.modified_field]
65
66 try:
67 jsonschema.validate(new, schema)
68 new[self.schema_field] = collection_timestamp
69 except jsonschema_exceptions.ValidationError as e:
70 field = e.path.pop() if e.path else e.validator_value.pop()
71 raise_invalid(self.request, name=field, description=e.message)
72
73 return new
74
75 def collection_get(self):
76 result = super(Record, self).collection_get()
77 self._handle_cache_expires(self.request.response)
78 return result
79
80 def get(self):
81 result = super(Record, self).get()
82 self._handle_cache_expires(self.request.response)
83 return result
84
85 def _handle_cache_expires(self, response):
86 """If the parent collection defines a ``cache_expires`` attribute,
87 then cache-control response headers are sent.
88
89 .. note::
90
91 Those headers are also sent if the
92 ``kinto.record_cache_expires_seconds`` setting is defined.
93 """
94 is_anonymous = Authenticated not in self.request.effective_principals
95 if not is_anonymous:
96 return
97
98 cache_expires = self._collection.get('cache_expires')
99 if cache_expires is None:
100 by_bucket = 'kinto.%s_record_cache_expires_seconds' % (
101 self.bucket_id)
102 by_collection = '%s_%s_record_cache_expires_seconds' % (
103 self.bucket_id, self.collection_id)
104 settings = self.request.registry.settings
105 cache_expires = settings.get(by_collection,
106 settings.get(by_bucket))
107
108 if cache_expires is not None:
109 response.cache_expires(seconds=cache_expires)
110
[end of kinto/views/records.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kinto/views/records.py b/kinto/views/records.py
--- a/kinto/views/records.py
+++ b/kinto/views/records.py
@@ -1,3 +1,5 @@
+import copy
+
import jsonschema
from cliquet import resource
from cliquet.errors import raise_invalid
@@ -64,12 +66,17 @@
collection_timestamp = self._collection[self.model.modified_field]
try:
- jsonschema.validate(new, schema)
- new[self.schema_field] = collection_timestamp
+ stripped = copy.deepcopy(new)
+ stripped.pop(self.model.id_field, None)
+ stripped.pop(self.model.modified_field, None)
+ stripped.pop(self.model.permissions_field, None)
+ stripped.pop(self.schema_field, None)
+ jsonschema.validate(stripped, schema)
except jsonschema_exceptions.ValidationError as e:
field = e.path.pop() if e.path else e.validator_value.pop()
raise_invalid(self.request, name=field, description=e.message)
+ new[self.schema_field] = collection_timestamp
return new
def collection_get(self):
| {"golden_diff": "diff --git a/kinto/views/records.py b/kinto/views/records.py\n--- a/kinto/views/records.py\n+++ b/kinto/views/records.py\n@@ -1,3 +1,5 @@\n+import copy\n+\n import jsonschema\n from cliquet import resource\n from cliquet.errors import raise_invalid\n@@ -64,12 +66,17 @@\n collection_timestamp = self._collection[self.model.modified_field]\n \n try:\n- jsonschema.validate(new, schema)\n- new[self.schema_field] = collection_timestamp\n+ stripped = copy.deepcopy(new)\n+ stripped.pop(self.model.id_field, None)\n+ stripped.pop(self.model.modified_field, None)\n+ stripped.pop(self.model.permissions_field, None)\n+ stripped.pop(self.schema_field, None)\n+ jsonschema.validate(stripped, schema)\n except jsonschema_exceptions.ValidationError as e:\n field = e.path.pop() if e.path else e.validator_value.pop()\n raise_invalid(self.request, name=field, description=e.message)\n \n+ new[self.schema_field] = collection_timestamp\n return new\n \n def collection_get(self):\n", "issue": "id and last_modified should be stripped before validating the JSON schema\nOtherwise it obliges everyone to add `id` and `last_modified` to their JSON schema or use `additionalProperties : true`.\n- http://spacetelescope.github.io/understanding-json-schema/reference/object.html#properties\n- See #256 \n- See #548 \n\n``` diff\n try:\n- jsonschema.validate(new, schema)\n+ stripped = copy.deepcopy(new)\n+ stripped.pop(self.model.id_field, None)\n+ stripped.pop(self.model.modified_field, None)\n+ jsonschema.validate(stripped, schema)\n```\n\nid and last_modified should be stripped before validating the JSON schema\nOtherwise it obliges everyone to add `id` and `last_modified` to their JSON schema or use `additionalProperties : true`.\n- http://spacetelescope.github.io/understanding-json-schema/reference/object.html#properties\n- See #256 \n- See #548 \n\n``` diff\n try:\n- jsonschema.validate(new, schema)\n+ stripped = copy.deepcopy(new)\n+ stripped.pop(self.model.id_field, None)\n+ stripped.pop(self.model.modified_field, None)\n+ jsonschema.validate(stripped, schema)\n```\n\n", "before_files": [{"content": "import jsonschema\nfrom cliquet import resource\nfrom cliquet.errors import raise_invalid\nfrom jsonschema import exceptions as jsonschema_exceptions\nfrom pyramid.security import Authenticated\nfrom pyramid.settings import asbool\n\nfrom kinto.views import object_exists_or_404\n\n\nclass RecordSchema(resource.ResourceSchema):\n class Options:\n preserve_unknown = True\n\n\n_parent_path = '/buckets/{{bucket_id}}/collections/{{collection_id}}'\n\n\[email protected](name='record',\n collection_path=_parent_path + '/records',\n record_path=_parent_path + '/records/{{id}}')\nclass Record(resource.ShareableResource):\n\n mapping = RecordSchema()\n schema_field = 'schema'\n\n def __init__(self, *args, **kwargs):\n super(Record, self).__init__(*args, **kwargs)\n\n # Check if already fetched before (in batch).\n collections = self.request.bound_data.setdefault('collections', {})\n collection_uri = self.get_parent_id(self.request)\n if collection_uri not in collections:\n # Unknown yet, fetch from storage.\n collection_parent_id = '/buckets/%s' % self.bucket_id\n collection = object_exists_or_404(self.request,\n collection_id='collection',\n parent_id=collection_parent_id,\n object_id=self.collection_id)\n collections[collection_uri] = collection\n\n self._collection = collections[collection_uri]\n\n def get_parent_id(self, request):\n self.bucket_id = request.matchdict['bucket_id']\n self.collection_id = 
request.matchdict['collection_id']\n return '/buckets/%s/collections/%s' % (self.bucket_id,\n self.collection_id)\n\n def is_known_field(self, field_name):\n \"\"\"Without schema, any field is considered as known.\"\"\"\n return True\n\n def process_record(self, new, old=None):\n \"\"\"Validate records against collection schema, if any.\"\"\"\n new = super(Record, self).process_record(new, old)\n\n schema = self._collection.get('schema')\n settings = self.request.registry.settings\n schema_validation = 'experimental_collection_schema_validation'\n if not schema or not asbool(settings.get(schema_validation)):\n return new\n\n collection_timestamp = self._collection[self.model.modified_field]\n\n try:\n jsonschema.validate(new, schema)\n new[self.schema_field] = collection_timestamp\n except jsonschema_exceptions.ValidationError as e:\n field = e.path.pop() if e.path else e.validator_value.pop()\n raise_invalid(self.request, name=field, description=e.message)\n\n return new\n\n def collection_get(self):\n result = super(Record, self).collection_get()\n self._handle_cache_expires(self.request.response)\n return result\n\n def get(self):\n result = super(Record, self).get()\n self._handle_cache_expires(self.request.response)\n return result\n\n def _handle_cache_expires(self, response):\n \"\"\"If the parent collection defines a ``cache_expires`` attribute,\n then cache-control response headers are sent.\n\n .. note::\n\n Those headers are also sent if the\n ``kinto.record_cache_expires_seconds`` setting is defined.\n \"\"\"\n is_anonymous = Authenticated not in self.request.effective_principals\n if not is_anonymous:\n return\n\n cache_expires = self._collection.get('cache_expires')\n if cache_expires is None:\n by_bucket = 'kinto.%s_record_cache_expires_seconds' % (\n self.bucket_id)\n by_collection = '%s_%s_record_cache_expires_seconds' % (\n self.bucket_id, self.collection_id)\n settings = self.request.registry.settings\n cache_expires = settings.get(by_collection,\n settings.get(by_bucket))\n\n if cache_expires is not None:\n response.cache_expires(seconds=cache_expires)\n", "path": "kinto/views/records.py"}]} | 1,843 | 243 |
gh_patches_debug_31727 | rasdani/github-patches | git_diff | onnx__onnx-5555 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use pillow to replace opencv in reference evaluator
Caveat: https://github.com/python-pillow/Pillow/issues/6047#issuecomment-1038150443
cc @jcwchen
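
A rough sketch of what a Pillow-based decode path could look like (assumptions: `PIL.Image.open` over an `io.BytesIO` of the encoded bytes; BGR produced by reversing the channel axis; per the caveat linked above, grayscale needs an explicit `convert("L")`):

```python
import io

import numpy as np
import PIL.Image


def decode(encoded: np.ndarray, pixel_format: str = "RGB") -> np.ndarray:
    img = PIL.Image.open(io.BytesIO(encoded.tobytes()))
    if pixel_format == "RGB":
        return np.array(img)
    if pixel_format == "BGR":
        return np.array(img)[:, :, ::-1]  # reverse the channel axis
    if pixel_format == "Grayscale":
        gray = np.array(img.convert("L"))
        return np.expand_dims(gray, axis=2)  # (H, W) -> (H, W, 1)
    raise ValueError(f"pixel_format={pixel_format!r} is not supported.")
```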
</issue>
<code>
[start of onnx/reference/ops/op_image_decoder.py]
1 # Copyright (c) ONNX Project Contributors
2
3 # SPDX-License-Identifier: Apache-2.0
4 # pylint: disable=C0123,C3001,R0912,R0913,R0914,R1730,W0221,W0613
5
6 import numpy as np
7
8 from onnx.reference.op_run import OpRun
9
10
11 class ImageDecoder(OpRun):
12 def _run( # type: ignore
13 self,
14 encoded,
15 pixel_format="RGB",
16 ):
17 try:
18 # pylint: disable=import-outside-toplevel`
19 import cv2
20 except ImportError as e:
21 raise ImportError(
22 "opencv-python must be installed to use the reference implementation of the ImageDecoder operator"
23 ) from e
24 decoded = None
25 if pixel_format == "BGR":
26 decoded = cv2.imdecode(encoded, cv2.IMREAD_COLOR)
27 elif pixel_format == "RGB":
28 decoded = cv2.imdecode(encoded, cv2.IMREAD_COLOR)
29 decoded = cv2.cvtColor(decoded, cv2.COLOR_BGR2RGB)
30 elif pixel_format == "Grayscale":
31 decoded = cv2.imdecode(encoded, cv2.IMREAD_GRAYSCALE)
32 decoded = np.expand_dims(decoded, axis=2) # (H, W) to (H, W, 1)
33 else:
34 raise RuntimeError(f"pixel_format={pixel_format!r} is not supported.")
35 return (decoded,)
36
[end of onnx/reference/ops/op_image_decoder.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/onnx/reference/ops/op_image_decoder.py b/onnx/reference/ops/op_image_decoder.py
--- a/onnx/reference/ops/op_image_decoder.py
+++ b/onnx/reference/ops/op_image_decoder.py
@@ -1,7 +1,10 @@
# Copyright (c) ONNX Project Contributors
# SPDX-License-Identifier: Apache-2.0
-# pylint: disable=C0123,C3001,R0912,R0913,R0914,R1730,W0221,W0613
+
+from __future__ import annotations
+
+import io
import numpy as np
@@ -9,27 +12,22 @@
class ImageDecoder(OpRun):
- def _run( # type: ignore
- self,
- encoded,
- pixel_format="RGB",
- ):
+ def _run(self, encoded: np.ndarray, pixel_format="RGB") -> tuple[np.ndarray]: # type: ignore
try:
- # pylint: disable=import-outside-toplevel`
- import cv2
+ import PIL.Image # pylint: disable=import-outside-toplevel
except ImportError as e:
raise ImportError(
- "opencv-python must be installed to use the reference implementation of the ImageDecoder operator"
+ "Pillow must be installed to use the reference implementation of the ImageDecoder operator"
) from e
- decoded = None
+ img = PIL.Image.open(io.BytesIO(encoded.tobytes()))
if pixel_format == "BGR":
- decoded = cv2.imdecode(encoded, cv2.IMREAD_COLOR)
+ decoded = np.array(img)[:, :, ::-1]
elif pixel_format == "RGB":
- decoded = cv2.imdecode(encoded, cv2.IMREAD_COLOR)
- decoded = cv2.cvtColor(decoded, cv2.COLOR_BGR2RGB)
+ decoded = np.array(img)
elif pixel_format == "Grayscale":
- decoded = cv2.imdecode(encoded, cv2.IMREAD_GRAYSCALE)
+ img = img.convert("L")
+ decoded = np.array(img)
decoded = np.expand_dims(decoded, axis=2) # (H, W) to (H, W, 1)
else:
- raise RuntimeError(f"pixel_format={pixel_format!r} is not supported.")
+ raise ValueError(f"pixel_format={pixel_format!r} is not supported.")
return (decoded,)
| {"golden_diff": "diff --git a/onnx/reference/ops/op_image_decoder.py b/onnx/reference/ops/op_image_decoder.py\n--- a/onnx/reference/ops/op_image_decoder.py\n+++ b/onnx/reference/ops/op_image_decoder.py\n@@ -1,7 +1,10 @@\n # Copyright (c) ONNX Project Contributors\n \n # SPDX-License-Identifier: Apache-2.0\n-# pylint: disable=C0123,C3001,R0912,R0913,R0914,R1730,W0221,W0613\n+\n+from __future__ import annotations\n+\n+import io\n \n import numpy as np\n \n@@ -9,27 +12,22 @@\n \n \n class ImageDecoder(OpRun):\n- def _run( # type: ignore\n- self,\n- encoded,\n- pixel_format=\"RGB\",\n- ):\n+ def _run(self, encoded: np.ndarray, pixel_format=\"RGB\") -> tuple[np.ndarray]: # type: ignore\n try:\n- # pylint: disable=import-outside-toplevel`\n- import cv2\n+ import PIL.Image # pylint: disable=import-outside-toplevel\n except ImportError as e:\n raise ImportError(\n- \"opencv-python must be installed to use the reference implementation of the ImageDecoder operator\"\n+ \"Pillow must be installed to use the reference implementation of the ImageDecoder operator\"\n ) from e\n- decoded = None\n+ img = PIL.Image.open(io.BytesIO(encoded.tobytes()))\n if pixel_format == \"BGR\":\n- decoded = cv2.imdecode(encoded, cv2.IMREAD_COLOR)\n+ decoded = np.array(img)[:, :, ::-1]\n elif pixel_format == \"RGB\":\n- decoded = cv2.imdecode(encoded, cv2.IMREAD_COLOR)\n- decoded = cv2.cvtColor(decoded, cv2.COLOR_BGR2RGB)\n+ decoded = np.array(img)\n elif pixel_format == \"Grayscale\":\n- decoded = cv2.imdecode(encoded, cv2.IMREAD_GRAYSCALE)\n+ img = img.convert(\"L\")\n+ decoded = np.array(img)\n decoded = np.expand_dims(decoded, axis=2) # (H, W) to (H, W, 1)\n else:\n- raise RuntimeError(f\"pixel_format={pixel_format!r} is not supported.\")\n+ raise ValueError(f\"pixel_format={pixel_format!r} is not supported.\")\n return (decoded,)\n", "issue": "Use pillow to replace opencv in reference evaluator\nCaveat: https://github.com/python-pillow/Pillow/issues/6047#issuecomment-1038150443\r\n\r\ncc @jcwchen \n", "before_files": [{"content": "# Copyright (c) ONNX Project Contributors\n\n# SPDX-License-Identifier: Apache-2.0\n# pylint: disable=C0123,C3001,R0912,R0913,R0914,R1730,W0221,W0613\n\nimport numpy as np\n\nfrom onnx.reference.op_run import OpRun\n\n\nclass ImageDecoder(OpRun):\n def _run( # type: ignore\n self,\n encoded,\n pixel_format=\"RGB\",\n ):\n try:\n # pylint: disable=import-outside-toplevel`\n import cv2\n except ImportError as e:\n raise ImportError(\n \"opencv-python must be installed to use the reference implementation of the ImageDecoder operator\"\n ) from e\n decoded = None\n if pixel_format == \"BGR\":\n decoded = cv2.imdecode(encoded, cv2.IMREAD_COLOR)\n elif pixel_format == \"RGB\":\n decoded = cv2.imdecode(encoded, cv2.IMREAD_COLOR)\n decoded = cv2.cvtColor(decoded, cv2.COLOR_BGR2RGB)\n elif pixel_format == \"Grayscale\":\n decoded = cv2.imdecode(encoded, cv2.IMREAD_GRAYSCALE)\n decoded = np.expand_dims(decoded, axis=2) # (H, W) to (H, W, 1)\n else:\n raise RuntimeError(f\"pixel_format={pixel_format!r} is not supported.\")\n return (decoded,)\n", "path": "onnx/reference/ops/op_image_decoder.py"}]} | 972 | 539 |
gh_patches_debug_30815 | rasdani/github-patches | git_diff | PrefectHQ__prefect-238 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Implement `map` for `LocalExecutor`
For some reason we avoided doing this, but it's actually entirely possible to do! Would be great for local debugging.
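
A minimal synchronous sketch of what `LocalExecutor.map` could look like (assuming a `dict_to_list` helper in `prefect.utilities.executors` that expands mapped upstream states into one dict per element):

```python
from prefect.utilities.executors import dict_to_list  # assumed helper


def map(self, fn, *args, upstream_states=None, **kwargs):  # method on LocalExecutor
    # Run each mapped element immediately, in order, in the local thread.
    results = []
    for elem in dict_to_list(upstream_states):
        results.append(self.submit(fn, *args, upstream_states=elem, **kwargs))
    return results
```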
</issue>
<code>
[start of src/prefect/engine/executors/local.py]
1 # Licensed under LICENSE.md; also available at https://www.prefect.io/licenses/alpha-eula
2
3 from prefect.engine.executors.base import Executor
4
5
6 class LocalExecutor(Executor):
7 """
8 An executor that runs all functions synchronously and immediately in
9 the local thread. To be used mainly for debugging purposes.
10 """
11
12 def submit(self, fn, *args, **kwargs):
13 """
14 Submit a function to the executor for execution. Returns the result of the computation.
15
16 Args:
17 - fn (Callable): function which is being submitted for execution
18 - *args (Any): arguments to be passed to `fn`
19 - **kwargs (Any): keyword arguments to be passed to `fn`
20
21 Returns:
22 - Any: the result of `fn(*args, **kwargs)`
23 """
24 return fn(*args, **kwargs)
25
26 def wait(self, futures, timeout=None):
27 """
28 Returns:
29 - Any: whatever `futures` were provided
30 """
31 return futures
32
[end of src/prefect/engine/executors/local.py]
[start of src/prefect/engine/executors/__init__.py]
1 # Licensed under LICENSE.md; also available at https://www.prefect.io/licenses/alpha-eula
2
3 """
4 Prefect Executors implement the logic for how Tasks are run. The standard interface
5 for an Executor consists of the following methods:
6
7 - `submit(fn, *args, **kwargs)`: submit `fn(*args, **kwargs)` for execution;
8 note that this function is (in general) non-blocking, meaning that `executor.submit(...)`
9 will _immediately_ return a future-like object regardless of whether `fn(*args, **kwargs)`
10 has completed running
11 - `submit_with_context(fn, *args, context, **kwargs)`: submit `fn(*args,
12 **kwargs)` for execution with the provided `prefect.context`
13 - `wait(object)`: resolves any objects returned by `executor.submit` to
14 their values; this function _will_ block until execution of `object` is complete
15 - `map(fn, *args, upstream_states, **kwargs)`: submit function to be mapped
16 over based on the edge information contained in `upstream_states`. Any "mapped" Edge
17 will be converted into multiple function submissions, one for each value of the upstream mapped tasks.
18
19 Currently, the available executor options are:
20
21 - `LocalExecutor`: the no frills, straightforward executor - great for simple
22 debugging; tasks are executed immediately upon being called by `executor.submit()`.
23 Note that the `map` feature is currently _not_ supported with this executor.
24 - `SynchronousExecutor`: an executor that runs on `dask` primitives with the
25 synchronous dask scheduler; currently the default executor
26 - `DaskExecutor`: the most feature-rich of the executors, this executor runs
27 on `dask.distributed` and has support for multiprocessing, multithreading, and distributed execution.
28
29 Which executor you choose depends on whether you intend to use things like parallelism
30 of task execution.
31 """
32 import sys
33
34 from warnings import warn as _warn
35 from importlib import import_module as _import_module
36
37 import prefect as _prefect
38 from prefect.engine.executors.base import Executor
39 from prefect.engine.executors.local import LocalExecutor
40 from prefect.engine.executors.sync import SynchronousExecutor
41
42 if sys.version_info >= (3, 5):
43 from prefect.engine.executors.dask import DaskExecutor
44
45 try:
46 cfg_exec = _prefect.config.engine.executor
47 *module, cls_name = cfg_exec.split(".")
48 module = _import_module(".".join(module))
49 DEFAULT_EXECUTOR = getattr(module, cls_name)()
50 except:
51 _warn(
52 "Could not import {}, using prefect.engine.executors.LocalExecutor instead.".format(
53 _prefect.config.engine.executor
54 )
55 )
56 DEFAULT_EXECUTOR = LocalExecutor()
57
[end of src/prefect/engine/executors/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/prefect/engine/executors/__init__.py b/src/prefect/engine/executors/__init__.py
--- a/src/prefect/engine/executors/__init__.py
+++ b/src/prefect/engine/executors/__init__.py
@@ -20,7 +20,6 @@
- `LocalExecutor`: the no frills, straightforward executor - great for simple
debugging; tasks are executed immediately upon being called by `executor.submit()`.
- Note that the `map` feature is currently _not_ supported with this executor.
- `SynchronousExecutor`: an executor that runs on `dask` primitives with the
synchronous dask scheduler; currently the default executor
- `DaskExecutor`: the most feature-rich of the executors, this executor runs
diff --git a/src/prefect/engine/executors/local.py b/src/prefect/engine/executors/local.py
--- a/src/prefect/engine/executors/local.py
+++ b/src/prefect/engine/executors/local.py
@@ -1,6 +1,9 @@
# Licensed under LICENSE.md; also available at https://www.prefect.io/licenses/alpha-eula
+from typing import Any, Callable, Iterable
+
from prefect.engine.executors.base import Executor
+from prefect.utilities.executors import dict_to_list
class LocalExecutor(Executor):
@@ -9,6 +12,17 @@
the local thread. To be used mainly for debugging purposes.
"""
+ def map(
+ self, fn: Callable, *args: Any, upstream_states=None, **kwargs: Any
+ ) -> Iterable[Any]:
+
+ states = dict_to_list(upstream_states)
+ results = []
+ for elem in states:
+ results.append(self.submit(fn, *args, upstream_states=elem, **kwargs))
+
+ return results
+
def submit(self, fn, *args, **kwargs):
"""
Submit a function to the executor for execution. Returns the result of the computation.
| {"golden_diff": "diff --git a/src/prefect/engine/executors/__init__.py b/src/prefect/engine/executors/__init__.py\n--- a/src/prefect/engine/executors/__init__.py\n+++ b/src/prefect/engine/executors/__init__.py\n@@ -20,7 +20,6 @@\n \n - `LocalExecutor`: the no frills, straightforward executor - great for simple\n debugging; tasks are executed immediately upon being called by `executor.submit()`.\n- Note that the `map` feature is currently _not_ supported with this executor.\n - `SynchronousExecutor`: an executor that runs on `dask` primitives with the\n synchronous dask scheduler; currently the default executor\n - `DaskExecutor`: the most feature-rich of the executors, this executor runs\ndiff --git a/src/prefect/engine/executors/local.py b/src/prefect/engine/executors/local.py\n--- a/src/prefect/engine/executors/local.py\n+++ b/src/prefect/engine/executors/local.py\n@@ -1,6 +1,9 @@\n # Licensed under LICENSE.md; also available at https://www.prefect.io/licenses/alpha-eula\n \n+from typing import Any, Callable, Iterable\n+\n from prefect.engine.executors.base import Executor\n+from prefect.utilities.executors import dict_to_list\n \n \n class LocalExecutor(Executor):\n@@ -9,6 +12,17 @@\n the local thread. To be used mainly for debugging purposes.\n \"\"\"\n \n+ def map(\n+ self, fn: Callable, *args: Any, upstream_states=None, **kwargs: Any\n+ ) -> Iterable[Any]:\n+\n+ states = dict_to_list(upstream_states)\n+ results = []\n+ for elem in states:\n+ results.append(self.submit(fn, *args, upstream_states=elem, **kwargs))\n+\n+ return results\n+\n def submit(self, fn, *args, **kwargs):\n \"\"\"\n Submit a function to the executor for execution. Returns the result of the computation.\n", "issue": "Implement `map` for `LocalExecutor`\nFor some reason we avoided doing this, but it's actually entirely possible to do! Would be great for local debugging.\n", "before_files": [{"content": "# Licensed under LICENSE.md; also available at https://www.prefect.io/licenses/alpha-eula\n\nfrom prefect.engine.executors.base import Executor\n\n\nclass LocalExecutor(Executor):\n \"\"\"\n An executor that runs all functions synchronously and immediately in\n the local thread. To be used mainly for debugging purposes.\n \"\"\"\n\n def submit(self, fn, *args, **kwargs):\n \"\"\"\n Submit a function to the executor for execution. Returns the result of the computation.\n\n Args:\n - fn (Callable): function which is being submitted for execution\n - *args (Any): arguments to be passed to `fn`\n - **kwargs (Any): keyword arguments to be passed to `fn`\n\n Returns:\n - Any: the result of `fn(*args, **kwargs)`\n \"\"\"\n return fn(*args, **kwargs)\n\n def wait(self, futures, timeout=None):\n \"\"\"\n Returns:\n - Any: whatever `futures` were provided\n \"\"\"\n return futures\n", "path": "src/prefect/engine/executors/local.py"}, {"content": "# Licensed under LICENSE.md; also available at https://www.prefect.io/licenses/alpha-eula\n\n\"\"\"\nPrefect Executors implement the logic for how Tasks are run. 
The standard interface\nfor an Executor consists of the following methods:\n\n- `submit(fn, *args, **kwargs)`: submit `fn(*args, **kwargs)` for execution;\n note that this function is (in general) non-blocking, meaning that `executor.submit(...)`\n will _immediately_ return a future-like object regardless of whether `fn(*args, **kwargs)`\n has completed running\n- `submit_with_context(fn, *args, context, **kwargs)`: submit `fn(*args,\n **kwargs)` for execution with the provided `prefect.context`\n- `wait(object)`: resolves any objects returned by `executor.submit` to\n their values; this function _will_ block until execution of `object` is complete\n- `map(fn, *args, upstream_states, **kwargs)`: submit function to be mapped\n over based on the edge information contained in `upstream_states`. Any \"mapped\" Edge\n will be converted into multiple function submissions, one for each value of the upstream mapped tasks.\n\nCurrently, the available executor options are:\n\n- `LocalExecutor`: the no frills, straightforward executor - great for simple\n debugging; tasks are executed immediately upon being called by `executor.submit()`.\n Note that the `map` feature is currently _not_ supported with this executor.\n- `SynchronousExecutor`: an executor that runs on `dask` primitives with the\n synchronous dask scheduler; currently the default executor\n- `DaskExecutor`: the most feature-rich of the executors, this executor runs\n on `dask.distributed` and has support for multiprocessing, multithreading, and distributed execution.\n\nWhich executor you choose depends on whether you intend to use things like parallelism\nof task execution.\n\"\"\"\nimport sys\n\nfrom warnings import warn as _warn\nfrom importlib import import_module as _import_module\n\nimport prefect as _prefect\nfrom prefect.engine.executors.base import Executor\nfrom prefect.engine.executors.local import LocalExecutor\nfrom prefect.engine.executors.sync import SynchronousExecutor\n\nif sys.version_info >= (3, 5):\n from prefect.engine.executors.dask import DaskExecutor\n\ntry:\n cfg_exec = _prefect.config.engine.executor\n *module, cls_name = cfg_exec.split(\".\")\n module = _import_module(\".\".join(module))\n DEFAULT_EXECUTOR = getattr(module, cls_name)()\nexcept:\n _warn(\n \"Could not import {}, using prefect.engine.executors.LocalExecutor instead.\".format(\n _prefect.config.engine.executor\n )\n )\n DEFAULT_EXECUTOR = LocalExecutor()\n", "path": "src/prefect/engine/executors/__init__.py"}]} | 1,568 | 433 |
gh_patches_debug_17691 | rasdani/github-patches | git_diff | docker__docker-py-867 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
No documentation for network api
The following client methods are missing documentation ([readthedocs](http://docker-py.readthedocs.org/)). A usage sketch follows the list below.
- [x] `Client.networks`
- [x] `Client.create_network`
- [x] `Client.remove_network`
- [x] `Client.inspect_network`
- [x] `Client.connect_container_to_network`
- [x] `Client.disconnect_container_from_network`
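
For reference while writing those docs, a hedged usage sketch (assumes a reachable Docker daemon, a docker-py 1.x `Client`, and an existing container named `my-container`; the create call is expected to return a dict with an `Id` key):

```python
from docker import Client

cli = Client(base_url='unix://var/run/docker.sock')
net = cli.create_network('isolated', driver='bridge')
cli.connect_container_to_network('my-container', net['Id'])
print(cli.inspect_network(net['Id']))
cli.disconnect_container_from_network('my-container', net['Id'])
cli.remove_network(net['Id'])
```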
</issue>
<code>
[start of docker/api/volume.py]
1 from .. import utils
2
3
4 class VolumeApiMixin(object):
5 @utils.minimum_version('1.21')
6 def volumes(self, filters=None):
7 params = {
8 'filters': utils.convert_filters(filters) if filters else None
9 }
10 url = self._url('/volumes')
11 return self._result(self._get(url, params=params), True)
12
13 @utils.minimum_version('1.21')
14 def create_volume(self, name, driver=None, driver_opts=None):
15 url = self._url('/volumes/create')
16 if driver_opts is not None and not isinstance(driver_opts, dict):
17 raise TypeError('driver_opts must be a dictionary')
18
19 data = {
20 'Name': name,
21 'Driver': driver,
22 'DriverOpts': driver_opts,
23 }
24 return self._result(self._post_json(url, data=data), True)
25
26 @utils.minimum_version('1.21')
27 def inspect_volume(self, name):
28 url = self._url('/volumes/{0}', name)
29 return self._result(self._get(url), True)
30
31 @utils.minimum_version('1.21')
32 def remove_volume(self, name):
33 url = self._url('/volumes/{0}', name)
34 resp = self._delete(url)
35 self._raise_for_status(resp)
36 return True
37
[end of docker/api/volume.py]
[start of docker/api/network.py]
1 import json
2
3 from ..utils import check_resource, minimum_version
4
5
6 class NetworkApiMixin(object):
7 @minimum_version('1.21')
8 def networks(self, names=None, ids=None):
9 filters = {}
10 if names:
11 filters['name'] = names
12 if ids:
13 filters['id'] = ids
14
15 params = {'filters': json.dumps(filters)}
16
17 url = self._url("/networks")
18 res = self._get(url, params=params)
19 return self._result(res, json=True)
20
21 @minimum_version('1.21')
22 def create_network(self, name, driver=None):
23 data = {
24 'name': name,
25 'driver': driver,
26 }
27 url = self._url("/networks/create")
28 res = self._post_json(url, data=data)
29 return self._result(res, json=True)
30
31 @minimum_version('1.21')
32 def remove_network(self, net_id):
33 url = self._url("/networks/{0}", net_id)
34 res = self._delete(url)
35 self._raise_for_status(res)
36
37 @minimum_version('1.21')
38 def inspect_network(self, net_id):
39 url = self._url("/networks/{0}", net_id)
40 res = self._get(url)
41 return self._result(res, json=True)
42
43 @check_resource
44 @minimum_version('1.21')
45 def connect_container_to_network(self, container, net_id):
46 data = {"container": container}
47 url = self._url("/networks/{0}/connect", net_id)
48 self._post_json(url, data=data)
49
50 @check_resource
51 @minimum_version('1.21')
52 def disconnect_container_from_network(self, container, net_id):
53 data = {"container": container}
54 url = self._url("/networks/{0}/disconnect", net_id)
55 self._post_json(url, data=data)
56
[end of docker/api/network.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docker/api/network.py b/docker/api/network.py
--- a/docker/api/network.py
+++ b/docker/api/network.py
@@ -19,10 +19,14 @@
return self._result(res, json=True)
@minimum_version('1.21')
- def create_network(self, name, driver=None):
+ def create_network(self, name, driver=None, options=None):
+ if options is not None and not isinstance(options, dict):
+ raise TypeError('options must be a dictionary')
+
data = {
'name': name,
'driver': driver,
+ 'options': options
}
url = self._url("/networks/create")
res = self._post_json(url, data=data)
diff --git a/docker/api/volume.py b/docker/api/volume.py
--- a/docker/api/volume.py
+++ b/docker/api/volume.py
@@ -33,4 +33,3 @@
url = self._url('/volumes/{0}', name)
resp = self._delete(url)
self._raise_for_status(resp)
- return True
| {"golden_diff": "diff --git a/docker/api/network.py b/docker/api/network.py\n--- a/docker/api/network.py\n+++ b/docker/api/network.py\n@@ -19,10 +19,14 @@\n return self._result(res, json=True)\n \n @minimum_version('1.21')\n- def create_network(self, name, driver=None):\n+ def create_network(self, name, driver=None, options=None):\n+ if options is not None and not isinstance(options, dict):\n+ raise TypeError('options must be a dictionary')\n+\n data = {\n 'name': name,\n 'driver': driver,\n+ 'options': options\n }\n url = self._url(\"/networks/create\")\n res = self._post_json(url, data=data)\ndiff --git a/docker/api/volume.py b/docker/api/volume.py\n--- a/docker/api/volume.py\n+++ b/docker/api/volume.py\n@@ -33,4 +33,3 @@\n url = self._url('/volumes/{0}', name)\n resp = self._delete(url)\n self._raise_for_status(resp)\n- return True\n", "issue": "No documentation for network api\nThe following have missing documentation ([readthedocs](http://docker-py.readthedocs.org/)).\n- [x] `Client.networks`\n- [x] `Client.create_network`\n- [x] `Client.remove_network`\n- [x] `Client.inspect_network`\n- [x] `Client.connect_container_to_network`\n- [x] `Client.disconnect_container_from_network`\n\n", "before_files": [{"content": "from .. import utils\n\n\nclass VolumeApiMixin(object):\n @utils.minimum_version('1.21')\n def volumes(self, filters=None):\n params = {\n 'filters': utils.convert_filters(filters) if filters else None\n }\n url = self._url('/volumes')\n return self._result(self._get(url, params=params), True)\n\n @utils.minimum_version('1.21')\n def create_volume(self, name, driver=None, driver_opts=None):\n url = self._url('/volumes/create')\n if driver_opts is not None and not isinstance(driver_opts, dict):\n raise TypeError('driver_opts must be a dictionary')\n\n data = {\n 'Name': name,\n 'Driver': driver,\n 'DriverOpts': driver_opts,\n }\n return self._result(self._post_json(url, data=data), True)\n\n @utils.minimum_version('1.21')\n def inspect_volume(self, name):\n url = self._url('/volumes/{0}', name)\n return self._result(self._get(url), True)\n\n @utils.minimum_version('1.21')\n def remove_volume(self, name):\n url = self._url('/volumes/{0}', name)\n resp = self._delete(url)\n self._raise_for_status(resp)\n return True\n", "path": "docker/api/volume.py"}, {"content": "import json\n\nfrom ..utils import check_resource, minimum_version\n\n\nclass NetworkApiMixin(object):\n @minimum_version('1.21')\n def networks(self, names=None, ids=None):\n filters = {}\n if names:\n filters['name'] = names\n if ids:\n filters['id'] = ids\n\n params = {'filters': json.dumps(filters)}\n\n url = self._url(\"/networks\")\n res = self._get(url, params=params)\n return self._result(res, json=True)\n\n @minimum_version('1.21')\n def create_network(self, name, driver=None):\n data = {\n 'name': name,\n 'driver': driver,\n }\n url = self._url(\"/networks/create\")\n res = self._post_json(url, data=data)\n return self._result(res, json=True)\n\n @minimum_version('1.21')\n def remove_network(self, net_id):\n url = self._url(\"/networks/{0}\", net_id)\n res = self._delete(url)\n self._raise_for_status(res)\n\n @minimum_version('1.21')\n def inspect_network(self, net_id):\n url = self._url(\"/networks/{0}\", net_id)\n res = self._get(url)\n return self._result(res, json=True)\n\n @check_resource\n @minimum_version('1.21')\n def connect_container_to_network(self, container, net_id):\n data = {\"container\": container}\n url = self._url(\"/networks/{0}/connect\", net_id)\n self._post_json(url, data=data)\n\n 
@check_resource\n @minimum_version('1.21')\n def disconnect_container_from_network(self, container, net_id):\n data = {\"container\": container}\n url = self._url(\"/networks/{0}/disconnect\", net_id)\n self._post_json(url, data=data)\n", "path": "docker/api/network.py"}]} | 1,508 | 244 |
gh_patches_debug_4081 | rasdani/github-patches | git_diff | dbt-labs__dbt-core-8054 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pin click
resolves #8048
### Description
Pin main to `click>=8.1.1,<8.1.4`
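
In `install_requires` terms the pin would land roughly as below (a sketch — the lower bound shown matches the existing constraint rather than this PR's description):

```python
install_requires=[
    # ...
    "click>=7.0,<8.1.4",  # upper bound added; see pallets/click#2558
    # ...
]
```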
### Checklist
- [ ] I have read [the contributing guide](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md) and understand what's expected of me
- [ ] I have signed the [CLA](https://docs.getdbt.com/docs/contributor-license-agreements)
- [ ] I have run this code in development and it appears to resolve the stated issue
- [ ] This PR includes tests, or tests are not required/relevant for this PR
- [ ] I have [opened an issue to add/update docs](https://github.com/dbt-labs/docs.getdbt.com/issues/new/choose), or docs changes are not required/relevant for this PR
- [ ] I have run `changie new` to [create a changelog entry](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md#adding-a-changelog-entry)
</issue>
<code>
[start of core/setup.py]
1 #!/usr/bin/env python
2 import os
3 import sys
4
5 if sys.version_info < (3, 7, 2):
6 print("Error: dbt does not support this version of Python.")
7 print("Please upgrade to Python 3.7.2 or higher.")
8 sys.exit(1)
9
10
11 from setuptools import setup
12
13 try:
14 from setuptools import find_namespace_packages
15 except ImportError:
16 # the user has a downlevel version of setuptools.
17 print("Error: dbt requires setuptools v40.1.0 or higher.")
18 print('Please upgrade setuptools with "pip install --upgrade setuptools" ' "and try again")
19 sys.exit(1)
20
21
22 this_directory = os.path.abspath(os.path.dirname(__file__))
23 with open(os.path.join(this_directory, "README.md")) as f:
24 long_description = f.read()
25
26
27 package_name = "dbt-core"
28 package_version = "1.3.4"
29 description = """With dbt, data analysts and engineers can build analytics \
30 the way engineers build applications."""
31
32
33 setup(
34 name=package_name,
35 version=package_version,
36 description=description,
37 long_description=long_description,
38 long_description_content_type="text/markdown",
39 author="dbt Labs",
40 author_email="[email protected]",
41 url="https://github.com/dbt-labs/dbt-core",
42 packages=find_namespace_packages(include=["dbt", "dbt.*"]),
43 include_package_data=True,
44 test_suite="test",
45 entry_points={
46 "console_scripts": ["dbt = dbt.main:main"],
47 },
48 install_requires=[
49 "Jinja2==3.1.2",
50 "agate>=1.6,<1.6.4",
51 "click>=7.0,<9",
52 "colorama>=0.3.9,<0.4.6",
53 "hologram>=0.0.14,<=0.0.15",
54 "isodate>=0.6,<0.7",
55 "logbook>=1.5,<1.6",
56 "mashumaro[msgpack]==3.0.4",
57 "minimal-snowplow-tracker==0.0.2",
58 "networkx>=2.3,<2.8.1;python_version<'3.8'",
59 "networkx>=2.3,<3;python_version>='3.8'",
60 "packaging>=20.9,<22.0",
61 "sqlparse>=0.2.3,<0.4.4",
62 "dbt-extractor~=0.4.1",
63 "typing-extensions>=3.7.4",
64 "werkzeug>=1,<3",
65 "pathspec~=0.9.0",
66 "pytz>=2015.7",
67 # the following are all to match snowflake-connector-python
68 "requests<3.0.0",
69 "idna>=2.5,<4",
70 "cffi>=1.9,<2.0.0",
71 "pyyaml>=6.0",
72 ],
73 zip_safe=False,
74 classifiers=[
75 "Development Status :: 5 - Production/Stable",
76 "License :: OSI Approved :: Apache Software License",
77 "Operating System :: Microsoft :: Windows",
78 "Operating System :: MacOS :: MacOS X",
79 "Operating System :: POSIX :: Linux",
80 "Programming Language :: Python :: 3.7",
81 "Programming Language :: Python :: 3.8",
82 "Programming Language :: Python :: 3.9",
83 "Programming Language :: Python :: 3.10",
84 ],
85 python_requires=">=3.7.2",
86 )
87
[end of core/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/setup.py b/core/setup.py
--- a/core/setup.py
+++ b/core/setup.py
@@ -48,7 +48,8 @@
install_requires=[
"Jinja2==3.1.2",
"agate>=1.6,<1.6.4",
- "click>=7.0,<9",
+ # temporarily pinning click for mypy failures: https://github.com/pallets/click/issues/2558
+ "click>=7.0,<8.1.4",
"colorama>=0.3.9,<0.4.6",
"hologram>=0.0.14,<=0.0.15",
"isodate>=0.6,<0.7",
| {"golden_diff": "diff --git a/core/setup.py b/core/setup.py\n--- a/core/setup.py\n+++ b/core/setup.py\n@@ -48,7 +48,8 @@\n install_requires=[\n \"Jinja2==3.1.2\",\n \"agate>=1.6,<1.6.4\",\n- \"click>=7.0,<9\",\n+ # temporarily pinning click for mypy failures: https://github.com/pallets/click/issues/2558\n+ \"click>=7.0,<8.1.4\",\n \"colorama>=0.3.9,<0.4.6\",\n \"hologram>=0.0.14,<=0.0.15\",\n \"isodate>=0.6,<0.7\",\n", "issue": "pin click\nresolves #8048 \r\n\r\n### Description\r\n\r\nPin main to `click>=8.1.1,<8.1.4`\r\n\r\n### Checklist\r\n\r\n- [ ] I have read [the contributing guide](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md) and understand what's expected of me\r\n- [ ] I have signed the [CLA](https://docs.getdbt.com/docs/contributor-license-agreements)\r\n- [ ] I have run this code in development and it appears to resolve the stated issue\r\n- [ ] This PR includes tests, or tests are not required/relevant for this PR\r\n- [ ] I have [opened an issue to add/update docs](https://github.com/dbt-labs/docs.getdbt.com/issues/new/choose), or docs changes are not required/relevant for this PR\r\n- [ ] I have run `changie new` to [create a changelog entry](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md#adding-a-changelog-entry)\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nif sys.version_info < (3, 7, 2):\n print(\"Error: dbt does not support this version of Python.\")\n print(\"Please upgrade to Python 3.7.2 or higher.\")\n sys.exit(1)\n\n\nfrom setuptools import setup\n\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print(\"Error: dbt requires setuptools v40.1.0 or higher.\")\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" ' \"and try again\")\n sys.exit(1)\n\n\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, \"README.md\")) as f:\n long_description = f.read()\n\n\npackage_name = \"dbt-core\"\npackage_version = \"1.3.4\"\ndescription = \"\"\"With dbt, data analysts and engineers can build analytics \\\nthe way engineers build applications.\"\"\"\n\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"dbt Labs\",\n author_email=\"[email protected]\",\n url=\"https://github.com/dbt-labs/dbt-core\",\n packages=find_namespace_packages(include=[\"dbt\", \"dbt.*\"]),\n include_package_data=True,\n test_suite=\"test\",\n entry_points={\n \"console_scripts\": [\"dbt = dbt.main:main\"],\n },\n install_requires=[\n \"Jinja2==3.1.2\",\n \"agate>=1.6,<1.6.4\",\n \"click>=7.0,<9\",\n \"colorama>=0.3.9,<0.4.6\",\n \"hologram>=0.0.14,<=0.0.15\",\n \"isodate>=0.6,<0.7\",\n \"logbook>=1.5,<1.6\",\n \"mashumaro[msgpack]==3.0.4\",\n \"minimal-snowplow-tracker==0.0.2\",\n \"networkx>=2.3,<2.8.1;python_version<'3.8'\",\n \"networkx>=2.3,<3;python_version>='3.8'\",\n \"packaging>=20.9,<22.0\",\n \"sqlparse>=0.2.3,<0.4.4\",\n \"dbt-extractor~=0.4.1\",\n \"typing-extensions>=3.7.4\",\n \"werkzeug>=1,<3\",\n \"pathspec~=0.9.0\",\n \"pytz>=2015.7\",\n # the following are all to match snowflake-connector-python\n \"requests<3.0.0\",\n \"idna>=2.5,<4\",\n \"cffi>=1.9,<2.0.0\",\n \"pyyaml>=6.0\",\n ],\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: Apache Software License\",\n 
\"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n ],\n python_requires=\">=3.7.2\",\n)\n", "path": "core/setup.py"}]} | 1,713 | 172 |
gh_patches_debug_14814 | rasdani/github-patches | git_diff | bridgecrewio__checkov-599 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update urllib3: HTTP Header Injection vuln
**Describe the bug**
urllib3 needs to be updated to at least 1.25.9 to fix a high-severity HTTP header injection vulnerability. Snyk info page [here](https://snyk.io/vuln/SNYK-PYTHON-URLLIB3-1014645).
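
A sketch of the corresponding bump in `setup.py` (the exact patched version is an assumption; anything >= 1.25.9 addresses the advisory):

```python
# in both extras_require["dev"] and install_requires:
"urllib3==1.25.10",
```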
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 import logging
3 import os
4 from importlib import util
5 from os import path
6
7 import setuptools
8 from setuptools import setup
9
10 # read the contents of your README file
11 this_directory = path.abspath(path.dirname(__file__))
12 with open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
13 long_description = f.read()
14
15 logger = logging.getLogger(__name__)
16 spec = util.spec_from_file_location(
17 "checkov.version", os.path.join("checkov", "version.py")
18 )
19 # noinspection PyUnresolvedReferences
20 mod = util.module_from_spec(spec)
21 spec.loader.exec_module(mod) # type: ignore
22 version = mod.version # type: ignore
23
24 setup(
25 extras_require={
26 "dev": [
27 "alabaster==0.7.12",
28 "attrs==19.3.0",
29 "babel==2.7.0",
30 "certifi==2019.11.28",
31 "chardet==3.0.4",
32 "coverage==4.5.4",
33 "coverage-badge==1.0.1",
34 "docopt==0.6.2",
35 "docutils==0.15.2",
36 "idna==2.8",
37 "imagesize==1.1.0",
38 "importlib-metadata==1.1.0; python_version < '3.8'",
39 "jinja2==2.10.3",
40 "lark-parser==0.7.8",
41 "markupsafe==1.1.1",
42 "more-itertools==8.0.0",
43 "packaging==19.2",
44 "pluggy==0.13.1",
45 "py==1.8.0",
46 "pygments==2.5.2",
47 "pyparsing==2.4.5",
48 "pytest==5.3.1",
49 "bc-python-hcl2>=0.3.10",
50 "pytz==2019.3",
51 "pyyaml==5.3.1",
52 "requests==2.22.0",
53 "six==1.15.0",
54 "snowballstemmer==2.0.0",
55 "sphinx==2.2.1",
56 "sphinxcontrib-applehelp==1.0.1",
57 "sphinxcontrib-devhelp==1.0.1",
58 "sphinxcontrib-htmlhelp==1.0.2",
59 "sphinxcontrib-jsmath==1.0.1",
60 "sphinxcontrib-qthelp==1.0.2",
61 "sphinxcontrib-serializinghtml==1.1.3",
62 "urllib3==1.25.7",
63 "wcwidth==0.1.7",
64 "zipp==0.6.0",
65 "GitPython==3.1.7",
66 "gitdb==4.0.5"
67 ]
68 },
69 install_requires=[
70 "boto3==1.12.43",
71 "chardet==3.0.4",
72 "colorama==0.4.3",
73 "docopt==0.6.2",
74 "idna==2.8",
75 "jmespath==0.10.0",
76 "junit-xml==1.8",
77 "lark-parser==0.7.8",
78 "bc-python-hcl2>=0.3.11",
79 "pyyaml==5.3.1",
80 "requests==2.22.0",
81 "six==1.15.0",
82 "tabulate==0.8.6",
83 "termcolor==1.1.0",
84 "urllib3==1.25.7",
85 "dpath==1.5.0",
86 "GitPython==3.1.7",
87 "gitdb==4.0.5"
88 ],
89 license="Apache License 2.0",
90 name="checkov",
91 version=version,
92 description="Infrastructure as code static analysis",
93 author="bridgecrew",
94 author_email="[email protected]",
95 url="https://github.com/bridgecrewio/checkov",
96 packages=setuptools.find_packages(exclude=["tests*","integration_tests*"]),
97 scripts=["bin/checkov","bin/checkov.cmd"],
98 long_description=long_description,
99 long_description_content_type="text/markdown",
100 classifiers=[
101 'Environment :: Console',
102 'Intended Audience :: Developers',
103 'Intended Audience :: System Administrators',
104 'Programming Language :: Python :: 3.7',
105 'Topic :: Security',
106 'Topic :: Software Development :: Build Tools'
107 ]
108 )
109
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -59,7 +59,7 @@
"sphinxcontrib-jsmath==1.0.1",
"sphinxcontrib-qthelp==1.0.2",
"sphinxcontrib-serializinghtml==1.1.3",
- "urllib3==1.25.7",
+ "urllib3==1.25.10",
"wcwidth==0.1.7",
"zipp==0.6.0",
"GitPython==3.1.7",
@@ -81,7 +81,7 @@
"six==1.15.0",
"tabulate==0.8.6",
"termcolor==1.1.0",
- "urllib3==1.25.7",
+ "urllib3==1.25.10",
"dpath==1.5.0",
"GitPython==3.1.7",
"gitdb==4.0.5"
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -59,7 +59,7 @@\n \"sphinxcontrib-jsmath==1.0.1\",\n \"sphinxcontrib-qthelp==1.0.2\",\n \"sphinxcontrib-serializinghtml==1.1.3\",\n- \"urllib3==1.25.7\",\n+ \"urllib3==1.25.10\",\n \"wcwidth==0.1.7\",\n \"zipp==0.6.0\",\n \"GitPython==3.1.7\",\n@@ -81,7 +81,7 @@\n \"six==1.15.0\",\n \"tabulate==0.8.6\",\n \"termcolor==1.1.0\",\n- \"urllib3==1.25.7\",\n+ \"urllib3==1.25.10\",\n \"dpath==1.5.0\",\n \"GitPython==3.1.7\",\n \"gitdb==4.0.5\"\n", "issue": "Update urllib3: HTTP Header Injection vuln\n**Describe the bug**\r\nurllib3 needs to be updated to at least 1.25.9 to fix a high severity HTTP Header Injection vulnerability. Snyk info page [here](https://snyk.io/vuln/SNYK-PYTHON-URLLIB3-1014645).\n", "before_files": [{"content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = mod.version # type: ignore\n\nsetup(\n extras_require={\n \"dev\": [\n \"alabaster==0.7.12\",\n \"attrs==19.3.0\",\n \"babel==2.7.0\",\n \"certifi==2019.11.28\",\n \"chardet==3.0.4\",\n \"coverage==4.5.4\",\n \"coverage-badge==1.0.1\",\n \"docopt==0.6.2\",\n \"docutils==0.15.2\",\n \"idna==2.8\",\n \"imagesize==1.1.0\",\n \"importlib-metadata==1.1.0; python_version < '3.8'\",\n \"jinja2==2.10.3\",\n \"lark-parser==0.7.8\",\n \"markupsafe==1.1.1\",\n \"more-itertools==8.0.0\",\n \"packaging==19.2\",\n \"pluggy==0.13.1\",\n \"py==1.8.0\",\n \"pygments==2.5.2\",\n \"pyparsing==2.4.5\",\n \"pytest==5.3.1\",\n \"bc-python-hcl2>=0.3.10\",\n \"pytz==2019.3\",\n \"pyyaml==5.3.1\",\n \"requests==2.22.0\",\n \"six==1.15.0\",\n \"snowballstemmer==2.0.0\",\n \"sphinx==2.2.1\",\n \"sphinxcontrib-applehelp==1.0.1\",\n \"sphinxcontrib-devhelp==1.0.1\",\n \"sphinxcontrib-htmlhelp==1.0.2\",\n \"sphinxcontrib-jsmath==1.0.1\",\n \"sphinxcontrib-qthelp==1.0.2\",\n \"sphinxcontrib-serializinghtml==1.1.3\",\n \"urllib3==1.25.7\",\n \"wcwidth==0.1.7\",\n \"zipp==0.6.0\",\n \"GitPython==3.1.7\",\n \"gitdb==4.0.5\"\n ]\n },\n install_requires=[\n \"boto3==1.12.43\",\n \"chardet==3.0.4\",\n \"colorama==0.4.3\",\n \"docopt==0.6.2\",\n \"idna==2.8\",\n \"jmespath==0.10.0\",\n \"junit-xml==1.8\",\n \"lark-parser==0.7.8\",\n \"bc-python-hcl2>=0.3.11\",\n \"pyyaml==5.3.1\",\n \"requests==2.22.0\",\n \"six==1.15.0\",\n \"tabulate==0.8.6\",\n \"termcolor==1.1.0\",\n \"urllib3==1.25.7\",\n \"dpath==1.5.0\",\n \"GitPython==3.1.7\",\n \"gitdb==4.0.5\"\n ],\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/bridgecrewio/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\",\"integration_tests*\"]),\n scripts=[\"bin/checkov\",\"bin/checkov.cmd\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 
'Intended Audience :: System Administrators',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Security',\n 'Topic :: Software Development :: Build Tools'\n ]\n)\n", "path": "setup.py"}]} | 1,862 | 247 |
gh_patches_debug_16857 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-4882 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
c7n_mailer, AWS not installing Lambda, no logs, no errors
I have tried to set up/install the c7n_mailer lambda on our AWS account according to the docs. I have tried it from my Mac and from Docker images (in a Jenkins pipeline) to no avail. The kicker is that I am not getting any error or output. Is there anything I can look at to see whether the issue is on my end or something on the AWS account? This is the command I am running:
```
c7n-mailer --config mailer.yml --update-lambda
```
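
One minimal diagnostic sketch (the import paths and `SessionFactory` usage are assumptions based on this repo, not documented usage) is to drive the provisioning step directly with verbose logging enabled:

```python
# Hypothetical diagnostic harness: run deploy.provision() by hand with DEBUG
# logging so that boto3/custodian messages are not silently swallowed.
import logging
import yaml

logging.basicConfig(level=logging.DEBUG)
logging.getLogger("botocore").setLevel(logging.INFO)  # keep botocore noise manageable

from c7n.credentials import SessionFactory
from c7n_mailer import deploy

with open("mailer.yml") as f:
    config = yaml.safe_load(f)

session_factory = SessionFactory(config.get("region", "us-east-1"))
deploy.provision(config, session_factory)
```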
</issue>
<code>
[start of tools/c7n_mailer/c7n_mailer/deploy.py]
1 # Copyright 2016-2017 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from __future__ import absolute_import, division, print_function, unicode_literals
15
16 import copy
17 import json
18 import os
19
20 from c7n.mu import (
21 CloudWatchEventSource,
22 LambdaFunction,
23 LambdaManager,
24 PythonPackageArchive)
25
26
27 entry_source = """\
28 import logging
29
30 from c7n_mailer import handle
31
32 logger = logging.getLogger('custodian.mailer')
33 log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
34 logging.basicConfig(level=logging.INFO, format=log_format)
35 logging.getLogger('botocore').setLevel(logging.WARNING)
36
37 def dispatch(event, context):
38 return handle.start_c7n_mailer(logger)
39 """
40
41
42 def get_archive(config):
43 archive = PythonPackageArchive(modules=[
44 'c7n_mailer',
45 # core deps
46 'jinja2', 'markupsafe', 'ruamel', 'ldap3', 'pyasn1', 'redis',
47 # for other dependencies
48 'pkg_resources',
49 # transport datadog - recursive deps
50 'datadog', 'simplejson', 'decorator',
51 # requests (recursive deps), needed by datadog, slackclient, splunk
52 'requests', 'urllib3', 'idna', 'chardet', 'certifi',
53 # used by splunk; also dependencies of c7n itself
54 'jsonpointer', 'jsonpatch'])
55
56 for d in set(config['templates_folders']):
57 if not os.path.exists(d):
58 continue
59 for t in [f for f in os.listdir(d) if os.path.splitext(f)[1] == '.j2']:
60 with open(os.path.join(d, t)) as fh:
61 archive.add_contents('msg-templates/%s' % t, fh.read())
62
63 function_config = copy.deepcopy(config)
64 function_config['templates_folders'] = ['msg-templates/']
65 archive.add_contents('config.json', json.dumps(function_config))
66 archive.add_contents('periodic.py', entry_source)
67
68 archive.close()
69 return archive
70
71
72 def provision(config, session_factory):
73 func_config = dict(
74 name=config.get('lambda_name', 'cloud-custodian-mailer'),
75 description=config.get('lambda_description', 'Cloud Custodian Mailer'),
76 tags=config.get('lambda_tags', {}),
77 handler='periodic.dispatch',
78 runtime=config['runtime'],
79 memory_size=config['memory'],
80 timeout=config['timeout'],
81 role=config['role'],
82 subnets=config['subnets'],
83 security_groups=config['security_groups'],
84 dead_letter_config=config.get('dead_letter_config', {}),
85 events=[
86 CloudWatchEventSource(
87 {'type': 'periodic',
88 'schedule': config.get('lambda_schedule', 'rate(5 minutes)')},
89 session_factory)
90 ])
91
92 archive = get_archive(config)
93 func = LambdaFunction(func_config, archive)
94 manager = LambdaManager(session_factory)
95 manager.publish(func)
96
[end of tools/c7n_mailer/c7n_mailer/deploy.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tools/c7n_mailer/c7n_mailer/deploy.py b/tools/c7n_mailer/c7n_mailer/deploy.py
--- a/tools/c7n_mailer/c7n_mailer/deploy.py
+++ b/tools/c7n_mailer/c7n_mailer/deploy.py
@@ -14,6 +14,7 @@
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
+import logging
import json
import os
@@ -24,6 +25,8 @@
PythonPackageArchive)
+log = logging.getLogger('custodian-mailer')
+
entry_source = """\
import logging
@@ -91,5 +94,6 @@
archive = get_archive(config)
func = LambdaFunction(func_config, archive)
+ log.info("Provisioning mailer lambda %s" % (session_factory().region_name))
manager = LambdaManager(session_factory)
manager.publish(func)
| {"golden_diff": "diff --git a/tools/c7n_mailer/c7n_mailer/deploy.py b/tools/c7n_mailer/c7n_mailer/deploy.py\n--- a/tools/c7n_mailer/c7n_mailer/deploy.py\n+++ b/tools/c7n_mailer/c7n_mailer/deploy.py\n@@ -14,6 +14,7 @@\n from __future__ import absolute_import, division, print_function, unicode_literals\n \n import copy\n+import logging\n import json\n import os\n \n@@ -24,6 +25,8 @@\n PythonPackageArchive)\n \n \n+log = logging.getLogger('custodian-mailer')\n+\n entry_source = \"\"\"\\\n import logging\n \n@@ -91,5 +94,6 @@\n \n archive = get_archive(config)\n func = LambdaFunction(func_config, archive)\n+ log.info(\"Provisioning mailer lambda %s\" % (session_factory().region_name))\n manager = LambdaManager(session_factory)\n manager.publish(func)\n", "issue": "c7n_mailer, AWS not installing Lambda, no logs, no errors\nI have tried to setup/install the c7n_mailer lambda on our AWS account according to the docs. I have tried it from my Mac and from Docker Images (in a Jenkins pipeline) to no avail. The kicker is I am not getting any error, or output. Is there anything I can look at to see if I have an issue from my end our something on the AWS account. This is the command I am running:\r\n```\r\nc7n-mailer --config mailer.yml --update-lambda\r\n```\n", "before_files": [{"content": "# Copyright 2016-2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport copy\nimport json\nimport os\n\nfrom c7n.mu import (\n CloudWatchEventSource,\n LambdaFunction,\n LambdaManager,\n PythonPackageArchive)\n\n\nentry_source = \"\"\"\\\nimport logging\n\nfrom c7n_mailer import handle\n\nlogger = logging.getLogger('custodian.mailer')\nlog_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\nlogging.basicConfig(level=logging.INFO, format=log_format)\nlogging.getLogger('botocore').setLevel(logging.WARNING)\n\ndef dispatch(event, context):\n return handle.start_c7n_mailer(logger)\n\"\"\"\n\n\ndef get_archive(config):\n archive = PythonPackageArchive(modules=[\n 'c7n_mailer',\n # core deps\n 'jinja2', 'markupsafe', 'ruamel', 'ldap3', 'pyasn1', 'redis',\n # for other dependencies\n 'pkg_resources',\n # transport datadog - recursive deps\n 'datadog', 'simplejson', 'decorator',\n # requests (recursive deps), needed by datadog, slackclient, splunk\n 'requests', 'urllib3', 'idna', 'chardet', 'certifi',\n # used by splunk; also dependencies of c7n itself\n 'jsonpointer', 'jsonpatch'])\n\n for d in set(config['templates_folders']):\n if not os.path.exists(d):\n continue\n for t in [f for f in os.listdir(d) if os.path.splitext(f)[1] == '.j2']:\n with open(os.path.join(d, t)) as fh:\n archive.add_contents('msg-templates/%s' % t, fh.read())\n\n function_config = copy.deepcopy(config)\n function_config['templates_folders'] = ['msg-templates/']\n archive.add_contents('config.json', json.dumps(function_config))\n archive.add_contents('periodic.py', entry_source)\n\n archive.close()\n return 
archive\n\n\ndef provision(config, session_factory):\n func_config = dict(\n name=config.get('lambda_name', 'cloud-custodian-mailer'),\n description=config.get('lambda_description', 'Cloud Custodian Mailer'),\n tags=config.get('lambda_tags', {}),\n handler='periodic.dispatch',\n runtime=config['runtime'],\n memory_size=config['memory'],\n timeout=config['timeout'],\n role=config['role'],\n subnets=config['subnets'],\n security_groups=config['security_groups'],\n dead_letter_config=config.get('dead_letter_config', {}),\n events=[\n CloudWatchEventSource(\n {'type': 'periodic',\n 'schedule': config.get('lambda_schedule', 'rate(5 minutes)')},\n session_factory)\n ])\n\n archive = get_archive(config)\n func = LambdaFunction(func_config, archive)\n manager = LambdaManager(session_factory)\n manager.publish(func)\n", "path": "tools/c7n_mailer/c7n_mailer/deploy.py"}]} | 1,623 | 213 |
gh_patches_debug_17873 | rasdani/github-patches | git_diff | PyGithub__PyGithub-1327 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
InputGitTreeElement should allow passing "null" for sha
Github's [Tree creation api](https://developer.github.com/v3/git/trees/#create-a-tree) allows us to pass `sha = null` to indicate that the specified blob needs to be deleted.
However, I don't have a way to pass this info to my `InputGitTreeElement`. I can either give it a str or a `github.GithubObject.NotSet`. This means I have no way of deleting files from a tree using PyGithub (I'd like to delete multiple files in a single commit so tree creation is the ideal choice for me).
The current design is to only pass the `sha` if it is actually set:
https://github.com/PyGithub/PyGithub/blob/540a085001/github/InputGitTreeElement.py#L81
I can understand that passing a `None` goes against the design. I think something like `github.GithubObject.Null` could be introduced to explicitly say that this field is `null`. It can be used everywhere the GH API accepts a null value.
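
A sentinel like that might look roughly like the following sketch (the name and serialization behavior are assumptions, not existing PyGithub API):

```python
# Hypothetical sentinel, modeled on how NotSet already works in this codebase;
# the request serializer would translate it into a literal JSON null.
class _Null(object):
    def __repr__(self):
        return "github.GithubObject.Null"

Null = _Null()
```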
Example
```python
new_tree = repo.create_git_tree(
[
InputGitTreeElement(
path="my/dir/my_file.txt", mode="100644", type="blob", sha=github.GithubObject.Null
),
],
base_tree=head_commit.tree
)
```
This will delete `my/dir/my_file.txt`
---
My current workaround is to directly hit the api to create tree (using requests, setting `sha=None`), get the tree sha & use it with pygithub for my remaining workflow (committing, etc).
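
Roughly, that workaround looks like this (OWNER/REPO/TOKEN are placeholders, error handling is omitted, and the endpoint is the Git Trees API linked above):

```python
# Direct call to the Trees API so that sha can be a literal JSON null.
import requests

resp = requests.post(
    "https://api.github.com/repos/OWNER/REPO/git/trees",
    headers={"Authorization": "token TOKEN"},
    json={
        "base_tree": head_commit.tree.sha,  # tree from the PyGithub example above
        "tree": [
            # sha=None serializes to null, which marks the blob for deletion
            {"path": "my/dir/my_file.txt", "mode": "100644", "type": "blob", "sha": None},
        ],
    },
)
new_tree_sha = resp.json()["sha"]  # hand this back to the PyGithub workflow
```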
Please let me know in case I misunderstood some aspect or if anything needs to be elaborated upon.
</issue>
<code>
[start of github/InputGitTreeElement.py]
1 # -*- coding: utf-8 -*-
2
3 ############################ Copyrights and license ############################
4 # #
5 # Copyright 2012 Vincent Jacques <[email protected]> #
6 # Copyright 2012 Zearin <[email protected]> #
7 # Copyright 2013 Vincent Jacques <[email protected]> #
8 # Copyright 2014 Vincent Jacques <[email protected]> #
9 # Copyright 2016 Peter Buckley <[email protected]> #
10 # Copyright 2018 Wan Liuyang <[email protected]> #
11 # Copyright 2018 sfdye <[email protected]> #
12 # #
13 # This file is part of PyGithub. #
14 # http://pygithub.readthedocs.io/ #
15 # #
16 # PyGithub is free software: you can redistribute it and/or modify it under #
17 # the terms of the GNU Lesser General Public License as published by the Free #
18 # Software Foundation, either version 3 of the License, or (at your option) #
19 # any later version. #
20 # #
21 # PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
22 # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
23 # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
24 # details. #
25 # #
26 # You should have received a copy of the GNU Lesser General Public License #
27 # along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
28 # #
29 ################################################################################
30
31 from __future__ import absolute_import
32
33 import six
34
35 import github.GithubObject
36
37
38 class InputGitTreeElement(object):
39 """
40 This class represents InputGitTreeElements
41 """
42
43 def __init__(
44 self,
45 path,
46 mode,
47 type,
48 content=github.GithubObject.NotSet,
49 sha=github.GithubObject.NotSet,
50 ):
51 """
52 :param path: string
53 :param mode: string
54 :param type: string
55 :param content: string
56 :param sha: string
57 """
58
59 assert isinstance(path, (str, six.text_type)), path
60 assert isinstance(mode, (str, six.text_type)), mode
61 assert isinstance(type, (str, six.text_type)), type
62 assert content is github.GithubObject.NotSet or isinstance(
63 content, (str, six.text_type)
64 ), content
65 assert sha is github.GithubObject.NotSet or isinstance(
66 sha, (str, six.text_type)
67 ), sha
68 self.__path = path
69 self.__mode = mode
70 self.__type = type
71 self.__content = content
72 self.__sha = sha
73
74 @property
75 def _identity(self):
76 identity = {
77 "path": self.__path,
78 "mode": self.__mode,
79 "type": self.__type,
80 }
81 if self.__sha is not github.GithubObject.NotSet:
82 identity["sha"] = self.__sha
83 if self.__content is not github.GithubObject.NotSet:
84 identity["content"] = self.__content
85 return identity
86
[end of github/InputGitTreeElement.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/github/InputGitTreeElement.py b/github/InputGitTreeElement.py
--- a/github/InputGitTreeElement.py
+++ b/github/InputGitTreeElement.py
@@ -53,7 +53,7 @@
:param mode: string
:param type: string
:param content: string
- :param sha: string
+ :param sha: string or None
"""
assert isinstance(path, (str, six.text_type)), path
@@ -62,8 +62,10 @@
assert content is github.GithubObject.NotSet or isinstance(
content, (str, six.text_type)
), content
- assert sha is github.GithubObject.NotSet or isinstance(
- sha, (str, six.text_type)
+ assert (
+ sha is github.GithubObject.NotSet
+ or sha is None
+ or isinstance(sha, (str, six.text_type))
), sha
self.__path = path
self.__mode = mode
| {"golden_diff": "diff --git a/github/InputGitTreeElement.py b/github/InputGitTreeElement.py\n--- a/github/InputGitTreeElement.py\n+++ b/github/InputGitTreeElement.py\n@@ -53,7 +53,7 @@\n :param mode: string\n :param type: string\n :param content: string\n- :param sha: string\n+ :param sha: string or None\n \"\"\"\n \n assert isinstance(path, (str, six.text_type)), path\n@@ -62,8 +62,10 @@\n assert content is github.GithubObject.NotSet or isinstance(\n content, (str, six.text_type)\n ), content\n- assert sha is github.GithubObject.NotSet or isinstance(\n- sha, (str, six.text_type)\n+ assert (\n+ sha is github.GithubObject.NotSet\n+ or sha is None\n+ or isinstance(sha, (str, six.text_type))\n ), sha\n self.__path = path\n self.__mode = mode\n", "issue": "InputGitTreeElement should allow passing \"null\" for sha\nGithub's [Tree creation api](https://developer.github.com/v3/git/trees/#create-a-tree) allows us to pass `sha = null` to indicate that the specified blob needs to be deleted.\r\n\r\nHowever, I don't have a way to pass this info to my `InputGitTreeElement`. I can either give it a str or a `github.GithubObject.NotSet`. This means I have no way of deleting files from a tree using PyGithub (I'd like to delete multiple files in a single commit so tree creation is the ideal choice for me).\r\n\r\nThe current design is to only pass the `sha` if it is actually set:\r\nhttps://github.com/PyGithub/PyGithub/blob/540a085001/github/InputGitTreeElement.py#L81\r\n\r\nI can understand that passing a `None` goes against the design. I think something like `github.GithubObject.Null` could be introduced to explicitly say that this field is `null`. It can be used everywhere the GH API accepts a null value.\r\n\r\nExample\r\n```python\r\nnew_tree = repo.create_git_tree(\r\n [\r\n InputGitTreeElement(\r\n path=\"my/dir/my_file.txt\", mode=\"100644\", type=\"blob\", sha=github.GithubObject.Null\r\n ),\r\n ],\r\n base_tree=head_commit.tree\r\n)\r\n```\r\nThis will delete `my/dir/my_file.txt`\r\n\r\n---\r\n\r\nMy current workaround is to directly hit the api to create tree (using requests, setting `sha=None`), get the tree sha & use it with pygithub for my remaining workflow (committing, etc).\r\n\r\nPlease let me know in case I misunderstood some aspect or if anything needs to be elaborated upon.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n############################ Copyrights and license ############################\n# #\n# Copyright 2012 Vincent Jacques <[email protected]> #\n# Copyright 2012 Zearin <[email protected]> #\n# Copyright 2013 Vincent Jacques <[email protected]> #\n# Copyright 2014 Vincent Jacques <[email protected]> #\n# Copyright 2016 Peter Buckley <[email protected]> #\n# Copyright 2018 Wan Liuyang <[email protected]> #\n# Copyright 2018 sfdye <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. 
If not, see <http://www.gnu.org/licenses/>. #\n# #\n################################################################################\n\nfrom __future__ import absolute_import\n\nimport six\n\nimport github.GithubObject\n\n\nclass InputGitTreeElement(object):\n \"\"\"\n This class represents InputGitTreeElements\n \"\"\"\n\n def __init__(\n self,\n path,\n mode,\n type,\n content=github.GithubObject.NotSet,\n sha=github.GithubObject.NotSet,\n ):\n \"\"\"\n :param path: string\n :param mode: string\n :param type: string\n :param content: string\n :param sha: string\n \"\"\"\n\n assert isinstance(path, (str, six.text_type)), path\n assert isinstance(mode, (str, six.text_type)), mode\n assert isinstance(type, (str, six.text_type)), type\n assert content is github.GithubObject.NotSet or isinstance(\n content, (str, six.text_type)\n ), content\n assert sha is github.GithubObject.NotSet or isinstance(\n sha, (str, six.text_type)\n ), sha\n self.__path = path\n self.__mode = mode\n self.__type = type\n self.__content = content\n self.__sha = sha\n\n @property\n def _identity(self):\n identity = {\n \"path\": self.__path,\n \"mode\": self.__mode,\n \"type\": self.__type,\n }\n if self.__sha is not github.GithubObject.NotSet:\n identity[\"sha\"] = self.__sha\n if self.__content is not github.GithubObject.NotSet:\n identity[\"content\"] = self.__content\n return identity\n", "path": "github/InputGitTreeElement.py"}]} | 1,783 | 224 |
gh_patches_debug_18915 | rasdani/github-patches | git_diff | dotkom__onlineweb4-2123 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Links to company websites don't work without http://
On a company profile page the link to the company's website will only redirect the user if `http://` is specified when the link is added in the dashboard. For example, the link to AppearTV is written as `www.appeartv.com`, and redirects to `https://online.ntnu.no/company/60/www.appeartv.com`.
There is no information for the user creating an event that `http://` needs to be added either, so I can imagine this becoming a growing problem.
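
For reference, Django's form-level `URLField` already normalizes scheme-less input, which is one commonly suggested direction (exact behavior depends on the Django version; assumed here):

```python
# Sketch: forms.URLField assumes "http://" when no scheme is given,
# so cleaned values come back as absolute URLs.
from django import forms

field = forms.URLField()
print(field.clean("www.appeartv.com"))  # -> "http://www.appeartv.com"
```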
</issue>
<code>
[start of apps/companyprofile/dashboard/forms.py]
1 # -*- coding: utf-8 -*-
2 from django.forms import ModelForm
3
4 from apps.companyprofile.models import Company
5 from apps.dashboard.widgets import widget_generator
6 from apps.gallery.widgets import SingleImageInput
7
8
9 class CompanyForm(ModelForm):
10
11 class Meta(object):
12 model = Company
13 fields = ('name', 'short_description', 'long_description', 'image', 'site', 'email_address', 'phone_number',)
14 exclude = ['old_image']
15
16 # Widget generator accepts a form widget, and a list of tuples between field name and an attribute dict
17 widgets = widget_generator(SingleImageInput, [('image', {'id': 'responsive-image-id'})])
18
[end of apps/companyprofile/dashboard/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/companyprofile/dashboard/forms.py b/apps/companyprofile/dashboard/forms.py
--- a/apps/companyprofile/dashboard/forms.py
+++ b/apps/companyprofile/dashboard/forms.py
@@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
from django.forms import ModelForm
+from django.forms.fields import URLField
from apps.companyprofile.models import Company
from apps.dashboard.widgets import widget_generator
@@ -7,10 +8,12 @@
class CompanyForm(ModelForm):
+ site = URLField(max_length=100)
class Meta(object):
model = Company
fields = ('name', 'short_description', 'long_description', 'image', 'site', 'email_address', 'phone_number',)
+
exclude = ['old_image']
# Widget generator accepts a form widget, and a list of tuples between field name and an attribute dict
| {"golden_diff": "diff --git a/apps/companyprofile/dashboard/forms.py b/apps/companyprofile/dashboard/forms.py\n--- a/apps/companyprofile/dashboard/forms.py\n+++ b/apps/companyprofile/dashboard/forms.py\n@@ -1,5 +1,6 @@\n # -*- coding: utf-8 -*-\n from django.forms import ModelForm\n+from django.forms.fields import URLField\n \n from apps.companyprofile.models import Company\n from apps.dashboard.widgets import widget_generator\n@@ -7,10 +8,12 @@\n \n \n class CompanyForm(ModelForm):\n+ site = URLField(max_length=100)\n \n class Meta(object):\n model = Company\n fields = ('name', 'short_description', 'long_description', 'image', 'site', 'email_address', 'phone_number',)\n+\n exclude = ['old_image']\n \n # Widget generator accepts a form widget, and a list of tuples between field name and an attribute dict\n", "issue": "Links to company websites doesn't work without http:// \nOn a company profile page the link to the company's website will only redirect the user if `http://` is specified when the link is added in the dashboard. For example, the link to AppearTV is written as `www.appeartv.com`, and redirects to `https://online.ntnu.no/company/60/www.appeartv.com`.\nThere is no information to the user creating an event to add http either, so I can imagine this being a growing problem. \n\nLinks to company websites doesn't work without http:// \nOn a company profile page the link to the company's website will only redirect the user if `http://` is specified when the link is added in the dashboard. For example, the link to AppearTV is written as `www.appeartv.com`, and redirects to `https://online.ntnu.no/company/60/www.appeartv.com`.\nThere is no information to the user creating an event to add http either, so I can imagine this being a growing problem. \n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom django.forms import ModelForm\n\nfrom apps.companyprofile.models import Company\nfrom apps.dashboard.widgets import widget_generator\nfrom apps.gallery.widgets import SingleImageInput\n\n\nclass CompanyForm(ModelForm):\n\n class Meta(object):\n model = Company\n fields = ('name', 'short_description', 'long_description', 'image', 'site', 'email_address', 'phone_number',)\n exclude = ['old_image']\n\n # Widget generator accepts a form widget, and a list of tuples between field name and an attribute dict\n widgets = widget_generator(SingleImageInput, [('image', {'id': 'responsive-image-id'})])\n", "path": "apps/companyprofile/dashboard/forms.py"}]} | 927 | 189 |
gh_patches_debug_11671 | rasdani/github-patches | git_diff | netbox-community__netbox-14461 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove the `clearcache` management command
### Proposed Changes
Remove the `clearcache` management command (from the `core` app), and omit it from the upgrade script.
### Justification
~This command was introduced back when we were experimenting with query caching, and is no longer needed.~ I was mistaken; it was actually introduced under #9122 to provide a mechanism for clearing the cached API spec. However, this is also no longer used since we moved to `drf-spectacular` (see #9608).
The Django cache is currently used only for discrete caching operations, including:
* Config revision tracking
* Recording the most recent release
* Caching RSS feed content (the RSSFeedWidget)
There has already been at least one bug related to this function (see #14182). Additionally, plugins may utilize the cache for other purposes, and we cannot make the assumption that it is safe to clear other cached data.
### Impact
Any mechanisms within NetBox or a plugin which employ caching will be responsible for their own cleanup, where applicable.
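
As a sketch of what that per-feature cleanup could look like for a plugin (the key names here are made up):

```python
# Illustrative only: delete the plugin's own cache entries instead of
# relying on a global cache.clear().
from django.core.cache import cache

cache.delete("myplugin:latest-release")                     # one key
cache.delete_many(["myplugin:rss-feed", "myplugin:stats"])  # several keys
```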
</issue>
<code>
[start of netbox/core/management/commands/clearcache.py]
1 from django.core.cache import cache
2 from django.core.management.base import BaseCommand
3
4 from core.models import ConfigRevision
5
6
7 class Command(BaseCommand):
8 """Command to clear the entire cache."""
9 help = 'Clears the cache.'
10
11 def handle(self, *args, **kwargs):
12 # Fetch the current config revision from the cache
13 config_version = cache.get('config_version')
14 # Clear the cache
15 cache.clear()
16 self.stdout.write('Cache has been cleared.', ending="\n")
17 if config_version:
18 # Activate the current config revision
19 ConfigRevision.objects.get(id=config_version).activate()
20 self.stdout.write(f'Config revision ({config_version}) has been restored.', ending="\n")
21
[end of netbox/core/management/commands/clearcache.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/netbox/core/management/commands/clearcache.py b/netbox/core/management/commands/clearcache.py
deleted file mode 100644
--- a/netbox/core/management/commands/clearcache.py
+++ /dev/null
@@ -1,20 +0,0 @@
-from django.core.cache import cache
-from django.core.management.base import BaseCommand
-
-from core.models import ConfigRevision
-
-
-class Command(BaseCommand):
- """Command to clear the entire cache."""
- help = 'Clears the cache.'
-
- def handle(self, *args, **kwargs):
- # Fetch the current config revision from the cache
- config_version = cache.get('config_version')
- # Clear the cache
- cache.clear()
- self.stdout.write('Cache has been cleared.', ending="\n")
- if config_version:
- # Activate the current config revision
- ConfigRevision.objects.get(id=config_version).activate()
- self.stdout.write(f'Config revision ({config_version}) has been restored.', ending="\n")
| {"golden_diff": "diff --git a/netbox/core/management/commands/clearcache.py b/netbox/core/management/commands/clearcache.py\ndeleted file mode 100644\n--- a/netbox/core/management/commands/clearcache.py\n+++ /dev/null\n@@ -1,20 +0,0 @@\n-from django.core.cache import cache\n-from django.core.management.base import BaseCommand\n-\n-from core.models import ConfigRevision\n-\n-\n-class Command(BaseCommand):\n- \"\"\"Command to clear the entire cache.\"\"\"\n- help = 'Clears the cache.'\n-\n- def handle(self, *args, **kwargs):\n- # Fetch the current config revision from the cache\n- config_version = cache.get('config_version')\n- # Clear the cache\n- cache.clear()\n- self.stdout.write('Cache has been cleared.', ending=\"\\n\")\n- if config_version:\n- # Activate the current config revision\n- ConfigRevision.objects.get(id=config_version).activate()\n- self.stdout.write(f'Config revision ({config_version}) has been restored.', ending=\"\\n\")\n", "issue": "Remove the `clearcache` management command\n### Proposed Changes\r\n\r\nRemove the `clearcache` management command (from the `core` app), and omit it from the upgrade script.\r\n\r\n### Justification\r\n\r\n~This command was introduced back when we were experimenting with query caching, and is no longer needed.~ I was mistaken; it was actually introduced under #9122 to provide a mechanism for clearing the cached API spec. However, this is also no longer used since we moved to `drf-spectacular` (see #9608).\r\n\r\nThe Django cache is currently used only for discrete caching operations, including:\r\n\r\n* Config revision tracking\r\n* Recording the most recent release\r\n* Caching RSS feed content (the RSSFeedWidget)\r\n\r\nThere has already been at least one bug related to this function (see #14182). Additionally, plugins may utilize the cache for other purposes, and we cannot make the assumption that it is safe to clear other cached data.\r\n\r\n### Impact\r\n\r\nAny mechanisms within NetBox or a plugin which employ caching will be responsible for their own cleanup, where applicable.\n", "before_files": [{"content": "from django.core.cache import cache\nfrom django.core.management.base import BaseCommand\n\nfrom core.models import ConfigRevision\n\n\nclass Command(BaseCommand):\n \"\"\"Command to clear the entire cache.\"\"\"\n help = 'Clears the cache.'\n\n def handle(self, *args, **kwargs):\n # Fetch the current config revision from the cache\n config_version = cache.get('config_version')\n # Clear the cache\n cache.clear()\n self.stdout.write('Cache has been cleared.', ending=\"\\n\")\n if config_version:\n # Activate the current config revision\n ConfigRevision.objects.get(id=config_version).activate()\n self.stdout.write(f'Config revision ({config_version}) has been restored.', ending=\"\\n\")\n", "path": "netbox/core/management/commands/clearcache.py"}]} | 957 | 232 |
gh_patches_debug_54036 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-3190 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 0.1.3
## 2023-08-16
```[tasklist]
### Tasks
- [x] Cut 0.1.3 release branch, freeze code
- [x] Update version number in all places in the new branch
- [x] Make an image from the branch with tag `0.1.3`, push to Dockerhub
- [x] Test installation with the new image
- [x] Test upgrade
- [x] Smoke testing application
- [x] Stability of the newly released items
```
</issue>
<code>
[start of mathesar/__init__.py]
1 default_app_config = 'mathesar.apps.MathesarConfig'
2
3 __version__ = "0.1.2"
4
[end of mathesar/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mathesar/__init__.py b/mathesar/__init__.py
--- a/mathesar/__init__.py
+++ b/mathesar/__init__.py
@@ -1,3 +1,3 @@
default_app_config = 'mathesar.apps.MathesarConfig'
-__version__ = "0.1.2"
+__version__ = "0.1.3"
| {"golden_diff": "diff --git a/mathesar/__init__.py b/mathesar/__init__.py\n--- a/mathesar/__init__.py\n+++ b/mathesar/__init__.py\n@@ -1,3 +1,3 @@\n default_app_config = 'mathesar.apps.MathesarConfig'\n \n-__version__ = \"0.1.2\"\n+__version__ = \"0.1.3\"\n", "issue": "Release 0.1.3\n## 2023-08-16\r\n```[tasklist]\r\n### Tasks\r\n- [x] Cut 0.1.3 release branch, freeze code\r\n- [x] Update version number in all places in the new branch\r\n- [x] Make an image from the branch with tag `0.1.3`, push to Dockerhub\r\n- [x] Test installation with the new image\r\n- [x] Test upgrade\r\n- [x] Smoke testing application\r\n- [x] Stability of the newly released items\r\n```\r\n\n", "before_files": [{"content": "default_app_config = 'mathesar.apps.MathesarConfig'\n\n__version__ = \"0.1.2\"\n", "path": "mathesar/__init__.py"}]} | 691 | 84 |
gh_patches_debug_10502 | rasdani/github-patches | git_diff | Kinto__kinto-158 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add custom (meta) data on buckets and collections
For some use-cases, it might become useful to be able to store some custom attributes in buckets or collections (e.g. metadata like application version, contact email or whatever).
Currently both Collection and Bucket resources do not define extra fields in their schema, and Cliquet drops unknown fields if not explicitly allowed.
We can either:
- Allow unknown fields in collection and bucket schemas
- Add a specific root level field (along `data` and `permissions`)
- Add a specific field (called `meta` for example) in the schema that could receive anything.
The advantage of the latter is that custom fields do not interfere with anything in the protocol, and are trivial to implement. The inconvenience is having to put `{data: {metadata: {email: "[email protected]"}}}` in the payload.
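
As a rough sketch of that third option, reusing the permissive-mapping pattern already used for `schema` in this repo (the field name and placement are illustrative):

```python
# Sketch: a free-form "meta" mapping on the resource schema; keys inside it
# are preserved as-is, while the rest of the schema stays strict.
import colander
from cliquet import resource

class CollectionSchema(resource.ResourceSchema):
    meta = colander.SchemaNode(colander.Mapping(unknown='preserve'),
                               missing=colander.drop)
```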
Thoughts?
</issue>
<code>
[start of kinto/views/collections.py]
1 import colander
2 import jsonschema
3 from cliquet import resource
4 from jsonschema import exceptions as jsonschema_exceptions
5
6 from kinto.views import NameGenerator, object_exists_or_404
7
8
9 class JSONSchemaMapping(colander.SchemaNode):
10 def schema_type(self, **kw):
11 return colander.Mapping(unknown='preserve')
12
13 def deserialize(self, cstruct=colander.null):
14 # Start by deserializing a simple mapping.
15 validated = super(JSONSchemaMapping, self).deserialize(cstruct)
16
17 # In case it is optional in parent schema.
18 if not validated or validated in (colander.null, colander.drop):
19 return validated
20
21 try:
22 jsonschema.Draft4Validator.check_schema(validated)
23 except jsonschema_exceptions.SchemaError as e:
24 self.raise_invalid(e.path.pop() + e.message)
25 return validated
26
27
28 class CollectionSchema(resource.ResourceSchema):
29 schema = JSONSchemaMapping(missing=colander.drop)
30
31
32 @resource.register(name='collection',
33 collection_methods=('GET',),
34 collection_path='/buckets/{{bucket_id}}/collections',
35 record_path='/buckets/{{bucket_id}}/collections/{{id}}')
36 class Collection(resource.ProtectedResource):
37 mapping = CollectionSchema()
38 permissions = ('read', 'write', 'record:create')
39
40 def __init__(self, *args, **kwargs):
41 super(Collection, self).__init__(*args, **kwargs)
42
43 bucket_id = self.request.matchdict['bucket_id']
44 object_exists_or_404(self.request,
45 collection_id='bucket',
46 object_id=bucket_id)
47
48 self.collection.id_generator = NameGenerator()
49
50 def get_parent_id(self, request):
51 bucket_id = request.matchdict['bucket_id']
52 parent_id = '/buckets/%s' % bucket_id
53 return parent_id
54
55 def delete(self):
56 result = super(Collection, self).delete()
57
58 # Delete records.
59 storage = self.collection.storage
60 parent_id = '%s/collections/%s' % (self.collection.parent_id,
61 self.record_id)
62 storage.delete_all(collection_id='record',
63 parent_id=parent_id,
64 with_deleted=False)
65 storage.purge_deleted(collection_id='record', parent_id=parent_id)
66
67 return result
68
[end of kinto/views/collections.py]
[start of kinto/views/records.py]
1 import jsonschema
2 from cliquet import resource, schema
3 from cliquet.errors import raise_invalid
4 from jsonschema import exceptions as jsonschema_exceptions
5
6 from kinto.views import object_exists_or_404
7
8
9 class RecordSchema(schema.ResourceSchema):
10 class Options():
11 preserve_unknown = True
12
13
14 _parent_path = '/buckets/{{bucket_id}}/collections/{{collection_id}}'
15
16
17 @resource.register(name='record',
18 collection_path=_parent_path + '/records',
19 record_path=_parent_path + '/records/{{id}}')
20 class Record(resource.ProtectedResource):
21
22 mapping = RecordSchema()
23 schema_field = 'schema'
24
25 def __init__(self, *args, **kwargs):
26 super(Record, self).__init__(*args, **kwargs)
27
28 bucket_id = self.request.matchdict['bucket_id']
29 object_exists_or_404(self.request,
30 collection_id='bucket',
31 object_id=bucket_id)
32
33 collection_id = self.request.matchdict['collection_id']
34 parent_id = '/buckets/%s' % bucket_id
35 self._collection = object_exists_or_404(self.request,
36 collection_id='collection',
37 parent_id=parent_id,
38 object_id=collection_id)
39
40 def get_parent_id(self, request):
41 bucket_id = request.matchdict['bucket_id']
42 collection_id = request.matchdict['collection_id']
43 return '/buckets/%s/collections/%s' % (bucket_id, collection_id)
44
45 def is_known_field(self, field_name):
46 """Without schema, any field is considered as known."""
47 return True
48
49 def process_record(self, new, old=None):
50 """Validate records against collection schema, if any."""
51 schema = self._collection.get('schema')
52 if not schema:
53 return new
54
55 collection_timestamp = self._collection[self.collection.modified_field]
56
57 try:
58 jsonschema.validate(new, schema)
59 new[self.schema_field] = collection_timestamp
60 except jsonschema_exceptions.ValidationError as e:
61 field = e.path.pop() if e.path else e.validator_value.pop()
62 raise_invalid(self.request, name=field, description=e.message)
63
64 return new
65
[end of kinto/views/records.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kinto/views/collections.py b/kinto/views/collections.py
--- a/kinto/views/collections.py
+++ b/kinto/views/collections.py
@@ -28,6 +28,9 @@
class CollectionSchema(resource.ResourceSchema):
schema = JSONSchemaMapping(missing=colander.drop)
+ class Options:
+ preserve_unknown = True
+
@resource.register(name='collection',
collection_methods=('GET',),
diff --git a/kinto/views/records.py b/kinto/views/records.py
--- a/kinto/views/records.py
+++ b/kinto/views/records.py
@@ -7,7 +7,7 @@
class RecordSchema(schema.ResourceSchema):
- class Options():
+ class Options:
preserve_unknown = True
| {"golden_diff": "diff --git a/kinto/views/collections.py b/kinto/views/collections.py\n--- a/kinto/views/collections.py\n+++ b/kinto/views/collections.py\n@@ -28,6 +28,9 @@\n class CollectionSchema(resource.ResourceSchema):\n schema = JSONSchemaMapping(missing=colander.drop)\n \n+ class Options:\n+ preserve_unknown = True\n+\n \n @resource.register(name='collection',\n collection_methods=('GET',),\ndiff --git a/kinto/views/records.py b/kinto/views/records.py\n--- a/kinto/views/records.py\n+++ b/kinto/views/records.py\n@@ -7,7 +7,7 @@\n \n \n class RecordSchema(schema.ResourceSchema):\n- class Options():\n+ class Options:\n preserve_unknown = True\n", "issue": "Add custom (meta) data on buckets and collections \nFor some use-cases, it might become useful to be able to store some custom attributes in buckets or collections (e.g. metadata like application version, contact email or whatever).\n\nCurrently both Collection and Bucket resources do not define extra fields in their schema, and Cliquet drops unknown fields if not explicitly allowed.\n\nWe can either:\n- Allow unknown fields in collection and buckets schemas\n- Add a specific root level field (along `data` and `permissions`)\n- Add a specific field (called `meta` for example) in the schema that could receive anything.\n\nThe advantage of the latter is that custom fields do not interfere with anything in the protocol, and are trivial to implement. The inconvenient is having to put `{data: {metadata: {email: \"[email protected]\"}}` in the payload.\n\nThoughts ?\n\n", "before_files": [{"content": "import colander\nimport jsonschema\nfrom cliquet import resource\nfrom jsonschema import exceptions as jsonschema_exceptions\n\nfrom kinto.views import NameGenerator, object_exists_or_404\n\n\nclass JSONSchemaMapping(colander.SchemaNode):\n def schema_type(self, **kw):\n return colander.Mapping(unknown='preserve')\n\n def deserialize(self, cstruct=colander.null):\n # Start by deserializing a simple mapping.\n validated = super(JSONSchemaMapping, self).deserialize(cstruct)\n\n # In case it is optional in parent schema.\n if not validated or validated in (colander.null, colander.drop):\n return validated\n\n try:\n jsonschema.Draft4Validator.check_schema(validated)\n except jsonschema_exceptions.SchemaError as e:\n self.raise_invalid(e.path.pop() + e.message)\n return validated\n\n\nclass CollectionSchema(resource.ResourceSchema):\n schema = JSONSchemaMapping(missing=colander.drop)\n\n\[email protected](name='collection',\n collection_methods=('GET',),\n collection_path='/buckets/{{bucket_id}}/collections',\n record_path='/buckets/{{bucket_id}}/collections/{{id}}')\nclass Collection(resource.ProtectedResource):\n mapping = CollectionSchema()\n permissions = ('read', 'write', 'record:create')\n\n def __init__(self, *args, **kwargs):\n super(Collection, self).__init__(*args, **kwargs)\n\n bucket_id = self.request.matchdict['bucket_id']\n object_exists_or_404(self.request,\n collection_id='bucket',\n object_id=bucket_id)\n\n self.collection.id_generator = NameGenerator()\n\n def get_parent_id(self, request):\n bucket_id = request.matchdict['bucket_id']\n parent_id = '/buckets/%s' % bucket_id\n return parent_id\n\n def delete(self):\n result = super(Collection, self).delete()\n\n # Delete records.\n storage = self.collection.storage\n parent_id = '%s/collections/%s' % (self.collection.parent_id,\n self.record_id)\n storage.delete_all(collection_id='record',\n parent_id=parent_id,\n with_deleted=False)\n 
storage.purge_deleted(collection_id='record', parent_id=parent_id)\n\n return result\n", "path": "kinto/views/collections.py"}, {"content": "import jsonschema\nfrom cliquet import resource, schema\nfrom cliquet.errors import raise_invalid\nfrom jsonschema import exceptions as jsonschema_exceptions\n\nfrom kinto.views import object_exists_or_404\n\n\nclass RecordSchema(schema.ResourceSchema):\n class Options():\n preserve_unknown = True\n\n\n_parent_path = '/buckets/{{bucket_id}}/collections/{{collection_id}}'\n\n\[email protected](name='record',\n collection_path=_parent_path + '/records',\n record_path=_parent_path + '/records/{{id}}')\nclass Record(resource.ProtectedResource):\n\n mapping = RecordSchema()\n schema_field = 'schema'\n\n def __init__(self, *args, **kwargs):\n super(Record, self).__init__(*args, **kwargs)\n\n bucket_id = self.request.matchdict['bucket_id']\n object_exists_or_404(self.request,\n collection_id='bucket',\n object_id=bucket_id)\n\n collection_id = self.request.matchdict['collection_id']\n parent_id = '/buckets/%s' % bucket_id\n self._collection = object_exists_or_404(self.request,\n collection_id='collection',\n parent_id=parent_id,\n object_id=collection_id)\n\n def get_parent_id(self, request):\n bucket_id = request.matchdict['bucket_id']\n collection_id = request.matchdict['collection_id']\n return '/buckets/%s/collections/%s' % (bucket_id, collection_id)\n\n def is_known_field(self, field_name):\n \"\"\"Without schema, any field is considered as known.\"\"\"\n return True\n\n def process_record(self, new, old=None):\n \"\"\"Validate records against collection schema, if any.\"\"\"\n schema = self._collection.get('schema')\n if not schema:\n return new\n\n collection_timestamp = self._collection[self.collection.modified_field]\n\n try:\n jsonschema.validate(new, schema)\n new[self.schema_field] = collection_timestamp\n except jsonschema_exceptions.ValidationError as e:\n field = e.path.pop() if e.path else e.validator_value.pop()\n raise_invalid(self.request, name=field, description=e.message)\n\n return new\n", "path": "kinto/views/records.py"}]} | 1,925 | 168 |
gh_patches_debug_5423 | rasdani/github-patches | git_diff | ivy-llc__ivy-18290 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
linear
#15051
</issue>
<code>
[start of ivy/functional/frontends/paddle/nn/functional/common.py]
1 # local
2 import ivy
3 from ivy.func_wrapper import with_supported_dtypes
4 from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back
5
6
7 @to_ivy_arrays_and_back
8 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
9 def cosine_similarity(x1, x2, *, axis=1, eps=1e-08):
10 if len(x1.shape) == len(x2.shape) and len(x2.shape) >= 2:
11 numerator = ivy.sum(x1 * x2, axis=axis)
12 x1_squared_norm = ivy.sum(ivy.square(x1), axis=axis)
13 x2_squared_norm = ivy.sum(ivy.square(x2), axis=axis)
14 else:
15 numerator = ivy.sum(x1 * x2)
16 x1_squared_norm = ivy.sum(ivy.square(x1))
17 x2_squared_norm = ivy.sum(ivy.square(x2))
18
19 x1_norm = ivy.sqrt(x1_squared_norm)
20 x2_norm = ivy.sqrt(x2_squared_norm)
21 norm_mm = x1_norm * x2_norm
22 denominator = ivy.maximum(norm_mm, eps)
23
24 cosine = numerator / denominator
25 return cosine
26
27
28 @to_ivy_arrays_and_back
29 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
30 def dropout2d(x, *, p=0.5, training=True, data_format="NCHW", name=None):
31 return ivy.dropout2d(x, p=p, training=training, data_format=data_format)
32
33
34 def get_mask(shape, device, prob, seed=None):
35 mask = ivy.where(
36 ivy.random_uniform(shape=shape, device=device, seed=seed) < prob,
37 0.0,
38 1.0,
39 )
40 return mask
41
42
43 @to_ivy_arrays_and_back
44 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
45 def dropout(x, p=0.5, axis=None, training=True, mode="upscale_in_train", name=None):
46 if axis > 1:
47 raise ValueError("Axis value can only be 0 or 1 or None.")
48 elif axis is None or (isinstance(axis, list) and len(axis) == 2):
49 mask = get_mask(shape=x.shape, device=ivy.dev(x), prob=p, seed=None)
50 elif axis == 0:
51 mask = get_mask(shape=(x.shape[0], 1), device=ivy.dev(x), prob=p)
52 mask = ivy.broadcast_to(mask, x.shape)
53 elif axis == 1:
54 mask = get_mask(shape=(1, x.shape[1]), device=ivy.dev(x), prob=p)
55 mask = ivy.broadcast_to(mask, x.shape)
56 if mode == "upscale_in_train":
57 if training:
58 out = ivy.multiply(x, mask)
59 ret = ivy.multiply(out, 1.0 / (1.0 - p))
60 else:
61 ret = x
62 else:
63 if training:
64 ret = ivy.multiply(x, mask)
65 else:
66 ret = ivy.multiply(x, (1.0 - p))
67 return ret
68
69
70 @to_ivy_arrays_and_back
71 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
72 def zeropad2d(x, padding, data_format="NCHW", name=None):
73 if ivy.is_array(padding):
74 padding = padding.to_list()
75 if isinstance(padding, int):
76 padding = [padding, padding, padding, padding]
77 if len(padding) != 4:
78 raise ValueError("Padding length should be 4.")
79 if x.ndim != 4:
80 raise ValueError("Input x must be 4-dimensional.")
81 if data_format == "NCHW":
82 padding = ((0, 0), (0, 0), (padding[2], padding[3]), (padding[0], padding[1]))
83 elif data_format == "NHWC":
84 padding = ((0, 0), (padding[2], padding[3]), (padding[0], padding[1]), (0, 0))
85 else:
86 raise ValueError("Unknown data_format: {}".format(data_format))
87 return ivy.pad(x, padding, mode="constant", constant_values=0.0)
88
[end of ivy/functional/frontends/paddle/nn/functional/common.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ivy/functional/frontends/paddle/nn/functional/common.py b/ivy/functional/frontends/paddle/nn/functional/common.py
--- a/ivy/functional/frontends/paddle/nn/functional/common.py
+++ b/ivy/functional/frontends/paddle/nn/functional/common.py
@@ -85,3 +85,10 @@
else:
raise ValueError("Unknown data_format: {}".format(data_format))
return ivy.pad(x, padding, mode="constant", constant_values=0.0)
+
+
+@to_ivy_arrays_and_back
+@with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle")
+def linear(x, weight, bias=None, name=None):
+ weight = ivy.swapaxes(weight, -1, -2)
+ return ivy.linear(x, weight, bias=bias)
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/nn/functional/common.py b/ivy/functional/frontends/paddle/nn/functional/common.py\n--- a/ivy/functional/frontends/paddle/nn/functional/common.py\n+++ b/ivy/functional/frontends/paddle/nn/functional/common.py\n@@ -85,3 +85,10 @@\n else:\n raise ValueError(\"Unknown data_format: {}\".format(data_format))\n return ivy.pad(x, padding, mode=\"constant\", constant_values=0.0)\n+\n+\n+@to_ivy_arrays_and_back\n+@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n+def linear(x, weight, bias=None, name=None):\n+ weight = ivy.swapaxes(weight, -1, -2)\n+ return ivy.linear(x, weight, bias=bias)\n", "issue": "linear\n#15051 \n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef cosine_similarity(x1, x2, *, axis=1, eps=1e-08):\n if len(x1.shape) == len(x2.shape) and len(x2.shape) >= 2:\n numerator = ivy.sum(x1 * x2, axis=axis)\n x1_squared_norm = ivy.sum(ivy.square(x1), axis=axis)\n x2_squared_norm = ivy.sum(ivy.square(x2), axis=axis)\n else:\n numerator = ivy.sum(x1 * x2)\n x1_squared_norm = ivy.sum(ivy.square(x1))\n x2_squared_norm = ivy.sum(ivy.square(x2))\n\n x1_norm = ivy.sqrt(x1_squared_norm)\n x2_norm = ivy.sqrt(x2_squared_norm)\n norm_mm = x1_norm * x2_norm\n denominator = ivy.maximum(norm_mm, eps)\n\n cosine = numerator / denominator\n return cosine\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef dropout2d(x, *, p=0.5, training=True, data_format=\"NCHW\", name=None):\n return ivy.dropout2d(x, p=p, training=training, data_format=data_format)\n\n\ndef get_mask(shape, device, prob, seed=None):\n mask = ivy.where(\n ivy.random_uniform(shape=shape, device=device, seed=seed) < prob,\n 0.0,\n 1.0,\n )\n return mask\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef dropout(x, p=0.5, axis=None, training=True, mode=\"upscale_in_train\", name=None):\n if axis > 1:\n raise ValueError(\"Axis value can only be 0 or 1 or None.\")\n elif axis is None or (isinstance(axis, list) and len(axis) == 2):\n mask = get_mask(shape=x.shape, device=ivy.dev(x), prob=p, seed=None)\n elif axis == 0:\n mask = get_mask(shape=(x.shape[0], 1), device=ivy.dev(x), prob=p)\n mask = ivy.broadcast_to(mask, x.shape)\n elif axis == 1:\n mask = get_mask(shape=(1, x.shape[1]), device=ivy.dev(x), prob=p)\n mask = ivy.broadcast_to(mask, x.shape)\n if mode == \"upscale_in_train\":\n if training:\n out = ivy.multiply(x, mask)\n ret = ivy.multiply(out, 1.0 / (1.0 - p))\n else:\n ret = x\n else:\n if training:\n ret = ivy.multiply(x, mask)\n else:\n ret = ivy.multiply(x, (1.0 - p))\n return ret\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\ndef zeropad2d(x, padding, data_format=\"NCHW\", name=None):\n if ivy.is_array(padding):\n padding = padding.to_list()\n if isinstance(padding, int):\n padding = [padding, padding, padding, padding]\n if len(padding) != 4:\n raise ValueError(\"Padding length should be 4.\")\n if x.ndim != 4:\n raise ValueError(\"Input x must be 4-dimensional.\")\n if data_format == \"NCHW\":\n padding = ((0, 0), (0, 0), (padding[2], padding[3]), (padding[0], 
padding[1]))\n elif data_format == \"NHWC\":\n padding = ((0, 0), (padding[2], padding[3]), (padding[0], padding[1]), (0, 0))\n else:\n raise ValueError(\"Unknown data_format: {}\".format(data_format))\n return ivy.pad(x, padding, mode=\"constant\", constant_values=0.0)\n", "path": "ivy/functional/frontends/paddle/nn/functional/common.py"}]} | 1,716 | 202 |
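The added `linear` frontend in the diff above transposes the weight before delegating, and the reason is a layout mismatch: Paddle's functional `linear` takes `weight` shaped `[in_features, out_features]` and computes `x @ weight + bias`, whereas `ivy.linear` expects `[out_features, in_features]`. A NumPy sketch of the equivalence (illustrative only, not the frontend code; the `ivy.linear` layout is my reading of its convention):

```python
import numpy as np

x = np.random.rand(4, 3)           # batch of 4, in_features = 3
w_paddle = np.random.rand(3, 5)    # paddle layout: [in, out]
b = np.zeros(5)

y_paddle = x @ w_paddle + b                # paddle semantics
w_ivy = np.swapaxes(w_paddle, -1, -2)      # [out, in], the layout ivy.linear expects
y_ivy_style = x @ w_ivy.T + b              # what ivy.linear(x, w_ivy, bias=b) should compute
assert np.allclose(y_paddle, y_ivy_style)
```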
gh_patches_debug_2077 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-2615 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[requires.io] dependency update on master branch
</issue>
<code>
[start of setup.py]
1 import os
2 import runpy
3 from codecs import open
4
5 from setuptools import setup, find_packages
6
7 # Based on https://github.com/pypa/sampleproject/blob/master/setup.py
8 # and https://python-packaging-user-guide.readthedocs.org/
9
10 here = os.path.abspath(os.path.dirname(__file__))
11
12 with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
13 long_description = f.read()
14
15 VERSION = runpy.run_path(os.path.join(here, "mitmproxy", "version.py"))["VERSION"]
16
17 setup(
18 name="mitmproxy",
19 version=VERSION,
20 description="An interactive, SSL-capable, man-in-the-middle HTTP proxy for penetration testers and software developers.",
21 long_description=long_description,
22 url="http://mitmproxy.org",
23 author="Aldo Cortesi",
24 author_email="[email protected]",
25 license="MIT",
26 classifiers=[
27 "License :: OSI Approved :: MIT License",
28 "Development Status :: 5 - Production/Stable",
29 "Environment :: Console",
30 "Environment :: Console :: Curses",
31 "Operating System :: MacOS :: MacOS X",
32 "Operating System :: POSIX",
33 "Operating System :: Microsoft :: Windows",
34 "Programming Language :: Python",
35 "Programming Language :: Python :: 3",
36 "Programming Language :: Python :: 3 :: Only",
37 "Programming Language :: Python :: 3.5",
38 "Programming Language :: Python :: 3.6",
39 "Programming Language :: Python :: Implementation :: CPython",
40 "Topic :: Security",
41 "Topic :: Internet",
42 "Topic :: Internet :: WWW/HTTP",
43 "Topic :: Internet :: Proxy Servers",
44 "Topic :: Software Development :: Testing"
45 ],
46 packages=find_packages(include=[
47 "mitmproxy", "mitmproxy.*",
48 "pathod", "pathod.*",
49 ]),
50 include_package_data=True,
51 entry_points={
52 'console_scripts': [
53 "mitmproxy = mitmproxy.tools.main:mitmproxy",
54 "mitmdump = mitmproxy.tools.main:mitmdump",
55 "mitmweb = mitmproxy.tools.main:mitmweb",
56 "pathod = pathod.pathod_cmdline:go_pathod",
57 "pathoc = pathod.pathoc_cmdline:go_pathoc"
58 ]
59 },
60 # https://packaging.python.org/en/latest/requirements/#install-requires
61 # It is not considered best practice to use install_requires to pin dependencies to specific versions.
62 install_requires=[
63 "blinker>=1.4, <1.5",
64 "brotlipy>=0.5.1, <0.8",
65 "certifi>=2015.11.20.1", # no semver here - this should always be on the last release!
66 "click>=6.2, <7",
67 "cryptography>=2.0,<2.2",
68 "h2>=3.0, <4",
69 "hyperframe>=5.0, <6",
70 "kaitaistruct>=0.7, <0.8",
71 "ldap3>=2.2.0, <2.4",
72 "passlib>=1.6.5, <1.8",
73 "pyasn1>=0.3.1, <0.4",
74 "pyOpenSSL>=17.2,<17.4",
75 "pyparsing>=2.1.3, <2.3",
76 "pyperclip>=1.5.22, <1.6",
77 "requests>=2.9.1, <3",
78 "ruamel.yaml>=0.13.2, <0.16",
79 "sortedcontainers>=1.5.4, <1.6",
80 "tornado>=4.3, <4.6",
81 "urwid>=1.3.1, <1.4",
82 ],
83 extras_require={
84 ':sys_platform == "win32"': [
85 "pydivert>=2.0.3,<2.2",
86 ],
87 'dev': [
88 "flake8>=3.2.1, <3.5",
89 "Flask>=0.10.1, <0.13",
90 "mypy>=0.530,<0.541",
91 "pytest-cov>=2.2.1, <3",
92 "pytest-faulthandler>=1.3.0, <2",
93 "pytest-timeout>=1.0.0, <2",
94 "pytest-xdist>=1.14, <2",
95 "pytest>=3.1, <4",
96 "rstcheck>=2.2, <4.0",
97 "sphinx_rtd_theme>=0.1.9, <0.3",
98 "sphinx-autobuild>=0.5.2, <0.8",
99 "sphinx>=1.3.5, <1.7",
100 "sphinxcontrib-documentedlist>=0.5.0, <0.7",
101 "tox>=2.3, <3",
102 ],
103 'examples': [
104 "beautifulsoup4>=4.4.1, <4.7",
105 "Pillow>=4.3,<4.4",
106 ]
107 }
108 )
109
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -85,7 +85,7 @@
"pydivert>=2.0.3,<2.2",
],
'dev': [
- "flake8>=3.2.1, <3.5",
+ "flake8>=3.5, <3.6",
"Flask>=0.10.1, <0.13",
"mypy>=0.530,<0.541",
"pytest-cov>=2.2.1, <3",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -85,7 +85,7 @@\n \"pydivert>=2.0.3,<2.2\",\n ],\n 'dev': [\n- \"flake8>=3.2.1, <3.5\",\n+ \"flake8>=3.5, <3.6\",\n \"Flask>=0.10.1, <0.13\",\n \"mypy>=0.530,<0.541\",\n \"pytest-cov>=2.2.1, <3\",\n", "issue": "[requires.io] dependency update on master branch\n\n", "before_files": [{"content": "import os\nimport runpy\nfrom codecs import open\n\nfrom setuptools import setup, find_packages\n\n# Based on https://github.com/pypa/sampleproject/blob/master/setup.py\n# and https://python-packaging-user-guide.readthedocs.org/\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:\n long_description = f.read()\n\nVERSION = runpy.run_path(os.path.join(here, \"mitmproxy\", \"version.py\"))[\"VERSION\"]\n\nsetup(\n name=\"mitmproxy\",\n version=VERSION,\n description=\"An interactive, SSL-capable, man-in-the-middle HTTP proxy for penetration testers and software developers.\",\n long_description=long_description,\n url=\"http://mitmproxy.org\",\n author=\"Aldo Cortesi\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Environment :: Console :: Curses\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Topic :: Security\",\n \"Topic :: Internet\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: Proxy Servers\",\n \"Topic :: Software Development :: Testing\"\n ],\n packages=find_packages(include=[\n \"mitmproxy\", \"mitmproxy.*\",\n \"pathod\", \"pathod.*\",\n ]),\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n \"mitmproxy = mitmproxy.tools.main:mitmproxy\",\n \"mitmdump = mitmproxy.tools.main:mitmdump\",\n \"mitmweb = mitmproxy.tools.main:mitmweb\",\n \"pathod = pathod.pathod_cmdline:go_pathod\",\n \"pathoc = pathod.pathoc_cmdline:go_pathoc\"\n ]\n },\n # https://packaging.python.org/en/latest/requirements/#install-requires\n # It is not considered best practice to use install_requires to pin dependencies to specific versions.\n install_requires=[\n \"blinker>=1.4, <1.5\",\n \"brotlipy>=0.5.1, <0.8\",\n \"certifi>=2015.11.20.1\", # no semver here - this should always be on the last release!\n \"click>=6.2, <7\",\n \"cryptography>=2.0,<2.2\",\n \"h2>=3.0, <4\",\n \"hyperframe>=5.0, <6\",\n \"kaitaistruct>=0.7, <0.8\",\n \"ldap3>=2.2.0, <2.4\",\n \"passlib>=1.6.5, <1.8\",\n \"pyasn1>=0.3.1, <0.4\",\n \"pyOpenSSL>=17.2,<17.4\",\n \"pyparsing>=2.1.3, <2.3\",\n \"pyperclip>=1.5.22, <1.6\",\n \"requests>=2.9.1, <3\",\n \"ruamel.yaml>=0.13.2, <0.16\",\n \"sortedcontainers>=1.5.4, <1.6\",\n \"tornado>=4.3, <4.6\",\n \"urwid>=1.3.1, <1.4\",\n ],\n extras_require={\n ':sys_platform == \"win32\"': [\n \"pydivert>=2.0.3,<2.2\",\n ],\n 'dev': [\n \"flake8>=3.2.1, <3.5\",\n \"Flask>=0.10.1, <0.13\",\n \"mypy>=0.530,<0.541\",\n \"pytest-cov>=2.2.1, <3\",\n \"pytest-faulthandler>=1.3.0, <2\",\n \"pytest-timeout>=1.0.0, <2\",\n \"pytest-xdist>=1.14, <2\",\n \"pytest>=3.1, <4\",\n \"rstcheck>=2.2, <4.0\",\n 
\"sphinx_rtd_theme>=0.1.9, <0.3\",\n \"sphinx-autobuild>=0.5.2, <0.8\",\n \"sphinx>=1.3.5, <1.7\",\n \"sphinxcontrib-documentedlist>=0.5.0, <0.7\",\n \"tox>=2.3, <3\",\n ],\n 'examples': [\n \"beautifulsoup4>=4.4.1, <4.7\",\n \"Pillow>=4.3,<4.4\",\n ]\n }\n)\n", "path": "setup.py"}]} | 1,915 | 138 |
gh_patches_debug_28334 | rasdani/github-patches | git_diff | bridgecrewio__checkov-3104 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
only capturing the first line https://github.com/bridgecrewio/checkov/blob/2.0.1131/checkov/dockerfile/checks/WorkdirIsAbsolute.py
def scan_entity_conf(self, conf):
    for mydir in conf:
        mypath = mydir["value"]
        if re.match(PATH, mypath):
            return CheckResult.FAILED, mydir
    return CheckResult.PASSED, None
</issue>
<code>
[start of checkov/dockerfile/checks/WorkdirIsAbsolute.py]
1 import re
2
3 from checkov.common.models.enums import CheckCategories, CheckResult
4 from checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck
5
6 ISABSOLUTE = re.compile("(^/[A-z0-9-_+]*)|(^[A-z0-9-_+]:\\\\.*)|(^\\$[{}A-z0-9-_+].*)")
7
8
9 class WorkdirIsAbsolute(BaseDockerfileCheck):
10 def __init__(self):
11 """
12 For clarity and reliability, you should always use absolute paths for your WORKDIR.
13 """
14 name = "Ensure that WORKDIR values are absolute paths"
15 id = "CKV_DOCKER_10"
16 supported_instructions = ["WORKDIR"]
17 categories = [CheckCategories.CONVENTION]
18 super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)
19
20 def scan_entity_conf(self, conf):
21 for mydir in conf:
22 mypath = mydir["value"]
23 if not re.match(ISABSOLUTE, mypath):
24 return CheckResult.FAILED, mydir
25 return CheckResult.PASSED, None
26
27
28 check = WorkdirIsAbsolute()
29
[end of checkov/dockerfile/checks/WorkdirIsAbsolute.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/dockerfile/checks/WorkdirIsAbsolute.py b/checkov/dockerfile/checks/WorkdirIsAbsolute.py
--- a/checkov/dockerfile/checks/WorkdirIsAbsolute.py
+++ b/checkov/dockerfile/checks/WorkdirIsAbsolute.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
import re
from checkov.common.models.enums import CheckCategories, CheckResult
@@ -7,21 +9,26 @@
class WorkdirIsAbsolute(BaseDockerfileCheck):
- def __init__(self):
+ def __init__(self) -> None:
"""
For clarity and reliability, you should always use absolute paths for your WORKDIR.
"""
name = "Ensure that WORKDIR values are absolute paths"
id = "CKV_DOCKER_10"
- supported_instructions = ["WORKDIR"]
- categories = [CheckCategories.CONVENTION]
+ supported_instructions = ("WORKDIR",)
+ categories = (CheckCategories.CONVENTION,)
super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)
- def scan_entity_conf(self, conf):
- for mydir in conf:
- mypath = mydir["value"]
- if not re.match(ISABSOLUTE, mypath):
- return CheckResult.FAILED, mydir
+ def scan_entity_conf(self, conf: list[dict[str, int | str]]) -> tuple[CheckResult, list[dict[str, int | str]] | None]:
+ workdirs = []
+ for workdir in conf:
+ path = workdir["value"]
+ if not re.match(ISABSOLUTE, path):
+ workdirs.append(workdir)
+
+ if workdirs:
+ return CheckResult.FAILED, workdirs
+
return CheckResult.PASSED, None
| {"golden_diff": "diff --git a/checkov/dockerfile/checks/WorkdirIsAbsolute.py b/checkov/dockerfile/checks/WorkdirIsAbsolute.py\n--- a/checkov/dockerfile/checks/WorkdirIsAbsolute.py\n+++ b/checkov/dockerfile/checks/WorkdirIsAbsolute.py\n@@ -1,3 +1,5 @@\n+from __future__ import annotations\n+\n import re\n \n from checkov.common.models.enums import CheckCategories, CheckResult\n@@ -7,21 +9,26 @@\n \n \n class WorkdirIsAbsolute(BaseDockerfileCheck):\n- def __init__(self):\n+ def __init__(self) -> None:\n \"\"\"\n For clarity and reliability, you should always use absolute paths for your WORKDIR.\n \"\"\"\n name = \"Ensure that WORKDIR values are absolute paths\"\n id = \"CKV_DOCKER_10\"\n- supported_instructions = [\"WORKDIR\"]\n- categories = [CheckCategories.CONVENTION]\n+ supported_instructions = (\"WORKDIR\",)\n+ categories = (CheckCategories.CONVENTION,)\n super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)\n \n- def scan_entity_conf(self, conf):\n- for mydir in conf:\n- mypath = mydir[\"value\"]\n- if not re.match(ISABSOLUTE, mypath):\n- return CheckResult.FAILED, mydir\n+ def scan_entity_conf(self, conf: list[dict[str, int | str]]) -> tuple[CheckResult, list[dict[str, int | str]] | None]:\n+ workdirs = []\n+ for workdir in conf:\n+ path = workdir[\"value\"]\n+ if not re.match(ISABSOLUTE, path):\n+ workdirs.append(workdir)\n+\n+ if workdirs:\n+ return CheckResult.FAILED, workdirs\n+\n return CheckResult.PASSED, None\n", "issue": "only caputring the first line https://github.com/bridgecrewio/checkov/blob/2.0.1131/checkov/dockerfile/checks/WorkdirIsAbsolute.py\ndef scan_entity_conf(self, conf):\r\n for mydir in conf:\r\n mypath = mydir[\"value\"]\r\n if re.match(PATH, mypath):\r\n return CheckResult.FAILED, mydir\r\n return CheckResult.PASSED, None\n", "before_files": [{"content": "import re\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck\n\nISABSOLUTE = re.compile(\"(^/[A-z0-9-_+]*)|(^[A-z0-9-_+]:\\\\\\\\.*)|(^\\\\$[{}A-z0-9-_+].*)\")\n\n\nclass WorkdirIsAbsolute(BaseDockerfileCheck):\n def __init__(self):\n \"\"\"\n For clarity and reliability, you should always use absolute paths for your WORKDIR.\n \"\"\"\n name = \"Ensure that WORKDIR values are absolute paths\"\n id = \"CKV_DOCKER_10\"\n supported_instructions = [\"WORKDIR\"]\n categories = [CheckCategories.CONVENTION]\n super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)\n\n def scan_entity_conf(self, conf):\n for mydir in conf:\n mypath = mydir[\"value\"]\n if not re.match(ISABSOLUTE, mypath):\n return CheckResult.FAILED, mydir\n return CheckResult.PASSED, None\n\n\ncheck = WorkdirIsAbsolute()\n", "path": "checkov/dockerfile/checks/WorkdirIsAbsolute.py"}]} | 947 | 412 |
gh_patches_debug_29107 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-410 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Nested stack reference to InstanceProfile triggers E2502 Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile
*cfn-lint version: `0.8.1`*
# Description of issue
When using nested stacks and passing IamInstanceProfile ARNs between stacks, E2502 is triggered though it shouldn't be.
# Steps to reproduce
Create a parent template like this
```yaml
AWSTemplateFormatVersion: 2010-09-09
Resources:
IAMInstanceProfile:
Type: AWS::CloudFormation::Stack
Properties:
TemplateURL: https://s3-us-west-2.amazonaws.com/example-bucket/example-instance-profile.yml
Instance:
Type: AWS::CloudFormation::Stack
Properties:
Parameters:
IamInstanceProfile: !GetAtt IAMInstanceProfile.Outputs.InstanceProfileArn
TemplateURL: https://s3-us-west-2.amazonaws.com/example-bucket/example-instance.yml
```
and a child template like this
```yaml
AWSTemplateFormatVersion: 2010-09-09
Resources:
InstanceProfile:
Type: AWS::IAM::InstanceProfile
Properties:
Roles:
- ExampleRole
Outputs:
InstanceProfileArn:
Value: !GetAtt InstanceProfile.Arn
```
# Expected results
The `IamInstanceProfile` parameter in the parent template's `Instance` sub-stack resource definition does indeed contain a valid IAM Instance Profile ARN (passed in from the `IAMInstanceProfile` sub-stack resource), and as a result there should be no error.
Ideally cfn-lint would recognize that `GetAtt` is referencing an output from another stack which very well could be an InstanceProfile ARN and as a result, optimistically not report this error.
Alternatively, if cfn-lint could introspect the sub-stack and determine the object type of the output, it would know whether or not it was the correct object type.
# Actual results
cfn-lint reports the error
> E2502 Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for Resources/Instance/Properties/Parameters/IamInstanceProfile/Fn::GetAtt
> example-parent.yml:11:9
</issue>
<code>
[start of src/cfnlint/rules/resources/iam/InstanceProfile.py]
1 """
2 Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 from cfnlint import CloudFormationLintRule
18 from cfnlint import RuleMatch
19
20
21 class InstanceProfile(CloudFormationLintRule):
22 """Check if IamInstanceProfile are used"""
23 id = 'E2502'
24 shortdesc = 'Check if IamInstanceProfile are using the name and not ARN'
25 description = 'See if there are any properties IamInstanceProfile' + \
26 'are using name and not ARN'
27 source_url = 'https://github.com/awslabs/cfn-python-lint'
28 tags = ['properties']
29
30 def match(self, cfn):
31 """Check CloudFormation IamInstanceProfile Parameters"""
32
33 matches = []
34
35 # Build the list of keys
36 trees = cfn.search_deep_keys('Fn::GetAtt')
37 # Filter only resources
38 # Disable pylint for Pylint 2
39 # pylint: disable=W0110
40 trees = filter(lambda x: x[0] == 'Resources', trees)
41 for tree in trees:
42 if any(e == 'IamInstanceProfile' for e in tree):
43 obj = tree[-1]
44 objtype = cfn.template.get('Resources', {}).get(obj[0], {}).get('Type')
45 if objtype:
46 if objtype != 'AWS::IAM::InstanceProfile':
47 message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (
48 '/'.join(map(str, tree[:-1])))
49 matches.append(RuleMatch(tree[:-1], message))
50 else:
51 if cfn.template.get('Resources', {}).get(tree[1], {}).get('Type') in ['AWS::EC2::SpotFleet']:
52 if obj[1] != 'Arn':
53 message = 'Property IamInstanceProfile should be an ARN for %s' % (
54 '/'.join(map(str, tree[:-1])))
55 matches.append(RuleMatch(tree[:-1], message))
56 else:
57 if obj[1] == 'Arn':
58 message = 'Property IamInstanceProfile shouldn\'t be an ARN for %s' % (
59 '/'.join(map(str, tree[:-1])))
60 matches.append(RuleMatch(tree[:-1], message))
61
62 # Search Refs
63 trees = cfn.search_deep_keys('Ref')
64         # Filter only resources
65 trees = filter(lambda x: x[0] == 'Resources', trees)
66 for tree in trees:
67 if any(e == 'IamInstanceProfile' for e in tree):
68 obj = tree[-1]
69 objtype = cfn.template.get('Resources', {}).get(obj, {}).get('Type')
70 if objtype:
71 if objtype != 'AWS::IAM::InstanceProfile':
72 message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (
73 '/'.join(map(str, tree[:-1])))
74 matches.append(RuleMatch(tree[:-1], message))
75
76 return matches
77
[end of src/cfnlint/rules/resources/iam/InstanceProfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cfnlint/rules/resources/iam/InstanceProfile.py b/src/cfnlint/rules/resources/iam/InstanceProfile.py
--- a/src/cfnlint/rules/resources/iam/InstanceProfile.py
+++ b/src/cfnlint/rules/resources/iam/InstanceProfile.py
@@ -43,12 +43,17 @@
obj = tree[-1]
objtype = cfn.template.get('Resources', {}).get(obj[0], {}).get('Type')
if objtype:
- if objtype != 'AWS::IAM::InstanceProfile':
+ if objtype not in ['AWS::IAM::InstanceProfile', 'AWS::CloudFormation::Stack', 'AWS::CloudFormation::CustomResource']:
message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (
'/'.join(map(str, tree[:-1])))
matches.append(RuleMatch(tree[:-1], message))
else:
- if cfn.template.get('Resources', {}).get(tree[1], {}).get('Type') in ['AWS::EC2::SpotFleet']:
+ if objtype in ['AWS::CloudFormation::Stack']:
+ if obj[1] != 'Outputs':
+ message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (
+ '/'.join(map(str, tree[:-1])))
+ matches.append(RuleMatch(tree[:-1], message))
+ elif cfn.template.get('Resources', {}).get(tree[1], {}).get('Type') in ['AWS::EC2::SpotFleet']:
if obj[1] != 'Arn':
message = 'Property IamInstanceProfile should be an ARN for %s' % (
'/'.join(map(str, tree[:-1])))
| {"golden_diff": "diff --git a/src/cfnlint/rules/resources/iam/InstanceProfile.py b/src/cfnlint/rules/resources/iam/InstanceProfile.py\n--- a/src/cfnlint/rules/resources/iam/InstanceProfile.py\n+++ b/src/cfnlint/rules/resources/iam/InstanceProfile.py\n@@ -43,12 +43,17 @@\n obj = tree[-1]\n objtype = cfn.template.get('Resources', {}).get(obj[0], {}).get('Type')\n if objtype:\n- if objtype != 'AWS::IAM::InstanceProfile':\n+ if objtype not in ['AWS::IAM::InstanceProfile', 'AWS::CloudFormation::Stack', 'AWS::CloudFormation::CustomResource']:\n message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n else:\n- if cfn.template.get('Resources', {}).get(tree[1], {}).get('Type') in ['AWS::EC2::SpotFleet']:\n+ if objtype in ['AWS::CloudFormation::Stack']:\n+ if obj[1] != 'Outputs':\n+ message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (\n+ '/'.join(map(str, tree[:-1])))\n+ matches.append(RuleMatch(tree[:-1], message))\n+ elif cfn.template.get('Resources', {}).get(tree[1], {}).get('Type') in ['AWS::EC2::SpotFleet']:\n if obj[1] != 'Arn':\n message = 'Property IamInstanceProfile should be an ARN for %s' % (\n '/'.join(map(str, tree[:-1])))\n", "issue": "Nested stack reference to InstanceProfile triggers E2502 Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile\n*cfn-lint version: `0.8.1`*\r\n\r\n# Description of issue\r\n\r\nWhen using nested stacks and passing IamInstanceProfile ARNs between stacks, E2502 is triggered though it shouldn't be.\r\n\r\n# Steps to reproduce\r\n\r\nCreate a parent template like this\r\n```yaml\r\nAWSTemplateFormatVersion: 2010-09-09\r\nResources:\r\n IAMInstanceProfile:\r\n Type: AWS::CloudFormation::Stack\r\n Properties:\r\n TemplateURL: https://s3-us-west-2.amazonaws.com/example-bucket/example-instance-profile.yml\r\n Instance:\r\n Type: AWS::CloudFormation::Stack\r\n Properties:\r\n Parameters:\r\n IamInstanceProfile: !GetAtt IAMInstanceProfile.Outputs.InstanceProfileArn\r\n TemplateURL: https://s3-us-west-2.amazonaws.com/example-bucket/example-instance.yml\r\n```\r\nand a child template like this\r\n\r\n```yaml\r\nAWSTemplateFormatVersion: 2010-09-09\r\nResources:\r\n InstanceProfile:\r\n Type: AWS::IAM::InstanceProfile\r\n Properties:\r\n Roles:\r\n - ExampleRole\r\nOutputs:\r\n InstanceProfileArn:\r\n Value: !GetAtt InstanceProfile.Arn\r\n```\r\n\r\n# Expected results\r\n\r\nThe `IamInstanceProfile` parameter in the parent template's `Instance` sub-stack resource definition does indeed contain a valid IAM Instance Profile ARN (passed in from the `IAMInstanceProfile` sub-stack resource and as a result, there should be no error.\r\n\r\nIdeally cfn-lint would recognize that `GetAtt` is referencing an output from another stack which very well could be an InstanceProfile ARN and as a result, optimistically not report this error.\r\n\r\nAlternatively, if cfn-lint could introspect the sub-stack and determine the object type of the output, it would know whether or not it was the correct object type.\r\n\r\n# Actual results\r\n\r\ncfn-lint reports the error\r\n\r\n> E2502 Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for Resources/Instance/Properties/Parameters/IamInstanceProfile/Fn::GetAtt\r\n> example-parent.yml:11:9\n", "before_files": [{"content": "\"\"\"\n Copyright 2018 Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass InstanceProfile(CloudFormationLintRule):\n \"\"\"Check if IamInstanceProfile are used\"\"\"\n id = 'E2502'\n shortdesc = 'Check if IamInstanceProfile are using the name and not ARN'\n description = 'See if there are any properties IamInstanceProfile' + \\\n 'are using name and not ARN'\n source_url = 'https://github.com/awslabs/cfn-python-lint'\n tags = ['properties']\n\n def match(self, cfn):\n \"\"\"Check CloudFormation IamInstanceProfile Parameters\"\"\"\n\n matches = []\n\n # Build the list of keys\n trees = cfn.search_deep_keys('Fn::GetAtt')\n # Filter only resources\n # Disable pylint for Pylint 2\n # pylint: disable=W0110\n trees = filter(lambda x: x[0] == 'Resources', trees)\n for tree in trees:\n if any(e == 'IamInstanceProfile' for e in tree):\n obj = tree[-1]\n objtype = cfn.template.get('Resources', {}).get(obj[0], {}).get('Type')\n if objtype:\n if objtype != 'AWS::IAM::InstanceProfile':\n message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n else:\n if cfn.template.get('Resources', {}).get(tree[1], {}).get('Type') in ['AWS::EC2::SpotFleet']:\n if obj[1] != 'Arn':\n message = 'Property IamInstanceProfile should be an ARN for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n else:\n if obj[1] == 'Arn':\n message = 'Property IamInstanceProfile shouldn\\'t be an ARN for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n\n # Search Refs\n trees = cfn.search_deep_keys('Ref')\n # Filter only resoureces\n trees = filter(lambda x: x[0] == 'Resources', trees)\n for tree in trees:\n if any(e == 'IamInstanceProfile' for e in tree):\n obj = tree[-1]\n objtype = cfn.template.get('Resources', {}).get(obj, {}).get('Type')\n if objtype:\n if objtype != 'AWS::IAM::InstanceProfile':\n message = 'Property IamInstanceProfile should relate to AWS::IAM::InstanceProfile for %s' % (\n '/'.join(map(str, tree[:-1])))\n matches.append(RuleMatch(tree[:-1], message))\n\n return matches\n", "path": "src/cfnlint/rules/resources/iam/InstanceProfile.py"}]} | 2,020 | 385 |
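The cfn-lint patch resolves the nested-stack false positive by widening the set of `Fn::GetAtt` targets the rule tolerates: `AWS::CloudFormation::Stack` (but only through its `Outputs` attribute, as in `!GetAtt Stack.Outputs.InstanceProfileArn`) and custom resources, whose attributes are opaque to the linter. The decision table, restated as a small function (a paraphrase of the rule, not its actual code):

```python
ACCEPTED_TYPES = (
    "AWS::IAM::InstanceProfile",
    "AWS::CloudFormation::Stack",           # nested stack: outputs are opaque, trust them
    "AWS::CloudFormation::CustomResource",  # custom resource: attributes are opaque too
)

def getatt_target_ok(resource_type, attribute):
    if resource_type not in ACCEPTED_TYPES:
        return False
    if resource_type == "AWS::CloudFormation::Stack":
        return attribute == "Outputs"
    return True

assert getatt_target_ok("AWS::CloudFormation::Stack", "Outputs")
assert not getatt_target_ok("AWS::EC2::Instance", "Arn")
```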
gh_patches_debug_18313 | rasdani/github-patches | git_diff | docker__docker-py-575 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
docker.utils.decorators.check_resource problem
When using `docker_client.start(**kwargs)` to start a container, the following exception is raised:
```
c.start(**s_kwargs)
File "/home/simplecloud/shiyanlou/env/local/lib/python2.7/site-packages/docker/utils/decorators.py", line 12, in wrapped
return f(self, resource_id, *args, **kwargs)
TypeError: start() got multiple values for keyword argument 'container'
```
</issue>
<code>
[start of docker/utils/decorators.py]
1 from .. import errors
2
3
4 def check_resource(f):
5 def wrapped(self, resource_id=None, *args, **kwargs):
6 if resource_id is None and (
7 kwargs.get('container') is None and kwargs.get('image') is None
8 ):
9 raise errors.NullResource(
10 'image or container param is None'
11 )
12 return f(self, resource_id, *args, **kwargs)
13 return wrapped
14
[end of docker/utils/decorators.py]
[start of docker/version.py]
1 version = "1.2.1-dev"
2 version_info = tuple([int(d) for d in version.replace("-dev", "").split(".")])
3
[end of docker/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docker/utils/decorators.py b/docker/utils/decorators.py
--- a/docker/utils/decorators.py
+++ b/docker/utils/decorators.py
@@ -3,11 +3,14 @@
def check_resource(f):
def wrapped(self, resource_id=None, *args, **kwargs):
- if resource_id is None and (
- kwargs.get('container') is None and kwargs.get('image') is None
- ):
- raise errors.NullResource(
- 'image or container param is None'
- )
+ if resource_id is None:
+ if kwargs.get('container'):
+ resource_id = kwargs.pop('container')
+ elif kwargs.get('image'):
+ resource_id = kwargs.pop('image')
+ else:
+ raise errors.NullResource(
+ 'image or container param is undefined'
+ )
return f(self, resource_id, *args, **kwargs)
return wrapped
diff --git a/docker/version.py b/docker/version.py
--- a/docker/version.py
+++ b/docker/version.py
@@ -1,2 +1,2 @@
-version = "1.2.1-dev"
+version = "1.2.1"
version_info = tuple([int(d) for d in version.replace("-dev", "").split(".")])
| {"golden_diff": "diff --git a/docker/utils/decorators.py b/docker/utils/decorators.py\n--- a/docker/utils/decorators.py\n+++ b/docker/utils/decorators.py\n@@ -3,11 +3,14 @@\n \n def check_resource(f):\n def wrapped(self, resource_id=None, *args, **kwargs):\n- if resource_id is None and (\n- kwargs.get('container') is None and kwargs.get('image') is None\n- ):\n- raise errors.NullResource(\n- 'image or container param is None'\n- )\n+ if resource_id is None:\n+ if kwargs.get('container'):\n+ resource_id = kwargs.pop('container')\n+ elif kwargs.get('image'):\n+ resource_id = kwargs.pop('image')\n+ else:\n+ raise errors.NullResource(\n+ 'image or container param is undefined'\n+ )\n return f(self, resource_id, *args, **kwargs)\n return wrapped\ndiff --git a/docker/version.py b/docker/version.py\n--- a/docker/version.py\n+++ b/docker/version.py\n@@ -1,2 +1,2 @@\n-version = \"1.2.1-dev\"\n+version = \"1.2.1\"\n version_info = tuple([int(d) for d in version.replace(\"-dev\", \"\").split(\".\")])\n", "issue": "docker.utils.decorators.check_resource problem\nWhen use `docker_client.start(**kwargs)` to start the container, will be raise follow exception:\n\n```\nc.start(**s_kwargs)\n File \"/home/simplecloud/shiyanlou/env/local/lib/python2.7/site-packages/docker/utils/decorators.py\", line 12, in wrapped\n return f(self, resource_id, *args, **kwargs)\nTypeError: start() got multiple values for keyword argument 'container'\n```\n\n", "before_files": [{"content": "from .. import errors\n\n\ndef check_resource(f):\n def wrapped(self, resource_id=None, *args, **kwargs):\n if resource_id is None and (\n kwargs.get('container') is None and kwargs.get('image') is None\n ):\n raise errors.NullResource(\n 'image or container param is None'\n )\n return f(self, resource_id, *args, **kwargs)\n return wrapped\n", "path": "docker/utils/decorators.py"}, {"content": "version = \"1.2.1-dev\"\nversion_info = tuple([int(d) for d in version.replace(\"-dev\", \"\").split(\".\")])\n", "path": "docker/version.py"}]} | 791 | 280 |
gh_patches_debug_25492 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-643 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Create owner references among master and worker pods
Currently we have the following two ways to delete master + worker pods:
* Delete each pod one by one
* Delete all pods related to this elasticdl run via `elasticdl_job_name` label `kubectl delete pod -l elasticdl_job_name=test-job-1559292773-93`
It would be much easier if users could just delete the master pod and have the worker pods deleted automatically. This would be possible if there were owner references between the master and worker pods.
</issue>
<code>
[start of elasticdl/python/elasticdl/master/k8s_client.py]
1 import logging
2 import os
3 import threading
4 import traceback
5
6 from kubernetes import client, config, watch
7 from kubernetes.client import (
8 V1PersistentVolumeClaimVolumeSource as pvcVolumeSource,
9 )
10
11 WORKER_POD_NAME_PREFIX = "elasticdl-worker-"
12
13
14 class Client(object):
15 def __init__(self, *, worker_image, namespace, job_name, event_callback):
16 """
17 ElasticDL k8s client.
18
19 Args:
20 worker_image: Docker image path for ElasticDL workers.
21 namespace: k8s namespace for ElasticDL pods.
22 job_name: ElasticDL job name, should be unique in the namespace.
23 Used as worker pod name prefix and value for "elasticdl" label.
24 event_callback: If not None, an event watcher will be created and
25 events passed to the callback.
26 """
27 if os.getenv("KUBERNETES_SERVICE_HOST"):
28 # We are running inside k8s
29 config.load_incluster_config()
30 else:
31 # Use user's kube config
32 config.load_kube_config()
33
34 self._v1 = client.CoreV1Api()
35 self._logger = logging.getLogger(__name__)
36 self._image = worker_image
37 self._ns = namespace
38 self._job_name = job_name
39 self._event_cb = event_callback
40 if self._event_cb:
41 threading.Thread(
42 target=self._watch, name="event_watcher", daemon=True
43 ).start()
44
45 def _watch(self):
46 stream = watch.Watch().stream(
47 self._v1.list_namespaced_pod,
48 self._ns,
49 label_selector="elasticdl_job_name=" + self._job_name,
50 )
51 for event in stream:
52 try:
53 self._event_cb(event)
54 except Exception:
55 traceback.print_exc()
56
57 def get_worker_pod_name(self, worker_id):
58 return WORKER_POD_NAME_PREFIX + self._job_name + "-" + str(worker_id)
59
60 def _create_worker_pod(
61 self,
62 worker_id,
63 resource_requests,
64 resource_limits,
65 priority,
66 mount_path,
67 volume_name,
68 image_pull_policy,
69 command,
70 args,
71 restart_policy,
72 ):
73 # Worker container config
74 container = client.V1Container(
75 name=self.get_worker_pod_name(worker_id),
76 image=self._image,
77 command=command,
78 resources=client.V1ResourceRequirements(
79 requests=resource_requests, limits=resource_limits
80 ),
81 image_pull_policy=image_pull_policy,
82 args=args,
83 )
84
85 # Pod
86 spec = client.V1PodSpec(
87 containers=[container], restart_policy=restart_policy
88 )
89
90 # Mount data path
91 if mount_path is not None and volume_name is not None:
92 volume = client.V1Volume(
93 name="data-volume",
94 persistent_volume_claim=pvcVolumeSource(
95 claim_name="fileserver-claim", read_only=False
96 ),
97 )
98 spec.volumes = [volume]
99 container.volume_mounts = [
100 client.V1VolumeMount(name=volume_name, mount_path=mount_path)
101 ]
102
103 if priority is not None:
104 spec.priority_class_name = priority
105
106 pod = client.V1Pod(
107 spec=spec,
108 metadata=client.V1ObjectMeta(
109 name=self.get_worker_pod_name(worker_id),
110 labels={
111 "app": "elasticdl",
112 "elasticdl_job_name": self._job_name,
113 },
114 ),
115 )
116 return pod
117
118 def create_worker(
119 self,
120 worker_id,
121 resource_requests,
122 resource_limits,
123 priority=None,
124 mount_path=None,
125 volume_name=None,
126 image_pull_policy=None,
127 command=None,
128 args=None,
129 restart_policy="OnFailure",
130 ):
131 self._logger.info("Creating worker: " + str(worker_id))
132 pod = self._create_worker_pod(
133 worker_id,
134 resource_requests,
135 resource_limits,
136 priority,
137 mount_path,
138 volume_name,
139 image_pull_policy,
140 command=command,
141 args=args,
142 restart_policy=restart_policy,
143 )
144 return self._v1.create_namespaced_pod(self._ns, pod)
145
146 def delete_worker(self, worker_id):
147 self._logger.info("Deleting worker: " + str(worker_id))
148 self._v1.delete_namespaced_pod(
149 self.get_worker_pod_name(worker_id),
150 self._ns,
151 body=client.V1DeleteOptions(grace_period_seconds=0),
152 )
153
[end of elasticdl/python/elasticdl/master/k8s_client.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticdl/python/elasticdl/master/k8s_client.py b/elasticdl/python/elasticdl/master/k8s_client.py
--- a/elasticdl/python/elasticdl/master/k8s_client.py
+++ b/elasticdl/python/elasticdl/master/k8s_client.py
@@ -103,6 +103,16 @@
if priority is not None:
spec.priority_class_name = priority
+ # Find that master pod that will be used as the owner reference
+ # for this worker pod.
+ pods = self._v1.list_namespaced_pod(
+ namespace=self._ns,
+ label_selector="elasticdl_job_name=" + self._job_name
+ ).items
+ master_pod = [pod for pod in pods if (
+ pod.metadata.name == "elasticdl-master-" + self._job_name
+ )][0]
+
pod = client.V1Pod(
spec=spec,
metadata=client.V1ObjectMeta(
@@ -111,6 +121,17 @@
"app": "elasticdl",
"elasticdl_job_name": self._job_name,
},
+ # TODO: Add tests for this once we've done refactoring on
+ # k8s client code and the constant strings
+ owner_references=[
+ client.V1OwnerReference(
+ api_version="v1",
+ block_owner_deletion=True,
+ kind="Pod",
+ name=master_pod.metadata.name,
+ uid=master_pod.metadata.uid,
+ ),
+ ],
),
)
return pod
| {"golden_diff": "diff --git a/elasticdl/python/elasticdl/master/k8s_client.py b/elasticdl/python/elasticdl/master/k8s_client.py\n--- a/elasticdl/python/elasticdl/master/k8s_client.py\n+++ b/elasticdl/python/elasticdl/master/k8s_client.py\n@@ -103,6 +103,16 @@\n if priority is not None:\n spec.priority_class_name = priority\n \n+ # Find that master pod that will be used as the owner reference\n+ # for this worker pod.\n+ pods = self._v1.list_namespaced_pod(\n+ namespace=self._ns,\n+ label_selector=\"elasticdl_job_name=\" + self._job_name\n+ ).items\n+ master_pod = [pod for pod in pods if (\n+ pod.metadata.name == \"elasticdl-master-\" + self._job_name\n+ )][0]\n+\n pod = client.V1Pod(\n spec=spec,\n metadata=client.V1ObjectMeta(\n@@ -111,6 +121,17 @@\n \"app\": \"elasticdl\",\n \"elasticdl_job_name\": self._job_name,\n },\n+ # TODO: Add tests for this once we've done refactoring on\n+ # k8s client code and the constant strings\n+ owner_references=[\n+ client.V1OwnerReference(\n+ api_version=\"v1\",\n+ block_owner_deletion=True,\n+ kind=\"Pod\",\n+ name=master_pod.metadata.name,\n+ uid=master_pod.metadata.uid,\n+ ),\n+ ],\n ),\n )\n return pod\n", "issue": "Create owner references among master and worker pods\nCurrently we have the following two ways to delete master + worker pods:\r\n* Delete each pod one by one\r\n* Delete all pods related to this elasticdl run via `elasticdl_job_name` label `kubectl delete pod -l elasticdl_job_name=test-job-1559292773-93`\r\n\r\nIt would be much easier if users could just delete master pod and then worker pods can be delete automatically. This would be possible if there are owner references among master and worker pods.\n", "before_files": [{"content": "import logging\nimport os\nimport threading\nimport traceback\n\nfrom kubernetes import client, config, watch\nfrom kubernetes.client import (\n V1PersistentVolumeClaimVolumeSource as pvcVolumeSource,\n)\n\nWORKER_POD_NAME_PREFIX = \"elasticdl-worker-\"\n\n\nclass Client(object):\n def __init__(self, *, worker_image, namespace, job_name, event_callback):\n \"\"\"\n ElasticDL k8s client.\n\n Args:\n worker_image: Docker image path for ElasticDL workers.\n namespace: k8s namespace for ElasticDL pods.\n job_name: ElasticDL job name, should be unique in the namespace.\n Used as worker pod name prefix and value for \"elasticdl\" label.\n event_callback: If not None, an event watcher will be created and\n events passed to the callback.\n \"\"\"\n if os.getenv(\"KUBERNETES_SERVICE_HOST\"):\n # We are running inside k8s\n config.load_incluster_config()\n else:\n # Use user's kube config\n config.load_kube_config()\n\n self._v1 = client.CoreV1Api()\n self._logger = logging.getLogger(__name__)\n self._image = worker_image\n self._ns = namespace\n self._job_name = job_name\n self._event_cb = event_callback\n if self._event_cb:\n threading.Thread(\n target=self._watch, name=\"event_watcher\", daemon=True\n ).start()\n\n def _watch(self):\n stream = watch.Watch().stream(\n self._v1.list_namespaced_pod,\n self._ns,\n label_selector=\"elasticdl_job_name=\" + self._job_name,\n )\n for event in stream:\n try:\n self._event_cb(event)\n except Exception:\n traceback.print_exc()\n\n def get_worker_pod_name(self, worker_id):\n return WORKER_POD_NAME_PREFIX + self._job_name + \"-\" + str(worker_id)\n\n def _create_worker_pod(\n self,\n worker_id,\n resource_requests,\n resource_limits,\n priority,\n mount_path,\n volume_name,\n image_pull_policy,\n command,\n args,\n restart_policy,\n ):\n # Worker container config\n 
container = client.V1Container(\n name=self.get_worker_pod_name(worker_id),\n image=self._image,\n command=command,\n resources=client.V1ResourceRequirements(\n requests=resource_requests, limits=resource_limits\n ),\n image_pull_policy=image_pull_policy,\n args=args,\n )\n\n # Pod\n spec = client.V1PodSpec(\n containers=[container], restart_policy=restart_policy\n )\n\n # Mount data path\n if mount_path is not None and volume_name is not None:\n volume = client.V1Volume(\n name=\"data-volume\",\n persistent_volume_claim=pvcVolumeSource(\n claim_name=\"fileserver-claim\", read_only=False\n ),\n )\n spec.volumes = [volume]\n container.volume_mounts = [\n client.V1VolumeMount(name=volume_name, mount_path=mount_path)\n ]\n\n if priority is not None:\n spec.priority_class_name = priority\n\n pod = client.V1Pod(\n spec=spec,\n metadata=client.V1ObjectMeta(\n name=self.get_worker_pod_name(worker_id),\n labels={\n \"app\": \"elasticdl\",\n \"elasticdl_job_name\": self._job_name,\n },\n ),\n )\n return pod\n\n def create_worker(\n self,\n worker_id,\n resource_requests,\n resource_limits,\n priority=None,\n mount_path=None,\n volume_name=None,\n image_pull_policy=None,\n command=None,\n args=None,\n restart_policy=\"OnFailure\",\n ):\n self._logger.info(\"Creating worker: \" + str(worker_id))\n pod = self._create_worker_pod(\n worker_id,\n resource_requests,\n resource_limits,\n priority,\n mount_path,\n volume_name,\n image_pull_policy,\n command=command,\n args=args,\n restart_policy=restart_policy,\n )\n return self._v1.create_namespaced_pod(self._ns, pod)\n\n def delete_worker(self, worker_id):\n self._logger.info(\"Deleting worker: \" + str(worker_id))\n self._v1.delete_namespaced_pod(\n self.get_worker_pod_name(worker_id),\n self._ns,\n body=client.V1DeleteOptions(grace_period_seconds=0),\n )\n", "path": "elasticdl/python/elasticdl/master/k8s_client.py"}]} | 1,967 | 353 |
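The elasticdl change leans on Kubernetes garbage collection: once each worker pod carries an `ownerReference` pointing at the master pod, deleting the master cascades to the workers automatically. The reference itself is a small metadata object; a sketch mirroring the diff (the field names are the real `kubernetes` client ones, the helper function is mine):

```python
from kubernetes import client

def owner_reference_for(master_pod):
    """Tie a worker pod's lifetime to the master pod's."""
    return client.V1OwnerReference(
        api_version="v1",
        kind="Pod",
        name=master_pod.metadata.name,
        uid=master_pod.metadata.uid,
        block_owner_deletion=True,  # with foreground deletion, the owner waits on dependents
    )

# attach it when building the worker pod:
# client.V1ObjectMeta(..., owner_references=[owner_reference_for(master_pod)])
```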
gh_patches_debug_56983 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-contrib-172 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove unused import
As per comment https://github.com/open-telemetry/opentelemetry-python-contrib/pull/107#discussion_r516262746, there appears to be an unused import in the jinja2 instrumentation
</issue>
<code>
[start of instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16
17 Usage
18 -----
19
20 The OpenTelemetry ``jinja2`` integration traces templates loading, compilation
21 and rendering.
22
23 Usage
24 -----
25
26 .. code-block:: python
27
28 from jinja2 import Environment, FileSystemLoader
29 from opentelemetry.instrumentation.jinja2 import Jinja2Instrumentor
30 from opentelemetry import trace
31 from opentelemetry.trace import TracerProvider
32
33 trace.set_tracer_provider(TracerProvider())
34
35 Jinja2Instrumentor().instrument()
36
37 env = Environment(loader=FileSystemLoader("templates"))
38 template = env.get_template("mytemplate.html")
39
40 API
41 ---
42 """
43 # pylint: disable=no-value-for-parameter
44
45 import logging
46
47 import jinja2
48 from wrapt import ObjectProxy
49 from wrapt import wrap_function_wrapper as _wrap
50
51 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
52 from opentelemetry.instrumentation.jinja2.version import __version__
53 from opentelemetry.instrumentation.utils import unwrap
54 from opentelemetry.trace import SpanKind, get_tracer
55 from opentelemetry.trace.status import Status, StatusCode
56
57 logger = logging.getLogger(__name__)
58
59 ATTRIBUTE_JINJA2_TEMPLATE_NAME = "jinja2.template_name"
60 ATTRIBUTE_JINJA2_TEMPLATE_PATH = "jinja2.template_path"
61 DEFAULT_TEMPLATE_NAME = "<memory>"
62
63
64 def _with_tracer_wrapper(func):
65 """Helper for providing tracer for wrapper functions.
66 """
67
68 def _with_tracer(tracer):
69 def wrapper(wrapped, instance, args, kwargs):
70 return func(tracer, wrapped, instance, args, kwargs)
71
72 return wrapper
73
74 return _with_tracer
75
76
77 @_with_tracer_wrapper
78 def _wrap_render(tracer, wrapped, instance, args, kwargs):
79 """Wrap `Template.render()` or `Template.generate()`
80 """
81 with tracer.start_as_current_span(
82 "jinja2.render", kind=SpanKind.INTERNAL,
83 ) as span:
84 if span.is_recording():
85 template_name = instance.name or DEFAULT_TEMPLATE_NAME
86 span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)
87 return wrapped(*args, **kwargs)
88
89
90 @_with_tracer_wrapper
91 def _wrap_compile(tracer, wrapped, _, args, kwargs):
92 with tracer.start_as_current_span(
93 "jinja2.compile", kind=SpanKind.INTERNAL,
94 ) as span:
95 if span.is_recording():
96 template_name = (
97 args[1]
98 if len(args) > 1
99 else kwargs.get("name", DEFAULT_TEMPLATE_NAME)
100 )
101 span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)
102 return wrapped(*args, **kwargs)
103
104
105 @_with_tracer_wrapper
106 def _wrap_load_template(tracer, wrapped, _, args, kwargs):
107 with tracer.start_as_current_span(
108 "jinja2.load", kind=SpanKind.INTERNAL,
109 ) as span:
110 if span.is_recording():
111 template_name = kwargs.get("name", args[0])
112 span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)
113 template = None
114 try:
115 template = wrapped(*args, **kwargs)
116 return template
117 finally:
118 if template and span.is_recording():
119 span.set_attribute(
120 ATTRIBUTE_JINJA2_TEMPLATE_PATH, template.filename
121 )
122
123
124 class Jinja2Instrumentor(BaseInstrumentor):
125 """An instrumentor for jinja2
126
127 See `BaseInstrumentor`
128 """
129
130 def _instrument(self, **kwargs):
131 tracer_provider = kwargs.get("tracer_provider")
132 tracer = get_tracer(__name__, __version__, tracer_provider)
133
134 _wrap(jinja2, "environment.Template.render", _wrap_render(tracer))
135 _wrap(jinja2, "environment.Template.generate", _wrap_render(tracer))
136 _wrap(jinja2, "environment.Environment.compile", _wrap_compile(tracer))
137 _wrap(
138 jinja2,
139 "environment.Environment._load_template",
140 _wrap_load_template(tracer),
141 )
142
143 def _uninstrument(self, **kwargs):
144 unwrap(jinja2.Template, "render")
145 unwrap(jinja2.Template, "generate")
146 unwrap(jinja2.Environment, "compile")
147 unwrap(jinja2.Environment, "_load_template")
148
[end of instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py b/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py
--- a/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py
+++ b/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py
@@ -52,7 +52,6 @@
from opentelemetry.instrumentation.jinja2.version import __version__
from opentelemetry.instrumentation.utils import unwrap
from opentelemetry.trace import SpanKind, get_tracer
-from opentelemetry.trace.status import Status, StatusCode
logger = logging.getLogger(__name__)
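The two names removed by this patch (`Status` and `StatusCode`) are never referenced anywhere else in the module, so dropping the import changes no behaviour. A quick way to confirm that for any Python file is a small `ast` walk; the helper below is an illustrative sketch using only the standard library, not part of the patch:

```python
# Sketch: list names a module imports but never references by name.
import ast
import sys

def unused_imports(path):
    tree = ast.parse(open(path, encoding="utf-8").read())
    imported = {
        alias.asname or alias.name.split(".")[0]
        for node in ast.walk(tree)
        if isinstance(node, (ast.Import, ast.ImportFrom))
        for alias in node.names
    }
    used = {node.id for node in ast.walk(tree) if isinstance(node, ast.Name)}
    return sorted(imported - used)

if __name__ == "__main__":
    # For the module above, the output would include 'Status' and 'StatusCode'.
    print(unused_imports(sys.argv[1]))
```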
| {"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py b/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py\n@@ -52,7 +52,6 @@\n from opentelemetry.instrumentation.jinja2.version import __version__\n from opentelemetry.instrumentation.utils import unwrap\n from opentelemetry.trace import SpanKind, get_tracer\n-from opentelemetry.trace.status import Status, StatusCode\n \n logger = logging.getLogger(__name__)\n", "issue": "Remove unused import\nAs per comment https://github.com/open-telemetry/opentelemetry-python-contrib/pull/107#discussion_r516262746, there appears to be an unused import in the jinja2 instrumentation\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\n\nUsage\n-----\n\nThe OpenTelemetry ``jinja2`` integration traces templates loading, compilation\nand rendering.\n\nUsage\n-----\n\n.. code-block:: python\n\n from jinja2 import Environment, FileSystemLoader\n from opentelemetry.instrumentation.jinja2 import Jinja2Instrumentor\n from opentelemetry import trace\n from opentelemetry.trace import TracerProvider\n\n trace.set_tracer_provider(TracerProvider())\n\n Jinja2Instrumentor().instrument()\n\n env = Environment(loader=FileSystemLoader(\"templates\"))\n template = env.get_template(\"mytemplate.html\")\n\nAPI\n---\n\"\"\"\n# pylint: disable=no-value-for-parameter\n\nimport logging\n\nimport jinja2\nfrom wrapt import ObjectProxy\nfrom wrapt import wrap_function_wrapper as _wrap\n\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.jinja2.version import __version__\nfrom opentelemetry.instrumentation.utils import unwrap\nfrom opentelemetry.trace import SpanKind, get_tracer\nfrom opentelemetry.trace.status import Status, StatusCode\n\nlogger = logging.getLogger(__name__)\n\nATTRIBUTE_JINJA2_TEMPLATE_NAME = \"jinja2.template_name\"\nATTRIBUTE_JINJA2_TEMPLATE_PATH = \"jinja2.template_path\"\nDEFAULT_TEMPLATE_NAME = \"<memory>\"\n\n\ndef _with_tracer_wrapper(func):\n \"\"\"Helper for providing tracer for wrapper functions.\n \"\"\"\n\n def _with_tracer(tracer):\n def wrapper(wrapped, instance, args, kwargs):\n return func(tracer, wrapped, instance, args, kwargs)\n\n return wrapper\n\n return _with_tracer\n\n\n@_with_tracer_wrapper\ndef _wrap_render(tracer, wrapped, instance, args, kwargs):\n \"\"\"Wrap `Template.render()` or `Template.generate()`\n \"\"\"\n with tracer.start_as_current_span(\n \"jinja2.render\", kind=SpanKind.INTERNAL,\n ) as span:\n if span.is_recording():\n template_name = instance.name or DEFAULT_TEMPLATE_NAME\n span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, 
template_name)\n return wrapped(*args, **kwargs)\n\n\n@_with_tracer_wrapper\ndef _wrap_compile(tracer, wrapped, _, args, kwargs):\n with tracer.start_as_current_span(\n \"jinja2.compile\", kind=SpanKind.INTERNAL,\n ) as span:\n if span.is_recording():\n template_name = (\n args[1]\n if len(args) > 1\n else kwargs.get(\"name\", DEFAULT_TEMPLATE_NAME)\n )\n span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)\n return wrapped(*args, **kwargs)\n\n\n@_with_tracer_wrapper\ndef _wrap_load_template(tracer, wrapped, _, args, kwargs):\n with tracer.start_as_current_span(\n \"jinja2.load\", kind=SpanKind.INTERNAL,\n ) as span:\n if span.is_recording():\n template_name = kwargs.get(\"name\", args[0])\n span.set_attribute(ATTRIBUTE_JINJA2_TEMPLATE_NAME, template_name)\n template = None\n try:\n template = wrapped(*args, **kwargs)\n return template\n finally:\n if template and span.is_recording():\n span.set_attribute(\n ATTRIBUTE_JINJA2_TEMPLATE_PATH, template.filename\n )\n\n\nclass Jinja2Instrumentor(BaseInstrumentor):\n \"\"\"An instrumentor for jinja2\n\n See `BaseInstrumentor`\n \"\"\"\n\n def _instrument(self, **kwargs):\n tracer_provider = kwargs.get(\"tracer_provider\")\n tracer = get_tracer(__name__, __version__, tracer_provider)\n\n _wrap(jinja2, \"environment.Template.render\", _wrap_render(tracer))\n _wrap(jinja2, \"environment.Template.generate\", _wrap_render(tracer))\n _wrap(jinja2, \"environment.Environment.compile\", _wrap_compile(tracer))\n _wrap(\n jinja2,\n \"environment.Environment._load_template\",\n _wrap_load_template(tracer),\n )\n\n def _uninstrument(self, **kwargs):\n unwrap(jinja2.Template, \"render\")\n unwrap(jinja2.Template, \"generate\")\n unwrap(jinja2.Environment, \"compile\")\n unwrap(jinja2.Environment, \"_load_template\")\n", "path": "instrumentation/opentelemetry-instrumentation-jinja2/src/opentelemetry/instrumentation/jinja2/__init__.py"}]} | 1,999 | 184 |
gh_patches_debug_27210 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-282 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[master]Use different RPC for reporting gradient and task result.
</issue>
<code>
[start of elasticdl/master/servicer.py]
1 import threading
2 import numpy as np
3
4 import tensorflow as tf
5 assert tf.executing_eagerly()
6
7 from proto import master_pb2
8 from proto import master_pb2_grpc
9 from util.ndarray import ndarray_to_tensor, tensor_to_ndarray
10
11
12 class MasterServicer(master_pb2_grpc.MasterServicer):
13 """Master service implementation"""
14
15 def __init__(self, logger, grads_to_wait, optimizer):
16 self.logger = logger
17 self._opt = optimizer
18 self._lock = threading.Lock()
19 # TODO: random initialization
20 # A <string, tf.ResourceVariable> map. We use tf.ResourceVariable
21 # instead ndarray to avoid copying and conversion when calling
22 # optimizer's apply_gradients() function.
23 self._model = {}
24 self._version = 0
25 self._gradient_sum = {}
26 self._grad_to_wait = grads_to_wait
27 self._grad_n = 0
28
29 def _set_model_var(self, name, value):
30 """Add or set model variable. Value should be a float32 ndarray"""
31 if value.dtype != np.float32:
32 raise ValueError("Value should be a float32 numpy array")
33 self._model[name] = tf.Variable(value, name=name)
34
35 def GetTask(self, request, context):
36 # TODO: implent task queues. Return an empty task for now.
37 res = master_pb2.Task()
38 res.shard_file_name = ""
39 res.model_version = self._version
40 return res
41
42 def GetModel(self, request, context):
43 if request.min_version > self._version:
44 err_msg = (
45 "Requested version %d not available yet, current version: %d"
46 % (request.min_version, self._version)
47 )
48 self.logger.warning(err_msg)
49 raise ValueError(err_msg)
50
51 res = master_pb2.Model()
52 with self._lock:
53 res.version = self._version
54 for k, v in self._model.items():
55 res.param[k].CopyFrom(ndarray_to_tensor(v.numpy()))
56 return res
57
58 def _update_model(self):
59 assert self._lock.locked()
60 grad_var = []
61 for k in self._gradient_sum:
62 self._gradient_sum[k] = self._gradient_sum[k] / self._grad_to_wait
63 grad_var.append((self._gradient_sum[k], self._model[k]))
64 self._opt.apply_gradients(grad_var)
65 self._version += 1
66 self._gradient_sum.clear()
67 self._grad_n = 0
68
69 def ReportTaskResult(self, request, context):
70 if request.model_version > self._version:
71 err_msg = "Model version %d out of range, current version: %d" % (
72 request.model_version,
73 self._version,
74 )
75 self.logger.warning(err_msg)
76 raise ValueError(err_msg)
77
78 res = master_pb2.ReportTaskResultReply()
79 if request.model_version < self._version:
80 self.logger.warning(
81 "Task result for outdated version %d dropped",
82 request.model_version,
83 )
84 res.accepted = False
85 res.model_version = self._version
86 return res
87
88 if request.err_message:
89 self.logger.warning("Worker error: %s" % request.err_message)
90 res.accepted = False
91 res.model_version = self._version
92 return res
93
94 # TODO: Update task queue with task_id
95 with self._lock:
96 tmp = {}
97 # Do sanity check before accumulating gradients.
98 for k, v in request.gradient.items():
99 if k not in self._model:
100 raise ValueError(
101 "Gradient key: %s is not part of model", k
102 )
103 arr = tensor_to_ndarray(v)
104 if arr.shape != self._model[k].numpy().shape:
105 raise ValueError(
106 "Gradient key: %s has incompatible dimension", k
107 )
108 tmp[k] = arr
109
110 for k, v in tmp.items():
111 if k in self._gradient_sum:
112 self._gradient_sum[k] = self._gradient_sum[k] + v
113 else:
114 self._gradient_sum[k] = v
115
116 self._grad_n += 1
117 if self._grad_n >= self._grad_to_wait:
118 self._update_model()
119 res.accepted = True
120 res.model_version = self._version
121 return res
122
[end of elasticdl/master/servicer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticdl/master/servicer.py b/elasticdl/master/servicer.py
--- a/elasticdl/master/servicer.py
+++ b/elasticdl/master/servicer.py
@@ -66,7 +66,7 @@
self._gradient_sum.clear()
self._grad_n = 0
- def ReportTaskResult(self, request, context):
+ def ReportGradient(self, request, context):
if request.model_version > self._version:
err_msg = "Model version %d out of range, current version: %d" % (
request.model_version,
@@ -75,7 +75,7 @@
self.logger.warning(err_msg)
raise ValueError(err_msg)
- res = master_pb2.ReportTaskResultReply()
+ res = master_pb2.ReportGradientReply()
if request.model_version < self._version:
self.logger.warning(
"Task result for outdated version %d dropped",
@@ -85,12 +85,6 @@
res.model_version = self._version
return res
- if request.err_message:
- self.logger.warning("Worker error: %s" % request.err_message)
- res.accepted = False
- res.model_version = self._version
- return res
-
# TODO: Update task queue with task_id
with self._lock:
tmp = {}
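Because `master_pb2_grpc.MasterServicer` is generated code, renaming the handler only takes effect together with a matching rename of the RPC in the service's `.proto` definition and regenerated stubs. The sketch below shows the kind of worker-side call the split implies; the request message name and channel details are assumptions for illustration, not taken from the repository:

```python
# Hypothetical worker-side call after the split (message name is assumed):
# gradients go through ReportGradient, while task errors/results would be
# reported via a separate RPC instead of an err_message field.
import grpc
from proto import master_pb2, master_pb2_grpc

channel = grpc.insecure_channel("master:50001")  # assumed address
stub = master_pb2_grpc.MasterStub(channel)

req = master_pb2.ReportGradientRequest()  # assumed message name
req.model_version = 3
reply = stub.ReportGradient(req)
if not reply.accepted:
    # gradients were stale; pull the model at reply.model_version and retry
    pass
```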
| {"golden_diff": "diff --git a/elasticdl/master/servicer.py b/elasticdl/master/servicer.py\n--- a/elasticdl/master/servicer.py\n+++ b/elasticdl/master/servicer.py\n@@ -66,7 +66,7 @@\n self._gradient_sum.clear()\n self._grad_n = 0\n \n- def ReportTaskResult(self, request, context):\n+ def ReportGradient(self, request, context):\n if request.model_version > self._version:\n err_msg = \"Model version %d out of range, current version: %d\" % (\n request.model_version,\n@@ -75,7 +75,7 @@\n self.logger.warning(err_msg)\n raise ValueError(err_msg)\n \n- res = master_pb2.ReportTaskResultReply()\n+ res = master_pb2.ReportGradientReply()\n if request.model_version < self._version:\n self.logger.warning(\n \"Task result for outdated version %d dropped\",\n@@ -85,12 +85,6 @@\n res.model_version = self._version\n return res\n \n- if request.err_message:\n- self.logger.warning(\"Worker error: %s\" % request.err_message)\n- res.accepted = False\n- res.model_version = self._version\n- return res\n-\n # TODO: Update task queue with task_id\n with self._lock:\n tmp = {}\n", "issue": "[master]Use different RPC for reporting gradient and task result.\n\n", "before_files": [{"content": "import threading\nimport numpy as np\n\nimport tensorflow as tf\nassert tf.executing_eagerly()\n\nfrom proto import master_pb2\nfrom proto import master_pb2_grpc\nfrom util.ndarray import ndarray_to_tensor, tensor_to_ndarray\n\n\nclass MasterServicer(master_pb2_grpc.MasterServicer):\n \"\"\"Master service implementation\"\"\"\n\n def __init__(self, logger, grads_to_wait, optimizer):\n self.logger = logger\n self._opt = optimizer\n self._lock = threading.Lock()\n # TODO: random initialization\n # A <string, tf.ResourceVariable> map. We use tf.ResourceVariable\n # instead ndarray to avoid copying and conversion when calling\n # optimizer's apply_gradients() function.\n self._model = {}\n self._version = 0\n self._gradient_sum = {}\n self._grad_to_wait = grads_to_wait\n self._grad_n = 0\n\n def _set_model_var(self, name, value):\n \"\"\"Add or set model variable. Value should be a float32 ndarray\"\"\"\n if value.dtype != np.float32:\n raise ValueError(\"Value should be a float32 numpy array\")\n self._model[name] = tf.Variable(value, name=name)\n\n def GetTask(self, request, context):\n # TODO: implent task queues. 
Return an empty task for now.\n res = master_pb2.Task()\n res.shard_file_name = \"\"\n res.model_version = self._version\n return res\n\n def GetModel(self, request, context):\n if request.min_version > self._version:\n err_msg = (\n \"Requested version %d not available yet, current version: %d\"\n % (request.min_version, self._version)\n )\n self.logger.warning(err_msg)\n raise ValueError(err_msg)\n\n res = master_pb2.Model()\n with self._lock:\n res.version = self._version\n for k, v in self._model.items():\n res.param[k].CopyFrom(ndarray_to_tensor(v.numpy()))\n return res\n\n def _update_model(self):\n assert self._lock.locked()\n grad_var = []\n for k in self._gradient_sum:\n self._gradient_sum[k] = self._gradient_sum[k] / self._grad_to_wait\n grad_var.append((self._gradient_sum[k], self._model[k]))\n self._opt.apply_gradients(grad_var)\n self._version += 1\n self._gradient_sum.clear()\n self._grad_n = 0\n\n def ReportTaskResult(self, request, context):\n if request.model_version > self._version:\n err_msg = \"Model version %d out of range, current version: %d\" % (\n request.model_version,\n self._version,\n )\n self.logger.warning(err_msg)\n raise ValueError(err_msg)\n\n res = master_pb2.ReportTaskResultReply()\n if request.model_version < self._version:\n self.logger.warning(\n \"Task result for outdated version %d dropped\",\n request.model_version,\n )\n res.accepted = False\n res.model_version = self._version\n return res\n\n if request.err_message:\n self.logger.warning(\"Worker error: %s\" % request.err_message)\n res.accepted = False\n res.model_version = self._version\n return res\n\n # TODO: Update task queue with task_id\n with self._lock:\n tmp = {}\n # Do sanity check before accumulating gradients.\n for k, v in request.gradient.items():\n if k not in self._model:\n raise ValueError(\n \"Gradient key: %s is not part of model\", k\n )\n arr = tensor_to_ndarray(v)\n if arr.shape != self._model[k].numpy().shape:\n raise ValueError(\n \"Gradient key: %s has incompatible dimension\", k\n )\n tmp[k] = arr\n\n for k, v in tmp.items():\n if k in self._gradient_sum:\n self._gradient_sum[k] = self._gradient_sum[k] + v\n else:\n self._gradient_sum[k] = v\n\n self._grad_n += 1\n if self._grad_n >= self._grad_to_wait:\n self._update_model()\n res.accepted = True\n res.model_version = self._version\n return res\n", "path": "elasticdl/master/servicer.py"}]} | 1,746 | 302 |
gh_patches_debug_61519 | rasdani/github-patches | git_diff | open-mmlab__mmpose-1906 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
little config error in 1.x
mmpose/tree/1.x/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand)/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py
mobilenetv2 out_channels is 1280, however "in_channles" of the head is 2048 in this config file.
</issue>
<code>
[start of configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py]
1 _base_ = ['../../../_base_/default_runtime.py']
2
3 # runtime
4 train_cfg = dict(max_epochs=210, val_interval=10)
5
6 # optimizer
7 optim_wrapper = dict(optimizer=dict(
8 type='Adam',
9 lr=5e-4,
10 ))
11
12 # learning policy
13 param_scheduler = [
14 dict(
15 type='LinearLR', begin=0, end=500, start_factor=0.001,
16 by_epoch=False), # warm-up
17 dict(
18 type='MultiStepLR',
19 begin=0,
20 end=210,
21 milestones=[170, 200],
22 gamma=0.1,
23 by_epoch=True)
24 ]
25
26 # automatically scaling LR based on the actual training batch size
27 auto_scale_lr = dict(base_batch_size=256)
28
29 # hooks
30 default_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater'))
31 # codec settings
32 codec = dict(
33 type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2)
34
35 # model settings
36 model = dict(
37 type='TopdownPoseEstimator',
38 data_preprocessor=dict(
39 type='PoseDataPreprocessor',
40 mean=[123.675, 116.28, 103.53],
41 std=[58.395, 57.12, 57.375],
42 bgr_to_rgb=True),
43 backbone=dict(
44 type='MobileNetV2',
45 widen_factor=1.,
46 out_indices=(7, ),
47 init_cfg=dict(type='Pretrained', checkpoint='mmcls://mobilenet_v2')),
48 head=dict(
49 type='HeatmapHead',
50 in_channels=2048,
51 out_channels=21,
52 loss=dict(type='KeypointMSELoss', use_target_weight=True),
53 decoder=codec),
54 test_cfg=dict(
55 flip_test=True,
56 flip_mode='heatmap',
57 shift_heatmap=True,
58 ))
59
60 # base dataset settings
61 dataset_type = 'CocoWholeBodyHandDataset'
62 data_mode = 'topdown'
63 data_root = 'data/coco/'
64
65 # pipelines
66 train_pipeline = [
67 dict(type='LoadImage', file_client_args={{_base_.file_client_args}}),
68 dict(type='GetBBoxCenterScale'),
69 dict(
70 type='RandomBBoxTransform', rotate_factor=180,
71 scale_factor=(0.7, 1.3)),
72 dict(type='RandomFlip', direction='horizontal'),
73 dict(type='TopdownAffine', input_size=codec['input_size']),
74 dict(type='GenerateTarget', encoder=codec),
75 dict(type='PackPoseInputs')
76 ]
77 val_pipeline = [
78 dict(type='LoadImage', file_client_args={{_base_.file_client_args}}),
79 dict(type='GetBBoxCenterScale'),
80 dict(type='TopdownAffine', input_size=codec['input_size']),
81 dict(type='PackPoseInputs')
82 ]
83
84 # data loaders
85 train_dataloader = dict(
86 batch_size=32,
87 num_workers=2,
88 persistent_workers=True,
89 sampler=dict(type='DefaultSampler', shuffle=True),
90 dataset=dict(
91 type=dataset_type,
92 data_root=data_root,
93 data_mode=data_mode,
94 ann_file='annotations/coco_wholebody_train_v1.0.json',
95 data_prefix=dict(img='train2017/'),
96 pipeline=train_pipeline,
97 ))
98 val_dataloader = dict(
99 batch_size=32,
100 num_workers=2,
101 persistent_workers=True,
102 drop_last=False,
103 sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),
104 dataset=dict(
105 type=dataset_type,
106 data_root=data_root,
107 data_mode=data_mode,
108 ann_file='annotations/coco_wholebody_val_v1.0.json',
109 data_prefix=dict(img='val2017/'),
110 test_mode=True,
111 pipeline=val_pipeline,
112 ))
113 test_dataloader = val_dataloader
114
115 val_evaluator = [
116 dict(type='PCKAccuracy', thr=0.2),
117 dict(type='AUC'),
118 dict(type='EPE')
119 ]
120 test_evaluator = val_evaluator
121
[end of configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py
--- a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py
+++ b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py
@@ -47,7 +47,7 @@
init_cfg=dict(type='Pretrained', checkpoint='mmcls://mobilenet_v2')),
head=dict(
type='HeatmapHead',
- in_channels=2048,
+ in_channels=1280,
out_channels=21,
loss=dict(type='KeypointMSELoss', use_target_weight=True),
decoder=codec),
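The corrected width comes from the backbone itself: with `widen_factor=1.`, MobileNetV2's final 1x1 convolution (the stage selected by `out_indices=(7, )`) produces 1280 channels, while 2048 is the output width of ResNet-50, which this config was presumably adapted from. A sanity check along these lines (illustrative only, assumes torch and mmpose are installed):

```python
# Sketch: confirm the backbone's output width for this config's settings.
import torch
from mmpose.models.backbones import MobileNetV2

backbone = MobileNetV2(widen_factor=1.0, out_indices=(7, ))
backbone.eval()
with torch.no_grad():
    out = backbone(torch.randn(1, 3, 256, 256))
feat = out[0] if isinstance(out, (list, tuple)) else out
print(feat.shape)  # expected: torch.Size([1, 1280, 8, 8])
```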
| {"golden_diff": "diff --git a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py\n--- a/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py\n+++ b/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py\n@@ -47,7 +47,7 @@\n init_cfg=dict(type='Pretrained', checkpoint='mmcls://mobilenet_v2')),\n head=dict(\n type='HeatmapHead',\n- in_channels=2048,\n+ in_channels=1280,\n out_channels=21,\n loss=dict(type='KeypointMSELoss', use_target_weight=True),\n decoder=codec),\n", "issue": "little config error in 1.x\n\r\nmmpose/tree/1.x/configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand)/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py\r\n\r\nmobilenetv2 out_channels is 1280, however \"in_channles\" of the head is 2048 in this config file. \r\n\n", "before_files": [{"content": "_base_ = ['../../../_base_/default_runtime.py']\n\n# runtime\ntrain_cfg = dict(max_epochs=210, val_interval=10)\n\n# optimizer\noptim_wrapper = dict(optimizer=dict(\n type='Adam',\n lr=5e-4,\n))\n\n# learning policy\nparam_scheduler = [\n dict(\n type='LinearLR', begin=0, end=500, start_factor=0.001,\n by_epoch=False), # warm-up\n dict(\n type='MultiStepLR',\n begin=0,\n end=210,\n milestones=[170, 200],\n gamma=0.1,\n by_epoch=True)\n]\n\n# automatically scaling LR based on the actual training batch size\nauto_scale_lr = dict(base_batch_size=256)\n\n# hooks\ndefault_hooks = dict(checkpoint=dict(save_best='AUC', rule='greater'))\n# codec settings\ncodec = dict(\n type='MSRAHeatmap', input_size=(256, 256), heatmap_size=(64, 64), sigma=2)\n\n# model settings\nmodel = dict(\n type='TopdownPoseEstimator',\n data_preprocessor=dict(\n type='PoseDataPreprocessor',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n bgr_to_rgb=True),\n backbone=dict(\n type='MobileNetV2',\n widen_factor=1.,\n out_indices=(7, ),\n init_cfg=dict(type='Pretrained', checkpoint='mmcls://mobilenet_v2')),\n head=dict(\n type='HeatmapHead',\n in_channels=2048,\n out_channels=21,\n loss=dict(type='KeypointMSELoss', use_target_weight=True),\n decoder=codec),\n test_cfg=dict(\n flip_test=True,\n flip_mode='heatmap',\n shift_heatmap=True,\n ))\n\n# base dataset settings\ndataset_type = 'CocoWholeBodyHandDataset'\ndata_mode = 'topdown'\ndata_root = 'data/coco/'\n\n# pipelines\ntrain_pipeline = [\n dict(type='LoadImage', file_client_args={{_base_.file_client_args}}),\n dict(type='GetBBoxCenterScale'),\n dict(\n type='RandomBBoxTransform', rotate_factor=180,\n scale_factor=(0.7, 1.3)),\n dict(type='RandomFlip', direction='horizontal'),\n dict(type='TopdownAffine', input_size=codec['input_size']),\n dict(type='GenerateTarget', encoder=codec),\n dict(type='PackPoseInputs')\n]\nval_pipeline = [\n dict(type='LoadImage', file_client_args={{_base_.file_client_args}}),\n dict(type='GetBBoxCenterScale'),\n dict(type='TopdownAffine', input_size=codec['input_size']),\n dict(type='PackPoseInputs')\n]\n\n# data loaders\ntrain_dataloader = dict(\n batch_size=32,\n num_workers=2,\n persistent_workers=True,\n sampler=dict(type='DefaultSampler', shuffle=True),\n dataset=dict(\n type=dataset_type,\n data_root=data_root,\n data_mode=data_mode,\n ann_file='annotations/coco_wholebody_train_v1.0.json',\n 
data_prefix=dict(img='train2017/'),\n pipeline=train_pipeline,\n ))\nval_dataloader = dict(\n batch_size=32,\n num_workers=2,\n persistent_workers=True,\n drop_last=False,\n sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),\n dataset=dict(\n type=dataset_type,\n data_root=data_root,\n data_mode=data_mode,\n ann_file='annotations/coco_wholebody_val_v1.0.json',\n data_prefix=dict(img='val2017/'),\n test_mode=True,\n pipeline=val_pipeline,\n ))\ntest_dataloader = val_dataloader\n\nval_evaluator = [\n dict(type='PCKAccuracy', thr=0.2),\n dict(type='AUC'),\n dict(type='EPE')\n]\ntest_evaluator = val_evaluator\n", "path": "configs/hand_2d_keypoint/topdown_heatmap/coco_wholebody_hand/td-hm_mobilenetv2_8xb32-210e_coco-wholebody-hand-256x256.py"}]} | 1,909 | 317 |
gh_patches_debug_20002 | rasdani/github-patches | git_diff | scrapy__scrapy-2510 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Disk queues don't preserve Request class
When a Request subclass (e.g. FormRequest) is sent to a disk queue a bare Request is what you get back.
This is inconvenient for scrapy-splash: Splash requests all have Splash URL as request.url, but for logging it is nice to display the requested URL, not only Splash URL. In scrapy-splash this is implemented by changing `__repr__` in a Request subclass, but it works only when request is kept in memory.
</issue>
<code>
[start of scrapy/utils/reqser.py]
1 """
2 Helper functions for serializing (and deserializing) requests.
3 """
4 import six
5
6 from scrapy.http import Request
7 from scrapy.utils.python import to_unicode, to_native_str
8
9
10 def request_to_dict(request, spider=None):
11 """Convert Request object to a dict.
12
13 If a spider is given, it will try to find out the name of the spider method
14 used in the callback and store that as the callback.
15 """
16 cb = request.callback
17 if callable(cb):
18 cb = _find_method(spider, cb)
19 eb = request.errback
20 if callable(eb):
21 eb = _find_method(spider, eb)
22 d = {
23 'url': to_unicode(request.url), # urls should be safe (safe_string_url)
24 'callback': cb,
25 'errback': eb,
26 'method': request.method,
27 'headers': dict(request.headers),
28 'body': request.body,
29 'cookies': request.cookies,
30 'meta': request.meta,
31 '_encoding': request._encoding,
32 'priority': request.priority,
33 'dont_filter': request.dont_filter,
34 }
35 return d
36
37
38 def request_from_dict(d, spider=None):
39 """Create Request object from a dict.
40
41 If a spider is given, it will try to resolve the callbacks looking at the
42 spider for methods with the same name.
43 """
44 cb = d['callback']
45 if cb and spider:
46 cb = _get_method(spider, cb)
47 eb = d['errback']
48 if eb and spider:
49 eb = _get_method(spider, eb)
50 return Request(
51 url=to_native_str(d['url']),
52 callback=cb,
53 errback=eb,
54 method=d['method'],
55 headers=d['headers'],
56 body=d['body'],
57 cookies=d['cookies'],
58 meta=d['meta'],
59 encoding=d['_encoding'],
60 priority=d['priority'],
61 dont_filter=d['dont_filter'])
62
63
64 def _find_method(obj, func):
65 if obj:
66 try:
67 func_self = six.get_method_self(func)
68 except AttributeError: # func has no __self__
69 pass
70 else:
71 if func_self is obj:
72 return six.get_method_function(func).__name__
73 raise ValueError("Function %s is not a method of: %s" % (func, obj))
74
75
76 def _get_method(obj, name):
77 name = str(name)
78 try:
79 return getattr(obj, name)
80 except AttributeError:
81 raise ValueError("Method %r not found in: %s" % (name, obj))
82
[end of scrapy/utils/reqser.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scrapy/utils/reqser.py b/scrapy/utils/reqser.py
--- a/scrapy/utils/reqser.py
+++ b/scrapy/utils/reqser.py
@@ -5,6 +5,7 @@
from scrapy.http import Request
from scrapy.utils.python import to_unicode, to_native_str
+from scrapy.utils.misc import load_object
def request_to_dict(request, spider=None):
@@ -32,6 +33,8 @@
'priority': request.priority,
'dont_filter': request.dont_filter,
}
+ if type(request) is not Request:
+ d['_class'] = request.__module__ + '.' + request.__class__.__name__
return d
@@ -47,7 +50,8 @@
eb = d['errback']
if eb and spider:
eb = _get_method(spider, eb)
- return Request(
+ request_cls = load_object(d['_class']) if '_class' in d else Request
+ return request_cls(
url=to_native_str(d['url']),
callback=cb,
errback=eb,
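With the `_class` key stored on serialization and resolved through `load_object` on the way back, `Request` subclasses now survive the round trip instead of degrading to plain `Request` objects, while plain requests keep serializing without the extra key. A minimal check of the intended behaviour, illustrative only:

```python
# Sketch: a FormRequest should come back from the dict form as a FormRequest.
from scrapy.http import FormRequest, Request
from scrapy.utils.reqser import request_to_dict, request_from_dict

req = FormRequest("http://www.example.com", formdata={"q": "books"})
d = request_to_dict(req)
assert "_class" in d                                      # subclass is recorded
assert "_class" not in request_to_dict(Request(req.url))  # plain stays plain

restored = request_from_dict(d)
assert isinstance(restored, FormRequest)                  # class is preserved
```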
| {"golden_diff": "diff --git a/scrapy/utils/reqser.py b/scrapy/utils/reqser.py\n--- a/scrapy/utils/reqser.py\n+++ b/scrapy/utils/reqser.py\n@@ -5,6 +5,7 @@\n \n from scrapy.http import Request\n from scrapy.utils.python import to_unicode, to_native_str\n+from scrapy.utils.misc import load_object\n \n \n def request_to_dict(request, spider=None):\n@@ -32,6 +33,8 @@\n 'priority': request.priority,\n 'dont_filter': request.dont_filter,\n }\n+ if type(request) is not Request:\n+ d['_class'] = request.__module__ + '.' + request.__class__.__name__\n return d\n \n \n@@ -47,7 +50,8 @@\n eb = d['errback']\n if eb and spider:\n eb = _get_method(spider, eb)\n- return Request(\n+ request_cls = load_object(d['_class']) if '_class' in d else Request\n+ return request_cls(\n url=to_native_str(d['url']),\n callback=cb,\n errback=eb,\n", "issue": "Disk queues don't preserve Request class\nWhen a Request subclass (e.g. FormRequest) is sent to a disk queue a bare Request is what you get back. \n\nThis is inconvenient for scrapy-splash: Splash requests all have Splash URL as request.url, but for logging it is nice to display the requested URL, not only Splash URL. In scrapy-splash this is implemented by changing `__repr__` in a Request subclass, but it works only when request is kept in memory.\n\n", "before_files": [{"content": "\"\"\"\nHelper functions for serializing (and deserializing) requests.\n\"\"\"\nimport six\n\nfrom scrapy.http import Request\nfrom scrapy.utils.python import to_unicode, to_native_str\n\n\ndef request_to_dict(request, spider=None):\n \"\"\"Convert Request object to a dict.\n\n If a spider is given, it will try to find out the name of the spider method\n used in the callback and store that as the callback.\n \"\"\"\n cb = request.callback\n if callable(cb):\n cb = _find_method(spider, cb)\n eb = request.errback\n if callable(eb):\n eb = _find_method(spider, eb)\n d = {\n 'url': to_unicode(request.url), # urls should be safe (safe_string_url)\n 'callback': cb,\n 'errback': eb,\n 'method': request.method,\n 'headers': dict(request.headers),\n 'body': request.body,\n 'cookies': request.cookies,\n 'meta': request.meta,\n '_encoding': request._encoding,\n 'priority': request.priority,\n 'dont_filter': request.dont_filter,\n }\n return d\n\n\ndef request_from_dict(d, spider=None):\n \"\"\"Create Request object from a dict.\n\n If a spider is given, it will try to resolve the callbacks looking at the\n spider for methods with the same name.\n \"\"\"\n cb = d['callback']\n if cb and spider:\n cb = _get_method(spider, cb)\n eb = d['errback']\n if eb and spider:\n eb = _get_method(spider, eb)\n return Request(\n url=to_native_str(d['url']),\n callback=cb,\n errback=eb,\n method=d['method'],\n headers=d['headers'],\n body=d['body'],\n cookies=d['cookies'],\n meta=d['meta'],\n encoding=d['_encoding'],\n priority=d['priority'],\n dont_filter=d['dont_filter'])\n\n\ndef _find_method(obj, func):\n if obj:\n try:\n func_self = six.get_method_self(func)\n except AttributeError: # func has no __self__\n pass\n else:\n if func_self is obj:\n return six.get_method_function(func).__name__\n raise ValueError(\"Function %s is not a method of: %s\" % (func, obj))\n\n\ndef _get_method(obj, name):\n name = str(name)\n try:\n return getattr(obj, name)\n except AttributeError:\n raise ValueError(\"Method %r not found in: %s\" % (name, obj))\n", "path": "scrapy/utils/reqser.py"}]} | 1,343 | 244 |
gh_patches_debug_7433 | rasdani/github-patches | git_diff | SciTools__cartopy-439 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
BUG: Geostationary example bug
```
python cartopy/docs/source/examples/geostationary.py
Traceback (most recent call last):
File "/net/home/h05/cpelley/git/cartopy/docs/source/examples/geostationary.py", line 60, in <module>
main()
File "/net/home/h05/cpelley/git/cartopy/docs/source/examples/geostationary.py", line 54, in main
img, crs, extent, origin = geos_image()
File "/net/home/h05/cpelley/git/cartopy/docs/source/examples/geostationary.py", line 43, in geos_image
img_handle = BytesIO(urllib2.urlopen(url).read())
NameError: global name 'urllib2' is not defined
```
</issue>
<code>
[start of lib/cartopy/examples/geostationary.py]
1 """
2 Reprojecting images from a Geostationary projection
3 ---------------------------------------------------
4
5 This example demonstrates Cartopy's ability to project images into the desired
6 projection on-the-fly. The image itself is retrieved from a URL and is loaded
7 directly into memory without storing it intermediately into a file. It
8 represents pre-processed data from Moderate-Resolution Imaging
9 Spectroradiometer (MODIS) which has been put into an image in the data's
10 native Geostationary coordinate system - it is then projected by cartopy
11 into a global Miller map.
12
13 """
14 __tags__ = ["Scalar data"]
15 try:
16 from urllib2 import urlopen
17 except ImportError:
18 from urllib.request import urlopen
19 from io import BytesIO
20
21 import cartopy.crs as ccrs
22 import matplotlib.pyplot as plt
23
24
25 def geos_image():
26 """
27 Return a specific MODIS image by retrieving it from a github gist URL.
28
29 Returns
30 -------
31 img : numpy array
32 The pixels of the image in a numpy array.
33 img_proj : cartopy CRS
34 The rectangular coordinate system of the image.
35 img_extent : tuple of floats
36 The extent of the image ``(x0, y0, x1, y1)`` referenced in
37 the ``img_proj`` coordinate system.
38 origin : str
39 The origin of the image to be passed through to matplotlib's imshow.
40
41 """
42 url = ('https://gist.github.com/pelson/5871263/raw/'
43 'EIDA50_201211061300_clip2.png')
44 img_handle = BytesIO(urllib2.urlopen(url).read())
45 img = plt.imread(img_handle)
46 img_proj = ccrs.Geostationary(satellite_height=35786000)
47 img_extent = (-5500000, 5500000, -5500000, 5500000)
48 return img, img_proj, img_extent, 'upper'
49
50
51 def main():
52 ax = plt.axes(projection=ccrs.Miller())
53 ax.coastlines()
54 ax.set_global()
55 img, crs, extent, origin = geos_image()
56 plt.imshow(img, transform=crs, extent=extent, origin=origin, cmap='gray')
57 plt.show()
58
59
60 if __name__ == '__main__':
61 main()
62
[end of lib/cartopy/examples/geostationary.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/cartopy/examples/geostationary.py b/lib/cartopy/examples/geostationary.py
--- a/lib/cartopy/examples/geostationary.py
+++ b/lib/cartopy/examples/geostationary.py
@@ -41,7 +41,7 @@
"""
url = ('https://gist.github.com/pelson/5871263/raw/'
'EIDA50_201211061300_clip2.png')
- img_handle = BytesIO(urllib2.urlopen(url).read())
+ img_handle = BytesIO(urlopen(url).read())
img = plt.imread(img_handle)
img_proj = ccrs.Geostationary(satellite_height=35786000)
img_extent = (-5500000, 5500000, -5500000, 5500000)
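The original `NameError` happened because the module never binds a name `urllib2`: the compatibility block at the top imports only the `urlopen` function (from `urllib2` on Python 2, from `urllib.request` on Python 3), so the call site has to use the bare name. The same pattern in isolation:

```python
# Sketch of the py2/py3-compatible import the example relies on; after it
# runs, only `urlopen` exists in the namespace -- `urllib2` itself does not.
try:
    from urllib2 import urlopen  # Python 2
except ImportError:
    from urllib.request import urlopen  # Python 3

payload = urlopen('https://example.com/image.png').read()
```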
| {"golden_diff": "diff --git a/lib/cartopy/examples/geostationary.py b/lib/cartopy/examples/geostationary.py\n--- a/lib/cartopy/examples/geostationary.py\n+++ b/lib/cartopy/examples/geostationary.py\n@@ -41,7 +41,7 @@\n \"\"\"\n url = ('https://gist.github.com/pelson/5871263/raw/'\n 'EIDA50_201211061300_clip2.png')\n- img_handle = BytesIO(urllib2.urlopen(url).read())\n+ img_handle = BytesIO(urlopen(url).read())\n img = plt.imread(img_handle)\n img_proj = ccrs.Geostationary(satellite_height=35786000)\n img_extent = (-5500000, 5500000, -5500000, 5500000)\n", "issue": "BUG: Geostationary example bug\n```\npython cartopy/docs/source/examples/geostationary.py\nTraceback (most recent call last):\n File \"/net/home/h05/cpelley/git/cartopy/docs/source/examples/geostationary.py\", line 60, in <module>\n main()\n File \"/net/home/h05/cpelley/git/cartopy/docs/source/examples/geostationary.py\", line 54, in main\n img, crs, extent, origin = geos_image()\n File \"/net/home/h05/cpelley/git/cartopy/docs/source/examples/geostationary.py\", line 43, in geos_image\n img_handle = BytesIO(urllib2.urlopen(url).read())\nNameError: global name 'urllib2' is not defined\n```\n\n", "before_files": [{"content": "\"\"\"\nReprojecting images from a Geostationary projection\n---------------------------------------------------\n\nThis example demonstrates Cartopy's ability to project images into the desired\nprojection on-the-fly. The image itself is retrieved from a URL and is loaded\ndirectly into memory without storing it intermediately into a file. It\nrepresents pre-processed data from Moderate-Resolution Imaging\nSpectroradiometer (MODIS) which has been put into an image in the data's\nnative Geostationary coordinate system - it is then projected by cartopy\ninto a global Miller map.\n\n\"\"\"\n__tags__ = [\"Scalar data\"]\ntry:\n from urllib2 import urlopen\nexcept ImportError:\n from urllib.request import urlopen\nfrom io import BytesIO\n\nimport cartopy.crs as ccrs\nimport matplotlib.pyplot as plt\n\n\ndef geos_image():\n \"\"\"\n Return a specific MODIS image by retrieving it from a github gist URL.\n\n Returns\n -------\n img : numpy array\n The pixels of the image in a numpy array.\n img_proj : cartopy CRS\n The rectangular coordinate system of the image.\n img_extent : tuple of floats\n The extent of the image ``(x0, y0, x1, y1)`` referenced in\n the ``img_proj`` coordinate system.\n origin : str\n The origin of the image to be passed through to matplotlib's imshow.\n\n \"\"\"\n url = ('https://gist.github.com/pelson/5871263/raw/'\n 'EIDA50_201211061300_clip2.png')\n img_handle = BytesIO(urllib2.urlopen(url).read())\n img = plt.imread(img_handle)\n img_proj = ccrs.Geostationary(satellite_height=35786000)\n img_extent = (-5500000, 5500000, -5500000, 5500000)\n return img, img_proj, img_extent, 'upper'\n\n\ndef main():\n ax = plt.axes(projection=ccrs.Miller())\n ax.coastlines()\n ax.set_global()\n img, crs, extent, origin = geos_image()\n plt.imshow(img, transform=crs, extent=extent, origin=origin, cmap='gray')\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n", "path": "lib/cartopy/examples/geostationary.py"}]} | 1,356 | 208 |
gh_patches_debug_774 | rasdani/github-patches | git_diff | getredash__redash-2501 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Non blocking widget refresh indicator
When refreshing a dashboard widget the previous results are hidden by the refresh animation. This can be an issue when refreshing a dashboard frequently, as you might happen to see the spinner for long period of times.
To solve this we can keep showing the old data until new one is available, while showing some indication that refresh is in progress.
Is the following animation enough?

After refreshing a dashboard, widgets become draggable even when not in edit mode
</issue>
<code>
[start of redash/handlers/widgets.py]
1 import json
2
3 from flask import request
4 from redash import models
5 from redash.handlers.base import BaseResource
6 from redash.permissions import (require_access,
7 require_object_modify_permission,
8 require_permission, view_only)
9
10
11 class WidgetListResource(BaseResource):
12 @require_permission('edit_dashboard')
13 def post(self):
14 """
15 Add a widget to a dashboard.
16
17 :<json number dashboard_id: The ID for the dashboard being added to
18 :<json visualization_id: The ID of the visualization to put in this widget
19 :<json object options: Widget options
20 :<json string text: Text box contents
21 :<json number width: Width for widget display
22
23 :>json object widget: The created widget
24 """
25 widget_properties = request.get_json(force=True)
26 dashboard = models.Dashboard.get_by_id_and_org(widget_properties.pop('dashboard_id'), self.current_org)
27 require_object_modify_permission(dashboard, self.current_user)
28
29 widget_properties['options'] = json.dumps(widget_properties['options'])
30 widget_properties.pop('id', None)
31 widget_properties['dashboard'] = dashboard
32
33 visualization_id = widget_properties.pop('visualization_id')
34 if visualization_id:
35 visualization = models.Visualization.get_by_id_and_org(visualization_id, self.current_org)
36 require_access(visualization.query_rel.groups, self.current_user, view_only)
37 else:
38 visualization = None
39
40 widget_properties['visualization'] = visualization
41
42 widget = models.Widget(**widget_properties)
43 models.db.session.add(widget)
44 models.db.session.commit()
45
46 models.db.session.commit()
47 return {'widget': widget.to_dict()}
48
49
50 class WidgetResource(BaseResource):
51 @require_permission('edit_dashboard')
52 def post(self, widget_id):
53 """
54 Updates a widget in a dashboard.
55 This method currently handles Text Box widgets only.
56
57 :param number widget_id: The ID of the widget to modify
58
59 :<json string text: The new contents of the text box
60 """
61 widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)
62 require_object_modify_permission(widget.dashboard, self.current_user)
63 widget_properties = request.get_json(force=True)
64 widget.text = widget_properties['text']
65 widget.options = json.dumps(widget_properties['options'])
66 models.db.session.commit()
67 return widget.to_dict()
68
69 @require_permission('edit_dashboard')
70 def delete(self, widget_id):
71 """
72 Remove a widget from a dashboard.
73
74 :param number widget_id: ID of widget to remove
75 """
76 widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)
77 require_object_modify_permission(widget.dashboard, self.current_user)
78 models.db.session.delete(widget)
79 models.db.session.commit()
80
[end of redash/handlers/widgets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/redash/handlers/widgets.py b/redash/handlers/widgets.py
--- a/redash/handlers/widgets.py
+++ b/redash/handlers/widgets.py
@@ -44,7 +44,7 @@
models.db.session.commit()
models.db.session.commit()
- return {'widget': widget.to_dict()}
+ return widget.to_dict()
class WidgetResource(BaseResource):
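Note that this also changes the endpoint's response shape: the create handler previously wrapped the new widget in an envelope, while `WidgetResource.post` in the same file already returns the widget dict directly, so the patch makes the two consistent. Roughly (field values are illustrative):

```python
# Response body before vs. after the patch (illustrative values only):
before = {"widget": {"id": 7, "text": "", "options": {}, "width": 3}}
after = {"id": 7, "text": "", "options": {}, "width": 3}
```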
| {"golden_diff": "diff --git a/redash/handlers/widgets.py b/redash/handlers/widgets.py\n--- a/redash/handlers/widgets.py\n+++ b/redash/handlers/widgets.py\n@@ -44,7 +44,7 @@\n models.db.session.commit()\n \n models.db.session.commit()\n- return {'widget': widget.to_dict()}\n+ return widget.to_dict()\n \n \n class WidgetResource(BaseResource):\n", "issue": "Non blocking widget refresh indicator\nWhen refreshing a dashboard widget the previous results are hidden by the refresh animation. This can be an issue when refreshing a dashboard frequently, as you might happen to see the spinner for long period of times.\r\n\r\nTo solve this we can keep showing the old data until new one is available, while showing some indication that refresh is in progress.\r\n\r\nIs the following animation enough?\r\n\r\n\nAfter refreshing a dashboard, widgets become draggable even when not in edit mode\n\n", "before_files": [{"content": "import json\n\nfrom flask import request\nfrom redash import models\nfrom redash.handlers.base import BaseResource\nfrom redash.permissions import (require_access,\n require_object_modify_permission,\n require_permission, view_only)\n\n\nclass WidgetListResource(BaseResource):\n @require_permission('edit_dashboard')\n def post(self):\n \"\"\"\n Add a widget to a dashboard.\n\n :<json number dashboard_id: The ID for the dashboard being added to\n :<json visualization_id: The ID of the visualization to put in this widget\n :<json object options: Widget options\n :<json string text: Text box contents\n :<json number width: Width for widget display\n\n :>json object widget: The created widget\n \"\"\"\n widget_properties = request.get_json(force=True)\n dashboard = models.Dashboard.get_by_id_and_org(widget_properties.pop('dashboard_id'), self.current_org)\n require_object_modify_permission(dashboard, self.current_user)\n\n widget_properties['options'] = json.dumps(widget_properties['options'])\n widget_properties.pop('id', None)\n widget_properties['dashboard'] = dashboard\n\n visualization_id = widget_properties.pop('visualization_id')\n if visualization_id:\n visualization = models.Visualization.get_by_id_and_org(visualization_id, self.current_org)\n require_access(visualization.query_rel.groups, self.current_user, view_only)\n else:\n visualization = None\n\n widget_properties['visualization'] = visualization\n\n widget = models.Widget(**widget_properties)\n models.db.session.add(widget)\n models.db.session.commit()\n\n models.db.session.commit()\n return {'widget': widget.to_dict()}\n\n\nclass WidgetResource(BaseResource):\n @require_permission('edit_dashboard')\n def post(self, widget_id):\n \"\"\"\n Updates a widget in a dashboard.\n This method currently handles Text Box widgets only.\n\n :param number widget_id: The ID of the widget to modify\n\n :<json string text: The new contents of the text box\n \"\"\"\n widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)\n require_object_modify_permission(widget.dashboard, self.current_user)\n widget_properties = request.get_json(force=True)\n widget.text = widget_properties['text']\n widget.options = json.dumps(widget_properties['options'])\n models.db.session.commit()\n return widget.to_dict()\n\n @require_permission('edit_dashboard')\n def delete(self, widget_id):\n \"\"\"\n Remove a widget from a dashboard.\n\n :param number widget_id: ID of widget to remove\n \"\"\"\n widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)\n require_object_modify_permission(widget.dashboard, self.current_user)\n 
models.db.session.delete(widget)\n models.db.session.commit()\n", "path": "redash/handlers/widgets.py"}]} | 1,383 | 90 |
gh_patches_debug_34407 | rasdani/github-patches | git_diff | svthalia__concrexit-2726 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Liked photos API endpoint
To add an overview of liked photos to ThaliApp, we need a new endpoint for liked photos.
I think it would be best to have `api/v2/photos/photos/` with `liked` boolean GET filter. It will need to do some filtering to prevent photos that are not published in an album from being returned.
</issue>
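For orientation, one possible shape for such an endpoint is sketched below; the serializer name and module path are assumptions for illustration, and this is not the project's actual implementation:

```python
# Hypothetical liked-photos list view (sketch only): reuses the visibility
# rule from PhotoLikeView (album must not be hidden) and filters on the
# requesting member's likes.
from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope
from rest_framework.generics import ListAPIView

from photos.api.v2.serializers.photo import PhotoSerializer  # assumed path
from photos.models import Photo


class LikedPhotoListView(ListAPIView):
    serializer_class = PhotoSerializer
    permission_classes = [IsAuthenticatedOrTokenHasScope]
    required_scopes = ["photos:read"]

    def get_queryset(self):
        return Photo.objects.filter(
            likes__member=self.request.member,
            album__hidden=False,
        ).select_properties("num_likes")
```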
<code>
[start of website/photos/api/v2/urls.py]
1 """Photos app API v2 urls."""
2 from django.urls import include, path
3
4 from photos.api.v2.views import AlbumDetailView, AlbumListView, PhotoLikeView
5
6 app_name = "photos"
7
8 urlpatterns = [
9 path(
10 "photos/",
11 include(
12 [
13 path("albums/", AlbumListView.as_view(), name="album-list"),
14 path(
15 "albums/<slug:slug>/",
16 AlbumDetailView.as_view(),
17 name="album-detail",
18 ),
19 path(
20 "photos/<int:pk>/like/", PhotoLikeView.as_view(), name="photo-like"
21 ),
22 ]
23 ),
24 ),
25 ]
26
[end of website/photos/api/v2/urls.py]
[start of website/photos/api/v2/views.py]
1 from django.db.models import Count, Prefetch, Q
2
3 from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope
4 from rest_framework import filters, status
5 from rest_framework.exceptions import PermissionDenied
6 from rest_framework.generics import ListAPIView, RetrieveAPIView
7 from rest_framework.response import Response
8 from rest_framework.views import APIView
9
10 from photos import services
11 from photos.api.v2.serializers.album import AlbumListSerializer, AlbumSerializer
12 from photos.models import Album, Like, Photo
13
14
15 class AlbumListView(ListAPIView):
16 """Returns an overview of all albums."""
17
18 serializer_class = AlbumListSerializer
19 queryset = Album.objects.filter(hidden=False)
20 permission_classes = [
21 IsAuthenticatedOrTokenHasScope,
22 ]
23 required_scopes = ["photos:read"]
24 filter_backends = (filters.SearchFilter,)
25 search_fields = ("title", "date", "slug")
26
27
28 class AlbumDetailView(RetrieveAPIView):
29 """Returns the details of an album."""
30
31 serializer_class = AlbumSerializer
32 permission_classes = [
33 IsAuthenticatedOrTokenHasScope,
34 ]
35 required_scopes = ["photos:read"]
36 lookup_field = "slug"
37
38 def retrieve(self, request, *args, **kwargs):
39 if not services.is_album_accessible(request, self.get_object()):
40 raise PermissionDenied
41 return super().retrieve(request, *args, **kwargs)
42
43 def get_queryset(self):
44 photos = Photo.objects.select_properties("num_likes")
45 if self.request.member:
46 photos = photos.annotate(
47 member_likes=Count("likes", filter=Q(likes__member=self.request.member))
48 )
49 return Album.objects.filter(hidden=False).prefetch_related(
50 Prefetch("photo_set", queryset=photos)
51 )
52
53
54 class PhotoLikeView(APIView):
55 permission_classes = [IsAuthenticatedOrTokenHasScope]
56 required_scopes = ["photos:read"]
57
58 def get(self, request, **kwargs):
59 photo_id = kwargs.get("pk")
60 try:
61 photo = Photo.objects.filter(album__hidden=False).get(pk=photo_id)
62 except Photo.DoesNotExist:
63 return Response(status=status.HTTP_404_NOT_FOUND)
64
65 return Response(
66 {
67 "liked": photo.likes.filter(member=request.member).exists(),
68 "num_likes": photo.num_likes,
69 },
70 status=status.HTTP_200_OK,
71 )
72
73 def post(self, request, **kwargs):
74 photo_id = kwargs.get("pk")
75 try:
76 photo = Photo.objects.filter(album__hidden=False).get(pk=photo_id)
77 except Photo.DoesNotExist:
78 return Response(status=status.HTTP_404_NOT_FOUND)
79
80 _, created = Like.objects.get_or_create(photo=photo, member=request.member)
81
82 if created:
83 return Response(
84 {
85 "liked": photo.likes.filter(member=request.member).exists(),
86 "num_likes": photo.num_likes,
87 },
88 status=status.HTTP_201_CREATED,
89 )
90 return Response(
91 {
92 "liked": photo.likes.filter(member=request.member).exists(),
93 "num_likes": photo.num_likes,
94 },
95 status=status.HTTP_200_OK,
96 )
97
98 def delete(self, request, **kwargs):
99 photo_id = kwargs.get("pk")
100 try:
101 photo = Photo.objects.filter(album__hidden=False).get(pk=photo_id)
102 except Photo.DoesNotExist:
103 return Response(status=status.HTTP_404_NOT_FOUND)
104
105 try:
106 like = Like.objects.filter(photo__album__hidden=False).get(
107 member=request.member, photo__pk=photo_id
108 )
109 except Like.DoesNotExist:
110 return Response(
111 {
112 "liked": False,
113 "num_likes": photo.num_likes,
114 },
115 status=status.HTTP_204_NO_CONTENT,
116 )
117
118 like.delete()
119
120 return Response(
121 {
122 "liked": False,
123 "num_likes": photo.num_likes,
124 },
125 status=status.HTTP_202_ACCEPTED,
126 )
127
[end of website/photos/api/v2/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/photos/api/v2/urls.py b/website/photos/api/v2/urls.py
--- a/website/photos/api/v2/urls.py
+++ b/website/photos/api/v2/urls.py
@@ -1,7 +1,12 @@
"""Photos app API v2 urls."""
from django.urls import include, path
-from photos.api.v2.views import AlbumDetailView, AlbumListView, PhotoLikeView
+from photos.api.v2.views import (
+ AlbumDetailView,
+ AlbumListView,
+ LikedPhotosListView,
+ PhotoLikeView,
+)
app_name = "photos"
@@ -19,6 +24,9 @@
path(
"photos/<int:pk>/like/", PhotoLikeView.as_view(), name="photo-like"
),
+ path(
+ "photos/liked/", LikedPhotosListView.as_view(), name="liked-photos"
+ ),
]
),
),
diff --git a/website/photos/api/v2/views.py b/website/photos/api/v2/views.py
--- a/website/photos/api/v2/views.py
+++ b/website/photos/api/v2/views.py
@@ -8,7 +8,11 @@
from rest_framework.views import APIView
from photos import services
-from photos.api.v2.serializers.album import AlbumListSerializer, AlbumSerializer
+from photos.api.v2.serializers.album import (
+ AlbumListSerializer,
+ AlbumSerializer,
+ PhotoListSerializer,
+)
from photos.models import Album, Like, Photo
@@ -51,6 +55,35 @@
)
+class LikedPhotosListView(ListAPIView):
+ """Returns the details the liked album."""
+
+ serializer_class = PhotoListSerializer
+ permission_classes = [
+ IsAuthenticatedOrTokenHasScope,
+ ]
+ required_scopes = ["photos:read"]
+
+ def get(self, request, *args, **kwargs):
+ if not self.request.member:
+ return Response(
+ data={
+ "detail": "You need to be a member in order to view your liked photos."
+ },
+ status=status.HTTP_403_FORBIDDEN,
+ )
+ return self.list(request, *args, **kwargs)
+
+ def get_queryset(self):
+ return (
+ Photo.objects.filter(likes__member=self.request.member, album__hidden=False)
+ .annotate(
+ member_likes=Count("likes", filter=Q(likes__member=self.request.member))
+ )
+ .select_properties("num_likes")
+ )
+
+
class PhotoLikeView(APIView):
permission_classes = [IsAuthenticatedOrTokenHasScope]
required_scopes = ["photos:read"]
| {"golden_diff": "diff --git a/website/photos/api/v2/urls.py b/website/photos/api/v2/urls.py\n--- a/website/photos/api/v2/urls.py\n+++ b/website/photos/api/v2/urls.py\n@@ -1,7 +1,12 @@\n \"\"\"Photos app API v2 urls.\"\"\"\n from django.urls import include, path\n \n-from photos.api.v2.views import AlbumDetailView, AlbumListView, PhotoLikeView\n+from photos.api.v2.views import (\n+ AlbumDetailView,\n+ AlbumListView,\n+ LikedPhotosListView,\n+ PhotoLikeView,\n+)\n \n app_name = \"photos\"\n \n@@ -19,6 +24,9 @@\n path(\n \"photos/<int:pk>/like/\", PhotoLikeView.as_view(), name=\"photo-like\"\n ),\n+ path(\n+ \"photos/liked/\", LikedPhotosListView.as_view(), name=\"liked-photos\"\n+ ),\n ]\n ),\n ),\ndiff --git a/website/photos/api/v2/views.py b/website/photos/api/v2/views.py\n--- a/website/photos/api/v2/views.py\n+++ b/website/photos/api/v2/views.py\n@@ -8,7 +8,11 @@\n from rest_framework.views import APIView\n \n from photos import services\n-from photos.api.v2.serializers.album import AlbumListSerializer, AlbumSerializer\n+from photos.api.v2.serializers.album import (\n+ AlbumListSerializer,\n+ AlbumSerializer,\n+ PhotoListSerializer,\n+)\n from photos.models import Album, Like, Photo\n \n \n@@ -51,6 +55,35 @@\n )\n \n \n+class LikedPhotosListView(ListAPIView):\n+ \"\"\"Returns the details the liked album.\"\"\"\n+\n+ serializer_class = PhotoListSerializer\n+ permission_classes = [\n+ IsAuthenticatedOrTokenHasScope,\n+ ]\n+ required_scopes = [\"photos:read\"]\n+\n+ def get(self, request, *args, **kwargs):\n+ if not self.request.member:\n+ return Response(\n+ data={\n+ \"detail\": \"You need to be a member in order to view your liked photos.\"\n+ },\n+ status=status.HTTP_403_FORBIDDEN,\n+ )\n+ return self.list(request, *args, **kwargs)\n+\n+ def get_queryset(self):\n+ return (\n+ Photo.objects.filter(likes__member=self.request.member, album__hidden=False)\n+ .annotate(\n+ member_likes=Count(\"likes\", filter=Q(likes__member=self.request.member))\n+ )\n+ .select_properties(\"num_likes\")\n+ )\n+\n+\n class PhotoLikeView(APIView):\n permission_classes = [IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"photos:read\"]\n", "issue": "Liked photos API endpoint\nTo add an overview of liked photos to ThaliApp, we need a new endpoint for liked photos.\r\n\r\nI think it would be best to have `api/v2/photos/photos/` with `liked` boolean GET filter. 
It will need to do some filtering to prevent photos that are not published in an album from being returned.\n", "before_files": [{"content": "\"\"\"Photos app API v2 urls.\"\"\"\nfrom django.urls import include, path\n\nfrom photos.api.v2.views import AlbumDetailView, AlbumListView, PhotoLikeView\n\napp_name = \"photos\"\n\nurlpatterns = [\n path(\n \"photos/\",\n include(\n [\n path(\"albums/\", AlbumListView.as_view(), name=\"album-list\"),\n path(\n \"albums/<slug:slug>/\",\n AlbumDetailView.as_view(),\n name=\"album-detail\",\n ),\n path(\n \"photos/<int:pk>/like/\", PhotoLikeView.as_view(), name=\"photo-like\"\n ),\n ]\n ),\n ),\n]\n", "path": "website/photos/api/v2/urls.py"}, {"content": "from django.db.models import Count, Prefetch, Q\n\nfrom oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\nfrom rest_framework import filters, status\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.generics import ListAPIView, RetrieveAPIView\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom photos import services\nfrom photos.api.v2.serializers.album import AlbumListSerializer, AlbumSerializer\nfrom photos.models import Album, Like, Photo\n\n\nclass AlbumListView(ListAPIView):\n \"\"\"Returns an overview of all albums.\"\"\"\n\n serializer_class = AlbumListSerializer\n queryset = Album.objects.filter(hidden=False)\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n required_scopes = [\"photos:read\"]\n filter_backends = (filters.SearchFilter,)\n search_fields = (\"title\", \"date\", \"slug\")\n\n\nclass AlbumDetailView(RetrieveAPIView):\n \"\"\"Returns the details of an album.\"\"\"\n\n serializer_class = AlbumSerializer\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n required_scopes = [\"photos:read\"]\n lookup_field = \"slug\"\n\n def retrieve(self, request, *args, **kwargs):\n if not services.is_album_accessible(request, self.get_object()):\n raise PermissionDenied\n return super().retrieve(request, *args, **kwargs)\n\n def get_queryset(self):\n photos = Photo.objects.select_properties(\"num_likes\")\n if self.request.member:\n photos = photos.annotate(\n member_likes=Count(\"likes\", filter=Q(likes__member=self.request.member))\n )\n return Album.objects.filter(hidden=False).prefetch_related(\n Prefetch(\"photo_set\", queryset=photos)\n )\n\n\nclass PhotoLikeView(APIView):\n permission_classes = [IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"photos:read\"]\n\n def get(self, request, **kwargs):\n photo_id = kwargs.get(\"pk\")\n try:\n photo = Photo.objects.filter(album__hidden=False).get(pk=photo_id)\n except Photo.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n return Response(\n {\n \"liked\": photo.likes.filter(member=request.member).exists(),\n \"num_likes\": photo.num_likes,\n },\n status=status.HTTP_200_OK,\n )\n\n def post(self, request, **kwargs):\n photo_id = kwargs.get(\"pk\")\n try:\n photo = Photo.objects.filter(album__hidden=False).get(pk=photo_id)\n except Photo.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n _, created = Like.objects.get_or_create(photo=photo, member=request.member)\n\n if created:\n return Response(\n {\n \"liked\": photo.likes.filter(member=request.member).exists(),\n \"num_likes\": photo.num_likes,\n },\n status=status.HTTP_201_CREATED,\n )\n return Response(\n {\n \"liked\": photo.likes.filter(member=request.member).exists(),\n \"num_likes\": photo.num_likes,\n },\n 
status=status.HTTP_200_OK,\n )\n\n def delete(self, request, **kwargs):\n photo_id = kwargs.get(\"pk\")\n try:\n photo = Photo.objects.filter(album__hidden=False).get(pk=photo_id)\n except Photo.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n try:\n like = Like.objects.filter(photo__album__hidden=False).get(\n member=request.member, photo__pk=photo_id\n )\n except Like.DoesNotExist:\n return Response(\n {\n \"liked\": False,\n \"num_likes\": photo.num_likes,\n },\n status=status.HTTP_204_NO_CONTENT,\n )\n\n like.delete()\n\n return Response(\n {\n \"liked\": False,\n \"num_likes\": photo.num_likes,\n },\n status=status.HTTP_202_ACCEPTED,\n )\n", "path": "website/photos/api/v2/views.py"}]} | 1,917 | 589 |
gh_patches_debug_38309 | rasdani/github-patches | git_diff | tornadoweb__tornado-2562 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update release notes and set version to 5.0b1
</issue>
<code>
[start of docs/conf.py]
1 # Ensure we get the local copy of tornado instead of what's on the standard path
2 import os
3 import sys
4 import time
5 sys.path.insert(0, os.path.abspath(".."))
6 import tornado
7
8 master_doc = "index"
9
10 project = "Tornado"
11 copyright = "2009-%s, The Tornado Authors" % time.strftime("%Y")
12
13 version = release = tornado.version
14
15 extensions = [
16 "sphinx.ext.autodoc",
17 "sphinx.ext.coverage",
18 "sphinx.ext.doctest",
19 "sphinx.ext.intersphinx",
20 "sphinx.ext.viewcode",
21 ]
22
23 primary_domain = 'py'
24 default_role = 'py:obj'
25
26 autodoc_member_order = "bysource"
27 autoclass_content = "both"
28 autodoc_inherit_docstrings = False
29
30 # Without this line sphinx includes a copy of object.__init__'s docstring
31 # on any class that doesn't define __init__.
32 # https://bitbucket.org/birkenfeld/sphinx/issue/1337/autoclass_content-both-uses-object__init__
33 autodoc_docstring_signature = False
34
35 coverage_skip_undoc_in_source = True
36 coverage_ignore_modules = [
37 "tornado.platform.asyncio",
38 "tornado.platform.caresresolver",
39 "tornado.platform.twisted",
40 ]
41 # I wish this could go in a per-module file...
42 coverage_ignore_classes = [
43 # tornado.gen
44 "Runner",
45
46 # tornado.web
47 "ChunkedTransferEncoding",
48 "GZipContentEncoding",
49 "OutputTransform",
50 "TemplateModule",
51 "url",
52
53 # tornado.websocket
54 "WebSocketProtocol",
55 "WebSocketProtocol13",
56 "WebSocketProtocol76",
57 ]
58
59 coverage_ignore_functions = [
60 # various modules
61 "doctests",
62 "main",
63
64 # tornado.escape
65 # parse_qs_bytes should probably be documented but it's complicated by
66 # having different implementations between py2 and py3.
67 "parse_qs_bytes",
68
69 # tornado.gen
70 "Multi",
71 ]
72
73 html_favicon = 'favicon.ico'
74
75 latex_documents = [
76 ('index', 'tornado.tex', 'Tornado Documentation', 'The Tornado Authors', 'manual', False),
77 ]
78
79 intersphinx_mapping = {
80 'python': ('https://docs.python.org/3.6/', None),
81 }
82
83 on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
84
85 # On RTD we can't import sphinx_rtd_theme, but it will be applied by
86 # default anyway. This block will use the same theme when building locally
87 # as on RTD.
88 if not on_rtd:
89 import sphinx_rtd_theme
90 html_theme = 'sphinx_rtd_theme'
91 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
92
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -1,14 +1,14 @@
# Ensure we get the local copy of tornado instead of what's on the standard path
import os
import sys
-import time
+
sys.path.insert(0, os.path.abspath(".."))
import tornado
master_doc = "index"
project = "Tornado"
-copyright = "2009-%s, The Tornado Authors" % time.strftime("%Y")
+copyright = "The Tornado Authors"
version = release = tornado.version
@@ -20,8 +20,8 @@
"sphinx.ext.viewcode",
]
-primary_domain = 'py'
-default_role = 'py:obj'
+primary_domain = "py"
+default_role = "py:obj"
autodoc_member_order = "bysource"
autoclass_content = "both"
@@ -42,14 +42,12 @@
coverage_ignore_classes = [
# tornado.gen
"Runner",
-
# tornado.web
"ChunkedTransferEncoding",
"GZipContentEncoding",
"OutputTransform",
"TemplateModule",
"url",
-
# tornado.websocket
"WebSocketProtocol",
"WebSocketProtocol13",
@@ -60,32 +58,36 @@
# various modules
"doctests",
"main",
-
# tornado.escape
# parse_qs_bytes should probably be documented but it's complicated by
# having different implementations between py2 and py3.
"parse_qs_bytes",
-
# tornado.gen
"Multi",
]
-html_favicon = 'favicon.ico'
+html_favicon = "favicon.ico"
latex_documents = [
- ('index', 'tornado.tex', 'Tornado Documentation', 'The Tornado Authors', 'manual', False),
+ (
+ "index",
+ "tornado.tex",
+ "Tornado Documentation",
+ "The Tornado Authors",
+ "manual",
+ False,
+ )
]
-intersphinx_mapping = {
- 'python': ('https://docs.python.org/3.6/', None),
-}
+intersphinx_mapping = {"python": ("https://docs.python.org/3.6/", None)}
-on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
+on_rtd = os.environ.get("READTHEDOCS", None) == "True"
# On RTD we can't import sphinx_rtd_theme, but it will be applied by
# default anyway. This block will use the same theme when building locally
# as on RTD.
if not on_rtd:
import sphinx_rtd_theme
- html_theme = 'sphinx_rtd_theme'
+
+ html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -1,14 +1,14 @@\n # Ensure we get the local copy of tornado instead of what's on the standard path\n import os\n import sys\n-import time\n+\n sys.path.insert(0, os.path.abspath(\"..\"))\n import tornado\n \n master_doc = \"index\"\n \n project = \"Tornado\"\n-copyright = \"2009-%s, The Tornado Authors\" % time.strftime(\"%Y\")\n+copyright = \"The Tornado Authors\"\n \n version = release = tornado.version\n \n@@ -20,8 +20,8 @@\n \"sphinx.ext.viewcode\",\n ]\n \n-primary_domain = 'py'\n-default_role = 'py:obj'\n+primary_domain = \"py\"\n+default_role = \"py:obj\"\n \n autodoc_member_order = \"bysource\"\n autoclass_content = \"both\"\n@@ -42,14 +42,12 @@\n coverage_ignore_classes = [\n # tornado.gen\n \"Runner\",\n-\n # tornado.web\n \"ChunkedTransferEncoding\",\n \"GZipContentEncoding\",\n \"OutputTransform\",\n \"TemplateModule\",\n \"url\",\n-\n # tornado.websocket\n \"WebSocketProtocol\",\n \"WebSocketProtocol13\",\n@@ -60,32 +58,36 @@\n # various modules\n \"doctests\",\n \"main\",\n-\n # tornado.escape\n # parse_qs_bytes should probably be documented but it's complicated by\n # having different implementations between py2 and py3.\n \"parse_qs_bytes\",\n-\n # tornado.gen\n \"Multi\",\n ]\n \n-html_favicon = 'favicon.ico'\n+html_favicon = \"favicon.ico\"\n \n latex_documents = [\n- ('index', 'tornado.tex', 'Tornado Documentation', 'The Tornado Authors', 'manual', False),\n+ (\n+ \"index\",\n+ \"tornado.tex\",\n+ \"Tornado Documentation\",\n+ \"The Tornado Authors\",\n+ \"manual\",\n+ False,\n+ )\n ]\n \n-intersphinx_mapping = {\n- 'python': ('https://docs.python.org/3.6/', None),\n-}\n+intersphinx_mapping = {\"python\": (\"https://docs.python.org/3.6/\", None)}\n \n-on_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n+on_rtd = os.environ.get(\"READTHEDOCS\", None) == \"True\"\n \n # On RTD we can't import sphinx_rtd_theme, but it will be applied by\n # default anyway. 
This block will use the same theme when building locally\n # as on RTD.\n if not on_rtd:\n import sphinx_rtd_theme\n- html_theme = 'sphinx_rtd_theme'\n+\n+ html_theme = \"sphinx_rtd_theme\"\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n", "issue": "Update release notes and set version to 5.0b1\n\n", "before_files": [{"content": "# Ensure we get the local copy of tornado instead of what's on the standard path\nimport os\nimport sys\nimport time\nsys.path.insert(0, os.path.abspath(\"..\"))\nimport tornado\n\nmaster_doc = \"index\"\n\nproject = \"Tornado\"\ncopyright = \"2009-%s, The Tornado Authors\" % time.strftime(\"%Y\")\n\nversion = release = tornado.version\n\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.viewcode\",\n]\n\nprimary_domain = 'py'\ndefault_role = 'py:obj'\n\nautodoc_member_order = \"bysource\"\nautoclass_content = \"both\"\nautodoc_inherit_docstrings = False\n\n# Without this line sphinx includes a copy of object.__init__'s docstring\n# on any class that doesn't define __init__.\n# https://bitbucket.org/birkenfeld/sphinx/issue/1337/autoclass_content-both-uses-object__init__\nautodoc_docstring_signature = False\n\ncoverage_skip_undoc_in_source = True\ncoverage_ignore_modules = [\n \"tornado.platform.asyncio\",\n \"tornado.platform.caresresolver\",\n \"tornado.platform.twisted\",\n]\n# I wish this could go in a per-module file...\ncoverage_ignore_classes = [\n # tornado.gen\n \"Runner\",\n\n # tornado.web\n \"ChunkedTransferEncoding\",\n \"GZipContentEncoding\",\n \"OutputTransform\",\n \"TemplateModule\",\n \"url\",\n\n # tornado.websocket\n \"WebSocketProtocol\",\n \"WebSocketProtocol13\",\n \"WebSocketProtocol76\",\n]\n\ncoverage_ignore_functions = [\n # various modules\n \"doctests\",\n \"main\",\n\n # tornado.escape\n # parse_qs_bytes should probably be documented but it's complicated by\n # having different implementations between py2 and py3.\n \"parse_qs_bytes\",\n\n # tornado.gen\n \"Multi\",\n]\n\nhtml_favicon = 'favicon.ico'\n\nlatex_documents = [\n ('index', 'tornado.tex', 'Tornado Documentation', 'The Tornado Authors', 'manual', False),\n]\n\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3.6/', None),\n}\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\n# On RTD we can't import sphinx_rtd_theme, but it will be applied by\n# default anyway. This block will use the same theme when building locally\n# as on RTD.\nif not on_rtd:\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n", "path": "docs/conf.py"}]} | 1,338 | 642 |
gh_patches_debug_15664 | rasdani/github-patches | git_diff | getredash__redash-909 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error on adding modules to python datasource
I'm trying to add a module to a python datasource, but it's failing with this traceback
```
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1475, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1461, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python2.7/dist-packages/flask_restful/__init__.py", line 477, in wrapper
resp = resource(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/flask_login.py", line 792, in decorated_view
return func(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/flask/views.py", line 84, in view
return self.dispatch_request(*args, **kwargs)
File "/opt/redash/redash.0.9.2.b1536/redash/handlers/base.py", line 19, in dispatch_request
return super(BaseResource, self).dispatch_request(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/flask_restful/__init__.py", line 587, in dispatch_request
resp = meth(*args, **kwargs)
File "/opt/redash/redash.0.9.2.b1536/redash/permissions.py", line 40, in decorated
return fn(*args, **kwargs)
File "/opt/redash/redash.0.9.2.b1536/redash/handlers/data_sources.py", line 38, in post
data_source.options.update(req['options'])
File "/opt/redash/redash.0.9.2.b1536/redash/utils/configuration.py", line 56, in update
if k in self.schema['secret'] and v == SECRET_PLACEHOLDER:
KeyError: 'secret'
```
</issue>
<code>
[start of redash/utils/configuration.py]
1 import json
2 import jsonschema
3 from jsonschema import ValidationError
4
5 SECRET_PLACEHOLDER = '--------'
6
7
8 class ConfigurationContainer(object):
9 def __init__(self, config, schema=None):
10 self._config = config
11 self.set_schema(schema)
12
13 def set_schema(self, schema):
14 self._schema = schema
15
16 @property
17 def schema(self):
18 if self._schema is None:
19 raise RuntimeError("Schema missing.")
20
21 return self._schema
22
23 def is_valid(self):
24 try:
25 self.validate()
26 except (ValidationError, ValueError):
27 return False
28
29 return True
30
31 def validate(self):
32 jsonschema.validate(self._config, self._schema)
33
34 def to_json(self):
35 return json.dumps(self._config)
36
37 def iteritems(self):
38 return self._config.iteritems()
39
40 def to_dict(self, mask_secrets=False):
41 if (mask_secrets is False or 'secret' not in self.schema):
42 return self._config
43
44 config = self._config.copy()
45 for key in config:
46 if key in self.schema['secret']:
47 config[key] = SECRET_PLACEHOLDER
48
49 return config
50
51 def update(self, new_config):
52 jsonschema.validate(new_config, self.schema)
53
54 config = {}
55 for k, v in new_config.iteritems():
56 if k in self.schema['secret'] and v == SECRET_PLACEHOLDER:
57 config[k] = self[k]
58 else:
59 config[k] = v
60
61 self._config = config
62
63 def get(self, *args, **kwargs):
64 return self._config.get(*args, **kwargs)
65
66 def __getitem__(self, item):
67 if item in self._config:
68 return self._config[item]
69
70 raise KeyError(item)
71
72 def __contains__(self, item):
73 return item in self._config
74
75 @classmethod
76 def from_json(cls, config_in_json):
77 return cls(json.loads(config_in_json))
78
[end of redash/utils/configuration.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/redash/utils/configuration.py b/redash/utils/configuration.py
--- a/redash/utils/configuration.py
+++ b/redash/utils/configuration.py
@@ -38,7 +38,7 @@
return self._config.iteritems()
def to_dict(self, mask_secrets=False):
- if (mask_secrets is False or 'secret' not in self.schema):
+ if mask_secrets is False or 'secret' not in self.schema:
return self._config
config = self._config.copy()
@@ -53,7 +53,7 @@
config = {}
for k, v in new_config.iteritems():
- if k in self.schema['secret'] and v == SECRET_PLACEHOLDER:
+ if k in self.schema.get('secret', []) and v == SECRET_PLACEHOLDER:
config[k] = self[k]
else:
config[k] = v
| {"golden_diff": "diff --git a/redash/utils/configuration.py b/redash/utils/configuration.py\n--- a/redash/utils/configuration.py\n+++ b/redash/utils/configuration.py\n@@ -38,7 +38,7 @@\n return self._config.iteritems()\n \n def to_dict(self, mask_secrets=False):\n- if (mask_secrets is False or 'secret' not in self.schema):\n+ if mask_secrets is False or 'secret' not in self.schema:\n return self._config\n \n config = self._config.copy()\n@@ -53,7 +53,7 @@\n \n config = {}\n for k, v in new_config.iteritems():\n- if k in self.schema['secret'] and v == SECRET_PLACEHOLDER:\n+ if k in self.schema.get('secret', []) and v == SECRET_PLACEHOLDER:\n config[k] = self[k]\n else:\n config[k] = v\n", "issue": "Error on adding modules to python datasource\nI'm trying to add a module to a python datasource, but it's failing with this traceback\n\n```\nTraceback (most recent call last):\n File \"/usr/local/lib/python2.7/dist-packages/flask/app.py\", line 1475, in full_dispatch_request\n rv = self.dispatch_request()\n File \"/usr/local/lib/python2.7/dist-packages/flask/app.py\", line 1461, in dispatch_request\n return self.view_functions[rule.endpoint](**req.view_args)\n File \"/usr/local/lib/python2.7/dist-packages/flask_restful/__init__.py\", line 477, in wrapper\n resp = resource(*args, **kwargs)\n File \"/usr/local/lib/python2.7/dist-packages/flask_login.py\", line 792, in decorated_view\n return func(*args, **kwargs)\n File \"/usr/local/lib/python2.7/dist-packages/flask/views.py\", line 84, in view\n return self.dispatch_request(*args, **kwargs)\n File \"/opt/redash/redash.0.9.2.b1536/redash/handlers/base.py\", line 19, in dispatch_request\n return super(BaseResource, self).dispatch_request(*args, **kwargs)\n File \"/usr/local/lib/python2.7/dist-packages/flask_restful/__init__.py\", line 587, in dispatch_request\n resp = meth(*args, **kwargs)\n File \"/opt/redash/redash.0.9.2.b1536/redash/permissions.py\", line 40, in decorated\n return fn(*args, **kwargs)\n File \"/opt/redash/redash.0.9.2.b1536/redash/handlers/data_sources.py\", line 38, in post\n data_source.options.update(req['options'])\n File \"/opt/redash/redash.0.9.2.b1536/redash/utils/configuration.py\", line 56, in update\n if k in self.schema['secret'] and v == SECRET_PLACEHOLDER:\nKeyError: 'secret'\n```\n\n", "before_files": [{"content": "import json\nimport jsonschema\nfrom jsonschema import ValidationError\n\nSECRET_PLACEHOLDER = '--------'\n\n\nclass ConfigurationContainer(object):\n def __init__(self, config, schema=None):\n self._config = config\n self.set_schema(schema)\n\n def set_schema(self, schema):\n self._schema = schema\n\n @property\n def schema(self):\n if self._schema is None:\n raise RuntimeError(\"Schema missing.\")\n\n return self._schema\n\n def is_valid(self):\n try:\n self.validate()\n except (ValidationError, ValueError):\n return False\n\n return True\n\n def validate(self):\n jsonschema.validate(self._config, self._schema)\n\n def to_json(self):\n return json.dumps(self._config)\n\n def iteritems(self):\n return self._config.iteritems()\n\n def to_dict(self, mask_secrets=False):\n if (mask_secrets is False or 'secret' not in self.schema):\n return self._config\n\n config = self._config.copy()\n for key in config:\n if key in self.schema['secret']:\n config[key] = SECRET_PLACEHOLDER\n\n return config\n\n def update(self, new_config):\n jsonschema.validate(new_config, self.schema)\n\n config = {}\n for k, v in new_config.iteritems():\n if k in self.schema['secret'] and v == SECRET_PLACEHOLDER:\n config[k] = 
self[k]\n else:\n config[k] = v\n\n self._config = config\n\n def get(self, *args, **kwargs):\n return self._config.get(*args, **kwargs)\n\n def __getitem__(self, item):\n if item in self._config:\n return self._config[item]\n\n raise KeyError(item)\n\n def __contains__(self, item):\n return item in self._config\n\n @classmethod\n def from_json(cls, config_in_json):\n return cls(json.loads(config_in_json))\n", "path": "redash/utils/configuration.py"}]} | 1,572 | 194 |
gh_patches_debug_9045 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-2967 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider att is broken
During the global build at 2021-06-02-14-42-40, spider **att** failed with **0 features** and **5433 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/logs/att.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/att.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/att.geojson))
</issue>
<code>
[start of locations/spiders/att.py]
1 import scrapy
2 import json
3 import re
4 from locations.items import GeojsonPointItem
5 from locations.hours import OpeningHours
6
7
8 DAY_MAPPING = {
9 "MONDAY": "Mo",
10 "TUESDAY": "Tu",
11 "WEDNESDAY": "We",
12 "THURSDAY": "Th",
13 "FRIDAY": "Fr",
14 "SATURDAY": "Sa",
15 "SUNDAY": "Su"
16 }
17
18
19 class ATTScraper(scrapy.Spider):
20 name = "att"
21 item_attributes = { 'brand': "AT&T", 'brand_wikidata': "Q35476" }
22 allowed_domains = ['www.att.com']
23 start_urls = (
24 'https://www.att.com/stores/us',
25 )
26 download_delay = 0.2
27
28 def parse_hours(self, store_hours):
29 opening_hours = OpeningHours()
30 store_data = json.loads(store_hours)
31
32 for store_day in store_data:
33 if len(store_day["intervals"]) < 1:
34 continue
35 day = DAY_MAPPING[store_day["day"]]
36 open_time = str(store_day["intervals"][0]["start"])
37 if open_time == '0':
38 open_time = '0000'
39 close_time = str(store_day["intervals"][0]["end"])
40 if close_time == '0':
41 close_time = '2359'
42 opening_hours.add_range(day=day,
43 open_time=open_time,
44 close_time=close_time,
45 time_format='%H%M'
46 )
47
48 return opening_hours.as_opening_hours()
49
50 def parse(self, response):
51 urls = response.xpath('//a[@class="Directory-listLink"]/@href').extract()
52 is_store_list = response.xpath('//a[@class="Teaser-titleLink"]/@href').extract()
53
54 if not urls and is_store_list:
55 urls = response.xpath('//a[@class="Teaser-titleLink"]/@href').extract()
56 for url in urls:
57 if url.count('/') >= 2:
58 yield scrapy.Request(response.urljoin(url), callback=self.parse_store)
59 else:
60 yield scrapy.Request(response.urljoin(url))
61
62 def parse_store(self, response):
63 ref = re.search(r'.+/(.+?)/?(?:\.html|$)', response.url).group(1)
64
65 properties = {
66 'ref': ref,
67 'name': response.xpath('normalize-space(//span[@class="LocationName-brand"]/text())').extract_first(),
68 'addr_full': response.xpath('normalize-space(//meta[@itemprop="streetAddress"]/@content)').extract_first(),
69 'city': response.xpath('normalize-space(//meta[@itemprop="addressLocality"]/@content)').extract_first(),
70 'state': response.xpath('normalize-space(//abbr[@itemprop="addressRegion"]/text())').extract_first(),
71 'postcode': response.xpath('normalize-space(//span[@itemprop="postalCode"]/text())').extract_first(),
72 'country': response.xpath('normalize-space(//abbr[@itemprop="addressCountry"]/text())').extract_first(),
73 'phone': response.xpath('normalize-space(//span[@itemprop="telephone"]//text())').extract_first(),
74 'website': response.url,
75 'lat': response.xpath('normalize-space(//meta[@itemprop="latitude"]/@content)').extract_first(),
76 'lon': response.xpath('normalize-space(//meta[@itemprop="longitude"]/@content)').extract_first(),
77 }
78
79 hours = response.xpath('//span[@class="c-location-hours-today js-location-hours"]/@data-days').extract_first()
80 properties['opening_hours'] = self.parse_hours(hours)
81
82 yield GeojsonPointItem(**properties)
83
[end of locations/spiders/att.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/att.py b/locations/spiders/att.py
--- a/locations/spiders/att.py
+++ b/locations/spiders/att.py
@@ -76,7 +76,7 @@
'lon': response.xpath('normalize-space(//meta[@itemprop="longitude"]/@content)').extract_first(),
}
- hours = response.xpath('//span[@class="c-location-hours-today js-location-hours"]/@data-days').extract_first()
+ hours = response.xpath('//span[@class="c-hours-today js-hours-today"]/@data-days').extract_first()
properties['opening_hours'] = self.parse_hours(hours)
yield GeojsonPointItem(**properties)
| {"golden_diff": "diff --git a/locations/spiders/att.py b/locations/spiders/att.py\n--- a/locations/spiders/att.py\n+++ b/locations/spiders/att.py\n@@ -76,7 +76,7 @@\n 'lon': response.xpath('normalize-space(//meta[@itemprop=\"longitude\"]/@content)').extract_first(),\n }\n \n- hours = response.xpath('//span[@class=\"c-location-hours-today js-location-hours\"]/@data-days').extract_first()\n+ hours = response.xpath('//span[@class=\"c-hours-today js-hours-today\"]/@data-days').extract_first()\n properties['opening_hours'] = self.parse_hours(hours)\n \n yield GeojsonPointItem(**properties)\n", "issue": "Spider att is broken\nDuring the global build at 2021-06-02-14-42-40, spider **att** failed with **0 features** and **5433 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/logs/att.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/att.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/att.geojson))\n", "before_files": [{"content": "import scrapy\nimport json\nimport re\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nDAY_MAPPING = {\n \"MONDAY\": \"Mo\",\n \"TUESDAY\": \"Tu\",\n \"WEDNESDAY\": \"We\",\n \"THURSDAY\": \"Th\",\n \"FRIDAY\": \"Fr\",\n \"SATURDAY\": \"Sa\",\n \"SUNDAY\": \"Su\"\n}\n\n\nclass ATTScraper(scrapy.Spider):\n name = \"att\"\n item_attributes = { 'brand': \"AT&T\", 'brand_wikidata': \"Q35476\" }\n allowed_domains = ['www.att.com']\n start_urls = (\n 'https://www.att.com/stores/us',\n )\n download_delay = 0.2\n\n def parse_hours(self, store_hours):\n opening_hours = OpeningHours()\n store_data = json.loads(store_hours)\n\n for store_day in store_data:\n if len(store_day[\"intervals\"]) < 1:\n continue\n day = DAY_MAPPING[store_day[\"day\"]]\n open_time = str(store_day[\"intervals\"][0][\"start\"])\n if open_time == '0':\n open_time = '0000'\n close_time = str(store_day[\"intervals\"][0][\"end\"])\n if close_time == '0':\n close_time = '2359'\n opening_hours.add_range(day=day,\n open_time=open_time,\n close_time=close_time,\n time_format='%H%M'\n )\n\n return opening_hours.as_opening_hours()\n\n def parse(self, response):\n urls = response.xpath('//a[@class=\"Directory-listLink\"]/@href').extract()\n is_store_list = response.xpath('//a[@class=\"Teaser-titleLink\"]/@href').extract()\n\n if not urls and is_store_list:\n urls = response.xpath('//a[@class=\"Teaser-titleLink\"]/@href').extract()\n for url in urls:\n if url.count('/') >= 2:\n yield scrapy.Request(response.urljoin(url), callback=self.parse_store)\n else:\n yield scrapy.Request(response.urljoin(url))\n\n def parse_store(self, response):\n ref = re.search(r'.+/(.+?)/?(?:\\.html|$)', response.url).group(1)\n\n properties = {\n 'ref': ref,\n 'name': response.xpath('normalize-space(//span[@class=\"LocationName-brand\"]/text())').extract_first(),\n 'addr_full': response.xpath('normalize-space(//meta[@itemprop=\"streetAddress\"]/@content)').extract_first(),\n 'city': response.xpath('normalize-space(//meta[@itemprop=\"addressLocality\"]/@content)').extract_first(),\n 'state': response.xpath('normalize-space(//abbr[@itemprop=\"addressRegion\"]/text())').extract_first(),\n 'postcode': response.xpath('normalize-space(//span[@itemprop=\"postalCode\"]/text())').extract_first(),\n 'country': response.xpath('normalize-space(//abbr[@itemprop=\"addressCountry\"]/text())').extract_first(),\n 'phone': 
response.xpath('normalize-space(//span[@itemprop=\"telephone\"]//text())').extract_first(),\n 'website': response.url,\n 'lat': response.xpath('normalize-space(//meta[@itemprop=\"latitude\"]/@content)').extract_first(),\n 'lon': response.xpath('normalize-space(//meta[@itemprop=\"longitude\"]/@content)').extract_first(),\n }\n\n hours = response.xpath('//span[@class=\"c-location-hours-today js-location-hours\"]/@data-days').extract_first()\n properties['opening_hours'] = self.parse_hours(hours)\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/att.py"}]} | 1,669 | 156 |
gh_patches_debug_8441 | rasdani/github-patches | git_diff | pyqtgraph__pyqtgraph-953 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
HistogramLUTWidget kargs bug?
# lut_widget = HistogramLUTWidget(background='w')
File "/usr/local/lib/python3.4/dist-packages/pyqtgraph-0.9.8-py3.4.egg/pyqtgraph/widgets/HistogramLUTWidget.py", line 18, in **init**
self.item = HistogramLUTItem(_args, *_kargs)
# TypeError: **init**() got an unexpected keyword argument 'background'
I can fix it by:
class HistogramLUTWidget(pg.GraphicsView):
```
def __init__(self, parent=None, *args, **kargs):
# background = kargs.get('background', 'default')
background = kargs.pop('background', 'default')
```
...
</issue>
<code>
[start of pyqtgraph/widgets/HistogramLUTWidget.py]
1 """
2 Widget displaying an image histogram along with gradient editor. Can be used to adjust the appearance of images.
3 This is a wrapper around HistogramLUTItem
4 """
5
6 from ..Qt import QtGui, QtCore
7 from .GraphicsView import GraphicsView
8 from ..graphicsItems.HistogramLUTItem import HistogramLUTItem
9
10 __all__ = ['HistogramLUTWidget']
11
12
13 class HistogramLUTWidget(GraphicsView):
14
15 def __init__(self, parent=None, *args, **kargs):
16 background = kargs.get('background', 'default')
17 GraphicsView.__init__(self, parent, useOpenGL=False, background=background)
18 self.item = HistogramLUTItem(*args, **kargs)
19 self.setCentralItem(self.item)
20 self.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
21 self.setMinimumWidth(95)
22
23
24 def sizeHint(self):
25 return QtCore.QSize(115, 200)
26
27
28
29 def __getattr__(self, attr):
30 return getattr(self.item, attr)
31
32
33
34
[end of pyqtgraph/widgets/HistogramLUTWidget.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyqtgraph/widgets/HistogramLUTWidget.py b/pyqtgraph/widgets/HistogramLUTWidget.py
--- a/pyqtgraph/widgets/HistogramLUTWidget.py
+++ b/pyqtgraph/widgets/HistogramLUTWidget.py
@@ -13,7 +13,7 @@
class HistogramLUTWidget(GraphicsView):
def __init__(self, parent=None, *args, **kargs):
- background = kargs.get('background', 'default')
+ background = kargs.pop('background', 'default')
GraphicsView.__init__(self, parent, useOpenGL=False, background=background)
self.item = HistogramLUTItem(*args, **kargs)
self.setCentralItem(self.item)
| {"golden_diff": "diff --git a/pyqtgraph/widgets/HistogramLUTWidget.py b/pyqtgraph/widgets/HistogramLUTWidget.py\n--- a/pyqtgraph/widgets/HistogramLUTWidget.py\n+++ b/pyqtgraph/widgets/HistogramLUTWidget.py\n@@ -13,7 +13,7 @@\n class HistogramLUTWidget(GraphicsView):\n \n def __init__(self, parent=None, *args, **kargs):\n- background = kargs.get('background', 'default')\n+ background = kargs.pop('background', 'default')\n GraphicsView.__init__(self, parent, useOpenGL=False, background=background)\n self.item = HistogramLUTItem(*args, **kargs)\n self.setCentralItem(self.item)\n", "issue": "HistogramLUTWidget kargs bug?\n# lut_widget = HistogramLUTWidget(background='w')\n\n File \"/usr/local/lib/python3.4/dist-packages/pyqtgraph-0.9.8-py3.4.egg/pyqtgraph/widgets/HistogramLUTWidget.py\", line 18, in **init**\n self.item = HistogramLUTItem(_args, *_kargs)\n# TypeError: **init**() got an unexpected keyword argument 'background'\n\nI can fix it by:\n\nclass HistogramLUTWidget(pg.GraphicsView):\n\n```\ndef __init__(self, parent=None, *args, **kargs):\n # background = kargs.get('background', 'default')\n background = kargs.pop('background', 'default')\n```\n\n...\n\n", "before_files": [{"content": "\"\"\"\nWidget displaying an image histogram along with gradient editor. Can be used to adjust the appearance of images.\nThis is a wrapper around HistogramLUTItem\n\"\"\"\n\nfrom ..Qt import QtGui, QtCore\nfrom .GraphicsView import GraphicsView\nfrom ..graphicsItems.HistogramLUTItem import HistogramLUTItem\n\n__all__ = ['HistogramLUTWidget']\n\n\nclass HistogramLUTWidget(GraphicsView):\n \n def __init__(self, parent=None, *args, **kargs):\n background = kargs.get('background', 'default')\n GraphicsView.__init__(self, parent, useOpenGL=False, background=background)\n self.item = HistogramLUTItem(*args, **kargs)\n self.setCentralItem(self.item)\n self.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)\n self.setMinimumWidth(95)\n \n\n def sizeHint(self):\n return QtCore.QSize(115, 200)\n \n \n\n def __getattr__(self, attr):\n return getattr(self.item, attr)\n\n\n\n", "path": "pyqtgraph/widgets/HistogramLUTWidget.py"}]} | 991 | 159 |