problem_id (stringlengths, 18-22) | source (stringclasses, 1 value) | task_type (stringclasses, 1 value) | in_source_id (stringlengths, 13-58) | prompt (stringlengths, 1.1k-25.4k) | golden_diff (stringlengths, 145-5.13k) | verification_info (stringlengths, 582-39.1k) | num_tokens (int64, 271-4.1k) | num_tokens_diff (int64, 47-1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_8837 | rasdani/github-patches | git_diff | Netflix__lemur-707 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ensure rotation column == 'False' during migration.
Null values creates problems during validation.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lemur/migrations/versions/131ec6accff5_.py`
Content:
```
1 """Ensuring we have endpoint updated times and certificate rotation availability.
2
3 Revision ID: 131ec6accff5
4 Revises: e3691fc396e9
5 Create Date: 2016-12-07 17:29:42.049986
6
7 """
8
9 # revision identifiers, used by Alembic.
10 revision = '131ec6accff5'
11 down_revision = 'e3691fc396e9'
12
13 from alembic import op
14 import sqlalchemy as sa
15
16
17 def upgrade():
18 # ### commands auto generated by Alembic - please adjust! ###
19 op.add_column('certificates', sa.Column('rotation', sa.Boolean(), nullable=True))
20 op.add_column('endpoints', sa.Column('last_updated', sa.DateTime(), server_default=sa.text('now()'), nullable=False))
21 # ### end Alembic commands ###
22
23
24 def downgrade():
25 # ### commands auto generated by Alembic - please adjust! ###
26 op.drop_column('endpoints', 'last_updated')
27 op.drop_column('certificates', 'rotation')
28 # ### end Alembic commands ###
29
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lemur/migrations/versions/131ec6accff5_.py b/lemur/migrations/versions/131ec6accff5_.py
--- a/lemur/migrations/versions/131ec6accff5_.py
+++ b/lemur/migrations/versions/131ec6accff5_.py
@@ -16,7 +16,7 @@
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
- op.add_column('certificates', sa.Column('rotation', sa.Boolean(), nullable=True))
+ op.add_column('certificates', sa.Column('rotation', sa.Boolean(), nullable=False, server_default=False))
op.add_column('endpoints', sa.Column('last_updated', sa.DateTime(), server_default=sa.text('now()'), nullable=False))
# ### end Alembic commands ###
|
{"golden_diff": "diff --git a/lemur/migrations/versions/131ec6accff5_.py b/lemur/migrations/versions/131ec6accff5_.py\n--- a/lemur/migrations/versions/131ec6accff5_.py\n+++ b/lemur/migrations/versions/131ec6accff5_.py\n@@ -16,7 +16,7 @@\n \n def upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n- op.add_column('certificates', sa.Column('rotation', sa.Boolean(), nullable=True))\n+ op.add_column('certificates', sa.Column('rotation', sa.Boolean(), nullable=False, server_default=False))\n op.add_column('endpoints', sa.Column('last_updated', sa.DateTime(), server_default=sa.text('now()'), nullable=False))\n # ### end Alembic commands ###\n", "issue": "Ensure rotation column == 'False' during migration.\nNull values creates problems during validation.\n", "before_files": [{"content": "\"\"\"Ensuring we have endpoint updated times and certificate rotation availability.\n\nRevision ID: 131ec6accff5\nRevises: e3691fc396e9\nCreate Date: 2016-12-07 17:29:42.049986\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '131ec6accff5'\ndown_revision = 'e3691fc396e9'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('certificates', sa.Column('rotation', sa.Boolean(), nullable=True))\n op.add_column('endpoints', sa.Column('last_updated', sa.DateTime(), server_default=sa.text('now()'), nullable=False))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('endpoints', 'last_updated')\n op.drop_column('certificates', 'rotation')\n # ### end Alembic commands ###\n", "path": "lemur/migrations/versions/131ec6accff5_.py"}], "after_files": [{"content": "\"\"\"Ensuring we have endpoint updated times and certificate rotation availability.\n\nRevision ID: 131ec6accff5\nRevises: e3691fc396e9\nCreate Date: 2016-12-07 17:29:42.049986\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '131ec6accff5'\ndown_revision = 'e3691fc396e9'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('certificates', sa.Column('rotation', sa.Boolean(), nullable=False, server_default=False))\n op.add_column('endpoints', sa.Column('last_updated', sa.DateTime(), server_default=sa.text('now()'), nullable=False))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('endpoints', 'last_updated')\n op.drop_column('certificates', 'rotation')\n # ### end Alembic commands ###\n", "path": "lemur/migrations/versions/131ec6accff5_.py"}]}
| 594 | 197 |
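
For context on the fix in the row above: adding the boolean column as NOT NULL with a server-side default backfills existing rows with FALSE instead of NULL, which is what the issue asks for. A minimal Alembic sketch of that pattern, with placeholder revision identifiers and using `sa.false()` as one common spelling of a boolean server default:

```python
# Hedged sketch of the migration pattern (placeholder revision ids, not the
# dataset's actual file): add a boolean column as NOT NULL with a server-side
# default so existing rows get FALSE instead of NULL.
from alembic import op
import sqlalchemy as sa

revision = '000000000000'   # placeholder
down_revision = None        # placeholder


def upgrade():
    op.add_column(
        'certificates',
        sa.Column('rotation', sa.Boolean(), nullable=False, server_default=sa.false()),
    )


def downgrade():
    op.drop_column('certificates', 'rotation')
```
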
gh_patches_debug_60676 | rasdani/github-patches | git_diff | sosreport__sos-3322 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
containerd plugin does not enable when containerd is installed from docker repo
The Fedora/RHEL RPM follows the naming conventions [found at containerd.io](https://containerd.io/downloads/), and an rpm name of `containerd`. However, when containerd is installed [from docker repositories](https://download.docker.com/linux/centos/8/x86_64/stable/Packages/) on EL8 distributions the RPM is actually named `containerd.io`, resulting in the plugin not enabling itself even though containerd is installed.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sos/report/plugins/containerd.py`
Content:
```
1 # This file is part of the sos project: https://github.com/sosreport/sos
2 #
3 # This copyrighted material is made available to anyone wishing to use,
4 # modify, copy, or redistribute it subject to the terms and conditions of
5 # version 2 of the GNU General Public License.
6 #
7 # See the LICENSE file in the source distribution for further information.
8
9 from sos.report.plugins import (Plugin, RedHatPlugin, UbuntuPlugin, CosPlugin)
10
11
12 class Containerd(Plugin, RedHatPlugin, UbuntuPlugin, CosPlugin):
13
14 short_desc = 'Containerd containers'
15 plugin_name = 'containerd'
16 profiles = ('container',)
17 packages = ('containerd',)
18
19 def setup(self):
20 self.add_copy_spec([
21 "/etc/containerd/",
22 ])
23
24 self.add_cmd_output('containerd config dump')
25
26 # collect the containerd logs.
27 self.add_journal(units='containerd')
28
29 # vim: set et ts=4 sw=4 :
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sos/report/plugins/containerd.py b/sos/report/plugins/containerd.py
--- a/sos/report/plugins/containerd.py
+++ b/sos/report/plugins/containerd.py
@@ -14,7 +14,7 @@
short_desc = 'Containerd containers'
plugin_name = 'containerd'
profiles = ('container',)
- packages = ('containerd',)
+ packages = ('containerd', 'containerd.io',)
def setup(self):
self.add_copy_spec([
|
{"golden_diff": "diff --git a/sos/report/plugins/containerd.py b/sos/report/plugins/containerd.py\n--- a/sos/report/plugins/containerd.py\n+++ b/sos/report/plugins/containerd.py\n@@ -14,7 +14,7 @@\n short_desc = 'Containerd containers'\n plugin_name = 'containerd'\n profiles = ('container',)\n- packages = ('containerd',)\n+ packages = ('containerd', 'containerd.io',)\n \n def setup(self):\n self.add_copy_spec([\n", "issue": "containerd plugin does not enable when containerd is installed from docker repo\nThe Fedora/RHEL RPM follows the naming conventions [found at containerd.io](https://containerd.io/downloads/), and an rpm name of `containerd`. However, when containerd is installed [from docker repositories](https://download.docker.com/linux/centos/8/x86_64/stable/Packages/) on EL8 distributions the RPM is actually named `containerd.io`, resulting in the plugin not enabling itself even though containerd is installed.\r\n\n", "before_files": [{"content": "# This file is part of the sos project: https://github.com/sosreport/sos\n#\n# This copyrighted material is made available to anyone wishing to use,\n# modify, copy, or redistribute it subject to the terms and conditions of\n# version 2 of the GNU General Public License.\n#\n# See the LICENSE file in the source distribution for further information.\n\nfrom sos.report.plugins import (Plugin, RedHatPlugin, UbuntuPlugin, CosPlugin)\n\n\nclass Containerd(Plugin, RedHatPlugin, UbuntuPlugin, CosPlugin):\n\n short_desc = 'Containerd containers'\n plugin_name = 'containerd'\n profiles = ('container',)\n packages = ('containerd',)\n\n def setup(self):\n self.add_copy_spec([\n \"/etc/containerd/\",\n ])\n\n self.add_cmd_output('containerd config dump')\n\n # collect the containerd logs.\n self.add_journal(units='containerd')\n\n# vim: set et ts=4 sw=4 :\n", "path": "sos/report/plugins/containerd.py"}], "after_files": [{"content": "# This file is part of the sos project: https://github.com/sosreport/sos\n#\n# This copyrighted material is made available to anyone wishing to use,\n# modify, copy, or redistribute it subject to the terms and conditions of\n# version 2 of the GNU General Public License.\n#\n# See the LICENSE file in the source distribution for further information.\n\nfrom sos.report.plugins import (Plugin, RedHatPlugin, UbuntuPlugin, CosPlugin)\n\n\nclass Containerd(Plugin, RedHatPlugin, UbuntuPlugin, CosPlugin):\n\n short_desc = 'Containerd containers'\n plugin_name = 'containerd'\n profiles = ('container',)\n packages = ('containerd', 'containerd.io',)\n\n def setup(self):\n self.add_copy_spec([\n \"/etc/containerd/\",\n ])\n\n self.add_cmd_output('containerd config dump')\n\n # collect the containerd logs.\n self.add_journal(units='containerd')\n\n# vim: set et ts=4 sw=4 :\n", "path": "sos/report/plugins/containerd.py"}]}
| 630 | 108 |
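
The row above hinges on the same software shipping under two RPM names, `containerd` (Fedora/RHEL repos) and `containerd.io` (docker repos), so the plugin must list both trigger packages. A small diagnostic sketch, assuming an RPM-based host and the standard `rpm -q` exit-code behaviour, that checks which of the two names is actually installed:

```python
# Hedged diagnostic sketch (not part of sos): report which of the two RPM
# names is present, mirroring why the plugin lists both package names.
import subprocess


def installed_containerd_package():
    for name in ("containerd", "containerd.io"):
        result = subprocess.run(
            ["rpm", "-q", name],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
        if result.returncode == 0:  # rpm -q exits 0 when the package is installed
            return name
    return None


print(installed_containerd_package())
```
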
gh_patches_debug_4179 | rasdani/github-patches | git_diff | GeotrekCE__Geotrek-admin-1258 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Control which picture will be used as main illustration (trek, POI)
- Could be a new attribute in attachment model
- Could be a foreign key in trek/poi/.. i.e. `PicturesMixin`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `geotrek/common/mixins.py`
Content:
```
1 import os
2 import logging
3 import shutil
4 import datetime
5
6 from django.conf import settings
7 from django.db.models import Manager as DefaultManager
8 from django.db import models
9 from django.utils.translation import ugettext_lazy as _
10 from django.template.defaultfilters import slugify
11
12 from easy_thumbnails.alias import aliases
13 from easy_thumbnails.exceptions import InvalidImageFormatError
14 from easy_thumbnails.files import get_thumbnailer
15
16 from geotrek.common.utils import classproperty
17
18
19 logger = logging.getLogger(__name__)
20
21
22 class TimeStampedModelMixin(models.Model):
23 # Computed values (managed at DB-level with triggers)
24 date_insert = models.DateTimeField(auto_now_add=True, editable=False, verbose_name=_(u"Insertion date"), db_column='date_insert')
25 date_update = models.DateTimeField(auto_now=True, editable=False, verbose_name=_(u"Update date"), db_column='date_update')
26
27 class Meta:
28 abstract = True
29
30 def reload(self, fromdb=None):
31 """Reload fields computed at DB-level (triggers)
32 """
33 if fromdb is None:
34 fromdb = self.__class__.objects.get(pk=self.pk)
35 self.date_insert = fromdb.date_insert
36 self.date_update = fromdb.date_update
37 return self
38
39
40 class NoDeleteMixin(models.Model):
41 deleted = models.BooleanField(editable=False, default=False, db_column='supprime', verbose_name=_(u"Deleted"))
42
43 def delete(self, force=False, using=None, **kwargs):
44 if force:
45 super(NoDeleteMixin, self).delete(using, **kwargs)
46 else:
47 self.deleted = True
48 self.save(using=using)
49
50 class Meta:
51 abstract = True
52
53 def reload(self, fromdb=None):
54 """Reload fields computed at DB-level (triggers)
55 """
56 if fromdb is None:
57 fromdb = self.__class__.objects.get(pk=self.pk)
58 self.deleted = fromdb.deleted
59 return self
60
61 @classmethod
62 def get_manager_cls(cls, parent_mgr_cls=DefaultManager):
63
64 class NoDeleteManager(parent_mgr_cls):
65 # Use this manager when walking through FK/M2M relationships
66 use_for_related_fields = True
67
68 # Filter out deleted objects
69 def existing(self):
70 return self.get_queryset().filter(deleted=False)
71
72 return NoDeleteManager
73
74
75 class PicturesMixin(object):
76 """A common class to share code between Trek and POI regarding
77 attached pictures"""
78
79 @property
80 def pictures(self):
81 """
82 Find first image among attachments.
83 Since we allow screenshot to be overriden by attachments
84 named 'mapimage', filter it from object pictures.
85 """
86 if hasattr(self, '_pictures'):
87 return self._pictures
88 return [a for a in self.attachments.all() if a.is_image
89 and a.title != 'mapimage']
90
91 @pictures.setter
92 def pictures(self, values):
93 self._pictures = values
94
95 @property
96 def serializable_pictures(self):
97 serialized = []
98 for picture in self.pictures:
99 thumbnailer = get_thumbnailer(picture.attachment_file)
100 try:
101 thdetail = thumbnailer.get_thumbnail(aliases.get('medium'))
102 thurl = os.path.join(settings.MEDIA_URL, thdetail.name)
103 except InvalidImageFormatError:
104 thurl = None
105 logger.error(_("Image %s invalid or missing from disk.") % picture.attachment_file)
106 pass
107 serialized.append({
108 'author': picture.author,
109 'title': picture.title,
110 'legend': picture.legend,
111 'url': thurl
112 })
113 return serialized
114
115 @property
116 def picture_print(self):
117 for picture in self.pictures:
118 thumbnailer = get_thumbnailer(picture.attachment_file)
119 try:
120 return thumbnailer.get_thumbnail(aliases.get('print'))
121 except InvalidImageFormatError:
122 logger.error(_("Image %s invalid or missing from disk.") % picture.attachment_file)
123 pass
124 return None
125
126 @property
127 def thumbnail(self):
128 for picture in self.pictures:
129 thumbnailer = get_thumbnailer(picture.attachment_file)
130 try:
131 return thumbnailer.get_thumbnail(aliases.get('small-square'))
132 except InvalidImageFormatError:
133 logger.error(_("Image %s invalid or missing from disk.") % picture.attachment_file)
134 pass
135 return None
136
137 @classproperty
138 def thumbnail_verbose_name(cls):
139 return _("Thumbnail")
140
141 @property
142 def thumbnail_display(self):
143 thumbnail = self.thumbnail
144 if thumbnail is None:
145 return _("None")
146 return '<img height="20" width="20" src="%s"/>' % os.path.join(settings.MEDIA_URL, thumbnail.name)
147
148 @property
149 def thumbnail_csv_display(self):
150 return '' if self.thumbnail is None else os.path.join(settings.MEDIA_URL, self.thumbnail.name)
151
152 @property
153 def serializable_thumbnail(self):
154 th = self.thumbnail
155 if not th:
156 return None
157 return os.path.join(settings.MEDIA_URL, th.name)
158
159
160 class BasePublishableMixin(models.Model):
161 """ Basic fields to control publication of objects.
162
163 It is used for flat pages and publishable entities.
164 """
165 published = models.BooleanField(verbose_name=_(u"Published"), default=False,
166 help_text=_(u"Online"), db_column='public')
167 publication_date = models.DateField(verbose_name=_(u"Publication date"),
168 null=True, blank=True, editable=False,
169 db_column='date_publication')
170
171 class Meta:
172 abstract = True
173
174 def save(self, *args, **kwargs):
175 if self.publication_date is None and self.any_published:
176 self.publication_date = datetime.date.today()
177 if self.publication_date is not None and not self.any_published:
178 self.publication_date = None
179 super(BasePublishableMixin, self).save(*args, **kwargs)
180
181 @property
182 def any_published(self):
183 """Returns True if the object is published in at least one of the language
184 """
185 if not settings.PUBLISHED_BY_LANG:
186 return self.published
187
188 for l in settings.MAPENTITY_CONFIG['TRANSLATED_LANGUAGES']:
189 if getattr(self, 'published_%s' % l[0], False):
190 return True
191 return False
192
193 @property
194 def published_status(self):
195 """Returns the publication status by language.
196 """
197 status = []
198 for l in settings.MAPENTITY_CONFIG['TRANSLATED_LANGUAGES']:
199 if settings.PUBLISHED_BY_LANG:
200 published = getattr(self, 'published_%s' % l[0], None) or False
201 else:
202 published = self.published
203 status.append({
204 'lang': l[0],
205 'language': l[1],
206 'status': published
207 })
208 return status
209
210
211 class PublishableMixin(BasePublishableMixin):
212 """A mixin that contains all necessary stuff to publish objects
213 (e.g. on Geotrek-rando).
214
215 It will only work with MapEntity models.
216
217 Initially, it was part of the ``trekking.Trek`` class. But now, all kinds of information
218 can be published (c.f. PN Cevennes project).
219 """
220 name = models.CharField(verbose_name=_(u"Name"), max_length=128,
221 help_text=_(u"Public name (Change carefully)"), db_column='nom')
222
223 class Meta:
224 abstract = True
225
226 @property
227 def slug(self):
228 return slugify(self.name)
229
230 @property
231 def name_display(self):
232 s = u'<a data-pk="%s" href="%s" title="%s">%s</a>' % (self.pk,
233 self.get_detail_url(),
234 self.name,
235 self.name)
236 if self.published:
237 s = u'<span class="badge badge-success" title="%s">☆</span> ' % _("Published") + s
238 return s
239
240 @property
241 def name_csv_display(self):
242 return unicode(self.name)
243
244 @models.permalink
245 def get_document_public_url(self):
246 raise NotImplementedError
247
248 def is_complete(self):
249 """It should also have a description, etc.
250 """
251 modelname = self.__class__._meta.object_name.lower()
252 mandatory = settings.COMPLETENESS_FIELDS.get(modelname, [])
253 for f in mandatory:
254 if not getattr(self, f):
255 return False
256 return True
257
258 def is_publishable(self):
259 return self.is_complete() and self.has_geom_valid()
260
261 def has_geom_valid(self):
262 return self.geom is not None
263
264 def prepare_map_image(self, rooturl):
265 """
266 We override the default behaviour of map image preparation :
267 if the object has a attached picture file with *title* ``mapimage``, we use it
268 as a screenshot.
269 TODO: remove this when screenshots are bullet-proof ?
270 """
271 attached = None
272 for picture in [a for a in self.attachments.all() if a.is_image]:
273 if picture.title == 'mapimage':
274 attached = picture.attachment_file
275 break
276 if attached is None:
277 super(PublishableMixin, self).prepare_map_image(rooturl)
278 else:
279 # Copy it along other screenshots
280 src = os.path.join(settings.MEDIA_ROOT, attached.name)
281 dst = self.get_map_image_path()
282 shutil.copyfile(src, dst)
283
284 def get_geom_aspect_ratio(self):
285 """ Force object aspect ratio to fit height and width of
286 image in public document.
287 """
288 modelname = self.__class__._meta.object_name.lower()
289 s = settings.EXPORT_MAP_IMAGE_SIZE[modelname]
290 return float(s[0]) / s[1]
291
292 def get_attachment_print(self):
293 """
294 Look in attachment if there is document to be used as print version
295 """
296 overriden = self.attachments.filter(title="docprint").get()
297 # Must have OpenOffice document mimetype
298 if overriden.mimetype != ['application', 'vnd.oasis.opendocument.text']:
299 raise overriden.DoesNotExist()
300 return os.path.join(settings.MEDIA_ROOT, overriden.attachment_file.name)
301
302
303 class PictogramMixin(models.Model):
304 pictogram = models.FileField(verbose_name=_(u"Pictogram"), upload_to=settings.UPLOAD_DIR,
305 db_column='picto', max_length=512, null=True)
306
307 class Meta:
308 abstract = True
309
310 def pictogram_img(self):
311 return u'<img src="%s" />' % (self.pictogram.url if self.pictogram else "")
312 pictogram_img.short_description = _("Pictogram")
313 pictogram_img.allow_tags = True
314
315 def get_pictogram_url(self):
316 return self.pictogram.url if self.pictogram else None
317
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/geotrek/common/mixins.py b/geotrek/common/mixins.py
--- a/geotrek/common/mixins.py
+++ b/geotrek/common/mixins.py
@@ -85,7 +85,8 @@
"""
if hasattr(self, '_pictures'):
return self._pictures
- return [a for a in self.attachments.all() if a.is_image
+ all_attachments = self.attachments.order_by('-starred').all()
+ return [a for a in all_attachments if a.is_image
and a.title != 'mapimage']
@pictures.setter
|
{"golden_diff": "diff --git a/geotrek/common/mixins.py b/geotrek/common/mixins.py\n--- a/geotrek/common/mixins.py\n+++ b/geotrek/common/mixins.py\n@@ -85,7 +85,8 @@\n \"\"\"\n if hasattr(self, '_pictures'):\n return self._pictures\n- return [a for a in self.attachments.all() if a.is_image\n+ all_attachments = self.attachments.order_by('-starred').all()\n+ return [a for a in all_attachments if a.is_image\n and a.title != 'mapimage']\n \n @pictures.setter\n", "issue": "Control which picture will be used as main illustration (trek, POI)\n- Could be a new attribute in attachment model\n- Could be a foreign key in trek/poi/.. i.e. `PicturesMixin`\n\n", "before_files": [{"content": "import os\nimport logging\nimport shutil\nimport datetime\n\nfrom django.conf import settings\nfrom django.db.models import Manager as DefaultManager\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.template.defaultfilters import slugify\n\nfrom easy_thumbnails.alias import aliases\nfrom easy_thumbnails.exceptions import InvalidImageFormatError\nfrom easy_thumbnails.files import get_thumbnailer\n\nfrom geotrek.common.utils import classproperty\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass TimeStampedModelMixin(models.Model):\n # Computed values (managed at DB-level with triggers)\n date_insert = models.DateTimeField(auto_now_add=True, editable=False, verbose_name=_(u\"Insertion date\"), db_column='date_insert')\n date_update = models.DateTimeField(auto_now=True, editable=False, verbose_name=_(u\"Update date\"), db_column='date_update')\n\n class Meta:\n abstract = True\n\n def reload(self, fromdb=None):\n \"\"\"Reload fields computed at DB-level (triggers)\n \"\"\"\n if fromdb is None:\n fromdb = self.__class__.objects.get(pk=self.pk)\n self.date_insert = fromdb.date_insert\n self.date_update = fromdb.date_update\n return self\n\n\nclass NoDeleteMixin(models.Model):\n deleted = models.BooleanField(editable=False, default=False, db_column='supprime', verbose_name=_(u\"Deleted\"))\n\n def delete(self, force=False, using=None, **kwargs):\n if force:\n super(NoDeleteMixin, self).delete(using, **kwargs)\n else:\n self.deleted = True\n self.save(using=using)\n\n class Meta:\n abstract = True\n\n def reload(self, fromdb=None):\n \"\"\"Reload fields computed at DB-level (triggers)\n \"\"\"\n if fromdb is None:\n fromdb = self.__class__.objects.get(pk=self.pk)\n self.deleted = fromdb.deleted\n return self\n\n @classmethod\n def get_manager_cls(cls, parent_mgr_cls=DefaultManager):\n\n class NoDeleteManager(parent_mgr_cls):\n # Use this manager when walking through FK/M2M relationships\n use_for_related_fields = True\n\n # Filter out deleted objects\n def existing(self):\n return self.get_queryset().filter(deleted=False)\n\n return NoDeleteManager\n\n\nclass PicturesMixin(object):\n \"\"\"A common class to share code between Trek and POI regarding\n attached pictures\"\"\"\n\n @property\n def pictures(self):\n \"\"\"\n Find first image among attachments.\n Since we allow screenshot to be overriden by attachments\n named 'mapimage', filter it from object pictures.\n \"\"\"\n if hasattr(self, '_pictures'):\n return self._pictures\n return [a for a in self.attachments.all() if a.is_image\n and a.title != 'mapimage']\n\n @pictures.setter\n def pictures(self, values):\n self._pictures = values\n\n @property\n def serializable_pictures(self):\n serialized = []\n for picture in self.pictures:\n thumbnailer = get_thumbnailer(picture.attachment_file)\n try:\n thdetail = 
thumbnailer.get_thumbnail(aliases.get('medium'))\n thurl = os.path.join(settings.MEDIA_URL, thdetail.name)\n except InvalidImageFormatError:\n thurl = None\n logger.error(_(\"Image %s invalid or missing from disk.\") % picture.attachment_file)\n pass\n serialized.append({\n 'author': picture.author,\n 'title': picture.title,\n 'legend': picture.legend,\n 'url': thurl\n })\n return serialized\n\n @property\n def picture_print(self):\n for picture in self.pictures:\n thumbnailer = get_thumbnailer(picture.attachment_file)\n try:\n return thumbnailer.get_thumbnail(aliases.get('print'))\n except InvalidImageFormatError:\n logger.error(_(\"Image %s invalid or missing from disk.\") % picture.attachment_file)\n pass\n return None\n\n @property\n def thumbnail(self):\n for picture in self.pictures:\n thumbnailer = get_thumbnailer(picture.attachment_file)\n try:\n return thumbnailer.get_thumbnail(aliases.get('small-square'))\n except InvalidImageFormatError:\n logger.error(_(\"Image %s invalid or missing from disk.\") % picture.attachment_file)\n pass\n return None\n\n @classproperty\n def thumbnail_verbose_name(cls):\n return _(\"Thumbnail\")\n\n @property\n def thumbnail_display(self):\n thumbnail = self.thumbnail\n if thumbnail is None:\n return _(\"None\")\n return '<img height=\"20\" width=\"20\" src=\"%s\"/>' % os.path.join(settings.MEDIA_URL, thumbnail.name)\n\n @property\n def thumbnail_csv_display(self):\n return '' if self.thumbnail is None else os.path.join(settings.MEDIA_URL, self.thumbnail.name)\n\n @property\n def serializable_thumbnail(self):\n th = self.thumbnail\n if not th:\n return None\n return os.path.join(settings.MEDIA_URL, th.name)\n\n\nclass BasePublishableMixin(models.Model):\n \"\"\" Basic fields to control publication of objects.\n\n It is used for flat pages and publishable entities.\n \"\"\"\n published = models.BooleanField(verbose_name=_(u\"Published\"), default=False,\n help_text=_(u\"Online\"), db_column='public')\n publication_date = models.DateField(verbose_name=_(u\"Publication date\"),\n null=True, blank=True, editable=False,\n db_column='date_publication')\n\n class Meta:\n abstract = True\n\n def save(self, *args, **kwargs):\n if self.publication_date is None and self.any_published:\n self.publication_date = datetime.date.today()\n if self.publication_date is not None and not self.any_published:\n self.publication_date = None\n super(BasePublishableMixin, self).save(*args, **kwargs)\n\n @property\n def any_published(self):\n \"\"\"Returns True if the object is published in at least one of the language\n \"\"\"\n if not settings.PUBLISHED_BY_LANG:\n return self.published\n\n for l in settings.MAPENTITY_CONFIG['TRANSLATED_LANGUAGES']:\n if getattr(self, 'published_%s' % l[0], False):\n return True\n return False\n\n @property\n def published_status(self):\n \"\"\"Returns the publication status by language.\n \"\"\"\n status = []\n for l in settings.MAPENTITY_CONFIG['TRANSLATED_LANGUAGES']:\n if settings.PUBLISHED_BY_LANG:\n published = getattr(self, 'published_%s' % l[0], None) or False\n else:\n published = self.published\n status.append({\n 'lang': l[0],\n 'language': l[1],\n 'status': published\n })\n return status\n\n\nclass PublishableMixin(BasePublishableMixin):\n \"\"\"A mixin that contains all necessary stuff to publish objects\n (e.g. on Geotrek-rando).\n\n It will only work with MapEntity models.\n\n Initially, it was part of the ``trekking.Trek`` class. But now, all kinds of information\n can be published (c.f. 
PN Cevennes project).\n \"\"\"\n name = models.CharField(verbose_name=_(u\"Name\"), max_length=128,\n help_text=_(u\"Public name (Change carefully)\"), db_column='nom')\n\n class Meta:\n abstract = True\n\n @property\n def slug(self):\n return slugify(self.name)\n\n @property\n def name_display(self):\n s = u'<a data-pk=\"%s\" href=\"%s\" title=\"%s\">%s</a>' % (self.pk,\n self.get_detail_url(),\n self.name,\n self.name)\n if self.published:\n s = u'<span class=\"badge badge-success\" title=\"%s\">☆</span> ' % _(\"Published\") + s\n return s\n\n @property\n def name_csv_display(self):\n return unicode(self.name)\n\n @models.permalink\n def get_document_public_url(self):\n raise NotImplementedError\n\n def is_complete(self):\n \"\"\"It should also have a description, etc.\n \"\"\"\n modelname = self.__class__._meta.object_name.lower()\n mandatory = settings.COMPLETENESS_FIELDS.get(modelname, [])\n for f in mandatory:\n if not getattr(self, f):\n return False\n return True\n\n def is_publishable(self):\n return self.is_complete() and self.has_geom_valid()\n\n def has_geom_valid(self):\n return self.geom is not None\n\n def prepare_map_image(self, rooturl):\n \"\"\"\n We override the default behaviour of map image preparation :\n if the object has a attached picture file with *title* ``mapimage``, we use it\n as a screenshot.\n TODO: remove this when screenshots are bullet-proof ?\n \"\"\"\n attached = None\n for picture in [a for a in self.attachments.all() if a.is_image]:\n if picture.title == 'mapimage':\n attached = picture.attachment_file\n break\n if attached is None:\n super(PublishableMixin, self).prepare_map_image(rooturl)\n else:\n # Copy it along other screenshots\n src = os.path.join(settings.MEDIA_ROOT, attached.name)\n dst = self.get_map_image_path()\n shutil.copyfile(src, dst)\n\n def get_geom_aspect_ratio(self):\n \"\"\" Force object aspect ratio to fit height and width of\n image in public document.\n \"\"\"\n modelname = self.__class__._meta.object_name.lower()\n s = settings.EXPORT_MAP_IMAGE_SIZE[modelname]\n return float(s[0]) / s[1]\n\n def get_attachment_print(self):\n \"\"\"\n Look in attachment if there is document to be used as print version\n \"\"\"\n overriden = self.attachments.filter(title=\"docprint\").get()\n # Must have OpenOffice document mimetype\n if overriden.mimetype != ['application', 'vnd.oasis.opendocument.text']:\n raise overriden.DoesNotExist()\n return os.path.join(settings.MEDIA_ROOT, overriden.attachment_file.name)\n\n\nclass PictogramMixin(models.Model):\n pictogram = models.FileField(verbose_name=_(u\"Pictogram\"), upload_to=settings.UPLOAD_DIR,\n db_column='picto', max_length=512, null=True)\n\n class Meta:\n abstract = True\n\n def pictogram_img(self):\n return u'<img src=\"%s\" />' % (self.pictogram.url if self.pictogram else \"\")\n pictogram_img.short_description = _(\"Pictogram\")\n pictogram_img.allow_tags = True\n\n def get_pictogram_url(self):\n return self.pictogram.url if self.pictogram else None\n", "path": "geotrek/common/mixins.py"}], "after_files": [{"content": "import os\nimport logging\nimport shutil\nimport datetime\n\nfrom django.conf import settings\nfrom django.db.models import Manager as DefaultManager\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.template.defaultfilters import slugify\n\nfrom easy_thumbnails.alias import aliases\nfrom easy_thumbnails.exceptions import InvalidImageFormatError\nfrom easy_thumbnails.files import get_thumbnailer\n\nfrom geotrek.common.utils 
import classproperty\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass TimeStampedModelMixin(models.Model):\n # Computed values (managed at DB-level with triggers)\n date_insert = models.DateTimeField(auto_now_add=True, editable=False, verbose_name=_(u\"Insertion date\"), db_column='date_insert')\n date_update = models.DateTimeField(auto_now=True, editable=False, verbose_name=_(u\"Update date\"), db_column='date_update')\n\n class Meta:\n abstract = True\n\n def reload(self, fromdb=None):\n \"\"\"Reload fields computed at DB-level (triggers)\n \"\"\"\n if fromdb is None:\n fromdb = self.__class__.objects.get(pk=self.pk)\n self.date_insert = fromdb.date_insert\n self.date_update = fromdb.date_update\n return self\n\n\nclass NoDeleteMixin(models.Model):\n deleted = models.BooleanField(editable=False, default=False, db_column='supprime', verbose_name=_(u\"Deleted\"))\n\n def delete(self, force=False, using=None, **kwargs):\n if force:\n super(NoDeleteMixin, self).delete(using, **kwargs)\n else:\n self.deleted = True\n self.save(using=using)\n\n class Meta:\n abstract = True\n\n def reload(self, fromdb=None):\n \"\"\"Reload fields computed at DB-level (triggers)\n \"\"\"\n if fromdb is None:\n fromdb = self.__class__.objects.get(pk=self.pk)\n self.deleted = fromdb.deleted\n return self\n\n @classmethod\n def get_manager_cls(cls, parent_mgr_cls=DefaultManager):\n\n class NoDeleteManager(parent_mgr_cls):\n # Use this manager when walking through FK/M2M relationships\n use_for_related_fields = True\n\n # Filter out deleted objects\n def existing(self):\n return self.get_queryset().filter(deleted=False)\n\n return NoDeleteManager\n\n\nclass PicturesMixin(object):\n \"\"\"A common class to share code between Trek and POI regarding\n attached pictures\"\"\"\n\n @property\n def pictures(self):\n \"\"\"\n Find first image among attachments.\n Since we allow screenshot to be overriden by attachments\n named 'mapimage', filter it from object pictures.\n \"\"\"\n if hasattr(self, '_pictures'):\n return self._pictures\n all_attachments = self.attachments.order_by('-starred').all()\n return [a for a in all_attachments if a.is_image\n and a.title != 'mapimage']\n\n @pictures.setter\n def pictures(self, values):\n self._pictures = values\n\n @property\n def serializable_pictures(self):\n serialized = []\n for picture in self.pictures:\n thumbnailer = get_thumbnailer(picture.attachment_file)\n try:\n thdetail = thumbnailer.get_thumbnail(aliases.get('medium'))\n thurl = os.path.join(settings.MEDIA_URL, thdetail.name)\n except InvalidImageFormatError:\n thurl = None\n logger.error(_(\"Image %s invalid or missing from disk.\") % picture.attachment_file)\n pass\n serialized.append({\n 'author': picture.author,\n 'title': picture.title,\n 'legend': picture.legend,\n 'url': thurl\n })\n return serialized\n\n @property\n def picture_print(self):\n for picture in self.pictures:\n thumbnailer = get_thumbnailer(picture.attachment_file)\n try:\n return thumbnailer.get_thumbnail(aliases.get('print'))\n except InvalidImageFormatError:\n logger.error(_(\"Image %s invalid or missing from disk.\") % picture.attachment_file)\n pass\n return None\n\n @property\n def thumbnail(self):\n for picture in self.pictures:\n thumbnailer = get_thumbnailer(picture.attachment_file)\n try:\n return thumbnailer.get_thumbnail(aliases.get('small-square'))\n except InvalidImageFormatError:\n logger.error(_(\"Image %s invalid or missing from disk.\") % picture.attachment_file)\n pass\n return None\n\n @classproperty\n def 
thumbnail_verbose_name(cls):\n return _(\"Thumbnail\")\n\n @property\n def thumbnail_display(self):\n thumbnail = self.thumbnail\n if thumbnail is None:\n return _(\"None\")\n return '<img height=\"20\" width=\"20\" src=\"%s\"/>' % os.path.join(settings.MEDIA_URL, thumbnail.name)\n\n @property\n def thumbnail_csv_display(self):\n return '' if self.thumbnail is None else os.path.join(settings.MEDIA_URL, self.thumbnail.name)\n\n @property\n def serializable_thumbnail(self):\n th = self.thumbnail\n if not th:\n return None\n return os.path.join(settings.MEDIA_URL, th.name)\n\n\nclass BasePublishableMixin(models.Model):\n \"\"\" Basic fields to control publication of objects.\n\n It is used for flat pages and publishable entities.\n \"\"\"\n published = models.BooleanField(verbose_name=_(u\"Published\"), default=False,\n help_text=_(u\"Online\"), db_column='public')\n publication_date = models.DateField(verbose_name=_(u\"Publication date\"),\n null=True, blank=True, editable=False,\n db_column='date_publication')\n\n class Meta:\n abstract = True\n\n def save(self, *args, **kwargs):\n if self.publication_date is None and self.any_published:\n self.publication_date = datetime.date.today()\n if self.publication_date is not None and not self.any_published:\n self.publication_date = None\n super(BasePublishableMixin, self).save(*args, **kwargs)\n\n @property\n def any_published(self):\n \"\"\"Returns True if the object is published in at least one of the language\n \"\"\"\n if not settings.PUBLISHED_BY_LANG:\n return self.published\n\n for l in settings.MAPENTITY_CONFIG['TRANSLATED_LANGUAGES']:\n if getattr(self, 'published_%s' % l[0], False):\n return True\n return False\n\n @property\n def published_status(self):\n \"\"\"Returns the publication status by language.\n \"\"\"\n status = []\n for l in settings.MAPENTITY_CONFIG['TRANSLATED_LANGUAGES']:\n if settings.PUBLISHED_BY_LANG:\n published = getattr(self, 'published_%s' % l[0], None) or False\n else:\n published = self.published\n status.append({\n 'lang': l[0],\n 'language': l[1],\n 'status': published\n })\n return status\n\n\nclass PublishableMixin(BasePublishableMixin):\n \"\"\"A mixin that contains all necessary stuff to publish objects\n (e.g. on Geotrek-rando).\n\n It will only work with MapEntity models.\n\n Initially, it was part of the ``trekking.Trek`` class. But now, all kinds of information\n can be published (c.f. 
PN Cevennes project).\n \"\"\"\n name = models.CharField(verbose_name=_(u\"Name\"), max_length=128,\n help_text=_(u\"Public name (Change carefully)\"), db_column='nom')\n\n class Meta:\n abstract = True\n\n @property\n def slug(self):\n return slugify(self.name)\n\n @property\n def name_display(self):\n s = u'<a data-pk=\"%s\" href=\"%s\" title=\"%s\">%s</a>' % (self.pk,\n self.get_detail_url(),\n self.name,\n self.name)\n if self.published:\n s = u'<span class=\"badge badge-success\" title=\"%s\">☆</span> ' % _(\"Published\") + s\n return s\n\n @property\n def name_csv_display(self):\n return unicode(self.name)\n\n @models.permalink\n def get_document_public_url(self):\n raise NotImplementedError\n\n def is_complete(self):\n \"\"\"It should also have a description, etc.\n \"\"\"\n modelname = self.__class__._meta.object_name.lower()\n mandatory = settings.COMPLETENESS_FIELDS.get(modelname, [])\n for f in mandatory:\n if not getattr(self, f):\n return False\n return True\n\n def is_publishable(self):\n return self.is_complete() and self.has_geom_valid()\n\n def has_geom_valid(self):\n return self.geom is not None\n\n def prepare_map_image(self, rooturl):\n \"\"\"\n We override the default behaviour of map image preparation :\n if the object has a attached picture file with *title* ``mapimage``, we use it\n as a screenshot.\n TODO: remove this when screenshots are bullet-proof ?\n \"\"\"\n attached = None\n for picture in [a for a in self.attachments.all() if a.is_image]:\n if picture.title == 'mapimage':\n attached = picture.attachment_file\n break\n if attached is None:\n super(PublishableMixin, self).prepare_map_image(rooturl)\n else:\n # Copy it along other screenshots\n src = os.path.join(settings.MEDIA_ROOT, attached.name)\n dst = self.get_map_image_path()\n shutil.copyfile(src, dst)\n\n def get_geom_aspect_ratio(self):\n \"\"\" Force object aspect ratio to fit height and width of\n image in public document.\n \"\"\"\n modelname = self.__class__._meta.object_name.lower()\n s = settings.EXPORT_MAP_IMAGE_SIZE[modelname]\n return float(s[0]) / s[1]\n\n def get_attachment_print(self):\n \"\"\"\n Look in attachment if there is document to be used as print version\n \"\"\"\n overriden = self.attachments.filter(title=\"docprint\").get()\n # Must have OpenOffice document mimetype\n if overriden.mimetype != ['application', 'vnd.oasis.opendocument.text']:\n raise overriden.DoesNotExist()\n return os.path.join(settings.MEDIA_ROOT, overriden.attachment_file.name)\n\n\nclass PictogramMixin(models.Model):\n pictogram = models.FileField(verbose_name=_(u\"Pictogram\"), upload_to=settings.UPLOAD_DIR,\n db_column='picto', max_length=512, null=True)\n\n class Meta:\n abstract = True\n\n def pictogram_img(self):\n return u'<img src=\"%s\" />' % (self.pictogram.url if self.pictogram else \"\")\n pictogram_img.short_description = _(\"Pictogram\")\n pictogram_img.allow_tags = True\n\n def get_pictogram_url(self):\n return self.pictogram.url if self.pictogram else None\n", "path": "geotrek/common/mixins.py"}]}
| 3,473 | 136 |
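
The patch in the row above selects the main illustration by sorting attachments so that starred ones come first (`order_by('-starred')`). A plain-Python sketch of that selection rule, with made-up attachment titles and no Django dependency:

```python
# Hedged stand-in for the Django queryset ordering: starred attachments sort
# first, so the starred picture becomes the main illustration.
from collections import namedtuple

Attachment = namedtuple("Attachment", "title starred is_image")

attachments = [
    Attachment("view-from-summit", starred=False, is_image=True),
    Attachment("mapimage", starred=False, is_image=True),   # screenshot override, excluded below
    Attachment("trail-sign", starred=True, is_image=True),
]

ordered = sorted(attachments, key=lambda a: a.starred, reverse=True)
pictures = [a for a in ordered if a.is_image and a.title != "mapimage"]

print(pictures[0].title)  # "trail-sign" - the starred picture leads the list
```
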
gh_patches_debug_21915 | rasdani/github-patches | git_diff | Parsl__parsl-759 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Kubernetes option missing in setup.py
The option to install kubernetes as an optional extra is missing from our setup.py script.
reported by Ben Galewsky.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup, find_packages
2
3 with open('parsl/version.py') as f:
4 exec(f.read())
5
6 with open('requirements.txt') as f:
7 install_requires = f.readlines()
8
9 setup(
10 name='parsl',
11 version=VERSION,
12 description='Simple data dependent workflows in Python',
13 long_description='Simple parallel workflows system for Python',
14 url='https://github.com/Parsl/parsl',
15 author='The Parsl Team',
16 author_email='[email protected]',
17 license='Apache 2.0',
18 download_url='https://github.com/Parsl/parsl/archive/{}.tar.gz'.format(VERSION),
19 include_package_data=True,
20 packages=find_packages(),
21 install_requires=install_requires,
22 scripts = ['parsl/executors/high_throughput/process_worker_pool.py',
23 'parsl/executors/extreme_scale/mpi_worker_pool.py',
24 'parsl/executors/low_latency/lowlatency_worker.py',
25 ],
26 extras_require = {
27 'visualize': ['dash', 'dash-html-components', 'dash-core-components', 'pandas'],
28 'db_logging' : ['CMRESHandler', 'psutil', 'sqlalchemy'],
29 'aws' : ['boto3'],
30 # Jetstream is deprecated since the interface has not been maintained.
31 # 'jetstream' : ['python-novaclient'],
32 'extreme_scale' : ['mpi4py'],
33 'docs' : ['nbsphinx', 'sphinx_rtd_theme'],
34 'google_cloud' : ['google-auth', 'google-api-python-client'],
35 'gssapi' : ['python-gssapi'],
36 'all' : ['CMRESHandler', 'psutil', 'sqlalchemy',
37 'dash', 'dash-html-components', 'dash-core-components', 'pandas',
38 'boto3',
39 'mpi4py',
40 'nbsphinx', 'sphinx_rtd_theme',
41 'google-auth', 'google-api-python-client',
42 'python-gssapi']
43
44 },
45 classifiers = [
46 # Maturity
47 'Development Status :: 3 - Alpha',
48 # Intended audience
49 'Intended Audience :: Developers',
50 # Licence, must match with licence above
51 'License :: OSI Approved :: Apache Software License',
52 # Python versions supported
53 'Programming Language :: Python :: 3.5',
54 'Programming Language :: Python :: 3.6',
55 ],
56 keywords=['Workflows', 'Scientific computing'],
57 entry_points={'console_scripts': ['parsl-visualize=parsl.monitoring.web_app.index:cli_run']}
58 )
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -27,6 +27,7 @@
'visualize': ['dash', 'dash-html-components', 'dash-core-components', 'pandas'],
'db_logging' : ['CMRESHandler', 'psutil', 'sqlalchemy'],
'aws' : ['boto3'],
+ 'kubernetes' : ['kubernetes'],
# Jetstream is deprecated since the interface has not been maintained.
# 'jetstream' : ['python-novaclient'],
'extreme_scale' : ['mpi4py'],
@@ -36,6 +37,7 @@
'all' : ['CMRESHandler', 'psutil', 'sqlalchemy',
'dash', 'dash-html-components', 'dash-core-components', 'pandas',
'boto3',
+ 'kubernetes',
'mpi4py',
'nbsphinx', 'sphinx_rtd_theme',
'google-auth', 'google-api-python-client',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -27,6 +27,7 @@\n 'visualize': ['dash', 'dash-html-components', 'dash-core-components', 'pandas'],\n 'db_logging' : ['CMRESHandler', 'psutil', 'sqlalchemy'],\n 'aws' : ['boto3'],\n+ 'kubernetes' : ['kubernetes'],\n # Jetstream is deprecated since the interface has not been maintained.\n # 'jetstream' : ['python-novaclient'],\n 'extreme_scale' : ['mpi4py'],\n@@ -36,6 +37,7 @@\n 'all' : ['CMRESHandler', 'psutil', 'sqlalchemy',\n 'dash', 'dash-html-components', 'dash-core-components', 'pandas',\n 'boto3',\n+ 'kubernetes',\n 'mpi4py',\n 'nbsphinx', 'sphinx_rtd_theme',\n 'google-auth', 'google-api-python-client',\n", "issue": "Kubernetes option missing in setup.py\nThe option to install kubernetes as an optional extra is missing from our setup.py script.\r\n\r\nreported by Ben Galewsky.\n", "before_files": [{"content": "from setuptools import setup, find_packages\n\nwith open('parsl/version.py') as f:\n exec(f.read())\n\nwith open('requirements.txt') as f:\n install_requires = f.readlines()\n\nsetup(\n name='parsl',\n version=VERSION,\n description='Simple data dependent workflows in Python',\n long_description='Simple parallel workflows system for Python',\n url='https://github.com/Parsl/parsl',\n author='The Parsl Team',\n author_email='[email protected]',\n license='Apache 2.0',\n download_url='https://github.com/Parsl/parsl/archive/{}.tar.gz'.format(VERSION),\n include_package_data=True,\n packages=find_packages(),\n install_requires=install_requires,\n scripts = ['parsl/executors/high_throughput/process_worker_pool.py',\n 'parsl/executors/extreme_scale/mpi_worker_pool.py',\n 'parsl/executors/low_latency/lowlatency_worker.py',\n ],\n extras_require = {\n 'visualize': ['dash', 'dash-html-components', 'dash-core-components', 'pandas'],\n 'db_logging' : ['CMRESHandler', 'psutil', 'sqlalchemy'],\n 'aws' : ['boto3'],\n # Jetstream is deprecated since the interface has not been maintained.\n # 'jetstream' : ['python-novaclient'],\n 'extreme_scale' : ['mpi4py'],\n 'docs' : ['nbsphinx', 'sphinx_rtd_theme'],\n 'google_cloud' : ['google-auth', 'google-api-python-client'],\n 'gssapi' : ['python-gssapi'],\n 'all' : ['CMRESHandler', 'psutil', 'sqlalchemy',\n 'dash', 'dash-html-components', 'dash-core-components', 'pandas',\n 'boto3',\n 'mpi4py',\n 'nbsphinx', 'sphinx_rtd_theme',\n 'google-auth', 'google-api-python-client',\n 'python-gssapi']\n\n },\n classifiers = [\n # Maturity\n 'Development Status :: 3 - Alpha',\n # Intended audience\n 'Intended Audience :: Developers',\n # Licence, must match with licence above\n 'License :: OSI Approved :: Apache Software License',\n # Python versions supported\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords=['Workflows', 'Scientific computing'],\n entry_points={'console_scripts': ['parsl-visualize=parsl.monitoring.web_app.index:cli_run']}\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\n\nwith open('parsl/version.py') as f:\n exec(f.read())\n\nwith open('requirements.txt') as f:\n install_requires = f.readlines()\n\nsetup(\n name='parsl',\n version=VERSION,\n description='Simple data dependent workflows in Python',\n long_description='Simple parallel workflows system for Python',\n url='https://github.com/Parsl/parsl',\n author='The Parsl Team',\n author_email='[email protected]',\n license='Apache 2.0',\n 
download_url='https://github.com/Parsl/parsl/archive/{}.tar.gz'.format(VERSION),\n include_package_data=True,\n packages=find_packages(),\n install_requires=install_requires,\n scripts = ['parsl/executors/high_throughput/process_worker_pool.py',\n 'parsl/executors/extreme_scale/mpi_worker_pool.py',\n 'parsl/executors/low_latency/lowlatency_worker.py',\n ],\n extras_require = {\n 'visualize': ['dash', 'dash-html-components', 'dash-core-components', 'pandas'],\n 'db_logging' : ['CMRESHandler', 'psutil', 'sqlalchemy'],\n 'aws' : ['boto3'],\n 'kubernetes' : ['kubernetes'],\n # Jetstream is deprecated since the interface has not been maintained.\n # 'jetstream' : ['python-novaclient'],\n 'extreme_scale' : ['mpi4py'],\n 'docs' : ['nbsphinx', 'sphinx_rtd_theme'],\n 'google_cloud' : ['google-auth', 'google-api-python-client'],\n 'gssapi' : ['python-gssapi'],\n 'all' : ['CMRESHandler', 'psutil', 'sqlalchemy',\n 'dash', 'dash-html-components', 'dash-core-components', 'pandas',\n 'boto3',\n 'kubernetes',\n 'mpi4py',\n 'nbsphinx', 'sphinx_rtd_theme',\n 'google-auth', 'google-api-python-client',\n 'python-gssapi']\n\n },\n classifiers = [\n # Maturity\n 'Development Status :: 3 - Alpha',\n # Intended audience\n 'Intended Audience :: Developers',\n # Licence, must match with licence above\n 'License :: OSI Approved :: Apache Software License',\n # Python versions supported\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords=['Workflows', 'Scientific computing'],\n entry_points={'console_scripts': ['parsl-visualize=parsl.monitoring.web_app.index:cli_run']}\n)\n", "path": "setup.py"}]}
| 955 | 223 |
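
The row above uses the standard setuptools `extras_require` mechanism: declaring an optional dependency group lets users opt in with `pip install "parsl[kubernetes]"`. A stripped-down sketch with placeholder package metadata:

```python
# Hedged, stripped-down sketch of the extras_require pattern; the package
# metadata here is a placeholder, not parsl's real setup.py.
from setuptools import setup, find_packages

setup(
    name="example-package",            # placeholder
    version="0.0.1",                   # placeholder
    packages=find_packages(),
    extras_require={
        "kubernetes": ["kubernetes"],  # installed via: pip install "example-package[kubernetes]"
        "aws": ["boto3"],
        "all": ["kubernetes", "boto3"],
    },
)
```
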
gh_patches_debug_13889 | rasdani/github-patches | git_diff | frappe__frappe-11558 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
IMAP port settings are not updated from Email Domain to Email Account
## Description of the issue
When changing the IMAP port in an existing Email Domain, the Email Accounts using this Domain are not updated accordingly. This can lead to Frappe trying an IMAPS connection (which usually is 993) to the plain IMAP port 143, resulting in misleading error messages like `ssl.SSLError: [SSL: WRONG_VERSION_NUMBER] wrong version number (_ssl.c:852)`.
We could track down the root cause to the method `on_update` from the DocType "Email Domain": it simply misses the field `incoming_port` when copying data to all e-mail accounts that use this domain. This leads to the problem if the `incoming_port` is already set in the email account and gets updated/changed afterwards in the email domain.
## Context information (for bug reports)
```
frappe-bench$ bench --version
5.0.0
frappe-bench$ bench version
erpnext 12.11.2
frappe 12.9.1
```
## Steps to reproduce the issue
1. To reproduce this small bug you need to create a "Email Domain" in Frappe and save it with imap-port 143 and no SSL.
2. Create an e-mail account and link it with the domain from step 1 but without `Enable Incoming` and save.
3. Try to `Enable Incoming` and save
4. After "saving" the e-mail account go to the domain and change the imap-port from 143 to 993 and check SSL.
5. The `incoming_port` in Email-account is still 143.
### Observed result
In the database you can see that the `incoming_port` in the e-mail account is still 143 (real domain and mail addresses hidden):
```
select
ea.email_id,
ea.domain,
ea.incoming_port,
ed.incoming_port,
ea.email_server,
ed.email_server
from
`tabEmail Account` ea,
`tabEmail Domain` ed
where ea.domain = ed.name
and ed.name = "example.com";
```
#### Before updating the IMAP port in the domain
```
+------------------+-------------+---------------+---------------+--------------+--------------+
| email_id | domain | incoming_port | incoming_port | email_server | email_server |
+------------------+-------------+---------------+---------------+--------------+--------------+
| [email protected] | example.com | 143 | 143 | example.com | example.com |
+------------------+-------------+---------------+---------------+--------------+--------------+
1 row in set (0.000 sec)
```
#### After updating the IMAP port in the domain
```
+------------------+-------------+---------------+---------------+--------------+--------------+
| email_id | domain | incoming_port | incoming_port | email_server | email_server |
+------------------+-------------+---------------+---------------+--------------+--------------+
| [email protected] | example.com | 143 | 993 | example.com | example.com |
+------------------+-------------+---------------+---------------+--------------+--------------+
1 row in set (0.001 sec)
```
Now it will always trigger an SSL-handshake-error if the scheduler tries to get access.
### Expected result
When the mail domain gets updated all necessary fields related to e-mail account should be updated including the `incoming_port`.
### Stacktrace / full error message
```
Traceback (most recent call last):
File "/home/erpnext/frappe-bench/apps/frappe/frappe/app.py", line 64, in application
response = frappe.api.handle()
File "/home/erpnext/frappe-bench/apps/frappe/frappe/api.py", line 59, in handle
return frappe.handler.handle()
File "/home/erpnext/frappe-bench/apps/frappe/frappe/handler.py", line 24, in handle
data = execute_cmd(cmd)
File "/home/erpnext/frappe-bench/apps/frappe/frappe/handler.py", line 63, in execute_cmd
return frappe.call(method, **frappe.form_dict)
File "/home/erpnext/frappe-bench/apps/frappe/frappe/__init__.py", line 1055, in call
return fn(*args, **newargs)
File "/home/erpnext/frappe-bench/apps/frappe/frappe/desk/form/save.py", line 21, in savedocs
doc.save()
File "/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py", line 273, in save
return self._save(*args, **kwargs)
File "/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py", line 309, in _save
self.run_before_save_methods()
File "/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py", line 896, in run_before_save_methods
self.run_method("validate")
File "/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py", line 797, in run_method
out = Document.hook(fn)(self, *args, **kwargs)
File "/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py", line 1073, in composer
return composed(self, method, *args, **kwargs)
File "/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py", line 1056, in runner
add_to_return_value(self, fn(self, *args, **kwargs))
File "/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py", line 791, in <lambda>
fn = lambda self, *args, **kwargs: getattr(self, method)(*args, **kwargs)
File "/home/erpnext/frappe-bench/apps/frappe/frappe/email/doctype/email_account/email_account.py", line 68, in validate
self.get_incoming_server()
File "/home/erpnext/frappe-bench/apps/frappe/frappe/email/doctype/email_account/email_account.py", line 168, in get_incoming_server
email_server.connect()
File "/home/erpnext/frappe-bench/apps/frappe/frappe/email/receive.py", line 43, in connect
return self.connect_imap()
File "/home/erpnext/frappe-bench/apps/frappe/frappe/email/receive.py", line 51, in connect_imap
self.imap = Timed_IMAP4_SSL(self.settings.host, self.settings.incoming_port, timeout=frappe.conf.get("pop_timeout"))
File "/home/erpnext/frappe-bench/apps/frappe/frappe/email/receive.py", line 564, in __init__
self._super.__init__(self, *args, **kwargs)
File "/usr/lib/python3.6/imaplib.py", line 1288, in __init__
IMAP4.__init__(self, host, port)
File "/usr/lib/python3.6/imaplib.py", line 198, in __init__
self.open(host, port)
File "/usr/lib/python3.6/imaplib.py", line 1301, in open
IMAP4.open(self, host, port)
File "/usr/lib/python3.6/imaplib.py", line 299, in open
self.sock = self._create_socket()
File "/usr/lib/python3.6/imaplib.py", line 1293, in _create_socket
server_hostname=self.host)
File "/usr/lib/python3.6/ssl.py", line 407, in wrap_socket
_context=self, _session=session)
File "/usr/lib/python3.6/ssl.py", line 817, in __init__
self.do_handshake()
File "/usr/lib/python3.6/ssl.py", line 1077, in do_handshake
self._sslobj.do_handshake()
File "/usr/lib/python3.6/ssl.py", line 689, in do_handshake
self._sslobj.do_handshake()
ssl.SSLError: [SSL: WRONG_VERSION_NUMBER] wrong version number (_ssl.c:852)
```
## OS
- Linux Ubuntu 18.04
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `frappe/email/doctype/email_domain/email_domain.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
3 # For license information, please see license.txt
4
5 from __future__ import unicode_literals
6 import frappe
7 from frappe import _
8 from frappe.model.document import Document
9 from frappe.utils import validate_email_address ,cint, cstr
10 import imaplib,poplib,smtplib
11 from frappe.email.utils import get_port
12
13 class EmailDomain(Document):
14 def autoname(self):
15 if self.domain_name:
16 self.name = self.domain_name
17
18 def validate(self):
19 """Validate email id and check POP3/IMAP and SMTP connections is enabled."""
20 if self.email_id:
21 validate_email_address(self.email_id, True)
22
23 if frappe.local.flags.in_patch or frappe.local.flags.in_test:
24 return
25
26 if not frappe.local.flags.in_install and not frappe.local.flags.in_patch:
27 try:
28 if self.use_imap:
29 if self.use_ssl:
30 test = imaplib.IMAP4_SSL(self.email_server, port=get_port(self))
31 else:
32 test = imaplib.IMAP4(self.email_server, port=get_port(self))
33
34 else:
35 if self.use_ssl:
36 test = poplib.POP3_SSL(self.email_server, port=get_port(self))
37 else:
38 test = poplib.POP3(self.email_server, port=get_port(self))
39
40 except Exception:
41 frappe.throw(_("Incoming email account not correct"))
42
43 finally:
44 try:
45 if self.use_imap:
46 test.logout()
47 else:
48 test.quit()
49 except Exception:
50 pass
51
52 try:
53 if self.get('use_ssl_for_outgoing'):
54 if not self.get('smtp_port'):
55 self.smtp_port = 465
56
57 sess = smtplib.SMTP_SSL((self.smtp_server or "").encode('utf-8'),
58 cint(self.smtp_port) or None)
59 else:
60 if self.use_tls and not self.smtp_port:
61 self.smtp_port = 587
62 sess = smtplib.SMTP(cstr(self.smtp_server or ""), cint(self.smtp_port) or None)
63 sess.quit()
64 except Exception:
65 frappe.throw(_("Outgoing email account not correct"))
66
67 def on_update(self):
68 """update all email accounts using this domain"""
69 for email_account in frappe.get_all("Email Account", filters={"domain": self.name}):
70 try:
71 email_account = frappe.get_doc("Email Account", email_account.name)
72 for attr in ["email_server", "use_imap", "use_ssl", "use_tls", "attachment_limit", "smtp_server", "smtp_port", "use_ssl_for_outgoing", "append_emails_to_sent_folder"]:
73 email_account.set(attr, self.get(attr, default=0))
74 email_account.save()
75
76 except Exception as e:
77 frappe.msgprint(_("Error has occurred in {0}").format(email_account.name), raise_exception=e.__class__)
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/frappe/email/doctype/email_domain/email_domain.py b/frappe/email/doctype/email_domain/email_domain.py
--- a/frappe/email/doctype/email_domain/email_domain.py
+++ b/frappe/email/doctype/email_domain/email_domain.py
@@ -69,7 +69,7 @@
for email_account in frappe.get_all("Email Account", filters={"domain": self.name}):
try:
email_account = frappe.get_doc("Email Account", email_account.name)
- for attr in ["email_server", "use_imap", "use_ssl", "use_tls", "attachment_limit", "smtp_server", "smtp_port", "use_ssl_for_outgoing", "append_emails_to_sent_folder"]:
+ for attr in ["email_server", "use_imap", "use_ssl", "use_tls", "attachment_limit", "smtp_server", "smtp_port", "use_ssl_for_outgoing", "append_emails_to_sent_folder", "incoming_port"]:
email_account.set(attr, self.get(attr, default=0))
email_account.save()
|
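An illustrative aside, not part of the record above: a rough sketch of a regression test for the propagation fix in that diff. It assumes a Frappe test site (where `frappe.flags.in_test` short-circuits the live IMAP/SMTP checks in `validate`) and uses made-up fixture names; real Email Domain / Email Account documents need more mandatory fields than are shown here.
```python
import unittest

import frappe


class TestIncomingPortPropagation(unittest.TestCase):
    """Changing incoming_port on an Email Domain should reach its Email Accounts."""

    def test_incoming_port_is_copied_to_linked_accounts(self):
        # Placeholder fixture names -- assumed to already exist on the test site.
        domain = frappe.get_doc("Email Domain", "example.com")
        domain.incoming_port = 993
        domain.use_ssl = 1
        domain.save()  # fires on_update(), which copies the listed attributes

        account = frappe.get_doc("Email Account", "[email protected]")
        # Before the fix this still read 143; with "incoming_port" in the copied
        # attribute list it now follows the domain.
        self.assertEqual(int(account.incoming_port), 993)
```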
{"golden_diff": "diff --git a/frappe/email/doctype/email_domain/email_domain.py b/frappe/email/doctype/email_domain/email_domain.py\n--- a/frappe/email/doctype/email_domain/email_domain.py\n+++ b/frappe/email/doctype/email_domain/email_domain.py\n@@ -69,7 +69,7 @@\n \t\tfor email_account in frappe.get_all(\"Email Account\", filters={\"domain\": self.name}):\n \t\t\ttry:\n \t\t\t\temail_account = frappe.get_doc(\"Email Account\", email_account.name)\n-\t\t\t\tfor attr in [\"email_server\", \"use_imap\", \"use_ssl\", \"use_tls\", \"attachment_limit\", \"smtp_server\", \"smtp_port\", \"use_ssl_for_outgoing\", \"append_emails_to_sent_folder\"]:\n+\t\t\t\tfor attr in [\"email_server\", \"use_imap\", \"use_ssl\", \"use_tls\", \"attachment_limit\", \"smtp_server\", \"smtp_port\", \"use_ssl_for_outgoing\", \"append_emails_to_sent_folder\", \"incoming_port\"]:\n \t\t\t\t\temail_account.set(attr, self.get(attr, default=0))\n \t\t\t\temail_account.save()\n", "issue": "IMAP port settings are not updated from Email Domain to Email Account\n## Description of the issue\r\n\r\nWhen changing the IMAP port in an existing Email Domain, the Email Accounts using this Domain are not updated accordingly. This can lead to Frappe trying an IMAPS connection (which usually is 993) to the plain IMAP port 143, resulting in misleading error messages like `ssl.SSLError: [SSL: WRONG_VERSION_NUMBER] wrong version number (_ssl.c:852)`.\r\n\r\nWe could track down the root cause to the method `on_update` from the DocType \"Email Domain\": it simply misses the field `incoming_port` when copying data to all e-mail accounts that use this domain. This leads to the problem if the `incoming_port` is already set in the email account and gets updated/changed afterwards in the email domain.\r\n## Context information (for bug reports)\r\n\r\n```\r\nfrappe-bench$ bench --version\r\n5.0.0\r\n\r\nfrappe-bench$ bench version\r\nerpnext 12.11.2\r\nfrappe 12.9.1\r\n```\r\n## Steps to reproduce the issue\r\n\r\n1. To reproduce this small bug you need to create a \"Email Domain\" in Frappe and save it with imap-port 143 and no SSL.\r\n2. Create an e-mail account and link it with the domain from step 1 but without `Enable Incoming` and save.\r\n3. Try to `Enable Incoming` and save\r\n4. After \"saving\" the e-mail account go to the domain and change the imap-port from 143 to 993 and check SSL.\r\n5. 
The `incoming_port` in Email-account is still 143.\r\n\r\n### Observed result\r\nIn the database you can see that the `incoming_port` in the e-mail account is still 143 (real domain and mail addresses hidden):\r\n\r\n```\r\nselect\r\n ea.email_id,\r\n ea.domain,\r\n ea.incoming_port,\r\n ed.incoming_port,\r\n ea.email_server,\r\n ed.email_server\r\nfrom \r\n `tabEmail Account` ea,\r\n `tabEmail Domain` ed\r\nwhere ea.domain = ed.name\r\n and ed.name = \"example.com\";\r\n```\r\n\r\n#### Before updating the IMAP port in the domain\r\n```\r\n+------------------+-------------+---------------+---------------+--------------+--------------+\r\n| email_id | domain | incoming_port | incoming_port | email_server | email_server |\r\n+------------------+-------------+---------------+---------------+--------------+--------------+\r\n| [email protected] | example.com | 143 | 143 | example.com | example.com |\r\n+------------------+-------------+---------------+---------------+--------------+--------------+\r\n1 row in set (0.000 sec)\r\n```\r\n#### After updating the IMAP port in the domain\r\n```\r\n+------------------+-------------+---------------+---------------+--------------+--------------+\r\n| email_id | domain | incoming_port | incoming_port | email_server | email_server |\r\n+------------------+-------------+---------------+---------------+--------------+--------------+\r\n| [email protected] | example.com | 143 | 993 | example.com | example.com |\r\n+------------------+-------------+---------------+---------------+--------------+--------------+\r\n1 row in set (0.001 sec)\r\n```\r\nNow it will always trigger an SSL-handshake-error if the scheduler tries to get access.\r\n\r\n### Expected result\r\nWhen the mail domain gets updated all necessary fields related to e-mail account should be updated including the `incoming_port`.\r\n### Stacktrace / full error message\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/app.py\", line 64, in application\r\n response = frappe.api.handle()\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/api.py\", line 59, in handle\r\n return frappe.handler.handle()\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/handler.py\", line 24, in handle\r\n data = execute_cmd(cmd)\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/handler.py\", line 63, in execute_cmd\r\n return frappe.call(method, **frappe.form_dict)\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/__init__.py\", line 1055, in call\r\n return fn(*args, **newargs)\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/desk/form/save.py\", line 21, in savedocs\r\n doc.save()\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py\", line 273, in save\r\n return self._save(*args, **kwargs)\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py\", line 309, in _save\r\n self.run_before_save_methods()\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py\", line 896, in run_before_save_methods\r\n self.run_method(\"validate\")\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py\", line 797, in run_method\r\n out = Document.hook(fn)(self, *args, **kwargs)\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py\", line 1073, in composer\r\n return composed(self, method, *args, **kwargs)\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py\", line 1056, in runner\r\n add_to_return_value(self, fn(self, 
*args, **kwargs))\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/model/document.py\", line 791, in <lambda>\r\n fn = lambda self, *args, **kwargs: getattr(self, method)(*args, **kwargs)\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/email/doctype/email_account/email_account.py\", line 68, in validate\r\n self.get_incoming_server()\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/email/doctype/email_account/email_account.py\", line 168, in get_incoming_server\r\n email_server.connect()\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/email/receive.py\", line 43, in connect\r\n return self.connect_imap()\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/email/receive.py\", line 51, in connect_imap\r\n self.imap = Timed_IMAP4_SSL(self.settings.host, self.settings.incoming_port, timeout=frappe.conf.get(\"pop_timeout\"))\r\n File \"/home/erpnext/frappe-bench/apps/frappe/frappe/email/receive.py\", line 564, in __init__\r\n self._super.__init__(self, *args, **kwargs)\r\n File \"/usr/lib/python3.6/imaplib.py\", line 1288, in __init__\r\n IMAP4.__init__(self, host, port)\r\n File \"/usr/lib/python3.6/imaplib.py\", line 198, in __init__\r\n self.open(host, port)\r\n File \"/usr/lib/python3.6/imaplib.py\", line 1301, in open\r\n IMAP4.open(self, host, port)\r\n File \"/usr/lib/python3.6/imaplib.py\", line 299, in open\r\n self.sock = self._create_socket()\r\n File \"/usr/lib/python3.6/imaplib.py\", line 1293, in _create_socket\r\n server_hostname=self.host)\r\n File \"/usr/lib/python3.6/ssl.py\", line 407, in wrap_socket\r\n _context=self, _session=session)\r\n File \"/usr/lib/python3.6/ssl.py\", line 817, in __init__\r\n self.do_handshake()\r\n File \"/usr/lib/python3.6/ssl.py\", line 1077, in do_handshake\r\n self._sslobj.do_handshake()\r\n File \"/usr/lib/python3.6/ssl.py\", line 689, in do_handshake\r\n self._sslobj.do_handshake()\r\nssl.SSLError: [SSL: WRONG_VERSION_NUMBER] wrong version number (_ssl.c:852)\r\n```\r\n\r\n## OS\r\n- Linux Ubuntu 18.04\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. 
and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe import _\nfrom frappe.model.document import Document\nfrom frappe.utils import validate_email_address ,cint, cstr\nimport imaplib,poplib,smtplib\nfrom frappe.email.utils import get_port\n\nclass EmailDomain(Document):\n\tdef autoname(self):\n\t\tif self.domain_name:\n\t\t\tself.name = self.domain_name\n\n\tdef validate(self):\n\t\t\"\"\"Validate email id and check POP3/IMAP and SMTP connections is enabled.\"\"\"\n\t\tif self.email_id:\n\t\t\tvalidate_email_address(self.email_id, True)\n\n\t\tif frappe.local.flags.in_patch or frappe.local.flags.in_test:\n\t\t\treturn\n\n\t\tif not frappe.local.flags.in_install and not frappe.local.flags.in_patch:\n\t\t\ttry:\n\t\t\t\tif self.use_imap:\n\t\t\t\t\tif self.use_ssl:\n\t\t\t\t\t\ttest = imaplib.IMAP4_SSL(self.email_server, port=get_port(self))\n\t\t\t\t\telse:\n\t\t\t\t\t\ttest = imaplib.IMAP4(self.email_server, port=get_port(self))\n\n\t\t\t\telse:\n\t\t\t\t\tif self.use_ssl:\n\t\t\t\t\t\ttest = poplib.POP3_SSL(self.email_server, port=get_port(self))\n\t\t\t\t\telse:\n\t\t\t\t\t\ttest = poplib.POP3(self.email_server, port=get_port(self))\n\n\t\t\texcept Exception:\n\t\t\t\tfrappe.throw(_(\"Incoming email account not correct\"))\n\n\t\t\tfinally:\n\t\t\t\ttry:\n\t\t\t\t\tif self.use_imap:\n\t\t\t\t\t\ttest.logout()\n\t\t\t\t\telse:\n\t\t\t\t\t\ttest.quit()\n\t\t\t\texcept Exception:\n\t\t\t\t\tpass\n\n\t\t\ttry:\n\t\t\t\tif self.get('use_ssl_for_outgoing'):\n\t\t\t\t\tif not self.get('smtp_port'):\n\t\t\t\t\t\tself.smtp_port = 465\n\n\t\t\t\t\tsess = smtplib.SMTP_SSL((self.smtp_server or \"\").encode('utf-8'),\n\t\t\t\t\t\t\tcint(self.smtp_port) or None)\n\t\t\t\telse:\n\t\t\t\t\tif self.use_tls and not self.smtp_port:\n\t\t\t\t\t\tself.smtp_port = 587\n\t\t\t\t\tsess = smtplib.SMTP(cstr(self.smtp_server or \"\"), cint(self.smtp_port) or None)\n\t\t\t\tsess.quit()\n\t\t\texcept Exception:\n\t\t\t\tfrappe.throw(_(\"Outgoing email account not correct\"))\n\n\tdef on_update(self):\n\t\t\"\"\"update all email accounts using this domain\"\"\"\n\t\tfor email_account in frappe.get_all(\"Email Account\", filters={\"domain\": self.name}):\n\t\t\ttry:\n\t\t\t\temail_account = frappe.get_doc(\"Email Account\", email_account.name)\n\t\t\t\tfor attr in [\"email_server\", \"use_imap\", \"use_ssl\", \"use_tls\", \"attachment_limit\", \"smtp_server\", \"smtp_port\", \"use_ssl_for_outgoing\", \"append_emails_to_sent_folder\"]:\n\t\t\t\t\temail_account.set(attr, self.get(attr, default=0))\n\t\t\t\temail_account.save()\n\n\t\t\texcept Exception as e:\n\t\t\t\tfrappe.msgprint(_(\"Error has occurred in {0}\").format(email_account.name), raise_exception=e.__class__)\n", "path": "frappe/email/doctype/email_domain/email_domain.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. 
and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe import _\nfrom frappe.model.document import Document\nfrom frappe.utils import validate_email_address ,cint, cstr\nimport imaplib,poplib,smtplib\nfrom frappe.email.utils import get_port\n\nclass EmailDomain(Document):\n\tdef autoname(self):\n\t\tif self.domain_name:\n\t\t\tself.name = self.domain_name\n\n\tdef validate(self):\n\t\t\"\"\"Validate email id and check POP3/IMAP and SMTP connections is enabled.\"\"\"\n\t\tif self.email_id:\n\t\t\tvalidate_email_address(self.email_id, True)\n\n\t\tif frappe.local.flags.in_patch or frappe.local.flags.in_test:\n\t\t\treturn\n\n\t\tif not frappe.local.flags.in_install and not frappe.local.flags.in_patch:\n\t\t\ttry:\n\t\t\t\tif self.use_imap:\n\t\t\t\t\tif self.use_ssl:\n\t\t\t\t\t\ttest = imaplib.IMAP4_SSL(self.email_server, port=get_port(self))\n\t\t\t\t\telse:\n\t\t\t\t\t\ttest = imaplib.IMAP4(self.email_server, port=get_port(self))\n\n\t\t\t\telse:\n\t\t\t\t\tif self.use_ssl:\n\t\t\t\t\t\ttest = poplib.POP3_SSL(self.email_server, port=get_port(self))\n\t\t\t\t\telse:\n\t\t\t\t\t\ttest = poplib.POP3(self.email_server, port=get_port(self))\n\n\t\t\texcept Exception:\n\t\t\t\tfrappe.throw(_(\"Incoming email account not correct\"))\n\n\t\t\tfinally:\n\t\t\t\ttry:\n\t\t\t\t\tif self.use_imap:\n\t\t\t\t\t\ttest.logout()\n\t\t\t\t\telse:\n\t\t\t\t\t\ttest.quit()\n\t\t\t\texcept Exception:\n\t\t\t\t\tpass\n\n\t\t\ttry:\n\t\t\t\tif self.get('use_ssl_for_outgoing'):\n\t\t\t\t\tif not self.get('smtp_port'):\n\t\t\t\t\t\tself.smtp_port = 465\n\n\t\t\t\t\tsess = smtplib.SMTP_SSL((self.smtp_server or \"\").encode('utf-8'),\n\t\t\t\t\t\t\tcint(self.smtp_port) or None)\n\t\t\t\telse:\n\t\t\t\t\tif self.use_tls and not self.smtp_port:\n\t\t\t\t\t\tself.smtp_port = 587\n\t\t\t\t\tsess = smtplib.SMTP(cstr(self.smtp_server or \"\"), cint(self.smtp_port) or None)\n\t\t\t\tsess.quit()\n\t\t\texcept Exception:\n\t\t\t\tfrappe.throw(_(\"Outgoing email account not correct\"))\n\n\tdef on_update(self):\n\t\t\"\"\"update all email accounts using this domain\"\"\"\n\t\tfor email_account in frappe.get_all(\"Email Account\", filters={\"domain\": self.name}):\n\t\t\ttry:\n\t\t\t\temail_account = frappe.get_doc(\"Email Account\", email_account.name)\n\t\t\t\tfor attr in [\"email_server\", \"use_imap\", \"use_ssl\", \"use_tls\", \"attachment_limit\", \"smtp_server\", \"smtp_port\", \"use_ssl_for_outgoing\", \"append_emails_to_sent_folder\", \"incoming_port\"]:\n\t\t\t\t\temail_account.set(attr, self.get(attr, default=0))\n\t\t\t\temail_account.save()\n\n\t\t\texcept Exception as e:\n\t\t\t\tfrappe.msgprint(_(\"Error has occurred in {0}\").format(email_account.name), raise_exception=e.__class__)\n", "path": "frappe/email/doctype/email_domain/email_domain.py"}]}
| 2,916 | 223 |
| gh_patches_debug_15025 | rasdani/github-patches | git_diff | svthalia__concrexit-2069 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Vacancy detail view in API does not work
### Describe the bug
The API detail view for vacancies seem to be broken.
### How to reproduce
Steps to reproduce the behaviour:
1. Go to `/api/v2/partners/vacancies/1/`
2. Crash!
### Expected behaviour
Should work.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/partners/api/v2/views.py`
Content:
```
1 from django.db.models import query
2 from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope
3 from rest_framework import filters as framework_filters
4 from rest_framework.generics import ListAPIView, RetrieveAPIView
5
6 from partners.api.v2 import filters
7 from partners.api.v2.serializers.partner import PartnerSerializer
8 from partners.api.v2.serializers.partner_event import PartnerEventSerializer
9 from partners.api.v2.serializers.vacancy import VacancySerializer
10 from partners.models import PartnerEvent, Partner, Vacancy
11
12
13 class PartnerEventListView(ListAPIView):
14 """Returns an overview of all partner events."""
15
16 serializer_class = PartnerEventSerializer
17 queryset = PartnerEvent.objects.filter(published=True)
18 filter_backends = (
19 framework_filters.OrderingFilter,
20 framework_filters.SearchFilter,
21 filters.PartnerEventDateFilter,
22 )
23 ordering_fields = ("start", "end", "title")
24 search_fields = ("title",)
25 permission_classes = [IsAuthenticatedOrTokenHasScope]
26 required_scopes = ["partners:read"]
27
28
29 class PartnerEventDetailView(RetrieveAPIView):
30 """Returns a single partner event."""
31
32 serializer_class = PartnerEventSerializer
33 queryset = PartnerEvent.objects.filter(published=True)
34 permission_classes = [IsAuthenticatedOrTokenHasScope]
35 required_scopes = ["partners:read"]
36
37
38 class PartnerListView(ListAPIView):
39 """Returns an overview of all partners."""
40
41 serializer_class = PartnerSerializer
42 queryset = Partner.objects.filter(is_active=True)
43 filter_backends = (
44 framework_filters.OrderingFilter,
45 framework_filters.SearchFilter,
46 )
47 ordering_fields = ("name", "pk")
48 search_fields = ("name",)
49 permission_classes = [IsAuthenticatedOrTokenHasScope]
50 required_scopes = ["partners:read"]
51
52
53 class PartnerDetailView(RetrieveAPIView):
54 """Returns a single partner."""
55
56 serializer_class = PartnerSerializer
57 queryset = Partner.objects.filter(is_active=True)
58 permission_classes = [IsAuthenticatedOrTokenHasScope]
59 required_scopes = ["partners:read"]
60
61
62 class VacancyListView(ListAPIView):
63 """Returns an overview of all vacancies."""
64
65 serializer_class = VacancySerializer
66 queryset = Vacancy.objects.all()
67 filter_backends = (
68 framework_filters.OrderingFilter,
69 framework_filters.SearchFilter,
70 filters.VacancyPartnerFilter,
71 )
72 ordering_fields = ("title", "pk")
73 search_fields = (
74 "title",
75 "company_name",
76 )
77 permission_classes = [IsAuthenticatedOrTokenHasScope]
78 required_scopes = ["partners:read"]
79
80
81 class VacancyDetailView(RetrieveAPIView):
82 """Returns a single vacancy."""
83
84 serializer_class = VacancySerializer
85 queryset = Partner.objects.all()
86 permission_classes = [IsAuthenticatedOrTokenHasScope]
87 required_scopes = ["partners:read"]
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/website/partners/api/v2/views.py b/website/partners/api/v2/views.py
--- a/website/partners/api/v2/views.py
+++ b/website/partners/api/v2/views.py
@@ -1,4 +1,3 @@
-from django.db.models import query
from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope
from rest_framework import filters as framework_filters
from rest_framework.generics import ListAPIView, RetrieveAPIView
@@ -82,6 +81,6 @@
"""Returns a single vacancy."""
serializer_class = VacancySerializer
- queryset = Partner.objects.all()
+ queryset = Vacancy.objects.all()
permission_classes = [IsAuthenticatedOrTokenHasScope]
required_scopes = ["partners:read"]
|
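As an aside (not part of the record): the mismatch is easy to see from a Django shell, using only the imports already present in the view module and assuming the database holds at least one partner and one vacancy.
```python
# python manage.py shell -- illustrative only
from partners.models import Partner, Vacancy
from partners.api.v2.serializers.vacancy import VacancySerializer

# What the broken detail view resolved: a Partner instance, because its
# queryset was Partner.objects.all(), so vacancy pks were looked up against
# partners and the serializer received the wrong model.
VacancySerializer(Partner.objects.first()).data   # fails / yields wrong fields

# What the patched view resolves: an actual Vacancy, which serializes cleanly.
VacancySerializer(Vacancy.objects.first()).data
```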
{"golden_diff": "diff --git a/website/partners/api/v2/views.py b/website/partners/api/v2/views.py\n--- a/website/partners/api/v2/views.py\n+++ b/website/partners/api/v2/views.py\n@@ -1,4 +1,3 @@\n-from django.db.models import query\n from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\n from rest_framework import filters as framework_filters\n from rest_framework.generics import ListAPIView, RetrieveAPIView\n@@ -82,6 +81,6 @@\n \"\"\"Returns a single vacancy.\"\"\"\n \n serializer_class = VacancySerializer\n- queryset = Partner.objects.all()\n+ queryset = Vacancy.objects.all()\n permission_classes = [IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"partners:read\"]\n", "issue": "Vacancy detail view in API does not work\n### Describe the bug\r\nThe API detail view for vacancies seem to be broken. \r\n\r\n### How to reproduce\r\nSteps to reproduce the behaviour:\r\n1. Go to `/api/v2/partners/vacancies/1/`\r\n2. Crash!\r\n\r\n### Expected behaviour\r\nShould work.\r\n\n", "before_files": [{"content": "from django.db.models import query\nfrom oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\nfrom rest_framework import filters as framework_filters\nfrom rest_framework.generics import ListAPIView, RetrieveAPIView\n\nfrom partners.api.v2 import filters\nfrom partners.api.v2.serializers.partner import PartnerSerializer\nfrom partners.api.v2.serializers.partner_event import PartnerEventSerializer\nfrom partners.api.v2.serializers.vacancy import VacancySerializer\nfrom partners.models import PartnerEvent, Partner, Vacancy\n\n\nclass PartnerEventListView(ListAPIView):\n \"\"\"Returns an overview of all partner events.\"\"\"\n\n serializer_class = PartnerEventSerializer\n queryset = PartnerEvent.objects.filter(published=True)\n filter_backends = (\n framework_filters.OrderingFilter,\n framework_filters.SearchFilter,\n filters.PartnerEventDateFilter,\n )\n ordering_fields = (\"start\", \"end\", \"title\")\n search_fields = (\"title\",)\n permission_classes = [IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"partners:read\"]\n\n\nclass PartnerEventDetailView(RetrieveAPIView):\n \"\"\"Returns a single partner event.\"\"\"\n\n serializer_class = PartnerEventSerializer\n queryset = PartnerEvent.objects.filter(published=True)\n permission_classes = [IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"partners:read\"]\n\n\nclass PartnerListView(ListAPIView):\n \"\"\"Returns an overview of all partners.\"\"\"\n\n serializer_class = PartnerSerializer\n queryset = Partner.objects.filter(is_active=True)\n filter_backends = (\n framework_filters.OrderingFilter,\n framework_filters.SearchFilter,\n )\n ordering_fields = (\"name\", \"pk\")\n search_fields = (\"name\",)\n permission_classes = [IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"partners:read\"]\n\n\nclass PartnerDetailView(RetrieveAPIView):\n \"\"\"Returns a single partner.\"\"\"\n\n serializer_class = PartnerSerializer\n queryset = Partner.objects.filter(is_active=True)\n permission_classes = [IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"partners:read\"]\n\n\nclass VacancyListView(ListAPIView):\n \"\"\"Returns an overview of all vacancies.\"\"\"\n\n serializer_class = VacancySerializer\n queryset = Vacancy.objects.all()\n filter_backends = (\n framework_filters.OrderingFilter,\n framework_filters.SearchFilter,\n filters.VacancyPartnerFilter,\n )\n ordering_fields = (\"title\", \"pk\")\n search_fields = (\n \"title\",\n \"company_name\",\n )\n permission_classes = 
[IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"partners:read\"]\n\n\nclass VacancyDetailView(RetrieveAPIView):\n \"\"\"Returns a single vacancy.\"\"\"\n\n serializer_class = VacancySerializer\n queryset = Partner.objects.all()\n permission_classes = [IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"partners:read\"]\n", "path": "website/partners/api/v2/views.py"}], "after_files": [{"content": "from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\nfrom rest_framework import filters as framework_filters\nfrom rest_framework.generics import ListAPIView, RetrieveAPIView\n\nfrom partners.api.v2 import filters\nfrom partners.api.v2.serializers.partner import PartnerSerializer\nfrom partners.api.v2.serializers.partner_event import PartnerEventSerializer\nfrom partners.api.v2.serializers.vacancy import VacancySerializer\nfrom partners.models import PartnerEvent, Partner, Vacancy\n\n\nclass PartnerEventListView(ListAPIView):\n \"\"\"Returns an overview of all partner events.\"\"\"\n\n serializer_class = PartnerEventSerializer\n queryset = PartnerEvent.objects.filter(published=True)\n filter_backends = (\n framework_filters.OrderingFilter,\n framework_filters.SearchFilter,\n filters.PartnerEventDateFilter,\n )\n ordering_fields = (\"start\", \"end\", \"title\")\n search_fields = (\"title\",)\n permission_classes = [IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"partners:read\"]\n\n\nclass PartnerEventDetailView(RetrieveAPIView):\n \"\"\"Returns a single partner event.\"\"\"\n\n serializer_class = PartnerEventSerializer\n queryset = PartnerEvent.objects.filter(published=True)\n permission_classes = [IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"partners:read\"]\n\n\nclass PartnerListView(ListAPIView):\n \"\"\"Returns an overview of all partners.\"\"\"\n\n serializer_class = PartnerSerializer\n queryset = Partner.objects.filter(is_active=True)\n filter_backends = (\n framework_filters.OrderingFilter,\n framework_filters.SearchFilter,\n )\n ordering_fields = (\"name\", \"pk\")\n search_fields = (\"name\",)\n permission_classes = [IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"partners:read\"]\n\n\nclass PartnerDetailView(RetrieveAPIView):\n \"\"\"Returns a single partner.\"\"\"\n\n serializer_class = PartnerSerializer\n queryset = Partner.objects.filter(is_active=True)\n permission_classes = [IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"partners:read\"]\n\n\nclass VacancyListView(ListAPIView):\n \"\"\"Returns an overview of all vacancies.\"\"\"\n\n serializer_class = VacancySerializer\n queryset = Vacancy.objects.all()\n filter_backends = (\n framework_filters.OrderingFilter,\n framework_filters.SearchFilter,\n filters.VacancyPartnerFilter,\n )\n ordering_fields = (\"title\", \"pk\")\n search_fields = (\n \"title\",\n \"company_name\",\n )\n permission_classes = [IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"partners:read\"]\n\n\nclass VacancyDetailView(RetrieveAPIView):\n \"\"\"Returns a single vacancy.\"\"\"\n\n serializer_class = VacancySerializer\n queryset = Vacancy.objects.all()\n permission_classes = [IsAuthenticatedOrTokenHasScope]\n required_scopes = [\"partners:read\"]\n", "path": "website/partners/api/v2/views.py"}]}
| 1,096 | 168 |
| gh_patches_debug_38588 | rasdani/github-patches | git_diff | pytorch__ignite-621 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve TQDM, progress bar value formatting
Idea is to use tqdm's value renderer instead of using current implementation. Newer version will looks like:
```python
import tqdm
with tqdm.tqdm() as pbar:
pbar.set_postfix({'a': 123, 'b': 12.3456788, 'd': 0.12345, 'c': torch.tensor(123).item(), 'e': 'text', 'f': lambda x: x})
```
out:
```
0it [00:00, ?it/s, a=123, b=12.3, d=0.123, c=123, e=text, f=<function <lambda> at 0x1234>]
```
This will help to better display integer values as `123` instead of `1.23e+02`.
cc @miguelvr
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ignite/contrib/handlers/tqdm_logger.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import numbers
3 import warnings
4
5 import torch
6
7 from ignite.engine import Events
8
9 from ignite.contrib.handlers.base_logger import BaseLogger, BaseOutputHandler
10
11
12 class ProgressBar(BaseLogger):
13 """
14 TQDM progress bar handler to log training progress and computed metrics.
15
16 Args:
17 persist (bool, optional): set to ``True`` to persist the progress bar after completion (default = ``False``)
18 bar_format (str, optional): Specify a custom bar string formatting. May impact performance.
19 [default: '{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]'].
20 Set to ``None`` to use ``tqdm`` default bar formatting: '{l_bar}{bar}{r_bar}', where
21 l_bar='{desc}: {percentage:3.0f}%|' and
22 r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'. For more details on the
23 formatting, see `tqdm docs <https://tqdm.github.io/docs/tqdm/>`_.
24 **tqdm_kwargs: kwargs passed to tqdm progress bar.
25 By default, progress bar description displays "Epoch [5/10]" where 5 is the current epoch and 10 is the
26 number of epochs. If tqdm_kwargs defines `desc`, e.g. "Predictions", than the description is
27 "Predictions [5/10]" if number of epochs is more than one otherwise it is simply "Predictions".
28
29 Examples:
30
31 Simple progress bar
32
33 .. code-block:: python
34
35 trainer = create_supervised_trainer(model, optimizer, loss)
36
37 pbar = ProgressBar()
38 pbar.attach(trainer)
39
40 # Progress bar will looks like
41 # Epoch [2/50]: [64/128] 50%|█████ [06:17<12:34]
42
43 Attach metrics that already have been computed at :attr:`~ignite.engine.Events.ITERATION_COMPLETED`
44 (such as :class:`~ignite.metrics.RunningAverage`)
45
46 .. code-block:: python
47
48 trainer = create_supervised_trainer(model, optimizer, loss)
49
50 RunningAverage(output_transform=lambda x: x).attach(trainer, 'loss')
51
52 pbar = ProgressBar()
53 pbar.attach(trainer, ['loss'])
54
55 # Progress bar will looks like
56 # Epoch [2/50]: [64/128] 50%|█████ , loss=12.34e-02 [06:17<12:34]
57
58 Directly attach the engine's output
59
60 .. code-block:: python
61
62 trainer = create_supervised_trainer(model, optimizer, loss)
63
64 pbar = ProgressBar()
65 pbar.attach(trainer, output_transform=lambda x: {'loss': x})
66
67 # Progress bar will looks like
68 # Epoch [2/50]: [64/128] 50%|█████ , loss=12.34e-02 [06:17<12:34]
69
70 Note:
71 When adding attaching the progress bar to an engine, it is recommend that you replace
72 every print operation in the engine's handlers triggered every iteration with
73 ``pbar.log_message`` to guarantee the correct format of the stdout.
74
75 Note:
76 When using inside jupyter notebook, `ProgressBar` automatically uses `tqdm_notebook`. For correct rendering,
77 please install `ipywidgets <https://ipywidgets.readthedocs.io/en/stable/user_install.html#installation>`_.
78 Due to `tqdm notebook bugs <https://github.com/tqdm/tqdm/issues/594>`_, bar format may be needed to be set
79 to an empty string value.
80
81 """
82
83 events_order = [
84 Events.STARTED,
85 Events.EPOCH_STARTED,
86 Events.ITERATION_STARTED,
87 Events.ITERATION_COMPLETED,
88 Events.EPOCH_COMPLETED,
89 Events.COMPLETED
90 ]
91
92 def __init__(self, persist=False,
93 bar_format='{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]',
94 **tqdm_kwargs):
95
96 try:
97 from tqdm.autonotebook import tqdm
98 except ImportError:
99 raise RuntimeError("This contrib module requires tqdm to be installed. "
100 "Please install it with command: \n pip install tqdm")
101
102 self.pbar_cls = tqdm
103 self.pbar = None
104 self.persist = persist
105 self.bar_format = bar_format
106 self.tqdm_kwargs = tqdm_kwargs
107
108 def _reset(self, pbar_total):
109 self.pbar = self.pbar_cls(
110 total=pbar_total,
111 leave=self.persist,
112 bar_format=self.bar_format,
113 **self.tqdm_kwargs
114 )
115
116 def _close(self, engine):
117 if self.pbar:
118 self.pbar.close()
119 self.pbar = None
120
121 @staticmethod
122 def _compare_lt(event1, event2):
123 i1 = ProgressBar.events_order.index(event1)
124 i2 = ProgressBar.events_order.index(event2)
125 return i1 < i2
126
127 @staticmethod
128 def log_message(message):
129 """
130 Logs a message, preserving the progress bar correct output format.
131
132 Args:
133 message (str): string you wish to log.
134 """
135 from tqdm import tqdm
136 tqdm.write(message)
137
138 def attach(self, engine, metric_names=None, output_transform=None,
139 event_name=Events.ITERATION_COMPLETED,
140 closing_event_name=Events.EPOCH_COMPLETED):
141 """
142 Attaches the progress bar to an engine object.
143
144 Args:
145 engine (Engine): engine object.
146 metric_names (list of str, optional): list of metric names to plot or a string "all" to plot all available
147 metrics.
148 output_transform (callable, optional): a function to select what you want to print from the engine's
149 output. This function may return either a dictionary with entries in the format of ``{name: value}``,
150 or a single scalar, which will be displayed with the default name `output`.
151 event_name: event's name on which the progress bar advances. Valid events are from
152 :class:`~ignite.engine.Events`.
153 closing_event_name: event's name on which the progress bar is closed. Valid events are from
154 :class:`~ignite.engine.Events`.
155
156 Note: accepted output value types are numbers, 0d and 1d torch tensors and strings
157
158 """
159 desc = self.tqdm_kwargs.get("desc", "Epoch")
160
161 if not (event_name in Events and closing_event_name in Events):
162 raise ValueError("Logging and closing events should be only ignite.engine.Events")
163
164 if not self._compare_lt(event_name, closing_event_name):
165 raise ValueError("Logging event {} should be called before closing event {}"
166 .format(event_name, closing_event_name))
167
168 log_handler = _OutputHandler(desc, metric_names, output_transform,
169 event_name=event_name,
170 closing_event_name=closing_event_name)
171 super(ProgressBar, self).attach(engine, log_handler, event_name)
172 engine.add_event_handler(closing_event_name, self._close)
173
174
175 class _OutputHandler(BaseOutputHandler):
176 """Helper handler to log engine's output and/or metrics
177
178 Args:
179 description (str): progress bar description.
180 metric_names (list of str, optional): list of metric names to plot or a string "all" to plot all available
181 metrics.
182 output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number.
183 For example, `output_transform = lambda output: output`
184 This function can also return a dictionary, e.g `{'loss': loss1, `another_loss`: loss2}` to label the plot
185 with corresponding keys.
186 event_name: event's name on which the progress bar advances. Valid events are from
187 :class:`~ignite.engine.Events` or any `event_name` added by
188 :meth:`~ignite.engine.Engine.register_events`.
189 closing_event_name: event's name on which the progress bar is closed. Valid events are from
190 :class:`~ignite.engine.Events` or any `event_name` added by
191 :meth:`~ignite.engine.Engine.register_events`.
192
193 """
194 def __init__(self, description, metric_names=None, output_transform=None,
195 event_name=Events.ITERATION_COMPLETED,
196 closing_event_name=Events.EPOCH_COMPLETED):
197 if metric_names is None and output_transform is None:
198 # This helps to avoid 'Either metric_names or output_transform should be defined' of BaseOutputHandler
199 metric_names = []
200 super(_OutputHandler, self).__init__(description, metric_names, output_transform,
201 another_engine=None, global_step_transform=None)
202 self.event_name = event_name
203 self.closing_event_name = closing_event_name
204
205 @staticmethod
206 def get_max_number_events(event_name, engine):
207 if event_name in (Events.ITERATION_STARTED, Events.ITERATION_COMPLETED):
208 return len(engine.state.dataloader)
209 if event_name in (Events.EPOCH_STARTED, Events.EPOCH_COMPLETED):
210 return engine.state.max_epochs
211 return 1
212
213 def __call__(self, engine, logger, event_name):
214
215 if logger.pbar is None:
216 logger._reset(pbar_total=self.get_max_number_events(self.event_name, engine))
217
218 desc = self.tag
219 max_num_of_closing_events = self.get_max_number_events(self.closing_event_name, engine)
220 if max_num_of_closing_events > 1:
221 global_step = engine.state.get_event_attrib_value(self.closing_event_name)
222 desc += " [{}/{}]".format(global_step, max_num_of_closing_events)
223 logger.pbar.set_description(desc)
224
225 metrics = self._setup_output_metrics(engine)
226
227 rendered_metrics = {}
228 for key, value in metrics.items():
229 if isinstance(value, numbers.Number) or \
230 isinstance(value, torch.Tensor) and value.ndimension() == 0:
231 rendered_metrics[key] = "{:.2e}".format(value)
232 elif isinstance(value, torch.Tensor) and value.ndimension() == 1:
233 for i, v in enumerate(value):
234 k = "{}_{}".format(key, i)
235 rendered_metrics[k] = "{:.2e}".format(v)
236 elif isinstance(value, str):
237 rendered_metrics[key] = value
238 else:
239 warnings.warn("ProgressBar can not log "
240 "metrics value type {}".format(type(value)))
241
242 if rendered_metrics:
243 logger.pbar.set_postfix(**rendered_metrics)
244
245 logger.pbar.update()
246
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ignite/contrib/handlers/tqdm_logger.py b/ignite/contrib/handlers/tqdm_logger.py
--- a/ignite/contrib/handlers/tqdm_logger.py
+++ b/ignite/contrib/handlers/tqdm_logger.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-import numbers
import warnings
import torch
@@ -53,7 +52,7 @@
pbar.attach(trainer, ['loss'])
# Progress bar will looks like
- # Epoch [2/50]: [64/128] 50%|█████ , loss=12.34e-02 [06:17<12:34]
+ # Epoch [2/50]: [64/128] 50%|█████ , loss=0.123 [06:17<12:34]
Directly attach the engine's output
@@ -65,7 +64,7 @@
pbar.attach(trainer, output_transform=lambda x: {'loss': x})
# Progress bar will looks like
- # Epoch [2/50]: [64/128] 50%|█████ , loss=12.34e-02 [06:17<12:34]
+ # Epoch [2/50]: [64/128] 50%|█████ , loss=0.123 [06:17<12:34]
Note:
When adding attaching the progress bar to an engine, it is recommend that you replace
@@ -91,6 +90,7 @@
def __init__(self, persist=False,
bar_format='{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]',
+
**tqdm_kwargs):
try:
@@ -226,18 +226,18 @@
rendered_metrics = {}
for key, value in metrics.items():
- if isinstance(value, numbers.Number) or \
- isinstance(value, torch.Tensor) and value.ndimension() == 0:
- rendered_metrics[key] = "{:.2e}".format(value)
- elif isinstance(value, torch.Tensor) and value.ndimension() == 1:
- for i, v in enumerate(value):
- k = "{}_{}".format(key, i)
- rendered_metrics[k] = "{:.2e}".format(v)
- elif isinstance(value, str):
- rendered_metrics[key] = value
+ if isinstance(value, torch.Tensor):
+ if value.ndimension() == 0:
+ rendered_metrics[key] = value.item()
+ elif value.ndimension() == 1:
+ for i, v in enumerate(value):
+ k = "{}_{}".format(key, i)
+ rendered_metrics[k] = v.item()
+ else:
+ warnings.warn("ProgressBar can not log "
+ "tensor with {} dimensions".format(value.ndimension()))
else:
- warnings.warn("ProgressBar can not log "
- "metrics value type {}".format(type(value)))
+ rendered_metrics[key] = value
if rendered_metrics:
logger.pbar.set_postfix(**rendered_metrics)
|
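An illustrative aside, not part of the record: a standalone comparison of the two formatting strategies, mirroring the snippet from the issue (plain `tqdm` plus `torch`, independent of ignite).
```python
import torch
import tqdm

values = {"a": 123, "b": 12.3456788, "d": 0.12345, "c": torch.tensor(123).item()}

with tqdm.tqdm() as pbar:
    # Old handler behaviour: every metric pre-formatted as scientific notation.
    pbar.set_postfix({k: "{:.2e}".format(v) for k, v in values.items()})
    # postfix renders as: a=1.23e+02, b=1.23e+01, d=1.23e-01, c=1.23e+02

with tqdm.tqdm() as pbar:
    # Patched behaviour: raw numbers go straight to tqdm, which picks the format.
    pbar.set_postfix(values)
    # postfix renders as: a=123, b=12.3, d=0.123, c=123
```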
{"golden_diff": "diff --git a/ignite/contrib/handlers/tqdm_logger.py b/ignite/contrib/handlers/tqdm_logger.py\n--- a/ignite/contrib/handlers/tqdm_logger.py\n+++ b/ignite/contrib/handlers/tqdm_logger.py\n@@ -1,5 +1,4 @@\n # -*- coding: utf-8 -*-\n-import numbers\n import warnings\n \n import torch\n@@ -53,7 +52,7 @@\n pbar.attach(trainer, ['loss'])\n \n # Progress bar will looks like\n- # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 , loss=12.34e-02 [06:17<12:34]\n+ # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 , loss=0.123 [06:17<12:34]\n \n Directly attach the engine's output\n \n@@ -65,7 +64,7 @@\n pbar.attach(trainer, output_transform=lambda x: {'loss': x})\n \n # Progress bar will looks like\n- # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 , loss=12.34e-02 [06:17<12:34]\n+ # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 , loss=0.123 [06:17<12:34]\n \n Note:\n When adding attaching the progress bar to an engine, it is recommend that you replace\n@@ -91,6 +90,7 @@\n \n def __init__(self, persist=False,\n bar_format='{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]',\n+\n **tqdm_kwargs):\n \n try:\n@@ -226,18 +226,18 @@\n \n rendered_metrics = {}\n for key, value in metrics.items():\n- if isinstance(value, numbers.Number) or \\\n- isinstance(value, torch.Tensor) and value.ndimension() == 0:\n- rendered_metrics[key] = \"{:.2e}\".format(value)\n- elif isinstance(value, torch.Tensor) and value.ndimension() == 1:\n- for i, v in enumerate(value):\n- k = \"{}_{}\".format(key, i)\n- rendered_metrics[k] = \"{:.2e}\".format(v)\n- elif isinstance(value, str):\n- rendered_metrics[key] = value\n+ if isinstance(value, torch.Tensor):\n+ if value.ndimension() == 0:\n+ rendered_metrics[key] = value.item()\n+ elif value.ndimension() == 1:\n+ for i, v in enumerate(value):\n+ k = \"{}_{}\".format(key, i)\n+ rendered_metrics[k] = v.item()\n+ else:\n+ warnings.warn(\"ProgressBar can not log \"\n+ \"tensor with {} dimensions\".format(value.ndimension()))\n else:\n- warnings.warn(\"ProgressBar can not log \"\n- \"metrics value type {}\".format(type(value)))\n+ rendered_metrics[key] = value\n \n if rendered_metrics:\n logger.pbar.set_postfix(**rendered_metrics)\n", "issue": "Improve TQDM, progress bar value formatting\nIdea is to use tqdm's value renderer instead of using current implementation. Newer version will looks like:\r\n```python\r\nimport tqdm\r\nwith tqdm.tqdm() as pbar:\r\n pbar.set_postfix({'a': 123, 'b': 12.3456788, 'd': 0.12345, 'c': torch.tensor(123).item(), 'e': 'text', 'f': lambda x: x})\r\n```\r\nout:\r\n```\r\n0it [00:00, ?it/s, a=123, b=12.3, d=0.123, c=123, e=text, f=<function <lambda> at 0x1234>]\r\n```\r\n\r\nThis will help to better display integer values as `123` instead of `1.23e+02`.\r\n\r\ncc @miguelvr \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport numbers\nimport warnings\n\nimport torch\n\nfrom ignite.engine import Events\n\nfrom ignite.contrib.handlers.base_logger import BaseLogger, BaseOutputHandler\n\n\nclass ProgressBar(BaseLogger):\n \"\"\"\n TQDM progress bar handler to log training progress and computed metrics.\n\n Args:\n persist (bool, optional): set to ``True`` to persist the progress bar after completion (default = ``False``)\n bar_format (str, optional): Specify a custom bar string formatting. 
May impact performance.\n [default: '{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]'].\n Set to ``None`` to use ``tqdm`` default bar formatting: '{l_bar}{bar}{r_bar}', where\n l_bar='{desc}: {percentage:3.0f}%|' and\n r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'. For more details on the\n formatting, see `tqdm docs <https://tqdm.github.io/docs/tqdm/>`_.\n **tqdm_kwargs: kwargs passed to tqdm progress bar.\n By default, progress bar description displays \"Epoch [5/10]\" where 5 is the current epoch and 10 is the\n number of epochs. If tqdm_kwargs defines `desc`, e.g. \"Predictions\", than the description is\n \"Predictions [5/10]\" if number of epochs is more than one otherwise it is simply \"Predictions\".\n\n Examples:\n\n Simple progress bar\n\n .. code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n pbar = ProgressBar()\n pbar.attach(trainer)\n\n # Progress bar will looks like\n # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 [06:17<12:34]\n\n Attach metrics that already have been computed at :attr:`~ignite.engine.Events.ITERATION_COMPLETED`\n (such as :class:`~ignite.metrics.RunningAverage`)\n\n .. code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n RunningAverage(output_transform=lambda x: x).attach(trainer, 'loss')\n\n pbar = ProgressBar()\n pbar.attach(trainer, ['loss'])\n\n # Progress bar will looks like\n # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 , loss=12.34e-02 [06:17<12:34]\n\n Directly attach the engine's output\n\n .. code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n pbar = ProgressBar()\n pbar.attach(trainer, output_transform=lambda x: {'loss': x})\n\n # Progress bar will looks like\n # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 , loss=12.34e-02 [06:17<12:34]\n\n Note:\n When adding attaching the progress bar to an engine, it is recommend that you replace\n every print operation in the engine's handlers triggered every iteration with\n ``pbar.log_message`` to guarantee the correct format of the stdout.\n\n Note:\n When using inside jupyter notebook, `ProgressBar` automatically uses `tqdm_notebook`. For correct rendering,\n please install `ipywidgets <https://ipywidgets.readthedocs.io/en/stable/user_install.html#installation>`_.\n Due to `tqdm notebook bugs <https://github.com/tqdm/tqdm/issues/594>`_, bar format may be needed to be set\n to an empty string value.\n\n \"\"\"\n\n events_order = [\n Events.STARTED,\n Events.EPOCH_STARTED,\n Events.ITERATION_STARTED,\n Events.ITERATION_COMPLETED,\n Events.EPOCH_COMPLETED,\n Events.COMPLETED\n ]\n\n def __init__(self, persist=False,\n bar_format='{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]',\n **tqdm_kwargs):\n\n try:\n from tqdm.autonotebook import tqdm\n except ImportError:\n raise RuntimeError(\"This contrib module requires tqdm to be installed. 
\"\n \"Please install it with command: \\n pip install tqdm\")\n\n self.pbar_cls = tqdm\n self.pbar = None\n self.persist = persist\n self.bar_format = bar_format\n self.tqdm_kwargs = tqdm_kwargs\n\n def _reset(self, pbar_total):\n self.pbar = self.pbar_cls(\n total=pbar_total,\n leave=self.persist,\n bar_format=self.bar_format,\n **self.tqdm_kwargs\n )\n\n def _close(self, engine):\n if self.pbar:\n self.pbar.close()\n self.pbar = None\n\n @staticmethod\n def _compare_lt(event1, event2):\n i1 = ProgressBar.events_order.index(event1)\n i2 = ProgressBar.events_order.index(event2)\n return i1 < i2\n\n @staticmethod\n def log_message(message):\n \"\"\"\n Logs a message, preserving the progress bar correct output format.\n\n Args:\n message (str): string you wish to log.\n \"\"\"\n from tqdm import tqdm\n tqdm.write(message)\n\n def attach(self, engine, metric_names=None, output_transform=None,\n event_name=Events.ITERATION_COMPLETED,\n closing_event_name=Events.EPOCH_COMPLETED):\n \"\"\"\n Attaches the progress bar to an engine object.\n\n Args:\n engine (Engine): engine object.\n metric_names (list of str, optional): list of metric names to plot or a string \"all\" to plot all available\n metrics.\n output_transform (callable, optional): a function to select what you want to print from the engine's\n output. This function may return either a dictionary with entries in the format of ``{name: value}``,\n or a single scalar, which will be displayed with the default name `output`.\n event_name: event's name on which the progress bar advances. Valid events are from\n :class:`~ignite.engine.Events`.\n closing_event_name: event's name on which the progress bar is closed. Valid events are from\n :class:`~ignite.engine.Events`.\n\n Note: accepted output value types are numbers, 0d and 1d torch tensors and strings\n\n \"\"\"\n desc = self.tqdm_kwargs.get(\"desc\", \"Epoch\")\n\n if not (event_name in Events and closing_event_name in Events):\n raise ValueError(\"Logging and closing events should be only ignite.engine.Events\")\n\n if not self._compare_lt(event_name, closing_event_name):\n raise ValueError(\"Logging event {} should be called before closing event {}\"\n .format(event_name, closing_event_name))\n\n log_handler = _OutputHandler(desc, metric_names, output_transform,\n event_name=event_name,\n closing_event_name=closing_event_name)\n super(ProgressBar, self).attach(engine, log_handler, event_name)\n engine.add_event_handler(closing_event_name, self._close)\n\n\nclass _OutputHandler(BaseOutputHandler):\n \"\"\"Helper handler to log engine's output and/or metrics\n\n Args:\n description (str): progress bar description.\n metric_names (list of str, optional): list of metric names to plot or a string \"all\" to plot all available\n metrics.\n output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number.\n For example, `output_transform = lambda output: output`\n This function can also return a dictionary, e.g `{'loss': loss1, `another_loss`: loss2}` to label the plot\n with corresponding keys.\n event_name: event's name on which the progress bar advances. Valid events are from\n :class:`~ignite.engine.Events` or any `event_name` added by\n :meth:`~ignite.engine.Engine.register_events`.\n closing_event_name: event's name on which the progress bar is closed. 
Valid events are from\n :class:`~ignite.engine.Events` or any `event_name` added by\n :meth:`~ignite.engine.Engine.register_events`.\n\n \"\"\"\n def __init__(self, description, metric_names=None, output_transform=None,\n event_name=Events.ITERATION_COMPLETED,\n closing_event_name=Events.EPOCH_COMPLETED):\n if metric_names is None and output_transform is None:\n # This helps to avoid 'Either metric_names or output_transform should be defined' of BaseOutputHandler\n metric_names = []\n super(_OutputHandler, self).__init__(description, metric_names, output_transform,\n another_engine=None, global_step_transform=None)\n self.event_name = event_name\n self.closing_event_name = closing_event_name\n\n @staticmethod\n def get_max_number_events(event_name, engine):\n if event_name in (Events.ITERATION_STARTED, Events.ITERATION_COMPLETED):\n return len(engine.state.dataloader)\n if event_name in (Events.EPOCH_STARTED, Events.EPOCH_COMPLETED):\n return engine.state.max_epochs\n return 1\n\n def __call__(self, engine, logger, event_name):\n\n if logger.pbar is None:\n logger._reset(pbar_total=self.get_max_number_events(self.event_name, engine))\n\n desc = self.tag\n max_num_of_closing_events = self.get_max_number_events(self.closing_event_name, engine)\n if max_num_of_closing_events > 1:\n global_step = engine.state.get_event_attrib_value(self.closing_event_name)\n desc += \" [{}/{}]\".format(global_step, max_num_of_closing_events)\n logger.pbar.set_description(desc)\n\n metrics = self._setup_output_metrics(engine)\n\n rendered_metrics = {}\n for key, value in metrics.items():\n if isinstance(value, numbers.Number) or \\\n isinstance(value, torch.Tensor) and value.ndimension() == 0:\n rendered_metrics[key] = \"{:.2e}\".format(value)\n elif isinstance(value, torch.Tensor) and value.ndimension() == 1:\n for i, v in enumerate(value):\n k = \"{}_{}\".format(key, i)\n rendered_metrics[k] = \"{:.2e}\".format(v)\n elif isinstance(value, str):\n rendered_metrics[key] = value\n else:\n warnings.warn(\"ProgressBar can not log \"\n \"metrics value type {}\".format(type(value)))\n\n if rendered_metrics:\n logger.pbar.set_postfix(**rendered_metrics)\n\n logger.pbar.update()\n", "path": "ignite/contrib/handlers/tqdm_logger.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport warnings\n\nimport torch\n\nfrom ignite.engine import Events\n\nfrom ignite.contrib.handlers.base_logger import BaseLogger, BaseOutputHandler\n\n\nclass ProgressBar(BaseLogger):\n \"\"\"\n TQDM progress bar handler to log training progress and computed metrics.\n\n Args:\n persist (bool, optional): set to ``True`` to persist the progress bar after completion (default = ``False``)\n bar_format (str, optional): Specify a custom bar string formatting. May impact performance.\n [default: '{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]'].\n Set to ``None`` to use ``tqdm`` default bar formatting: '{l_bar}{bar}{r_bar}', where\n l_bar='{desc}: {percentage:3.0f}%|' and\n r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'. For more details on the\n formatting, see `tqdm docs <https://tqdm.github.io/docs/tqdm/>`_.\n **tqdm_kwargs: kwargs passed to tqdm progress bar.\n By default, progress bar description displays \"Epoch [5/10]\" where 5 is the current epoch and 10 is the\n number of epochs. If tqdm_kwargs defines `desc`, e.g. 
\"Predictions\", than the description is\n \"Predictions [5/10]\" if number of epochs is more than one otherwise it is simply \"Predictions\".\n\n Examples:\n\n Simple progress bar\n\n .. code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n pbar = ProgressBar()\n pbar.attach(trainer)\n\n # Progress bar will looks like\n # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 [06:17<12:34]\n\n Attach metrics that already have been computed at :attr:`~ignite.engine.Events.ITERATION_COMPLETED`\n (such as :class:`~ignite.metrics.RunningAverage`)\n\n .. code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n RunningAverage(output_transform=lambda x: x).attach(trainer, 'loss')\n\n pbar = ProgressBar()\n pbar.attach(trainer, ['loss'])\n\n # Progress bar will looks like\n # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 , loss=0.123 [06:17<12:34]\n\n Directly attach the engine's output\n\n .. code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n pbar = ProgressBar()\n pbar.attach(trainer, output_transform=lambda x: {'loss': x})\n\n # Progress bar will looks like\n # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 , loss=0.123 [06:17<12:34]\n\n Note:\n When adding attaching the progress bar to an engine, it is recommend that you replace\n every print operation in the engine's handlers triggered every iteration with\n ``pbar.log_message`` to guarantee the correct format of the stdout.\n\n Note:\n When using inside jupyter notebook, `ProgressBar` automatically uses `tqdm_notebook`. For correct rendering,\n please install `ipywidgets <https://ipywidgets.readthedocs.io/en/stable/user_install.html#installation>`_.\n Due to `tqdm notebook bugs <https://github.com/tqdm/tqdm/issues/594>`_, bar format may be needed to be set\n to an empty string value.\n\n \"\"\"\n\n events_order = [\n Events.STARTED,\n Events.EPOCH_STARTED,\n Events.ITERATION_STARTED,\n Events.ITERATION_COMPLETED,\n Events.EPOCH_COMPLETED,\n Events.COMPLETED\n ]\n\n def __init__(self, persist=False,\n bar_format='{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]',\n\n **tqdm_kwargs):\n\n try:\n from tqdm.autonotebook import tqdm\n except ImportError:\n raise RuntimeError(\"This contrib module requires tqdm to be installed. 
\"\n \"Please install it with command: \\n pip install tqdm\")\n\n self.pbar_cls = tqdm\n self.pbar = None\n self.persist = persist\n self.bar_format = bar_format\n self.tqdm_kwargs = tqdm_kwargs\n\n def _reset(self, pbar_total):\n self.pbar = self.pbar_cls(\n total=pbar_total,\n leave=self.persist,\n bar_format=self.bar_format,\n **self.tqdm_kwargs\n )\n\n def _close(self, engine):\n if self.pbar:\n self.pbar.close()\n self.pbar = None\n\n @staticmethod\n def _compare_lt(event1, event2):\n i1 = ProgressBar.events_order.index(event1)\n i2 = ProgressBar.events_order.index(event2)\n return i1 < i2\n\n @staticmethod\n def log_message(message):\n \"\"\"\n Logs a message, preserving the progress bar correct output format.\n\n Args:\n message (str): string you wish to log.\n \"\"\"\n from tqdm import tqdm\n tqdm.write(message)\n\n def attach(self, engine, metric_names=None, output_transform=None,\n event_name=Events.ITERATION_COMPLETED,\n closing_event_name=Events.EPOCH_COMPLETED):\n \"\"\"\n Attaches the progress bar to an engine object.\n\n Args:\n engine (Engine): engine object.\n metric_names (list of str, optional): list of metric names to plot or a string \"all\" to plot all available\n metrics.\n output_transform (callable, optional): a function to select what you want to print from the engine's\n output. This function may return either a dictionary with entries in the format of ``{name: value}``,\n or a single scalar, which will be displayed with the default name `output`.\n event_name: event's name on which the progress bar advances. Valid events are from\n :class:`~ignite.engine.Events`.\n closing_event_name: event's name on which the progress bar is closed. Valid events are from\n :class:`~ignite.engine.Events`.\n\n Note: accepted output value types are numbers, 0d and 1d torch tensors and strings\n\n \"\"\"\n desc = self.tqdm_kwargs.get(\"desc\", \"Epoch\")\n\n if not (event_name in Events and closing_event_name in Events):\n raise ValueError(\"Logging and closing events should be only ignite.engine.Events\")\n\n if not self._compare_lt(event_name, closing_event_name):\n raise ValueError(\"Logging event {} should be called before closing event {}\"\n .format(event_name, closing_event_name))\n\n log_handler = _OutputHandler(desc, metric_names, output_transform,\n event_name=event_name,\n closing_event_name=closing_event_name)\n super(ProgressBar, self).attach(engine, log_handler, event_name)\n engine.add_event_handler(closing_event_name, self._close)\n\n\nclass _OutputHandler(BaseOutputHandler):\n \"\"\"Helper handler to log engine's output and/or metrics\n\n Args:\n description (str): progress bar description.\n metric_names (list of str, optional): list of metric names to plot or a string \"all\" to plot all available\n metrics.\n output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number.\n For example, `output_transform = lambda output: output`\n This function can also return a dictionary, e.g `{'loss': loss1, `another_loss`: loss2}` to label the plot\n with corresponding keys.\n event_name: event's name on which the progress bar advances. Valid events are from\n :class:`~ignite.engine.Events` or any `event_name` added by\n :meth:`~ignite.engine.Engine.register_events`.\n closing_event_name: event's name on which the progress bar is closed. 
Valid events are from\n :class:`~ignite.engine.Events` or any `event_name` added by\n :meth:`~ignite.engine.Engine.register_events`.\n\n \"\"\"\n def __init__(self, description, metric_names=None, output_transform=None,\n event_name=Events.ITERATION_COMPLETED,\n closing_event_name=Events.EPOCH_COMPLETED):\n if metric_names is None and output_transform is None:\n # This helps to avoid 'Either metric_names or output_transform should be defined' of BaseOutputHandler\n metric_names = []\n super(_OutputHandler, self).__init__(description, metric_names, output_transform,\n another_engine=None, global_step_transform=None)\n self.event_name = event_name\n self.closing_event_name = closing_event_name\n\n @staticmethod\n def get_max_number_events(event_name, engine):\n if event_name in (Events.ITERATION_STARTED, Events.ITERATION_COMPLETED):\n return len(engine.state.dataloader)\n if event_name in (Events.EPOCH_STARTED, Events.EPOCH_COMPLETED):\n return engine.state.max_epochs\n return 1\n\n def __call__(self, engine, logger, event_name):\n\n if logger.pbar is None:\n logger._reset(pbar_total=self.get_max_number_events(self.event_name, engine))\n\n desc = self.tag\n max_num_of_closing_events = self.get_max_number_events(self.closing_event_name, engine)\n if max_num_of_closing_events > 1:\n global_step = engine.state.get_event_attrib_value(self.closing_event_name)\n desc += \" [{}/{}]\".format(global_step, max_num_of_closing_events)\n logger.pbar.set_description(desc)\n\n metrics = self._setup_output_metrics(engine)\n\n rendered_metrics = {}\n for key, value in metrics.items():\n if isinstance(value, torch.Tensor):\n if value.ndimension() == 0:\n rendered_metrics[key] = value.item()\n elif value.ndimension() == 1:\n for i, v in enumerate(value):\n k = \"{}_{}\".format(key, i)\n rendered_metrics[k] = v.item()\n else:\n warnings.warn(\"ProgressBar can not log \"\n \"tensor with {} dimensions\".format(value.ndimension()))\n else:\n rendered_metrics[key] = value\n\n if rendered_metrics:\n logger.pbar.set_postfix(**rendered_metrics)\n\n logger.pbar.update()\n", "path": "ignite/contrib/handlers/tqdm_logger.py"}]}
| 3,448 | 754 |
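The record above ends with the before and after bodies of `_OutputHandler.__call__` from `ignite/contrib/handlers/tqdm_logger.py`; the behavioral difference is easier to see in isolation. Below is a minimal standalone sketch (not the handler itself) of the patched rendering path, which hands tqdm plain Python numbers via `.item()` instead of pre-formatted `"{:.2e}"` strings; the `render_metrics` helper name is invented for illustration.

```python
import torch

def render_metrics(metrics):
    # Mirrors the patched logic: unwrap tensors to plain numbers, pass everything else through.
    rendered = {}
    for key, value in metrics.items():
        if isinstance(value, torch.Tensor):
            if value.ndimension() == 0:
                rendered[key] = value.item()                      # scalar tensor -> float
            elif value.ndimension() == 1:
                for i, v in enumerate(value):
                    rendered["{}_{}".format(key, i)] = v.item()   # one entry per element
            # higher-dimensional tensors are skipped here; the real handler warns instead
        else:
            rendered[key] = value                                 # numbers / strings unchanged
    return rendered

print(render_metrics({"loss": torch.tensor(0.123), "acc": 0.9}))
```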
gh_patches_debug_268
|
rasdani/github-patches
|
git_diff
|
ietf-tools__datatracker-5809
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Dev mode PDFization broken
### Describe the issue
The `STATIC_IETF_ORG_INTERNAL` stuff in https://github.com/ietf-tools/datatracker/blob/2bf7e8250c3fc2fcaf9a6223c331a52d1f6d89a4/ietf/doc/models.py#L630 causes a Python error in the dev environment.
CC @NGPixel
### Code of Conduct
- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/configs/settings_local.py`
Content:
```
1 # Copyright The IETF Trust 2007-2019, All Rights Reserved
2 # -*- coding: utf-8 -*-
3
4 from ietf.settings import * # pyflakes:ignore
5
6 ALLOWED_HOSTS = ['*']
7
8 from ietf.settings_postgresqldb import DATABASES # pyflakes:ignore
9
10 IDSUBMIT_IDNITS_BINARY = "/usr/local/bin/idnits"
11 IDSUBMIT_REPOSITORY_PATH = "test/id/"
12 IDSUBMIT_STAGING_PATH = "test/staging/"
13
14 AGENDA_PATH = '/assets/www6s/proceedings/'
15 MEETINGHOST_LOGO_PATH = AGENDA_PATH
16
17 USING_DEBUG_EMAIL_SERVER=True
18 EMAIL_HOST='localhost'
19 EMAIL_PORT=2025
20
21 MEDIA_BASE_DIR = '/assets'
22 MEDIA_ROOT = MEDIA_BASE_DIR + '/media/'
23 MEDIA_URL = '/media/'
24
25 PHOTOS_DIRNAME = 'photo'
26 PHOTOS_DIR = MEDIA_ROOT + PHOTOS_DIRNAME
27
28 SUBMIT_YANG_CATALOG_MODEL_DIR = '/assets/ietf-ftp/yang/catalogmod/'
29 SUBMIT_YANG_DRAFT_MODEL_DIR = '/assets/ietf-ftp/yang/draftmod/'
30 SUBMIT_YANG_INVAL_MODEL_DIR = '/assets/ietf-ftp/yang/invalmod/'
31 SUBMIT_YANG_IANA_MODEL_DIR = '/assets/ietf-ftp/yang/ianamod/'
32 SUBMIT_YANG_RFC_MODEL_DIR = '/assets/ietf-ftp/yang/rfcmod/'
33
34 # Set INTERNAL_IPS for use within Docker. See https://knasmueller.net/fix-djangos-debug-toolbar-not-showing-inside-docker
35 import socket
36 hostname, _, ips = socket.gethostbyname_ex(socket.gethostname())
37 INTERNAL_IPS = [".".join(ip.split(".")[:-1] + ["1"]) for ip in ips] + ['127.0.0.1']
38
39 # DEV_TEMPLATE_CONTEXT_PROCESSORS = [
40 # 'ietf.context_processors.sql_debug',
41 # ]
42
43 DOCUMENT_PATH_PATTERN = '/assets/ietf-ftp/{doc.type_id}/'
44 INTERNET_DRAFT_PATH = '/assets/ietf-ftp/internet-drafts/'
45 RFC_PATH = '/assets/ietf-ftp/rfc/'
46 CHARTER_PATH = '/assets/ietf-ftp/charter/'
47 BOFREQ_PATH = '/assets/ietf-ftp/bofreq/'
48 CONFLICT_REVIEW_PATH = '/assets/ietf-ftp/conflict-reviews/'
49 STATUS_CHANGE_PATH = '/assets/ietf-ftp/status-changes/'
50 INTERNET_DRAFT_ARCHIVE_DIR = '/assets/archive/id'
51 INTERNET_ALL_DRAFTS_ARCHIVE_DIR = '/assets/archive/id'
52 BIBXML_BASE_PATH = '/assets/ietfdata/derived/bibxml'
53
54 NOMCOM_PUBLIC_KEYS_DIR = 'data/nomcom_keys/public_keys/'
55 SLIDE_STAGING_PATH = 'test/staging/'
56
57 DE_GFM_BINARY = '/usr/local/bin/de-gfm'
58
59 STATIC_IETF_ORG = "/_static"
60 STATIC_IETF_ORG_INTERNAL = "http://localhost:80"
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docker/configs/settings_local.py b/docker/configs/settings_local.py
--- a/docker/configs/settings_local.py
+++ b/docker/configs/settings_local.py
@@ -57,4 +57,4 @@
DE_GFM_BINARY = '/usr/local/bin/de-gfm'
STATIC_IETF_ORG = "/_static"
-STATIC_IETF_ORG_INTERNAL = "http://localhost:80"
+STATIC_IETF_ORG_INTERNAL = "http://static"
|
{"golden_diff": "diff --git a/docker/configs/settings_local.py b/docker/configs/settings_local.py\n--- a/docker/configs/settings_local.py\n+++ b/docker/configs/settings_local.py\n@@ -57,4 +57,4 @@\n DE_GFM_BINARY = '/usr/local/bin/de-gfm'\n \n STATIC_IETF_ORG = \"/_static\"\n-STATIC_IETF_ORG_INTERNAL = \"http://localhost:80\"\n+STATIC_IETF_ORG_INTERNAL = \"http://static\"\n", "issue": "Dev mode PDFization broken\n### Describe the issue\n\nThe `STATIC_IETF_ORG_INTERNAL` stuff in https://github.com/ietf-tools/datatracker/blob/2bf7e8250c3fc2fcaf9a6223c331a52d1f6d89a4/ietf/doc/models.py#L630 causes a Python error in the dev environment.\r\n\r\nCC @NGPixel \n\n### Code of Conduct\n\n- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)\n", "before_files": [{"content": "# Copyright The IETF Trust 2007-2019, All Rights Reserved\n# -*- coding: utf-8 -*-\n\nfrom ietf.settings import * # pyflakes:ignore\n\nALLOWED_HOSTS = ['*']\n\nfrom ietf.settings_postgresqldb import DATABASES # pyflakes:ignore\n\nIDSUBMIT_IDNITS_BINARY = \"/usr/local/bin/idnits\"\nIDSUBMIT_REPOSITORY_PATH = \"test/id/\"\nIDSUBMIT_STAGING_PATH = \"test/staging/\"\n\nAGENDA_PATH = '/assets/www6s/proceedings/'\nMEETINGHOST_LOGO_PATH = AGENDA_PATH\n\nUSING_DEBUG_EMAIL_SERVER=True\nEMAIL_HOST='localhost'\nEMAIL_PORT=2025\n\nMEDIA_BASE_DIR = '/assets'\nMEDIA_ROOT = MEDIA_BASE_DIR + '/media/'\nMEDIA_URL = '/media/'\n\nPHOTOS_DIRNAME = 'photo'\nPHOTOS_DIR = MEDIA_ROOT + PHOTOS_DIRNAME\n\nSUBMIT_YANG_CATALOG_MODEL_DIR = '/assets/ietf-ftp/yang/catalogmod/'\nSUBMIT_YANG_DRAFT_MODEL_DIR = '/assets/ietf-ftp/yang/draftmod/'\nSUBMIT_YANG_INVAL_MODEL_DIR = '/assets/ietf-ftp/yang/invalmod/'\nSUBMIT_YANG_IANA_MODEL_DIR = '/assets/ietf-ftp/yang/ianamod/'\nSUBMIT_YANG_RFC_MODEL_DIR = '/assets/ietf-ftp/yang/rfcmod/'\n\n# Set INTERNAL_IPS for use within Docker. 
See https://knasmueller.net/fix-djangos-debug-toolbar-not-showing-inside-docker\nimport socket\nhostname, _, ips = socket.gethostbyname_ex(socket.gethostname())\nINTERNAL_IPS = [\".\".join(ip.split(\".\")[:-1] + [\"1\"]) for ip in ips] + ['127.0.0.1']\n\n# DEV_TEMPLATE_CONTEXT_PROCESSORS = [\n# 'ietf.context_processors.sql_debug',\n# ]\n\nDOCUMENT_PATH_PATTERN = '/assets/ietf-ftp/{doc.type_id}/'\nINTERNET_DRAFT_PATH = '/assets/ietf-ftp/internet-drafts/'\nRFC_PATH = '/assets/ietf-ftp/rfc/'\nCHARTER_PATH = '/assets/ietf-ftp/charter/'\nBOFREQ_PATH = '/assets/ietf-ftp/bofreq/'\nCONFLICT_REVIEW_PATH = '/assets/ietf-ftp/conflict-reviews/'\nSTATUS_CHANGE_PATH = '/assets/ietf-ftp/status-changes/'\nINTERNET_DRAFT_ARCHIVE_DIR = '/assets/archive/id'\nINTERNET_ALL_DRAFTS_ARCHIVE_DIR = '/assets/archive/id'\nBIBXML_BASE_PATH = '/assets/ietfdata/derived/bibxml'\n\nNOMCOM_PUBLIC_KEYS_DIR = 'data/nomcom_keys/public_keys/'\nSLIDE_STAGING_PATH = 'test/staging/'\n\nDE_GFM_BINARY = '/usr/local/bin/de-gfm'\n\nSTATIC_IETF_ORG = \"/_static\"\nSTATIC_IETF_ORG_INTERNAL = \"http://localhost:80\"\n", "path": "docker/configs/settings_local.py"}], "after_files": [{"content": "# Copyright The IETF Trust 2007-2019, All Rights Reserved\n# -*- coding: utf-8 -*-\n\nfrom ietf.settings import * # pyflakes:ignore\n\nALLOWED_HOSTS = ['*']\n\nfrom ietf.settings_postgresqldb import DATABASES # pyflakes:ignore\n\nIDSUBMIT_IDNITS_BINARY = \"/usr/local/bin/idnits\"\nIDSUBMIT_REPOSITORY_PATH = \"test/id/\"\nIDSUBMIT_STAGING_PATH = \"test/staging/\"\n\nAGENDA_PATH = '/assets/www6s/proceedings/'\nMEETINGHOST_LOGO_PATH = AGENDA_PATH\n\nUSING_DEBUG_EMAIL_SERVER=True\nEMAIL_HOST='localhost'\nEMAIL_PORT=2025\n\nMEDIA_BASE_DIR = '/assets'\nMEDIA_ROOT = MEDIA_BASE_DIR + '/media/'\nMEDIA_URL = '/media/'\n\nPHOTOS_DIRNAME = 'photo'\nPHOTOS_DIR = MEDIA_ROOT + PHOTOS_DIRNAME\n\nSUBMIT_YANG_CATALOG_MODEL_DIR = '/assets/ietf-ftp/yang/catalogmod/'\nSUBMIT_YANG_DRAFT_MODEL_DIR = '/assets/ietf-ftp/yang/draftmod/'\nSUBMIT_YANG_INVAL_MODEL_DIR = '/assets/ietf-ftp/yang/invalmod/'\nSUBMIT_YANG_IANA_MODEL_DIR = '/assets/ietf-ftp/yang/ianamod/'\nSUBMIT_YANG_RFC_MODEL_DIR = '/assets/ietf-ftp/yang/rfcmod/'\n\n# Set INTERNAL_IPS for use within Docker. See https://knasmueller.net/fix-djangos-debug-toolbar-not-showing-inside-docker\nimport socket\nhostname, _, ips = socket.gethostbyname_ex(socket.gethostname())\nINTERNAL_IPS = [\".\".join(ip.split(\".\")[:-1] + [\"1\"]) for ip in ips] + ['127.0.0.1']\n\n# DEV_TEMPLATE_CONTEXT_PROCESSORS = [\n# 'ietf.context_processors.sql_debug',\n# ]\n\nDOCUMENT_PATH_PATTERN = '/assets/ietf-ftp/{doc.type_id}/'\nINTERNET_DRAFT_PATH = '/assets/ietf-ftp/internet-drafts/'\nRFC_PATH = '/assets/ietf-ftp/rfc/'\nCHARTER_PATH = '/assets/ietf-ftp/charter/'\nBOFREQ_PATH = '/assets/ietf-ftp/bofreq/'\nCONFLICT_REVIEW_PATH = '/assets/ietf-ftp/conflict-reviews/'\nSTATUS_CHANGE_PATH = '/assets/ietf-ftp/status-changes/'\nINTERNET_DRAFT_ARCHIVE_DIR = '/assets/archive/id'\nINTERNET_ALL_DRAFTS_ARCHIVE_DIR = '/assets/archive/id'\nBIBXML_BASE_PATH = '/assets/ietfdata/derived/bibxml'\n\nNOMCOM_PUBLIC_KEYS_DIR = 'data/nomcom_keys/public_keys/'\nSLIDE_STAGING_PATH = 'test/staging/'\n\nDE_GFM_BINARY = '/usr/local/bin/de-gfm'\n\nSTATIC_IETF_ORG = \"/_static\"\nSTATIC_IETF_ORG_INTERNAL = \"http://static\"\n", "path": "docker/configs/settings_local.py"}]}
| 1,142 | 101 |
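For context on the entry above: the golden diff only swaps the dev value of `STATIC_IETF_ORG_INTERNAL` from `http://localhost:80` to `http://static`. A hedged sketch of how that setting is consumed follows; `internal_static_url` is a hypothetical helper (the real consumer in `ietf/doc/models.py` is cited by the issue but not shown), and it assumes a configured `DJANGO_SETTINGS_MODULE` plus a `static` hostname provided by the dev compose stack.

```python
from django.conf import settings  # assumes DJANGO_SETTINGS_MODULE points at the dev settings

def internal_static_url(path):
    # settings_local star-imports the base settings and then overrides individual names,
    # so any reader of settings.STATIC_IETF_ORG_INTERNAL sees the container-internal value.
    base = settings.STATIC_IETF_ORG_INTERNAL.rstrip("/")
    return "{}/{}".format(base, path.lstrip("/"))

# e.g. internal_static_url("fonts/x.woff2") -> "http://static/fonts/x.woff2" in the dev stack
```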
gh_patches_debug_17159
|
rasdani/github-patches
|
git_diff
|
scikit-hep__uproot5-395
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Uproot fails with self-built XRootD
With Uproot 4.0.11 and a self-built XRootD with Python bindings, `import uproot` fails with `TypeError: '<' not supported between instances of 'str' and 'int'`. This is due to the following line: https://github.com/scikit-hep/uproot4/blob/d6f9bea0f1a9ca6806445b95da93efa37c5117ba/src/uproot/extras.py#L116
When one builds XRootD, the version number differs from the standard `x.y.z` - it is, e.g., `v20210712-58b374f12`, which causes `LooseVersion` to raise `TypeError`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/uproot/extras.py`
Content:
```
1 # BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE
2
3 """
4 This module defines functions that import external libraries used by Uproot, but not
5 required by an Uproot installation. (Uproot only requires NumPy).
6
7 If a library cannot be imported, these functions raise ``ImportError`` with
8 error messages containing instructions on how to install the library.
9 """
10
11 from __future__ import absolute_import
12
13 import atexit
14 import os
15 from distutils.version import LooseVersion
16
17 import pkg_resources
18
19
20 def awkward():
21 """
22 Imports and returns ``awkward``.
23 """
24 try:
25 import awkward
26 except ImportError:
27 raise ImportError(
28 """install the 'awkward' package with:
29
30 pip install awkward
31
32 Alternatively, you can use ``library="np"`` or globally set ``uproot.default_library``
33 to output as NumPy arrays, rather than Awkward arrays.
34 """
35 )
36 else:
37 return awkward
38
39
40 def pandas():
41 """
42 Imports and returns ``pandas``.
43 """
44 try:
45 import pandas
46 except ImportError:
47 raise ImportError(
48 """install the 'pandas' package with:
49
50 pip install pandas
51
52 or
53
54 conda install pandas"""
55 )
56 else:
57 return pandas
58
59
60 def XRootD_client():
61 """
62 Imports and returns ``XRootD.client`` (after setting the
63 ```XRD_RUNFORKHANDLER`` environment variable to ``"1"``, to allow
64 multiprocessing).
65 """
66 os.environ["XRD_RUNFORKHANDLER"] = "1" # set multiprocessing flag
67 try:
68 import XRootD
69 import XRootD.client
70
71 except ImportError:
72 raise ImportError(
73 """Install XRootD python bindings with:
74
75 conda install -c conda-forge xrootd
76
77 (or download from http://xrootd.org/dload.html and manually compile with """
78 """cmake; setting PYTHONPATH and LD_LIBRARY_PATH appropriately)."""
79 )
80
81 if older_xrootd("5.1.0"):
82 # This is registered after calling "import XRootD.client" so it is ran
83 # before XRootD.client.finalize.finalize()
84 @atexit.register
85 def cleanup_open_files():
86 """Clean up any open xrootd file objects at exit
87
88 Required to avoid deadlocks from XRootD, for details see:
89 * https://github.com/scikit-hep/uproot/issues/504
90 * https://github.com/xrootd/xrootd/pull/1260
91 """
92 import gc
93
94 for obj in gc.get_objects():
95 try:
96 isopen = isinstance(obj, XRootD.client.file.File) and obj.is_open()
97 except ReferenceError:
98 pass
99 else:
100 if isopen:
101 obj.close()
102
103 return XRootD.client
104
105
106 def older_xrootd(min_version):
107 """
108 Check if the installed XRootD bindings are newer than a given version
109 without importing. Defaults to False if XRootD is not installed.
110 """
111 try:
112 dist = pkg_resources.get_distribution("XRootD")
113 except pkg_resources.DistributionNotFound:
114 return False
115 else:
116 return LooseVersion(dist.version) < LooseVersion(min_version)
117
118
119 def lzma():
120 """
121 Imports and returns ``lzma`` (which is part of the Python 3 standard
122 library, but not Python 2).
123 """
124 try:
125 import lzma
126 except ImportError:
127 try:
128 import backports.lzma as lzma
129 except ImportError:
130 raise ImportError(
131 """install the 'lzma' package with:
132
133 pip install backports.lzma
134
135 or
136
137 conda install backports.lzma
138
139 or use Python >= 3.3."""
140 )
141 else:
142 return lzma
143 else:
144 return lzma
145
146
147 def lz4_block():
148 """
149 Imports and returns ``lz4``.
150
151 Attempts to import ``xxhash`` as well.
152 """
153 try:
154 import lz4.block
155 import xxhash # noqa: F401
156 except ImportError:
157 raise ImportError(
158 """install the 'lz4' and `xxhash` packages with:
159
160 pip install lz4 xxhash
161
162 or
163
164 conda install lz4 python-xxhash"""
165 )
166 else:
167 return lz4.block
168
169
170 def xxhash():
171 """
172 Imports and returns ``xxhash``.
173
174 Attempts to import ``lz4`` as well.
175 """
176 try:
177 import lz4.block # noqa: F401
178 import xxhash
179 except ImportError:
180 raise ImportError(
181 """install the 'lz4' and `xxhash` packages with:
182
183 pip install lz4 xxhash
184
185 or
186
187 conda install lz4 python-xxhash"""
188 )
189 else:
190 return xxhash
191
192
193 def zstandard():
194 """
195 Imports and returns ``zstandard``.
196 """
197 try:
198 import zstandard
199 except ImportError:
200 raise ImportError(
201 """install the 'zstandard' package with:
202
203 pip install zstandard
204
205 or
206
207 conda install zstandard"""
208 )
209 else:
210 return zstandard
211
212
213 def boost_histogram():
214 """
215 Imports and returns ``boost-histogram``.
216 """
217 try:
218 import boost_histogram
219 except ImportError:
220 raise ImportError(
221 """install the 'boost-histogram' package with:
222
223 pip install boost-histogram
224
225 or
226
227 conda install -c conda-forge boost-histogram"""
228 )
229 else:
230 return boost_histogram
231
232
233 def hist():
234 """
235 Imports and returns ``hist``.
236 """
237 try:
238 import hist
239 except ImportError:
240 raise ImportError(
241 """install the 'hist' package with:
242
243 pip install hist"""
244 )
245 else:
246 return hist
247
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/uproot/extras.py b/src/uproot/extras.py
--- a/src/uproot/extras.py
+++ b/src/uproot/extras.py
@@ -106,14 +106,20 @@
def older_xrootd(min_version):
"""
Check if the installed XRootD bindings are newer than a given version
- without importing. Defaults to False if XRootD is not installed.
+ without importing. Defaults to False if XRootD is not installed. Unrecognized
+ versions (i.e. self-built XRootD, whose version numbers are strings)
+ return False: that is, they're assumed to be new, so that no warnings
+ are raised.
"""
try:
dist = pkg_resources.get_distribution("XRootD")
except pkg_resources.DistributionNotFound:
return False
else:
- return LooseVersion(dist.version) < LooseVersion(min_version)
+ try:
+ return LooseVersion(dist.version) < LooseVersion(min_version)
+ except TypeError:
+ return False
def lzma():
|
{"golden_diff": "diff --git a/src/uproot/extras.py b/src/uproot/extras.py\n--- a/src/uproot/extras.py\n+++ b/src/uproot/extras.py\n@@ -106,14 +106,20 @@\n def older_xrootd(min_version):\n \"\"\"\n Check if the installed XRootD bindings are newer than a given version\n- without importing. Defaults to False if XRootD is not installed.\n+ without importing. Defaults to False if XRootD is not installed. Unrecognized\n+ versions (i.e. self-built XRootD, whose version numbers are strings)\n+ return False: that is, they're assumed to be new, so that no warnings\n+ are raised.\n \"\"\"\n try:\n dist = pkg_resources.get_distribution(\"XRootD\")\n except pkg_resources.DistributionNotFound:\n return False\n else:\n- return LooseVersion(dist.version) < LooseVersion(min_version)\n+ try:\n+ return LooseVersion(dist.version) < LooseVersion(min_version)\n+ except TypeError:\n+ return False\n \n \n def lzma():\n", "issue": "Uproot fails with self-built XRootD\nWith Uproot 4.0.11 and a self-built XRootD with Python bindings, `import uproot` fails with `TypeError: '<' not supported between instances of 'str' and 'int'`. This is due to the following line: https://github.com/scikit-hep/uproot4/blob/d6f9bea0f1a9ca6806445b95da93efa37c5117ba/src/uproot/extras.py#L116\r\n\r\nWhen one builds XRootD, the version number differs from the standard `x.y.z` - it is, e.g., `v20210712-58b374f12`, which causes `LooseVersion` to raise `TypeError`. \n", "before_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE\n\n\"\"\"\nThis module defines functions that import external libraries used by Uproot, but not\nrequired by an Uproot installation. (Uproot only requires NumPy).\n\nIf a library cannot be imported, these functions raise ``ImportError`` with\nerror messages containing instructions on how to install the library.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport atexit\nimport os\nfrom distutils.version import LooseVersion\n\nimport pkg_resources\n\n\ndef awkward():\n \"\"\"\n Imports and returns ``awkward``.\n \"\"\"\n try:\n import awkward\n except ImportError:\n raise ImportError(\n \"\"\"install the 'awkward' package with:\n\n pip install awkward\n\nAlternatively, you can use ``library=\"np\"`` or globally set ``uproot.default_library``\nto output as NumPy arrays, rather than Awkward arrays.\n\"\"\"\n )\n else:\n return awkward\n\n\ndef pandas():\n \"\"\"\n Imports and returns ``pandas``.\n \"\"\"\n try:\n import pandas\n except ImportError:\n raise ImportError(\n \"\"\"install the 'pandas' package with:\n\n pip install pandas\n\nor\n\n conda install pandas\"\"\"\n )\n else:\n return pandas\n\n\ndef XRootD_client():\n \"\"\"\n Imports and returns ``XRootD.client`` (after setting the\n ```XRD_RUNFORKHANDLER`` environment variable to ``\"1\"``, to allow\n multiprocessing).\n \"\"\"\n os.environ[\"XRD_RUNFORKHANDLER\"] = \"1\" # set multiprocessing flag\n try:\n import XRootD\n import XRootD.client\n\n except ImportError:\n raise ImportError(\n \"\"\"Install XRootD python bindings with:\n\n conda install -c conda-forge xrootd\n\n(or download from http://xrootd.org/dload.html and manually compile with \"\"\"\n \"\"\"cmake; setting PYTHONPATH and LD_LIBRARY_PATH appropriately).\"\"\"\n )\n\n if older_xrootd(\"5.1.0\"):\n # This is registered after calling \"import XRootD.client\" so it is ran\n # before XRootD.client.finalize.finalize()\n @atexit.register\n def cleanup_open_files():\n \"\"\"Clean up any open xrootd file objects at exit\n\n Required to avoid 
deadlocks from XRootD, for details see:\n * https://github.com/scikit-hep/uproot/issues/504\n * https://github.com/xrootd/xrootd/pull/1260\n \"\"\"\n import gc\n\n for obj in gc.get_objects():\n try:\n isopen = isinstance(obj, XRootD.client.file.File) and obj.is_open()\n except ReferenceError:\n pass\n else:\n if isopen:\n obj.close()\n\n return XRootD.client\n\n\ndef older_xrootd(min_version):\n \"\"\"\n Check if the installed XRootD bindings are newer than a given version\n without importing. Defaults to False if XRootD is not installed.\n \"\"\"\n try:\n dist = pkg_resources.get_distribution(\"XRootD\")\n except pkg_resources.DistributionNotFound:\n return False\n else:\n return LooseVersion(dist.version) < LooseVersion(min_version)\n\n\ndef lzma():\n \"\"\"\n Imports and returns ``lzma`` (which is part of the Python 3 standard\n library, but not Python 2).\n \"\"\"\n try:\n import lzma\n except ImportError:\n try:\n import backports.lzma as lzma\n except ImportError:\n raise ImportError(\n \"\"\"install the 'lzma' package with:\n\n pip install backports.lzma\n\nor\n\n conda install backports.lzma\n\nor use Python >= 3.3.\"\"\"\n )\n else:\n return lzma\n else:\n return lzma\n\n\ndef lz4_block():\n \"\"\"\n Imports and returns ``lz4``.\n\n Attempts to import ``xxhash`` as well.\n \"\"\"\n try:\n import lz4.block\n import xxhash # noqa: F401\n except ImportError:\n raise ImportError(\n \"\"\"install the 'lz4' and `xxhash` packages with:\n\n pip install lz4 xxhash\n\nor\n\n conda install lz4 python-xxhash\"\"\"\n )\n else:\n return lz4.block\n\n\ndef xxhash():\n \"\"\"\n Imports and returns ``xxhash``.\n\n Attempts to import ``lz4`` as well.\n \"\"\"\n try:\n import lz4.block # noqa: F401\n import xxhash\n except ImportError:\n raise ImportError(\n \"\"\"install the 'lz4' and `xxhash` packages with:\n\n pip install lz4 xxhash\n\nor\n\n conda install lz4 python-xxhash\"\"\"\n )\n else:\n return xxhash\n\n\ndef zstandard():\n \"\"\"\n Imports and returns ``zstandard``.\n \"\"\"\n try:\n import zstandard\n except ImportError:\n raise ImportError(\n \"\"\"install the 'zstandard' package with:\n\n pip install zstandard\n\nor\n\n conda install zstandard\"\"\"\n )\n else:\n return zstandard\n\n\ndef boost_histogram():\n \"\"\"\n Imports and returns ``boost-histogram``.\n \"\"\"\n try:\n import boost_histogram\n except ImportError:\n raise ImportError(\n \"\"\"install the 'boost-histogram' package with:\n\n pip install boost-histogram\n\nor\n\n conda install -c conda-forge boost-histogram\"\"\"\n )\n else:\n return boost_histogram\n\n\ndef hist():\n \"\"\"\n Imports and returns ``hist``.\n \"\"\"\n try:\n import hist\n except ImportError:\n raise ImportError(\n \"\"\"install the 'hist' package with:\n\n pip install hist\"\"\"\n )\n else:\n return hist\n", "path": "src/uproot/extras.py"}], "after_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE\n\n\"\"\"\nThis module defines functions that import external libraries used by Uproot, but not\nrequired by an Uproot installation. 
(Uproot only requires NumPy).\n\nIf a library cannot be imported, these functions raise ``ImportError`` with\nerror messages containing instructions on how to install the library.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport atexit\nimport os\nfrom distutils.version import LooseVersion\n\nimport pkg_resources\n\n\ndef awkward():\n \"\"\"\n Imports and returns ``awkward``.\n \"\"\"\n try:\n import awkward\n except ImportError:\n raise ImportError(\n \"\"\"install the 'awkward' package with:\n\n pip install awkward\n\nAlternatively, you can use ``library=\"np\"`` or globally set ``uproot.default_library``\nto output as NumPy arrays, rather than Awkward arrays.\n\"\"\"\n )\n else:\n return awkward\n\n\ndef pandas():\n \"\"\"\n Imports and returns ``pandas``.\n \"\"\"\n try:\n import pandas\n except ImportError:\n raise ImportError(\n \"\"\"install the 'pandas' package with:\n\n pip install pandas\n\nor\n\n conda install pandas\"\"\"\n )\n else:\n return pandas\n\n\ndef XRootD_client():\n \"\"\"\n Imports and returns ``XRootD.client`` (after setting the\n ```XRD_RUNFORKHANDLER`` environment variable to ``\"1\"``, to allow\n multiprocessing).\n \"\"\"\n os.environ[\"XRD_RUNFORKHANDLER\"] = \"1\" # set multiprocessing flag\n try:\n import XRootD\n import XRootD.client\n\n except ImportError:\n raise ImportError(\n \"\"\"Install XRootD python bindings with:\n\n conda install -c conda-forge xrootd\n\n(or download from http://xrootd.org/dload.html and manually compile with \"\"\"\n \"\"\"cmake; setting PYTHONPATH and LD_LIBRARY_PATH appropriately).\"\"\"\n )\n\n if older_xrootd(\"5.1.0\"):\n # This is registered after calling \"import XRootD.client\" so it is ran\n # before XRootD.client.finalize.finalize()\n @atexit.register\n def cleanup_open_files():\n \"\"\"Clean up any open xrootd file objects at exit\n\n Required to avoid deadlocks from XRootD, for details see:\n * https://github.com/scikit-hep/uproot/issues/504\n * https://github.com/xrootd/xrootd/pull/1260\n \"\"\"\n import gc\n\n for obj in gc.get_objects():\n try:\n isopen = isinstance(obj, XRootD.client.file.File) and obj.is_open()\n except ReferenceError:\n pass\n else:\n if isopen:\n obj.close()\n\n return XRootD.client\n\n\ndef older_xrootd(min_version):\n \"\"\"\n Check if the installed XRootD bindings are newer than a given version\n without importing. Defaults to False if XRootD is not installed. Unrecognized\n versions (i.e. 
self-built XRootD, whose version numbers are strings)\n return False: that is, they're assumed to be new, so that no warnings\n are raised.\n \"\"\"\n try:\n dist = pkg_resources.get_distribution(\"XRootD\")\n except pkg_resources.DistributionNotFound:\n return False\n else:\n try:\n return LooseVersion(dist.version) < LooseVersion(min_version)\n except TypeError:\n return False\n\n\ndef lzma():\n \"\"\"\n Imports and returns ``lzma`` (which is part of the Python 3 standard\n library, but not Python 2).\n \"\"\"\n try:\n import lzma\n except ImportError:\n try:\n import backports.lzma as lzma\n except ImportError:\n raise ImportError(\n \"\"\"install the 'lzma' package with:\n\n pip install backports.lzma\n\nor\n\n conda install backports.lzma\n\nor use Python >= 3.3.\"\"\"\n )\n else:\n return lzma\n else:\n return lzma\n\n\ndef lz4_block():\n \"\"\"\n Imports and returns ``lz4``.\n\n Attempts to import ``xxhash`` as well.\n \"\"\"\n try:\n import lz4.block\n import xxhash # noqa: F401\n except ImportError:\n raise ImportError(\n \"\"\"install the 'lz4' and `xxhash` packages with:\n\n pip install lz4 xxhash\n\nor\n\n conda install lz4 python-xxhash\"\"\"\n )\n else:\n return lz4.block\n\n\ndef xxhash():\n \"\"\"\n Imports and returns ``xxhash``.\n\n Attempts to import ``lz4`` as well.\n \"\"\"\n try:\n import lz4.block # noqa: F401\n import xxhash\n except ImportError:\n raise ImportError(\n \"\"\"install the 'lz4' and `xxhash` packages with:\n\n pip install lz4 xxhash\n\nor\n\n conda install lz4 python-xxhash\"\"\"\n )\n else:\n return xxhash\n\n\ndef zstandard():\n \"\"\"\n Imports and returns ``zstandard``.\n \"\"\"\n try:\n import zstandard\n except ImportError:\n raise ImportError(\n \"\"\"install the 'zstandard' package with:\n\n pip install zstandard\n\nor\n\n conda install zstandard\"\"\"\n )\n else:\n return zstandard\n\n\ndef boost_histogram():\n \"\"\"\n Imports and returns ``boost-histogram``.\n \"\"\"\n try:\n import boost_histogram\n except ImportError:\n raise ImportError(\n \"\"\"install the 'boost-histogram' package with:\n\n pip install boost-histogram\n\nor\n\n conda install -c conda-forge boost-histogram\"\"\"\n )\n else:\n return boost_histogram\n\n\ndef hist():\n \"\"\"\n Imports and returns ``hist``.\n \"\"\"\n try:\n import hist\n except ImportError:\n raise ImportError(\n \"\"\"install the 'hist' package with:\n\n pip install hist\"\"\"\n )\n else:\n return hist\n", "path": "src/uproot/extras.py"}]}
| 2,370 | 236 |
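The failure mode in the entry above is reproducible in a few lines: `LooseVersion` tokenizes a self-built version such as `v20210712-58b374f12` into a mix of `str` and `int` parts, and comparing those against the integer parts of `5.1.0` raises `TypeError` on Python 3. The sketch below shows the guarded comparison the golden diff adopts; `older_than` is an illustrative stand-in for `older_xrootd`.

```python
from distutils.version import LooseVersion

def older_than(installed, minimum):
    try:
        return LooseVersion(installed) < LooseVersion(minimum)
    except TypeError:
        # Unrecognized (e.g. self-built) version strings are assumed to be new enough.
        return False

print(older_than("4.12.2", "5.1.0"))               # True
print(older_than("v20210712-58b374f12", "5.1.0"))  # False instead of raising TypeError
```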
gh_patches_debug_9190
|
rasdani/github-patches
|
git_diff
|
DataBiosphere__toil-1589
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Toil Appliance should set /var/lib/toil as the default workdir
Toil Appliance should set /var/lib/toil as the default workdir
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/Dockerfile.py`
Content:
```
1 # Copyright (C) 2015-2016 Regents of the University of California
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import print_function
16 import os
17 import textwrap
18
19 applianceSelf = os.environ['TOIL_APPLIANCE_SELF']
20 sdistName = os.environ['_TOIL_SDIST_NAME']
21
22
23 dependencies = ' '.join(['libffi-dev', # For client side encryption for 'azure' extra with PyNACL
24 'python-dev', # For installing Python packages with native code
25 'python-pip', # Bootstrap pip, but needs upgrading, see below
26 'libcurl4-openssl-dev',
27 'libssl-dev',
28 'wget',
29 'curl',
30 'openssh-server',
31 'mesos=1.0.0-2.0.89.ubuntu1404',
32 'rsync',
33 'screen'])
34
35
36 def heredoc(s):
37 s = textwrap.dedent(s).format(**globals())
38 return s[1:] if s.startswith('\n') else s
39
40
41 motd = heredoc('''
42
43 This is the Toil appliance. You can run your Toil script directly on the appliance, but only
44 in single-machine mode. Alternatively, create a Toil cluster with `toil launch-cluster`,
45 log into the leader of that cluster with `toil ssh-cluster` and run your Toil script there.
46
47 For more information see http://toil.readthedocs.io/en/latest/
48
49 Copyright (C) 2015-2016 Regents of the University of California
50
51 Version: {applianceSelf}
52
53 ''')
54
55 # Prepare motd to be echoed in the Dockerfile using a RUN statement that uses bash's print
56 motd = ''.join(l + '\\n\\\n' for l in motd.splitlines())
57
58 print(heredoc('''
59 FROM ubuntu:14.04
60
61 RUN echo "deb http://repos.mesosphere.io/ubuntu/ trusty main" \
62 > /etc/apt/sources.list.d/mesosphere.list \
63 && apt-key adv --keyserver keyserver.ubuntu.com --recv E56151BF \
64 && apt-get -y update \
65 && apt-get -y install {dependencies} \
66 && apt-get clean && rm -rf /var/lib/apt/lists/*
67
68 RUN mkdir /root/.ssh && \
69 chmod 700 /root/.ssh
70
71 ADD waitForKey.sh /usr/bin/waitForKey.sh
72
73 RUN chmod 777 /usr/bin/waitForKey.sh
74
75 # The stock pip is too old and can't install from sdist with extras
76 RUN pip install --upgrade pip==8.1.2
77
78 # Include virtualenv, as it is still the recommended way to deploy pipelines
79 RUN pip install --upgrade virtualenv==15.0.3
80
81 # Install s3am (--never-download prevents silent upgrades to pip, wheel and setuptools)
82 RUN virtualenv --never-download /home/s3am \
83 && /home/s3am/bin/pip install s3am==2.0 \
84 && ln -s /home/s3am/bin/s3am /usr/local/bin/
85
86 # Install statically linked version of docker client
87 RUN wget -O /usr/bin/docker https://get.docker.com/builds/Linux/x86_64/docker-1.10.3 \
88 && chmod +x /usr/bin/docker
89
90 # Fix for Mesos interface dependency missing on ubuntu
91 RUN pip install protobuf==3.0.0
92
93 # Move the Mesos module onto the Python path
94 RUN ln -s /usr/lib/python2.7/site-packages/mesos /usr/local/lib/python2.7/dist-packages/mesos
95
96 # Fix for https://issues.apache.org/jira/browse/MESOS-3793
97 ENV MESOS_LAUNCHER=posix
98
99 # Fix for `screen` (https://github.com/BD2KGenomics/toil/pull/1386#issuecomment-267424561)
100 ENV TERM linux
101
102 # An appliance may need to start more appliances, e.g. when the leader appliance launches the
103 # worker appliance on a worker node. To support this, we embed a self-reference into the image:
104 ENV TOIL_APPLIANCE_SELF {applianceSelf}
105
106 # This component changes most frequently and keeping it last maximizes Docker cache hits.
107 COPY {sdistName} .
108 RUN pip install {sdistName}[aws,mesos,encryption,cwl]
109 RUN rm {sdistName}
110
111 # We intentionally inherit the default ENTRYPOINT and CMD from the base image, to the effect
112 # that the running appliance just gives you a shell. To start the Mesos master or slave
113 # daemons, the user # should override the entrypoint via --entrypoint.
114
115 RUN echo '[ ! -z "$TERM" -a -r /etc/motd ] && cat /etc/motd' >> /etc/bash.bashrc \
116 && printf '{motd}' > /etc/motd
117 '''))
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docker/Dockerfile.py b/docker/Dockerfile.py
--- a/docker/Dockerfile.py
+++ b/docker/Dockerfile.py
@@ -103,6 +103,10 @@
# worker appliance on a worker node. To support this, we embed a self-reference into the image:
ENV TOIL_APPLIANCE_SELF {applianceSelf}
+ RUN mkdir /var/lib/toil
+
+ ENV TOIL_WORKDIR /var/lib/toil
+
# This component changes most frequently and keeping it last maximizes Docker cache hits.
COPY {sdistName} .
RUN pip install {sdistName}[aws,mesos,encryption,cwl]
|
{"golden_diff": "diff --git a/docker/Dockerfile.py b/docker/Dockerfile.py\n--- a/docker/Dockerfile.py\n+++ b/docker/Dockerfile.py\n@@ -103,6 +103,10 @@\n # worker appliance on a worker node. To support this, we embed a self-reference into the image:\n ENV TOIL_APPLIANCE_SELF {applianceSelf}\n \n+ RUN mkdir /var/lib/toil\n+\n+ ENV TOIL_WORKDIR /var/lib/toil\n+\n # This component changes most frequently and keeping it last maximizes Docker cache hits.\n COPY {sdistName} .\n RUN pip install {sdistName}[aws,mesos,encryption,cwl]\n", "issue": "Toil Appliance should set /var/lib/toil as the default workdir \n\nToil Appliance should set /var/lib/toil as the default workdir \n\n", "before_files": [{"content": "# Copyright (C) 2015-2016 Regents of the University of California\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\nimport os\nimport textwrap\n\napplianceSelf = os.environ['TOIL_APPLIANCE_SELF']\nsdistName = os.environ['_TOIL_SDIST_NAME']\n\n\ndependencies = ' '.join(['libffi-dev', # For client side encryption for 'azure' extra with PyNACL\n 'python-dev', # For installing Python packages with native code\n 'python-pip', # Bootstrap pip, but needs upgrading, see below\n 'libcurl4-openssl-dev',\n 'libssl-dev',\n 'wget',\n 'curl',\n 'openssh-server',\n 'mesos=1.0.0-2.0.89.ubuntu1404',\n 'rsync',\n 'screen'])\n\n\ndef heredoc(s):\n s = textwrap.dedent(s).format(**globals())\n return s[1:] if s.startswith('\\n') else s\n\n\nmotd = heredoc('''\n\n This is the Toil appliance. You can run your Toil script directly on the appliance, but only\n in single-machine mode. 
Alternatively, create a Toil cluster with `toil launch-cluster`,\n log into the leader of that cluster with `toil ssh-cluster` and run your Toil script there.\n\n For more information see http://toil.readthedocs.io/en/latest/\n\n Copyright (C) 2015-2016 Regents of the University of California\n\n Version: {applianceSelf}\n\n''')\n\n# Prepare motd to be echoed in the Dockerfile using a RUN statement that uses bash's print\nmotd = ''.join(l + '\\\\n\\\\\\n' for l in motd.splitlines())\n\nprint(heredoc('''\n FROM ubuntu:14.04\n\n RUN echo \"deb http://repos.mesosphere.io/ubuntu/ trusty main\" \\\n > /etc/apt/sources.list.d/mesosphere.list \\\n && apt-key adv --keyserver keyserver.ubuntu.com --recv E56151BF \\\n && apt-get -y update \\\n && apt-get -y install {dependencies} \\\n && apt-get clean && rm -rf /var/lib/apt/lists/*\n\n RUN mkdir /root/.ssh && \\\n chmod 700 /root/.ssh\n\n ADD waitForKey.sh /usr/bin/waitForKey.sh\n\n RUN chmod 777 /usr/bin/waitForKey.sh\n\n # The stock pip is too old and can't install from sdist with extras\n RUN pip install --upgrade pip==8.1.2\n\n # Include virtualenv, as it is still the recommended way to deploy pipelines\n RUN pip install --upgrade virtualenv==15.0.3\n\n # Install s3am (--never-download prevents silent upgrades to pip, wheel and setuptools)\n RUN virtualenv --never-download /home/s3am \\\n && /home/s3am/bin/pip install s3am==2.0 \\\n && ln -s /home/s3am/bin/s3am /usr/local/bin/\n\n # Install statically linked version of docker client\n RUN wget -O /usr/bin/docker https://get.docker.com/builds/Linux/x86_64/docker-1.10.3 \\\n && chmod +x /usr/bin/docker\n\n # Fix for Mesos interface dependency missing on ubuntu\n RUN pip install protobuf==3.0.0\n\n # Move the Mesos module onto the Python path\n RUN ln -s /usr/lib/python2.7/site-packages/mesos /usr/local/lib/python2.7/dist-packages/mesos\n\n # Fix for https://issues.apache.org/jira/browse/MESOS-3793\n ENV MESOS_LAUNCHER=posix\n\n # Fix for `screen` (https://github.com/BD2KGenomics/toil/pull/1386#issuecomment-267424561)\n ENV TERM linux\n\n # An appliance may need to start more appliances, e.g. when the leader appliance launches the\n # worker appliance on a worker node. To support this, we embed a self-reference into the image:\n ENV TOIL_APPLIANCE_SELF {applianceSelf}\n\n # This component changes most frequently and keeping it last maximizes Docker cache hits.\n COPY {sdistName} .\n RUN pip install {sdistName}[aws,mesos,encryption,cwl]\n RUN rm {sdistName}\n\n # We intentionally inherit the default ENTRYPOINT and CMD from the base image, to the effect\n # that the running appliance just gives you a shell. To start the Mesos master or slave\n # daemons, the user # should override the entrypoint via --entrypoint.\n\n RUN echo '[ ! 
-z \"$TERM\" -a -r /etc/motd ] && cat /etc/motd' >> /etc/bash.bashrc \\\n && printf '{motd}' > /etc/motd\n'''))\n", "path": "docker/Dockerfile.py"}], "after_files": [{"content": "# Copyright (C) 2015-2016 Regents of the University of California\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\nimport os\nimport textwrap\n\napplianceSelf = os.environ['TOIL_APPLIANCE_SELF']\nsdistName = os.environ['_TOIL_SDIST_NAME']\n\n\ndependencies = ' '.join(['libffi-dev', # For client side encryption for 'azure' extra with PyNACL\n 'python-dev', # For installing Python packages with native code\n 'python-pip', # Bootstrap pip, but needs upgrading, see below\n 'libcurl4-openssl-dev',\n 'libssl-dev',\n 'wget',\n 'curl',\n 'openssh-server',\n 'mesos=1.0.0-2.0.89.ubuntu1404',\n 'rsync',\n 'screen'])\n\n\ndef heredoc(s):\n s = textwrap.dedent(s).format(**globals())\n return s[1:] if s.startswith('\\n') else s\n\n\nmotd = heredoc('''\n\n This is the Toil appliance. You can run your Toil script directly on the appliance, but only\n in single-machine mode. Alternatively, create a Toil cluster with `toil launch-cluster`,\n log into the leader of that cluster with `toil ssh-cluster` and run your Toil script there.\n\n For more information see http://toil.readthedocs.io/en/latest/\n\n Copyright (C) 2015-2016 Regents of the University of California\n\n Version: {applianceSelf}\n\n''')\n\n# Prepare motd to be echoed in the Dockerfile using a RUN statement that uses bash's print\nmotd = ''.join(l + '\\\\n\\\\\\n' for l in motd.splitlines())\n\nprint(heredoc('''\n FROM ubuntu:14.04\n\n RUN echo \"deb http://repos.mesosphere.io/ubuntu/ trusty main\" \\\n > /etc/apt/sources.list.d/mesosphere.list \\\n && apt-key adv --keyserver keyserver.ubuntu.com --recv E56151BF \\\n && apt-get -y update \\\n && apt-get -y install {dependencies} \\\n && apt-get clean && rm -rf /var/lib/apt/lists/*\n\n RUN mkdir /root/.ssh && \\\n chmod 700 /root/.ssh\n\n ADD waitForKey.sh /usr/bin/waitForKey.sh\n\n RUN chmod 777 /usr/bin/waitForKey.sh\n\n # The stock pip is too old and can't install from sdist with extras\n RUN pip install --upgrade pip==8.1.2\n\n # Include virtualenv, as it is still the recommended way to deploy pipelines\n RUN pip install --upgrade virtualenv==15.0.3\n\n # Install s3am (--never-download prevents silent upgrades to pip, wheel and setuptools)\n RUN virtualenv --never-download /home/s3am \\\n && /home/s3am/bin/pip install s3am==2.0 \\\n && ln -s /home/s3am/bin/s3am /usr/local/bin/\n\n # Install statically linked version of docker client\n RUN wget -O /usr/bin/docker https://get.docker.com/builds/Linux/x86_64/docker-1.10.3 \\\n && chmod +x /usr/bin/docker\n\n # Fix for Mesos interface dependency missing on ubuntu\n RUN pip install protobuf==3.0.0\n\n # Move the Mesos module onto the Python path\n RUN ln -s /usr/lib/python2.7/site-packages/mesos /usr/local/lib/python2.7/dist-packages/mesos\n\n # Fix for https://issues.apache.org/jira/browse/MESOS-3793\n ENV MESOS_LAUNCHER=posix\n\n 
# Fix for `screen` (https://github.com/BD2KGenomics/toil/pull/1386#issuecomment-267424561)\n ENV TERM linux\n\n # An appliance may need to start more appliances, e.g. when the leader appliance launches the\n # worker appliance on a worker node. To support this, we embed a self-reference into the image:\n ENV TOIL_APPLIANCE_SELF {applianceSelf}\n\n RUN mkdir /var/lib/toil\n\n ENV TOIL_WORKDIR /var/lib/toil\n\n # This component changes most frequently and keeping it last maximizes Docker cache hits.\n COPY {sdistName} .\n RUN pip install {sdistName}[aws,mesos,encryption,cwl]\n RUN rm {sdistName}\n\n # We intentionally inherit the default ENTRYPOINT and CMD from the base image, to the effect\n # that the running appliance just gives you a shell. To start the Mesos master or slave\n # daemons, the user # should override the entrypoint via --entrypoint.\n\n RUN echo '[ ! -z \"$TERM\" -a -r /etc/motd ] && cat /etc/motd' >> /etc/bash.bashrc \\\n && printf '{motd}' > /etc/motd\n'''))\n", "path": "docker/Dockerfile.py"}]}
| 1,775 | 152 |
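As a generic illustration of why the entry above only needs two Dockerfile lines (`RUN mkdir /var/lib/toil` and `ENV TOIL_WORKDIR /var/lib/toil`): a work-directory lookup that prefers an explicit option, then the environment variable, then the system temp directory. This is not Toil's actual resolution code; `resolve_workdir` is hypothetical, and the assumption that Toil consults `TOIL_WORKDIR` comes from the ENV line added in the golden diff.

```python
import os
import tempfile

def resolve_workdir(cli_workdir=None):
    # An explicit --workDir-style argument always wins; otherwise the baked-in
    # TOIL_WORKDIR from the appliance image becomes the default.
    if cli_workdir:
        return cli_workdir
    return os.environ.get("TOIL_WORKDIR", tempfile.gettempdir())

print(resolve_workdir())  # "/var/lib/toil" inside the patched appliance
```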
gh_patches_debug_1656
|
rasdani/github-patches
|
git_diff
|
akvo__akvo-rsr-2158
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve IATI export
## Test plan
Before starting with testing, make sure to perform two one-time actions:
- The `iati_export` cron job is running (done on Test and UAT)
- See running cron jobs by running: `manage.py crontab show`;
- The `perform_iati_checks` management command has been run (done on Test and UAT).
---
GIVEN the 'My IATI' section in MyRSR
WHEN connected to multiple organisations (or as superuser)
THEN an organisation selection screen should be shown
WHEN connected to one organisation
THEN this organisation should be automatically selected
GIVEN the 'My IATI' section in MyRSR
WHEN an organisation has been selected
THEN the overview of all IATI exports should be shown
AND for each export the status, number of projects, created by, created at and IATI version should be shown
AND the latest IATI export should be shown in a green row
AND a pending or in progress IATI export should be shown in a yellow row
AND a cancelled or export without an IATI file should be shown in a red row
GIVEN the 'My IATI' section in MyRSR
WHEN an organisation has been selected
THEN it should be possible to select whether the latest IATI file is shown on the organisation page
GIVEN that is has been set that the latest IATI file is shown on the organisation page
THEN it should be shown on the organisation page as well
ELSE the IATI file should not be shown
GIVEN that the 'Add new IATI export' button is clicked
THEN the user should be redirected to the project selection overview
GIVEN the project selection overview
WHEN looking at the projects overview
THEN all projects where the selected organisation is reporting organisation should be shown
GIVEN the project selection overview
WHEN applying a filter
THEN the project selection should change
AND the indication of the number of projects selected should indicate the number of selected projects
GIVEN the project selection overview
WHEN projects are selected AND the 'create new IATI export' button is clicked
THEN the user should be redirected to the IATI exports overview
AND the top IATI export should be the new IATI export (with 'Pending' status)
GIVEN the IATI export overview
WHEN an export is pending or in progress
THEN the overview should be refreshed every 10 seconds
AND when an export is in progress, the number of processed projects should be shown
## Issue description
Currently, an IATI export with more than 70 projects will give a DB timeout. However, we need to be able to export an IATI file with any amount of projects. Similar to the IATI import, we can use a cron job for this.
- [x] Move IATI export to a cron job implementation
- [x] Update the 'My IATI' tab in MyRSR
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rsr/feeds.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 import re
8
9 from xml.sax.saxutils import XMLGenerator
10
11 from django.contrib.syndication.views import FeedDoesNotExist, Feed
12 from django.core.urlresolvers import reverse
13 from django.shortcuts import get_object_or_404
14 from django.utils.feedgenerator import Rss201rev2Feed
15 from django.utils.translation import ugettext_lazy as _
16
17 from akvo.rsr.models import Project, ProjectUpdate, Organisation
18
19
20 def __dict_replace(s, d):
21 """Replace substrings of a string using a dictionary."""
22 for key, value in d.items():
23 s = s.replace(key, value)
24 return s
25
26 def __escape(data, entities):
27 # must do ampersand first
28 data = data.replace("&", "&")
29 data = data.replace(">", ">")
30 data = data.replace("<", "<")
31 if entities:
32 data = __dict_replace(data, entities)
33 return data
34
35 def escape(data, entities={}):
36 """Modification to xml.sax.saxutils.escape to that detects CDATA blocks that are not escaped
37
38 Escape &, <, and > in a string of data.
39
40 You can escape other strings of data by passing a dictionary as
41 the optional entities parameter. The keys and values must all be
42 strings; each key will be replaced with its corresponding value.
43
44 """
45 # find character data, re.DOTALL includes linefeed in .
46 pattern = re.compile('<!\[CDATA\[.*\]\]>', re.DOTALL)
47 iterator = pattern.finditer(data)
48 start = 0
49 bits = []
50 for match in iterator:
51 #grab chunk before first match
52 bit = data[start:match.span()[0]]
53 bit = __escape(bit, entities)
54 bits.append(bit)
55 #grab match
56 bit = data[match.span()[0]:match.span()[1]]
57 bits.extend(bit)
58 start = match.span()[1]
59 # escape tail bit after last match
60 bit = data[start:]
61 bit = __escape(bit, entities)
62 bits.extend(bit)
63 data = ''.join(bits)
64 return data
65
66
67 class RSRSimplerXMLGenerator(XMLGenerator):
68 """subclassed to be able to call custom escape() function, see above
69 """
70 def characters(self, content):
71 self._write(escape(content))
72
73 def addQuickElement(self, name, contents=None, attrs=None):
74 "Convenience method for adding an element with no children"
75 if attrs is None: attrs = {}
76 self.startElement(name, attrs)
77 if contents is not None:
78 self.characters(contents)
79 self.endElement(name)
80
81
82 class RSRMediaRssFeed(Rss201rev2Feed):
83 def rss_attributes(self):
84 attrs = super(RSRMediaRssFeed, self).rss_attributes()
85 attrs['xmlns:media'] = 'http://search.yahoo.com/mrss/'
86 attrs['xmlns:atom'] = 'http://www.w3.org/2005/Atom'
87 return attrs
88
89 def add_item_elements(self, handler, item):
90 """Callback to add elements to each item (item/entry) element."""
91 super(RSRMediaRssFeed, self).add_item_elements(handler, item)
92
93 if 'media:title' in item:
94 handler.addQuickElement(u"media:title", item['title'])
95 if 'media:description' in item:
96 handler.addQuickElement(u"media:description", item['media:description'])
97 if 'media:credit' in item:
98 handler.addQuickElement(u"media:credit", item['media:credit'])
99
100 if 'content_url' in item:
101 content = dict(url=item['content_url'])
102 if 'content_width' in item:
103 content['width'] = str(item['content_width'])
104 if 'content_height' in item:
105 content['height'] = str(item['content_height'])
106 handler.addQuickElement(u"media:content", '', content)
107
108 if 'thumbnail_url' in item:
109 thumbnail = dict(url=item['thumbnail_url'])
110 if 'thumbnail_width' in item:
111 thumbnail['width'] = str(item['thumbnail_width'])
112 if 'thumbnail_height' in item:
113 thumbnail['height'] = str(item['thumbnail_height'])
114 handler.addQuickElement(u"media:thumbnail", '', thumbnail)
115
116 if 'keywords' in item:
117 handler.addQuickElement(u"media:keywords", item['keywords'])
118
119 def write(self, outfile, encoding):
120 handler = RSRSimplerXMLGenerator(outfile, encoding)
121 handler.startDocument()
122 handler.startElement(u"rss", self.rss_attributes())
123 handler.startElement(u"channel", self.root_attributes())
124 self.add_root_elements(handler)
125 self.write_items(handler)
126 self.endChannelElement(handler)
127 handler.endElement(u"rss")
128
129 class UpdateFeed(Feed):
130 """base class generating Update feeds
131 """
132 feed_type = RSRMediaRssFeed
133
134 def link(self, obj):
135 if not obj:
136 raise FeedDoesNotExist
137 return obj.get_absolute_url()
138
139 def item_link(self, item):
140 return item.get_absolute_url()
141
142 def item_title(self, item):
143 return item.title
144
145 def item_description(self, item):
146 try:
147 size = item.photo.size
148 return '<![CDATA[<p><a href="%s"><img src="%s" alt="" /></a></p><p>%s</p>]]>' % (
149 item.get_absolute_url(),
150 item.photo.thumbnail.absolute_url,
151 item.text,
152 )
153 except:
154 return item.text
155
156 def item_pubdate(self, item):
157 return item.created_at
158
159 def item_author_name(self, item):
160 return item.user.get_full_name()
161
162 def item_credit(self, item):
163 return item.photo_credit
164
165 def item_extra_kwargs(self, item):
166 """return a dictionary to the feedgenerator for each item to be added to the feed.
167 """
168 try:
169 size = item.photo.size
170 photo = item.photo
171 kwargs = {
172 'media:title': item.title,
173 'media:description': item.photo_caption,
174 'media:credit': item.photo_credit,
175 'content_url': photo.url,
176 'content_width': photo.width,
177 'content_height': photo.height,
178 'thumbnail_url': photo.thumbnail.absolute_url,
179 'thumbnail_width': photo.thumbnail.width(),
180 'thumbnail_height': photo.thumbnail.height(),
181 }
182 return kwargs
183 except:
184 return {}
185
186
187 class ProjectUpdates(UpdateFeed):
188 """RSS feed for last 25 RSR updates of a project."""
189 def get_object(self, request, project_id):
190 return Project.objects.get(pk__exact=project_id)
191
192 def title(self, obj):
193 return _(u'Akvo RSR project %(id)d: %(project_title)s') % {
194 'id': obj.id,
195 'project_title': obj.title
196 }
197
198 def description(self, obj):
199 return _(u'Project updates for project %(project_title)s') % {
200 'project_title': obj.title
201 }
202
203 def items(self, obj):
204 # Limited to 25 items to prevent gateway timeouts.
205 return ProjectUpdate.objects.filter(project__id=obj.id).order_by('-id')[:25]
206
207
208 class OrganisationUpdates(UpdateFeed):
209 """RSS feed for last 25 RSR updates of an organisation."""
210 feed_type = RSRMediaRssFeed
211
212 def get_object(self, request, org_id):
213 return get_object_or_404(Organisation, id=int(org_id))
214
215 def title(self, obj):
216 return _(u'Projects of %(org_name)s') % {'org_name':obj.name,}
217
218 def description(self, obj):
219 if obj.name == obj.long_name:
220 return _(u"Project updates for projects partnered by %(org_name)s") % {
221 'org_name': obj.name
222 }
223 else:
224 return _(
225 u"Project updates for projects partnered by %(org_name)s - %(long_name)s"
226 ) % {'org_name': obj.name, 'long_name': obj.long_name}
227
228 def items(self, obj):
229 # Limited to 25 items to prevent gateway timeouts.
230 return obj.published_projects().all_updates()[:25]
231
232 def item_title(self, item):
233 return _(
234 u'Project %(project_id)d - %(project_title)s: %(update_title)s'
235 ) % {
236 'project_id': item.project.id,
237 'project_title': item.project.title,
238 'update_title': item.title
239 }
240
241
242 class AllProjectUpdates(UpdateFeed):
243 """RSS feed for last 25 RSR updates."""
244 title = _(u'Last 25 RSR project updates')
245
246 def link(self):
247 return reverse('update-directory')
248
249 description = _(u'Project updates for all Akvo RSR projects')
250
251 def items(self):
252 # Limited to 25 items to prevent gateway timeouts.
253 return ProjectUpdate.objects.select_related().order_by('-id')[:25]
254
255 def item_title(self, item):
256 return _(
257 u'Project %(project_id)d - %(project_title)s: %(update_title)s'
258 ) % {
259 'project_id': item.project.id,
260 'project_title': item.project.title,
261 'update_title': item.title
262 }
263
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/akvo/rsr/feeds.py b/akvo/rsr/feeds.py
--- a/akvo/rsr/feeds.py
+++ b/akvo/rsr/feeds.py
@@ -227,7 +227,7 @@
def items(self, obj):
# Limited to 25 items to prevent gateway timeouts.
- return obj.published_projects().all_updates()[:25]
+ return obj.all_updates()[:25]
def item_title(self, item):
return _(
|
{"golden_diff": "diff --git a/akvo/rsr/feeds.py b/akvo/rsr/feeds.py\n--- a/akvo/rsr/feeds.py\n+++ b/akvo/rsr/feeds.py\n@@ -227,7 +227,7 @@\n \n def items(self, obj):\n # Limited to 25 items to prevent gateway timeouts.\n- return obj.published_projects().all_updates()[:25]\n+ return obj.all_updates()[:25]\n \n def item_title(self, item):\n return _(\n", "issue": "Improve IATI export\n## Test plan\n\nBefore starting with testing, make sure to perform two one-time actions:\n- The `iati_export` cron job is running (done on Test and UAT)\n - See running cron jobs by running: `manage.py crontab show`;\n- The `perform_iati_checks` management command has been run (done on Test and UAT).\n\n---\n\nGIVEN the 'My IATI' section in MyRSR\nWHEN connected to multiple organisations (or as superuser)\nTHEN an organisation selection screen should be shown\n\nWHEN connected to one organisation\nTHEN this organisation should be automatically selected\n\nGIVEN the 'My IATI' section in MyRSR\nWHEN an organisation has been selected\nTHEN the overview of all IATI exports should be shown\nAND for each export the status, number of projects, created by, created at and IATI version should be shown\nAND the latest IATI export should be shown in a green row\nAND a pending or in progress IATI export should be shown in a yellow row\nAND a cancelled or export without an IATI file should be shown in a red row\n\nGIVEN the 'My IATI' section in MyRSR\nWHEN an organisation has been selected\nTHEN it should be possible to select whether the latest IATI file is shown on the organisation page\n\nGIVEN that is has been set that the latest IATI file is shown on the organisation page\nTHEN it should be shown on the organisation page as well\nELSE the IATI file should not be shown\n\nGIVEN that the 'Add new IATI export' button is clicked\nTHEN the user should be redirected to the project selection overview\n\nGIVEN the project selection overview\nWHEN looking at the projects overview\nTHEN all projects where the selected organisation is reporting organisation should be shown\n\nGIVEN the project selection overview\nWHEN applying a filter\nTHEN the project selection should change\nAND the indication of the number of projects selected should indicate the number of selected projects\n\nGIVEN the project selection overview\nWHEN projects are selected AND the 'create new IATI export' button is clicked\nTHEN the user should be redirected to the IATI exports overview\nAND the top IATI export should be the new IATI export (with 'Pending' status)\n\nGIVEN the IATI export overview\nWHEN an export is pending or in progress\nTHEN the overview should be refreshed every 10 seconds\nAND when an export is in progress, the number of processed projects should be shown\n## Issue description\n\nCurrently, an IATI export with more than 70 projects will give a DB timeout. However, we need to be able to export an IATI file with any amount of projects. Similar to the IATI import, we can use a cron job for this.\n- [x] Move IATI export to a cron job implementation\n- [x] Update the 'My IATI' tab in MyRSR\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module. 
\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nimport re\n\nfrom xml.sax.saxutils import XMLGenerator\n\nfrom django.contrib.syndication.views import FeedDoesNotExist, Feed\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.feedgenerator import Rss201rev2Feed\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom akvo.rsr.models import Project, ProjectUpdate, Organisation\n\n\ndef __dict_replace(s, d):\n \"\"\"Replace substrings of a string using a dictionary.\"\"\"\n for key, value in d.items():\n s = s.replace(key, value)\n return s\n\ndef __escape(data, entities):\n # must do ampersand first\n data = data.replace(\"&\", \"&\")\n data = data.replace(\">\", \">\")\n data = data.replace(\"<\", \"<\")\n if entities:\n data = __dict_replace(data, entities)\n return data\n\ndef escape(data, entities={}):\n \"\"\"Modification to xml.sax.saxutils.escape to that detects CDATA blocks that are not escaped\n\n Escape &, <, and > in a string of data.\n\n You can escape other strings of data by passing a dictionary as\n the optional entities parameter. The keys and values must all be\n strings; each key will be replaced with its corresponding value.\n\n \"\"\"\n # find character data, re.DOTALL includes linefeed in .\n pattern = re.compile('<!\\[CDATA\\[.*\\]\\]>', re.DOTALL)\n iterator = pattern.finditer(data)\n start = 0\n bits = []\n for match in iterator:\n #grab chunk before first match\n bit = data[start:match.span()[0]]\n bit = __escape(bit, entities)\n bits.append(bit)\n #grab match\n bit = data[match.span()[0]:match.span()[1]]\n bits.extend(bit)\n start = match.span()[1]\n # escape tail bit after last match\n bit = data[start:]\n bit = __escape(bit, entities)\n bits.extend(bit)\n data = ''.join(bits)\n return data\n\n\nclass RSRSimplerXMLGenerator(XMLGenerator):\n \"\"\"subclassed to be able to call custom escape() function, see above\n \"\"\"\n def characters(self, content):\n self._write(escape(content))\n\n def addQuickElement(self, name, contents=None, attrs=None):\n \"Convenience method for adding an element with no children\"\n if attrs is None: attrs = {}\n self.startElement(name, attrs)\n if contents is not None:\n self.characters(contents)\n self.endElement(name)\n\n\nclass RSRMediaRssFeed(Rss201rev2Feed):\n def rss_attributes(self):\n attrs = super(RSRMediaRssFeed, self).rss_attributes()\n attrs['xmlns:media'] = 'http://search.yahoo.com/mrss/'\n attrs['xmlns:atom'] = 'http://www.w3.org/2005/Atom'\n return attrs\n\n def add_item_elements(self, handler, item):\n \"\"\"Callback to add elements to each item (item/entry) element.\"\"\"\n super(RSRMediaRssFeed, self).add_item_elements(handler, item)\n\n if 'media:title' in item:\n handler.addQuickElement(u\"media:title\", item['title'])\n if 'media:description' in item:\n handler.addQuickElement(u\"media:description\", item['media:description'])\n if 'media:credit' in item:\n handler.addQuickElement(u\"media:credit\", item['media:credit'])\n\n if 'content_url' in item:\n content = dict(url=item['content_url'])\n if 'content_width' in item:\n content['width'] = str(item['content_width'])\n if 'content_height' in item:\n content['height'] = str(item['content_height'])\n handler.addQuickElement(u\"media:content\", '', content)\n\n if 'thumbnail_url' in item:\n thumbnail = dict(url=item['thumbnail_url'])\n if 'thumbnail_width' in item:\n thumbnail['width'] = str(item['thumbnail_width'])\n if 'thumbnail_height' in 
item:\n thumbnail['height'] = str(item['thumbnail_height'])\n handler.addQuickElement(u\"media:thumbnail\", '', thumbnail)\n\n if 'keywords' in item:\n handler.addQuickElement(u\"media:keywords\", item['keywords'])\n\n def write(self, outfile, encoding):\n handler = RSRSimplerXMLGenerator(outfile, encoding)\n handler.startDocument()\n handler.startElement(u\"rss\", self.rss_attributes())\n handler.startElement(u\"channel\", self.root_attributes())\n self.add_root_elements(handler)\n self.write_items(handler)\n self.endChannelElement(handler)\n handler.endElement(u\"rss\")\n\nclass UpdateFeed(Feed):\n \"\"\"base class generating Update feeds\n \"\"\"\n feed_type = RSRMediaRssFeed\n\n def link(self, obj):\n if not obj:\n raise FeedDoesNotExist\n return obj.get_absolute_url()\n\n def item_link(self, item):\n return item.get_absolute_url()\n\n def item_title(self, item):\n return item.title\n\n def item_description(self, item):\n try:\n size = item.photo.size\n return '<![CDATA[<p><a href=\"%s\"><img src=\"%s\" alt=\"\" /></a></p><p>%s</p>]]>' % (\n item.get_absolute_url(),\n item.photo.thumbnail.absolute_url,\n item.text,\n )\n except:\n return item.text\n\n def item_pubdate(self, item):\n return item.created_at\n\n def item_author_name(self, item):\n return item.user.get_full_name()\n\n def item_credit(self, item):\n return item.photo_credit\n\n def item_extra_kwargs(self, item):\n \"\"\"return a dictionary to the feedgenerator for each item to be added to the feed.\n \"\"\"\n try:\n size = item.photo.size\n photo = item.photo\n kwargs = {\n 'media:title': item.title,\n 'media:description': item.photo_caption,\n 'media:credit': item.photo_credit,\n 'content_url': photo.url,\n 'content_width': photo.width,\n 'content_height': photo.height,\n 'thumbnail_url': photo.thumbnail.absolute_url,\n 'thumbnail_width': photo.thumbnail.width(),\n 'thumbnail_height': photo.thumbnail.height(),\n }\n return kwargs\n except:\n return {}\n\n\nclass ProjectUpdates(UpdateFeed):\n \"\"\"RSS feed for last 25 RSR updates of a project.\"\"\"\n def get_object(self, request, project_id):\n return Project.objects.get(pk__exact=project_id)\n\n def title(self, obj):\n return _(u'Akvo RSR project %(id)d: %(project_title)s') % {\n 'id': obj.id,\n 'project_title': obj.title\n }\n\n def description(self, obj):\n return _(u'Project updates for project %(project_title)s') % {\n 'project_title': obj.title\n }\n\n def items(self, obj):\n # Limited to 25 items to prevent gateway timeouts.\n return ProjectUpdate.objects.filter(project__id=obj.id).order_by('-id')[:25]\n\n\nclass OrganisationUpdates(UpdateFeed):\n \"\"\"RSS feed for last 25 RSR updates of an organisation.\"\"\"\n feed_type = RSRMediaRssFeed\n\n def get_object(self, request, org_id):\n return get_object_or_404(Organisation, id=int(org_id))\n\n def title(self, obj):\n return _(u'Projects of %(org_name)s') % {'org_name':obj.name,}\n\n def description(self, obj):\n if obj.name == obj.long_name:\n return _(u\"Project updates for projects partnered by %(org_name)s\") % {\n 'org_name': obj.name\n }\n else:\n return _(\n u\"Project updates for projects partnered by %(org_name)s - %(long_name)s\"\n ) % {'org_name': obj.name, 'long_name': obj.long_name}\n\n def items(self, obj):\n # Limited to 25 items to prevent gateway timeouts.\n return obj.published_projects().all_updates()[:25]\n\n def item_title(self, item):\n return _(\n u'Project %(project_id)d - %(project_title)s: %(update_title)s'\n ) % {\n 'project_id': item.project.id,\n 'project_title': item.project.title,\n 
'update_title': item.title\n }\n\n\nclass AllProjectUpdates(UpdateFeed):\n \"\"\"RSS feed for last 25 RSR updates.\"\"\"\n title = _(u'Last 25 RSR project updates')\n \n def link(self):\n return reverse('update-directory')\n\n description = _(u'Project updates for all Akvo RSR projects')\n\n def items(self):\n # Limited to 25 items to prevent gateway timeouts.\n return ProjectUpdate.objects.select_related().order_by('-id')[:25]\n\n def item_title(self, item):\n return _(\n u'Project %(project_id)d - %(project_title)s: %(update_title)s'\n ) % {\n 'project_id': item.project.id,\n 'project_title': item.project.title,\n 'update_title': item.title\n }\n", "path": "akvo/rsr/feeds.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module. \n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nimport re\n\nfrom xml.sax.saxutils import XMLGenerator\n\nfrom django.contrib.syndication.views import FeedDoesNotExist, Feed\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.feedgenerator import Rss201rev2Feed\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom akvo.rsr.models import Project, ProjectUpdate, Organisation\n\n\ndef __dict_replace(s, d):\n \"\"\"Replace substrings of a string using a dictionary.\"\"\"\n for key, value in d.items():\n s = s.replace(key, value)\n return s\n\ndef __escape(data, entities):\n # must do ampersand first\n data = data.replace(\"&\", \"&\")\n data = data.replace(\">\", \">\")\n data = data.replace(\"<\", \"<\")\n if entities:\n data = __dict_replace(data, entities)\n return data\n\ndef escape(data, entities={}):\n \"\"\"Modification to xml.sax.saxutils.escape to that detects CDATA blocks that are not escaped\n\n Escape &, <, and > in a string of data.\n\n You can escape other strings of data by passing a dictionary as\n the optional entities parameter. 
The keys and values must all be\n strings; each key will be replaced with its corresponding value.\n\n \"\"\"\n # find character data, re.DOTALL includes linefeed in .\n pattern = re.compile('<!\\[CDATA\\[.*\\]\\]>', re.DOTALL)\n iterator = pattern.finditer(data)\n start = 0\n bits = []\n for match in iterator:\n #grab chunk before first match\n bit = data[start:match.span()[0]]\n bit = __escape(bit, entities)\n bits.append(bit)\n #grab match\n bit = data[match.span()[0]:match.span()[1]]\n bits.extend(bit)\n start = match.span()[1]\n # escape tail bit after last match\n bit = data[start:]\n bit = __escape(bit, entities)\n bits.extend(bit)\n data = ''.join(bits)\n return data\n\n\nclass RSRSimplerXMLGenerator(XMLGenerator):\n \"\"\"subclassed to be able to call custom escape() function, see above\n \"\"\"\n def characters(self, content):\n self._write(escape(content))\n\n def addQuickElement(self, name, contents=None, attrs=None):\n \"Convenience method for adding an element with no children\"\n if attrs is None: attrs = {}\n self.startElement(name, attrs)\n if contents is not None:\n self.characters(contents)\n self.endElement(name)\n\n\nclass RSRMediaRssFeed(Rss201rev2Feed):\n def rss_attributes(self):\n attrs = super(RSRMediaRssFeed, self).rss_attributes()\n attrs['xmlns:media'] = 'http://search.yahoo.com/mrss/'\n attrs['xmlns:atom'] = 'http://www.w3.org/2005/Atom'\n return attrs\n\n def add_item_elements(self, handler, item):\n \"\"\"Callback to add elements to each item (item/entry) element.\"\"\"\n super(RSRMediaRssFeed, self).add_item_elements(handler, item)\n\n if 'media:title' in item:\n handler.addQuickElement(u\"media:title\", item['title'])\n if 'media:description' in item:\n handler.addQuickElement(u\"media:description\", item['media:description'])\n if 'media:credit' in item:\n handler.addQuickElement(u\"media:credit\", item['media:credit'])\n\n if 'content_url' in item:\n content = dict(url=item['content_url'])\n if 'content_width' in item:\n content['width'] = str(item['content_width'])\n if 'content_height' in item:\n content['height'] = str(item['content_height'])\n handler.addQuickElement(u\"media:content\", '', content)\n\n if 'thumbnail_url' in item:\n thumbnail = dict(url=item['thumbnail_url'])\n if 'thumbnail_width' in item:\n thumbnail['width'] = str(item['thumbnail_width'])\n if 'thumbnail_height' in item:\n thumbnail['height'] = str(item['thumbnail_height'])\n handler.addQuickElement(u\"media:thumbnail\", '', thumbnail)\n\n if 'keywords' in item:\n handler.addQuickElement(u\"media:keywords\", item['keywords'])\n\n def write(self, outfile, encoding):\n handler = RSRSimplerXMLGenerator(outfile, encoding)\n handler.startDocument()\n handler.startElement(u\"rss\", self.rss_attributes())\n handler.startElement(u\"channel\", self.root_attributes())\n self.add_root_elements(handler)\n self.write_items(handler)\n self.endChannelElement(handler)\n handler.endElement(u\"rss\")\n\nclass UpdateFeed(Feed):\n \"\"\"base class generating Update feeds\n \"\"\"\n feed_type = RSRMediaRssFeed\n\n def link(self, obj):\n if not obj:\n raise FeedDoesNotExist\n return obj.get_absolute_url()\n\n def item_link(self, item):\n return item.get_absolute_url()\n\n def item_title(self, item):\n return item.title\n\n def item_description(self, item):\n try:\n size = item.photo.size\n return '<![CDATA[<p><a href=\"%s\"><img src=\"%s\" alt=\"\" /></a></p><p>%s</p>]]>' % (\n item.get_absolute_url(),\n item.photo.thumbnail.absolute_url,\n item.text,\n )\n except:\n return item.text\n\n def 
item_pubdate(self, item):\n return item.created_at\n\n def item_author_name(self, item):\n return item.user.get_full_name()\n\n def item_credit(self, item):\n return item.photo_credit\n\n def item_extra_kwargs(self, item):\n \"\"\"return a dictionary to the feedgenerator for each item to be added to the feed.\n \"\"\"\n try:\n size = item.photo.size\n photo = item.photo\n kwargs = {\n 'media:title': item.title,\n 'media:description': item.photo_caption,\n 'media:credit': item.photo_credit,\n 'content_url': photo.url,\n 'content_width': photo.width,\n 'content_height': photo.height,\n 'thumbnail_url': photo.thumbnail.absolute_url,\n 'thumbnail_width': photo.thumbnail.width(),\n 'thumbnail_height': photo.thumbnail.height(),\n }\n return kwargs\n except:\n return {}\n\n\nclass ProjectUpdates(UpdateFeed):\n \"\"\"RSS feed for last 25 RSR updates of a project.\"\"\"\n def get_object(self, request, project_id):\n return Project.objects.get(pk__exact=project_id)\n\n def title(self, obj):\n return _(u'Akvo RSR project %(id)d: %(project_title)s') % {\n 'id': obj.id,\n 'project_title': obj.title\n }\n\n def description(self, obj):\n return _(u'Project updates for project %(project_title)s') % {\n 'project_title': obj.title\n }\n\n def items(self, obj):\n # Limited to 25 items to prevent gateway timeouts.\n return ProjectUpdate.objects.filter(project__id=obj.id).order_by('-id')[:25]\n\n\nclass OrganisationUpdates(UpdateFeed):\n \"\"\"RSS feed for last 25 RSR updates of an organisation.\"\"\"\n feed_type = RSRMediaRssFeed\n\n def get_object(self, request, org_id):\n return get_object_or_404(Organisation, id=int(org_id))\n\n def title(self, obj):\n return _(u'Projects of %(org_name)s') % {'org_name':obj.name,}\n\n def description(self, obj):\n if obj.name == obj.long_name:\n return _(u\"Project updates for projects partnered by %(org_name)s\") % {\n 'org_name': obj.name\n }\n else:\n return _(\n u\"Project updates for projects partnered by %(org_name)s - %(long_name)s\"\n ) % {'org_name': obj.name, 'long_name': obj.long_name}\n\n def items(self, obj):\n # Limited to 25 items to prevent gateway timeouts.\n return obj.all_updates()[:25]\n\n def item_title(self, item):\n return _(\n u'Project %(project_id)d - %(project_title)s: %(update_title)s'\n ) % {\n 'project_id': item.project.id,\n 'project_title': item.project.title,\n 'update_title': item.title\n }\n\n\nclass AllProjectUpdates(UpdateFeed):\n \"\"\"RSS feed for last 25 RSR updates.\"\"\"\n title = _(u'Last 25 RSR project updates')\n \n def link(self):\n return reverse('update-directory')\n\n description = _(u'Project updates for all Akvo RSR projects')\n\n def items(self):\n # Limited to 25 items to prevent gateway timeouts.\n return ProjectUpdate.objects.select_related().order_by('-id')[:25]\n\n def item_title(self, item):\n return _(\n u'Project %(project_id)d - %(project_title)s: %(update_title)s'\n ) % {\n 'project_id': item.project.id,\n 'project_title': item.project.title,\n 'update_title': item.title\n }\n", "path": "akvo/rsr/feeds.py"}]}
| 3,649 | 117 |
gh_patches_debug_30620
|
rasdani/github-patches
|
git_diff
|
ietf-tools__datatracker-5619
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Another location where meeting time zone info is incorrect
### Describe the issue
Related to #5285; time data is incorrectly shown in UTC. This is not a showstopper since I'm probably the only one who would see this page, but I'm just alerting you to one more place where the time zone is shown as UTC instead of meeting time: the "Edit Session" page.
See below for an example: these office hours are scheduled for 10:30 local time but the edit session page says 01:30.
<img width="719" alt="Screen Shot 2023-03-15 at 2 24 42 PM" src="https://user-images.githubusercontent.com/29440652/225447877-90f2209a-8e79-41c8-8f6a-c054c877779e.png">
### Code of Conduct
- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ietf/meeting/templatetags/session_filters.py`
Content:
```
1 from django import template
2
3 register = template.Library()
4
5 @register.filter
6 def presented_versions(session,doc):
7 sp = session.sessionpresentation_set.filter(document=doc)
8 if not sp:
9 return "Document not in session"
10 else:
11 rev = sp.first().rev
12 return rev if rev else "(current)"
13
14 @register.filter
15 def can_manage_materials(session,user):
16 return session.can_manage_materials(user)
17
18
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ietf/meeting/templatetags/session_filters.py b/ietf/meeting/templatetags/session_filters.py
--- a/ietf/meeting/templatetags/session_filters.py
+++ b/ietf/meeting/templatetags/session_filters.py
@@ -1,17 +1,56 @@
+# Copyright The IETF Trust 2023, All Rights Reserved
from django import template
+from ietf.name.models import SessionStatusName
+
register = template.Library()
+
@register.filter
-def presented_versions(session,doc):
- sp = session.sessionpresentation_set.filter(document=doc)
- if not sp:
- return "Document not in session"
- else:
- rev = sp.first().rev
- return rev if rev else "(current)"
+def presented_versions(session, doc):
+ sp = session.sessionpresentation_set.filter(document=doc)
+ if not sp:
+ return "Document not in session"
+ else:
+ rev = sp.first().rev
+ return rev if rev else "(current)"
+
@register.filter
-def can_manage_materials(session,user):
+def can_manage_materials(session, user):
return session.can_manage_materials(user)
+
[email protected]
+def describe_with_tz(session):
+ # Very similar to session.__str__, but doesn't treat interims differently from sessions at an IETF meeting
+ # and displays the timeslot in the meeting's timezone.
+
+ if session is None:
+ return ""
+
+ status_id = None
+ if hasattr(session, "current_status"):
+ status_id = session.current_status
+ elif session.pk is not None:
+ latest_event = session.schedulingevent_set.order_by("-time", "-id").first()
+ if latest_event:
+ status_id = latest_event.status_id
+
+ if status_id in ("canceled", "disappr", "notmeet", "deleted"):
+ ss0name = "(%s)" % SessionStatusName.objects.get(slug=status_id).name
+ else:
+ ss0name = "(unscheduled)"
+ ss = session.timeslotassignments.filter(
+ schedule__in=[
+ session.meeting.schedule,
+ session.meeting.schedule.base if session.meeting.schedule else None,
+ ]
+ ).order_by("timeslot__time")
+ if ss:
+ ss0name = ",".join(
+ x.timeslot.time.astimezone(session.meeting.tz()).strftime("%a-%H%M")
+ for x in ss
+ )
+ ss0name += f" {session.meeting.tz()}"
+ return f"{session.meeting}: {session.group.acronym} {session.name} {ss0name}"
|
{"golden_diff": "diff --git a/ietf/meeting/templatetags/session_filters.py b/ietf/meeting/templatetags/session_filters.py\n--- a/ietf/meeting/templatetags/session_filters.py\n+++ b/ietf/meeting/templatetags/session_filters.py\n@@ -1,17 +1,56 @@\n+# Copyright The IETF Trust 2023, All Rights Reserved\n from django import template\n \n+from ietf.name.models import SessionStatusName\n+\n register = template.Library()\n \n+\n @register.filter\n-def presented_versions(session,doc):\n- sp = session.sessionpresentation_set.filter(document=doc)\n- if not sp:\n- return \"Document not in session\"\n- else:\n- rev = sp.first().rev\n- return rev if rev else \"(current)\"\n+def presented_versions(session, doc):\n+ sp = session.sessionpresentation_set.filter(document=doc)\n+ if not sp:\n+ return \"Document not in session\"\n+ else:\n+ rev = sp.first().rev\n+ return rev if rev else \"(current)\"\n+\n \n @register.filter\n-def can_manage_materials(session,user):\n+def can_manage_materials(session, user):\n return session.can_manage_materials(user)\n \n+\[email protected]\n+def describe_with_tz(session):\n+ # Very similar to session.__str__, but doesn't treat interims differently from sessions at an IETF meeting\n+ # and displays the timeslot in the meeting's timezone.\n+\n+ if session is None:\n+ return \"\"\n+\n+ status_id = None\n+ if hasattr(session, \"current_status\"):\n+ status_id = session.current_status\n+ elif session.pk is not None:\n+ latest_event = session.schedulingevent_set.order_by(\"-time\", \"-id\").first()\n+ if latest_event:\n+ status_id = latest_event.status_id\n+\n+ if status_id in (\"canceled\", \"disappr\", \"notmeet\", \"deleted\"):\n+ ss0name = \"(%s)\" % SessionStatusName.objects.get(slug=status_id).name\n+ else:\n+ ss0name = \"(unscheduled)\"\n+ ss = session.timeslotassignments.filter(\n+ schedule__in=[\n+ session.meeting.schedule,\n+ session.meeting.schedule.base if session.meeting.schedule else None,\n+ ]\n+ ).order_by(\"timeslot__time\")\n+ if ss:\n+ ss0name = \",\".join(\n+ x.timeslot.time.astimezone(session.meeting.tz()).strftime(\"%a-%H%M\")\n+ for x in ss\n+ )\n+ ss0name += f\" {session.meeting.tz()}\"\n+ return f\"{session.meeting}: {session.group.acronym} {session.name} {ss0name}\"\n", "issue": "Another location where meeting time zone info is incorrect\n### Describe the issue\n\nRelated to #5285 ; time data is incorrectly showing in UTC. This is not a showstopper since I'm probably the only one who would see this page, but I'm just alerting you to one more place the time zone is showing as UTC instead of meeting time: on the \"Edit Session\" page. 
\r\n\r\nSee below for an example: these office hours are scheduled for 10:30 local time but the edit session page says 01:30.\r\n\r\n<img width=\"719\" alt=\"Screen Shot 2023-03-15 at 2 24 42 PM\" src=\"https://user-images.githubusercontent.com/29440652/225447877-90f2209a-8e79-41c8-8f6a-c054c877779e.png\">\r\n\n\n### Code of Conduct\n\n- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)\n", "before_files": [{"content": "from django import template\n\nregister = template.Library()\n\[email protected]\ndef presented_versions(session,doc):\n sp = session.sessionpresentation_set.filter(document=doc)\n if not sp:\n return \"Document not in session\"\n else:\n rev = sp.first().rev\n return rev if rev else \"(current)\"\n\[email protected]\ndef can_manage_materials(session,user):\n return session.can_manage_materials(user)\n\n", "path": "ietf/meeting/templatetags/session_filters.py"}], "after_files": [{"content": "# Copyright The IETF Trust 2023, All Rights Reserved\nfrom django import template\n\nfrom ietf.name.models import SessionStatusName\n\nregister = template.Library()\n\n\[email protected]\ndef presented_versions(session, doc):\n sp = session.sessionpresentation_set.filter(document=doc)\n if not sp:\n return \"Document not in session\"\n else:\n rev = sp.first().rev\n return rev if rev else \"(current)\"\n\n\[email protected]\ndef can_manage_materials(session, user):\n return session.can_manage_materials(user)\n\n\[email protected]\ndef describe_with_tz(session):\n # Very similar to session.__str__, but doesn't treat interims differently from sessions at an IETF meeting\n # and displays the timeslot in the meeting's timezone.\n\n if session is None:\n return \"\"\n\n status_id = None\n if hasattr(session, \"current_status\"):\n status_id = session.current_status\n elif session.pk is not None:\n latest_event = session.schedulingevent_set.order_by(\"-time\", \"-id\").first()\n if latest_event:\n status_id = latest_event.status_id\n\n if status_id in (\"canceled\", \"disappr\", \"notmeet\", \"deleted\"):\n ss0name = \"(%s)\" % SessionStatusName.objects.get(slug=status_id).name\n else:\n ss0name = \"(unscheduled)\"\n ss = session.timeslotassignments.filter(\n schedule__in=[\n session.meeting.schedule,\n session.meeting.schedule.base if session.meeting.schedule else None,\n ]\n ).order_by(\"timeslot__time\")\n if ss:\n ss0name = \",\".join(\n x.timeslot.time.astimezone(session.meeting.tz()).strftime(\"%a-%H%M\")\n for x in ss\n )\n ss0name += f\" {session.meeting.tz()}\"\n return f\"{session.meeting}: {session.group.acronym} {session.name} {ss0name}\"\n", "path": "ietf/meeting/templatetags/session_filters.py"}]}
| 639 | 603 |
gh_patches_debug_23321
|
rasdani/github-patches
|
git_diff
|
easybuilders__easybuild-framework-3686
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Hierarchical Module Naming Scheme with Cray toolchains
I have tried following the tutorial https://easybuilders.github.io/easybuild-tutorial/hmns to implement the Hierarchical Module Naming Scheme (MNS) with a modified Cray toolchain using Lmod 8.3.1, but the compiler modulefile created under the `Core` folder does not update the `MODULEPATH` once loaded; as a result, the modules created cannot be found (not even with `module spider`).
I was wondering if it is at all possible to use the Hierarchical MNS with the way Cray toolchains are configured: for instance, I cannot prepare the environment as suggested in the tutorial with the commands below without unloading essential modules:
```
module purge
module unuse $MODULEPATH
```
Thanks for your advice!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `easybuild/tools/module_naming_scheme/hierarchical_mns.py`
Content:
```
1 ##
2 # Copyright 2013-2021 Ghent University
3 #
4 # This file is part of EasyBuild,
5 # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
6 # with support of Ghent University (http://ugent.be/hpc),
7 # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
8 # Flemish Research Foundation (FWO) (http://www.fwo.be/en)
9 # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
10 #
11 # https://github.com/easybuilders/easybuild
12 #
13 # EasyBuild is free software: you can redistribute it and/or modify
14 # it under the terms of the GNU General Public License as published by
15 # the Free Software Foundation v2.
16 #
17 # EasyBuild is distributed in the hope that it will be useful,
18 # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 # GNU General Public License for more details.
21 #
22 # You should have received a copy of the GNU General Public License
23 # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
24 ##
25 """
26 Implementation of an example hierarchical module naming scheme.
27
28 :author: Kenneth Hoste (Ghent University)
29 :author: Markus Geimer (Forschungszentrum Juelich GmbH)
30 """
31
32 import os
33 import re
34
35 from easybuild.toolchains.gcccore import GCCcore
36 from easybuild.tools.build_log import EasyBuildError
37 from easybuild.tools.module_naming_scheme.mns import ModuleNamingScheme
38 from easybuild.tools.module_naming_scheme.toolchain import det_toolchain_compilers, det_toolchain_mpi
39
40
41 CORE = 'Core'
42 COMPILER = 'Compiler'
43 MPI = 'MPI'
44 TOOLCHAIN = 'Toolchain'
45
46 MODULECLASS_COMPILER = 'compiler'
47 MODULECLASS_MPI = 'mpi'
48 MODULECLASS_TOOLCHAIN = 'toolchain'
49
50 GCCCORE = GCCcore.NAME
51
52 # note: names in keys are ordered alphabetically
53 COMP_NAME_VERSION_TEMPLATES = {
54 # required for use of iccifort toolchain
55 'icc,ifort': ('intel', '%(icc)s'),
56 'iccifort': ('intel', '%(iccifort)s'),
57 # required for use of intel-compilers toolchain (OneAPI compilers)
58 'intel-compilers': ('intel', '%(intel-compilers)s'),
59 # required for use of ClangGCC toolchain
60 'Clang,GCC': ('Clang-GCC', '%(Clang)s-%(GCC)s'),
61 # required for use of gcccuda toolchain, and for CUDA installed with GCC toolchain
62 'CUDA,GCC': ('GCC-CUDA', '%(GCC)s-%(CUDA)s'),
63 # required for use of iccifortcuda toolchain
64 'CUDA,icc,ifort': ('intel-CUDA', '%(icc)s-%(CUDA)s'),
65 'CUDA,iccifort': ('intel-CUDA', '%(iccifort)s-%(CUDA)s'),
66 # required for CUDA installed with iccifort toolchain
67 # need to use 'intel' here because 'iccifort' toolchain maps to 'intel' (see above)
68 'CUDA,intel': ('intel-CUDA', '%(intel)s-%(CUDA)s'),
69 # required for use of xlcxlf toolchain
70 'xlc,xlf': ('xlcxlf', '%(xlc)s'),
71 }
72
73
74 class HierarchicalMNS(ModuleNamingScheme):
75 """Class implementing an example hierarchical module naming scheme."""
76
77 REQUIRED_KEYS = ['name', 'versionprefix', 'version', 'versionsuffix', 'toolchain', 'moduleclass']
78
79 def requires_toolchain_details(self):
80 """
81 Determine whether toolchain details are required by this module naming scheme,
82 e.g. whether one of det_toolchain_* functions are relied upon.
83 """
84 return True
85
86 def det_full_module_name(self, ec):
87 """
88 Determine full module name, relative to the top of the module path.
89 Examples: Core/GCC/4.8.3, Compiler/GCC/4.8.3/OpenMPI/1.6.5, MPI/GCC/4.8.3/OpenMPI/1.6.5/HPL/2.1
90 """
91 return os.path.join(self.det_module_subdir(ec), self.det_short_module_name(ec))
92
93 def det_short_module_name(self, ec):
94 """
95 Determine short module name, i.e. the name under which modules will be exposed to users.
96 Examples: GCC/4.8.3, OpenMPI/1.6.5, OpenBLAS/0.2.9, HPL/2.1, Python/2.7.5
97 """
98 return os.path.join(ec['name'], self.det_full_version(ec))
99
100 def det_full_version(self, ec):
101 """Determine full version, taking into account version prefix/suffix."""
102 # versionprefix is not always available (e.g., for toolchains)
103 versionprefix = ec.get('versionprefix', '')
104 return versionprefix + ec['version'] + ec['versionsuffix']
105
106 def det_toolchain_compilers_name_version(self, tc_comps):
107 """
108 Determine toolchain compiler tag, for given list of compilers.
109 """
110 if tc_comps is None:
111 # no compiler in toolchain, system toolchain
112 res = None
113 elif len(tc_comps) == 1:
114 tc_comp = tc_comps[0]
115 if tc_comp is None:
116 res = None
117 else:
118 res = (tc_comp['name'], self.det_full_version(tc_comp))
119 else:
120 comp_versions = dict([(comp['name'], self.det_full_version(comp)) for comp in tc_comps])
121 comp_names = comp_versions.keys()
122 key = ','.join(sorted(comp_names))
123 if key in COMP_NAME_VERSION_TEMPLATES:
124 tc_comp_name, tc_comp_ver_tmpl = COMP_NAME_VERSION_TEMPLATES[key]
125 tc_comp_ver = tc_comp_ver_tmpl % comp_versions
126 # make sure that icc/ifort versions match (unless not existing as separate modules)
127 if tc_comp_name == 'intel' and comp_versions.get('icc') != comp_versions.get('ifort'):
128 raise EasyBuildError("Bumped into different versions for Intel compilers: %s", comp_versions)
129 else:
130 raise EasyBuildError("Unknown set of toolchain compilers, module naming scheme needs work: %s",
131 comp_names)
132 res = (tc_comp_name, tc_comp_ver)
133 return res
134
135 def det_module_subdir(self, ec):
136 """
137 Determine module subdirectory, relative to the top of the module path.
138 This determines the separation between module names exposed to users, and what's part of the $MODULEPATH.
139 Examples: Core, Compiler/GCC/4.8.3, MPI/GCC/4.8.3/OpenMPI/1.6.5
140 """
141 tc_comps = det_toolchain_compilers(ec)
142 # determine prefix based on type of toolchain used
143 if tc_comps is None:
144 # no compiler in toolchain, system toolchain => Core module
145 subdir = CORE
146 elif tc_comps == [None]:
147 # no info on toolchain compiler (cfr. Cray toolchains),
148 # then use toolchain name/version
149 subdir = os.path.join(TOOLCHAIN, ec.toolchain.name, ec.toolchain.version)
150 else:
151 tc_comp_name, tc_comp_ver = self.det_toolchain_compilers_name_version(tc_comps)
152 tc_mpi = det_toolchain_mpi(ec)
153 if tc_mpi is None:
154 # compiler-only toolchain => Compiler/<compiler_name>/<compiler_version> namespace
155 subdir = os.path.join(COMPILER, tc_comp_name, tc_comp_ver)
156 else:
157 # compiler-MPI toolchain => MPI/<comp_name>/<comp_version>/<MPI_name>/<MPI_version> namespace
158 tc_mpi_fullver = self.det_full_version(tc_mpi)
159 subdir = os.path.join(MPI, tc_comp_name, tc_comp_ver, tc_mpi['name'], tc_mpi_fullver)
160
161 return subdir
162
163 def det_module_symlink_paths(self, ec):
164 """
165 Determine list of paths in which symlinks to module files must be created.
166 """
167 # symlinks are not very useful in the context of a hierarchical MNS
168 return []
169
170 def det_modpath_extensions(self, ec):
171 """
172 Determine module path extensions, if any.
173 Examples: Compiler/GCC/4.8.3 (for GCC/4.8.3 module), MPI/GCC/4.8.3/OpenMPI/1.6.5 (for OpenMPI/1.6.5 module)
174 """
175 modclass = ec['moduleclass']
176 tc_comps = det_toolchain_compilers(ec)
177 tc_comp_info = self.det_toolchain_compilers_name_version(tc_comps)
178
179 # we consider the following to be compilers:
180 # * has 'compiler' specified as moduleclass
181 is_compiler = modclass == MODULECLASS_COMPILER
182 # * CUDA, but only when not installed with 'system' toolchain (i.e. one or more toolchain compilers found)
183 non_system_tc = tc_comps is not None
184 non_system_cuda = ec['name'] == 'CUDA' and non_system_tc
185
186 paths = []
187 if is_compiler or non_system_cuda:
188 # obtain list of compilers based on that extend $MODULEPATH in some way other than <name>/<version>
189 extend_comps = []
190 # exclude GCC for which <name>/<version> is used as $MODULEPATH extension
191 excluded_comps = ['GCC']
192 for comps in COMP_NAME_VERSION_TEMPLATES.keys():
193 extend_comps.extend([comp for comp in comps.split(',') if comp not in excluded_comps])
194
195 comp_name_ver = None
196 if ec['name'] in extend_comps:
197 for key in COMP_NAME_VERSION_TEMPLATES:
198 comp_names = key.split(',')
199 if ec['name'] in comp_names:
200 comp_name, comp_ver_tmpl = COMP_NAME_VERSION_TEMPLATES[key]
201 comp_versions = {ec['name']: self.det_full_version(ec)}
202 if ec['name'] == 'ifort':
203 # 'icc' key should be provided since it's the only one used in the template
204 comp_versions.update({'icc': self.det_full_version(ec)})
205
206 if non_system_tc:
207 tc_comp_name, tc_comp_ver = tc_comp_info
208 # Stick to name GCC for GCCcore
209 if tc_comp_name == GCCCORE:
210 tc_comp_name = 'GCC'
211 if tc_comp_name in comp_names:
212 # also provide toolchain version for non-system toolchains
213 comp_versions.update({tc_comp_name: tc_comp_ver})
214
215 comp_ver_keys = re.findall(r'%\((\w+)\)s', comp_ver_tmpl)
216 if all(comp_ver_key in comp_versions for comp_ver_key in comp_ver_keys):
217 comp_name_ver = [comp_name, comp_ver_tmpl % comp_versions]
218 break
219 else:
220 comp_name_ver = [ec['name'], self.det_full_version(ec)]
221
222 if comp_name_ver is None:
223 raise EasyBuildError("Required compilers not available in toolchain %s for %s v%s",
224 ec['toolchain'], ec['name'], ec['version'])
225
226 paths.append(os.path.join(COMPILER, *comp_name_ver))
227
228 elif modclass == MODULECLASS_MPI:
229 if tc_comp_info is None:
230 raise EasyBuildError("No compiler available in toolchain %s used to install MPI library %s v%s, "
231 "which is required by the active module naming scheme.",
232 ec['toolchain'], ec['name'], ec['version'])
233 else:
234 tc_comp_name, tc_comp_ver = tc_comp_info
235 fullver = self.det_full_version(ec)
236 paths.append(os.path.join(MPI, tc_comp_name, tc_comp_ver, ec['name'], fullver))
237
238 # special case for Cray toolchains
239 elif modclass == MODULECLASS_TOOLCHAIN and tc_comp_info is None and ec.name.startswith('Cray'):
240 paths.append(os.path.join(TOOLCHAIN, ec.name, ec.version))
241
242 return paths
243
244 def expand_toolchain_load(self, ec=None):
245 """
246 Determine whether load statements for a toolchain should be expanded to load statements for its dependencies.
247 This is useful when toolchains are not exposed to users.
248 """
249 return True
250
251 def det_init_modulepaths(self, ec):
252 """
253 Determine list of initial module paths (i.e. top of the hierarchy).
254 """
255 return [CORE]
256
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/easybuild/tools/module_naming_scheme/hierarchical_mns.py b/easybuild/tools/module_naming_scheme/hierarchical_mns.py
--- a/easybuild/tools/module_naming_scheme/hierarchical_mns.py
+++ b/easybuild/tools/module_naming_scheme/hierarchical_mns.py
@@ -70,6 +70,12 @@
'xlc,xlf': ('xlcxlf', '%(xlc)s'),
}
+# possible prefixes for Cray toolchain names
+# example: CrayGNU, CrayCCE, cpeGNU, cpeCCE, ...;
+# important for determining $MODULEPATH extensions in det_modpath_extensions,
+# cfr. https://github.com/easybuilders/easybuild-framework/issues/3575
+CRAY_TOOLCHAIN_NAME_PREFIXES = ('Cray', 'cpe')
+
class HierarchicalMNS(ModuleNamingScheme):
"""Class implementing an example hierarchical module naming scheme."""
@@ -236,8 +242,9 @@
paths.append(os.path.join(MPI, tc_comp_name, tc_comp_ver, ec['name'], fullver))
# special case for Cray toolchains
- elif modclass == MODULECLASS_TOOLCHAIN and tc_comp_info is None and ec.name.startswith('Cray'):
- paths.append(os.path.join(TOOLCHAIN, ec.name, ec.version))
+ elif modclass == MODULECLASS_TOOLCHAIN and tc_comp_info is None:
+ if any(ec.name.startswith(x) for x in CRAY_TOOLCHAIN_NAME_PREFIXES):
+ paths.append(os.path.join(TOOLCHAIN, ec.name, ec.version))
return paths
|
{"golden_diff": "diff --git a/easybuild/tools/module_naming_scheme/hierarchical_mns.py b/easybuild/tools/module_naming_scheme/hierarchical_mns.py\n--- a/easybuild/tools/module_naming_scheme/hierarchical_mns.py\n+++ b/easybuild/tools/module_naming_scheme/hierarchical_mns.py\n@@ -70,6 +70,12 @@\n 'xlc,xlf': ('xlcxlf', '%(xlc)s'),\n }\n \n+# possible prefixes for Cray toolchain names\n+# example: CrayGNU, CrayCCE, cpeGNU, cpeCCE, ...;\n+# important for determining $MODULEPATH extensions in det_modpath_extensions,\n+# cfr. https://github.com/easybuilders/easybuild-framework/issues/3575\n+CRAY_TOOLCHAIN_NAME_PREFIXES = ('Cray', 'cpe')\n+\n \n class HierarchicalMNS(ModuleNamingScheme):\n \"\"\"Class implementing an example hierarchical module naming scheme.\"\"\"\n@@ -236,8 +242,9 @@\n paths.append(os.path.join(MPI, tc_comp_name, tc_comp_ver, ec['name'], fullver))\n \n # special case for Cray toolchains\n- elif modclass == MODULECLASS_TOOLCHAIN and tc_comp_info is None and ec.name.startswith('Cray'):\n- paths.append(os.path.join(TOOLCHAIN, ec.name, ec.version))\n+ elif modclass == MODULECLASS_TOOLCHAIN and tc_comp_info is None:\n+ if any(ec.name.startswith(x) for x in CRAY_TOOLCHAIN_NAME_PREFIXES):\n+ paths.append(os.path.join(TOOLCHAIN, ec.name, ec.version))\n \n return paths\n", "issue": "Hierarchical Module Naming Scheme with Cray toolchains\nI have tried following the tutorial https://easybuilders.github.io/easybuild-tutorial/hmns to implement the Hierarchical Module Naming Scheme (MNS) with a modified Cray toolchain using Lmod 8.3.1, but the compiler modulefile created under the `Core` folder does not update the `MODULEPATH` once loaded, therefore the modules created cannot be searched (not even with `module spider`).\r\n\r\nI was wondering if it is at all possible to use the Hierarchical MNS with the way Cray toolchains are configured: for instance I cannot prepare the environment as suggested in the tutorial, using the commands below, or I would unload essential modules:\r\n```\r\nmodule purge\r\nmodule unuse $MODULEPATH\r\n```\r\n\r\nThanks for your advice!\n", "before_files": [{"content": "##\n# Copyright 2013-2021 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. 
If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nImplementation of an example hierarchical module naming scheme.\n\n:author: Kenneth Hoste (Ghent University)\n:author: Markus Geimer (Forschungszentrum Juelich GmbH)\n\"\"\"\n\nimport os\nimport re\n\nfrom easybuild.toolchains.gcccore import GCCcore\nfrom easybuild.tools.build_log import EasyBuildError\nfrom easybuild.tools.module_naming_scheme.mns import ModuleNamingScheme\nfrom easybuild.tools.module_naming_scheme.toolchain import det_toolchain_compilers, det_toolchain_mpi\n\n\nCORE = 'Core'\nCOMPILER = 'Compiler'\nMPI = 'MPI'\nTOOLCHAIN = 'Toolchain'\n\nMODULECLASS_COMPILER = 'compiler'\nMODULECLASS_MPI = 'mpi'\nMODULECLASS_TOOLCHAIN = 'toolchain'\n\nGCCCORE = GCCcore.NAME\n\n# note: names in keys are ordered alphabetically\nCOMP_NAME_VERSION_TEMPLATES = {\n # required for use of iccifort toolchain\n 'icc,ifort': ('intel', '%(icc)s'),\n 'iccifort': ('intel', '%(iccifort)s'),\n # required for use of intel-compilers toolchain (OneAPI compilers)\n 'intel-compilers': ('intel', '%(intel-compilers)s'),\n # required for use of ClangGCC toolchain\n 'Clang,GCC': ('Clang-GCC', '%(Clang)s-%(GCC)s'),\n # required for use of gcccuda toolchain, and for CUDA installed with GCC toolchain\n 'CUDA,GCC': ('GCC-CUDA', '%(GCC)s-%(CUDA)s'),\n # required for use of iccifortcuda toolchain\n 'CUDA,icc,ifort': ('intel-CUDA', '%(icc)s-%(CUDA)s'),\n 'CUDA,iccifort': ('intel-CUDA', '%(iccifort)s-%(CUDA)s'),\n # required for CUDA installed with iccifort toolchain\n # need to use 'intel' here because 'iccifort' toolchain maps to 'intel' (see above)\n 'CUDA,intel': ('intel-CUDA', '%(intel)s-%(CUDA)s'),\n # required for use of xlcxlf toolchain\n 'xlc,xlf': ('xlcxlf', '%(xlc)s'),\n}\n\n\nclass HierarchicalMNS(ModuleNamingScheme):\n \"\"\"Class implementing an example hierarchical module naming scheme.\"\"\"\n\n REQUIRED_KEYS = ['name', 'versionprefix', 'version', 'versionsuffix', 'toolchain', 'moduleclass']\n\n def requires_toolchain_details(self):\n \"\"\"\n Determine whether toolchain details are required by this module naming scheme,\n e.g. whether one of det_toolchain_* functions are relied upon.\n \"\"\"\n return True\n\n def det_full_module_name(self, ec):\n \"\"\"\n Determine full module name, relative to the top of the module path.\n Examples: Core/GCC/4.8.3, Compiler/GCC/4.8.3/OpenMPI/1.6.5, MPI/GCC/4.8.3/OpenMPI/1.6.5/HPL/2.1\n \"\"\"\n return os.path.join(self.det_module_subdir(ec), self.det_short_module_name(ec))\n\n def det_short_module_name(self, ec):\n \"\"\"\n Determine short module name, i.e. 
the name under which modules will be exposed to users.\n Examples: GCC/4.8.3, OpenMPI/1.6.5, OpenBLAS/0.2.9, HPL/2.1, Python/2.7.5\n \"\"\"\n return os.path.join(ec['name'], self.det_full_version(ec))\n\n def det_full_version(self, ec):\n \"\"\"Determine full version, taking into account version prefix/suffix.\"\"\"\n # versionprefix is not always available (e.g., for toolchains)\n versionprefix = ec.get('versionprefix', '')\n return versionprefix + ec['version'] + ec['versionsuffix']\n\n def det_toolchain_compilers_name_version(self, tc_comps):\n \"\"\"\n Determine toolchain compiler tag, for given list of compilers.\n \"\"\"\n if tc_comps is None:\n # no compiler in toolchain, system toolchain\n res = None\n elif len(tc_comps) == 1:\n tc_comp = tc_comps[0]\n if tc_comp is None:\n res = None\n else:\n res = (tc_comp['name'], self.det_full_version(tc_comp))\n else:\n comp_versions = dict([(comp['name'], self.det_full_version(comp)) for comp in tc_comps])\n comp_names = comp_versions.keys()\n key = ','.join(sorted(comp_names))\n if key in COMP_NAME_VERSION_TEMPLATES:\n tc_comp_name, tc_comp_ver_tmpl = COMP_NAME_VERSION_TEMPLATES[key]\n tc_comp_ver = tc_comp_ver_tmpl % comp_versions\n # make sure that icc/ifort versions match (unless not existing as separate modules)\n if tc_comp_name == 'intel' and comp_versions.get('icc') != comp_versions.get('ifort'):\n raise EasyBuildError(\"Bumped into different versions for Intel compilers: %s\", comp_versions)\n else:\n raise EasyBuildError(\"Unknown set of toolchain compilers, module naming scheme needs work: %s\",\n comp_names)\n res = (tc_comp_name, tc_comp_ver)\n return res\n\n def det_module_subdir(self, ec):\n \"\"\"\n Determine module subdirectory, relative to the top of the module path.\n This determines the separation between module names exposed to users, and what's part of the $MODULEPATH.\n Examples: Core, Compiler/GCC/4.8.3, MPI/GCC/4.8.3/OpenMPI/1.6.5\n \"\"\"\n tc_comps = det_toolchain_compilers(ec)\n # determine prefix based on type of toolchain used\n if tc_comps is None:\n # no compiler in toolchain, system toolchain => Core module\n subdir = CORE\n elif tc_comps == [None]:\n # no info on toolchain compiler (cfr. 
Cray toolchains),\n # then use toolchain name/version\n subdir = os.path.join(TOOLCHAIN, ec.toolchain.name, ec.toolchain.version)\n else:\n tc_comp_name, tc_comp_ver = self.det_toolchain_compilers_name_version(tc_comps)\n tc_mpi = det_toolchain_mpi(ec)\n if tc_mpi is None:\n # compiler-only toolchain => Compiler/<compiler_name>/<compiler_version> namespace\n subdir = os.path.join(COMPILER, tc_comp_name, tc_comp_ver)\n else:\n # compiler-MPI toolchain => MPI/<comp_name>/<comp_version>/<MPI_name>/<MPI_version> namespace\n tc_mpi_fullver = self.det_full_version(tc_mpi)\n subdir = os.path.join(MPI, tc_comp_name, tc_comp_ver, tc_mpi['name'], tc_mpi_fullver)\n\n return subdir\n\n def det_module_symlink_paths(self, ec):\n \"\"\"\n Determine list of paths in which symlinks to module files must be created.\n \"\"\"\n # symlinks are not very useful in the context of a hierarchical MNS\n return []\n\n def det_modpath_extensions(self, ec):\n \"\"\"\n Determine module path extensions, if any.\n Examples: Compiler/GCC/4.8.3 (for GCC/4.8.3 module), MPI/GCC/4.8.3/OpenMPI/1.6.5 (for OpenMPI/1.6.5 module)\n \"\"\"\n modclass = ec['moduleclass']\n tc_comps = det_toolchain_compilers(ec)\n tc_comp_info = self.det_toolchain_compilers_name_version(tc_comps)\n\n # we consider the following to be compilers:\n # * has 'compiler' specified as moduleclass\n is_compiler = modclass == MODULECLASS_COMPILER\n # * CUDA, but only when not installed with 'system' toolchain (i.e. one or more toolchain compilers found)\n non_system_tc = tc_comps is not None\n non_system_cuda = ec['name'] == 'CUDA' and non_system_tc\n\n paths = []\n if is_compiler or non_system_cuda:\n # obtain list of compilers based on that extend $MODULEPATH in some way other than <name>/<version>\n extend_comps = []\n # exclude GCC for which <name>/<version> is used as $MODULEPATH extension\n excluded_comps = ['GCC']\n for comps in COMP_NAME_VERSION_TEMPLATES.keys():\n extend_comps.extend([comp for comp in comps.split(',') if comp not in excluded_comps])\n\n comp_name_ver = None\n if ec['name'] in extend_comps:\n for key in COMP_NAME_VERSION_TEMPLATES:\n comp_names = key.split(',')\n if ec['name'] in comp_names:\n comp_name, comp_ver_tmpl = COMP_NAME_VERSION_TEMPLATES[key]\n comp_versions = {ec['name']: self.det_full_version(ec)}\n if ec['name'] == 'ifort':\n # 'icc' key should be provided since it's the only one used in the template\n comp_versions.update({'icc': self.det_full_version(ec)})\n\n if non_system_tc:\n tc_comp_name, tc_comp_ver = tc_comp_info\n # Stick to name GCC for GCCcore\n if tc_comp_name == GCCCORE:\n tc_comp_name = 'GCC'\n if tc_comp_name in comp_names:\n # also provide toolchain version for non-system toolchains\n comp_versions.update({tc_comp_name: tc_comp_ver})\n\n comp_ver_keys = re.findall(r'%\\((\\w+)\\)s', comp_ver_tmpl)\n if all(comp_ver_key in comp_versions for comp_ver_key in comp_ver_keys):\n comp_name_ver = [comp_name, comp_ver_tmpl % comp_versions]\n break\n else:\n comp_name_ver = [ec['name'], self.det_full_version(ec)]\n\n if comp_name_ver is None:\n raise EasyBuildError(\"Required compilers not available in toolchain %s for %s v%s\",\n ec['toolchain'], ec['name'], ec['version'])\n\n paths.append(os.path.join(COMPILER, *comp_name_ver))\n\n elif modclass == MODULECLASS_MPI:\n if tc_comp_info is None:\n raise EasyBuildError(\"No compiler available in toolchain %s used to install MPI library %s v%s, \"\n \"which is required by the active module naming scheme.\",\n ec['toolchain'], ec['name'], ec['version'])\n else:\n 
tc_comp_name, tc_comp_ver = tc_comp_info\n fullver = self.det_full_version(ec)\n paths.append(os.path.join(MPI, tc_comp_name, tc_comp_ver, ec['name'], fullver))\n\n # special case for Cray toolchains\n elif modclass == MODULECLASS_TOOLCHAIN and tc_comp_info is None and ec.name.startswith('Cray'):\n paths.append(os.path.join(TOOLCHAIN, ec.name, ec.version))\n\n return paths\n\n def expand_toolchain_load(self, ec=None):\n \"\"\"\n Determine whether load statements for a toolchain should be expanded to load statements for its dependencies.\n This is useful when toolchains are not exposed to users.\n \"\"\"\n return True\n\n def det_init_modulepaths(self, ec):\n \"\"\"\n Determine list of initial module paths (i.e. top of the hierarchy).\n \"\"\"\n return [CORE]\n", "path": "easybuild/tools/module_naming_scheme/hierarchical_mns.py"}], "after_files": [{"content": "##\n# Copyright 2013-2021 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. 
If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nImplementation of an example hierarchical module naming scheme.\n\n:author: Kenneth Hoste (Ghent University)\n:author: Markus Geimer (Forschungszentrum Juelich GmbH)\n\"\"\"\n\nimport os\nimport re\n\nfrom easybuild.toolchains.gcccore import GCCcore\nfrom easybuild.tools.build_log import EasyBuildError\nfrom easybuild.tools.module_naming_scheme.mns import ModuleNamingScheme\nfrom easybuild.tools.module_naming_scheme.toolchain import det_toolchain_compilers, det_toolchain_mpi\n\n\nCORE = 'Core'\nCOMPILER = 'Compiler'\nMPI = 'MPI'\nTOOLCHAIN = 'Toolchain'\n\nMODULECLASS_COMPILER = 'compiler'\nMODULECLASS_MPI = 'mpi'\nMODULECLASS_TOOLCHAIN = 'toolchain'\n\nGCCCORE = GCCcore.NAME\n\n# note: names in keys are ordered alphabetically\nCOMP_NAME_VERSION_TEMPLATES = {\n # required for use of iccifort toolchain\n 'icc,ifort': ('intel', '%(icc)s'),\n 'iccifort': ('intel', '%(iccifort)s'),\n # required for use of intel-compilers toolchain (OneAPI compilers)\n 'intel-compilers': ('intel', '%(intel-compilers)s'),\n # required for use of ClangGCC toolchain\n 'Clang,GCC': ('Clang-GCC', '%(Clang)s-%(GCC)s'),\n # required for use of gcccuda toolchain, and for CUDA installed with GCC toolchain\n 'CUDA,GCC': ('GCC-CUDA', '%(GCC)s-%(CUDA)s'),\n # required for use of iccifortcuda toolchain\n 'CUDA,icc,ifort': ('intel-CUDA', '%(icc)s-%(CUDA)s'),\n 'CUDA,iccifort': ('intel-CUDA', '%(iccifort)s-%(CUDA)s'),\n # required for CUDA installed with iccifort toolchain\n # need to use 'intel' here because 'iccifort' toolchain maps to 'intel' (see above)\n 'CUDA,intel': ('intel-CUDA', '%(intel)s-%(CUDA)s'),\n # required for use of xlcxlf toolchain\n 'xlc,xlf': ('xlcxlf', '%(xlc)s'),\n}\n\n# possible prefixes for Cray toolchain names\n# example: CrayGNU, CrayCCE, cpeGNU, cpeCCE, ...;\n# important for determining $MODULEPATH extensions in det_modpath_extensions,\n# cfr. https://github.com/easybuilders/easybuild-framework/issues/3575\nCRAY_TOOLCHAIN_NAME_PREFIXES = ('Cray', 'cpe')\n\n\nclass HierarchicalMNS(ModuleNamingScheme):\n \"\"\"Class implementing an example hierarchical module naming scheme.\"\"\"\n\n REQUIRED_KEYS = ['name', 'versionprefix', 'version', 'versionsuffix', 'toolchain', 'moduleclass']\n\n def requires_toolchain_details(self):\n \"\"\"\n Determine whether toolchain details are required by this module naming scheme,\n e.g. whether one of det_toolchain_* functions are relied upon.\n \"\"\"\n return True\n\n def det_full_module_name(self, ec):\n \"\"\"\n Determine full module name, relative to the top of the module path.\n Examples: Core/GCC/4.8.3, Compiler/GCC/4.8.3/OpenMPI/1.6.5, MPI/GCC/4.8.3/OpenMPI/1.6.5/HPL/2.1\n \"\"\"\n return os.path.join(self.det_module_subdir(ec), self.det_short_module_name(ec))\n\n def det_short_module_name(self, ec):\n \"\"\"\n Determine short module name, i.e. 
the name under which modules will be exposed to users.\n Examples: GCC/4.8.3, OpenMPI/1.6.5, OpenBLAS/0.2.9, HPL/2.1, Python/2.7.5\n \"\"\"\n return os.path.join(ec['name'], self.det_full_version(ec))\n\n def det_full_version(self, ec):\n \"\"\"Determine full version, taking into account version prefix/suffix.\"\"\"\n # versionprefix is not always available (e.g., for toolchains)\n versionprefix = ec.get('versionprefix', '')\n return versionprefix + ec['version'] + ec['versionsuffix']\n\n def det_toolchain_compilers_name_version(self, tc_comps):\n \"\"\"\n Determine toolchain compiler tag, for given list of compilers.\n \"\"\"\n if tc_comps is None:\n # no compiler in toolchain, system toolchain\n res = None\n elif len(tc_comps) == 1:\n tc_comp = tc_comps[0]\n if tc_comp is None:\n res = None\n else:\n res = (tc_comp['name'], self.det_full_version(tc_comp))\n else:\n comp_versions = dict([(comp['name'], self.det_full_version(comp)) for comp in tc_comps])\n comp_names = comp_versions.keys()\n key = ','.join(sorted(comp_names))\n if key in COMP_NAME_VERSION_TEMPLATES:\n tc_comp_name, tc_comp_ver_tmpl = COMP_NAME_VERSION_TEMPLATES[key]\n tc_comp_ver = tc_comp_ver_tmpl % comp_versions\n # make sure that icc/ifort versions match (unless not existing as separate modules)\n if tc_comp_name == 'intel' and comp_versions.get('icc') != comp_versions.get('ifort'):\n raise EasyBuildError(\"Bumped into different versions for Intel compilers: %s\", comp_versions)\n else:\n raise EasyBuildError(\"Unknown set of toolchain compilers, module naming scheme needs work: %s\",\n comp_names)\n res = (tc_comp_name, tc_comp_ver)\n return res\n\n def det_module_subdir(self, ec):\n \"\"\"\n Determine module subdirectory, relative to the top of the module path.\n This determines the separation between module names exposed to users, and what's part of the $MODULEPATH.\n Examples: Core, Compiler/GCC/4.8.3, MPI/GCC/4.8.3/OpenMPI/1.6.5\n \"\"\"\n tc_comps = det_toolchain_compilers(ec)\n # determine prefix based on type of toolchain used\n if tc_comps is None:\n # no compiler in toolchain, system toolchain => Core module\n subdir = CORE\n elif tc_comps == [None]:\n # no info on toolchain compiler (cfr. 
Cray toolchains),\n # then use toolchain name/version\n subdir = os.path.join(TOOLCHAIN, ec.toolchain.name, ec.toolchain.version)\n else:\n tc_comp_name, tc_comp_ver = self.det_toolchain_compilers_name_version(tc_comps)\n tc_mpi = det_toolchain_mpi(ec)\n if tc_mpi is None:\n # compiler-only toolchain => Compiler/<compiler_name>/<compiler_version> namespace\n subdir = os.path.join(COMPILER, tc_comp_name, tc_comp_ver)\n else:\n # compiler-MPI toolchain => MPI/<comp_name>/<comp_version>/<MPI_name>/<MPI_version> namespace\n tc_mpi_fullver = self.det_full_version(tc_mpi)\n subdir = os.path.join(MPI, tc_comp_name, tc_comp_ver, tc_mpi['name'], tc_mpi_fullver)\n\n return subdir\n\n def det_module_symlink_paths(self, ec):\n \"\"\"\n Determine list of paths in which symlinks to module files must be created.\n \"\"\"\n # symlinks are not very useful in the context of a hierarchical MNS\n return []\n\n def det_modpath_extensions(self, ec):\n \"\"\"\n Determine module path extensions, if any.\n Examples: Compiler/GCC/4.8.3 (for GCC/4.8.3 module), MPI/GCC/4.8.3/OpenMPI/1.6.5 (for OpenMPI/1.6.5 module)\n \"\"\"\n modclass = ec['moduleclass']\n tc_comps = det_toolchain_compilers(ec)\n tc_comp_info = self.det_toolchain_compilers_name_version(tc_comps)\n\n # we consider the following to be compilers:\n # * has 'compiler' specified as moduleclass\n is_compiler = modclass == MODULECLASS_COMPILER\n # * CUDA, but only when not installed with 'system' toolchain (i.e. one or more toolchain compilers found)\n non_system_tc = tc_comps is not None\n non_system_cuda = ec['name'] == 'CUDA' and non_system_tc\n\n paths = []\n if is_compiler or non_system_cuda:\n # obtain list of compilers based on that extend $MODULEPATH in some way other than <name>/<version>\n extend_comps = []\n # exclude GCC for which <name>/<version> is used as $MODULEPATH extension\n excluded_comps = ['GCC']\n for comps in COMP_NAME_VERSION_TEMPLATES.keys():\n extend_comps.extend([comp for comp in comps.split(',') if comp not in excluded_comps])\n\n comp_name_ver = None\n if ec['name'] in extend_comps:\n for key in COMP_NAME_VERSION_TEMPLATES:\n comp_names = key.split(',')\n if ec['name'] in comp_names:\n comp_name, comp_ver_tmpl = COMP_NAME_VERSION_TEMPLATES[key]\n comp_versions = {ec['name']: self.det_full_version(ec)}\n if ec['name'] == 'ifort':\n # 'icc' key should be provided since it's the only one used in the template\n comp_versions.update({'icc': self.det_full_version(ec)})\n\n if non_system_tc:\n tc_comp_name, tc_comp_ver = tc_comp_info\n # Stick to name GCC for GCCcore\n if tc_comp_name == GCCCORE:\n tc_comp_name = 'GCC'\n if tc_comp_name in comp_names:\n # also provide toolchain version for non-system toolchains\n comp_versions.update({tc_comp_name: tc_comp_ver})\n\n comp_ver_keys = re.findall(r'%\\((\\w+)\\)s', comp_ver_tmpl)\n if all(comp_ver_key in comp_versions for comp_ver_key in comp_ver_keys):\n comp_name_ver = [comp_name, comp_ver_tmpl % comp_versions]\n break\n else:\n comp_name_ver = [ec['name'], self.det_full_version(ec)]\n\n if comp_name_ver is None:\n raise EasyBuildError(\"Required compilers not available in toolchain %s for %s v%s\",\n ec['toolchain'], ec['name'], ec['version'])\n\n paths.append(os.path.join(COMPILER, *comp_name_ver))\n\n elif modclass == MODULECLASS_MPI:\n if tc_comp_info is None:\n raise EasyBuildError(\"No compiler available in toolchain %s used to install MPI library %s v%s, \"\n \"which is required by the active module naming scheme.\",\n ec['toolchain'], ec['name'], ec['version'])\n else:\n 
tc_comp_name, tc_comp_ver = tc_comp_info\n fullver = self.det_full_version(ec)\n paths.append(os.path.join(MPI, tc_comp_name, tc_comp_ver, ec['name'], fullver))\n\n # special case for Cray toolchains\n elif modclass == MODULECLASS_TOOLCHAIN and tc_comp_info is None:\n if any(ec.name.startswith(x) for x in CRAY_TOOLCHAIN_NAME_PREFIXES):\n paths.append(os.path.join(TOOLCHAIN, ec.name, ec.version))\n\n return paths\n\n def expand_toolchain_load(self, ec=None):\n \"\"\"\n Determine whether load statements for a toolchain should be expanded to load statements for its dependencies.\n This is useful when toolchains are not exposed to users.\n \"\"\"\n return True\n\n def det_init_modulepaths(self, ec):\n \"\"\"\n Determine list of initial module paths (i.e. top of the hierarchy).\n \"\"\"\n return [CORE]\n", "path": "easybuild/tools/module_naming_scheme/hierarchical_mns.py"}]}
| 3,833 | 356 |
gh_patches_debug_1655 | rasdani/github-patches | git_diff | frappe__frappe-23585 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Route History shouldn‘t be editable
Editing or adding a new Route History:


… shouldn’t be possible, not even for the Administrator.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `frappe/desk/doctype/route_history/route_history.py`
Content:
```
1 # Copyright (c) 2022, Frappe Technologies and contributors
2 # License: MIT. See LICENSE
3
4 import frappe
5 from frappe.deferred_insert import deferred_insert as _deferred_insert
6 from frappe.model.document import Document
7
8
9 class RouteHistory(Document):
10 # begin: auto-generated types
11 # This code is auto-generated. Do not modify anything in this block.
12
13 from typing import TYPE_CHECKING
14
15 if TYPE_CHECKING:
16 from frappe.types import DF
17
18 route: DF.Data | None
19 user: DF.Link | None
20 # end: auto-generated types
21 @staticmethod
22 def clear_old_logs(days=30):
23 from frappe.query_builder import Interval
24 from frappe.query_builder.functions import Now
25
26 table = frappe.qb.DocType("Route History")
27 frappe.db.delete(table, filters=(table.modified < (Now() - Interval(days=days))))
28
29
30 @frappe.whitelist()
31 def deferred_insert(routes):
32 routes = [
33 {
34 "user": frappe.session.user,
35 "route": route.get("route"),
36 "creation": route.get("creation"),
37 }
38 for route in frappe.parse_json(routes)
39 ]
40
41 _deferred_insert("Route History", routes)
42
43
44 @frappe.whitelist()
45 def frequently_visited_links():
46 return frappe.get_all(
47 "Route History",
48 fields=["route", "count(name) as count"],
49 filters={"user": frappe.session.user},
50 group_by="route",
51 order_by="count desc",
52 limit=5,
53 )
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/frappe/desk/doctype/route_history/route_history.py b/frappe/desk/doctype/route_history/route_history.py
--- a/frappe/desk/doctype/route_history/route_history.py
+++ b/frappe/desk/doctype/route_history/route_history.py
@@ -18,6 +18,7 @@
route: DF.Data | None
user: DF.Link | None
# end: auto-generated types
+
@staticmethod
def clear_old_logs(days=30):
from frappe.query_builder import Interval
|
{"golden_diff": "diff --git a/frappe/desk/doctype/route_history/route_history.py b/frappe/desk/doctype/route_history/route_history.py\n--- a/frappe/desk/doctype/route_history/route_history.py\n+++ b/frappe/desk/doctype/route_history/route_history.py\n@@ -18,6 +18,7 @@\n \t\troute: DF.Data | None\n \t\tuser: DF.Link | None\n \t# end: auto-generated types\n+\n \t@staticmethod\n \tdef clear_old_logs(days=30):\n \t\tfrom frappe.query_builder import Interval\n", "issue": "Route History shouldn\u2018t be editable\nEditing or adding a new Route History:\r\n\r\n\r\n\r\n\r\n\u2026 shouldn\u2019t be possible, not even for the Administrator.\n", "before_files": [{"content": "# Copyright (c) 2022, Frappe Technologies and contributors\n# License: MIT. See LICENSE\n\nimport frappe\nfrom frappe.deferred_insert import deferred_insert as _deferred_insert\nfrom frappe.model.document import Document\n\n\nclass RouteHistory(Document):\n\t# begin: auto-generated types\n\t# This code is auto-generated. Do not modify anything in this block.\n\n\tfrom typing import TYPE_CHECKING\n\n\tif TYPE_CHECKING:\n\t\tfrom frappe.types import DF\n\n\t\troute: DF.Data | None\n\t\tuser: DF.Link | None\n\t# end: auto-generated types\n\t@staticmethod\n\tdef clear_old_logs(days=30):\n\t\tfrom frappe.query_builder import Interval\n\t\tfrom frappe.query_builder.functions import Now\n\n\t\ttable = frappe.qb.DocType(\"Route History\")\n\t\tfrappe.db.delete(table, filters=(table.modified < (Now() - Interval(days=days))))\n\n\[email protected]()\ndef deferred_insert(routes):\n\troutes = [\n\t\t{\n\t\t\t\"user\": frappe.session.user,\n\t\t\t\"route\": route.get(\"route\"),\n\t\t\t\"creation\": route.get(\"creation\"),\n\t\t}\n\t\tfor route in frappe.parse_json(routes)\n\t]\n\n\t_deferred_insert(\"Route History\", routes)\n\n\[email protected]()\ndef frequently_visited_links():\n\treturn frappe.get_all(\n\t\t\"Route History\",\n\t\tfields=[\"route\", \"count(name) as count\"],\n\t\tfilters={\"user\": frappe.session.user},\n\t\tgroup_by=\"route\",\n\t\torder_by=\"count desc\",\n\t\tlimit=5,\n\t)\n", "path": "frappe/desk/doctype/route_history/route_history.py"}], "after_files": [{"content": "# Copyright (c) 2022, Frappe Technologies and contributors\n# License: MIT. See LICENSE\n\nimport frappe\nfrom frappe.deferred_insert import deferred_insert as _deferred_insert\nfrom frappe.model.document import Document\n\n\nclass RouteHistory(Document):\n\t# begin: auto-generated types\n\t# This code is auto-generated. 
Do not modify anything in this block.\n\n\tfrom typing import TYPE_CHECKING\n\n\tif TYPE_CHECKING:\n\t\tfrom frappe.types import DF\n\n\t\troute: DF.Data | None\n\t\tuser: DF.Link | None\n\t# end: auto-generated types\n\n\t@staticmethod\n\tdef clear_old_logs(days=30):\n\t\tfrom frappe.query_builder import Interval\n\t\tfrom frappe.query_builder.functions import Now\n\n\t\ttable = frappe.qb.DocType(\"Route History\")\n\t\tfrappe.db.delete(table, filters=(table.modified < (Now() - Interval(days=days))))\n\n\[email protected]()\ndef deferred_insert(routes):\n\troutes = [\n\t\t{\n\t\t\t\"user\": frappe.session.user,\n\t\t\t\"route\": route.get(\"route\"),\n\t\t\t\"creation\": route.get(\"creation\"),\n\t\t}\n\t\tfor route in frappe.parse_json(routes)\n\t]\n\n\t_deferred_insert(\"Route History\", routes)\n\n\[email protected]()\ndef frequently_visited_links():\n\treturn frappe.get_all(\n\t\t\"Route History\",\n\t\tfields=[\"route\", \"count(name) as count\"],\n\t\tfilters={\"user\": frappe.session.user},\n\t\tgroup_by=\"route\",\n\t\torder_by=\"count desc\",\n\t\tlimit=5,\n\t)\n", "path": "frappe/desk/doctype/route_history/route_history.py"}]}
| 869 | 127 |
gh_patches_debug_29516 | rasdani/github-patches | git_diff | open-mmlab__mmcv-489 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DetectoRS don't support pytorch version>=1.5
>
File "/jizhi/jizhi2/worker/trainer/mmdet/models/backbones/resnet.py", line 632, in forward
x = res_layer(x)
File "/usr/local/lib64/python3.6/site-packages/torch/nn/modules/module.py", line 722, in _call_impl
result = self.forward(*input, **kwargs)
File "/usr/local/lib64/python3.6/site-packages/torch/nn/modules/container.py", line 117, in forward
input = module(input)
File "/usr/local/lib64/python3.6/site-packages/torch/nn/modules/module.py", line 722, in _call_impl
result = self.forward(*input, **kwargs)
File "/jizhi/jizhi2/worker/trainer/mmdet/models/backbones/resnet.py", line 296, in forward
out = _inner_forward(x)
File "/jizhi/jizhi2/worker/trainer/mmdet/models/backbones/resnet.py", line 273, in _inner_forward
out = self.conv2(out)
File "/usr/local/lib64/python3.6/site-packages/torch/nn/modules/module.py", line 722, in _call_impl
result = self.forward(*input, **kwargs)
File "/usr/local/lib64/python3.6/site-packages/mmcv/ops/saconv.py", line 105, in forward
out_s = super().conv2d_forward(x, weight)
AttributeError: 'super' object has no attribute 'conv2d_forward'
This is a PyTorch version problem, the PyTorch 1.5 has changed its internal API.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmcv/ops/saconv.py`
Content:
```
1 import torch
2 import torch.nn as nn
3 import torch.nn.functional as F
4
5 from mmcv.cnn import CONV_LAYERS, ConvAWS2d, constant_init
6 from mmcv.ops.deform_conv import deform_conv2d
7
8
9 @CONV_LAYERS.register_module(name='SAC')
10 class SAConv2d(ConvAWS2d):
11 """SAC (Switchable Atrous Convolution)
12
13 This is an implementation of SAC in DetectoRS
14 (https://arxiv.org/pdf/2006.02334.pdf).
15
16 Args:
17 in_channels (int): Number of channels in the input image
18 out_channels (int): Number of channels produced by the convolution
19 kernel_size (int or tuple): Size of the convolving kernel
20 stride (int or tuple, optional): Stride of the convolution. Default: 1
21 padding (int or tuple, optional): Zero-padding added to both sides of
22 the input. Default: 0
23 padding_mode (string, optional): ``'zeros'``, ``'reflect'``,
24 ``'replicate'`` or ``'circular'``. Default: ``'zeros'``
25 dilation (int or tuple, optional): Spacing between kernel elements.
26 Default: 1
27 groups (int, optional): Number of blocked connections from input
28 channels to output channels. Default: 1
29 bias (bool, optional): If ``True``, adds a learnable bias to the
30 output. Default: ``True``
31 use_deform: If ``True``, replace convolution with deformable
32 convolution. Default: ``False``.
33 """
34
35 def __init__(self,
36 in_channels,
37 out_channels,
38 kernel_size,
39 stride=1,
40 padding=0,
41 dilation=1,
42 groups=1,
43 bias=True,
44 use_deform=False):
45 super().__init__(
46 in_channels,
47 out_channels,
48 kernel_size,
49 stride=stride,
50 padding=padding,
51 dilation=dilation,
52 groups=groups,
53 bias=bias)
54 self.use_deform = use_deform
55 self.switch = nn.Conv2d(
56 self.in_channels, 1, kernel_size=1, stride=stride, bias=True)
57 self.weight_diff = nn.Parameter(torch.Tensor(self.weight.size()))
58 self.pre_context = nn.Conv2d(
59 self.in_channels, self.in_channels, kernel_size=1, bias=True)
60 self.post_context = nn.Conv2d(
61 self.out_channels, self.out_channels, kernel_size=1, bias=True)
62 if self.use_deform:
63 self.offset_s = nn.Conv2d(
64 self.in_channels,
65 18,
66 kernel_size=3,
67 padding=1,
68 stride=stride,
69 bias=True)
70 self.offset_l = nn.Conv2d(
71 self.in_channels,
72 18,
73 kernel_size=3,
74 padding=1,
75 stride=stride,
76 bias=True)
77 self.init_weights()
78
79 def init_weights(self):
80 constant_init(self.switch, 0, bias=1)
81 self.weight_diff.data.zero_()
82 constant_init(self.pre_context, 0)
83 constant_init(self.post_context, 0)
84 if self.use_deform:
85 constant_init(self.offset_s, 0)
86 constant_init(self.offset_l, 0)
87
88 def forward(self, x):
89 # pre-context
90 avg_x = F.adaptive_avg_pool2d(x, output_size=1)
91 avg_x = self.pre_context(avg_x)
92 avg_x = avg_x.expand_as(x)
93 x = x + avg_x
94 # switch
95 avg_x = F.pad(x, pad=(2, 2, 2, 2), mode='reflect')
96 avg_x = F.avg_pool2d(avg_x, kernel_size=5, stride=1, padding=0)
97 switch = self.switch(avg_x)
98 # sac
99 weight = self._get_weight(self.weight)
100 if self.use_deform:
101 offset = self.offset_s(avg_x)
102 out_s = deform_conv2d(x, offset, weight, self.stride, self.padding,
103 self.dilation, self.groups, 1)
104 else:
105 out_s = super().conv2d_forward(x, weight)
106 ori_p = self.padding
107 ori_d = self.dilation
108 self.padding = tuple(3 * p for p in self.padding)
109 self.dilation = tuple(3 * d for d in self.dilation)
110 weight = weight + self.weight_diff
111 if self.use_deform:
112 offset = self.offset_l(avg_x)
113 out_l = deform_conv2d(x, offset, weight, self.stride, self.padding,
114 self.dilation, self.groups, 1)
115 else:
116 out_l = super().conv2d_forward(x, weight)
117 out = switch * out_s + (1 - switch) * out_l
118 self.padding = ori_p
119 self.dilation = ori_d
120 # post-context
121 avg_x = F.adaptive_avg_pool2d(out, output_size=1)
122 avg_x = self.post_context(avg_x)
123 avg_x = avg_x.expand_as(out)
124 out = out + avg_x
125 return out
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mmcv/ops/saconv.py b/mmcv/ops/saconv.py
--- a/mmcv/ops/saconv.py
+++ b/mmcv/ops/saconv.py
@@ -4,6 +4,7 @@
from mmcv.cnn import CONV_LAYERS, ConvAWS2d, constant_init
from mmcv.ops.deform_conv import deform_conv2d
+from mmcv.utils import TORCH_VERSION
@CONV_LAYERS.register_module(name='SAC')
@@ -102,7 +103,10 @@
out_s = deform_conv2d(x, offset, weight, self.stride, self.padding,
self.dilation, self.groups, 1)
else:
- out_s = super().conv2d_forward(x, weight)
+ if TORCH_VERSION < '1.5.0' or TORCH_VERSION == 'parrots':
+ out_s = super().conv2d_forward(x, weight)
+ else:
+ out_s = super()._conv_forward(x, weight)
ori_p = self.padding
ori_d = self.dilation
self.padding = tuple(3 * p for p in self.padding)
@@ -113,7 +117,10 @@
out_l = deform_conv2d(x, offset, weight, self.stride, self.padding,
self.dilation, self.groups, 1)
else:
- out_l = super().conv2d_forward(x, weight)
+ if TORCH_VERSION < '1.5.0' or TORCH_VERSION == 'parrots':
+ out_l = super().conv2d_forward(x, weight)
+ else:
+ out_l = super()._conv_forward(x, weight)
out = switch * out_s + (1 - switch) * out_l
self.padding = ori_p
self.dilation = ori_d
|
{"golden_diff": "diff --git a/mmcv/ops/saconv.py b/mmcv/ops/saconv.py\n--- a/mmcv/ops/saconv.py\n+++ b/mmcv/ops/saconv.py\n@@ -4,6 +4,7 @@\n \n from mmcv.cnn import CONV_LAYERS, ConvAWS2d, constant_init\n from mmcv.ops.deform_conv import deform_conv2d\n+from mmcv.utils import TORCH_VERSION\n \n \n @CONV_LAYERS.register_module(name='SAC')\n@@ -102,7 +103,10 @@\n out_s = deform_conv2d(x, offset, weight, self.stride, self.padding,\n self.dilation, self.groups, 1)\n else:\n- out_s = super().conv2d_forward(x, weight)\n+ if TORCH_VERSION < '1.5.0' or TORCH_VERSION == 'parrots':\n+ out_s = super().conv2d_forward(x, weight)\n+ else:\n+ out_s = super()._conv_forward(x, weight)\n ori_p = self.padding\n ori_d = self.dilation\n self.padding = tuple(3 * p for p in self.padding)\n@@ -113,7 +117,10 @@\n out_l = deform_conv2d(x, offset, weight, self.stride, self.padding,\n self.dilation, self.groups, 1)\n else:\n- out_l = super().conv2d_forward(x, weight)\n+ if TORCH_VERSION < '1.5.0' or TORCH_VERSION == 'parrots':\n+ out_l = super().conv2d_forward(x, weight)\n+ else:\n+ out_l = super()._conv_forward(x, weight)\n out = switch * out_s + (1 - switch) * out_l\n self.padding = ori_p\n self.dilation = ori_d\n", "issue": "DetectoRS don't support pytorch version>=1.5\n> \r\n File \"/jizhi/jizhi2/worker/trainer/mmdet/models/backbones/resnet.py\", line 632, in forward\r\n x = res_layer(x)\r\n File \"/usr/local/lib64/python3.6/site-packages/torch/nn/modules/module.py\", line 722, in _call_impl\r\n result = self.forward(*input, **kwargs)\r\n File \"/usr/local/lib64/python3.6/site-packages/torch/nn/modules/container.py\", line 117, in forward\r\n input = module(input)\r\n File \"/usr/local/lib64/python3.6/site-packages/torch/nn/modules/module.py\", line 722, in _call_impl\r\n result = self.forward(*input, **kwargs)\r\n File \"/jizhi/jizhi2/worker/trainer/mmdet/models/backbones/resnet.py\", line 296, in forward\r\n out = _inner_forward(x)\r\n File \"/jizhi/jizhi2/worker/trainer/mmdet/models/backbones/resnet.py\", line 273, in _inner_forward\r\n out = self.conv2(out)\r\n File \"/usr/local/lib64/python3.6/site-packages/torch/nn/modules/module.py\", line 722, in _call_impl\r\n result = self.forward(*input, **kwargs)\r\n File \"/usr/local/lib64/python3.6/site-packages/mmcv/ops/saconv.py\", line 105, in forward\r\n out_s = super().conv2d_forward(x, weight)\r\nAttributeError: 'super' object has no attribute 'conv2d_forward'\r\n\r\nThis is a PyTorch version problem, the PyTorch 1.5 has changed its internal API.\n", "before_files": [{"content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom mmcv.cnn import CONV_LAYERS, ConvAWS2d, constant_init\nfrom mmcv.ops.deform_conv import deform_conv2d\n\n\n@CONV_LAYERS.register_module(name='SAC')\nclass SAConv2d(ConvAWS2d):\n \"\"\"SAC (Switchable Atrous Convolution)\n\n This is an implementation of SAC in DetectoRS\n (https://arxiv.org/pdf/2006.02334.pdf).\n\n Args:\n in_channels (int): Number of channels in the input image\n out_channels (int): Number of channels produced by the convolution\n kernel_size (int or tuple): Size of the convolving kernel\n stride (int or tuple, optional): Stride of the convolution. Default: 1\n padding (int or tuple, optional): Zero-padding added to both sides of\n the input. Default: 0\n padding_mode (string, optional): ``'zeros'``, ``'reflect'``,\n ``'replicate'`` or ``'circular'``. 
Default: ``'zeros'``\n dilation (int or tuple, optional): Spacing between kernel elements.\n Default: 1\n groups (int, optional): Number of blocked connections from input\n channels to output channels. Default: 1\n bias (bool, optional): If ``True``, adds a learnable bias to the\n output. Default: ``True``\n use_deform: If ``True``, replace convolution with deformable\n convolution. Default: ``False``.\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n padding=0,\n dilation=1,\n groups=1,\n bias=True,\n use_deform=False):\n super().__init__(\n in_channels,\n out_channels,\n kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n groups=groups,\n bias=bias)\n self.use_deform = use_deform\n self.switch = nn.Conv2d(\n self.in_channels, 1, kernel_size=1, stride=stride, bias=True)\n self.weight_diff = nn.Parameter(torch.Tensor(self.weight.size()))\n self.pre_context = nn.Conv2d(\n self.in_channels, self.in_channels, kernel_size=1, bias=True)\n self.post_context = nn.Conv2d(\n self.out_channels, self.out_channels, kernel_size=1, bias=True)\n if self.use_deform:\n self.offset_s = nn.Conv2d(\n self.in_channels,\n 18,\n kernel_size=3,\n padding=1,\n stride=stride,\n bias=True)\n self.offset_l = nn.Conv2d(\n self.in_channels,\n 18,\n kernel_size=3,\n padding=1,\n stride=stride,\n bias=True)\n self.init_weights()\n\n def init_weights(self):\n constant_init(self.switch, 0, bias=1)\n self.weight_diff.data.zero_()\n constant_init(self.pre_context, 0)\n constant_init(self.post_context, 0)\n if self.use_deform:\n constant_init(self.offset_s, 0)\n constant_init(self.offset_l, 0)\n\n def forward(self, x):\n # pre-context\n avg_x = F.adaptive_avg_pool2d(x, output_size=1)\n avg_x = self.pre_context(avg_x)\n avg_x = avg_x.expand_as(x)\n x = x + avg_x\n # switch\n avg_x = F.pad(x, pad=(2, 2, 2, 2), mode='reflect')\n avg_x = F.avg_pool2d(avg_x, kernel_size=5, stride=1, padding=0)\n switch = self.switch(avg_x)\n # sac\n weight = self._get_weight(self.weight)\n if self.use_deform:\n offset = self.offset_s(avg_x)\n out_s = deform_conv2d(x, offset, weight, self.stride, self.padding,\n self.dilation, self.groups, 1)\n else:\n out_s = super().conv2d_forward(x, weight)\n ori_p = self.padding\n ori_d = self.dilation\n self.padding = tuple(3 * p for p in self.padding)\n self.dilation = tuple(3 * d for d in self.dilation)\n weight = weight + self.weight_diff\n if self.use_deform:\n offset = self.offset_l(avg_x)\n out_l = deform_conv2d(x, offset, weight, self.stride, self.padding,\n self.dilation, self.groups, 1)\n else:\n out_l = super().conv2d_forward(x, weight)\n out = switch * out_s + (1 - switch) * out_l\n self.padding = ori_p\n self.dilation = ori_d\n # post-context\n avg_x = F.adaptive_avg_pool2d(out, output_size=1)\n avg_x = self.post_context(avg_x)\n avg_x = avg_x.expand_as(out)\n out = out + avg_x\n return out\n", "path": "mmcv/ops/saconv.py"}], "after_files": [{"content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom mmcv.cnn import CONV_LAYERS, ConvAWS2d, constant_init\nfrom mmcv.ops.deform_conv import deform_conv2d\nfrom mmcv.utils import TORCH_VERSION\n\n\n@CONV_LAYERS.register_module(name='SAC')\nclass SAConv2d(ConvAWS2d):\n \"\"\"SAC (Switchable Atrous Convolution)\n\n This is an implementation of SAC in DetectoRS\n (https://arxiv.org/pdf/2006.02334.pdf).\n\n Args:\n in_channels (int): Number of channels in the input image\n out_channels (int): Number of channels produced by the convolution\n kernel_size (int or tuple): 
Size of the convolving kernel\n stride (int or tuple, optional): Stride of the convolution. Default: 1\n padding (int or tuple, optional): Zero-padding added to both sides of\n the input. Default: 0\n padding_mode (string, optional): ``'zeros'``, ``'reflect'``,\n ``'replicate'`` or ``'circular'``. Default: ``'zeros'``\n dilation (int or tuple, optional): Spacing between kernel elements.\n Default: 1\n groups (int, optional): Number of blocked connections from input\n channels to output channels. Default: 1\n bias (bool, optional): If ``True``, adds a learnable bias to the\n output. Default: ``True``\n use_deform: If ``True``, replace convolution with deformable\n convolution. Default: ``False``.\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n padding=0,\n dilation=1,\n groups=1,\n bias=True,\n use_deform=False):\n super().__init__(\n in_channels,\n out_channels,\n kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n groups=groups,\n bias=bias)\n self.use_deform = use_deform\n self.switch = nn.Conv2d(\n self.in_channels, 1, kernel_size=1, stride=stride, bias=True)\n self.weight_diff = nn.Parameter(torch.Tensor(self.weight.size()))\n self.pre_context = nn.Conv2d(\n self.in_channels, self.in_channels, kernel_size=1, bias=True)\n self.post_context = nn.Conv2d(\n self.out_channels, self.out_channels, kernel_size=1, bias=True)\n if self.use_deform:\n self.offset_s = nn.Conv2d(\n self.in_channels,\n 18,\n kernel_size=3,\n padding=1,\n stride=stride,\n bias=True)\n self.offset_l = nn.Conv2d(\n self.in_channels,\n 18,\n kernel_size=3,\n padding=1,\n stride=stride,\n bias=True)\n self.init_weights()\n\n def init_weights(self):\n constant_init(self.switch, 0, bias=1)\n self.weight_diff.data.zero_()\n constant_init(self.pre_context, 0)\n constant_init(self.post_context, 0)\n if self.use_deform:\n constant_init(self.offset_s, 0)\n constant_init(self.offset_l, 0)\n\n def forward(self, x):\n # pre-context\n avg_x = F.adaptive_avg_pool2d(x, output_size=1)\n avg_x = self.pre_context(avg_x)\n avg_x = avg_x.expand_as(x)\n x = x + avg_x\n # switch\n avg_x = F.pad(x, pad=(2, 2, 2, 2), mode='reflect')\n avg_x = F.avg_pool2d(avg_x, kernel_size=5, stride=1, padding=0)\n switch = self.switch(avg_x)\n # sac\n weight = self._get_weight(self.weight)\n if self.use_deform:\n offset = self.offset_s(avg_x)\n out_s = deform_conv2d(x, offset, weight, self.stride, self.padding,\n self.dilation, self.groups, 1)\n else:\n if TORCH_VERSION < '1.5.0' or TORCH_VERSION == 'parrots':\n out_s = super().conv2d_forward(x, weight)\n else:\n out_s = super()._conv_forward(x, weight)\n ori_p = self.padding\n ori_d = self.dilation\n self.padding = tuple(3 * p for p in self.padding)\n self.dilation = tuple(3 * d for d in self.dilation)\n weight = weight + self.weight_diff\n if self.use_deform:\n offset = self.offset_l(avg_x)\n out_l = deform_conv2d(x, offset, weight, self.stride, self.padding,\n self.dilation, self.groups, 1)\n else:\n if TORCH_VERSION < '1.5.0' or TORCH_VERSION == 'parrots':\n out_l = super().conv2d_forward(x, weight)\n else:\n out_l = super()._conv_forward(x, weight)\n out = switch * out_s + (1 - switch) * out_l\n self.padding = ori_p\n self.dilation = ori_d\n # post-context\n avg_x = F.adaptive_avg_pool2d(out, output_size=1)\n avg_x = self.post_context(avg_x)\n avg_x = avg_x.expand_as(out)\n out = out + avg_x\n return out\n", "path": "mmcv/ops/saconv.py"}]}
| 2,052 | 416 |
gh_patches_debug_27150 | rasdani/github-patches | git_diff | openai__gym-2554 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Proposal] Simplify the Box string representation
### Proposal
Stop the `Box` string representation from printing out all the low/high values if they are all identical
### Motivation
In any reasonably large environment, `print(env.observation_space)` will spam the output with dozens of zero's or infs or whatever else. This information may be relevant if some of these values are different, but often is not necessary, and is just noise. And good luck printing a Dictionary observation space.
I think it'd be enough to change the __repr__ method on Box so that if `len(set(self.low)) == 1`, we just print the unique value, and the same thing for `self.high`.
### Pitch
Make Boxes printable again
### Alternatives
Grin and bear it
### Additional context

Help
This is partially going back on #2182 and #2183 , but I wasn't around to yell alarm at the full consequences of that PR. But since it will only simplify the representation when all limits are equal, it doesn't actually cause that same problem.
### Checklist
- [x] I have checked that there is no similar [issue](https://github.com/openai/gym/issues) in the repo (**required**)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gym/spaces/box.py`
Content:
```
1 import numpy as np
2
3 from .space import Space
4 from gym import logger
5
6
7 class Box(Space):
8 """
9 A (possibly unbounded) box in R^n. Specifically, a Box represents the
10 Cartesian product of n closed intervals. Each interval has the form of one
11 of [a, b], (-oo, b], [a, oo), or (-oo, oo).
12
13 There are two common use cases:
14
15 * Identical bound for each dimension::
16 >>> Box(low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)
17 Box(3, 4)
18
19 * Independent bound for each dimension::
20 >>> Box(low=np.array([-1.0, -2.0]), high=np.array([2.0, 4.0]), dtype=np.float32)
21 Box(2,)
22
23 """
24
25 def __init__(self, low, high, shape=None, dtype=np.float32, seed=None):
26 assert dtype is not None, "dtype must be explicitly provided. "
27 self.dtype = np.dtype(dtype)
28
29 # determine shape if it isn't provided directly
30 if shape is not None:
31 shape = tuple(shape)
32 assert (
33 np.isscalar(low) or low.shape == shape
34 ), "low.shape doesn't match provided shape"
35 assert (
36 np.isscalar(high) or high.shape == shape
37 ), "high.shape doesn't match provided shape"
38 elif not np.isscalar(low):
39 shape = low.shape
40 assert (
41 np.isscalar(high) or high.shape == shape
42 ), "high.shape doesn't match low.shape"
43 elif not np.isscalar(high):
44 shape = high.shape
45 assert (
46 np.isscalar(low) or low.shape == shape
47 ), "low.shape doesn't match high.shape"
48 else:
49 raise ValueError(
50 "shape must be provided or inferred from the shapes of low or high"
51 )
52
53 # handle infinite bounds and broadcast at the same time if needed
54 if np.isscalar(low):
55 low = get_inf(dtype, "-") if np.isinf(low) else low
56 low = np.full(shape, low, dtype=dtype)
57 else:
58 if np.any(np.isinf(low)):
59 # create new array with dtype, but maintain old one to preserve np.inf
60 temp_low = low.astype(dtype)
61 temp_low[np.isinf(low)] = get_inf(dtype, "-")
62 low = temp_low
63
64 if np.isscalar(high):
65 high = get_inf(dtype, "+") if np.isinf(high) else high
66 high = np.full(shape, high, dtype=dtype)
67 else:
68 if np.any(np.isinf(high)):
69 # create new array with dtype, but maintain old one to preserve np.inf
70 temp_high = high.astype(dtype)
71 temp_high[np.isinf(high)] = get_inf(dtype, "+")
72 high = temp_high
73
74 self._shape = shape
75 self.low = low
76 self.high = high
77
78 low_precision = get_precision(self.low.dtype)
79 high_precision = get_precision(self.high.dtype)
80 dtype_precision = get_precision(self.dtype)
81 if min(low_precision, high_precision) > dtype_precision:
82 logger.warn(f"Box bound precision lowered by casting to {self.dtype}")
83 self.low = self.low.astype(self.dtype)
84 self.high = self.high.astype(self.dtype)
85
86 # Boolean arrays which indicate the interval type for each coordinate
87 self.bounded_below = -np.inf < self.low
88 self.bounded_above = np.inf > self.high
89
90 super().__init__(self.shape, self.dtype, seed)
91
92 def is_bounded(self, manner="both"):
93 below = np.all(self.bounded_below)
94 above = np.all(self.bounded_above)
95 if manner == "both":
96 return below and above
97 elif manner == "below":
98 return below
99 elif manner == "above":
100 return above
101 else:
102 raise ValueError("manner is not in {'below', 'above', 'both'}")
103
104 def sample(self):
105 """
106 Generates a single random sample inside of the Box.
107
108 In creating a sample of the box, each coordinate is sampled according to
109 the form of the interval:
110
111 * [a, b] : uniform distribution
112 * [a, oo) : shifted exponential distribution
113 * (-oo, b] : shifted negative exponential distribution
114 * (-oo, oo) : normal distribution
115 """
116 high = self.high if self.dtype.kind == "f" else self.high.astype("int64") + 1
117 sample = np.empty(self.shape)
118
119 # Masking arrays which classify the coordinates according to interval
120 # type
121 unbounded = ~self.bounded_below & ~self.bounded_above
122 upp_bounded = ~self.bounded_below & self.bounded_above
123 low_bounded = self.bounded_below & ~self.bounded_above
124 bounded = self.bounded_below & self.bounded_above
125
126 # Vectorized sampling by interval type
127 sample[unbounded] = self.np_random.normal(size=unbounded[unbounded].shape)
128
129 sample[low_bounded] = (
130 self.np_random.exponential(size=low_bounded[low_bounded].shape)
131 + self.low[low_bounded]
132 )
133
134 sample[upp_bounded] = (
135 -self.np_random.exponential(size=upp_bounded[upp_bounded].shape)
136 + self.high[upp_bounded]
137 )
138
139 sample[bounded] = self.np_random.uniform(
140 low=self.low[bounded], high=high[bounded], size=bounded[bounded].shape
141 )
142 if self.dtype.kind == "i":
143 sample = np.floor(sample)
144
145 return sample.astype(self.dtype)
146
147 def contains(self, x):
148 if not isinstance(x, np.ndarray):
149 logger.warn("Casting input x to numpy array.")
150 x = np.asarray(x, dtype=self.dtype)
151
152 return (
153 np.can_cast(x.dtype, self.dtype)
154 and x.shape == self.shape
155 and np.all(x >= self.low)
156 and np.all(x <= self.high)
157 )
158
159 def to_jsonable(self, sample_n):
160 return np.array(sample_n).tolist()
161
162 def from_jsonable(self, sample_n):
163 return [np.asarray(sample) for sample in sample_n]
164
165 def __repr__(self):
166 return f"Box({self.low}, {self.high}, {self.shape}, {self.dtype})"
167
168 def __eq__(self, other):
169 return (
170 isinstance(other, Box)
171 and (self.shape == other.shape)
172 and np.allclose(self.low, other.low)
173 and np.allclose(self.high, other.high)
174 )
175
176
177 def get_inf(dtype, sign):
178 """Returns an infinite that doesn't break things.
179 `dtype` must be an `np.dtype`
180 `bound` must be either `min` or `max`
181 """
182 if np.dtype(dtype).kind == "f":
183 if sign == "+":
184 return np.inf
185 elif sign == "-":
186 return -np.inf
187 else:
188 raise TypeError(f"Unknown sign {sign}, use either '+' or '-'")
189 elif np.dtype(dtype).kind == "i":
190 if sign == "+":
191 return np.iinfo(dtype).max - 2
192 elif sign == "-":
193 return np.iinfo(dtype).min + 2
194 else:
195 raise TypeError(f"Unknown sign {sign}, use either '+' or '-'")
196 else:
197 raise ValueError(f"Unknown dtype {dtype} for infinite bounds")
198
199
200 def get_precision(dtype):
201 if np.issubdtype(dtype, np.floating):
202 return np.finfo(dtype).precision
203 else:
204 return np.inf
205
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gym/spaces/box.py b/gym/spaces/box.py
--- a/gym/spaces/box.py
+++ b/gym/spaces/box.py
@@ -4,6 +4,17 @@
from gym import logger
+def _short_repr(arr):
+ """Create a shortened string representation of a numpy array.
+
+ If arr is a multiple of the all-ones vector, return a string representation of the multiplier.
+ Otherwise, return a string representation of the entire array.
+ """
+ if arr.size != 0 and np.min(arr) == np.max(arr):
+ return str(np.min(arr))
+ return str(arr)
+
+
class Box(Space):
"""
A (possibly unbounded) box in R^n. Specifically, a Box represents the
@@ -74,6 +85,9 @@
self.low = self.low.astype(self.dtype)
self.high = self.high.astype(self.dtype)
+ self.low_repr = _short_repr(self.low)
+ self.high_repr = _short_repr(self.high)
+
# Boolean arrays which indicate the interval type for each coordinate
self.bounded_below = -np.inf < self.low
self.bounded_above = np.inf > self.high
@@ -154,7 +168,7 @@
return [np.asarray(sample) for sample in sample_n]
def __repr__(self):
- return f"Box({self.low}, {self.high}, {self.shape}, {self.dtype})"
+ return f"Box({self.low_repr}, {self.high_repr}, {self.shape}, {self.dtype})"
def __eq__(self, other):
return (
|
{"golden_diff": "diff --git a/gym/spaces/box.py b/gym/spaces/box.py\n--- a/gym/spaces/box.py\n+++ b/gym/spaces/box.py\n@@ -4,6 +4,17 @@\n from gym import logger\n \n \n+def _short_repr(arr):\n+ \"\"\"Create a shortened string representation of a numpy array.\n+\n+ If arr is a multiple of the all-ones vector, return a string representation of the multiplier.\n+ Otherwise, return a string representation of the entire array.\n+ \"\"\"\n+ if arr.size != 0 and np.min(arr) == np.max(arr):\n+ return str(np.min(arr))\n+ return str(arr)\n+\n+\n class Box(Space):\n \"\"\"\n A (possibly unbounded) box in R^n. Specifically, a Box represents the\n@@ -74,6 +85,9 @@\n self.low = self.low.astype(self.dtype)\n self.high = self.high.astype(self.dtype)\n \n+ self.low_repr = _short_repr(self.low)\n+ self.high_repr = _short_repr(self.high)\n+\n # Boolean arrays which indicate the interval type for each coordinate\n self.bounded_below = -np.inf < self.low\n self.bounded_above = np.inf > self.high\n@@ -154,7 +168,7 @@\n return [np.asarray(sample) for sample in sample_n]\n \n def __repr__(self):\n- return f\"Box({self.low}, {self.high}, {self.shape}, {self.dtype})\"\n+ return f\"Box({self.low_repr}, {self.high_repr}, {self.shape}, {self.dtype})\"\n \n def __eq__(self, other):\n return (\n", "issue": "[Proposal] Simplify the Box string representation\n### Proposal \r\n\r\nStop the `Box` string representation from printing out all the low/high values if they are all identical\r\n\r\n### Motivation\r\n\r\nIn any reasonably large environment, `print(env.observation_space)` will spam the output with dozens of zero's or infs or whatever else. This information may be relevant if some of these values are different, but often is not necessary, and is just noise. And good luck printing a Dictionary observation space.\r\n\r\nI think it'd be enough to change the __repr__ method on Box so that if `len(set(self.low)) == 1`, we just print the unique value, and the same thing for `self.high`.\r\n\r\n### Pitch\r\n\r\nMake Boxes printable again\r\n\r\n### Alternatives\r\n\r\nGrin and bear it\r\n\r\n### Additional context\r\n\r\n\r\nHelp\r\n\r\nThis is partially going back on #2182 and #2183 , but I wasn't around to yell alarm at the full consequences of that PR. But since it will only simplify the representation when all limits are equal, it doesn't actually cause that same problem.\r\n\r\n### Checklist\r\n\r\n- [x] I have checked that there is no similar [issue](https://github.com/openai/gym/issues) in the repo (**required**)\r\n\n", "before_files": [{"content": "import numpy as np\n\nfrom .space import Space\nfrom gym import logger\n\n\nclass Box(Space):\n \"\"\"\n A (possibly unbounded) box in R^n. Specifically, a Box represents the\n Cartesian product of n closed intervals. Each interval has the form of one\n of [a, b], (-oo, b], [a, oo), or (-oo, oo).\n\n There are two common use cases:\n\n * Identical bound for each dimension::\n >>> Box(low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)\n Box(3, 4)\n\n * Independent bound for each dimension::\n >>> Box(low=np.array([-1.0, -2.0]), high=np.array([2.0, 4.0]), dtype=np.float32)\n Box(2,)\n\n \"\"\"\n\n def __init__(self, low, high, shape=None, dtype=np.float32, seed=None):\n assert dtype is not None, \"dtype must be explicitly provided. 
\"\n self.dtype = np.dtype(dtype)\n\n # determine shape if it isn't provided directly\n if shape is not None:\n shape = tuple(shape)\n assert (\n np.isscalar(low) or low.shape == shape\n ), \"low.shape doesn't match provided shape\"\n assert (\n np.isscalar(high) or high.shape == shape\n ), \"high.shape doesn't match provided shape\"\n elif not np.isscalar(low):\n shape = low.shape\n assert (\n np.isscalar(high) or high.shape == shape\n ), \"high.shape doesn't match low.shape\"\n elif not np.isscalar(high):\n shape = high.shape\n assert (\n np.isscalar(low) or low.shape == shape\n ), \"low.shape doesn't match high.shape\"\n else:\n raise ValueError(\n \"shape must be provided or inferred from the shapes of low or high\"\n )\n\n # handle infinite bounds and broadcast at the same time if needed\n if np.isscalar(low):\n low = get_inf(dtype, \"-\") if np.isinf(low) else low\n low = np.full(shape, low, dtype=dtype)\n else:\n if np.any(np.isinf(low)):\n # create new array with dtype, but maintain old one to preserve np.inf\n temp_low = low.astype(dtype)\n temp_low[np.isinf(low)] = get_inf(dtype, \"-\")\n low = temp_low\n\n if np.isscalar(high):\n high = get_inf(dtype, \"+\") if np.isinf(high) else high\n high = np.full(shape, high, dtype=dtype)\n else:\n if np.any(np.isinf(high)):\n # create new array with dtype, but maintain old one to preserve np.inf\n temp_high = high.astype(dtype)\n temp_high[np.isinf(high)] = get_inf(dtype, \"+\")\n high = temp_high\n\n self._shape = shape\n self.low = low\n self.high = high\n\n low_precision = get_precision(self.low.dtype)\n high_precision = get_precision(self.high.dtype)\n dtype_precision = get_precision(self.dtype)\n if min(low_precision, high_precision) > dtype_precision:\n logger.warn(f\"Box bound precision lowered by casting to {self.dtype}\")\n self.low = self.low.astype(self.dtype)\n self.high = self.high.astype(self.dtype)\n\n # Boolean arrays which indicate the interval type for each coordinate\n self.bounded_below = -np.inf < self.low\n self.bounded_above = np.inf > self.high\n\n super().__init__(self.shape, self.dtype, seed)\n\n def is_bounded(self, manner=\"both\"):\n below = np.all(self.bounded_below)\n above = np.all(self.bounded_above)\n if manner == \"both\":\n return below and above\n elif manner == \"below\":\n return below\n elif manner == \"above\":\n return above\n else:\n raise ValueError(\"manner is not in {'below', 'above', 'both'}\")\n\n def sample(self):\n \"\"\"\n Generates a single random sample inside of the Box.\n\n In creating a sample of the box, each coordinate is sampled according to\n the form of the interval:\n\n * [a, b] : uniform distribution\n * [a, oo) : shifted exponential distribution\n * (-oo, b] : shifted negative exponential distribution\n * (-oo, oo) : normal distribution\n \"\"\"\n high = self.high if self.dtype.kind == \"f\" else self.high.astype(\"int64\") + 1\n sample = np.empty(self.shape)\n\n # Masking arrays which classify the coordinates according to interval\n # type\n unbounded = ~self.bounded_below & ~self.bounded_above\n upp_bounded = ~self.bounded_below & self.bounded_above\n low_bounded = self.bounded_below & ~self.bounded_above\n bounded = self.bounded_below & self.bounded_above\n\n # Vectorized sampling by interval type\n sample[unbounded] = self.np_random.normal(size=unbounded[unbounded].shape)\n\n sample[low_bounded] = (\n self.np_random.exponential(size=low_bounded[low_bounded].shape)\n + self.low[low_bounded]\n )\n\n sample[upp_bounded] = (\n 
-self.np_random.exponential(size=upp_bounded[upp_bounded].shape)\n + self.high[upp_bounded]\n )\n\n sample[bounded] = self.np_random.uniform(\n low=self.low[bounded], high=high[bounded], size=bounded[bounded].shape\n )\n if self.dtype.kind == \"i\":\n sample = np.floor(sample)\n\n return sample.astype(self.dtype)\n\n def contains(self, x):\n if not isinstance(x, np.ndarray):\n logger.warn(\"Casting input x to numpy array.\")\n x = np.asarray(x, dtype=self.dtype)\n\n return (\n np.can_cast(x.dtype, self.dtype)\n and x.shape == self.shape\n and np.all(x >= self.low)\n and np.all(x <= self.high)\n )\n\n def to_jsonable(self, sample_n):\n return np.array(sample_n).tolist()\n\n def from_jsonable(self, sample_n):\n return [np.asarray(sample) for sample in sample_n]\n\n def __repr__(self):\n return f\"Box({self.low}, {self.high}, {self.shape}, {self.dtype})\"\n\n def __eq__(self, other):\n return (\n isinstance(other, Box)\n and (self.shape == other.shape)\n and np.allclose(self.low, other.low)\n and np.allclose(self.high, other.high)\n )\n\n\ndef get_inf(dtype, sign):\n \"\"\"Returns an infinite that doesn't break things.\n `dtype` must be an `np.dtype`\n `bound` must be either `min` or `max`\n \"\"\"\n if np.dtype(dtype).kind == \"f\":\n if sign == \"+\":\n return np.inf\n elif sign == \"-\":\n return -np.inf\n else:\n raise TypeError(f\"Unknown sign {sign}, use either '+' or '-'\")\n elif np.dtype(dtype).kind == \"i\":\n if sign == \"+\":\n return np.iinfo(dtype).max - 2\n elif sign == \"-\":\n return np.iinfo(dtype).min + 2\n else:\n raise TypeError(f\"Unknown sign {sign}, use either '+' or '-'\")\n else:\n raise ValueError(f\"Unknown dtype {dtype} for infinite bounds\")\n\n\ndef get_precision(dtype):\n if np.issubdtype(dtype, np.floating):\n return np.finfo(dtype).precision\n else:\n return np.inf\n", "path": "gym/spaces/box.py"}], "after_files": [{"content": "import numpy as np\n\nfrom .space import Space\nfrom gym import logger\n\n\ndef _short_repr(arr):\n \"\"\"Create a shortened string representation of a numpy array.\n\n If arr is a multiple of the all-ones vector, return a string representation of the multiplier.\n Otherwise, return a string representation of the entire array.\n \"\"\"\n if arr.size != 0 and np.min(arr) == np.max(arr):\n return str(np.min(arr))\n return str(arr)\n\n\nclass Box(Space):\n \"\"\"\n A (possibly unbounded) box in R^n. Specifically, a Box represents the\n Cartesian product of n closed intervals. Each interval has the form of one\n of [a, b], (-oo, b], [a, oo), or (-oo, oo).\n\n There are two common use cases:\n\n * Identical bound for each dimension::\n >>> Box(low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)\n Box(3, 4)\n\n * Independent bound for each dimension::\n >>> Box(low=np.array([-1.0, -2.0]), high=np.array([2.0, 4.0]), dtype=np.float32)\n Box(2,)\n\n \"\"\"\n\n def __init__(self, low, high, shape=None, dtype=np.float32, seed=None):\n assert dtype is not None, \"dtype must be explicitly provided. 
\"\n self.dtype = np.dtype(dtype)\n\n # determine shape if it isn't provided directly\n if shape is not None:\n shape = tuple(shape)\n assert (\n np.isscalar(low) or low.shape == shape\n ), \"low.shape doesn't match provided shape\"\n assert (\n np.isscalar(high) or high.shape == shape\n ), \"high.shape doesn't match provided shape\"\n elif not np.isscalar(low):\n shape = low.shape\n assert (\n np.isscalar(high) or high.shape == shape\n ), \"high.shape doesn't match low.shape\"\n elif not np.isscalar(high):\n shape = high.shape\n assert (\n np.isscalar(low) or low.shape == shape\n ), \"low.shape doesn't match high.shape\"\n else:\n raise ValueError(\n \"shape must be provided or inferred from the shapes of low or high\"\n )\n\n if np.isscalar(low):\n low = np.full(shape, low, dtype=dtype)\n\n if np.isscalar(high):\n high = np.full(shape, high, dtype=dtype)\n\n self._shape = shape\n self.low = low\n self.high = high\n\n def _get_precision(dtype):\n if np.issubdtype(dtype, np.floating):\n return np.finfo(dtype).precision\n else:\n return np.inf\n\n low_precision = _get_precision(self.low.dtype)\n high_precision = _get_precision(self.high.dtype)\n dtype_precision = _get_precision(self.dtype)\n if min(low_precision, high_precision) > dtype_precision:\n logger.warn(f\"Box bound precision lowered by casting to {self.dtype}\")\n self.low = self.low.astype(self.dtype)\n self.high = self.high.astype(self.dtype)\n\n self.low_repr = _short_repr(self.low)\n self.high_repr = _short_repr(self.high)\n\n # Boolean arrays which indicate the interval type for each coordinate\n self.bounded_below = -np.inf < self.low\n self.bounded_above = np.inf > self.high\n\n super().__init__(self.shape, self.dtype, seed)\n\n def is_bounded(self, manner=\"both\"):\n below = np.all(self.bounded_below)\n above = np.all(self.bounded_above)\n if manner == \"both\":\n return below and above\n elif manner == \"below\":\n return below\n elif manner == \"above\":\n return above\n else:\n raise ValueError(\"manner is not in {'below', 'above', 'both'}\")\n\n def sample(self):\n \"\"\"\n Generates a single random sample inside of the Box.\n\n In creating a sample of the box, each coordinate is sampled according to\n the form of the interval:\n\n * [a, b] : uniform distribution\n * [a, oo) : shifted exponential distribution\n * (-oo, b] : shifted negative exponential distribution\n * (-oo, oo) : normal distribution\n \"\"\"\n high = self.high if self.dtype.kind == \"f\" else self.high.astype(\"int64\") + 1\n sample = np.empty(self.shape)\n\n # Masking arrays which classify the coordinates according to interval\n # type\n unbounded = ~self.bounded_below & ~self.bounded_above\n upp_bounded = ~self.bounded_below & self.bounded_above\n low_bounded = self.bounded_below & ~self.bounded_above\n bounded = self.bounded_below & self.bounded_above\n\n # Vectorized sampling by interval type\n sample[unbounded] = self.np_random.normal(size=unbounded[unbounded].shape)\n\n sample[low_bounded] = (\n self.np_random.exponential(size=low_bounded[low_bounded].shape)\n + self.low[low_bounded]\n )\n\n sample[upp_bounded] = (\n -self.np_random.exponential(size=upp_bounded[upp_bounded].shape)\n + self.high[upp_bounded]\n )\n\n sample[bounded] = self.np_random.uniform(\n low=self.low[bounded], high=high[bounded], size=bounded[bounded].shape\n )\n if self.dtype.kind == \"i\":\n sample = np.floor(sample)\n\n return sample.astype(self.dtype)\n\n def contains(self, x):\n if not isinstance(x, np.ndarray):\n logger.warn(\"Casting input x to numpy array.\")\n x = 
np.asarray(x, dtype=self.dtype)\n\n return (\n np.can_cast(x.dtype, self.dtype)\n and x.shape == self.shape\n and np.all(x >= self.low)\n and np.all(x <= self.high)\n )\n\n def to_jsonable(self, sample_n):\n return np.array(sample_n).tolist()\n\n def from_jsonable(self, sample_n):\n return [np.asarray(sample) for sample in sample_n]\n\n def __repr__(self):\n return f\"Box({self.low_repr}, {self.high_repr}, {self.shape}, {self.dtype})\"\n\n def __eq__(self, other):\n return (\n isinstance(other, Box)\n and (self.shape == other.shape)\n and np.allclose(self.low, other.low)\n and np.allclose(self.high, other.high)\n )\n", "path": "gym/spaces/box.py"}]}
| 2,775 | 366 |
gh_patches_debug_13008
|
rasdani/github-patches
|
git_diff
|
open-mmlab__mmcv-1164
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
log repeat twice
The log printed on the screen looks like
**`2021-04-28 17:55:36,274 - mmseg - INFO - Distributed training: True `
`INFO:mmseg:Distributed training: True`**
All log messages were printed twice on screen, but in the log files each message was written only once.
I tried to locate the bug, and found that after running `c = c.parent` (anaconda3/envs/open-mmlab/lib/python3.7/logging/__init__.py, line 1590), `c` was changed to `[StreamHandler <stderr> (NOTSET)]`. And then, the message was printed again.
------------------------------------------------------------
sys.platform: linux
Python: 3.7.10 (default, Feb 26 2021, 18:47:35) [GCC 7.3.0]
CUDA available: True
GPU 0,1,2,3,4,5,6,7: GeForce RTX 3090
CUDA_HOME: /data/lfxuan/softwares/cuda-11.1
NVCC: Build cuda_11.1.TC455_06.29069683_0
GCC: gcc (Ubuntu 9.3.0-17ubuntu1~20.04) 9.3.0
PyTorch: 1.8.1+cu111
PyTorch compiling details: PyTorch built with:
- GCC 7.3
- C++ Version: 201402
- Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications
- Intel(R) MKL-DNN v1.7.0 (Git Hash 7aed236906b1f7a05c0917e5257a1af05e9ff683)
- OpenMP 201511 (a.k.a. OpenMP 4.5)
- NNPACK is enabled
- CPU capability usage: AVX2
- CUDA Runtime 11.1
- NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86
- CuDNN 8.0.5
- Magma 2.5.2
- Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.1, CUDNN_VERSION=8.0.5, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-variable -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.8.1, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON,
TorchVision: 0.9.1+cu111
OpenCV: 4.5.1
MMCV: 1.3.2
MMCV Compiler: GCC 9.3
MMCV CUDA Compiler: 11.1
MMSegmentation: 0.9.0+fed4b96
------------------------------------------------------------
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmcv/utils/logging.py`
Content:
```
1 import logging
2
3 import torch.distributed as dist
4
5 logger_initialized = {}
6
7
8 def get_logger(name, log_file=None, log_level=logging.INFO, file_mode='w'):
9 """Initialize and get a logger by name.
10
11 If the logger has not been initialized, this method will initialize the
12 logger by adding one or two handlers, otherwise the initialized logger will
13 be directly returned. During initialization, a StreamHandler will always be
14 added. If `log_file` is specified and the process rank is 0, a FileHandler
15 will also be added.
16
17 Args:
18 name (str): Logger name.
19 log_file (str | None): The log filename. If specified, a FileHandler
20 will be added to the logger.
21 log_level (int): The logger level. Note that only the process of
22 rank 0 is affected, and other processes will set the level to
23 "Error" thus be silent most of the time.
24 file_mode (str): The file mode used in opening log file.
25 Defaults to 'w'.
26
27 Returns:
28 logging.Logger: The expected logger.
29 """
30 logger = logging.getLogger(name)
31 if name in logger_initialized:
32 return logger
33 # handle hierarchical names
34 # e.g., logger "a" is initialized, then logger "a.b" will skip the
35 # initialization since it is a child of "a".
36 for logger_name in logger_initialized:
37 if name.startswith(logger_name):
38 return logger
39
40 stream_handler = logging.StreamHandler()
41 handlers = [stream_handler]
42
43 if dist.is_available() and dist.is_initialized():
44 rank = dist.get_rank()
45 else:
46 rank = 0
47
48 # only rank 0 will add a FileHandler
49 if rank == 0 and log_file is not None:
50 # Here, the default behaviour of the official logger is 'a'. Thus, we
51 # provide an interface to change the file mode to the default
52 # behaviour.
53 file_handler = logging.FileHandler(log_file, file_mode)
54 handlers.append(file_handler)
55
56 formatter = logging.Formatter(
57 '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
58 for handler in handlers:
59 handler.setFormatter(formatter)
60 handler.setLevel(log_level)
61 logger.addHandler(handler)
62
63 if rank == 0:
64 logger.setLevel(log_level)
65 else:
66 logger.setLevel(logging.ERROR)
67
68 logger_initialized[name] = True
69
70 return logger
71
72
73 def print_log(msg, logger=None, level=logging.INFO):
74 """Print a log message.
75
76 Args:
77 msg (str): The message to be logged.
78 logger (logging.Logger | str | None): The logger to be used.
79 Some special loggers are:
80 - "silent": no message will be printed.
81 - other str: the logger obtained with `get_root_logger(logger)`.
82 - None: The `print()` method will be used to print log messages.
83 level (int): Logging level. Only available when `logger` is a Logger
84 object or "root".
85 """
86 if logger is None:
87 print(msg)
88 elif isinstance(logger, logging.Logger):
89 logger.log(level, msg)
90 elif logger == 'silent':
91 pass
92 elif isinstance(logger, str):
93 _logger = get_logger(logger)
94 _logger.log(level, msg)
95 else:
96 raise TypeError(
97 'logger should be either a logging.Logger object, str, '
98 f'"silent" or None, but got {type(logger)}')
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mmcv/utils/logging.py b/mmcv/utils/logging.py
--- a/mmcv/utils/logging.py
+++ b/mmcv/utils/logging.py
@@ -37,6 +37,17 @@
if name.startswith(logger_name):
return logger
+ # handle duplicate logs to the console
+ # Starting in 1.8.0, PyTorch DDP attaches a StreamHandler <stderr> (NOTSET)
+ # to the root logger. As logger.propagate is True by default, this root
+ # level handler causes logging messages from rank>0 processes to
+ # unexpectedly show up on the console, creating much unwanted clutter.
+ # To fix this issue, we set the root logger's StreamHandler, if any, to log
+ # at the ERROR level.
+ for handler in logger.root.handlers:
+ if type(handler) is logging.StreamHandler:
+ handler.setLevel(logging.ERROR)
+
stream_handler = logging.StreamHandler()
handlers = [stream_handler]
|
{"golden_diff": "diff --git a/mmcv/utils/logging.py b/mmcv/utils/logging.py\n--- a/mmcv/utils/logging.py\n+++ b/mmcv/utils/logging.py\n@@ -37,6 +37,17 @@\n if name.startswith(logger_name):\n return logger\n \n+ # handle duplicate logs to the console\n+ # Starting in 1.8.0, PyTorch DDP attaches a StreamHandler <stderr> (NOTSET)\n+ # to the root logger. As logger.propagate is True by default, this root\n+ # level handler causes logging messages from rank>0 processes to\n+ # unexpectedly show up on the console, creating much unwanted clutter.\n+ # To fix this issue, we set the root logger's StreamHandler, if any, to log\n+ # at the ERROR level.\n+ for handler in logger.root.handlers:\n+ if type(handler) is logging.StreamHandler:\n+ handler.setLevel(logging.ERROR)\n+\n stream_handler = logging.StreamHandler()\n handlers = [stream_handler]\n", "issue": "log repeat twice\nThe log print in screen like\r\n**`2021-04-28 17:55:36,274 - mmseg - INFO - Distributed training: True `\r\n`INFO:mmseg:Distributed training: True`**\r\nAll log messages ware printed twice in screen, but in log files, the message was only writed once. \r\nI tried to locate the bug, and found that after run `c = c.parent`(anaconda3/envs/open-mmlab/lib/python3.7/logging/__init__.py, line 1590), c was changed to `[StreamHandler <stderr> (NOTSET)]`. And then, the message was print again.\r\n\r\n\r\n------------------------------------------------------------\r\nsys.platform: linux\r\nPython: 3.7.10 (default, Feb 26 2021, 18:47:35) [GCC 7.3.0]\r\nCUDA available: True\r\nGPU 0,1,2,3,4,5,6,7: GeForce RTX 3090\r\nCUDA_HOME: /data/lfxuan/softwares/cuda-11.1\r\nNVCC: Build cuda_11.1.TC455_06.29069683_0\r\nGCC: gcc (Ubuntu 9.3.0-17ubuntu1~20.04) 9.3.0\r\nPyTorch: 1.8.1+cu111\r\nPyTorch compiling details: PyTorch built with:\r\n - GCC 7.3\r\n - C++ Version: 201402\r\n - Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications\r\n - Intel(R) MKL-DNN v1.7.0 (Git Hash 7aed236906b1f7a05c0917e5257a1af05e9ff683)\r\n - OpenMP 201511 (a.k.a. 
OpenMP 4.5)\r\n - NNPACK is enabled\r\n - CPU capability usage: AVX2\r\n - CUDA Runtime 11.1\r\n - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86\r\n - CuDNN 8.0.5\r\n - Magma 2.5.2\r\n - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.1, CUDNN_VERSION=8.0.5, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-variable -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.8.1, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, \r\n\r\nTorchVision: 0.9.1+cu111\r\nOpenCV: 4.5.1\r\nMMCV: 1.3.2\r\nMMCV Compiler: GCC 9.3\r\nMMCV CUDA Compiler: 11.1\r\nMMSegmentation: 0.9.0+fed4b96\r\n------------------------------------------------------------\r\n\r\n \n", "before_files": [{"content": "import logging\n\nimport torch.distributed as dist\n\nlogger_initialized = {}\n\n\ndef get_logger(name, log_file=None, log_level=logging.INFO, file_mode='w'):\n \"\"\"Initialize and get a logger by name.\n\n If the logger has not been initialized, this method will initialize the\n logger by adding one or two handlers, otherwise the initialized logger will\n be directly returned. During initialization, a StreamHandler will always be\n added. If `log_file` is specified and the process rank is 0, a FileHandler\n will also be added.\n\n Args:\n name (str): Logger name.\n log_file (str | None): The log filename. If specified, a FileHandler\n will be added to the logger.\n log_level (int): The logger level. Note that only the process of\n rank 0 is affected, and other processes will set the level to\n \"Error\" thus be silent most of the time.\n file_mode (str): The file mode used in opening log file.\n Defaults to 'w'.\n\n Returns:\n logging.Logger: The expected logger.\n \"\"\"\n logger = logging.getLogger(name)\n if name in logger_initialized:\n return logger\n # handle hierarchical names\n # e.g., logger \"a\" is initialized, then logger \"a.b\" will skip the\n # initialization since it is a child of \"a\".\n for logger_name in logger_initialized:\n if name.startswith(logger_name):\n return logger\n\n stream_handler = logging.StreamHandler()\n handlers = [stream_handler]\n\n if dist.is_available() and dist.is_initialized():\n rank = dist.get_rank()\n else:\n rank = 0\n\n # only rank 0 will add a FileHandler\n if rank == 0 and log_file is not None:\n # Here, the default behaviour of the official logger is 'a'. 
Thus, we\n # provide an interface to change the file mode to the default\n # behaviour.\n file_handler = logging.FileHandler(log_file, file_mode)\n handlers.append(file_handler)\n\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n for handler in handlers:\n handler.setFormatter(formatter)\n handler.setLevel(log_level)\n logger.addHandler(handler)\n\n if rank == 0:\n logger.setLevel(log_level)\n else:\n logger.setLevel(logging.ERROR)\n\n logger_initialized[name] = True\n\n return logger\n\n\ndef print_log(msg, logger=None, level=logging.INFO):\n \"\"\"Print a log message.\n\n Args:\n msg (str): The message to be logged.\n logger (logging.Logger | str | None): The logger to be used.\n Some special loggers are:\n - \"silent\": no message will be printed.\n - other str: the logger obtained with `get_root_logger(logger)`.\n - None: The `print()` method will be used to print log messages.\n level (int): Logging level. Only available when `logger` is a Logger\n object or \"root\".\n \"\"\"\n if logger is None:\n print(msg)\n elif isinstance(logger, logging.Logger):\n logger.log(level, msg)\n elif logger == 'silent':\n pass\n elif isinstance(logger, str):\n _logger = get_logger(logger)\n _logger.log(level, msg)\n else:\n raise TypeError(\n 'logger should be either a logging.Logger object, str, '\n f'\"silent\" or None, but got {type(logger)}')\n", "path": "mmcv/utils/logging.py"}], "after_files": [{"content": "import logging\n\nimport torch.distributed as dist\n\nlogger_initialized = {}\n\n\ndef get_logger(name, log_file=None, log_level=logging.INFO, file_mode='w'):\n \"\"\"Initialize and get a logger by name.\n\n If the logger has not been initialized, this method will initialize the\n logger by adding one or two handlers, otherwise the initialized logger will\n be directly returned. During initialization, a StreamHandler will always be\n added. If `log_file` is specified and the process rank is 0, a FileHandler\n will also be added.\n\n Args:\n name (str): Logger name.\n log_file (str | None): The log filename. If specified, a FileHandler\n will be added to the logger.\n log_level (int): The logger level. Note that only the process of\n rank 0 is affected, and other processes will set the level to\n \"Error\" thus be silent most of the time.\n file_mode (str): The file mode used in opening log file.\n Defaults to 'w'.\n\n Returns:\n logging.Logger: The expected logger.\n \"\"\"\n logger = logging.getLogger(name)\n if name in logger_initialized:\n return logger\n # handle hierarchical names\n # e.g., logger \"a\" is initialized, then logger \"a.b\" will skip the\n # initialization since it is a child of \"a\".\n for logger_name in logger_initialized:\n if name.startswith(logger_name):\n return logger\n\n # handle duplicate logs to the console\n # Starting in 1.8.0, PyTorch DDP attaches a StreamHandler <stderr> (NOTSET)\n # to the root logger. 
As logger.propagate is True by default, this root\n # level handler causes logging messages from rank>0 processes to\n # unexpectedly show up on the console, creating much unwanted clutter.\n # To fix this issue, we set the root logger's StreamHandler, if any, to log\n # at the ERROR level.\n for handler in logger.root.handlers:\n if type(handler) is logging.StreamHandler:\n handler.setLevel(logging.ERROR)\n\n stream_handler = logging.StreamHandler()\n handlers = [stream_handler]\n\n if dist.is_available() and dist.is_initialized():\n rank = dist.get_rank()\n else:\n rank = 0\n\n # only rank 0 will add a FileHandler\n if rank == 0 and log_file is not None:\n # Here, the default behaviour of the official logger is 'a'. Thus, we\n # provide an interface to change the file mode to the default\n # behaviour.\n file_handler = logging.FileHandler(log_file, file_mode)\n handlers.append(file_handler)\n\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n for handler in handlers:\n handler.setFormatter(formatter)\n handler.setLevel(log_level)\n logger.addHandler(handler)\n\n if rank == 0:\n logger.setLevel(log_level)\n else:\n logger.setLevel(logging.ERROR)\n\n logger_initialized[name] = True\n\n return logger\n\n\ndef print_log(msg, logger=None, level=logging.INFO):\n \"\"\"Print a log message.\n\n Args:\n msg (str): The message to be logged.\n logger (logging.Logger | str | None): The logger to be used.\n Some special loggers are:\n - \"silent\": no message will be printed.\n - other str: the logger obtained with `get_root_logger(logger)`.\n - None: The `print()` method will be used to print log messages.\n level (int): Logging level. Only available when `logger` is a Logger\n object or \"root\".\n \"\"\"\n if logger is None:\n print(msg)\n elif isinstance(logger, logging.Logger):\n logger.log(level, msg)\n elif logger == 'silent':\n pass\n elif isinstance(logger, str):\n _logger = get_logger(logger)\n _logger.log(level, msg)\n else:\n raise TypeError(\n 'logger should be either a logging.Logger object, str, '\n f'\"silent\" or None, but got {type(logger)}')\n", "path": "mmcv/utils/logging.py"}]}
| 2,357 | 222 |
gh_patches_debug_23184
|
rasdani/github-patches
|
git_diff
|
spyder-ide__spyder-7515
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
IOError when getting saved file names in a project
## Description
### What steps will reproduce the problem?
<!--- You can use Markdown here --->
Opening Spyder from the Terminal in macOS with '~/anaconda2/bin/spyder $@' causes an exception, and Spyder does not remember the last config. In addition, Spyder creates a new project in the current working directory.
### Traceback
```python-traceback
File "/Users/samschott/anaconda2/lib/python2.7/site-packages/spyder/plugins/projects.py", line 156, in <lambda>
lambda v: self.editor.setup_open_files())
File "/Users/samschott/anaconda2/lib/python2.7/site-packages/spyder/plugins/editor.py", line 2702, in setup_open_files
filenames = self.projects.get_project_filenames()
File "/Users/samschott/anaconda2/lib/python2.7/site-packages/spyder/plugins/projects.py", line 341, in get_project_filenames
recent_files = self.current_active_project.get_recent_files()
File "/Users/samschott/anaconda2/lib/python2.7/site-packages/spyder/widgets/projects/type/__init__.py", line 79, in get_recent_files
default=[])
File "/Users/samschott/anaconda2/lib/python2.7/site-packages/spyder/config/user.py", line 377, in get
self.set(section, option, default)
File "/Users/samschott/anaconda2/lib/python2.7/site-packages/spyder/config/user.py", line 445, in set
self._save()
File "/Users/samschott/anaconda2/lib/python2.7/site-packages/spyder/config/user.py", line 120, in _save
raise(e)
IOError: [Errno 2] No such file or directory: u'sn_0_9996680/.spyproject/workspace.ini'
WARNING:spyder.widgets.github.backend:failed to send bug report on github. response={'json': {'documentation_url': u'https://developer.github.com/v3', 'message': u'Bad credentials'}, 'code': 401}
```
## Versions
* Spyder version: 3.3.0
* Python version: 2.7.15
* Qt version: 5.9.6
* PyQt5 version: 5.9.2
* Operating System: Darwin 18.0.0
### Dependencies
```
pyflakes >=0.5.0 : 2.0.0 (OK)
pycodestyle >=2.3 : 2.4.0 (OK)
pygments >=2.0 : 2.2.0 (OK)
sphinx >=0.6.6 : 1.7.5 (OK)
rope >=0.9.4 : 0.10.7 (OK)
jedi >=0.9.0 : 0.12.0 (OK)
psutil >=0.3 : 5.4.6 (OK)
nbconvert >=4.0 : 5.3.1 (OK)
pandas >=0.13.1 : 0.23.3 (OK)
numpy >=1.7 : 1.14.5 (OK)
sympy >=0.7.3 : 1.1.1 (OK)
cython >=0.21 : 0.28.3 (OK)
qtconsole >=4.2.0 : 4.3.1 (OK)
IPython >=4.0;<6.0: 5.7.0 (OK)
matplotlib >=2.0.0: 2.2.2 (OK)
pylint >=0.25 : 1.9.2 (OK)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `spyder/widgets/projects/type/__init__.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # -----------------------------------------------------------------------------
3 # Copyright © Spyder Project Contributors
4 #
5 # Licensed under the terms of the MIT License
6 # (see spyder/__init__.py for details)
7 # -----------------------------------------------------------------------------
8 """Project types"""
9
10 import os
11 import os.path as osp
12 from collections import OrderedDict
13
14 from spyder.config.base import _
15 from spyder.py3compat import to_text_string
16 from spyder.widgets.projects.config import (ProjectConfig, CODESTYLE,
17 CODESTYLE_DEFAULTS,
18 CODESTYLE_VERSION, WORKSPACE,
19 WORKSPACE_DEFAULTS,
20 WORKSPACE_VERSION,
21 ENCODING, ENCODING_DEFAULTS,
22 ENCODING_VERSION,
23 VCS, VCS_DEFAULTS, VCS_VERSION)
24
25
26 class BaseProject(object):
27 """Spyder base project.
28
29 This base class must not be used directly, but inherited from. It does not
30 assume that python is specific to this project.
31 """
32 PROJECT_FOLDER = '.spyproject'
33 PROJECT_TYPE_NAME = None
34 IGNORE_FILE = ""
35 CONFIG_SETUP = {WORKSPACE: {'filename': '{0}.ini'.format(WORKSPACE),
36 'defaults': WORKSPACE_DEFAULTS,
37 'version': WORKSPACE_VERSION},
38 CODESTYLE: {'filename': '{0}.ini'.format(CODESTYLE),
39 'defaults': CODESTYLE_DEFAULTS,
40 'version': CODESTYLE_VERSION},
41 ENCODING: {'filename': '{0}.ini'.format(ENCODING),
42 'defaults': ENCODING_DEFAULTS,
43 'version': ENCODING_VERSION},
44 VCS: {'filename': '{0}.ini'.format(VCS),
45 'defaults': VCS_DEFAULTS,
46 'version': VCS_VERSION}
47 }
48
49 def __init__(self, root_path):
50 self.name = None
51 self.root_path = root_path
52 self.open_project_files = []
53 self.open_non_project_files = []
54 self.config_files = []
55 self.CONF = {}
56
57 # Configuration files
58
59 self.related_projects = [] # storing project path, not project objects
60 # self.pythonpath = []
61 self.opened = True
62
63 self.ioerror_flag = False
64 self.create_project_config_files()
65
66 # --- Helpers
67 # -------------------------------------------------------------------------
68 def set_recent_files(self, recent_files):
69 """Set a list of files opened by the project."""
70 for recent_file in recent_files[:]:
71 if not os.path.isfile(recent_file):
72 recent_files.remove(recent_file)
73 self.CONF[WORKSPACE].set('main', 'recent_files',
74 list(OrderedDict.fromkeys(recent_files)))
75
76 def get_recent_files(self):
77 """Return a list of files opened by the project."""
78 recent_files = self.CONF[WORKSPACE].get('main', 'recent_files',
79 default=[])
80 for recent_file in recent_files[:]:
81 if not os.path.isfile(recent_file):
82 recent_files.remove(recent_file)
83 return list(OrderedDict.fromkeys(recent_files))
84
85 def create_project_config_files(self):
86 """ """
87 dic = self.CONFIG_SETUP
88 for key in dic:
89 name = key
90 filename = dic[key]['filename']
91 defaults = dic[key]['defaults']
92 version = dic[key]['version']
93 self.CONF[key] = ProjectConfig(name, self.root_path, filename,
94 defaults=defaults, load=True,
95 version=version)
96
97 def get_conf_files(self):
98 """ """
99 return self.CONF
100
101 def add_ignore_lines(self, lines):
102 """ """
103 text = self.IGNORE_FILE
104 for line in lines:
105 text += line
106 self.IGNORE_FILE = text
107
108 def set_root_path(self, root_path):
109 """Set project root path."""
110 if self.name is None:
111 self.name = osp.basename(root_path)
112 self.root_path = to_text_string(root_path)
113 config_path = self.__get_project_config_path()
114 if osp.exists(config_path):
115 self.load()
116 else:
117 if not osp.isdir(self.root_path):
118 os.mkdir(self.root_path)
119 self.save()
120
121 def rename(self, new_name):
122 """Rename project and rename its root path accordingly."""
123 old_name = self.name
124 self.name = new_name
125 pypath = self.relative_pythonpath # ??
126 self.root_path = self.root_path[:-len(old_name)]+new_name
127 self.relative_pythonpath = pypath # ??
128 self.save()
129
130 def __get_project_config_folder(self):
131 """Return project configuration folder."""
132 return osp.join(self.root_path, self.PROJECT_FOLDER)
133
134 def __get_project_config_path(self):
135 """Return project configuration path"""
136 return osp.join(self.root_path, self.CONFIG_NAME)
137
138 def load(self):
139 """Load project data"""
140 # fname = self.__get_project_config_path()
141 # try:
142 # # Old format (Spyder 2.0-2.1 for Python 2)
143 # with open(fname, 'U') as fdesc:
144 # data = pickle.loads(fdesc.read())
145 # except (pickle.PickleError, TypeError, UnicodeDecodeError,
146 # AttributeError):
147 # try:
148 # # New format (Spyder >=2.2 for Python 2 and Python 3)
149 # with open(fname, 'rb') as fdesc:
150 # data = pickle.loads(fdesc.read())
151 # except (IOError, OSError, pickle.PickleError):
152 # self.ioerror_flag = True
153 # return
154 # Compatibilty with old project explorer file format:
155 # if 'relative_pythonpath' not in data:
156 # print("Warning: converting old configuration file "
157 # "for project '%s'" % data['name'], file=STDERR)
158 # self.pythonpath = data['pythonpath']
159 # data['relative_pythonpath'] = self.relative_pythonpath
160 # for attr in self.CONFIG_ATTR:
161 # setattr(self, attr, data[attr])
162 # self.save()
163
164 def save(self):
165 """Save project data"""
166 # data = {}
167 # for attr in self.PROJECT_ATTR:
168 # data[attr] = getattr(self, attr)
169 # try:
170 # with open(self.__get_project_config_path(), 'wb') as fdesc:
171 # pickle.dump(data, fdesc, 2)
172 # except (IOError, OSError):
173 # self.ioerror_flag = True
174
175 # def delete(self):
176 # """Delete project"""
177 # os.remove(self.__get_project_config_path())
178 #
179 # # --- Misc.
180 # def get_related_projects(self):
181 # """Return related projects path list"""
182 # return self.related_projects
183 #
184 # def set_related_projects(self, related_projects):
185 # """Set related projects"""
186 # self.related_projects = related_projects
187 # self.save()
188 #
189 # def open(self):
190 # """Open project"""
191 # self.opened = True
192 # self.save()
193 #
194 # def close(self):
195 # """Close project"""
196 # self.opened = False
197 # self.save()
198 #
199 # def is_opened(self):
200 # """Return True if project is opened"""
201 # return self.opened
202 #
203 # def is_file_in_project(self, fname):
204 # """Return True if file *fname* is in one of the project subfolders"""
205 # fixed_root = fixpath(self.root_path)
206 # return fixpath(fname) == fixed_root or\
207 # fixpath(osp.dirname(fname)).startswith(fixed_root)
208 #
209 # def is_root_path(self, dirname):
210 # """Return True if dirname is project's root path"""
211 # return fixpath(dirname) == fixpath(self.root_path)
212
213
214 class EmptyProject(BaseProject):
215 """Empty Project"""
216 PROJECT_TYPE_NAME = _('Empty project')
217
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/spyder/widgets/projects/type/__init__.py b/spyder/widgets/projects/type/__init__.py
--- a/spyder/widgets/projects/type/__init__.py
+++ b/spyder/widgets/projects/type/__init__.py
@@ -70,13 +70,20 @@
for recent_file in recent_files[:]:
if not os.path.isfile(recent_file):
recent_files.remove(recent_file)
- self.CONF[WORKSPACE].set('main', 'recent_files',
- list(OrderedDict.fromkeys(recent_files)))
+ try:
+ self.CONF[WORKSPACE].set('main', 'recent_files',
+ list(OrderedDict.fromkeys(recent_files)))
+ except EnvironmentError:
+ pass
def get_recent_files(self):
"""Return a list of files opened by the project."""
- recent_files = self.CONF[WORKSPACE].get('main', 'recent_files',
- default=[])
+ try:
+ recent_files = self.CONF[WORKSPACE].get('main', 'recent_files',
+ default=[])
+ except EnvironmentError:
+ return []
+
for recent_file in recent_files[:]:
if not os.path.isfile(recent_file):
recent_files.remove(recent_file)
|
{"golden_diff": "diff --git a/spyder/widgets/projects/type/__init__.py b/spyder/widgets/projects/type/__init__.py\n--- a/spyder/widgets/projects/type/__init__.py\n+++ b/spyder/widgets/projects/type/__init__.py\n@@ -70,13 +70,20 @@\n for recent_file in recent_files[:]:\r\n if not os.path.isfile(recent_file):\r\n recent_files.remove(recent_file)\r\n- self.CONF[WORKSPACE].set('main', 'recent_files',\r\n- list(OrderedDict.fromkeys(recent_files)))\r\n+ try:\r\n+ self.CONF[WORKSPACE].set('main', 'recent_files',\r\n+ list(OrderedDict.fromkeys(recent_files)))\r\n+ except EnvironmentError:\r\n+ pass\r\n \r\n def get_recent_files(self):\r\n \"\"\"Return a list of files opened by the project.\"\"\"\r\n- recent_files = self.CONF[WORKSPACE].get('main', 'recent_files',\r\n- default=[])\r\n+ try:\r\n+ recent_files = self.CONF[WORKSPACE].get('main', 'recent_files',\r\n+ default=[])\r\n+ except EnvironmentError:\r\n+ return []\r\n+\r\n for recent_file in recent_files[:]:\r\n if not os.path.isfile(recent_file):\r\n recent_files.remove(recent_file)\n", "issue": "IOError when getting saved file names in a project\n## Description\r\n\r\n### What steps will reproduce the problem?\r\n\r\n<!--- You can use Markdown here --->\r\n\r\nOpening spyder from Terminal in macOS with '~/anaconda2/bin/spyder $@' causes an Exception and Spyder does not remember last config. In addition, Spyder creates a new project in the current working directory.\r\n\r\n### Traceback\r\n```python-traceback\r\n File \"/Users/samschott/anaconda2/lib/python2.7/site-packages/spyder/plugins/projects.py\", line 156, in <lambda>\r\n lambda v: self.editor.setup_open_files())\r\n File \"/Users/samschott/anaconda2/lib/python2.7/site-packages/spyder/plugins/editor.py\", line 2702, in setup_open_files\r\n filenames = self.projects.get_project_filenames()\r\n File \"/Users/samschott/anaconda2/lib/python2.7/site-packages/spyder/plugins/projects.py\", line 341, in get_project_filenames\r\n recent_files = self.current_active_project.get_recent_files()\r\n File \"/Users/samschott/anaconda2/lib/python2.7/site-packages/spyder/widgets/projects/type/__init__.py\", line 79, in get_recent_files\r\n default=[])\r\n File \"/Users/samschott/anaconda2/lib/python2.7/site-packages/spyder/config/user.py\", line 377, in get\r\n self.set(section, option, default)\r\n File \"/Users/samschott/anaconda2/lib/python2.7/site-packages/spyder/config/user.py\", line 445, in set\r\n self._save()\r\n File \"/Users/samschott/anaconda2/lib/python2.7/site-packages/spyder/config/user.py\", line 120, in _save\r\n raise(e)\r\nIOError: [Errno 2] No such file or directory: u'sn_0_9996680/.spyproject/workspace.ini'\r\nWARNING:spyder.widgets.github.backend:failed to send bug report on github. 
response={'json': {'documentation_url': u'https://developer.github.com/v3', 'message': u'Bad credentials'}, 'code': 401}\r\n```\r\n\r\n## Versions\r\n\r\n* Spyder version: 3.3.0 \r\n* Python version: 2.7.15\r\n* Qt version: 5.9.6\r\n* PyQt5 version: 5.9.2\r\n* Operating System: Darwin 18.0.0\r\n\r\n### Dependencies\r\n\r\n```\r\npyflakes >=0.5.0 : 2.0.0 (OK)\r\npycodestyle >=2.3 : 2.4.0 (OK)\r\npygments >=2.0 : 2.2.0 (OK)\r\nsphinx >=0.6.6 : 1.7.5 (OK)\r\nrope >=0.9.4 : 0.10.7 (OK)\r\njedi >=0.9.0 : 0.12.0 (OK)\r\npsutil >=0.3 : 5.4.6 (OK)\r\nnbconvert >=4.0 : 5.3.1 (OK)\r\npandas >=0.13.1 : 0.23.3 (OK)\r\nnumpy >=1.7 : 1.14.5 (OK)\r\nsympy >=0.7.3 : 1.1.1 (OK)\r\ncython >=0.21 : 0.28.3 (OK)\r\nqtconsole >=4.2.0 : 4.3.1 (OK)\r\nIPython >=4.0;<6.0: 5.7.0 (OK)\r\nmatplotlib >=2.0.0: 2.2.2 (OK)\r\npylint >=0.25 : 1.9.2 (OK)\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\r\n# -----------------------------------------------------------------------------\r\n# Copyright \u00a9 Spyder Project Contributors\r\n#\r\n# Licensed under the terms of the MIT License\r\n# (see spyder/__init__.py for details)\r\n# -----------------------------------------------------------------------------\r\n\"\"\"Project types\"\"\"\r\n\r\nimport os\r\nimport os.path as osp\r\nfrom collections import OrderedDict\r\n\r\nfrom spyder.config.base import _\r\nfrom spyder.py3compat import to_text_string\r\nfrom spyder.widgets.projects.config import (ProjectConfig, CODESTYLE,\r\n CODESTYLE_DEFAULTS,\r\n CODESTYLE_VERSION, WORKSPACE,\r\n WORKSPACE_DEFAULTS,\r\n WORKSPACE_VERSION,\r\n ENCODING, ENCODING_DEFAULTS,\r\n ENCODING_VERSION,\r\n VCS, VCS_DEFAULTS, VCS_VERSION)\r\n\r\n\r\nclass BaseProject(object):\r\n \"\"\"Spyder base project.\r\n\r\n This base class must not be used directly, but inherited from. 
It does not\r\n assume that python is specific to this project.\r\n \"\"\"\r\n PROJECT_FOLDER = '.spyproject'\r\n PROJECT_TYPE_NAME = None\r\n IGNORE_FILE = \"\"\r\n CONFIG_SETUP = {WORKSPACE: {'filename': '{0}.ini'.format(WORKSPACE),\r\n 'defaults': WORKSPACE_DEFAULTS,\r\n 'version': WORKSPACE_VERSION},\r\n CODESTYLE: {'filename': '{0}.ini'.format(CODESTYLE),\r\n 'defaults': CODESTYLE_DEFAULTS,\r\n 'version': CODESTYLE_VERSION},\r\n ENCODING: {'filename': '{0}.ini'.format(ENCODING),\r\n 'defaults': ENCODING_DEFAULTS,\r\n 'version': ENCODING_VERSION},\r\n VCS: {'filename': '{0}.ini'.format(VCS),\r\n 'defaults': VCS_DEFAULTS,\r\n 'version': VCS_VERSION}\r\n }\r\n\r\n def __init__(self, root_path):\r\n self.name = None\r\n self.root_path = root_path\r\n self.open_project_files = []\r\n self.open_non_project_files = []\r\n self.config_files = []\r\n self.CONF = {}\r\n\r\n # Configuration files\r\n\r\n self.related_projects = [] # storing project path, not project objects\r\n# self.pythonpath = []\r\n self.opened = True\r\n\r\n self.ioerror_flag = False\r\n self.create_project_config_files()\r\n\r\n # --- Helpers\r\n # -------------------------------------------------------------------------\r\n def set_recent_files(self, recent_files):\r\n \"\"\"Set a list of files opened by the project.\"\"\"\r\n for recent_file in recent_files[:]:\r\n if not os.path.isfile(recent_file):\r\n recent_files.remove(recent_file)\r\n self.CONF[WORKSPACE].set('main', 'recent_files',\r\n list(OrderedDict.fromkeys(recent_files)))\r\n\r\n def get_recent_files(self):\r\n \"\"\"Return a list of files opened by the project.\"\"\"\r\n recent_files = self.CONF[WORKSPACE].get('main', 'recent_files',\r\n default=[])\r\n for recent_file in recent_files[:]:\r\n if not os.path.isfile(recent_file):\r\n recent_files.remove(recent_file)\r\n return list(OrderedDict.fromkeys(recent_files))\r\n\r\n def create_project_config_files(self):\r\n \"\"\" \"\"\"\r\n dic = self.CONFIG_SETUP\r\n for key in dic:\r\n name = key\r\n filename = dic[key]['filename']\r\n defaults = dic[key]['defaults']\r\n version = dic[key]['version']\r\n self.CONF[key] = ProjectConfig(name, self.root_path, filename,\r\n defaults=defaults, load=True,\r\n version=version)\r\n\r\n def get_conf_files(self):\r\n \"\"\" \"\"\"\r\n return self.CONF\r\n\r\n def add_ignore_lines(self, lines):\r\n \"\"\" \"\"\"\r\n text = self.IGNORE_FILE\r\n for line in lines:\r\n text += line\r\n self.IGNORE_FILE = text\r\n\r\n def set_root_path(self, root_path):\r\n \"\"\"Set project root path.\"\"\"\r\n if self.name is None:\r\n self.name = osp.basename(root_path)\r\n self.root_path = to_text_string(root_path)\r\n config_path = self.__get_project_config_path()\r\n if osp.exists(config_path):\r\n self.load()\r\n else:\r\n if not osp.isdir(self.root_path):\r\n os.mkdir(self.root_path)\r\n self.save()\r\n\r\n def rename(self, new_name):\r\n \"\"\"Rename project and rename its root path accordingly.\"\"\"\r\n old_name = self.name\r\n self.name = new_name\r\n pypath = self.relative_pythonpath # ??\r\n self.root_path = self.root_path[:-len(old_name)]+new_name\r\n self.relative_pythonpath = pypath # ??\r\n self.save()\r\n\r\n def __get_project_config_folder(self):\r\n \"\"\"Return project configuration folder.\"\"\"\r\n return osp.join(self.root_path, self.PROJECT_FOLDER)\r\n\r\n def __get_project_config_path(self):\r\n \"\"\"Return project configuration path\"\"\"\r\n return osp.join(self.root_path, self.CONFIG_NAME)\r\n\r\n def load(self):\r\n \"\"\"Load project data\"\"\"\r\n# fname = 
self.__get_project_config_path()\r\n# try:\r\n# # Old format (Spyder 2.0-2.1 for Python 2)\r\n# with open(fname, 'U') as fdesc:\r\n# data = pickle.loads(fdesc.read())\r\n# except (pickle.PickleError, TypeError, UnicodeDecodeError,\r\n# AttributeError):\r\n# try:\r\n# # New format (Spyder >=2.2 for Python 2 and Python 3)\r\n# with open(fname, 'rb') as fdesc:\r\n# data = pickle.loads(fdesc.read())\r\n# except (IOError, OSError, pickle.PickleError):\r\n# self.ioerror_flag = True\r\n# return\r\n # Compatibilty with old project explorer file format:\r\n# if 'relative_pythonpath' not in data:\r\n# print(\"Warning: converting old configuration file \"\r\n# \"for project '%s'\" % data['name'], file=STDERR)\r\n# self.pythonpath = data['pythonpath']\r\n# data['relative_pythonpath'] = self.relative_pythonpath\r\n# for attr in self.CONFIG_ATTR:\r\n# setattr(self, attr, data[attr])\r\n# self.save()\r\n\r\n def save(self):\r\n \"\"\"Save project data\"\"\"\r\n# data = {}\r\n# for attr in self.PROJECT_ATTR:\r\n# data[attr] = getattr(self, attr)\r\n# try:\r\n# with open(self.__get_project_config_path(), 'wb') as fdesc:\r\n# pickle.dump(data, fdesc, 2)\r\n# except (IOError, OSError):\r\n# self.ioerror_flag = True\r\n\r\n# def delete(self):\r\n# \"\"\"Delete project\"\"\"\r\n# os.remove(self.__get_project_config_path())\r\n#\r\n# # --- Misc.\r\n# def get_related_projects(self):\r\n# \"\"\"Return related projects path list\"\"\"\r\n# return self.related_projects\r\n#\r\n# def set_related_projects(self, related_projects):\r\n# \"\"\"Set related projects\"\"\"\r\n# self.related_projects = related_projects\r\n# self.save()\r\n#\r\n# def open(self):\r\n# \"\"\"Open project\"\"\"\r\n# self.opened = True\r\n# self.save()\r\n#\r\n# def close(self):\r\n# \"\"\"Close project\"\"\"\r\n# self.opened = False\r\n# self.save()\r\n#\r\n# def is_opened(self):\r\n# \"\"\"Return True if project is opened\"\"\"\r\n# return self.opened\r\n#\r\n# def is_file_in_project(self, fname):\r\n# \"\"\"Return True if file *fname* is in one of the project subfolders\"\"\"\r\n# fixed_root = fixpath(self.root_path)\r\n# return fixpath(fname) == fixed_root or\\\r\n# fixpath(osp.dirname(fname)).startswith(fixed_root)\r\n#\r\n# def is_root_path(self, dirname):\r\n# \"\"\"Return True if dirname is project's root path\"\"\"\r\n# return fixpath(dirname) == fixpath(self.root_path)\r\n\r\n\r\nclass EmptyProject(BaseProject):\r\n \"\"\"Empty Project\"\"\"\r\n PROJECT_TYPE_NAME = _('Empty project')\r\n", "path": "spyder/widgets/projects/type/__init__.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\r\n# -----------------------------------------------------------------------------\r\n# Copyright \u00a9 Spyder Project Contributors\r\n#\r\n# Licensed under the terms of the MIT License\r\n# (see spyder/__init__.py for details)\r\n# -----------------------------------------------------------------------------\r\n\"\"\"Project types\"\"\"\r\n\r\nimport os\r\nimport os.path as osp\r\nfrom collections import OrderedDict\r\n\r\nfrom spyder.config.base import _\r\nfrom spyder.py3compat import to_text_string\r\nfrom spyder.widgets.projects.config import (ProjectConfig, CODESTYLE,\r\n CODESTYLE_DEFAULTS,\r\n CODESTYLE_VERSION, WORKSPACE,\r\n WORKSPACE_DEFAULTS,\r\n WORKSPACE_VERSION,\r\n ENCODING, ENCODING_DEFAULTS,\r\n ENCODING_VERSION,\r\n VCS, VCS_DEFAULTS, VCS_VERSION)\r\n\r\n\r\nclass BaseProject(object):\r\n \"\"\"Spyder base project.\r\n\r\n This base class must not be used directly, but inherited from. 
It does not\r\n assume that python is specific to this project.\r\n \"\"\"\r\n PROJECT_FOLDER = '.spyproject'\r\n PROJECT_TYPE_NAME = None\r\n IGNORE_FILE = \"\"\r\n CONFIG_SETUP = {WORKSPACE: {'filename': '{0}.ini'.format(WORKSPACE),\r\n 'defaults': WORKSPACE_DEFAULTS,\r\n 'version': WORKSPACE_VERSION},\r\n CODESTYLE: {'filename': '{0}.ini'.format(CODESTYLE),\r\n 'defaults': CODESTYLE_DEFAULTS,\r\n 'version': CODESTYLE_VERSION},\r\n ENCODING: {'filename': '{0}.ini'.format(ENCODING),\r\n 'defaults': ENCODING_DEFAULTS,\r\n 'version': ENCODING_VERSION},\r\n VCS: {'filename': '{0}.ini'.format(VCS),\r\n 'defaults': VCS_DEFAULTS,\r\n 'version': VCS_VERSION}\r\n }\r\n\r\n def __init__(self, root_path):\r\n self.name = None\r\n self.root_path = root_path\r\n self.open_project_files = []\r\n self.open_non_project_files = []\r\n self.config_files = []\r\n self.CONF = {}\r\n\r\n # Configuration files\r\n\r\n self.related_projects = [] # storing project path, not project objects\r\n# self.pythonpath = []\r\n self.opened = True\r\n\r\n self.ioerror_flag = False\r\n self.create_project_config_files()\r\n\r\n # --- Helpers\r\n # -------------------------------------------------------------------------\r\n def set_recent_files(self, recent_files):\r\n \"\"\"Set a list of files opened by the project.\"\"\"\r\n for recent_file in recent_files[:]:\r\n if not os.path.isfile(recent_file):\r\n recent_files.remove(recent_file)\r\n try:\r\n self.CONF[WORKSPACE].set('main', 'recent_files',\r\n list(OrderedDict.fromkeys(recent_files)))\r\n except EnvironmentError:\r\n pass\r\n\r\n def get_recent_files(self):\r\n \"\"\"Return a list of files opened by the project.\"\"\"\r\n try:\r\n recent_files = self.CONF[WORKSPACE].get('main', 'recent_files',\r\n default=[])\r\n except EnvironmentError:\r\n return []\r\n\r\n for recent_file in recent_files[:]:\r\n if not os.path.isfile(recent_file):\r\n recent_files.remove(recent_file)\r\n return list(OrderedDict.fromkeys(recent_files))\r\n\r\n def create_project_config_files(self):\r\n \"\"\" \"\"\"\r\n dic = self.CONFIG_SETUP\r\n for key in dic:\r\n name = key\r\n filename = dic[key]['filename']\r\n defaults = dic[key]['defaults']\r\n version = dic[key]['version']\r\n self.CONF[key] = ProjectConfig(name, self.root_path, filename,\r\n defaults=defaults, load=True,\r\n version=version)\r\n\r\n def get_conf_files(self):\r\n \"\"\" \"\"\"\r\n return self.CONF\r\n\r\n def add_ignore_lines(self, lines):\r\n \"\"\" \"\"\"\r\n text = self.IGNORE_FILE\r\n for line in lines:\r\n text += line\r\n self.IGNORE_FILE = text\r\n\r\n def set_root_path(self, root_path):\r\n \"\"\"Set project root path.\"\"\"\r\n if self.name is None:\r\n self.name = osp.basename(root_path)\r\n self.root_path = to_text_string(root_path)\r\n config_path = self.__get_project_config_path()\r\n if osp.exists(config_path):\r\n self.load()\r\n else:\r\n if not osp.isdir(self.root_path):\r\n os.mkdir(self.root_path)\r\n self.save()\r\n\r\n def rename(self, new_name):\r\n \"\"\"Rename project and rename its root path accordingly.\"\"\"\r\n old_name = self.name\r\n self.name = new_name\r\n pypath = self.relative_pythonpath # ??\r\n self.root_path = self.root_path[:-len(old_name)]+new_name\r\n self.relative_pythonpath = pypath # ??\r\n self.save()\r\n\r\n def __get_project_config_folder(self):\r\n \"\"\"Return project configuration folder.\"\"\"\r\n return osp.join(self.root_path, self.PROJECT_FOLDER)\r\n\r\n def __get_project_config_path(self):\r\n \"\"\"Return project configuration path\"\"\"\r\n return 
osp.join(self.root_path, self.CONFIG_NAME)\r\n\r\n def load(self):\r\n \"\"\"Load project data\"\"\"\r\n# fname = self.__get_project_config_path()\r\n# try:\r\n# # Old format (Spyder 2.0-2.1 for Python 2)\r\n# with open(fname, 'U') as fdesc:\r\n# data = pickle.loads(fdesc.read())\r\n# except (pickle.PickleError, TypeError, UnicodeDecodeError,\r\n# AttributeError):\r\n# try:\r\n# # New format (Spyder >=2.2 for Python 2 and Python 3)\r\n# with open(fname, 'rb') as fdesc:\r\n# data = pickle.loads(fdesc.read())\r\n# except (IOError, OSError, pickle.PickleError):\r\n# self.ioerror_flag = True\r\n# return\r\n # Compatibilty with old project explorer file format:\r\n# if 'relative_pythonpath' not in data:\r\n# print(\"Warning: converting old configuration file \"\r\n# \"for project '%s'\" % data['name'], file=STDERR)\r\n# self.pythonpath = data['pythonpath']\r\n# data['relative_pythonpath'] = self.relative_pythonpath\r\n# for attr in self.CONFIG_ATTR:\r\n# setattr(self, attr, data[attr])\r\n# self.save()\r\n\r\n def save(self):\r\n \"\"\"Save project data\"\"\"\r\n# data = {}\r\n# for attr in self.PROJECT_ATTR:\r\n# data[attr] = getattr(self, attr)\r\n# try:\r\n# with open(self.__get_project_config_path(), 'wb') as fdesc:\r\n# pickle.dump(data, fdesc, 2)\r\n# except (IOError, OSError):\r\n# self.ioerror_flag = True\r\n\r\n# def delete(self):\r\n# \"\"\"Delete project\"\"\"\r\n# os.remove(self.__get_project_config_path())\r\n#\r\n# # --- Misc.\r\n# def get_related_projects(self):\r\n# \"\"\"Return related projects path list\"\"\"\r\n# return self.related_projects\r\n#\r\n# def set_related_projects(self, related_projects):\r\n# \"\"\"Set related projects\"\"\"\r\n# self.related_projects = related_projects\r\n# self.save()\r\n#\r\n# def open(self):\r\n# \"\"\"Open project\"\"\"\r\n# self.opened = True\r\n# self.save()\r\n#\r\n# def close(self):\r\n# \"\"\"Close project\"\"\"\r\n# self.opened = False\r\n# self.save()\r\n#\r\n# def is_opened(self):\r\n# \"\"\"Return True if project is opened\"\"\"\r\n# return self.opened\r\n#\r\n# def is_file_in_project(self, fname):\r\n# \"\"\"Return True if file *fname* is in one of the project subfolders\"\"\"\r\n# fixed_root = fixpath(self.root_path)\r\n# return fixpath(fname) == fixed_root or\\\r\n# fixpath(osp.dirname(fname)).startswith(fixed_root)\r\n#\r\n# def is_root_path(self, dirname):\r\n# \"\"\"Return True if dirname is project's root path\"\"\"\r\n# return fixpath(dirname) == fixpath(self.root_path)\r\n\r\n\r\nclass EmptyProject(BaseProject):\r\n \"\"\"Empty Project\"\"\"\r\n PROJECT_TYPE_NAME = _('Empty project')\r\n", "path": "spyder/widgets/projects/type/__init__.py"}]}
| 3,335 | 279 |
gh_patches_debug_21976
|
rasdani/github-patches
|
git_diff
|
cupy__cupy-3397
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Write docs for Optuna optimization
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cupyx/optimizing/_optimize.py`
Content:
```
1 import contextlib
2 import math
3
4
5 try:
6 import optuna
7 _optuna_available = True
8 except ImportError:
9 _optuna_available = False
10
11
12 from cupy.core import _optimize_config
13 from cupyx import time
14
15
16 def _optimize(
17 optimize_config, target_func, suggest_func,
18 default_best, ignore_error=()):
19 assert isinstance(optimize_config, _optimize_config._OptimizationConfig)
20 assert callable(target_func)
21 assert callable(suggest_func)
22
23 def objective(trial):
24 args = suggest_func(trial)
25 max_total_time = optimize_config.max_total_time_per_trial
26 try:
27 perf = time.repeat(target_func, args, max_duration=max_total_time)
28 return perf.gpu_times.mean()
29 except Exception as e:
30 if isinstance(e, ignore_error):
31 return math.inf
32 else:
33 raise e
34
35 study = optuna.create_study()
36 study.enqueue_trial(default_best)
37 study.optimize(
38 objective,
39 n_trials=optimize_config.max_trials,
40 timeout=optimize_config.timeout)
41 return study.best_trial
42
43
44 @contextlib.contextmanager
45 def optimize(*, key=None, **config_dict):
46 if not _optuna_available:
47 raise RuntimeError(
48 'Optuna is required to run optimization. '
49 'See https://optuna.org/ for the installation instructions.')
50
51 old_context = _optimize_config.get_current_context()
52 context = _optimize_config.get_new_context(key, _optimize, config_dict)
53 _optimize_config.set_current_context(context)
54
55 try:
56 yield context
57 finally:
58 _optimize_config.set_current_context(old_context)
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cupyx/optimizing/_optimize.py b/cupyx/optimizing/_optimize.py
--- a/cupyx/optimizing/_optimize.py
+++ b/cupyx/optimizing/_optimize.py
@@ -43,6 +43,37 @@
@contextlib.contextmanager
def optimize(*, key=None, **config_dict):
+ """Context manager that optimizes kernel launch parameters.
+
+ In this context, CuPy's routines find the best kernel launch parameter
+ values (e.g., the number of threads and blocks). The found values are
+ cached and reused with keys as the shapes, strides and dtypes of the
+ given inputs arrays.
+
+ Args:
+ key (string or None): The cache key of optimizations.
+ max_trials (int): The number of trials that defaults to 100.
+ timeout (float):
+ Stops study after the given number of seconds. Default is 1.
+ max_total_time_per_trial (float):
+ Repeats measuring the execution time of the routine for the
+ given number of seconds. Default is 0.1.
+
+ Examples
+ --------
+ >>> import cupy
+ >>> from cupyx import optimizing
+ >>>
+ >>> x = cupy.arange(100)
+ >>> with optimizing.optimize():
+ ... cupy.sum(x)
+ ...
+ array(4950)
+
+ .. note::
+ Optuna (https://optuna.org) installation is required.
+ Currently it works for reduction operations only.
+ """
if not _optuna_available:
raise RuntimeError(
'Optuna is required to run optimization. '
|
{"golden_diff": "diff --git a/cupyx/optimizing/_optimize.py b/cupyx/optimizing/_optimize.py\n--- a/cupyx/optimizing/_optimize.py\n+++ b/cupyx/optimizing/_optimize.py\n@@ -43,6 +43,37 @@\n \n @contextlib.contextmanager\n def optimize(*, key=None, **config_dict):\n+ \"\"\"Context manager that optimizes kernel launch parameters.\n+\n+ In this context, CuPy's routines find the best kernel launch parameter\n+ values (e.g., the number of threads and blocks). The found values are\n+ cached and reused with keys as the shapes, strides and dtypes of the\n+ given inputs arrays.\n+\n+ Args:\n+ key (string or None): The cache key of optimizations.\n+ max_trials (int): The number of trials that defaults to 100.\n+ timeout (float):\n+ Stops study after the given number of seconds. Default is 1.\n+ max_total_time_per_trial (float):\n+ Repeats measuring the execution time of the routine for the\n+ given number of seconds. Default is 0.1.\n+\n+ Examples\n+ --------\n+ >>> import cupy\n+ >>> from cupyx import optimizing\n+ >>>\n+ >>> x = cupy.arange(100)\n+ >>> with optimizing.optimize():\n+ ... cupy.sum(x)\n+ ...\n+ array(4950)\n+\n+ .. note::\n+ Optuna (https://optuna.org) installation is required.\n+ Currently it works for reduction operations only.\n+ \"\"\"\n if not _optuna_available:\n raise RuntimeError(\n 'Optuna is required to run optimization. '\n", "issue": "Write docs for Optuna optimization\n\n", "before_files": [{"content": "import contextlib\nimport math\n\n\ntry:\n import optuna\n _optuna_available = True\nexcept ImportError:\n _optuna_available = False\n\n\nfrom cupy.core import _optimize_config\nfrom cupyx import time\n\n\ndef _optimize(\n optimize_config, target_func, suggest_func,\n default_best, ignore_error=()):\n assert isinstance(optimize_config, _optimize_config._OptimizationConfig)\n assert callable(target_func)\n assert callable(suggest_func)\n\n def objective(trial):\n args = suggest_func(trial)\n max_total_time = optimize_config.max_total_time_per_trial\n try:\n perf = time.repeat(target_func, args, max_duration=max_total_time)\n return perf.gpu_times.mean()\n except Exception as e:\n if isinstance(e, ignore_error):\n return math.inf\n else:\n raise e\n\n study = optuna.create_study()\n study.enqueue_trial(default_best)\n study.optimize(\n objective,\n n_trials=optimize_config.max_trials,\n timeout=optimize_config.timeout)\n return study.best_trial\n\n\[email protected]\ndef optimize(*, key=None, **config_dict):\n if not _optuna_available:\n raise RuntimeError(\n 'Optuna is required to run optimization. 
'\n 'See https://optuna.org/ for the installation instructions.')\n\n old_context = _optimize_config.get_current_context()\n context = _optimize_config.get_new_context(key, _optimize, config_dict)\n _optimize_config.set_current_context(context)\n\n try:\n yield context\n finally:\n _optimize_config.set_current_context(old_context)\n", "path": "cupyx/optimizing/_optimize.py"}], "after_files": [{"content": "import contextlib\nimport math\n\n\ntry:\n import optuna\n _optuna_available = True\nexcept ImportError:\n _optuna_available = False\n\n\nfrom cupy.core import _optimize_config\nfrom cupyx import time\n\n\ndef _optimize(\n optimize_config, target_func, suggest_func,\n default_best, ignore_error=()):\n assert isinstance(optimize_config, _optimize_config._OptimizationConfig)\n assert callable(target_func)\n assert callable(suggest_func)\n\n def objective(trial):\n args = suggest_func(trial)\n max_total_time = optimize_config.max_total_time_per_trial\n try:\n perf = time.repeat(target_func, args, max_duration=max_total_time)\n return perf.gpu_times.mean()\n except Exception as e:\n if isinstance(e, ignore_error):\n return math.inf\n else:\n raise e\n\n study = optuna.create_study()\n study.enqueue_trial(default_best)\n study.optimize(\n objective,\n n_trials=optimize_config.max_trials,\n timeout=optimize_config.timeout)\n return study.best_trial\n\n\[email protected]\ndef optimize(*, key=None, **config_dict):\n \"\"\"Context manager that optimizes kernel launch parameters.\n\n In this context, CuPy's routines find the best kernel launch parameter\n values (e.g., the number of threads and blocks). The found values are\n cached and reused with keys as the shapes, strides and dtypes of the\n given inputs arrays.\n\n Args:\n key (string or None): The cache key of optimizations.\n max_trials (int): The number of trials that defaults to 100.\n timeout (float):\n Stops study after the given number of seconds. Default is 1.\n max_total_time_per_trial (float):\n Repeats measuring the execution time of the routine for the\n given number of seconds. Default is 0.1.\n\n Examples\n --------\n >>> import cupy\n >>> from cupyx import optimizing\n >>>\n >>> x = cupy.arange(100)\n >>> with optimizing.optimize():\n ... cupy.sum(x)\n ...\n array(4950)\n\n .. note::\n Optuna (https://optuna.org) installation is required.\n Currently it works for reduction operations only.\n \"\"\"\n if not _optuna_available:\n raise RuntimeError(\n 'Optuna is required to run optimization. '\n 'See https://optuna.org/ for the installation instructions.')\n\n old_context = _optimize_config.get_current_context()\n context = _optimize_config.get_new_context(key, _optimize, config_dict)\n _optimize_config.set_current_context(context)\n\n try:\n yield context\n finally:\n _optimize_config.set_current_context(old_context)\n", "path": "cupyx/optimizing/_optimize.py"}]}
| 722 | 376 |
gh_patches_debug_1127
|
rasdani/github-patches
|
git_diff
|
python__mypy-16229
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add setuptools as a dependency on Python 3.12?
Mypyc needs `distutils` or `setuptools` to run, but Python 3.12 no longer bundles `distutils` ([PEP 632](https://peps.python.org/pep-0632/)). This seems to imply that we need to include `setuptools` as a dependency of mypy (at least on Python 3.12 or later), or unbundle mypyc into a separate distribution on PyPI. Thoughts?
--- END ISSUE ---
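For background on the issue above: a version-conditional dependency is commonly expressed with a PEP 508 environment marker. The snippet below is only an illustrative sketch (the package name and the unpinned requirement are invented), not mypy's actual packaging decision; the patch further down in this entry takes a different route and exposes setuptools through an optional extra instead.

```python
# Hypothetical setup.py fragment: pull in setuptools only where the stdlib no
# longer provides distutils (Python 3.12+, per PEP 632).
from setuptools import setup

setup(
    name="example-package",  # invented name, for illustration only
    install_requires=[
        'setuptools; python_version >= "3.12"',  # PEP 508 environment marker
    ],
)
```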
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 from __future__ import annotations
4
5 import glob
6 import os
7 import os.path
8 import sys
9 from typing import TYPE_CHECKING, Any
10
11 if sys.version_info < (3, 8, 0): # noqa: UP036
12 sys.stderr.write("ERROR: You need Python 3.8 or later to use mypy.\n")
13 exit(1)
14
15 # we'll import stuff from the source tree, let's ensure is on the sys path
16 sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
17
18 # This requires setuptools when building; setuptools is not needed
19 # when installing from a wheel file (though it is still needed for
20 # alternative forms of installing, as suggested by README.md).
21 from setuptools import Extension, find_packages, setup
22 from setuptools.command.build_py import build_py
23
24 from mypy.version import __version__ as version
25
26 if TYPE_CHECKING:
27 from typing_extensions import TypeGuard
28
29 description = "Optional static typing for Python"
30 long_description = """
31 Mypy -- Optional Static Typing for Python
32 =========================================
33
34 Add type annotations to your Python programs, and use mypy to type
35 check them. Mypy is essentially a Python linter on steroids, and it
36 can catch many programming errors by analyzing your program, without
37 actually having to run it. Mypy has a powerful type system with
38 features such as type inference, gradual typing, generics and union
39 types.
40 """.lstrip()
41
42
43 def is_list_of_setuptools_extension(items: list[Any]) -> TypeGuard[list[Extension]]:
44 return all(isinstance(item, Extension) for item in items)
45
46
47 def find_package_data(base, globs, root="mypy"):
48 """Find all interesting data files, for setup(package_data=)
49
50 Arguments:
51 root: The directory to search in.
52 globs: A list of glob patterns to accept files.
53 """
54
55 rv_dirs = [root for root, dirs, files in os.walk(base)]
56 rv = []
57 for rv_dir in rv_dirs:
58 files = []
59 for pat in globs:
60 files += glob.glob(os.path.join(rv_dir, pat))
61 if not files:
62 continue
63 rv.extend([os.path.relpath(f, root) for f in files])
64 return rv
65
66
67 class CustomPythonBuild(build_py):
68 def pin_version(self):
69 path = os.path.join(self.build_lib, "mypy")
70 self.mkpath(path)
71 with open(os.path.join(path, "version.py"), "w") as stream:
72 stream.write(f'__version__ = "{version}"\n')
73
74 def run(self):
75 self.execute(self.pin_version, ())
76 build_py.run(self)
77
78
79 cmdclass = {"build_py": CustomPythonBuild}
80
81 package_data = ["py.typed"]
82
83 package_data += find_package_data(os.path.join("mypy", "typeshed"), ["*.py", "*.pyi"])
84 package_data += [os.path.join("mypy", "typeshed", "stdlib", "VERSIONS")]
85
86 package_data += find_package_data(os.path.join("mypy", "xml"), ["*.xsd", "*.xslt", "*.css"])
87
88 USE_MYPYC = False
89 # To compile with mypyc, a mypyc checkout must be present on the PYTHONPATH
90 if len(sys.argv) > 1 and "--use-mypyc" in sys.argv:
91 sys.argv.remove("--use-mypyc")
92 USE_MYPYC = True
93 if os.getenv("MYPY_USE_MYPYC", None) == "1":
94 USE_MYPYC = True
95
96 if USE_MYPYC:
97 MYPYC_BLACKLIST = tuple(
98 os.path.join("mypy", x)
99 for x in (
100 # Need to be runnable as scripts
101 "__main__.py",
102 "pyinfo.py",
103 os.path.join("dmypy", "__main__.py"),
104 # Uses __getattr__/__setattr__
105 "split_namespace.py",
106 # Lies to mypy about code reachability
107 "bogus_type.py",
108 # We don't populate __file__ properly at the top level or something?
109 # Also I think there would be problems with how we generate version.py.
110 "version.py",
111 # Skip these to reduce the size of the build
112 "stubtest.py",
113 "stubgenc.py",
114 "stubdoc.py",
115 "stubutil.py",
116 )
117 ) + (
118 # Don't want to grab this accidentally
119 os.path.join("mypyc", "lib-rt", "setup.py"),
120 # Uses __file__ at top level https://github.com/mypyc/mypyc/issues/700
121 os.path.join("mypyc", "__main__.py"),
122 )
123
124 everything = [os.path.join("mypy", x) for x in find_package_data("mypy", ["*.py"])] + [
125 os.path.join("mypyc", x) for x in find_package_data("mypyc", ["*.py"], root="mypyc")
126 ]
127 # Start with all the .py files
128 all_real_pys = [
129 x for x in everything if not x.startswith(os.path.join("mypy", "typeshed") + os.sep)
130 ]
131 # Strip out anything in our blacklist
132 mypyc_targets = [x for x in all_real_pys if x not in MYPYC_BLACKLIST]
133 # Strip out any test code
134 mypyc_targets = [
135 x
136 for x in mypyc_targets
137 if not x.startswith(
138 (
139 os.path.join("mypy", "test") + os.sep,
140 os.path.join("mypyc", "test") + os.sep,
141 os.path.join("mypyc", "doc") + os.sep,
142 os.path.join("mypyc", "test-data") + os.sep,
143 )
144 )
145 ]
146 # ... and add back in the one test module we need
147 mypyc_targets.append(os.path.join("mypy", "test", "visitors.py"))
148
149 # The targets come out of file system apis in an unspecified
150 # order. Sort them so that the mypyc output is deterministic.
151 mypyc_targets.sort()
152
153 use_other_mypyc = os.getenv("ALTERNATE_MYPYC_PATH", None)
154 if use_other_mypyc:
155 # This bit is super unfortunate: we want to use a different
156 # mypy/mypyc version, but we've already imported parts, so we
157 # remove the modules that we've imported already, which will
158 # let the right versions be imported by mypyc.
159 del sys.modules["mypy"]
160 del sys.modules["mypy.version"]
161 del sys.modules["mypy.git"]
162 sys.path.insert(0, use_other_mypyc)
163
164 from mypyc.build import mypycify
165
166 opt_level = os.getenv("MYPYC_OPT_LEVEL", "3")
167 debug_level = os.getenv("MYPYC_DEBUG_LEVEL", "1")
168 force_multifile = os.getenv("MYPYC_MULTI_FILE", "") == "1"
169 ext_modules = mypycify(
170 mypyc_targets + ["--config-file=mypy_bootstrap.ini"],
171 opt_level=opt_level,
172 debug_level=debug_level,
173 # Use multi-file compilation mode on windows because without it
174 # our Appveyor builds run out of memory sometimes.
175 multi_file=sys.platform == "win32" or force_multifile,
176 )
177 assert is_list_of_setuptools_extension(ext_modules), "Expected mypycify to use setuptools"
178
179 else:
180 ext_modules = []
181
182
183 classifiers = [
184 "Development Status :: 5 - Production/Stable",
185 "Environment :: Console",
186 "Intended Audience :: Developers",
187 "License :: OSI Approved :: MIT License",
188 "Programming Language :: Python :: 3",
189 "Programming Language :: Python :: 3.8",
190 "Programming Language :: Python :: 3.9",
191 "Programming Language :: Python :: 3.10",
192 "Programming Language :: Python :: 3.11",
193 "Topic :: Software Development",
194 "Typing :: Typed",
195 ]
196
197 setup(
198 name="mypy",
199 version=version,
200 description=description,
201 long_description=long_description,
202 author="Jukka Lehtosalo",
203 author_email="[email protected]",
204 url="https://www.mypy-lang.org/",
205 license="MIT License",
206 py_modules=[],
207 ext_modules=ext_modules,
208 packages=find_packages(),
209 package_data={"mypy": package_data},
210 entry_points={
211 "console_scripts": [
212 "mypy=mypy.__main__:console_entry",
213 "stubgen=mypy.stubgen:main",
214 "stubtest=mypy.stubtest:main",
215 "dmypy=mypy.dmypy.client:console_entry",
216 "mypyc=mypyc.__main__:main",
217 ]
218 },
219 classifiers=classifiers,
220 cmdclass=cmdclass,
221 # When changing this, also update mypy-requirements.txt.
222 install_requires=[
223 "typing_extensions>=4.1.0",
224 "mypy_extensions >= 1.0.0",
225 "tomli>=1.1.0; python_version<'3.11'",
226 ],
227 # Same here.
228 extras_require={
229 "dmypy": "psutil >= 4.0",
230 "python2": "",
231 "reports": "lxml",
232 "install-types": "pip",
233 },
234 python_requires=">=3.8",
235 include_package_data=True,
236 project_urls={
237 "News": "https://mypy-lang.org/news.html",
238 "Documentation": "https://mypy.readthedocs.io/en/stable/index.html",
239 "Repository": "https://github.com/python/mypy",
240 },
241 )
242
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -227,6 +227,7 @@
# Same here.
extras_require={
"dmypy": "psutil >= 4.0",
+ "mypyc": "setuptools >= 50",
"python2": "",
"reports": "lxml",
"install-types": "pip",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -227,6 +227,7 @@\n # Same here.\n extras_require={\n \"dmypy\": \"psutil >= 4.0\",\n+ \"mypyc\": \"setuptools >= 50\",\n \"python2\": \"\",\n \"reports\": \"lxml\",\n \"install-types\": \"pip\",\n", "issue": "Add setuptools as a dependency on Python 3.12?\nMypyc needs `distutils` or `setuptools` to run, but Python 3.12 no longer bundles `distutils` ([PEP 632](https://peps.python.org/pep-0632/)). This seems to imply that we need to include `setuptools` as a dependency of mypy (at least on Python 3.12 or later), or unbundle mypyc into a separate distribution on PyPI. Thoughts?\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom __future__ import annotations\n\nimport glob\nimport os\nimport os.path\nimport sys\nfrom typing import TYPE_CHECKING, Any\n\nif sys.version_info < (3, 8, 0): # noqa: UP036\n sys.stderr.write(\"ERROR: You need Python 3.8 or later to use mypy.\\n\")\n exit(1)\n\n# we'll import stuff from the source tree, let's ensure is on the sys path\nsys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))\n\n# This requires setuptools when building; setuptools is not needed\n# when installing from a wheel file (though it is still needed for\n# alternative forms of installing, as suggested by README.md).\nfrom setuptools import Extension, find_packages, setup\nfrom setuptools.command.build_py import build_py\n\nfrom mypy.version import __version__ as version\n\nif TYPE_CHECKING:\n from typing_extensions import TypeGuard\n\ndescription = \"Optional static typing for Python\"\nlong_description = \"\"\"\nMypy -- Optional Static Typing for Python\n=========================================\n\nAdd type annotations to your Python programs, and use mypy to type\ncheck them. Mypy is essentially a Python linter on steroids, and it\ncan catch many programming errors by analyzing your program, without\nactually having to run it. 
Mypy has a powerful type system with\nfeatures such as type inference, gradual typing, generics and union\ntypes.\n\"\"\".lstrip()\n\n\ndef is_list_of_setuptools_extension(items: list[Any]) -> TypeGuard[list[Extension]]:\n return all(isinstance(item, Extension) for item in items)\n\n\ndef find_package_data(base, globs, root=\"mypy\"):\n \"\"\"Find all interesting data files, for setup(package_data=)\n\n Arguments:\n root: The directory to search in.\n globs: A list of glob patterns to accept files.\n \"\"\"\n\n rv_dirs = [root for root, dirs, files in os.walk(base)]\n rv = []\n for rv_dir in rv_dirs:\n files = []\n for pat in globs:\n files += glob.glob(os.path.join(rv_dir, pat))\n if not files:\n continue\n rv.extend([os.path.relpath(f, root) for f in files])\n return rv\n\n\nclass CustomPythonBuild(build_py):\n def pin_version(self):\n path = os.path.join(self.build_lib, \"mypy\")\n self.mkpath(path)\n with open(os.path.join(path, \"version.py\"), \"w\") as stream:\n stream.write(f'__version__ = \"{version}\"\\n')\n\n def run(self):\n self.execute(self.pin_version, ())\n build_py.run(self)\n\n\ncmdclass = {\"build_py\": CustomPythonBuild}\n\npackage_data = [\"py.typed\"]\n\npackage_data += find_package_data(os.path.join(\"mypy\", \"typeshed\"), [\"*.py\", \"*.pyi\"])\npackage_data += [os.path.join(\"mypy\", \"typeshed\", \"stdlib\", \"VERSIONS\")]\n\npackage_data += find_package_data(os.path.join(\"mypy\", \"xml\"), [\"*.xsd\", \"*.xslt\", \"*.css\"])\n\nUSE_MYPYC = False\n# To compile with mypyc, a mypyc checkout must be present on the PYTHONPATH\nif len(sys.argv) > 1 and \"--use-mypyc\" in sys.argv:\n sys.argv.remove(\"--use-mypyc\")\n USE_MYPYC = True\nif os.getenv(\"MYPY_USE_MYPYC\", None) == \"1\":\n USE_MYPYC = True\n\nif USE_MYPYC:\n MYPYC_BLACKLIST = tuple(\n os.path.join(\"mypy\", x)\n for x in (\n # Need to be runnable as scripts\n \"__main__.py\",\n \"pyinfo.py\",\n os.path.join(\"dmypy\", \"__main__.py\"),\n # Uses __getattr__/__setattr__\n \"split_namespace.py\",\n # Lies to mypy about code reachability\n \"bogus_type.py\",\n # We don't populate __file__ properly at the top level or something?\n # Also I think there would be problems with how we generate version.py.\n \"version.py\",\n # Skip these to reduce the size of the build\n \"stubtest.py\",\n \"stubgenc.py\",\n \"stubdoc.py\",\n \"stubutil.py\",\n )\n ) + (\n # Don't want to grab this accidentally\n os.path.join(\"mypyc\", \"lib-rt\", \"setup.py\"),\n # Uses __file__ at top level https://github.com/mypyc/mypyc/issues/700\n os.path.join(\"mypyc\", \"__main__.py\"),\n )\n\n everything = [os.path.join(\"mypy\", x) for x in find_package_data(\"mypy\", [\"*.py\"])] + [\n os.path.join(\"mypyc\", x) for x in find_package_data(\"mypyc\", [\"*.py\"], root=\"mypyc\")\n ]\n # Start with all the .py files\n all_real_pys = [\n x for x in everything if not x.startswith(os.path.join(\"mypy\", \"typeshed\") + os.sep)\n ]\n # Strip out anything in our blacklist\n mypyc_targets = [x for x in all_real_pys if x not in MYPYC_BLACKLIST]\n # Strip out any test code\n mypyc_targets = [\n x\n for x in mypyc_targets\n if not x.startswith(\n (\n os.path.join(\"mypy\", \"test\") + os.sep,\n os.path.join(\"mypyc\", \"test\") + os.sep,\n os.path.join(\"mypyc\", \"doc\") + os.sep,\n os.path.join(\"mypyc\", \"test-data\") + os.sep,\n )\n )\n ]\n # ... and add back in the one test module we need\n mypyc_targets.append(os.path.join(\"mypy\", \"test\", \"visitors.py\"))\n\n # The targets come out of file system apis in an unspecified\n # order. 
Sort them so that the mypyc output is deterministic.\n mypyc_targets.sort()\n\n use_other_mypyc = os.getenv(\"ALTERNATE_MYPYC_PATH\", None)\n if use_other_mypyc:\n # This bit is super unfortunate: we want to use a different\n # mypy/mypyc version, but we've already imported parts, so we\n # remove the modules that we've imported already, which will\n # let the right versions be imported by mypyc.\n del sys.modules[\"mypy\"]\n del sys.modules[\"mypy.version\"]\n del sys.modules[\"mypy.git\"]\n sys.path.insert(0, use_other_mypyc)\n\n from mypyc.build import mypycify\n\n opt_level = os.getenv(\"MYPYC_OPT_LEVEL\", \"3\")\n debug_level = os.getenv(\"MYPYC_DEBUG_LEVEL\", \"1\")\n force_multifile = os.getenv(\"MYPYC_MULTI_FILE\", \"\") == \"1\"\n ext_modules = mypycify(\n mypyc_targets + [\"--config-file=mypy_bootstrap.ini\"],\n opt_level=opt_level,\n debug_level=debug_level,\n # Use multi-file compilation mode on windows because without it\n # our Appveyor builds run out of memory sometimes.\n multi_file=sys.platform == \"win32\" or force_multifile,\n )\n assert is_list_of_setuptools_extension(ext_modules), \"Expected mypycify to use setuptools\"\n\nelse:\n ext_modules = []\n\n\nclassifiers = [\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Topic :: Software Development\",\n \"Typing :: Typed\",\n]\n\nsetup(\n name=\"mypy\",\n version=version,\n description=description,\n long_description=long_description,\n author=\"Jukka Lehtosalo\",\n author_email=\"[email protected]\",\n url=\"https://www.mypy-lang.org/\",\n license=\"MIT License\",\n py_modules=[],\n ext_modules=ext_modules,\n packages=find_packages(),\n package_data={\"mypy\": package_data},\n entry_points={\n \"console_scripts\": [\n \"mypy=mypy.__main__:console_entry\",\n \"stubgen=mypy.stubgen:main\",\n \"stubtest=mypy.stubtest:main\",\n \"dmypy=mypy.dmypy.client:console_entry\",\n \"mypyc=mypyc.__main__:main\",\n ]\n },\n classifiers=classifiers,\n cmdclass=cmdclass,\n # When changing this, also update mypy-requirements.txt.\n install_requires=[\n \"typing_extensions>=4.1.0\",\n \"mypy_extensions >= 1.0.0\",\n \"tomli>=1.1.0; python_version<'3.11'\",\n ],\n # Same here.\n extras_require={\n \"dmypy\": \"psutil >= 4.0\",\n \"python2\": \"\",\n \"reports\": \"lxml\",\n \"install-types\": \"pip\",\n },\n python_requires=\">=3.8\",\n include_package_data=True,\n project_urls={\n \"News\": \"https://mypy-lang.org/news.html\",\n \"Documentation\": \"https://mypy.readthedocs.io/en/stable/index.html\",\n \"Repository\": \"https://github.com/python/mypy\",\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nfrom __future__ import annotations\n\nimport glob\nimport os\nimport os.path\nimport sys\nfrom typing import TYPE_CHECKING, Any\n\nif sys.version_info < (3, 8, 0):\n sys.stderr.write(\"ERROR: You need Python 3.8 or later to use mypy.\\n\")\n exit(1)\n\n# we'll import stuff from the source tree, let's ensure is on the sys path\nsys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))\n\n# This requires setuptools when building; setuptools is not needed\n# when installing from a wheel file (though it is still needed for\n# alternative forms of installing, as 
suggested by README.md).\nfrom setuptools import Extension, find_packages, setup\nfrom setuptools.command.build_py import build_py\n\nfrom mypy.version import __version__ as version\n\nif TYPE_CHECKING:\n from typing_extensions import TypeGuard\n\ndescription = \"Optional static typing for Python\"\nlong_description = \"\"\"\nMypy -- Optional Static Typing for Python\n=========================================\n\nAdd type annotations to your Python programs, and use mypy to type\ncheck them. Mypy is essentially a Python linter on steroids, and it\ncan catch many programming errors by analyzing your program, without\nactually having to run it. Mypy has a powerful type system with\nfeatures such as type inference, gradual typing, generics and union\ntypes.\n\"\"\".lstrip()\n\n\ndef is_list_of_setuptools_extension(items: list[Any]) -> TypeGuard[list[Extension]]:\n return all(isinstance(item, Extension) for item in items)\n\n\ndef find_package_data(base, globs, root=\"mypy\"):\n \"\"\"Find all interesting data files, for setup(package_data=)\n\n Arguments:\n root: The directory to search in.\n globs: A list of glob patterns to accept files.\n \"\"\"\n\n rv_dirs = [root for root, dirs, files in os.walk(base)]\n rv = []\n for rv_dir in rv_dirs:\n files = []\n for pat in globs:\n files += glob.glob(os.path.join(rv_dir, pat))\n if not files:\n continue\n rv.extend([os.path.relpath(f, root) for f in files])\n return rv\n\n\nclass CustomPythonBuild(build_py):\n def pin_version(self):\n path = os.path.join(self.build_lib, \"mypy\")\n self.mkpath(path)\n with open(os.path.join(path, \"version.py\"), \"w\") as stream:\n stream.write(f'__version__ = \"{version}\"\\n')\n\n def run(self):\n self.execute(self.pin_version, ())\n build_py.run(self)\n\n\ncmdclass = {\"build_py\": CustomPythonBuild}\n\npackage_data = [\"py.typed\"]\n\npackage_data += find_package_data(os.path.join(\"mypy\", \"typeshed\"), [\"*.py\", \"*.pyi\"])\npackage_data += [os.path.join(\"mypy\", \"typeshed\", \"stdlib\", \"VERSIONS\")]\n\npackage_data += find_package_data(os.path.join(\"mypy\", \"xml\"), [\"*.xsd\", \"*.xslt\", \"*.css\"])\n\nUSE_MYPYC = False\n# To compile with mypyc, a mypyc checkout must be present on the PYTHONPATH\nif len(sys.argv) > 1 and \"--use-mypyc\" in sys.argv:\n sys.argv.remove(\"--use-mypyc\")\n USE_MYPYC = True\nif os.getenv(\"MYPY_USE_MYPYC\", None) == \"1\":\n USE_MYPYC = True\n\nif USE_MYPYC:\n MYPYC_BLACKLIST = tuple(\n os.path.join(\"mypy\", x)\n for x in (\n # Need to be runnable as scripts\n \"__main__.py\",\n \"pyinfo.py\",\n os.path.join(\"dmypy\", \"__main__.py\"),\n # Uses __getattr__/__setattr__\n \"split_namespace.py\",\n # Lies to mypy about code reachability\n \"bogus_type.py\",\n # We don't populate __file__ properly at the top level or something?\n # Also I think there would be problems with how we generate version.py.\n \"version.py\",\n # Skip these to reduce the size of the build\n \"stubtest.py\",\n \"stubgenc.py\",\n \"stubdoc.py\",\n \"stubutil.py\",\n )\n ) + (\n # Don't want to grab this accidentally\n os.path.join(\"mypyc\", \"lib-rt\", \"setup.py\"),\n # Uses __file__ at top level https://github.com/mypyc/mypyc/issues/700\n os.path.join(\"mypyc\", \"__main__.py\"),\n )\n\n everything = [os.path.join(\"mypy\", x) for x in find_package_data(\"mypy\", [\"*.py\"])] + [\n os.path.join(\"mypyc\", x) for x in find_package_data(\"mypyc\", [\"*.py\"], root=\"mypyc\")\n ]\n # Start with all the .py files\n all_real_pys = [\n x for x in everything if not x.startswith(os.path.join(\"mypy\", 
\"typeshed\") + os.sep)\n ]\n # Strip out anything in our blacklist\n mypyc_targets = [x for x in all_real_pys if x not in MYPYC_BLACKLIST]\n # Strip out any test code\n mypyc_targets = [\n x\n for x in mypyc_targets\n if not x.startswith(\n (\n os.path.join(\"mypy\", \"test\") + os.sep,\n os.path.join(\"mypyc\", \"test\") + os.sep,\n os.path.join(\"mypyc\", \"doc\") + os.sep,\n os.path.join(\"mypyc\", \"test-data\") + os.sep,\n )\n )\n ]\n # ... and add back in the one test module we need\n mypyc_targets.append(os.path.join(\"mypy\", \"test\", \"visitors.py\"))\n\n # The targets come out of file system apis in an unspecified\n # order. Sort them so that the mypyc output is deterministic.\n mypyc_targets.sort()\n\n use_other_mypyc = os.getenv(\"ALTERNATE_MYPYC_PATH\", None)\n if use_other_mypyc:\n # This bit is super unfortunate: we want to use a different\n # mypy/mypyc version, but we've already imported parts, so we\n # remove the modules that we've imported already, which will\n # let the right versions be imported by mypyc.\n del sys.modules[\"mypy\"]\n del sys.modules[\"mypy.version\"]\n del sys.modules[\"mypy.git\"]\n sys.path.insert(0, use_other_mypyc)\n\n from mypyc.build import mypycify\n\n opt_level = os.getenv(\"MYPYC_OPT_LEVEL\", \"3\")\n debug_level = os.getenv(\"MYPYC_DEBUG_LEVEL\", \"1\")\n force_multifile = os.getenv(\"MYPYC_MULTI_FILE\", \"\") == \"1\"\n ext_modules = mypycify(\n mypyc_targets + [\"--config-file=mypy_bootstrap.ini\"],\n opt_level=opt_level,\n debug_level=debug_level,\n # Use multi-file compilation mode on windows because without it\n # our Appveyor builds run out of memory sometimes.\n multi_file=sys.platform == \"win32\" or force_multifile,\n )\n assert is_list_of_setuptools_extension(ext_modules), \"Expected mypycify to use setuptools\"\n\nelse:\n ext_modules = []\n\n\nclassifiers = [\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Topic :: Software Development\",\n \"Typing :: Typed\",\n]\n\nsetup(\n name=\"mypy\",\n version=version,\n description=description,\n long_description=long_description,\n author=\"Jukka Lehtosalo\",\n author_email=\"[email protected]\",\n url=\"https://www.mypy-lang.org/\",\n license=\"MIT License\",\n py_modules=[],\n ext_modules=ext_modules,\n packages=find_packages(),\n package_data={\"mypy\": package_data},\n entry_points={\n \"console_scripts\": [\n \"mypy=mypy.__main__:console_entry\",\n \"stubgen=mypy.stubgen:main\",\n \"stubtest=mypy.stubtest:main\",\n \"dmypy=mypy.dmypy.client:console_entry\",\n \"mypyc=mypyc.__main__:main\",\n ]\n },\n classifiers=classifiers,\n cmdclass=cmdclass,\n # When changing this, also update mypy-requirements.txt.\n install_requires=[\n \"typing_extensions>=4.1.0\",\n \"mypy_extensions >= 1.0.0\",\n \"tomli>=1.1.0; python_version<'3.11'\",\n ],\n # Same here.\n extras_require={\n \"dmypy\": \"psutil >= 4.0\",\n \"mypyc\": \"setuptools >= 50\",\n \"python2\": \"\",\n \"reports\": \"lxml\",\n \"install-types\": \"pip\",\n },\n python_requires=\">=3.8\",\n include_package_data=True,\n project_urls={\n \"News\": \"https://mypy-lang.org/news.html\",\n \"Documentation\": \"https://mypy.readthedocs.io/en/stable/index.html\",\n \"Repository\": 
\"https://github.com/python/mypy\",\n },\n)\n", "path": "setup.py"}]}
| 3,105 | 93 |
gh_patches_debug_37357
|
rasdani/github-patches
|
git_diff
|
plone__Products.CMFPlone-3792
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can't add a user in UserGroupsControlpanelView to a Group with pas.plugin.ldap with plone.many_users=True
### What I did:
- add plone group `testgroup1` via /@@usergroup-groupprefs
- set registry entries for `plone.many_users` and `plone.many_groups` to `True`
- add a Folder
- go to the folder and call the sharing view /myfolder/@@sharing
- add the group `testgroup1` with reader, editor roles
- click on the group to add some users `/@@usergroup-groupmembership?groupname=testgroup1`
- search for a user
- select a user (it should be an LDAP user) and save
### What I expect to happen:
- the user is added to the group
### What actually happened:
- endless waiting for a response after clicking the "Add" button
### What version of Plone/ Addons I am using:
- Plone 6.0.4
- pas.plugin.ldap 1.8.2
- node.ext.ldap 1.2
- node 1.2.1
- LDAP Backend user objects > 10000
### Some investigations
- in the ZMI acl_users -> source_groups the action is possible without errors
- with log level DEBUG I see thousands of queries to the LDAP backend
- the task runs for many minutes
- it's a limitation of [pas.plugin.ldap](https://github.com/collective/pas.plugins.ldap#limitations-and-future-optimizations)
The [GroupMembershipControlPanel](https://github.com/plone/Products.CMFPlone/blob/a5b48c0c24e6eebbe01aa2874eaaa9aa3d49f155/Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py#L65) cleans the search string in the request and passes it to the membership search in UsersGroupsControlPanelView.
An empty search string is effectively a request to "list all users" and therefore violates the option `many_users = True`. The search should not be performed with an empty search string.
With the following patch, all is fine ;-)
```
if searchGroups:
if not self.many_groups or bool(searchString):
groupResults = searchView.merge(chain(*[searchView.searchGroups(**{field: searchString}) for field in ['id', 'title']]), 'groupid')
groupResults = [gtool.getGroupById(g['id']) for g in groupResults if g['id'] not in ignore]
groupResults.sort(key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName()))
if searchUsers:
if not self.many_users or bool(searchString):
userResults = searchView.merge(chain(*[searchView.searchUsers(**{field: searchString}) for field in ['login', 'fullname', 'email']]), 'userid')
userResults = [mtool.getMemberById(u['id']) for u in userResults if u['id'] not in ignore]
userResults.sort(key=lambda x: x is not None and x.getProperty('fullname') is not None and normalizeString(x.getProperty('fullname')) or '')
```
--- END ISSUE ---
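The condition guarding both branches of the patch suggested in the issue above reduces to a single predicate. The helper below is purely illustrative (it is not part of Products.CMFPlone); it only restates that logic so the intended behaviour is easy to check:

```python
def should_run_search(many_flag: bool, search_string: str) -> bool:
    """Return True if the potentially expensive backend search may run.

    With many_users/many_groups enabled, an empty search string must not
    trigger a "list everything" query against the (LDAP) backend.
    """
    return not many_flag or bool(search_string)


# Spot checks of the intended behaviour:
assert should_run_search(False, "") is True        # few users: listing all is fine
assert should_run_search(False, "jane") is True    # normal search with a term
assert should_run_search(True, "") is False        # many users: never list everything
assert should_run_search(True, "jane") is True     # many users: search only with a term
```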
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Products/CMFPlone/controlpanel/browser/usergroups.py`
Content:
```
1 from AccessControl import getSecurityManager
2 from Acquisition import aq_inner
3 from itertools import chain
4 from plone.autoform.form import AutoExtensibleForm
5 from plone.base import PloneMessageFactory as _
6 from plone.base.interfaces import ISecuritySchema
7 from plone.base.interfaces import IUserGroupsSettingsSchema
8 from plone.z3cform import layout
9 from Products.CMFCore.permissions import ManagePortal
10 from Products.CMFCore.utils import getToolByName
11 from Products.CMFPlone.utils import normalizeString
12 from Products.Five.browser import BrowserView
13 from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
14 from z3c.form import button
15 from z3c.form import form
16 from zope.component import getAdapter
17 from zope.component import getMultiAdapter
18 from ZTUtils import make_query
19
20
21 class UserGroupsSettingsControlPanel(AutoExtensibleForm, form.EditForm):
22 schema = IUserGroupsSettingsSchema
23 id = "usergroupsettings-control-panel"
24 label = _("Users and Groups")
25 form_name = _("User/Groups settings")
26 control_panel_view = "usergroups-controlpanel"
27
28 @button.buttonAndHandler(_("label_save", default="Save"), name="save")
29 def handleApply(self, action):
30 super().handleApply(self, action)
31
32 def updateActions(self):
33 super().updateActions()
34 if self.actions and "save" in self.actions:
35 self.actions["save"].addClass("btn-primary")
36
37
38 class ControlPanelFormWrapper(layout.FormWrapper):
39 """Use this form as the plone.z3cform layout wrapper to get the control
40 panel layout.
41 """
42
43 index = ViewPageTemplateFile("controlpanel_usergroups_layout.pt")
44
45
46 UserGroupsSettingsPanelView = layout.wrap_form(
47 UserGroupsSettingsControlPanel, ControlPanelFormWrapper
48 )
49
50
51 class UsersGroupsControlPanelView(BrowserView):
52 @property
53 def portal_roles(self):
54 pmemb = getToolByName(aq_inner(self.context), "portal_membership")
55 return [r for r in pmemb.getPortalRoles() if r != "Owner"]
56
57 @property
58 def many_users(self):
59 return getAdapter(aq_inner(self.context), IUserGroupsSettingsSchema).many_users
60
61 @property
62 def many_groups(self):
63 return getAdapter(aq_inner(self.context), IUserGroupsSettingsSchema).many_groups
64
65 @property
66 def email_as_username(self):
67 return getAdapter(
68 aq_inner(self.context), ISecuritySchema
69 ).get_use_email_as_login()
70
71 def makeQuery(self, **kw):
72 return make_query(**kw)
73
74 def membershipSearch(
75 self, searchString="", searchUsers=True, searchGroups=True, ignore=[]
76 ):
77 """Search for users and/or groups, returning actual member and group items
78 Replaces the now-deprecated prefs_user_groups_search.py script"""
79 groupResults = userResults = []
80
81 gtool = getToolByName(self, "portal_groups")
82 mtool = getToolByName(self, "portal_membership")
83
84 searchView = getMultiAdapter(
85 (aq_inner(self.context), self.request), name="pas_search"
86 )
87
88 if searchGroups:
89 groupResults = searchView.merge(
90 chain(
91 *[
92 searchView.searchGroups(**{field: searchString})
93 for field in ["id", "title"]
94 ]
95 ),
96 "groupid",
97 )
98 groupResults = [
99 gtool.getGroupById(g["id"])
100 for g in groupResults
101 if g["id"] not in ignore
102 ]
103 groupResults.sort(
104 key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName())
105 )
106
107 if searchUsers:
108 userResults = searchView.merge(
109 chain(
110 *[
111 searchView.searchUsers(**{field: searchString})
112 for field in ["login", "fullname", "email"]
113 ]
114 ),
115 "userid",
116 )
117 userResults = [
118 mtool.getMemberById(u["id"])
119 for u in userResults
120 if u["id"] not in ignore
121 ]
122 userResults.sort(
123 key=lambda x: x is not None
124 and x.getProperty("fullname") is not None
125 and normalizeString(x.getProperty("fullname"))
126 or ""
127 )
128
129 return groupResults + userResults
130
131 def atoi(self, s):
132 try:
133 return int(s)
134 except ValueError:
135 return 0
136
137 @property
138 def is_zope_manager(self):
139 return getSecurityManager().checkPermission(ManagePortal, self.context)
140
141 # The next two class methods implement the following truth table:
142 #
143 # MANY USERS/GROUPS SEARCHING CAN LIST USERS/GROUPS RESULT
144 # False False False Lists unavailable
145 # False False True Show all
146 # False True False Show matching
147 # False True True Show matching
148 # True False False Too many to list
149 # True False True Lists unavailable
150 # True True False Show matching
151 # True True True Show matching
152
153 # TODO: Maybe have these methods return a text message (instead of a bool)
154 # corresponding to the actual result, e.g. "Too many to list", "Lists
155 # unavailable"
156
157 @property
158 def show_group_listing_warning(self):
159 if not self.searchString:
160 acl = getToolByName(self, "acl_users")
161 if acl.canListAllGroups():
162 if self.many_groups:
163 return True
164 return False
165
166 @property
167 def show_users_listing_warning(self):
168 if not self.searchString:
169 acl = getToolByName(self, "acl_users")
170 # XXX Huh? Is canListAllUsers broken?
171 if not acl.canListAllUsers():
172 if self.many_users:
173 return True
174 return False
175
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/Products/CMFPlone/controlpanel/browser/usergroups.py b/Products/CMFPlone/controlpanel/browser/usergroups.py
--- a/Products/CMFPlone/controlpanel/browser/usergroups.py
+++ b/Products/CMFPlone/controlpanel/browser/usergroups.py
@@ -86,45 +86,49 @@
)
if searchGroups:
- groupResults = searchView.merge(
- chain(
- *[
- searchView.searchGroups(**{field: searchString})
- for field in ["id", "title"]
- ]
- ),
- "groupid",
- )
- groupResults = [
- gtool.getGroupById(g["id"])
- for g in groupResults
- if g["id"] not in ignore
- ]
- groupResults.sort(
- key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName())
- )
+ # Only search for all ('') if the many_users flag is not set.
+ if not (self.many_groups) or bool(self.searchString):
+ groupResults = searchView.merge(
+ chain(
+ *[
+ searchView.searchGroups(**{field: searchString})
+ for field in ["id", "title"]
+ ]
+ ),
+ "groupid",
+ )
+ groupResults = [
+ gtool.getGroupById(g["id"])
+ for g in groupResults
+ if g["id"] not in ignore
+ ]
+ groupResults.sort(
+ key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName())
+ )
if searchUsers:
- userResults = searchView.merge(
- chain(
- *[
- searchView.searchUsers(**{field: searchString})
- for field in ["login", "fullname", "email"]
- ]
- ),
- "userid",
- )
- userResults = [
- mtool.getMemberById(u["id"])
- for u in userResults
- if u["id"] not in ignore
- ]
- userResults.sort(
- key=lambda x: x is not None
- and x.getProperty("fullname") is not None
- and normalizeString(x.getProperty("fullname"))
- or ""
- )
+ # Only search for all ('') if the many_users flag is not set.
+ if not (self.many_users) or bool(self.searchString):
+ userResults = searchView.merge(
+ chain(
+ *[
+ searchView.searchUsers(**{field: searchString})
+ for field in ["login", "fullname", "email"]
+ ]
+ ),
+ "userid",
+ )
+ userResults = [
+ mtool.getMemberById(u["id"])
+ for u in userResults
+ if u["id"] not in ignore
+ ]
+ userResults.sort(
+ key=lambda x: x is not None
+ and x.getProperty("fullname") is not None
+ and normalizeString(x.getProperty("fullname"))
+ or ""
+ )
return groupResults + userResults
|
{"golden_diff": "diff --git a/Products/CMFPlone/controlpanel/browser/usergroups.py b/Products/CMFPlone/controlpanel/browser/usergroups.py\n--- a/Products/CMFPlone/controlpanel/browser/usergroups.py\n+++ b/Products/CMFPlone/controlpanel/browser/usergroups.py\n@@ -86,45 +86,49 @@\n )\n \n if searchGroups:\n- groupResults = searchView.merge(\n- chain(\n- *[\n- searchView.searchGroups(**{field: searchString})\n- for field in [\"id\", \"title\"]\n- ]\n- ),\n- \"groupid\",\n- )\n- groupResults = [\n- gtool.getGroupById(g[\"id\"])\n- for g in groupResults\n- if g[\"id\"] not in ignore\n- ]\n- groupResults.sort(\n- key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName())\n- )\n+ # Only search for all ('') if the many_users flag is not set.\n+ if not (self.many_groups) or bool(self.searchString):\n+ groupResults = searchView.merge(\n+ chain(\n+ *[\n+ searchView.searchGroups(**{field: searchString})\n+ for field in [\"id\", \"title\"]\n+ ]\n+ ),\n+ \"groupid\",\n+ )\n+ groupResults = [\n+ gtool.getGroupById(g[\"id\"])\n+ for g in groupResults\n+ if g[\"id\"] not in ignore\n+ ]\n+ groupResults.sort(\n+ key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName())\n+ )\n \n if searchUsers:\n- userResults = searchView.merge(\n- chain(\n- *[\n- searchView.searchUsers(**{field: searchString})\n- for field in [\"login\", \"fullname\", \"email\"]\n- ]\n- ),\n- \"userid\",\n- )\n- userResults = [\n- mtool.getMemberById(u[\"id\"])\n- for u in userResults\n- if u[\"id\"] not in ignore\n- ]\n- userResults.sort(\n- key=lambda x: x is not None\n- and x.getProperty(\"fullname\") is not None\n- and normalizeString(x.getProperty(\"fullname\"))\n- or \"\"\n- )\n+ # Only search for all ('') if the many_users flag is not set.\n+ if not (self.many_users) or bool(self.searchString):\n+ userResults = searchView.merge(\n+ chain(\n+ *[\n+ searchView.searchUsers(**{field: searchString})\n+ for field in [\"login\", \"fullname\", \"email\"]\n+ ]\n+ ),\n+ \"userid\",\n+ )\n+ userResults = [\n+ mtool.getMemberById(u[\"id\"])\n+ for u in userResults\n+ if u[\"id\"] not in ignore\n+ ]\n+ userResults.sort(\n+ key=lambda x: x is not None\n+ and x.getProperty(\"fullname\") is not None\n+ and normalizeString(x.getProperty(\"fullname\"))\n+ or \"\"\n+ )\n \n return groupResults + userResults\n", "issue": "Can't add an user in UserGroupsControlpanelView to a Group with pas.plugin.ldap with plone.many_users=True\n### What I did:\r\n\r\n- add plone group `testgroup1` via /@@usergroup-groupprefs\r\n- set registry entries for `plone.many_users` and `plone.many_groups` to `True`\r\n- add a Folder\r\n- go to the folder and call the sharing view /myfolder/@@sharing\r\n- add the group `testgroup1` with reader, editor roles\r\n- click on the group to add some users `/@@usergroup-groupmembership?groupname=testgroup1`\r\n- search a user\r\n- select a user (should be a LDAP User) and save\r\n\r\n### What I expect to happen:\r\n- the user is added to the group\r\n\r\n### What actually happened:\r\n- endless waiting for response after click on \"Add\" Button\r\n\r\n### What version of Plone/ Addons I am using:\r\n\r\n- Plone 6.0.4\r\n- pas.plugin.ldap 1.8.2\r\n- node.ext.ldap 1.2\r\n- node 1.2.1\r\n- LDAP Backend user objects > 10000\r\n\r\n### Some investigations\r\n\r\n- in the ZMI acl_users -> source_groups the action is possible without errors\r\n- with loglevel DEBUG i see thousands of queries to the LDAP Backend\r\n- the task run many minutes\r\n- its a limitation of 
[pas.plugin.ldap](https://github.com/collective/pas.plugins.ldap#limitations-and-future-optimizations)\r\n \r\n\r\nthe [GroupMembershipControlPanel](https://github.com/plone/Products.CMFPlone/blob/a5b48c0c24e6eebbe01aa2874eaaa9aa3d49f155/Products/CMFPlone/controlpanel/browser/usergroups_groupmembership.py#L65) clean the searchstring in the request and pass it to the membership search in UsersGroupsControlPanelView.\r\n\r\nAn empty searchstring is like a intention \"list all users\" and a violation against the option `many_users = True`. The search with empty searchstring should not be performed.\r\n\r\nWith the following patch, all is fine ;-)\r\n\r\n```\r\nif searchGroups:\r\n if not self.many_groups or bool(searchString):\r\n groupResults = searchView.merge(chain(*[searchView.searchGroups(**{field: searchString}) for field in ['id', 'title']]), 'groupid')\r\n groupResults = [gtool.getGroupById(g['id']) for g in groupResults if g['id'] not in ignore]\r\n groupResults.sort(key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName()))\r\n \r\n if searchUsers:\r\n if not self.many_users or bool(searchString):\r\n userResults = searchView.merge(chain(*[searchView.searchUsers(**{field: searchString}) for field in ['login', 'fullname', 'email']]), 'userid')\r\n userResults = [mtool.getMemberById(u['id']) for u in userResults if u['id'] not in ignore]\r\n userResults.sort(key=lambda x: x is not None and x.getProperty('fullname') is not None and normalizeString(x.getProperty('fullname')) or '')\r\n```\r\n\n", "before_files": [{"content": "from AccessControl import getSecurityManager\nfrom Acquisition import aq_inner\nfrom itertools import chain\nfrom plone.autoform.form import AutoExtensibleForm\nfrom plone.base import PloneMessageFactory as _\nfrom plone.base.interfaces import ISecuritySchema\nfrom plone.base.interfaces import IUserGroupsSettingsSchema\nfrom plone.z3cform import layout\nfrom Products.CMFCore.permissions import ManagePortal\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone.utils import normalizeString\nfrom Products.Five.browser import BrowserView\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\nfrom z3c.form import button\nfrom z3c.form import form\nfrom zope.component import getAdapter\nfrom zope.component import getMultiAdapter\nfrom ZTUtils import make_query\n\n\nclass UserGroupsSettingsControlPanel(AutoExtensibleForm, form.EditForm):\n schema = IUserGroupsSettingsSchema\n id = \"usergroupsettings-control-panel\"\n label = _(\"Users and Groups\")\n form_name = _(\"User/Groups settings\")\n control_panel_view = \"usergroups-controlpanel\"\n\n @button.buttonAndHandler(_(\"label_save\", default=\"Save\"), name=\"save\")\n def handleApply(self, action):\n super().handleApply(self, action)\n\n def updateActions(self):\n super().updateActions()\n if self.actions and \"save\" in self.actions:\n self.actions[\"save\"].addClass(\"btn-primary\")\n\n\nclass ControlPanelFormWrapper(layout.FormWrapper):\n \"\"\"Use this form as the plone.z3cform layout wrapper to get the control\n panel layout.\n \"\"\"\n\n index = ViewPageTemplateFile(\"controlpanel_usergroups_layout.pt\")\n\n\nUserGroupsSettingsPanelView = layout.wrap_form(\n UserGroupsSettingsControlPanel, ControlPanelFormWrapper\n)\n\n\nclass UsersGroupsControlPanelView(BrowserView):\n @property\n def portal_roles(self):\n pmemb = getToolByName(aq_inner(self.context), \"portal_membership\")\n return [r for r in pmemb.getPortalRoles() if r != \"Owner\"]\n\n @property\n def 
many_users(self):\n return getAdapter(aq_inner(self.context), IUserGroupsSettingsSchema).many_users\n\n @property\n def many_groups(self):\n return getAdapter(aq_inner(self.context), IUserGroupsSettingsSchema).many_groups\n\n @property\n def email_as_username(self):\n return getAdapter(\n aq_inner(self.context), ISecuritySchema\n ).get_use_email_as_login()\n\n def makeQuery(self, **kw):\n return make_query(**kw)\n\n def membershipSearch(\n self, searchString=\"\", searchUsers=True, searchGroups=True, ignore=[]\n ):\n \"\"\"Search for users and/or groups, returning actual member and group items\n Replaces the now-deprecated prefs_user_groups_search.py script\"\"\"\n groupResults = userResults = []\n\n gtool = getToolByName(self, \"portal_groups\")\n mtool = getToolByName(self, \"portal_membership\")\n\n searchView = getMultiAdapter(\n (aq_inner(self.context), self.request), name=\"pas_search\"\n )\n\n if searchGroups:\n groupResults = searchView.merge(\n chain(\n *[\n searchView.searchGroups(**{field: searchString})\n for field in [\"id\", \"title\"]\n ]\n ),\n \"groupid\",\n )\n groupResults = [\n gtool.getGroupById(g[\"id\"])\n for g in groupResults\n if g[\"id\"] not in ignore\n ]\n groupResults.sort(\n key=lambda x: x is not None and normalizeString(x.getGroupTitleOrName())\n )\n\n if searchUsers:\n userResults = searchView.merge(\n chain(\n *[\n searchView.searchUsers(**{field: searchString})\n for field in [\"login\", \"fullname\", \"email\"]\n ]\n ),\n \"userid\",\n )\n userResults = [\n mtool.getMemberById(u[\"id\"])\n for u in userResults\n if u[\"id\"] not in ignore\n ]\n userResults.sort(\n key=lambda x: x is not None\n and x.getProperty(\"fullname\") is not None\n and normalizeString(x.getProperty(\"fullname\"))\n or \"\"\n )\n\n return groupResults + userResults\n\n def atoi(self, s):\n try:\n return int(s)\n except ValueError:\n return 0\n\n @property\n def is_zope_manager(self):\n return getSecurityManager().checkPermission(ManagePortal, self.context)\n\n # The next two class methods implement the following truth table:\n #\n # MANY USERS/GROUPS SEARCHING CAN LIST USERS/GROUPS RESULT\n # False False False Lists unavailable\n # False False True Show all\n # False True False Show matching\n # False True True Show matching\n # True False False Too many to list\n # True False True Lists unavailable\n # True True False Show matching\n # True True True Show matching\n\n # TODO: Maybe have these methods return a text message (instead of a bool)\n # corresponding to the actual result, e.g. \"Too many to list\", \"Lists\n # unavailable\"\n\n @property\n def show_group_listing_warning(self):\n if not self.searchString:\n acl = getToolByName(self, \"acl_users\")\n if acl.canListAllGroups():\n if self.many_groups:\n return True\n return False\n\n @property\n def show_users_listing_warning(self):\n if not self.searchString:\n acl = getToolByName(self, \"acl_users\")\n # XXX Huh? 
Is canListAllUsers broken?\n if not acl.canListAllUsers():\n if self.many_users:\n return True\n return False\n", "path": "Products/CMFPlone/controlpanel/browser/usergroups.py"}], "after_files": [{"content": "from AccessControl import getSecurityManager\nfrom Acquisition import aq_inner\nfrom itertools import chain\nfrom plone.autoform.form import AutoExtensibleForm\nfrom plone.base import PloneMessageFactory as _\nfrom plone.base.interfaces import ISecuritySchema\nfrom plone.base.interfaces import IUserGroupsSettingsSchema\nfrom plone.z3cform import layout\nfrom Products.CMFCore.permissions import ManagePortal\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone.utils import normalizeString\nfrom Products.Five.browser import BrowserView\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\nfrom z3c.form import button\nfrom z3c.form import form\nfrom zope.component import getAdapter\nfrom zope.component import getMultiAdapter\nfrom ZTUtils import make_query\n\n\nclass UserGroupsSettingsControlPanel(AutoExtensibleForm, form.EditForm):\n schema = IUserGroupsSettingsSchema\n id = \"usergroupsettings-control-panel\"\n label = _(\"Users and Groups\")\n form_name = _(\"User/Groups settings\")\n control_panel_view = \"usergroups-controlpanel\"\n\n @button.buttonAndHandler(_(\"label_save\", default=\"Save\"), name=\"save\")\n def handleApply(self, action):\n super().handleApply(self, action)\n\n def updateActions(self):\n super().updateActions()\n if self.actions and \"save\" in self.actions:\n self.actions[\"save\"].addClass(\"btn-primary\")\n\n\nclass ControlPanelFormWrapper(layout.FormWrapper):\n \"\"\"Use this form as the plone.z3cform layout wrapper to get the control\n panel layout.\n \"\"\"\n\n index = ViewPageTemplateFile(\"controlpanel_usergroups_layout.pt\")\n\n\nUserGroupsSettingsPanelView = layout.wrap_form(\n UserGroupsSettingsControlPanel, ControlPanelFormWrapper\n)\n\n\nclass UsersGroupsControlPanelView(BrowserView):\n @property\n def portal_roles(self):\n pmemb = getToolByName(aq_inner(self.context), \"portal_membership\")\n return [r for r in pmemb.getPortalRoles() if r != \"Owner\"]\n\n @property\n def many_users(self):\n return getAdapter(aq_inner(self.context), IUserGroupsSettingsSchema).many_users\n\n @property\n def many_groups(self):\n return getAdapter(aq_inner(self.context), IUserGroupsSettingsSchema).many_groups\n\n @property\n def email_as_username(self):\n return getAdapter(\n aq_inner(self.context), ISecuritySchema\n ).get_use_email_as_login()\n\n def makeQuery(self, **kw):\n return make_query(**kw)\n\n def membershipSearch(\n self, searchString=\"\", searchUsers=True, searchGroups=True, ignore=[]\n ):\n \"\"\"Search for users and/or groups, returning actual member and group items\n Replaces the now-deprecated prefs_user_groups_search.py script\"\"\"\n groupResults = userResults = []\n\n gtool = getToolByName(self, \"portal_groups\")\n mtool = getToolByName(self, \"portal_membership\")\n\n searchView = getMultiAdapter(\n (aq_inner(self.context), self.request), name=\"pas_search\"\n )\n\n if searchGroups:\n # Only search for all ('') if the many_users flag is not set.\n if not (self.many_groups) or bool(self.searchString):\n groupResults = searchView.merge(\n chain(\n *[\n searchView.searchGroups(**{field: searchString})\n for field in [\"id\", \"title\"]\n ]\n ),\n \"groupid\",\n )\n groupResults = [\n gtool.getGroupById(g[\"id\"])\n for g in groupResults\n if g[\"id\"] not in ignore\n ]\n groupResults.sort(\n key=lambda x: x is 
not None and normalizeString(x.getGroupTitleOrName())\n )\n\n if searchUsers:\n # Only search for all ('') if the many_users flag is not set.\n if not (self.many_users) or bool(self.searchString):\n userResults = searchView.merge(\n chain(\n *[\n searchView.searchUsers(**{field: searchString})\n for field in [\"login\", \"fullname\", \"email\"]\n ]\n ),\n \"userid\",\n )\n userResults = [\n mtool.getMemberById(u[\"id\"])\n for u in userResults\n if u[\"id\"] not in ignore\n ]\n userResults.sort(\n key=lambda x: x is not None\n and x.getProperty(\"fullname\") is not None\n and normalizeString(x.getProperty(\"fullname\"))\n or \"\"\n )\n\n return groupResults + userResults\n\n def atoi(self, s):\n try:\n return int(s)\n except ValueError:\n return 0\n\n @property\n def is_zope_manager(self):\n return getSecurityManager().checkPermission(ManagePortal, self.context)\n\n # The next two class methods implement the following truth table:\n #\n # MANY USERS/GROUPS SEARCHING CAN LIST USERS/GROUPS RESULT\n # False False False Lists unavailable\n # False False True Show all\n # False True False Show matching\n # False True True Show matching\n # True False False Too many to list\n # True False True Lists unavailable\n # True True False Show matching\n # True True True Show matching\n\n # TODO: Maybe have these methods return a text message (instead of a bool)\n # corresponding to the actual result, e.g. \"Too many to list\", \"Lists\n # unavailable\"\n\n @property\n def show_group_listing_warning(self):\n if not self.searchString:\n acl = getToolByName(self, \"acl_users\")\n if acl.canListAllGroups():\n if self.many_groups:\n return True\n return False\n\n @property\n def show_users_listing_warning(self):\n if not self.searchString:\n acl = getToolByName(self, \"acl_users\")\n # XXX Huh? Is canListAllUsers broken?\n if not acl.canListAllUsers():\n if self.many_users:\n return True\n return False\n", "path": "Products/CMFPlone/controlpanel/browser/usergroups.py"}]}
| 2,634 | 697 |
gh_patches_debug_4814
|
rasdani/github-patches
|
git_diff
|
liqd__a4-opin-1966
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DTS - Do this. Do that.
**URL:** https://opin-stage.liqd.net/de/blueprints/liquid-democracy-ev/
**user:** *initiator*
**expected behaviour:** *An explanatory help text in English. Do this: Start a discussion and moderate it. Do that: Participants can upload documents to deepen the exchange and share information.*
**behaviour:** *Lorem Ipsum text*
**Comment/Question:** *Would this be a story?*
If needed or helpful you can add a screenshot. Since issues are public, make sure to not display personal or secret data.
<img width="877" alt="Bildschirmfoto 2020-07-28 um 08 41 19" src="https://user-images.githubusercontent.com/52459078/88628485-565c5500-d0ae-11ea-9630-78ada0dcd294.png">
--- END ISSUE ---
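The placeholder strings live in the `Aim.communitydebate` member of the file shown below. The fragment here sketches the kind of change the issue asks for; the English wording is only adapted from the issue's expected-behaviour note and is not necessarily the text that actually shipped:

```python
from django.utils.translation import ugettext_lazy as _

# Hypothetical replacement for the placeholder examples in the Aim enum;
# only the two example strings change.
communitydebate = (
    'communitydebate',
    _('Find and debate topics and questions.'),
    [_('Start a discussion and moderate it.'),
     _('Let participants upload documents to deepen the exchange and '
       'share information.')]
)
```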
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `euth/blueprints/blueprints.py`
Content:
```
1 from collections import namedtuple
2 from enum import Enum, unique
3
4 from django.utils.translation import ugettext_lazy as _
5
6 from adhocracy4.polls import phases as poll_phases
7 from euth.communitydebate import phases as communitydebate_phases
8 from euth.documents import phases as documents_phases
9 from euth.ideas import phases as ideas_phases
10 from euth.maps import phases as map_phases
11
12 from .names import BlueprintNames
13
14
15 class BlueprintEnum(Enum):
16 def __new__(cls, value, label):
17 obj = object.__new__(cls)
18 obj._value_ = len(cls.__members__) + 1
19 obj._value = value
20 obj.label = label
21 return obj
22
23 @property
24 def value(self):
25 return self._value
26
27 @classmethod
28 def get(cls, value):
29 return next(m for m in cls if m._value == value)
30
31
32 @unique
33 class Aim(Enum):
34 collect_ideas = (
35 'collect_ideas',
36 _('Create and collect new ideas or visions.'),
37 [_('(Urban) planning processes'),
38 _('Develop concepts or guiding principles')]
39 )
40 discuss_topic = (
41 'discuss_topic',
42 _('Gather feedback on a topic and discuss it in greater detail.'),
43 [_('Discuss existing concepts or plans'),
44 _('Develop solutions for existing problems')]
45 )
46 agenda_setting = (
47 'agenda_setting',
48 _('Agenda Setting'),
49 [_('Set the agenda of an event, a process, a project etc.')]
50 )
51 design_place = (
52 'design_place',
53 _('Design a place.'),
54 [_('(Urban) planning processes'),
55 _('Small scale design projects, e.g. renew your premises')]
56 )
57 run_survey = (
58 'run_survey',
59 _('Learn about what people like most.'),
60 [_('Opinion polls, majority votes etc.')]
61 )
62 run_competition = (
63 'run_competition',
64 _('Run a competition.'),
65 [_('All sorts of competitions, '
66 'like idea contests etc.')]
67 )
68 work_document = (
69 'work_document',
70 _('Work together with other people on a text document.'),
71 [_('Draft or revise statutes, articles, charters etc.'),
72 _('Involve different authors in writing a shared text')]
73 )
74 communitydebate = (
75 'communitydebate',
76 _('Find and debate topics and questions.'),
77 [_('Do this.'),
78 _('Do that.')]
79 )
80
81 def __new__(cls, value, label, examples):
82 obj = object.__new__(cls)
83 obj._value_ = value
84 obj.label = label
85 obj.examples = examples
86 return obj
87
88
89 @unique
90 class Result(BlueprintEnum):
91 collect_ideas = 3, _('Collection of commented ideas')
92 majority_vote = 2, _('Majority vote')
93 both = 1, _('Both')
94
95
96 @unique
97 class Experience(BlueprintEnum):
98 five_projects = 4, _('More than 5 participative projects')
99 two_projects = 3, _('More than 2 participative projects')
100 one_project = 2, _('1-2 participative projects')
101 no_projects = 1, _('I have no experiences in organising '
102 'participative projects')
103
104
105 class Motivation(BlueprintEnum):
106 high = 4, _('High motivation')
107 medium = 3, _('Medium motivation')
108 low = 2, _('Low motivation')
109 not_found = 1, _('No motivation')
110 unkown = 2, _('I don\'t know.')
111
112
113 @unique
114 class Participants(BlueprintEnum):
115 few = 0, '< 25'
116 some = 1, '25-50'
117 many = 2, '50+'
118
119
120 @unique
121 class Duration(BlueprintEnum):
122 one_weeks = 0, _('1-2 weeks')
123 two_weeks = 1, _('2-4 weeks')
124 four_weeks = 2, _('more than 4 weeks')
125
126
127 @unique
128 class Scope(BlueprintEnum):
129 local = 0, _('Local')
130 regional = 1, _('Regional')
131 national = 2, _('National or international')
132
133
134 class Accessibility(BlueprintEnum):
135 very_easy = 1, _('Very easy to access')
136 easy = 2, _('Easy to access')
137 hard = 3, _('Hard to access')
138 very_hard = 4, _('Very hard to access')
139 unkown = 3, _('I don\'t know')
140
141
142 ComplexityVector = namedtuple(
143 'ComplexityVector', [
144 'participants', 'duration', 'scope'
145 ]
146 )
147
148
149 COMPLEXITY_VECTOR_AC = ComplexityVector(
150 participants=(0, 0.5),
151 duration=(0, 1),
152 scope=(0, 0.5)
153 )
154
155 COMPLEXITY_VECTOR_BD = ComplexityVector(
156 participants=(0, 1),
157 duration=(0, 1),
158 scope=(0, 1)
159 )
160
161 COMPLEXITY_VECTOR_E = ComplexityVector(
162 participants=(0, 1 / 3),
163 duration=(0, 0),
164 scope=(0, 1 / 3)
165 )
166
167 COMPLEXITY_VECTOR_F = ComplexityVector(
168 participants=(1, 1),
169 duration=(0, 1),
170 scope=(0, 0)
171 )
172
173 Requirements = namedtuple(
174 'Requirements', [
175 'aims', 'results', 'experience', 'motivation'
176 ])
177
178
179 Blueprint = namedtuple(
180 'Blueprint', [
181 'title', 'description', 'content', 'image', 'settings_model',
182 'requirements', 'complexity', 'type'
183 ])
184
185
186 blueprints = [
187 (BlueprintNames.brainstorming.value,
188 Blueprint(
189 title=_('Brainstorming'),
190 description=_('Collect ideas, questions and input concerning '
191 'a problem or a question from a wide array of people.'),
192 content=[
193 ideas_phases.CollectPhase(),
194 ],
195 image='images/brainstorming.png',
196 settings_model=None,
197 requirements=Requirements(
198 aims=[Aim.collect_ideas, Aim.discuss_topic],
199 results=[Result.collect_ideas],
200 experience=Experience.no_projects,
201 motivation=Motivation.not_found
202 ),
203 complexity=COMPLEXITY_VECTOR_AC,
204 type=BlueprintNames.brainstorming.name
205 )),
206 (BlueprintNames.map_brainstorming.value,
207 Blueprint(
208 title=_('Spatial Brainstorming'),
209 description=_('Collect ideas, questions and input concerning a '
210 'problem or a question from a wide array of people.'),
211 content=[
212 map_phases.CollectPhase(),
213 ],
214 image='images/spatial_brainstorming.png',
215 settings_model=('a4maps', 'AreaSettings'),
216 requirements=Requirements(
217 aims=[Aim.design_place],
218 results=[Result.collect_ideas],
219 experience=Experience.no_projects,
220 motivation=Motivation.not_found
221 ),
222 complexity=COMPLEXITY_VECTOR_AC,
223 type=BlueprintNames.map_brainstorming.name
224 )),
225 (BlueprintNames.idea_challenge.value,
226 Blueprint(
227 title=_('Idea Challenge'),
228 description=_('Run a challenge and find the best ideas to solve '
229 'a particular problem.'),
230 content=[
231 ideas_phases.CollectPhase(),
232 ideas_phases.RatingPhase(),
233 ],
234 image='images/challenge.png',
235 settings_model=None,
236 requirements=Requirements(
237 aims=[Aim.run_competition, Aim.run_survey],
238 results=list(Result),
239 experience=Experience.one_project,
240 motivation=Motivation.low
241 ),
242 complexity=COMPLEXITY_VECTOR_BD,
243 type=BlueprintNames.idea_challenge.name
244 )),
245 (BlueprintNames.map_idea_challenge.value,
246 Blueprint(
247 title=_('Spatial Idea Challenge'),
248 description=_('Run a challenge concerning a certain area or space in '
249 'your community and find the best ideas to solve a '
250 'particular problem.'),
251 content=[
252 map_phases.CollectPhase(),
253 map_phases.RatingPhase(),
254 ],
255 image='images/spatial_challenge.png',
256 settings_model=('a4maps', 'AreaSettings'),
257 requirements=Requirements(
258 aims=[Aim.design_place],
259 results=list(Result),
260 experience=Experience.one_project,
261 motivation=Motivation.low
262 ),
263 complexity=COMPLEXITY_VECTOR_BD,
264 type=BlueprintNames.map_idea_challenge.name
265 )),
266 (BlueprintNames.agenda_setting.value,
267 Blueprint(
268 title=_('Agenda Setting'),
269 description=_('You can involve everyone in planning a meeting. '
270 'Collect ideas for an upcoming event and let your '
271 'participants vote on the topics you want to tackle.'),
272 content=[
273 ideas_phases.CollectPhase(),
274 ideas_phases.RatingPhase(),
275 ],
276 image='images/agenda_setting.png',
277 settings_model=None,
278 requirements=Requirements(
279 aims=[Aim.collect_ideas, Aim.discuss_topic,
280 Aim.run_survey, Aim.agenda_setting],
281 results=list(Result),
282 experience=Experience.one_project,
283 motivation=Motivation.low
284 ),
285 complexity=COMPLEXITY_VECTOR_AC,
286 type=BlueprintNames.agenda_setting.name
287 )),
288 (BlueprintNames.commenting_text.value,
289 Blueprint(
290 title=_('Text Review'),
291 description=_('Let participants discuss individual paragraphs of a '
292 'text. This is ideal for discussing position papers or '
293 'a mission statements with many people.'),
294 content=[
295 documents_phases.CreateDocumentPhase(),
296 documents_phases.CommentPhase(),
297 ],
298 image='images/text_review.png',
299 settings_model=None,
300 requirements=Requirements(
301 aims=[Aim.work_document],
302 results=None,
303 experience=None,
304 motivation=None
305 ),
306 complexity=COMPLEXITY_VECTOR_F,
307 type=BlueprintNames.commenting_text.name
308 )),
309 (BlueprintNames.a4_poll.value,
310 Blueprint(
311 title=_('Poll'),
312 description=_('Run customizable, multi-step polls on OPIN to get '
313 'detailed opinions on topics from the public or your '
314 'members. Via the OPIN polling app for iOS and Android '
315 'these polls are also accessible on smartphones.'),
316 content=[
317 poll_phases.VotingPhase(),
318 ],
319 image='images/poll.png',
320 settings_model=None,
321 requirements=Requirements(
322 aims=[Aim.run_survey],
323 results=[Result.majority_vote],
324 experience=Experience.no_projects,
325 motivation=Motivation.not_found
326 ),
327 complexity=COMPLEXITY_VECTOR_E,
328 type=BlueprintNames.a4_poll.name
329 )),
330 (BlueprintNames.communitydebate.value,
331 Blueprint(
332 title=_('Community debate'),
333 description=_('Collect topics and questions to discuss, '
334 'debate and prioritize them.'),
335 content=[
336 communitydebate_phases.DebatePhase(),
337 ],
338 image='images/brainstorming.png',
339 settings_model=None,
340 requirements=Requirements(
341 aims=[Aim.communitydebate],
342 results=[Result.both],
343 experience=Experience.no_projects,
344 motivation=Motivation.not_found
345 ),
346 complexity=COMPLEXITY_VECTOR_AC,
347 type=BlueprintNames.communitydebate.name
348 )),
349 ]
350
351
352 fallbacks = {
353 Aim.collect_ideas: BlueprintNames.brainstorming.value,
354 Aim.discuss_topic: BlueprintNames.brainstorming.value,
355 Aim.agenda_setting: BlueprintNames.agenda_setting.value,
356 Aim.design_place: BlueprintNames.map_brainstorming.value,
357 Aim.run_survey: BlueprintNames.a4_poll.value,
358 Aim.run_competition: BlueprintNames.agenda_setting.value,
359 Aim.work_document: BlueprintNames.commenting_text.value,
360 Aim.communitydebate: BlueprintNames.communitydebate.value
361 }
362
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/euth/blueprints/blueprints.py b/euth/blueprints/blueprints.py
--- a/euth/blueprints/blueprints.py
+++ b/euth/blueprints/blueprints.py
@@ -74,8 +74,9 @@
communitydebate = (
'communitydebate',
_('Find and debate topics and questions.'),
- [_('Do this.'),
- _('Do that.')]
+ [_('Start a discussion and moderate it'),
+ _('Participants can upload documents to deepen the exchange '
+ 'and share information')]
)
def __new__(cls, value, label, examples):
|
{"golden_diff": "diff --git a/euth/blueprints/blueprints.py b/euth/blueprints/blueprints.py\n--- a/euth/blueprints/blueprints.py\n+++ b/euth/blueprints/blueprints.py\n@@ -74,8 +74,9 @@\n communitydebate = (\n 'communitydebate',\n _('Find and debate topics and questions.'),\n- [_('Do this.'),\n- _('Do that.')]\n+ [_('Start a discussion and moderate it'),\n+ _('Participants can upload documents to deepen the exchange '\n+ 'and share information')]\n )\n \n def __new__(cls, value, label, examples):\n", "issue": "DTS - Do this. Do that.\n**URL:** https://opin-stage.liqd.net/de/blueprints/liquid-democracy-ev/\r\n**user:** *initiator*\r\n**expected behaviour:** *An explaining help text in english. Do this: Start a discussion and moderate it. Do that: Participants can upload documents to deepen the exchange and share information.*\r\n**behaviour:** *Lorem Ipsum text*\r\n\r\n\r\n**Comment/Question:** *Would this be a story?* \r\n\r\nIf needed or helpful you can add a screenshot. Since issues are public, make sure to not display personal or secret data.\r\n<img width=\"877\" alt=\"Bildschirmfoto 2020-07-28 um 08 41 19\" src=\"https://user-images.githubusercontent.com/52459078/88628485-565c5500-d0ae-11ea-9630-78ada0dcd294.png\">\r\n\n", "before_files": [{"content": "from collections import namedtuple\nfrom enum import Enum, unique\n\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.polls import phases as poll_phases\nfrom euth.communitydebate import phases as communitydebate_phases\nfrom euth.documents import phases as documents_phases\nfrom euth.ideas import phases as ideas_phases\nfrom euth.maps import phases as map_phases\n\nfrom .names import BlueprintNames\n\n\nclass BlueprintEnum(Enum):\n def __new__(cls, value, label):\n obj = object.__new__(cls)\n obj._value_ = len(cls.__members__) + 1\n obj._value = value\n obj.label = label\n return obj\n\n @property\n def value(self):\n return self._value\n\n @classmethod\n def get(cls, value):\n return next(m for m in cls if m._value == value)\n\n\n@unique\nclass Aim(Enum):\n collect_ideas = (\n 'collect_ideas',\n _('Create and collect new ideas or visions.'),\n [_('(Urban) planning processes'),\n _('Develop concepts or guiding principles')]\n )\n discuss_topic = (\n 'discuss_topic',\n _('Gather feedback on a topic and discuss it in greater detail.'),\n [_('Discuss existing concepts or plans'),\n _('Develop solutions for existing problems')]\n )\n agenda_setting = (\n 'agenda_setting',\n _('Agenda Setting'),\n [_('Set the agenda of an event, a process, a project etc.')]\n )\n design_place = (\n 'design_place',\n _('Design a place.'),\n [_('(Urban) planning processes'),\n _('Small scale design projects, e.g. 
renew your premises')]\n )\n run_survey = (\n 'run_survey',\n _('Learn about what people like most.'),\n [_('Opinion polls, majority votes etc.')]\n )\n run_competition = (\n 'run_competition',\n _('Run a competition.'),\n [_('All sorts of competitions, '\n 'like idea contests etc.')]\n )\n work_document = (\n 'work_document',\n _('Work together with other people on a text document.'),\n [_('Draft or revise statutes, articles, charters etc.'),\n _('Involve different authors in writing a shared text')]\n )\n communitydebate = (\n 'communitydebate',\n _('Find and debate topics and questions.'),\n [_('Do this.'),\n _('Do that.')]\n )\n\n def __new__(cls, value, label, examples):\n obj = object.__new__(cls)\n obj._value_ = value\n obj.label = label\n obj.examples = examples\n return obj\n\n\n@unique\nclass Result(BlueprintEnum):\n collect_ideas = 3, _('Collection of commented ideas')\n majority_vote = 2, _('Majority vote')\n both = 1, _('Both')\n\n\n@unique\nclass Experience(BlueprintEnum):\n five_projects = 4, _('More than 5 participative projects')\n two_projects = 3, _('More than 2 participative projects')\n one_project = 2, _('1-2 participative projects')\n no_projects = 1, _('I have no experiences in organising '\n 'participative projects')\n\n\nclass Motivation(BlueprintEnum):\n high = 4, _('High motivation')\n medium = 3, _('Medium motivation')\n low = 2, _('Low motivation')\n not_found = 1, _('No motivation')\n unkown = 2, _('I don\\'t know.')\n\n\n@unique\nclass Participants(BlueprintEnum):\n few = 0, '< 25'\n some = 1, '25-50'\n many = 2, '50+'\n\n\n@unique\nclass Duration(BlueprintEnum):\n one_weeks = 0, _('1-2 weeks')\n two_weeks = 1, _('2-4 weeks')\n four_weeks = 2, _('more than 4 weeks')\n\n\n@unique\nclass Scope(BlueprintEnum):\n local = 0, _('Local')\n regional = 1, _('Regional')\n national = 2, _('National or international')\n\n\nclass Accessibility(BlueprintEnum):\n very_easy = 1, _('Very easy to access')\n easy = 2, _('Easy to access')\n hard = 3, _('Hard to access')\n very_hard = 4, _('Very hard to access')\n unkown = 3, _('I don\\'t know')\n\n\nComplexityVector = namedtuple(\n 'ComplexityVector', [\n 'participants', 'duration', 'scope'\n ]\n)\n\n\nCOMPLEXITY_VECTOR_AC = ComplexityVector(\n participants=(0, 0.5),\n duration=(0, 1),\n scope=(0, 0.5)\n)\n\nCOMPLEXITY_VECTOR_BD = ComplexityVector(\n participants=(0, 1),\n duration=(0, 1),\n scope=(0, 1)\n)\n\nCOMPLEXITY_VECTOR_E = ComplexityVector(\n participants=(0, 1 / 3),\n duration=(0, 0),\n scope=(0, 1 / 3)\n)\n\nCOMPLEXITY_VECTOR_F = ComplexityVector(\n participants=(1, 1),\n duration=(0, 1),\n scope=(0, 0)\n)\n\nRequirements = namedtuple(\n 'Requirements', [\n 'aims', 'results', 'experience', 'motivation'\n ])\n\n\nBlueprint = namedtuple(\n 'Blueprint', [\n 'title', 'description', 'content', 'image', 'settings_model',\n 'requirements', 'complexity', 'type'\n ])\n\n\nblueprints = [\n (BlueprintNames.brainstorming.value,\n Blueprint(\n title=_('Brainstorming'),\n description=_('Collect ideas, questions and input concerning '\n 'a problem or a question from a wide array of people.'),\n content=[\n ideas_phases.CollectPhase(),\n ],\n image='images/brainstorming.png',\n settings_model=None,\n requirements=Requirements(\n aims=[Aim.collect_ideas, Aim.discuss_topic],\n results=[Result.collect_ideas],\n experience=Experience.no_projects,\n motivation=Motivation.not_found\n ),\n complexity=COMPLEXITY_VECTOR_AC,\n type=BlueprintNames.brainstorming.name\n )),\n (BlueprintNames.map_brainstorming.value,\n Blueprint(\n 
title=_('Spatial Brainstorming'),\n description=_('Collect ideas, questions and input concerning a '\n 'problem or a question from a wide array of people.'),\n content=[\n map_phases.CollectPhase(),\n ],\n image='images/spatial_brainstorming.png',\n settings_model=('a4maps', 'AreaSettings'),\n requirements=Requirements(\n aims=[Aim.design_place],\n results=[Result.collect_ideas],\n experience=Experience.no_projects,\n motivation=Motivation.not_found\n ),\n complexity=COMPLEXITY_VECTOR_AC,\n type=BlueprintNames.map_brainstorming.name\n )),\n (BlueprintNames.idea_challenge.value,\n Blueprint(\n title=_('Idea Challenge'),\n description=_('Run a challenge and find the best ideas to solve '\n 'a particular problem.'),\n content=[\n ideas_phases.CollectPhase(),\n ideas_phases.RatingPhase(),\n ],\n image='images/challenge.png',\n settings_model=None,\n requirements=Requirements(\n aims=[Aim.run_competition, Aim.run_survey],\n results=list(Result),\n experience=Experience.one_project,\n motivation=Motivation.low\n ),\n complexity=COMPLEXITY_VECTOR_BD,\n type=BlueprintNames.idea_challenge.name\n )),\n (BlueprintNames.map_idea_challenge.value,\n Blueprint(\n title=_('Spatial Idea Challenge'),\n description=_('Run a challenge concerning a certain area or space in '\n 'your community and find the best ideas to solve a '\n 'particular problem.'),\n content=[\n map_phases.CollectPhase(),\n map_phases.RatingPhase(),\n ],\n image='images/spatial_challenge.png',\n settings_model=('a4maps', 'AreaSettings'),\n requirements=Requirements(\n aims=[Aim.design_place],\n results=list(Result),\n experience=Experience.one_project,\n motivation=Motivation.low\n ),\n complexity=COMPLEXITY_VECTOR_BD,\n type=BlueprintNames.map_idea_challenge.name\n )),\n (BlueprintNames.agenda_setting.value,\n Blueprint(\n title=_('Agenda Setting'),\n description=_('You can involve everyone in planning a meeting. '\n 'Collect ideas for an upcoming event and let your '\n 'participants vote on the topics you want to tackle.'),\n content=[\n ideas_phases.CollectPhase(),\n ideas_phases.RatingPhase(),\n ],\n image='images/agenda_setting.png',\n settings_model=None,\n requirements=Requirements(\n aims=[Aim.collect_ideas, Aim.discuss_topic,\n Aim.run_survey, Aim.agenda_setting],\n results=list(Result),\n experience=Experience.one_project,\n motivation=Motivation.low\n ),\n complexity=COMPLEXITY_VECTOR_AC,\n type=BlueprintNames.agenda_setting.name\n )),\n (BlueprintNames.commenting_text.value,\n Blueprint(\n title=_('Text Review'),\n description=_('Let participants discuss individual paragraphs of a '\n 'text. This is ideal for discussing position papers or '\n 'a mission statements with many people.'),\n content=[\n documents_phases.CreateDocumentPhase(),\n documents_phases.CommentPhase(),\n ],\n image='images/text_review.png',\n settings_model=None,\n requirements=Requirements(\n aims=[Aim.work_document],\n results=None,\n experience=None,\n motivation=None\n ),\n complexity=COMPLEXITY_VECTOR_F,\n type=BlueprintNames.commenting_text.name\n )),\n (BlueprintNames.a4_poll.value,\n Blueprint(\n title=_('Poll'),\n description=_('Run customizable, multi-step polls on OPIN to get '\n 'detailed opinions on topics from the public or your '\n 'members. 
Via the OPIN polling app for iOS and Android '\n 'these polls are also accessible on smartphones.'),\n content=[\n poll_phases.VotingPhase(),\n ],\n image='images/poll.png',\n settings_model=None,\n requirements=Requirements(\n aims=[Aim.run_survey],\n results=[Result.majority_vote],\n experience=Experience.no_projects,\n motivation=Motivation.not_found\n ),\n complexity=COMPLEXITY_VECTOR_E,\n type=BlueprintNames.a4_poll.name\n )),\n (BlueprintNames.communitydebate.value,\n Blueprint(\n title=_('Community debate'),\n description=_('Collect topics and questions to discuss, '\n 'debate and prioritize them.'),\n content=[\n communitydebate_phases.DebatePhase(),\n ],\n image='images/brainstorming.png',\n settings_model=None,\n requirements=Requirements(\n aims=[Aim.communitydebate],\n results=[Result.both],\n experience=Experience.no_projects,\n motivation=Motivation.not_found\n ),\n complexity=COMPLEXITY_VECTOR_AC,\n type=BlueprintNames.communitydebate.name\n )),\n]\n\n\nfallbacks = {\n Aim.collect_ideas: BlueprintNames.brainstorming.value,\n Aim.discuss_topic: BlueprintNames.brainstorming.value,\n Aim.agenda_setting: BlueprintNames.agenda_setting.value,\n Aim.design_place: BlueprintNames.map_brainstorming.value,\n Aim.run_survey: BlueprintNames.a4_poll.value,\n Aim.run_competition: BlueprintNames.agenda_setting.value,\n Aim.work_document: BlueprintNames.commenting_text.value,\n Aim.communitydebate: BlueprintNames.communitydebate.value\n}\n", "path": "euth/blueprints/blueprints.py"}], "after_files": [{"content": "from collections import namedtuple\nfrom enum import Enum, unique\n\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.polls import phases as poll_phases\nfrom euth.communitydebate import phases as communitydebate_phases\nfrom euth.documents import phases as documents_phases\nfrom euth.ideas import phases as ideas_phases\nfrom euth.maps import phases as map_phases\n\nfrom .names import BlueprintNames\n\n\nclass BlueprintEnum(Enum):\n def __new__(cls, value, label):\n obj = object.__new__(cls)\n obj._value_ = len(cls.__members__) + 1\n obj._value = value\n obj.label = label\n return obj\n\n @property\n def value(self):\n return self._value\n\n @classmethod\n def get(cls, value):\n return next(m for m in cls if m._value == value)\n\n\n@unique\nclass Aim(Enum):\n collect_ideas = (\n 'collect_ideas',\n _('Create and collect new ideas or visions.'),\n [_('(Urban) planning processes'),\n _('Develop concepts or guiding principles')]\n )\n discuss_topic = (\n 'discuss_topic',\n _('Gather feedback on a topic and discuss it in greater detail.'),\n [_('Discuss existing concepts or plans'),\n _('Develop solutions for existing problems')]\n )\n agenda_setting = (\n 'agenda_setting',\n _('Agenda Setting'),\n [_('Set the agenda of an event, a process, a project etc.')]\n )\n design_place = (\n 'design_place',\n _('Design a place.'),\n [_('(Urban) planning processes'),\n _('Small scale design projects, e.g. 
renew your premises')]\n )\n run_survey = (\n 'run_survey',\n _('Learn about what people like most.'),\n [_('Opinion polls, majority votes etc.')]\n )\n run_competition = (\n 'run_competition',\n _('Run a competition.'),\n [_('All sorts of competitions, '\n 'like idea contests etc.')]\n )\n work_document = (\n 'work_document',\n _('Work together with other people on a text document.'),\n [_('Draft or revise statutes, articles, charters etc.'),\n _('Involve different authors in writing a shared text')]\n )\n communitydebate = (\n 'communitydebate',\n _('Find and debate topics and questions.'),\n [_('Start a discussion and moderate it'),\n _('Participants can upload documents to deepen the exchange '\n 'and share information')]\n )\n\n def __new__(cls, value, label, examples):\n obj = object.__new__(cls)\n obj._value_ = value\n obj.label = label\n obj.examples = examples\n return obj\n\n\n@unique\nclass Result(BlueprintEnum):\n collect_ideas = 3, _('Collection of commented ideas')\n majority_vote = 2, _('Majority vote')\n both = 1, _('Both')\n\n\n@unique\nclass Experience(BlueprintEnum):\n five_projects = 4, _('More than 5 participative projects')\n two_projects = 3, _('More than 2 participative projects')\n one_project = 2, _('1-2 participative projects')\n no_projects = 1, _('I have no experiences in organising '\n 'participative projects')\n\n\nclass Motivation(BlueprintEnum):\n high = 4, _('High motivation')\n medium = 3, _('Medium motivation')\n low = 2, _('Low motivation')\n not_found = 1, _('No motivation')\n unkown = 2, _('I don\\'t know.')\n\n\n@unique\nclass Participants(BlueprintEnum):\n few = 0, '< 25'\n some = 1, '25-50'\n many = 2, '50+'\n\n\n@unique\nclass Duration(BlueprintEnum):\n one_weeks = 0, _('1-2 weeks')\n two_weeks = 1, _('2-4 weeks')\n four_weeks = 2, _('more than 4 weeks')\n\n\n@unique\nclass Scope(BlueprintEnum):\n local = 0, _('Local')\n regional = 1, _('Regional')\n national = 2, _('National or international')\n\n\nclass Accessibility(BlueprintEnum):\n very_easy = 1, _('Very easy to access')\n easy = 2, _('Easy to access')\n hard = 3, _('Hard to access')\n very_hard = 4, _('Very hard to access')\n unkown = 3, _('I don\\'t know')\n\n\nComplexityVector = namedtuple(\n 'ComplexityVector', [\n 'participants', 'duration', 'scope'\n ]\n)\n\n\nCOMPLEXITY_VECTOR_AC = ComplexityVector(\n participants=(0, 0.5),\n duration=(0, 1),\n scope=(0, 0.5)\n)\n\nCOMPLEXITY_VECTOR_BD = ComplexityVector(\n participants=(0, 1),\n duration=(0, 1),\n scope=(0, 1)\n)\n\nCOMPLEXITY_VECTOR_E = ComplexityVector(\n participants=(0, 1 / 3),\n duration=(0, 0),\n scope=(0, 1 / 3)\n)\n\nCOMPLEXITY_VECTOR_F = ComplexityVector(\n participants=(1, 1),\n duration=(0, 1),\n scope=(0, 0)\n)\n\nRequirements = namedtuple(\n 'Requirements', [\n 'aims', 'results', 'experience', 'motivation'\n ])\n\n\nBlueprint = namedtuple(\n 'Blueprint', [\n 'title', 'description', 'content', 'image', 'settings_model',\n 'requirements', 'complexity', 'type'\n ])\n\n\nblueprints = [\n (BlueprintNames.brainstorming.value,\n Blueprint(\n title=_('Brainstorming'),\n description=_('Collect ideas, questions and input concerning '\n 'a problem or a question from a wide array of people.'),\n content=[\n ideas_phases.CollectPhase(),\n ],\n image='images/brainstorming.png',\n settings_model=None,\n requirements=Requirements(\n aims=[Aim.collect_ideas, Aim.discuss_topic],\n results=[Result.collect_ideas],\n experience=Experience.no_projects,\n motivation=Motivation.not_found\n ),\n complexity=COMPLEXITY_VECTOR_AC,\n 
type=BlueprintNames.brainstorming.name\n )),\n (BlueprintNames.map_brainstorming.value,\n Blueprint(\n title=_('Spatial Brainstorming'),\n description=_('Collect ideas, questions and input concerning a '\n 'problem or a question from a wide array of people.'),\n content=[\n map_phases.CollectPhase(),\n ],\n image='images/spatial_brainstorming.png',\n settings_model=('a4maps', 'AreaSettings'),\n requirements=Requirements(\n aims=[Aim.design_place],\n results=[Result.collect_ideas],\n experience=Experience.no_projects,\n motivation=Motivation.not_found\n ),\n complexity=COMPLEXITY_VECTOR_AC,\n type=BlueprintNames.map_brainstorming.name\n )),\n (BlueprintNames.idea_challenge.value,\n Blueprint(\n title=_('Idea Challenge'),\n description=_('Run a challenge and find the best ideas to solve '\n 'a particular problem.'),\n content=[\n ideas_phases.CollectPhase(),\n ideas_phases.RatingPhase(),\n ],\n image='images/challenge.png',\n settings_model=None,\n requirements=Requirements(\n aims=[Aim.run_competition, Aim.run_survey],\n results=list(Result),\n experience=Experience.one_project,\n motivation=Motivation.low\n ),\n complexity=COMPLEXITY_VECTOR_BD,\n type=BlueprintNames.idea_challenge.name\n )),\n (BlueprintNames.map_idea_challenge.value,\n Blueprint(\n title=_('Spatial Idea Challenge'),\n description=_('Run a challenge concerning a certain area or space in '\n 'your community and find the best ideas to solve a '\n 'particular problem.'),\n content=[\n map_phases.CollectPhase(),\n map_phases.RatingPhase(),\n ],\n image='images/spatial_challenge.png',\n settings_model=('a4maps', 'AreaSettings'),\n requirements=Requirements(\n aims=[Aim.design_place],\n results=list(Result),\n experience=Experience.one_project,\n motivation=Motivation.low\n ),\n complexity=COMPLEXITY_VECTOR_BD,\n type=BlueprintNames.map_idea_challenge.name\n )),\n (BlueprintNames.agenda_setting.value,\n Blueprint(\n title=_('Agenda Setting'),\n description=_('You can involve everyone in planning a meeting. '\n 'Collect ideas for an upcoming event and let your '\n 'participants vote on the topics you want to tackle.'),\n content=[\n ideas_phases.CollectPhase(),\n ideas_phases.RatingPhase(),\n ],\n image='images/agenda_setting.png',\n settings_model=None,\n requirements=Requirements(\n aims=[Aim.collect_ideas, Aim.discuss_topic,\n Aim.run_survey, Aim.agenda_setting],\n results=list(Result),\n experience=Experience.one_project,\n motivation=Motivation.low\n ),\n complexity=COMPLEXITY_VECTOR_AC,\n type=BlueprintNames.agenda_setting.name\n )),\n (BlueprintNames.commenting_text.value,\n Blueprint(\n title=_('Text Review'),\n description=_('Let participants discuss individual paragraphs of a '\n 'text. This is ideal for discussing position papers or '\n 'a mission statements with many people.'),\n content=[\n documents_phases.CreateDocumentPhase(),\n documents_phases.CommentPhase(),\n ],\n image='images/text_review.png',\n settings_model=None,\n requirements=Requirements(\n aims=[Aim.work_document],\n results=None,\n experience=None,\n motivation=None\n ),\n complexity=COMPLEXITY_VECTOR_F,\n type=BlueprintNames.commenting_text.name\n )),\n (BlueprintNames.a4_poll.value,\n Blueprint(\n title=_('Poll'),\n description=_('Run customizable, multi-step polls on OPIN to get '\n 'detailed opinions on topics from the public or your '\n 'members. 
Via the OPIN polling app for iOS and Android '\n 'these polls are also accessible on smartphones.'),\n content=[\n poll_phases.VotingPhase(),\n ],\n image='images/poll.png',\n settings_model=None,\n requirements=Requirements(\n aims=[Aim.run_survey],\n results=[Result.majority_vote],\n experience=Experience.no_projects,\n motivation=Motivation.not_found\n ),\n complexity=COMPLEXITY_VECTOR_E,\n type=BlueprintNames.a4_poll.name\n )),\n (BlueprintNames.communitydebate.value,\n Blueprint(\n title=_('Community debate'),\n description=_('Collect topics and questions to discuss, '\n 'debate and prioritize them.'),\n content=[\n communitydebate_phases.DebatePhase(),\n ],\n image='images/brainstorming.png',\n settings_model=None,\n requirements=Requirements(\n aims=[Aim.communitydebate],\n results=[Result.both],\n experience=Experience.no_projects,\n motivation=Motivation.not_found\n ),\n complexity=COMPLEXITY_VECTOR_AC,\n type=BlueprintNames.communitydebate.name\n )),\n]\n\n\nfallbacks = {\n Aim.collect_ideas: BlueprintNames.brainstorming.value,\n Aim.discuss_topic: BlueprintNames.brainstorming.value,\n Aim.agenda_setting: BlueprintNames.agenda_setting.value,\n Aim.design_place: BlueprintNames.map_brainstorming.value,\n Aim.run_survey: BlueprintNames.a4_poll.value,\n Aim.run_competition: BlueprintNames.agenda_setting.value,\n Aim.work_document: BlueprintNames.commenting_text.value,\n Aim.communitydebate: BlueprintNames.communitydebate.value\n}\n", "path": "euth/blueprints/blueprints.py"}]}
| 3,927 | 133 |
gh_patches_debug_32749
|
rasdani/github-patches
|
git_diff
|
StackStorm__st2-3083
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Issue on calling action-alias match in CLI
calling
`st2 action-alias match "st2 pack install" `
Gives the error
```
2016-11-30 04:02:38,984 140340458909360 INFO hooks [-] 25e7cbee-ec68-4040-9adc-f3f82f11774c - POST /v1/actionalias/match with filters={} (remote_addr='127.0.0.1',method='POST',filters={},request_id='25e7cbee-ec68-4040-9adc-f3f82f11774c',path='/v1/actionalias/match')
2016-11-30 04:02:38,985 140340458909360 AUDIT auth [-] Token provided in headers
2016-11-30 04:02:38,986 140340458909360 AUDIT auth [-] Token with id "583e4f54c4da5f27da3c32ea" is validated.
2016-11-30 04:02:38,990 140340458909360 WARNING base [-] Type definition for 'action_alias_match_api' argument of 'match' is missing.
2016-11-30 04:02:38,990 140340458909360 INFO resource [-] GET all /v1/actionalias/match with filters={'order_by': ['pack', 'name']} (offset=0,limit='None',filters={'order_by': ['pack', 'name']},sort=[])
2016-11-30 04:02:38,996 140340458909360 ERROR hooks [-] API call failed: 'representation'
Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/pecan/core.py", line 631, in __call__
self.invoke_controller(controller, args, kwargs, state)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/pecan/core.py", line 531, in invoke_controller
result = controller(*args, **kwargs)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/rbac/decorators.py", line 61, in func_wrapper
return func(*args, **kwargs)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/models/api/base.py", line 284, in callfunction
raise e
KeyError: 'representation' (_exception_data={},_exception_class='KeyError',_exception_message="'representation'")
2016-11-30 04:02:38,997 140340458909360 ERROR hooks [-] Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/pecan/core.py", line 631, in __call__
self.invoke_controller(controller, args, kwargs, state)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/pecan/core.py", line 531, in invoke_controller
result = controller(*args, **kwargs)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/rbac/decorators.py", line 61, in func_wrapper
return func(*args, **kwargs)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/models/api/base.py", line 284, in callfunction
raise e
KeyError: 'representation'
Traceback (most recent call last):
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/pecan/core.py", line 631, in __call__
self.invoke_controller(controller, args, kwargs, state)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/pecan/core.py", line 531, in invoke_controller
result = controller(*args, **kwargs)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/rbac/decorators.py", line 61, in func_wrapper
return func(*args, **kwargs)
File "/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/models/api/base.py", line 284, in callfunction
raise e
KeyError: 'representation'
2016-11-30 04:02:38,997 140340458909360 INFO hooks [-] 25e7cbee-ec68-4040-9adc-f3f82f11774c - POST /v1/actionalias/match result={
"faultstring": "Internal Server Error"
} (result='{\n "faultstring": "Internal Server Error"\n}',request_id='25e7cbee-ec68-4040-9adc-f3f82f11774c',status_code='500 Internal Server Error',remote_addr='127.0.0.1',path='/v1/actionalias/match',method='POST')
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `st2common/st2common/util/actionalias_matching.py`
Content:
```
1 # Licensed to the StackStorm, Inc ('StackStorm') under one or more
2 # contributor license agreements. See the NOTICE file distributed with
3 # this work for additional information regarding copyright ownership.
4 # The ASF licenses this file to You under the Apache License, Version 2.0
5 # (the "License"); you may not use this file except in compliance with
6 # the License. You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import six
17
18 from st2common.exceptions.content import ParseException
19 from st2common.models.utils.action_alias_utils import extract_parameters
20
21 __all__ = [
22 'list_format_strings_from_aliases',
23 'normalise_alias_format_string',
24 'match_command_to_alias'
25 ]
26
27
28 def list_format_strings_from_aliases(aliases):
29 '''
30 List patterns from a collection of alias objects
31
32 :param aliases: The list of aliases
33 :type aliases: ``list`` of :class:`st2common.models.api.action.ActionAliasAPI`
34
35 :return: A description of potential execution patterns in a list of aliases.
36 :rtype: ``list`` of ``list``
37 '''
38 patterns = []
39 for alias in aliases:
40 for format_ in alias.formats:
41 display, representations = normalise_alias_format_string(format_)
42 patterns.extend([(display, representation) for representation in representations])
43 return patterns
44
45
46 def normalise_alias_format_string(alias_format):
47 '''
48 StackStorm action aliases can have two types;
49 1. A simple string holding the format
50 2. A dictionary which hold numerous alias format "representation(s)"
51 With a single "display" for help about the action alias.
52 This function processes both forms and returns a standardized form.
53
54 :param alias_format: The alias format
55 :type alias_format: ``str`` or ``dict``
56
57 :return: The representation of the alias
58 :rtype: ``tuple`` of (``str``, ``str``)
59 '''
60 display = None
61 representation = []
62
63 if isinstance(alias_format, six.string_types):
64 display = alias_format
65 representation.append(alias_format)
66 elif isinstance(alias_format, dict):
67 display = alias_format['display']
68 representation = alias_format['representation']
69 else:
70 raise TypeError("alias_format '%s' is neither a dictionary or string type."
71 % repr(alias_format))
72 return (display, representation)
73
74
75 def match_command_to_alias(command, aliases):
76 """
77 Match the text against an action and return the action reference.
78 """
79 results = []
80
81 for alias in aliases:
82 format_strings = list_format_strings_from_aliases([alias])
83 for format_string in format_strings:
84 try:
85 extract_parameters(format_str=format_string[1],
86 param_stream=command)
87 except ParseException:
88 continue
89
90 results.append((alias, format_string[0], format_string[1]))
91 return results
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/st2common/st2common/util/actionalias_matching.py b/st2common/st2common/util/actionalias_matching.py
--- a/st2common/st2common/util/actionalias_matching.py
+++ b/st2common/st2common/util/actionalias_matching.py
@@ -39,16 +39,20 @@
for alias in aliases:
for format_ in alias.formats:
display, representations = normalise_alias_format_string(format_)
- patterns.extend([(display, representation) for representation in representations])
+ if display and len(representations) == 0:
+ patterns.extend([(display, [])])
+ else:
+ patterns.extend([(display, representation) for representation in representations])
return patterns
def normalise_alias_format_string(alias_format):
'''
- StackStorm action aliases can have two types;
- 1. A simple string holding the format
- 2. A dictionary which hold numerous alias format "representation(s)"
- With a single "display" for help about the action alias.
+ StackStorm action aliases come in two forms;
+ 1. A string holding the format, which is also used as the help string.
+ 2. A dictionary containing "display" and/or "representation" keys.
+ "representation": a list of numerous alias format "representation(s)"
+ "display": a help string to be displayed.
This function processes both forms and returns a standardized form.
:param alias_format: The alias format
@@ -64,8 +68,10 @@
display = alias_format
representation.append(alias_format)
elif isinstance(alias_format, dict):
- display = alias_format['display']
- representation = alias_format['representation']
+ display = alias_format.get('display')
+ representation = alias_format.get('representation') or []
+ if isinstance(representation, six.string_types):
+ representation = [representation]
else:
raise TypeError("alias_format '%s' is neither a dictionary or string type."
% repr(alias_format))
|
{"golden_diff": "diff --git a/st2common/st2common/util/actionalias_matching.py b/st2common/st2common/util/actionalias_matching.py\n--- a/st2common/st2common/util/actionalias_matching.py\n+++ b/st2common/st2common/util/actionalias_matching.py\n@@ -39,16 +39,20 @@\n for alias in aliases:\n for format_ in alias.formats:\n display, representations = normalise_alias_format_string(format_)\n- patterns.extend([(display, representation) for representation in representations])\n+ if display and len(representations) == 0:\n+ patterns.extend([(display, [])])\n+ else:\n+ patterns.extend([(display, representation) for representation in representations])\n return patterns\n \n \n def normalise_alias_format_string(alias_format):\n '''\n- StackStorm action aliases can have two types;\n- 1. A simple string holding the format\n- 2. A dictionary which hold numerous alias format \"representation(s)\"\n- With a single \"display\" for help about the action alias.\n+ StackStorm action aliases come in two forms;\n+ 1. A string holding the format, which is also used as the help string.\n+ 2. A dictionary containing \"display\" and/or \"representation\" keys.\n+ \"representation\": a list of numerous alias format \"representation(s)\"\n+ \"display\": a help string to be displayed.\n This function processes both forms and returns a standardized form.\n \n :param alias_format: The alias format\n@@ -64,8 +68,10 @@\n display = alias_format\n representation.append(alias_format)\n elif isinstance(alias_format, dict):\n- display = alias_format['display']\n- representation = alias_format['representation']\n+ display = alias_format.get('display')\n+ representation = alias_format.get('representation') or []\n+ if isinstance(representation, six.string_types):\n+ representation = [representation]\n else:\n raise TypeError(\"alias_format '%s' is neither a dictionary or string type.\"\n % repr(alias_format))\n", "issue": "Issue on calling action-alias match in CLI\ncalling \r\n`st2 action-alias match \"st2 pack install\" `\r\nGives the error\r\n```\r\n2016-11-30 04:02:38,984 140340458909360 INFO hooks [-] 25e7cbee-ec68-4040-9adc-f3f82f11774c - POST /v1/actionalias/match with filters={} (remote_addr='127.0.0.1',method='POST',filters={},request_id='25e7cbee-ec68-4040-9adc-f3f82f11774c',path='/v1/actionalias/match')\r\n2016-11-30 04:02:38,985 140340458909360 AUDIT auth [-] Token provided in headers\r\n2016-11-30 04:02:38,986 140340458909360 AUDIT auth [-] Token with id \"583e4f54c4da5f27da3c32ea\" is validated.\r\n2016-11-30 04:02:38,990 140340458909360 WARNING base [-] Type definition for 'action_alias_match_api' argument of 'match' is missing.\r\n2016-11-30 04:02:38,990 140340458909360 INFO resource [-] GET all /v1/actionalias/match with filters={'order_by': ['pack', 'name']} (offset=0,limit='None',filters={'order_by': ['pack', 'name']},sort=[])\r\n2016-11-30 04:02:38,996 140340458909360 ERROR hooks [-] API call failed: 'representation'\r\nTraceback (most recent call last):\r\n File \"/opt/stackstorm/st2/local/lib/python2.7/site-packages/pecan/core.py\", line 631, in __call__\r\n self.invoke_controller(controller, args, kwargs, state)\r\n File \"/opt/stackstorm/st2/local/lib/python2.7/site-packages/pecan/core.py\", line 531, in invoke_controller\r\n result = controller(*args, **kwargs)\r\n File \"/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/rbac/decorators.py\", line 61, in func_wrapper\r\n return func(*args, **kwargs)\r\n File 
\"/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/models/api/base.py\", line 284, in callfunction\r\n raise e\r\nKeyError: 'representation' (_exception_data={},_exception_class='KeyError',_exception_message=\"'representation'\")\r\n2016-11-30 04:02:38,997 140340458909360 ERROR hooks [-] Traceback (most recent call last):\r\n File \"/opt/stackstorm/st2/local/lib/python2.7/site-packages/pecan/core.py\", line 631, in __call__\r\n self.invoke_controller(controller, args, kwargs, state)\r\n File \"/opt/stackstorm/st2/local/lib/python2.7/site-packages/pecan/core.py\", line 531, in invoke_controller\r\n result = controller(*args, **kwargs)\r\n File \"/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/rbac/decorators.py\", line 61, in func_wrapper\r\n return func(*args, **kwargs)\r\n File \"/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/models/api/base.py\", line 284, in callfunction\r\n raise e\r\nKeyError: 'representation'\r\nTraceback (most recent call last):\r\n File \"/opt/stackstorm/st2/local/lib/python2.7/site-packages/pecan/core.py\", line 631, in __call__\r\n self.invoke_controller(controller, args, kwargs, state)\r\n File \"/opt/stackstorm/st2/local/lib/python2.7/site-packages/pecan/core.py\", line 531, in invoke_controller\r\n result = controller(*args, **kwargs)\r\n File \"/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/rbac/decorators.py\", line 61, in func_wrapper\r\n return func(*args, **kwargs)\r\n File \"/opt/stackstorm/st2/local/lib/python2.7/site-packages/st2common/models/api/base.py\", line 284, in callfunction\r\n raise e\r\nKeyError: 'representation'\r\n2016-11-30 04:02:38,997 140340458909360 INFO hooks [-] 25e7cbee-ec68-4040-9adc-f3f82f11774c - POST /v1/actionalias/match result={\r\n \"faultstring\": \"Internal Server Error\"\r\n} (result='{\\n \"faultstring\": \"Internal Server Error\"\\n}',request_id='25e7cbee-ec68-4040-9adc-f3f82f11774c',status_code='500 Internal Server Error',remote_addr='127.0.0.1',path='/v1/actionalias/match',method='POST')\r\n```\n", "before_files": [{"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport six\n\nfrom st2common.exceptions.content import ParseException\nfrom st2common.models.utils.action_alias_utils import extract_parameters\n\n__all__ = [\n 'list_format_strings_from_aliases',\n 'normalise_alias_format_string',\n 'match_command_to_alias'\n]\n\n\ndef list_format_strings_from_aliases(aliases):\n '''\n List patterns from a collection of alias objects\n\n :param aliases: The list of aliases\n :type aliases: ``list`` of :class:`st2common.models.api.action.ActionAliasAPI`\n\n :return: A description of potential execution patterns in a list of aliases.\n :rtype: ``list`` of ``list``\n '''\n patterns = []\n for alias in aliases:\n for format_ in alias.formats:\n display, representations = normalise_alias_format_string(format_)\n patterns.extend([(display, representation) for representation in representations])\n return patterns\n\n\ndef normalise_alias_format_string(alias_format):\n '''\n StackStorm action aliases can have two types;\n 1. A simple string holding the format\n 2. A dictionary which hold numerous alias format \"representation(s)\"\n With a single \"display\" for help about the action alias.\n This function processes both forms and returns a standardized form.\n\n :param alias_format: The alias format\n :type alias_format: ``str`` or ``dict``\n\n :return: The representation of the alias\n :rtype: ``tuple`` of (``str``, ``str``)\n '''\n display = None\n representation = []\n\n if isinstance(alias_format, six.string_types):\n display = alias_format\n representation.append(alias_format)\n elif isinstance(alias_format, dict):\n display = alias_format['display']\n representation = alias_format['representation']\n else:\n raise TypeError(\"alias_format '%s' is neither a dictionary or string type.\"\n % repr(alias_format))\n return (display, representation)\n\n\ndef match_command_to_alias(command, aliases):\n \"\"\"\n Match the text against an action and return the action reference.\n \"\"\"\n results = []\n\n for alias in aliases:\n format_strings = list_format_strings_from_aliases([alias])\n for format_string in format_strings:\n try:\n extract_parameters(format_str=format_string[1],\n param_stream=command)\n except ParseException:\n continue\n\n results.append((alias, format_string[0], format_string[1]))\n return results\n", "path": "st2common/st2common/util/actionalias_matching.py"}], "after_files": [{"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport six\n\nfrom st2common.exceptions.content import ParseException\nfrom st2common.models.utils.action_alias_utils import extract_parameters\n\n__all__ = [\n 'list_format_strings_from_aliases',\n 'normalise_alias_format_string',\n 'match_command_to_alias'\n]\n\n\ndef list_format_strings_from_aliases(aliases):\n '''\n List patterns from a collection of alias objects\n\n :param aliases: The list of aliases\n :type aliases: ``list`` of :class:`st2common.models.api.action.ActionAliasAPI`\n\n :return: A description of potential execution patterns in a list of aliases.\n :rtype: ``list`` of ``list``\n '''\n patterns = []\n for alias in aliases:\n for format_ in alias.formats:\n display, representations = normalise_alias_format_string(format_)\n if display and len(representations) == 0:\n patterns.extend([(display, [])])\n else:\n patterns.extend([(display, representation) for representation in representations])\n return patterns\n\n\ndef normalise_alias_format_string(alias_format):\n '''\n StackStorm action aliases come in two forms;\n 1. A string holding the format, which is also used as the help string.\n 2. A dictionary containing \"display\" and/or \"representation\" keys.\n \"representation\": a list of numerous alias format \"representation(s)\"\n \"display\": a help string to be displayed.\n This function processes both forms and returns a standardized form.\n\n :param alias_format: The alias format\n :type alias_format: ``str`` or ``dict``\n\n :return: The representation of the alias\n :rtype: ``tuple`` of (``str``, ``str``)\n '''\n display = None\n representation = []\n\n if isinstance(alias_format, six.string_types):\n display = alias_format\n representation.append(alias_format)\n elif isinstance(alias_format, dict):\n display = alias_format.get('display')\n representation = alias_format.get('representation') or []\n if isinstance(representation, six.string_types):\n representation = [representation]\n else:\n raise TypeError(\"alias_format '%s' is neither a dictionary or string type.\"\n % repr(alias_format))\n return (display, representation)\n\n\ndef match_command_to_alias(command, aliases):\n \"\"\"\n Match the text against an action and return the action reference.\n \"\"\"\n results = []\n\n for alias in aliases:\n format_strings = list_format_strings_from_aliases([alias])\n for format_string in format_strings:\n try:\n extract_parameters(format_str=format_string[1],\n param_stream=command)\n except ParseException:\n continue\n\n results.append((alias, format_string[0], format_string[1]))\n return results\n", "path": "st2common/st2common/util/actionalias_matching.py"}]}
| 2,433 | 438 |
gh_patches_debug_7496
|
rasdani/github-patches
|
git_diff
|
GoogleCloudPlatform__professional-services-326
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Asset exporter tool - getting ImportError in GAE
Just tried to set up from scratch in new project. Followed steps from readme
When running the cron job I get this
ImportError: cannot import name 'expr_pb2' from 'google.type' (/env/lib/python3.7/site-packages/google/type/__init__.py)
at <module> (/env/lib/python3.7/site-packages/google/iam/v1/policy_pb2.py:16)
at <module> (/env/lib/python3.7/site-packages/google/iam/v1/iam_policy_pb2.py:17)
at <module> (/env/lib/python3.7/site-packages/google/cloud/asset_v1/proto/assets_pb2.py:19)
at <module> (/env/lib/python3.7/site-packages/google/cloud/asset_v1/proto/asset_service_pb2.py:20)
at <module> (/env/lib/python3.7/site-packages/google/cloud/asset_v1/types.py:23)
at <module> (/env/lib/python3.7/site-packages/google/cloud/asset_v1/__init__.py:20)
at <module> (/srv/lib/asset_inventory/export.py:33)
at <module> (/srv/main.py:45)
at import_app (/env/lib/python3.7/site-packages/gunicorn/util.py:350)
at load_wsgiapp (/env/lib/python3.7/site-packages/gunicorn/app/wsgiapp.py:41)
at load (/env/lib/python3.7/site-packages/gunicorn/app/wsgiapp.py:52)
at wsgi (/env/lib/python3.7/site-packages/gunicorn/app/base.py:67)
at load_wsgi (/env/lib/python3.7/site-packages/gunicorn/workers/base.py:138)
at init_process (/env/lib/python3.7/site-packages/gunicorn/workers/base.py:129)
at init_process (/env/lib/python3.7/site-packages/gunicorn/workers/gthread.py:104)
at spawn_worker (/env/lib/python3.7/site-packages/gunicorn/arbiter.py:583)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/asset-inventory/asset_inventory/pipeline_runner.py`
Content:
```
1 # Copyright 2019 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Code to invoke the pipeline."""
15
16 import logging
17 import pprint
18 import time
19
20 from googleapiclient.discovery import build
21 from oauth2client.client import GoogleCredentials
22
23
24 def get_job_name(load_time):
25 """User friendly job name from load_time."""
26 return ('cloud-asset-import-' + load_time.lower().replace(
27 ':', '-').replace(' ', '').replace('.', '-'))
28
29
30 def is_successful_state(final_state):
31 """True if the status is successful.
32
33 Checks both for beam and template runner success codes.
34
35 Args:
36 final_state: Final state the pipeline is in.
37
38 Returns:
39 True if the job was successful.
40 """
41 if final_state not in ['JOB_STATE_DONE', 'DONE']:
42 return False
43 return True
44
45
46 def wait_on_pipeline_job(df_service, pipeline_job):
47 """Poll the job status every 60 seconds until done."""
48 dataflow_project = pipeline_job['projectId']
49 template_region = pipeline_job['location']
50 job_id = pipeline_job['id']
51 pipeline_job = df_service.projects().locations().jobs().get(
52 location=template_region, projectId=dataflow_project,
53 jobId=job_id).execute(num_retries=5)
54 logging.info('job status %s', pprint.pformat(pipeline_job))
55 current_state = pipeline_job['currentState']
56 # We have reached a terminal state.
57 if current_state in [
58 'JOB_STATE_DONE', 'JOB_STATE_FAILED', 'JOB_STATE_CANCELLED',
59 'JOB_STATE_UPDATED', 'JOB_STATE_DRAINED'
60 ]:
61 logging.info('final pipeline state : %s', current_state)
62 return current_state, pipeline_job
63 logging.info('sleeping 60 seconds before repolling.')
64 time.sleep(60)
65 return wait_on_pipeline_job(df_service, pipeline_job)
66
67
68 def run_pipeline_template(dataflow_project, template_region, template_location,
69 input_location, group_by, write_disposition, dataset,
70 stage, load_time, num_shards, runtime_environment):
71 """Invoke the suplied pipeline template.
72
73 Args:
74 dataflow_project: Project to run the dataflow job in.
75 template_region: Region to run the job in.
76 template_location: GCS path to the template file.
77 input_location: GCS path load json documents from,
78 group_by: How to split assets into tables.
79 write_disposition: To append to or ovewrite BigQuery tables.
80 dataset: BigQuery dataset to write to.
81 stage: GCS path to write BigQuery load files.
82 load_time: Timestamp or date to load data with.
83 num_shards: Shards for for each asset type.
84 runtime_environment: Dict suppling other runtime overrides.
85 Returns:
86 End state of the pipline and job object.
87 """
88 credentials = GoogleCredentials.get_application_default()
89 df_service = build('dataflow', 'v1b3', credentials=credentials)
90
91 # Set the following variables to your values.
92 job_name = get_job_name(load_time)
93 body = {
94 'jobName': job_name,
95 'parameters': {
96 'input': input_location,
97 'load_time': load_time,
98 'stage': stage,
99 'group_by': group_by,
100 'write_disposition': write_disposition,
101 'num_shards': num_shards,
102 'dataset': dataset,
103 },
104 'environment': runtime_environment
105 }
106 logging.info('launching template %s in %s:%s with %s', template_location,
107 dataflow_project, template_region, pprint.pformat(body))
108 launch_result = df_service.projects().locations().templates().launch(
109 location=template_region,
110 projectId=dataflow_project,
111 gcsPath=template_location,
112 body=body).execute(num_retries=5)
113
114 logging.info('waiting on pipeline : %s', pprint.pformat(launch_result))
115 return wait_on_pipeline_job(df_service, launch_result['job'])
116
117
118 def run_pipeline_beam_runner(pipeline_runner, dataflow_project, input_location,
119 group_by, write_disposition, dataset, stage,
120 load_time, num_shards, pipeline_arguments):
121 """Invokes the pipeline with a beam runner.
122
123 Only tested with the dataflow and direct runners.
124
125 Args:
126 pipeline_runner: The Beam runner to use.
127 dataflow_project: Project to run the dataflow job in.
128 input_location: GCS path load json documents from,
129 group_by: How to split assets into tables.
130 write_disposition: To append to or ovewrite BigQuery tables.
131 dataset: BigQuery dataset to write to.
132 stage: GCS path to write BigQuery load files.
133 load_time: Timestamp to add to data during during BigQuery load.
134 num_shards: Shards for for each asset type.
135 pipeline_arguments: List of additional runner arguments.
136 Returns:
137 The end state of the pipeline run (a string), and PipelineResult.
138 """
139
140 # pylint: disable=import-error
141 # import on demand as we don't want to depend on pipeline code which imports
142 # apache beam code unless we are using a beam runner and not invoking a
143 # template.
144 from asset_inventory import import_pipeline
145 job_name = get_job_name(load_time)
146
147 pipeline_parameters = pipeline_arguments
148
149 parameters = {
150 '--load_time': load_time,
151 '--job_name': job_name,
152 '--project': dataflow_project,
153 '--input': input_location,
154 '--group_by': group_by,
155 '--write_disposition': write_disposition,
156 '--num_shards': num_shards,
157 '--dataset': dataset,
158 '--stage': stage,
159 '--runner': pipeline_runner
160 }
161 for arg_name, value in parameters.items():
162 if value and arg_name not in pipeline_parameters:
163 pipeline_parameters += [arg_name, value]
164 pipeline_result = import_pipeline.run(pipeline_parameters)
165 logging.info('waiting on pipeline : %s', pprint.pformat(pipeline_result))
166 state = pipeline_result.wait_until_finish()
167 logging.info('final pipeline state: %s', state)
168 return pipeline_result.state, pipeline_result
169
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tools/asset-inventory/asset_inventory/pipeline_runner.py b/tools/asset-inventory/asset_inventory/pipeline_runner.py
--- a/tools/asset-inventory/asset_inventory/pipeline_runner.py
+++ b/tools/asset-inventory/asset_inventory/pipeline_runner.py
@@ -86,7 +86,8 @@
End state of the pipline and job object.
"""
credentials = GoogleCredentials.get_application_default()
- df_service = build('dataflow', 'v1b3', credentials=credentials)
+ df_service = build('dataflow', 'v1b3', credentials=credentials,
+ cache_discovery=False)
# Set the following variables to your values.
job_name = get_job_name(load_time)
|
{"golden_diff": "diff --git a/tools/asset-inventory/asset_inventory/pipeline_runner.py b/tools/asset-inventory/asset_inventory/pipeline_runner.py\n--- a/tools/asset-inventory/asset_inventory/pipeline_runner.py\n+++ b/tools/asset-inventory/asset_inventory/pipeline_runner.py\n@@ -86,7 +86,8 @@\n End state of the pipline and job object.\n \"\"\"\n credentials = GoogleCredentials.get_application_default()\n- df_service = build('dataflow', 'v1b3', credentials=credentials)\n+ df_service = build('dataflow', 'v1b3', credentials=credentials,\n+ cache_discovery=False)\n \n # Set the following variables to your values.\n job_name = get_job_name(load_time)\n", "issue": "Asset exporter tool - getting ImportError in GAE \nJust tried to set up from scratch in new project. Followed steps from readme\r\nWhen running the cron job I get this\r\n\r\nImportError: cannot import name 'expr_pb2' from 'google.type' (/env/lib/python3.7/site-packages/google/type/__init__.py)\r\nat <module> (/env/lib/python3.7/site-packages/google/iam/v1/policy_pb2.py:16)\r\nat <module> (/env/lib/python3.7/site-packages/google/iam/v1/iam_policy_pb2.py:17)\r\nat <module> (/env/lib/python3.7/site-packages/google/cloud/asset_v1/proto/assets_pb2.py:19)\r\nat <module> (/env/lib/python3.7/site-packages/google/cloud/asset_v1/proto/asset_service_pb2.py:20)\r\nat <module> (/env/lib/python3.7/site-packages/google/cloud/asset_v1/types.py:23)\r\nat <module> (/env/lib/python3.7/site-packages/google/cloud/asset_v1/__init__.py:20)\r\nat <module> (/srv/lib/asset_inventory/export.py:33)\r\nat <module> (/srv/main.py:45)\r\nat import_app (/env/lib/python3.7/site-packages/gunicorn/util.py:350)\r\nat load_wsgiapp (/env/lib/python3.7/site-packages/gunicorn/app/wsgiapp.py:41)\r\nat load (/env/lib/python3.7/site-packages/gunicorn/app/wsgiapp.py:52)\r\nat wsgi (/env/lib/python3.7/site-packages/gunicorn/app/base.py:67)\r\nat load_wsgi (/env/lib/python3.7/site-packages/gunicorn/workers/base.py:138)\r\nat init_process (/env/lib/python3.7/site-packages/gunicorn/workers/base.py:129)\r\nat init_process (/env/lib/python3.7/site-packages/gunicorn/workers/gthread.py:104)\r\nat spawn_worker (/env/lib/python3.7/site-packages/gunicorn/arbiter.py:583)\n", "before_files": [{"content": "# Copyright 2019 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Code to invoke the pipeline.\"\"\"\n\nimport logging\nimport pprint\nimport time\n\nfrom googleapiclient.discovery import build\nfrom oauth2client.client import GoogleCredentials\n\n\ndef get_job_name(load_time):\n \"\"\"User friendly job name from load_time.\"\"\"\n return ('cloud-asset-import-' + load_time.lower().replace(\n ':', '-').replace(' ', '').replace('.', '-'))\n\n\ndef is_successful_state(final_state):\n \"\"\"True if the status is successful.\n\n Checks both for beam and template runner success codes.\n\n Args:\n final_state: Final state the pipeline is in.\n\n Returns:\n True if the job was successful.\n \"\"\"\n if final_state not in ['JOB_STATE_DONE', 'DONE']:\n return False\n return 
True\n\n\ndef wait_on_pipeline_job(df_service, pipeline_job):\n \"\"\"Poll the job status every 60 seconds until done.\"\"\"\n dataflow_project = pipeline_job['projectId']\n template_region = pipeline_job['location']\n job_id = pipeline_job['id']\n pipeline_job = df_service.projects().locations().jobs().get(\n location=template_region, projectId=dataflow_project,\n jobId=job_id).execute(num_retries=5)\n logging.info('job status %s', pprint.pformat(pipeline_job))\n current_state = pipeline_job['currentState']\n # We have reached a terminal state.\n if current_state in [\n 'JOB_STATE_DONE', 'JOB_STATE_FAILED', 'JOB_STATE_CANCELLED',\n 'JOB_STATE_UPDATED', 'JOB_STATE_DRAINED'\n ]:\n logging.info('final pipeline state : %s', current_state)\n return current_state, pipeline_job\n logging.info('sleeping 60 seconds before repolling.')\n time.sleep(60)\n return wait_on_pipeline_job(df_service, pipeline_job)\n\n\ndef run_pipeline_template(dataflow_project, template_region, template_location,\n input_location, group_by, write_disposition, dataset,\n stage, load_time, num_shards, runtime_environment):\n \"\"\"Invoke the suplied pipeline template.\n\n Args:\n dataflow_project: Project to run the dataflow job in.\n template_region: Region to run the job in.\n template_location: GCS path to the template file.\n input_location: GCS path load json documents from,\n group_by: How to split assets into tables.\n write_disposition: To append to or ovewrite BigQuery tables.\n dataset: BigQuery dataset to write to.\n stage: GCS path to write BigQuery load files.\n load_time: Timestamp or date to load data with.\n num_shards: Shards for for each asset type.\n runtime_environment: Dict suppling other runtime overrides.\n Returns:\n End state of the pipline and job object.\n \"\"\"\n credentials = GoogleCredentials.get_application_default()\n df_service = build('dataflow', 'v1b3', credentials=credentials)\n\n # Set the following variables to your values.\n job_name = get_job_name(load_time)\n body = {\n 'jobName': job_name,\n 'parameters': {\n 'input': input_location,\n 'load_time': load_time,\n 'stage': stage,\n 'group_by': group_by,\n 'write_disposition': write_disposition,\n 'num_shards': num_shards,\n 'dataset': dataset,\n },\n 'environment': runtime_environment\n }\n logging.info('launching template %s in %s:%s with %s', template_location,\n dataflow_project, template_region, pprint.pformat(body))\n launch_result = df_service.projects().locations().templates().launch(\n location=template_region,\n projectId=dataflow_project,\n gcsPath=template_location,\n body=body).execute(num_retries=5)\n\n logging.info('waiting on pipeline : %s', pprint.pformat(launch_result))\n return wait_on_pipeline_job(df_service, launch_result['job'])\n\n\ndef run_pipeline_beam_runner(pipeline_runner, dataflow_project, input_location,\n group_by, write_disposition, dataset, stage,\n load_time, num_shards, pipeline_arguments):\n \"\"\"Invokes the pipeline with a beam runner.\n\n Only tested with the dataflow and direct runners.\n\n Args:\n pipeline_runner: The Beam runner to use.\n dataflow_project: Project to run the dataflow job in.\n input_location: GCS path load json documents from,\n group_by: How to split assets into tables.\n write_disposition: To append to or ovewrite BigQuery tables.\n dataset: BigQuery dataset to write to.\n stage: GCS path to write BigQuery load files.\n load_time: Timestamp to add to data during during BigQuery load.\n num_shards: Shards for for each asset type.\n pipeline_arguments: List of additional 
runner arguments.\n Returns:\n The end state of the pipeline run (a string), and PipelineResult.\n \"\"\"\n\n # pylint: disable=import-error\n # import on demand as we don't want to depend on pipeline code which imports\n # apache beam code unless we are using a beam runner and not invoking a\n # template.\n from asset_inventory import import_pipeline\n job_name = get_job_name(load_time)\n\n pipeline_parameters = pipeline_arguments\n\n parameters = {\n '--load_time': load_time,\n '--job_name': job_name,\n '--project': dataflow_project,\n '--input': input_location,\n '--group_by': group_by,\n '--write_disposition': write_disposition,\n '--num_shards': num_shards,\n '--dataset': dataset,\n '--stage': stage,\n '--runner': pipeline_runner\n }\n for arg_name, value in parameters.items():\n if value and arg_name not in pipeline_parameters:\n pipeline_parameters += [arg_name, value]\n pipeline_result = import_pipeline.run(pipeline_parameters)\n logging.info('waiting on pipeline : %s', pprint.pformat(pipeline_result))\n state = pipeline_result.wait_until_finish()\n logging.info('final pipeline state: %s', state)\n return pipeline_result.state, pipeline_result\n", "path": "tools/asset-inventory/asset_inventory/pipeline_runner.py"}], "after_files": [{"content": "# Copyright 2019 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Code to invoke the pipeline.\"\"\"\n\nimport logging\nimport pprint\nimport time\n\nfrom googleapiclient.discovery import build\nfrom oauth2client.client import GoogleCredentials\n\n\ndef get_job_name(load_time):\n \"\"\"User friendly job name from load_time.\"\"\"\n return ('cloud-asset-import-' + load_time.lower().replace(\n ':', '-').replace(' ', '').replace('.', '-'))\n\n\ndef is_successful_state(final_state):\n \"\"\"True if the status is successful.\n\n Checks both for beam and template runner success codes.\n\n Args:\n final_state: Final state the pipeline is in.\n\n Returns:\n True if the job was successful.\n \"\"\"\n if final_state not in ['JOB_STATE_DONE', 'DONE']:\n return False\n return True\n\n\ndef wait_on_pipeline_job(df_service, pipeline_job):\n \"\"\"Poll the job status every 60 seconds until done.\"\"\"\n dataflow_project = pipeline_job['projectId']\n template_region = pipeline_job['location']\n job_id = pipeline_job['id']\n pipeline_job = df_service.projects().locations().jobs().get(\n location=template_region, projectId=dataflow_project,\n jobId=job_id).execute(num_retries=5)\n logging.info('job status %s', pprint.pformat(pipeline_job))\n current_state = pipeline_job['currentState']\n # We have reached a terminal state.\n if current_state in [\n 'JOB_STATE_DONE', 'JOB_STATE_FAILED', 'JOB_STATE_CANCELLED',\n 'JOB_STATE_UPDATED', 'JOB_STATE_DRAINED'\n ]:\n logging.info('final pipeline state : %s', current_state)\n return current_state, pipeline_job\n logging.info('sleeping 60 seconds before repolling.')\n time.sleep(60)\n return wait_on_pipeline_job(df_service, pipeline_job)\n\n\ndef run_pipeline_template(dataflow_project, template_region, 
template_location,\n input_location, group_by, write_disposition, dataset,\n stage, load_time, num_shards, runtime_environment):\n \"\"\"Invoke the suplied pipeline template.\n\n Args:\n dataflow_project: Project to run the dataflow job in.\n template_region: Region to run the job in.\n template_location: GCS path to the template file.\n input_location: GCS path load json documents from,\n group_by: How to split assets into tables.\n write_disposition: To append to or ovewrite BigQuery tables.\n dataset: BigQuery dataset to write to.\n stage: GCS path to write BigQuery load files.\n load_time: Timestamp or date to load data with.\n num_shards: Shards for for each asset type.\n runtime_environment: Dict suppling other runtime overrides.\n Returns:\n End state of the pipline and job object.\n \"\"\"\n credentials = GoogleCredentials.get_application_default()\n df_service = build('dataflow', 'v1b3', credentials=credentials,\n cache_discovery=False)\n\n # Set the following variables to your values.\n job_name = get_job_name(load_time)\n body = {\n 'jobName': job_name,\n 'parameters': {\n 'input': input_location,\n 'load_time': load_time,\n 'stage': stage,\n 'group_by': group_by,\n 'write_disposition': write_disposition,\n 'num_shards': num_shards,\n 'dataset': dataset,\n },\n 'environment': runtime_environment\n }\n logging.info('launching template %s in %s:%s with %s', template_location,\n dataflow_project, template_region, pprint.pformat(body))\n launch_result = df_service.projects().locations().templates().launch(\n location=template_region,\n projectId=dataflow_project,\n gcsPath=template_location,\n body=body).execute(num_retries=5)\n\n logging.info('waiting on pipeline : %s', pprint.pformat(launch_result))\n return wait_on_pipeline_job(df_service, launch_result['job'])\n\n\ndef run_pipeline_beam_runner(pipeline_runner, dataflow_project, input_location,\n group_by, write_disposition, dataset, stage,\n load_time, num_shards, pipeline_arguments):\n \"\"\"Invokes the pipeline with a beam runner.\n\n Only tested with the dataflow and direct runners.\n\n Args:\n pipeline_runner: The Beam runner to use.\n dataflow_project: Project to run the dataflow job in.\n input_location: GCS path load json documents from,\n group_by: How to split assets into tables.\n write_disposition: To append to or ovewrite BigQuery tables.\n dataset: BigQuery dataset to write to.\n stage: GCS path to write BigQuery load files.\n load_time: Timestamp to add to data during during BigQuery load.\n num_shards: Shards for for each asset type.\n pipeline_arguments: List of additional runner arguments.\n Returns:\n The end state of the pipeline run (a string), and PipelineResult.\n \"\"\"\n\n # pylint: disable=import-error\n # import on demand as we don't want to depend on pipeline code which imports\n # apache beam code unless we are using a beam runner and not invoking a\n # template.\n from asset_inventory import import_pipeline\n job_name = get_job_name(load_time)\n\n pipeline_parameters = pipeline_arguments\n\n parameters = {\n '--load_time': load_time,\n '--job_name': job_name,\n '--project': dataflow_project,\n '--input': input_location,\n '--group_by': group_by,\n '--write_disposition': write_disposition,\n '--num_shards': num_shards,\n '--dataset': dataset,\n '--stage': stage,\n '--runner': pipeline_runner\n }\n for arg_name, value in parameters.items():\n if value and arg_name not in pipeline_parameters:\n pipeline_parameters += [arg_name, value]\n pipeline_result = import_pipeline.run(pipeline_parameters)\n 
logging.info('waiting on pipeline : %s', pprint.pformat(pipeline_result))\n state = pipeline_result.wait_until_finish()\n logging.info('final pipeline state: %s', state)\n return pipeline_result.state, pipeline_result\n", "path": "tools/asset-inventory/asset_inventory/pipeline_runner.py"}]}
| 2,543 | 162 |
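For `GoogleCloudPlatform__professional-services-326` above, the golden diff changes only how the Dataflow discovery client is built. A minimal standalone sketch of that patched call is below; the wrapper function and the commented usage are illustrative assumptions, not code from the repository.

```python
from googleapiclient.discovery import build
from oauth2client.client import GoogleCredentials


def make_dataflow_service():
    """Build the Dataflow API client the way the patched helper does."""
    credentials = GoogleCredentials.get_application_default()
    # cache_discovery=False is the one-line change from the patch: it tells the
    # client library not to use its discovery-document cache when constructing
    # the service object.
    return build('dataflow', 'v1b3', credentials=credentials,
                 cache_discovery=False)


# Hypothetical usage (requires application-default credentials):
# df_service = make_dataflow_service()
# job = df_service.projects().locations().jobs().get(
#     projectId='example-project', location='us-central1',
#     jobId='example-job-id').execute(num_retries=5)
```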
gh_patches_debug_19844
|
rasdani/github-patches
|
git_diff
|
lisa-lab__pylearn2-1040
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
target_format.py introduces hard dependence on scipy
scipy imports are all meant to be guarded by try/except statements so you can run the core library without installing scipy
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pylearn2/format/target_format.py`
Content:
```
1 """Code for reformatting supervised learning targets."""
2 from operator import mul
3
4 import numpy as np
5 import scipy
6 import scipy.sparse
7 import theano.sparse
8 from theano import tensor, config
9
10
11 class OneHotFormatter(object):
12 """
13 A target formatter that transforms labels from integers in both single
14 and batch mode.
15
16 Parameters
17 ----------
18 max_labels : int
19 The number of possible classes/labels. This means that all labels
20 should be < max_labels. Example: For MNIST there are 10 numbers
21 and hence max_labels = 10.
22 dtype : dtype, optional
23 The desired dtype for the converted one-hot vectors. Defaults to
24 `config.floatX` if not given.
25 """
26 def __init__(self, max_labels, dtype=None):
27 """
28 Initializes the formatter given the number of max labels.
29 """
30 try:
31 np.empty(max_labels)
32 except (ValueError, TypeError):
33 raise ValueError("%s got bad max_labels argument '%s'" %
34 (self.__class__.__name__, str(max_labels)))
35 self._max_labels = max_labels
36 if dtype is None:
37 self._dtype = config.floatX
38 else:
39 try:
40 np.dtype(dtype)
41 except TypeError:
42 raise TypeError("%s got bad dtype identifier %s" %
43 (self.__class__.__name__, str(dtype)))
44 self._dtype = dtype
45
46 def format(self, targets, mode='stack', sparse=False):
47 """
48 Formats a given array of target labels into a one-hot
49 vector. If labels appear multiple times, their value
50 in the one-hot vector is incremented.
51
52 Parameters
53 ----------
54 targets : ndarray
55 A 1D array of targets, or a batch (2D array) where
56 each row is a list of targets.
57 mode : string
58 The way in which to convert the labels to arrays. Takes
59 three different options:
60
61 - "concatenate" : concatenates the one-hot vectors from
62 multiple labels
63 - "stack" : returns a matrix where each row is the
64 one-hot vector of a label
65 - "merge" : merges the one-hot vectors together to
66 form a vector where the elements are
67 the result of an indicator function
68 NB: As the result of an indicator function
69 the result is the same in case a label
70 is duplicated in the input.
71 sparse : bool
72 If true then the return value is sparse matrix. Note that
73 if sparse is True, then mode cannot be 'stack' because
74 sparse matrices need to be 2D
75
76 Returns
77 -------
78 one_hot : a NumPy array (can be 1D-3D depending on settings)
79 where normally the first axis are the different batch items,
80 the second axis the labels, the third axis the one_hot
81 vectors. Can be dense or sparse.
82 """
83 if mode not in ('concatenate', 'stack', 'merge'):
84 raise ValueError("%s got bad mode argument '%s'" %
85 (self.__class__.__name__, str(self._max_labels)))
86 elif mode == 'stack' and sparse:
87 raise ValueError("Sparse matrices need to be 2D, hence they"
88 "cannot be stacked")
89 if targets.ndim > 2:
90 raise ValueError("Targets needs to be 1D or 2D, but received %d "
91 "dimensions" % targets.ndim)
92 if 'int' not in str(targets.dtype):
93 raise TypeError("need an integer array for targets")
94 if sparse:
95 if mode == 'concatenate':
96 one_hot = scipy.sparse.csr_matrix(
97 (np.ones(targets.size, dtype=self._dtype),
98 (targets.flatten() + np.arange(targets.size)
99 * self._max_labels)
100 % (self._max_labels * targets.shape[1]),
101 np.arange(targets.shape[0] + 1) * targets.shape[1]),
102 (targets.shape[0], self._max_labels * targets.shape[1])
103 )
104 elif mode == 'merge':
105 one_hot = scipy.sparse.csr_matrix(
106 (np.ones(targets.size), targets.flatten(),
107 np.arange(targets.shape[0] + 1) * targets.shape[1]),
108 (targets.shape[0], self._max_labels)
109 )
110 else:
111 one_hot = np.zeros(targets.shape + (self._max_labels,),
112 dtype=self._dtype)
113 shape = (np.prod(one_hot.shape[:-1]), one_hot.shape[-1])
114 one_hot.reshape(shape)[np.arange(shape[0]), targets.flatten()] = 1
115 if mode == 'concatenate':
116 shape = one_hot.shape[-3:-2] + (reduce(mul,
117 one_hot.shape[-2:], 1),)
118 one_hot = one_hot.reshape(shape)
119 elif mode == 'merge':
120 one_hot = np.minimum(one_hot.sum(axis=one_hot.ndim - 2), 1)
121 return one_hot
122
123 def theano_expr(self, targets, mode='stack', sparse=False):
124 """
125 Return the one-hot transformation as a symbolic expression.
126 If labels appear multiple times, their value in the one-hot
127 vector is incremented.
128
129 Parameters
130 ----------
131 targets : tensor_like, 1- or 2-dimensional, integer dtype
132 A symbolic tensor representing labels as integers
133 between 0 and `max_labels` - 1, `max_labels` supplied
134 at formatter construction.
135 mode : string
136 The way in which to convert the labels to arrays. Takes
137 three different options:
138
139 - "concatenate" : concatenates the one-hot vectors from
140 multiple labels
141 - "stack" : returns a matrix where each row is the
142 one-hot vector of a label
143 - "merge" : merges the one-hot vectors together to
144 form a vector where the elements are
145 the result of an indicator function
146 NB: As the result of an indicator function
147 the result is the same in case a label
148 is duplicated in the input.
149 sparse : bool
150 If true then the return value is sparse matrix. Note that
151 if sparse is True, then mode cannot be 'stack' because
152 sparse matrices need to be 2D
153
154 Returns
155 -------
156 one_hot : TensorVariable, 1, 2 or 3-dimensional, sparse or dense
157 A symbolic tensor representing a one-hot encoding of the
158 supplied labels.
159 """
160 if mode not in ('concatenate', 'stack', 'merge'):
161 raise ValueError("%s got bad mode argument '%s'" %
162 (self.__class__.__name__, str(self._max_labels)))
163 elif mode == 'stack' and sparse:
164 raise ValueError("Sparse matrices need to be 2D, hence they"
165 "cannot be stacked")
166 squeeze_required = False
167 if targets.ndim != 2:
168 if targets.ndim == 1:
169 squeeze_required = True
170 targets = targets.dimshuffle('x', 0)
171 else:
172 raise ValueError("targets tensor must be 1 or 2-dimensional")
173 if 'int' not in str(targets.dtype):
174 raise TypeError("need an integer tensor for targets")
175 if sparse:
176 if mode == 'concatenate':
177 one_hot = theano.sparse.CSR(
178 tensor.ones_like(targets, dtype=self._dtype).flatten(),
179 (targets.flatten() + tensor.arange(targets.size) *
180 self._max_labels) % (self._max_labels * targets.shape[1]),
181 tensor.arange(targets.shape[0] + 1) * targets.shape[1],
182 tensor.stack(targets.shape[0],
183 self._max_labels * targets.shape[1])
184 )
185 else:
186 one_hot = theano.sparse.CSR(
187 tensor.ones_like(targets, dtype=self._dtype).flatten(),
188 targets.flatten(),
189 tensor.arange(targets.shape[0] + 1) * targets.shape[1],
190 tensor.stack(targets.shape[0], self._max_labels)
191 )
192 else:
193 if mode == 'concatenate':
194 one_hot = tensor.zeros((targets.shape[0] * targets.shape[1],
195 self._max_labels))
196 one_hot = tensor.set_subtensor(
197 one_hot[tensor.arange(targets.size),
198 targets.flatten()], 1)
199 one_hot = one_hot.reshape(
200 (targets.shape[0], targets.shape[1] * self._max_labels)
201 )
202 elif mode == 'merge':
203 one_hot = tensor.zeros((targets.shape[0], self._max_labels))
204 one_hot = tensor.set_subtensor(
205 one_hot[tensor.arange(targets.size) % targets.shape[0],
206 targets.T.flatten()], 1)
207 else:
208 one_hot = tensor.zeros((targets.shape[0], targets.shape[1],
209 self._max_labels))
210 one_hot = tensor.set_subtensor(one_hot[
211 tensor.arange(targets.shape[0]).reshape((targets.shape[0],
212 1)),
213 tensor.arange(targets.shape[1]),
214 targets
215 ], 1)
216 if squeeze_required:
217 if one_hot.ndim == 2:
218 one_hot = one_hot.reshape((one_hot.shape[1],))
219 if one_hot.ndim == 3:
220 one_hot = one_hot.reshape((one_hot.shape[1],
221 one_hot.shape[2]))
222 return one_hot
223
224
225 def convert_to_one_hot(integer_vector, dtype=None, max_labels=None,
226 mode='stack', sparse=False):
227 """
228 Formats a given array of target labels into a one-hot
229 vector.
230
231 Parameters
232 ----------
233 max_labels : int, optional
234 The number of possible classes/labels. This means that
235 all labels should be < max_labels. Example: For MNIST
236 there are 10 numbers and hence max_labels = 10. If not
237 given it defaults to max(integer_vector) + 1.
238 dtype : dtype, optional
239 The desired dtype for the converted one-hot vectors.
240 Defaults to config.floatX if not given.
241 integer_vector : ndarray
242 A 1D array of targets, or a batch (2D array) where
243 each row is a list of targets.
244 mode : string
245 The way in which to convert the labels to arrays. Takes
246 three different options:
247
248 - "concatenate" : concatenates the one-hot vectors from
249 multiple labels
250 - "stack" : returns a matrix where each row is the
251 one-hot vector of a label
252 - "merge" : merges the one-hot vectors together to
253 form a vector where the elements are
254 the result of an indicator function
255 sparse : bool
256 If true then the return value is sparse matrix. Note that
257 if sparse is True, then mode cannot be 'stack' because
258 sparse matrices need to be 2D
259
260 Returns
261 -------
262 one_hot : NumPy array
263 Can be 1D-3D depending on settings. Normally, the first axis are
264 the different batch items, the second axis the labels, the third
265 axis the one_hot vectors. Can be dense or sparse.
266 """
267 if dtype is None:
268 dtype = config.floatX
269 if isinstance(integer_vector, list):
270 integer_vector = np.array(integer_vector)
271 assert np.min(integer_vector) >= 0
272 assert integer_vector.ndim <= 2
273 if max_labels is None:
274 max_labels = max(integer_vector) + 1
275 return OneHotFormatter(max_labels, dtype=dtype).format(
276 integer_vector, mode=mode, sparse=sparse
277 )
278
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pylearn2/format/target_format.py b/pylearn2/format/target_format.py
--- a/pylearn2/format/target_format.py
+++ b/pylearn2/format/target_format.py
@@ -2,9 +2,12 @@
from operator import mul
import numpy as np
-import scipy
-import scipy.sparse
import theano.sparse
+if theano.sparse.enable_sparse:
+ scipy_available = True
+ import scipy.sparse
+else:
+ scipy_available = False
from theano import tensor, config
@@ -92,6 +95,10 @@
if 'int' not in str(targets.dtype):
raise TypeError("need an integer array for targets")
if sparse:
+ if not scipy_available:
+ raise RuntimeError("The converting of indices to a sparse "
+ "one-hot vector requires scipy to be "
+ "installed")
if mode == 'concatenate':
one_hot = scipy.sparse.csr_matrix(
(np.ones(targets.size, dtype=self._dtype),
|
{"golden_diff": "diff --git a/pylearn2/format/target_format.py b/pylearn2/format/target_format.py\n--- a/pylearn2/format/target_format.py\n+++ b/pylearn2/format/target_format.py\n@@ -2,9 +2,12 @@\n from operator import mul\n \n import numpy as np\n-import scipy\n-import scipy.sparse\n import theano.sparse\n+if theano.sparse.enable_sparse:\n+ scipy_available = True\n+ import scipy.sparse\n+else:\n+ scipy_available = False\n from theano import tensor, config\n \n \n@@ -92,6 +95,10 @@\n if 'int' not in str(targets.dtype):\n raise TypeError(\"need an integer array for targets\")\n if sparse:\n+ if not scipy_available:\n+ raise RuntimeError(\"The converting of indices to a sparse \"\n+ \"one-hot vector requires scipy to be \"\n+ \"installed\")\n if mode == 'concatenate':\n one_hot = scipy.sparse.csr_matrix(\n (np.ones(targets.size, dtype=self._dtype),\n", "issue": "target_format.py introduces hard dependence on scipy\nscipy imports are all meant to be guarded by try/except statements so you can run the core library without installing scipy\n\n", "before_files": [{"content": "\"\"\"Code for reformatting supervised learning targets.\"\"\"\nfrom operator import mul\n\nimport numpy as np\nimport scipy\nimport scipy.sparse\nimport theano.sparse\nfrom theano import tensor, config\n\n\nclass OneHotFormatter(object):\n \"\"\"\n A target formatter that transforms labels from integers in both single\n and batch mode.\n\n Parameters\n ----------\n max_labels : int\n The number of possible classes/labels. This means that all labels\n should be < max_labels. Example: For MNIST there are 10 numbers\n and hence max_labels = 10.\n dtype : dtype, optional\n The desired dtype for the converted one-hot vectors. Defaults to\n `config.floatX` if not given.\n \"\"\"\n def __init__(self, max_labels, dtype=None):\n \"\"\"\n Initializes the formatter given the number of max labels.\n \"\"\"\n try:\n np.empty(max_labels)\n except (ValueError, TypeError):\n raise ValueError(\"%s got bad max_labels argument '%s'\" %\n (self.__class__.__name__, str(max_labels)))\n self._max_labels = max_labels\n if dtype is None:\n self._dtype = config.floatX\n else:\n try:\n np.dtype(dtype)\n except TypeError:\n raise TypeError(\"%s got bad dtype identifier %s\" %\n (self.__class__.__name__, str(dtype)))\n self._dtype = dtype\n\n def format(self, targets, mode='stack', sparse=False):\n \"\"\"\n Formats a given array of target labels into a one-hot\n vector. If labels appear multiple times, their value\n in the one-hot vector is incremented.\n\n Parameters\n ----------\n targets : ndarray\n A 1D array of targets, or a batch (2D array) where\n each row is a list of targets.\n mode : string\n The way in which to convert the labels to arrays. Takes\n three different options:\n\n - \"concatenate\" : concatenates the one-hot vectors from\n multiple labels\n - \"stack\" : returns a matrix where each row is the\n one-hot vector of a label\n - \"merge\" : merges the one-hot vectors together to\n form a vector where the elements are\n the result of an indicator function\n NB: As the result of an indicator function\n the result is the same in case a label\n is duplicated in the input.\n sparse : bool\n If true then the return value is sparse matrix. 
Note that\n if sparse is True, then mode cannot be 'stack' because\n sparse matrices need to be 2D\n\n Returns\n -------\n one_hot : a NumPy array (can be 1D-3D depending on settings)\n where normally the first axis are the different batch items,\n the second axis the labels, the third axis the one_hot\n vectors. Can be dense or sparse.\n \"\"\"\n if mode not in ('concatenate', 'stack', 'merge'):\n raise ValueError(\"%s got bad mode argument '%s'\" %\n (self.__class__.__name__, str(self._max_labels)))\n elif mode == 'stack' and sparse:\n raise ValueError(\"Sparse matrices need to be 2D, hence they\"\n \"cannot be stacked\")\n if targets.ndim > 2:\n raise ValueError(\"Targets needs to be 1D or 2D, but received %d \"\n \"dimensions\" % targets.ndim)\n if 'int' not in str(targets.dtype):\n raise TypeError(\"need an integer array for targets\")\n if sparse:\n if mode == 'concatenate':\n one_hot = scipy.sparse.csr_matrix(\n (np.ones(targets.size, dtype=self._dtype),\n (targets.flatten() + np.arange(targets.size)\n * self._max_labels)\n % (self._max_labels * targets.shape[1]),\n np.arange(targets.shape[0] + 1) * targets.shape[1]),\n (targets.shape[0], self._max_labels * targets.shape[1])\n )\n elif mode == 'merge':\n one_hot = scipy.sparse.csr_matrix(\n (np.ones(targets.size), targets.flatten(),\n np.arange(targets.shape[0] + 1) * targets.shape[1]),\n (targets.shape[0], self._max_labels)\n )\n else:\n one_hot = np.zeros(targets.shape + (self._max_labels,),\n dtype=self._dtype)\n shape = (np.prod(one_hot.shape[:-1]), one_hot.shape[-1])\n one_hot.reshape(shape)[np.arange(shape[0]), targets.flatten()] = 1\n if mode == 'concatenate':\n shape = one_hot.shape[-3:-2] + (reduce(mul,\n one_hot.shape[-2:], 1),)\n one_hot = one_hot.reshape(shape)\n elif mode == 'merge':\n one_hot = np.minimum(one_hot.sum(axis=one_hot.ndim - 2), 1)\n return one_hot\n\n def theano_expr(self, targets, mode='stack', sparse=False):\n \"\"\"\n Return the one-hot transformation as a symbolic expression.\n If labels appear multiple times, their value in the one-hot\n vector is incremented.\n\n Parameters\n ----------\n targets : tensor_like, 1- or 2-dimensional, integer dtype\n A symbolic tensor representing labels as integers\n between 0 and `max_labels` - 1, `max_labels` supplied\n at formatter construction.\n mode : string\n The way in which to convert the labels to arrays. Takes\n three different options:\n\n - \"concatenate\" : concatenates the one-hot vectors from\n multiple labels\n - \"stack\" : returns a matrix where each row is the\n one-hot vector of a label\n - \"merge\" : merges the one-hot vectors together to\n form a vector where the elements are\n the result of an indicator function\n NB: As the result of an indicator function\n the result is the same in case a label\n is duplicated in the input.\n sparse : bool\n If true then the return value is sparse matrix. 
Note that\n if sparse is True, then mode cannot be 'stack' because\n sparse matrices need to be 2D\n\n Returns\n -------\n one_hot : TensorVariable, 1, 2 or 3-dimensional, sparse or dense\n A symbolic tensor representing a one-hot encoding of the\n supplied labels.\n \"\"\"\n if mode not in ('concatenate', 'stack', 'merge'):\n raise ValueError(\"%s got bad mode argument '%s'\" %\n (self.__class__.__name__, str(self._max_labels)))\n elif mode == 'stack' and sparse:\n raise ValueError(\"Sparse matrices need to be 2D, hence they\"\n \"cannot be stacked\")\n squeeze_required = False\n if targets.ndim != 2:\n if targets.ndim == 1:\n squeeze_required = True\n targets = targets.dimshuffle('x', 0)\n else:\n raise ValueError(\"targets tensor must be 1 or 2-dimensional\")\n if 'int' not in str(targets.dtype):\n raise TypeError(\"need an integer tensor for targets\")\n if sparse:\n if mode == 'concatenate':\n one_hot = theano.sparse.CSR(\n tensor.ones_like(targets, dtype=self._dtype).flatten(),\n (targets.flatten() + tensor.arange(targets.size) *\n self._max_labels) % (self._max_labels * targets.shape[1]),\n tensor.arange(targets.shape[0] + 1) * targets.shape[1],\n tensor.stack(targets.shape[0],\n self._max_labels * targets.shape[1])\n )\n else:\n one_hot = theano.sparse.CSR(\n tensor.ones_like(targets, dtype=self._dtype).flatten(),\n targets.flatten(),\n tensor.arange(targets.shape[0] + 1) * targets.shape[1],\n tensor.stack(targets.shape[0], self._max_labels)\n )\n else:\n if mode == 'concatenate':\n one_hot = tensor.zeros((targets.shape[0] * targets.shape[1],\n self._max_labels))\n one_hot = tensor.set_subtensor(\n one_hot[tensor.arange(targets.size),\n targets.flatten()], 1)\n one_hot = one_hot.reshape(\n (targets.shape[0], targets.shape[1] * self._max_labels)\n )\n elif mode == 'merge':\n one_hot = tensor.zeros((targets.shape[0], self._max_labels))\n one_hot = tensor.set_subtensor(\n one_hot[tensor.arange(targets.size) % targets.shape[0],\n targets.T.flatten()], 1)\n else:\n one_hot = tensor.zeros((targets.shape[0], targets.shape[1],\n self._max_labels))\n one_hot = tensor.set_subtensor(one_hot[\n tensor.arange(targets.shape[0]).reshape((targets.shape[0],\n 1)),\n tensor.arange(targets.shape[1]),\n targets\n ], 1)\n if squeeze_required:\n if one_hot.ndim == 2:\n one_hot = one_hot.reshape((one_hot.shape[1],))\n if one_hot.ndim == 3:\n one_hot = one_hot.reshape((one_hot.shape[1],\n one_hot.shape[2]))\n return one_hot\n\n\ndef convert_to_one_hot(integer_vector, dtype=None, max_labels=None,\n mode='stack', sparse=False):\n \"\"\"\n Formats a given array of target labels into a one-hot\n vector.\n\n Parameters\n ----------\n max_labels : int, optional\n The number of possible classes/labels. This means that\n all labels should be < max_labels. Example: For MNIST\n there are 10 numbers and hence max_labels = 10. If not\n given it defaults to max(integer_vector) + 1.\n dtype : dtype, optional\n The desired dtype for the converted one-hot vectors.\n Defaults to config.floatX if not given.\n integer_vector : ndarray\n A 1D array of targets, or a batch (2D array) where\n each row is a list of targets.\n mode : string\n The way in which to convert the labels to arrays. 
Takes\n three different options:\n\n - \"concatenate\" : concatenates the one-hot vectors from\n multiple labels\n - \"stack\" : returns a matrix where each row is the\n one-hot vector of a label\n - \"merge\" : merges the one-hot vectors together to\n form a vector where the elements are\n the result of an indicator function\n sparse : bool\n If true then the return value is sparse matrix. Note that\n if sparse is True, then mode cannot be 'stack' because\n sparse matrices need to be 2D\n\n Returns\n -------\n one_hot : NumPy array\n Can be 1D-3D depending on settings. Normally, the first axis are\n the different batch items, the second axis the labels, the third\n axis the one_hot vectors. Can be dense or sparse.\n \"\"\"\n if dtype is None:\n dtype = config.floatX\n if isinstance(integer_vector, list):\n integer_vector = np.array(integer_vector)\n assert np.min(integer_vector) >= 0\n assert integer_vector.ndim <= 2\n if max_labels is None:\n max_labels = max(integer_vector) + 1\n return OneHotFormatter(max_labels, dtype=dtype).format(\n integer_vector, mode=mode, sparse=sparse\n )\n", "path": "pylearn2/format/target_format.py"}], "after_files": [{"content": "\"\"\"Code for reformatting supervised learning targets.\"\"\"\nfrom operator import mul\n\nimport numpy as np\nimport theano.sparse\nif theano.sparse.enable_sparse:\n scipy_available = True\n import scipy.sparse\nelse:\n scipy_available = False\nfrom theano import tensor, config\n\n\nclass OneHotFormatter(object):\n \"\"\"\n A target formatter that transforms labels from integers in both single\n and batch mode.\n\n Parameters\n ----------\n max_labels : int\n The number of possible classes/labels. This means that all labels\n should be < max_labels. Example: For MNIST there are 10 numbers\n and hence max_labels = 10.\n dtype : dtype, optional\n The desired dtype for the converted one-hot vectors. Defaults to\n `config.floatX` if not given.\n \"\"\"\n def __init__(self, max_labels, dtype=None):\n \"\"\"\n Initializes the formatter given the number of max labels.\n \"\"\"\n try:\n np.empty(max_labels)\n except (ValueError, TypeError):\n raise ValueError(\"%s got bad max_labels argument '%s'\" %\n (self.__class__.__name__, str(max_labels)))\n self._max_labels = max_labels\n if dtype is None:\n self._dtype = config.floatX\n else:\n try:\n np.dtype(dtype)\n except TypeError:\n raise TypeError(\"%s got bad dtype identifier %s\" %\n (self.__class__.__name__, str(dtype)))\n self._dtype = dtype\n\n def format(self, targets, mode='stack', sparse=False):\n \"\"\"\n Formats a given array of target labels into a one-hot\n vector. If labels appear multiple times, their value\n in the one-hot vector is incremented.\n\n Parameters\n ----------\n targets : ndarray\n A 1D array of targets, or a batch (2D array) where\n each row is a list of targets.\n mode : string\n The way in which to convert the labels to arrays. Takes\n three different options:\n\n - \"concatenate\" : concatenates the one-hot vectors from\n multiple labels\n - \"stack\" : returns a matrix where each row is the\n one-hot vector of a label\n - \"merge\" : merges the one-hot vectors together to\n form a vector where the elements are\n the result of an indicator function\n NB: As the result of an indicator function\n the result is the same in case a label\n is duplicated in the input.\n sparse : bool\n If true then the return value is sparse matrix. 
Note that\n if sparse is True, then mode cannot be 'stack' because\n sparse matrices need to be 2D\n\n Returns\n -------\n one_hot : a NumPy array (can be 1D-3D depending on settings)\n where normally the first axis are the different batch items,\n the second axis the labels, the third axis the one_hot\n vectors. Can be dense or sparse.\n \"\"\"\n if mode not in ('concatenate', 'stack', 'merge'):\n raise ValueError(\"%s got bad mode argument '%s'\" %\n (self.__class__.__name__, str(self._max_labels)))\n elif mode == 'stack' and sparse:\n raise ValueError(\"Sparse matrices need to be 2D, hence they\"\n \"cannot be stacked\")\n if targets.ndim > 2:\n raise ValueError(\"Targets needs to be 1D or 2D, but received %d \"\n \"dimensions\" % targets.ndim)\n if 'int' not in str(targets.dtype):\n raise TypeError(\"need an integer array for targets\")\n if sparse:\n if not scipy_available:\n raise RuntimeError(\"The converting of indices to a sparse \"\n \"one-hot vector requires scipy to be \"\n \"installed\")\n if mode == 'concatenate':\n one_hot = scipy.sparse.csr_matrix(\n (np.ones(targets.size, dtype=self._dtype),\n (targets.flatten() + np.arange(targets.size)\n * self._max_labels)\n % (self._max_labels * targets.shape[1]),\n np.arange(targets.shape[0] + 1) * targets.shape[1]),\n (targets.shape[0], self._max_labels * targets.shape[1])\n )\n elif mode == 'merge':\n one_hot = scipy.sparse.csr_matrix(\n (np.ones(targets.size), targets.flatten(),\n np.arange(targets.shape[0] + 1) * targets.shape[1]),\n (targets.shape[0], self._max_labels)\n )\n else:\n one_hot = np.zeros(targets.shape + (self._max_labels,),\n dtype=self._dtype)\n shape = (np.prod(one_hot.shape[:-1]), one_hot.shape[-1])\n one_hot.reshape(shape)[np.arange(shape[0]), targets.flatten()] = 1\n if mode == 'concatenate':\n shape = one_hot.shape[-3:-2] + (reduce(mul,\n one_hot.shape[-2:], 1),)\n one_hot = one_hot.reshape(shape)\n elif mode == 'merge':\n one_hot = np.minimum(one_hot.sum(axis=one_hot.ndim - 2), 1)\n return one_hot\n\n def theano_expr(self, targets, mode='stack', sparse=False):\n \"\"\"\n Return the one-hot transformation as a symbolic expression.\n If labels appear multiple times, their value in the one-hot\n vector is incremented.\n\n Parameters\n ----------\n targets : tensor_like, 1- or 2-dimensional, integer dtype\n A symbolic tensor representing labels as integers\n between 0 and `max_labels` - 1, `max_labels` supplied\n at formatter construction.\n mode : string\n The way in which to convert the labels to arrays. Takes\n three different options:\n\n - \"concatenate\" : concatenates the one-hot vectors from\n multiple labels\n - \"stack\" : returns a matrix where each row is the\n one-hot vector of a label\n - \"merge\" : merges the one-hot vectors together to\n form a vector where the elements are\n the result of an indicator function\n NB: As the result of an indicator function\n the result is the same in case a label\n is duplicated in the input.\n sparse : bool\n If true then the return value is sparse matrix. 
Note that\n if sparse is True, then mode cannot be 'stack' because\n sparse matrices need to be 2D\n\n Returns\n -------\n one_hot : TensorVariable, 1, 2 or 3-dimensional, sparse or dense\n A symbolic tensor representing a one-hot encoding of the\n supplied labels.\n \"\"\"\n if mode not in ('concatenate', 'stack', 'merge'):\n raise ValueError(\"%s got bad mode argument '%s'\" %\n (self.__class__.__name__, str(self._max_labels)))\n elif mode == 'stack' and sparse:\n raise ValueError(\"Sparse matrices need to be 2D, hence they\"\n \"cannot be stacked\")\n squeeze_required = False\n if targets.ndim != 2:\n if targets.ndim == 1:\n squeeze_required = True\n targets = targets.dimshuffle('x', 0)\n else:\n raise ValueError(\"targets tensor must be 1 or 2-dimensional\")\n if 'int' not in str(targets.dtype):\n raise TypeError(\"need an integer tensor for targets\")\n if sparse:\n if mode == 'concatenate':\n one_hot = theano.sparse.CSR(\n tensor.ones_like(targets, dtype=self._dtype).flatten(),\n (targets.flatten() + tensor.arange(targets.size) *\n self._max_labels) % (self._max_labels * targets.shape[1]),\n tensor.arange(targets.shape[0] + 1) * targets.shape[1],\n tensor.stack(targets.shape[0],\n self._max_labels * targets.shape[1])\n )\n else:\n one_hot = theano.sparse.CSR(\n tensor.ones_like(targets, dtype=self._dtype).flatten(),\n targets.flatten(),\n tensor.arange(targets.shape[0] + 1) * targets.shape[1],\n tensor.stack(targets.shape[0], self._max_labels)\n )\n else:\n if mode == 'concatenate':\n one_hot = tensor.zeros((targets.shape[0] * targets.shape[1],\n self._max_labels))\n one_hot = tensor.set_subtensor(\n one_hot[tensor.arange(targets.size),\n targets.flatten()], 1)\n one_hot = one_hot.reshape(\n (targets.shape[0], targets.shape[1] * self._max_labels)\n )\n elif mode == 'merge':\n one_hot = tensor.zeros((targets.shape[0], self._max_labels))\n one_hot = tensor.set_subtensor(\n one_hot[tensor.arange(targets.size) % targets.shape[0],\n targets.T.flatten()], 1)\n else:\n one_hot = tensor.zeros((targets.shape[0], targets.shape[1],\n self._max_labels))\n one_hot = tensor.set_subtensor(one_hot[\n tensor.arange(targets.shape[0]).reshape((targets.shape[0],\n 1)),\n tensor.arange(targets.shape[1]),\n targets\n ], 1)\n if squeeze_required:\n if one_hot.ndim == 2:\n one_hot = one_hot.reshape((one_hot.shape[1],))\n if one_hot.ndim == 3:\n one_hot = one_hot.reshape((one_hot.shape[1],\n one_hot.shape[2]))\n return one_hot\n\n\ndef convert_to_one_hot(integer_vector, dtype=None, max_labels=None,\n mode='stack', sparse=False):\n \"\"\"\n Formats a given array of target labels into a one-hot\n vector.\n\n Parameters\n ----------\n max_labels : int, optional\n The number of possible classes/labels. This means that\n all labels should be < max_labels. Example: For MNIST\n there are 10 numbers and hence max_labels = 10. If not\n given it defaults to max(integer_vector) + 1.\n dtype : dtype, optional\n The desired dtype for the converted one-hot vectors.\n Defaults to config.floatX if not given.\n integer_vector : ndarray\n A 1D array of targets, or a batch (2D array) where\n each row is a list of targets.\n mode : string\n The way in which to convert the labels to arrays. 
Takes\n three different options:\n\n - \"concatenate\" : concatenates the one-hot vectors from\n multiple labels\n - \"stack\" : returns a matrix where each row is the\n one-hot vector of a label\n - \"merge\" : merges the one-hot vectors together to\n form a vector where the elements are\n the result of an indicator function\n sparse : bool\n If true then the return value is sparse matrix. Note that\n if sparse is True, then mode cannot be 'stack' because\n sparse matrices need to be 2D\n\n Returns\n -------\n one_hot : NumPy array\n Can be 1D-3D depending on settings. Normally, the first axis are\n the different batch items, the second axis the labels, the third\n axis the one_hot vectors. Can be dense or sparse.\n \"\"\"\n if dtype is None:\n dtype = config.floatX\n if isinstance(integer_vector, list):\n integer_vector = np.array(integer_vector)\n assert np.min(integer_vector) >= 0\n assert integer_vector.ndim <= 2\n if max_labels is None:\n max_labels = max(integer_vector) + 1\n return OneHotFormatter(max_labels, dtype=dtype).format(\n integer_vector, mode=mode, sparse=sparse\n )\n", "path": "pylearn2/format/target_format.py"}]}
| 3,478 | 219 |
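The `lisa-lab__pylearn2-1040` patch above makes scipy optional by keying the import off `theano.sparse.enable_sparse` and raising only when a sparse one-hot is actually requested. A generic, self-contained sketch of that guarded-import pattern follows; the function name, error message, and plain try/except guard are illustrative choices, not pylearn2's own code.

```python
import numpy as np

# Guarded optional dependency: the issue asks for scipy imports to sit behind
# try/except so the core library still works without scipy installed.
try:
    import scipy.sparse
    scipy_available = True
except ImportError:
    scipy_available = False


def sparse_one_hot(labels, max_labels):
    """Return a CSR one-hot matrix, failing loudly only on the sparse path."""
    if not scipy_available:
        raise RuntimeError("scipy is required for sparse one-hot output; "
                           "install scipy or use a dense encoding instead")
    labels = np.asarray(labels)
    data = np.ones(labels.size)
    indptr = np.arange(labels.size + 1)
    # One nonzero per row: row i has a 1 in column labels[i].
    return scipy.sparse.csr_matrix((data, labels, indptr),
                                   shape=(labels.size, max_labels))


# Example: sparse_one_hot([2, 0, 1], max_labels=3).toarray()
# -> [[0., 0., 1.], [1., 0., 0.], [0., 1., 0.]]
```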
gh_patches_debug_926
|
rasdani/github-patches
|
git_diff
|
Pyomo__pyomo-429
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Review objects exposed by environ
At the request of @jsiirola after I brought this to his attention, some Pyomo objects are not exposed by environ that would otherwise be expected. One that I have encountered is `TerminationCondition`, which needs to be imported from `pyomo.opt`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyomo/environ/__init__.py`
Content:
```
1 # ___________________________________________________________________________
2 #
3 # Pyomo: Python Optimization Modeling Objects
4 # Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
5 # Under the terms of Contract DE-NA0003525 with National Technology and
6 # Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
7 # rights in this software.
8 # This software is distributed under the 3-clause BSD License.
9 # ___________________________________________________________________________
10
11 import sys as _sys
12 if _sys.version_info[0] >= 3:
13 import importlib
14
15 def _do_import(pkg_name):
16 importlib.import_module(pkg_name)
17 else:
18 def _do_import(pkg_name):
19 __import__(pkg_name, globals(), locals(), [], -1)
20
21 #
22 # These packages contain plugins that need to be loaded
23 #
24 _packages = [
25 'pyomo.opt',
26 'pyomo.core',
27 'pyomo.checker',
28 'pyomo.repn',
29 'pyomo.pysp',
30 'pyomo.neos',
31 'pyomo.solvers',
32 'pyomo.gdp',
33 'pyomo.mpec',
34 'pyomo.dae',
35 'pyomo.bilevel',
36 'pyomo.scripting',
37 ]
38 #
39 #
40 # These packages also contain plugins that need to be loaded, but
41 # we silently ignore any import errors because these
42 # packages are optional and/or under development.
43 #
44 _optional_packages = set([
45 'pyomo.contrib.example',
46 'pyomo.contrib.preprocessing',
47 'pyomo.contrib.gdpopt',
48 'pyomo.contrib.trustregion',
49 ])
50
51
52 def _import_packages():
53 #
54 # Import required packages
55 #
56 for name in _packages:
57 pname = name+'.plugins'
58 try:
59 _do_import(pname)
60 except ImportError:
61 exctype, err, tb = _sys.exc_info() # BUG?
62 import traceback
63 msg = "pyomo.environ failed to import %s:\nOriginal %s: %s\n"\
64 "Traceback:\n%s" \
65 % (pname, exctype.__name__, err,
66 ''.join(traceback.format_tb(tb)),)
67 # clear local variables to remove circular references
68 exctype = err = tb = None
69 # TODO: Should this just log an error and re-raise the
70 # original exception?
71 raise ImportError(msg)
72
73 pkg = _sys.modules[pname]
74 pkg.load()
75 #
76 # Import optional packages
77 #
78 for name in _optional_packages:
79 pname = name+'.plugins'
80 try:
81 _do_import(pname)
82 except ImportError:
83 continue
84 pkg = _sys.modules[pname]
85 pkg.load()
86
87 from pyomo.util.plugin import PluginGlobals as _PG
88 _PG.add_env("pyomo")
89 _import_packages()
90 _PG.pop_env()
91
92 #
93 # Expose the symbols from pyomo.core
94 #
95 from pyomo.core import *
96 from pyomo.opt import SolverFactory, SolverManagerFactory, UnknownSolver
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pyomo/environ/__init__.py b/pyomo/environ/__init__.py
--- a/pyomo/environ/__init__.py
+++ b/pyomo/environ/__init__.py
@@ -93,4 +93,7 @@
# Expose the symbols from pyomo.core
#
from pyomo.core import *
-from pyomo.opt import SolverFactory, SolverManagerFactory, UnknownSolver
+from pyomo.opt import (
+ SolverFactory, SolverManagerFactory, UnknownSolver,
+ TerminationCondition, SolverStatus,
+)
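For context on what this patch buys downstream code: once applied, both enums resolve straight from `pyomo.environ`. The snippet below is an illustrative check only, not part of the record; it assumes a Pyomo installation that includes this change, and `optimal` / `ok` are standard members of these enums.
```python
# Illustrative check, assuming a Pyomo install that includes this patch.
from pyomo.environ import SolverStatus, TerminationCondition

# Both names now resolve from the top-level modeling namespace, so user
# code no longer needs an extra `from pyomo.opt import ...` line.
print(TerminationCondition.optimal)
print(SolverStatus.ok)
```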
|
{"golden_diff": "diff --git a/pyomo/environ/__init__.py b/pyomo/environ/__init__.py\n--- a/pyomo/environ/__init__.py\n+++ b/pyomo/environ/__init__.py\n@@ -93,4 +93,7 @@\n # Expose the symbols from pyomo.core\n #\n from pyomo.core import *\n-from pyomo.opt import SolverFactory, SolverManagerFactory, UnknownSolver\n+from pyomo.opt import (\n+ SolverFactory, SolverManagerFactory, UnknownSolver,\n+ TerminationCondition, SolverStatus,\n+)\n", "issue": "Review objects exposed by environ\nAt the request of @jsiirola after I brought this to his attention, some Pyomo objects are not exposed by environ that would otherwise be expected. One that I have encountered is `TerminationCondition`, which needs to be imported from `pyomo.opt`.\n", "before_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\nimport sys as _sys\nif _sys.version_info[0] >= 3:\n import importlib\n\n def _do_import(pkg_name):\n importlib.import_module(pkg_name)\nelse:\n def _do_import(pkg_name):\n __import__(pkg_name, globals(), locals(), [], -1)\n\n#\n# These packages contain plugins that need to be loaded\n#\n_packages = [\n 'pyomo.opt',\n 'pyomo.core',\n 'pyomo.checker',\n 'pyomo.repn',\n 'pyomo.pysp',\n 'pyomo.neos',\n 'pyomo.solvers',\n 'pyomo.gdp',\n 'pyomo.mpec',\n 'pyomo.dae',\n 'pyomo.bilevel',\n 'pyomo.scripting',\n]\n#\n#\n# These packages also contain plugins that need to be loaded, but\n# we silently ignore any import errors because these\n# packages are optional and/or under development.\n#\n_optional_packages = set([\n 'pyomo.contrib.example',\n 'pyomo.contrib.preprocessing',\n 'pyomo.contrib.gdpopt',\n 'pyomo.contrib.trustregion',\n])\n\n\ndef _import_packages():\n #\n # Import required packages\n #\n for name in _packages:\n pname = name+'.plugins'\n try:\n _do_import(pname)\n except ImportError:\n exctype, err, tb = _sys.exc_info() # BUG?\n import traceback\n msg = \"pyomo.environ failed to import %s:\\nOriginal %s: %s\\n\"\\\n \"Traceback:\\n%s\" \\\n % (pname, exctype.__name__, err,\n ''.join(traceback.format_tb(tb)),)\n # clear local variables to remove circular references\n exctype = err = tb = None\n # TODO: Should this just log an error and re-raise the\n # original exception?\n raise ImportError(msg)\n\n pkg = _sys.modules[pname]\n pkg.load()\n #\n # Import optional packages\n #\n for name in _optional_packages:\n pname = name+'.plugins'\n try:\n _do_import(pname)\n except ImportError:\n continue\n pkg = _sys.modules[pname]\n pkg.load()\n\nfrom pyomo.util.plugin import PluginGlobals as _PG\n_PG.add_env(\"pyomo\")\n_import_packages()\n_PG.pop_env()\n\n#\n# Expose the symbols from pyomo.core\n#\nfrom pyomo.core import *\nfrom pyomo.opt import SolverFactory, SolverManagerFactory, UnknownSolver\n", "path": "pyomo/environ/__init__.py"}], "after_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with 
National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\nimport sys as _sys\nif _sys.version_info[0] >= 3:\n import importlib\n\n def _do_import(pkg_name):\n importlib.import_module(pkg_name)\nelse:\n def _do_import(pkg_name):\n __import__(pkg_name, globals(), locals(), [], -1)\n\n#\n# These packages contain plugins that need to be loaded\n#\n_packages = [\n 'pyomo.opt',\n 'pyomo.core',\n 'pyomo.checker',\n 'pyomo.repn',\n 'pyomo.pysp',\n 'pyomo.neos',\n 'pyomo.solvers',\n 'pyomo.gdp',\n 'pyomo.mpec',\n 'pyomo.dae',\n 'pyomo.bilevel',\n 'pyomo.scripting',\n]\n#\n#\n# These packages also contain plugins that need to be loaded, but\n# we silently ignore any import errors because these\n# packages are optional and/or under development.\n#\n_optional_packages = set([\n 'pyomo.contrib.example',\n 'pyomo.contrib.preprocessing',\n 'pyomo.contrib.gdpopt',\n 'pyomo.contrib.trustregion',\n])\n\n\ndef _import_packages():\n #\n # Import required packages\n #\n for name in _packages:\n pname = name+'.plugins'\n try:\n _do_import(pname)\n except ImportError:\n exctype, err, tb = _sys.exc_info() # BUG?\n import traceback\n msg = \"pyomo.environ failed to import %s:\\nOriginal %s: %s\\n\"\\\n \"Traceback:\\n%s\" \\\n % (pname, exctype.__name__, err,\n ''.join(traceback.format_tb(tb)),)\n # clear local variables to remove circular references\n exctype = err = tb = None\n # TODO: Should this just log an error and re-raise the\n # original exception?\n raise ImportError(msg)\n\n pkg = _sys.modules[pname]\n pkg.load()\n #\n # Import optional packages\n #\n for name in _optional_packages:\n pname = name+'.plugins'\n try:\n _do_import(pname)\n except ImportError:\n continue\n pkg = _sys.modules[pname]\n pkg.load()\n\nfrom pyomo.util.plugin import PluginGlobals as _PG\n_PG.add_env(\"pyomo\")\n_import_packages()\n_PG.pop_env()\n\n#\n# Expose the symbols from pyomo.core\n#\nfrom pyomo.core import *\nfrom pyomo.opt import (\n SolverFactory, SolverManagerFactory, UnknownSolver,\n TerminationCondition, SolverStatus,\n)\n", "path": "pyomo/environ/__init__.py"}]}
| 1,153 | 118 |
gh_patches_debug_27512
|
rasdani/github-patches
|
git_diff
|
blakeblackshear__frigate-5133
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Config Support]: Docker fails to start with KeyError: 'go2rtc'
### Describe the problem you are having
Starting an instance of frigate with a minimal config gives error `KeyError: 'go2rtc'`
Config file was based on the startup walk-through from the docs: https://deploy-preview-4055--frigate-docs.netlify.app/guides/getting_started
Not sure if it's a docs or build issue. Also tried putting in a basic restream config and got the same error.
### Version
v0.12.0-beta4
### Frigate config file
```yaml
mqtt:
enabled: false
cameras:
camera_1:
ffmpeg:
inputs:
- path: rtsp://10.0.20.102:554/s0
roles:
- detect
hwaccel_args: -c:v h264_cuvid
detect:
width: 1920
height: 1080
```
### Relevant log output
```shell
docker-compose up
Pulling frigate (ghcr.io/blakeblackshear/frigate:0.12.0-beta4-tensorrt)...
0.12.0-beta4-tensorrt: Pulling from blakeblackshear/frigate
8740c948ffd4: Pull complete
b84a1ada9828: Pull complete
832b257640b7: Pull complete
60beb73d7e76: Pull complete
8e29f3174a08: Pull complete
4b46f368fd71: Pull complete
b651bc39c0a3: Pull complete
43b988f71200: Pull complete
Digest: sha256:7010839f6794a5d21a98bc9088400fdc3c382e089539f0409e0cec1ca64473fa
Status: Downloaded newer image for ghcr.io/blakeblackshear/frigate:0.12.0-beta4-tensorrt
Creating frigate ... done
Attaching to frigate
frigate | s6-rc: info: service s6rc-oneshot-runner: starting
frigate | s6-rc: info: service s6rc-oneshot-runner successfully started
frigate | s6-rc: info: service fix-attrs: starting
frigate | s6-rc: info: service fix-attrs successfully started
frigate | s6-rc: info: service legacy-cont-init: starting
frigate | cont-init: info: running /etc/cont-init.d/prepare-logs.sh
frigate | cont-init: info: /etc/cont-init.d/prepare-logs.sh exited 0
frigate | s6-rc: info: service legacy-cont-init successfully started
frigate | s6-rc: info: service legacy-services: starting
frigate | services-up: info: copying legacy longrun frigate (no readiness notification)
frigate | services-up: info: copying legacy longrun go2rtc (no readiness notification)
frigate | services-up: info: copying legacy longrun nginx (no readiness notification)
frigate | s6-rc: info: service legacy-services successfully started
frigate | Traceback (most recent call last):
frigate | File "/usr/local/go2rtc/create_config.py", line 23, in <module>
frigate | go2rtc_config: dict[str, any] = config["go2rtc"]
frigate | KeyError: 'go2rtc'
frigate | s6-rc: info: service legacy-services: stopping
frigate | s6-svwait: fatal: supervisor died
frigate | s6-rc: info: service legacy-services successfully stopped
frigate | s6-rc: info: service legacy-cont-init: stopping
frigate | s6-rc: info: service legacy-cont-init successfully stopped
frigate | s6-rc: info: service fix-attrs: stopping
frigate | s6-rc: info: service fix-attrs successfully stopped
frigate | s6-rc: info: service s6rc-oneshot-runner: stopping
frigate | s6-rc: info: service s6rc-oneshot-runner successfully stopped
```
### Frigate stats
```json
N/A Frigate doesn't start
```
### Operating system
Other Linux
### Install method
Docker Compose
### Coral version
CPU (no coral)
### Any other information that may be helpful
Using a basic restream config:
``` yaml
mqtt:
enabled: false
#detectors:
#tensorrt:
# type: tensorrt
# device: 0 #This is the default, select the first GPU
go2rtc:
streams:
test_cam: ffmpeg:rtsp://10.0.20.102:554/s0
cameras:
camera_1:
ffmpeg:
inputs:
- path: rtsp://127.0.0.1:8554/test_cam?video=copy
input_args: preset-rtsp-restream
roles:
- detect
hwaccel_args: -c:v h264_cuvid
detect:
width: 1920
height: 1080
```
results in the same error
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/rootfs/usr/local/go2rtc/create_config.py`
Content:
```
1 """Creates a go2rtc config file."""
2
3 import json
4 import os
5 import yaml
6
7
8 config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
9
10 # Check if we can use .yaml instead of .yml
11 config_file_yaml = config_file.replace(".yml", ".yaml")
12 if os.path.isfile(config_file_yaml):
13 config_file = config_file_yaml
14
15 with open(config_file) as f:
16 raw_config = f.read()
17
18 if config_file.endswith((".yaml", ".yml")):
19 config = yaml.safe_load(raw_config)
20 elif config_file.endswith(".json"):
21 config = json.loads(raw_config)
22
23 go2rtc_config: dict[str, any] = config["go2rtc"]
24
25 if not go2rtc_config.get("log", {}).get("format"):
26 go2rtc_config["log"] = {"format": "text"}
27
28 if not go2rtc_config.get("webrtc", {}).get("candidates", []):
29 go2rtc_config["webrtc"] = {"candidates": ["stun:8555"]}
30
31 print(json.dumps(go2rtc_config))
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docker/rootfs/usr/local/go2rtc/create_config.py b/docker/rootfs/usr/local/go2rtc/create_config.py
--- a/docker/rootfs/usr/local/go2rtc/create_config.py
+++ b/docker/rootfs/usr/local/go2rtc/create_config.py
@@ -5,6 +5,7 @@
import yaml
+BTBN_PATH = "/usr/lib/btbn-ffmpeg"
config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
# Check if we can use .yaml instead of .yml
@@ -20,12 +21,27 @@
elif config_file.endswith(".json"):
config = json.loads(raw_config)
-go2rtc_config: dict[str, any] = config["go2rtc"]
+go2rtc_config: dict[str, any] = config.get("go2rtc", {})
-if not go2rtc_config.get("log", {}).get("format"):
+# we want to ensure that logs are easy to read
+if go2rtc_config.get("log") is None:
go2rtc_config["log"] = {"format": "text"}
+elif go2rtc_config["log"].get("format") is None:
+ go2rtc_config["log"]["format"] = "text"
+# should set default stun server so webrtc can work
if not go2rtc_config.get("webrtc", {}).get("candidates", []):
go2rtc_config["webrtc"] = {"candidates": ["stun:8555"]}
-print(json.dumps(go2rtc_config))
\ No newline at end of file
+# need to replace ffmpeg command when using ffmpeg4
+if not os.path.exists(BTBN_PATH):
+ if go2rtc_config.get("ffmpeg") is None:
+ go2rtc_config["ffmpeg"] = {
+ "rtsp": "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}"
+ }
+ elif go2rtc_config["ffmpeg"].get("rtsp") is None:
+ go2rtc_config["ffmpeg"][
+ "rtsp"
+ ] = "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}"
+
+print(json.dumps(go2rtc_config))
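The heart of the fix is replacing direct indexing with `dict.get` and an empty-dict default, so a config with no `go2rtc` section no longer raises `KeyError`. A minimal self-contained sketch of that pattern follows; the `config` value here is a stand-in, not taken from the record.
```python
# Stand-in config with no "go2rtc" section, mirroring the minimal setup above.
config = {"mqtt": {"enabled": False}}

# Old behaviour: config["go2rtc"] -> KeyError: 'go2rtc'
go2rtc_config = config.get("go2rtc", {})  # new behaviour: empty dict fallback

# Defaults can then be applied safely on the (possibly empty) dict.
if go2rtc_config.get("log") is None:
    go2rtc_config["log"] = {"format": "text"}

print(go2rtc_config)  # {'log': {'format': 'text'}}
```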
|
{"golden_diff": "diff --git a/docker/rootfs/usr/local/go2rtc/create_config.py b/docker/rootfs/usr/local/go2rtc/create_config.py\n--- a/docker/rootfs/usr/local/go2rtc/create_config.py\n+++ b/docker/rootfs/usr/local/go2rtc/create_config.py\n@@ -5,6 +5,7 @@\n import yaml\n \n \n+BTBN_PATH = \"/usr/lib/btbn-ffmpeg\"\n config_file = os.environ.get(\"CONFIG_FILE\", \"/config/config.yml\")\n \n # Check if we can use .yaml instead of .yml\n@@ -20,12 +21,27 @@\n elif config_file.endswith(\".json\"):\n config = json.loads(raw_config)\n \n-go2rtc_config: dict[str, any] = config[\"go2rtc\"]\n+go2rtc_config: dict[str, any] = config.get(\"go2rtc\", {})\n \n-if not go2rtc_config.get(\"log\", {}).get(\"format\"):\n+# we want to ensure that logs are easy to read\n+if go2rtc_config.get(\"log\") is None:\n go2rtc_config[\"log\"] = {\"format\": \"text\"}\n+elif go2rtc_config[\"log\"].get(\"format\") is None:\n+ go2rtc_config[\"log\"][\"format\"] = \"text\"\n \n+# should set default stun server so webrtc can work\n if not go2rtc_config.get(\"webrtc\", {}).get(\"candidates\", []):\n go2rtc_config[\"webrtc\"] = {\"candidates\": [\"stun:8555\"]}\n \n-print(json.dumps(go2rtc_config))\n\\ No newline at end of file\n+# need to replace ffmpeg command when using ffmpeg4\n+if not os.path.exists(BTBN_PATH):\n+ if go2rtc_config.get(\"ffmpeg\") is None:\n+ go2rtc_config[\"ffmpeg\"] = {\n+ \"rtsp\": \"-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}\"\n+ }\n+ elif go2rtc_config[\"ffmpeg\"].get(\"rtsp\") is None:\n+ go2rtc_config[\"ffmpeg\"][\n+ \"rtsp\"\n+ ] = \"-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}\"\n+\n+print(json.dumps(go2rtc_config))\n", "issue": "[Config Support]: Docker fails to start with KeyError: 'go2rtc'\n### Describe the problem you are having\n\nStarting an instance of frigate with a minimal config gives error `KeyError: 'go2rtc'`\r\n\r\nConfig file was based on the startup walk though from DOCs https://deploy-preview-4055--frigate-docs.netlify.app/guides/getting_started\r\n\r\nNot sure if its a doc or build issue. Also tried putting in basic restream config and getting same error\n\n### Version\n\nv0.12.0-beta4\n\n### Frigate config file\n\n```yaml\nmqtt:\r\n enabled: false\r\n\r\ncameras:\r\n camera_1:\r\n ffmpeg:\r\n inputs:\r\n - path: rtsp://10.0.20.102:554/s0\r\n roles:\r\n - detect\r\n hwaccel_args: -c:v h264_cuvid\r\n detect:\r\n width: 1920\r\n height: 1080\n```\n\n\n### Relevant log output\n\n```shell\ndocker-compose up\r\nPulling frigate (ghcr.io/blakeblackshear/frigate:0.12.0-beta4-tensorrt)...\r\n0.12.0-beta4-tensorrt: Pulling from blakeblackshear/frigate\r\n8740c948ffd4: Pull complete\r\nb84a1ada9828: Pull complete\r\n832b257640b7: Pull complete\r\n60beb73d7e76: Pull complete\r\n8e29f3174a08: Pull complete\r\n4b46f368fd71: Pull complete\r\nb651bc39c0a3: Pull complete\r\n43b988f71200: Pull complete\r\nDigest: sha256:7010839f6794a5d21a98bc9088400fdc3c382e089539f0409e0cec1ca64473fa\r\nStatus: Downloaded newer image for ghcr.io/blakeblackshear/frigate:0.12.0-beta4-tensorrt\r\nCreating frigate ... 
done\r\nAttaching to frigate\r\nfrigate | s6-rc: info: service s6rc-oneshot-runner: starting\r\nfrigate | s6-rc: info: service s6rc-oneshot-runner successfully started\r\nfrigate | s6-rc: info: service fix-attrs: starting\r\nfrigate | s6-rc: info: service fix-attrs successfully started\r\nfrigate | s6-rc: info: service legacy-cont-init: starting\r\nfrigate | cont-init: info: running /etc/cont-init.d/prepare-logs.sh\r\nfrigate | cont-init: info: /etc/cont-init.d/prepare-logs.sh exited 0\r\nfrigate | s6-rc: info: service legacy-cont-init successfully started\r\nfrigate | s6-rc: info: service legacy-services: starting\r\nfrigate | services-up: info: copying legacy longrun frigate (no readiness notification)\r\nfrigate | services-up: info: copying legacy longrun go2rtc (no readiness notification)\r\nfrigate | services-up: info: copying legacy longrun nginx (no readiness notification)\r\nfrigate | s6-rc: info: service legacy-services successfully started\r\nfrigate | Traceback (most recent call last):\r\nfrigate | File \"/usr/local/go2rtc/create_config.py\", line 23, in <module>\r\nfrigate | go2rtc_config: dict[str, any] = config[\"go2rtc\"]\r\nfrigate | KeyError: 'go2rtc'\r\nfrigate | s6-rc: info: service legacy-services: stopping\r\nfrigate | s6-svwait: fatal: supervisor died\r\nfrigate | s6-rc: info: service legacy-services successfully stopped\r\nfrigate | s6-rc: info: service legacy-cont-init: stopping\r\nfrigate | s6-rc: info: service legacy-cont-init successfully stopped\r\nfrigate | s6-rc: info: service fix-attrs: stopping\r\nfrigate | s6-rc: info: service fix-attrs successfully stopped\r\nfrigate | s6-rc: info: service s6rc-oneshot-runner: stopping\r\nfrigate | s6-rc: info: service s6rc-oneshot-runner successfully stopped\n```\n\n\n### Frigate stats\n\n```json\nN/A Frigate doesn't start\n```\n\n\n### Operating system\n\nOther Linux\n\n### Install method\n\nDocker Compose\n\n### Coral version\n\nCPU (no coral)\n\n### Any other information that may be helpful\n\nUsing a basic restream config:\r\n``` yaml\r\nmqtt:\r\n enabled: false\r\n\r\n #detectors:\r\n #tensorrt:\r\n # type: tensorrt\r\n # device: 0 #This is the default, select the first GPU\r\n\r\ngo2rtc:\r\n streams:\r\n test_cam: ffmpeg:rtsp://10.0.20.102:554/s0\r\n\r\ncameras:\r\n camera_1:\r\n ffmpeg:\r\n inputs:\r\n - path: rtsp://127.0.0.1:8554/test_cam?video=copy\r\n input_args: preset-rtsp-restream\r\n roles:\r\n - detect\r\n hwaccel_args: -c:v h264_cuvid\r\n detect:\r\n width: 1920\r\n height: 1080\r\n```\r\nresults in the same error\n", "before_files": [{"content": "\"\"\"Creates a go2rtc config file.\"\"\"\n\nimport json\nimport os\nimport yaml\n\n\nconfig_file = os.environ.get(\"CONFIG_FILE\", \"/config/config.yml\")\n\n# Check if we can use .yaml instead of .yml\nconfig_file_yaml = config_file.replace(\".yml\", \".yaml\")\nif os.path.isfile(config_file_yaml):\n config_file = config_file_yaml\n\nwith open(config_file) as f:\n raw_config = f.read()\n\nif config_file.endswith((\".yaml\", \".yml\")):\n config = yaml.safe_load(raw_config)\nelif config_file.endswith(\".json\"):\n config = json.loads(raw_config)\n\ngo2rtc_config: dict[str, any] = config[\"go2rtc\"]\n\nif not go2rtc_config.get(\"log\", {}).get(\"format\"):\n go2rtc_config[\"log\"] = {\"format\": \"text\"}\n\nif not go2rtc_config.get(\"webrtc\", {}).get(\"candidates\", []):\n go2rtc_config[\"webrtc\"] = {\"candidates\": [\"stun:8555\"]}\n\nprint(json.dumps(go2rtc_config))", "path": "docker/rootfs/usr/local/go2rtc/create_config.py"}], "after_files": [{"content": 
"\"\"\"Creates a go2rtc config file.\"\"\"\n\nimport json\nimport os\nimport yaml\n\n\nBTBN_PATH = \"/usr/lib/btbn-ffmpeg\"\nconfig_file = os.environ.get(\"CONFIG_FILE\", \"/config/config.yml\")\n\n# Check if we can use .yaml instead of .yml\nconfig_file_yaml = config_file.replace(\".yml\", \".yaml\")\nif os.path.isfile(config_file_yaml):\n config_file = config_file_yaml\n\nwith open(config_file) as f:\n raw_config = f.read()\n\nif config_file.endswith((\".yaml\", \".yml\")):\n config = yaml.safe_load(raw_config)\nelif config_file.endswith(\".json\"):\n config = json.loads(raw_config)\n\ngo2rtc_config: dict[str, any] = config.get(\"go2rtc\", {})\n\n# we want to ensure that logs are easy to read\nif go2rtc_config.get(\"log\") is None:\n go2rtc_config[\"log\"] = {\"format\": \"text\"}\nelif go2rtc_config[\"log\"].get(\"format\") is None:\n go2rtc_config[\"log\"][\"format\"] = \"text\"\n\n# should set default stun server so webrtc can work\nif not go2rtc_config.get(\"webrtc\", {}).get(\"candidates\", []):\n go2rtc_config[\"webrtc\"] = {\"candidates\": [\"stun:8555\"]}\n\n# need to replace ffmpeg command when using ffmpeg4\nif not os.path.exists(BTBN_PATH):\n if go2rtc_config.get(\"ffmpeg\") is None:\n go2rtc_config[\"ffmpeg\"] = {\n \"rtsp\": \"-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}\"\n }\n elif go2rtc_config[\"ffmpeg\"].get(\"rtsp\") is None:\n go2rtc_config[\"ffmpeg\"][\n \"rtsp\"\n ] = \"-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}\"\n\nprint(json.dumps(go2rtc_config))\n", "path": "docker/rootfs/usr/local/go2rtc/create_config.py"}]}
| 1,797 | 518 |
gh_patches_debug_10359
|
rasdani/github-patches
|
git_diff
|
beetbox__beets-3805
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
keyfinder: Output parsing error
### Problem
Running this command in verbose (`-vv`) mode:
``` sh
$ beet -vv keyfinder anything
```
Led to this problem:
```
user configuration: /home/diomekes/.config/beets/config.yaml
data directory: /home/diomekes/.config/beets
plugin paths:
Sending event: pluginload
inline: adding item field disc_and_track
library database: /home/diomekes/.config/beets/library.db
library directory: /home/diomekes/media/music
Sending event: library_opened
Traceback (most recent call last):
File "/usr/bin/beet", line 9, in <module>
load_entry_point('beets==1.3.19', 'console_scripts', 'beet')()
File "/usr/lib/python2.7/site-packages/beets/ui/__init__.py", line 1266, in main
_raw_main(args)
File "/usr/lib/python2.7/site-packages/beets/ui/__init__.py", line 1253, in _raw_main
subcommand.func(lib, suboptions, subargs)
File "/usr/lib/python2.7/site-packages/beetsplug/keyfinder.py", line 48, in command
self.find_key(lib.items(ui.decargs(args)), write=ui.should_write())
File "/usr/lib/python2.7/site-packages/beetsplug/keyfinder.py", line 74, in find_key
key_raw = output.rsplit(None, 1)[-1]
IndexError: list index out of range
```
keyfinder-cli works if run directly
### Setup
- OS: archlinux
- Python version: 2.7.12
- beets version: 1.3.19
- Turning off plugins made problem go away (yes/no): problem is with keyfinder plugin only
- libkeyfinder-git 239.0a5ec7f-1
- keyfinder-cli-git 49.40a41ab-1
My configuration (output of `beet config`) is:
``` yaml
...
keyfinder:
bin: keyfinder-cli
auto: yes
overwrite: no
plugins: badfiles chroma convert duplicates fetchart fromfilename fuzzy info inline keyfinder lastgenre lyrics mbcollection mbsync missing play random scrub smartplaylist zero
...
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `beetsplug/keyfinder.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # This file is part of beets.
3 # Copyright 2016, Thomas Scholtes.
4 #
5 # Permission is hereby granted, free of charge, to any person obtaining
6 # a copy of this software and associated documentation files (the
7 # "Software"), to deal in the Software without restriction, including
8 # without limitation the rights to use, copy, modify, merge, publish,
9 # distribute, sublicense, and/or sell copies of the Software, and to
10 # permit persons to whom the Software is furnished to do so, subject to
11 # the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be
14 # included in all copies or substantial portions of the Software.
15
16 """Uses the `KeyFinder` program to add the `initial_key` field.
17 """
18
19 from __future__ import division, absolute_import, print_function
20
21 import os.path
22 import subprocess
23
24 from beets import ui
25 from beets import util
26 from beets.plugins import BeetsPlugin
27
28
29 class KeyFinderPlugin(BeetsPlugin):
30
31 def __init__(self):
32 super(KeyFinderPlugin, self).__init__()
33 self.config.add({
34 u'bin': u'KeyFinder',
35 u'auto': True,
36 u'overwrite': False,
37 })
38
39 if self.config['auto'].get(bool):
40 self.import_stages = [self.imported]
41
42 def commands(self):
43 cmd = ui.Subcommand('keyfinder',
44 help=u'detect and add initial key from audio')
45 cmd.func = self.command
46 return [cmd]
47
48 def command(self, lib, opts, args):
49 self.find_key(lib.items(ui.decargs(args)), write=ui.should_write())
50
51 def imported(self, session, task):
52 self.find_key(task.imported_items())
53
54 def find_key(self, items, write=False):
55 overwrite = self.config['overwrite'].get(bool)
56 command = [self.config['bin'].as_str()]
57 # The KeyFinder GUI program needs the -f flag before the path.
58 # keyfinder-cli is similar, but just wants the path with no flag.
59 if 'keyfinder-cli' not in os.path.basename(command[0]).lower():
60 command.append('-f')
61
62 for item in items:
63 if item['initial_key'] and not overwrite:
64 continue
65
66 try:
67 output = util.command_output(command + [util.syspath(
68 item.path)]).stdout
69 except (subprocess.CalledProcessError, OSError) as exc:
70 self._log.error(u'execution failed: {0}', exc)
71 continue
72 except UnicodeEncodeError:
73 # Workaround for Python 2 Windows bug.
74 # https://bugs.python.org/issue1759845
75 self._log.error(u'execution failed for Unicode path: {0!r}',
76 item.path)
77 continue
78
79 key_raw = output.rsplit(None, 1)[-1]
80 try:
81 key = util.text_string(key_raw)
82 except UnicodeDecodeError:
83 self._log.error(u'output is invalid UTF-8')
84 continue
85
86 item['initial_key'] = key
87 self._log.info(u'added computed initial key {0} for {1}',
88 key, util.displayable_path(item.path))
89
90 if write:
91 item.try_write()
92 item.store()
93
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/beetsplug/keyfinder.py b/beetsplug/keyfinder.py
--- a/beetsplug/keyfinder.py
+++ b/beetsplug/keyfinder.py
@@ -76,7 +76,14 @@
item.path)
continue
- key_raw = output.rsplit(None, 1)[-1]
+ try:
+ key_raw = output.rsplit(None, 1)[-1]
+ except IndexError:
+ # Sometimes keyfinder-cli returns 0 but with no key, usually
+ # when the file is silent or corrupt, so we log and skip.
+ self._log.error(u'no key returned for path: {0}', item.path)
+ continue
+
try:
key = util.text_string(key_raw)
except UnicodeDecodeError:
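The guarded failure is easy to reproduce in isolation: splitting an empty byte string yields an empty list, so indexing `[-1]` raises `IndexError`. A small standalone sketch follows; the `output` value is a stand-in for keyfinder-cli exiting 0 without printing a key.
```python
# Stand-in for keyfinder-cli exiting successfully but printing nothing.
output = b""

try:
    key_raw = output.rsplit(None, 1)[-1]
except IndexError:
    # b"".rsplit(None, 1) == [], so [-1] fails; skip the item instead of crashing.
    key_raw = None

print(key_raw)  # None
```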
|
{"golden_diff": "diff --git a/beetsplug/keyfinder.py b/beetsplug/keyfinder.py\n--- a/beetsplug/keyfinder.py\n+++ b/beetsplug/keyfinder.py\n@@ -76,7 +76,14 @@\n item.path)\n continue\n \n- key_raw = output.rsplit(None, 1)[-1]\n+ try:\n+ key_raw = output.rsplit(None, 1)[-1]\n+ except IndexError:\n+ # Sometimes keyfinder-cli returns 0 but with no key, usually\n+ # when the file is silent or corrupt, so we log and skip.\n+ self._log.error(u'no key returned for path: {0}', item.path)\n+ continue\n+\n try:\n key = util.text_string(key_raw)\n except UnicodeDecodeError:\n", "issue": "keyfinder: Output parsing error\n### Problem\n\nRunning this command in verbose (`-vv`) mode:\n\n``` sh\n$ beet -vv keyfinder anything\n```\n\nLed to this problem:\n\n```\nuser configuration: /home/diomekes/.config/beets/config.yaml\ndata directory: /home/diomekes/.config/beets\nplugin paths:\nSending event: pluginload\ninline: adding item field disc_and_track\nlibrary database: /home/diomekes/.config/beets/library.db\nlibrary directory: /home/diomekes/media/music\nSending event: library_opened\nTraceback (most recent call last):\n File \"/usr/bin/beet\", line 9, in <module>\n load_entry_point('beets==1.3.19', 'console_scripts', 'beet')()\n File \"/usr/lib/python2.7/site-packages/beets/ui/__init__.py\", line 1266, in main\n _raw_main(args)\n File \"/usr/lib/python2.7/site-packages/beets/ui/__init__.py\", line 1253, in _raw_main\n subcommand.func(lib, suboptions, subargs)\n File \"/usr/lib/python2.7/site-packages/beetsplug/keyfinder.py\", line 48, in command\n self.find_key(lib.items(ui.decargs(args)), write=ui.should_write())\n File \"/usr/lib/python2.7/site-packages/beetsplug/keyfinder.py\", line 74, in find_key\n key_raw = output.rsplit(None, 1)[-1]\nIndexError: list index out of range\n```\n\nkeyfinder-cli works if run directly\n### Setup\n- OS: archlinux\n- Python version: 2.7.12\n- beets version: 1.3.19\n- Turning off plugins made problem go away (yes/no): problem is with keyfinder plugin only\n- libkeyfinder-git 239.0a5ec7f-1\n- keyfinder-cli-git 49.40a41ab-1\n\nMy configuration (output of `beet config`) is:\n\n``` yaml\n...\nkeyfinder:\n bin: keyfinder-cli\n auto: yes\n overwrite: no\n\nplugins: badfiles chroma convert duplicates fetchart fromfilename fuzzy info inline keyfinder lastgenre lyrics mbcollection mbsync missing play random scrub smartplaylist zero\n...\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of beets.\n# Copyright 2016, Thomas Scholtes.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Uses the `KeyFinder` program to add the `initial_key` field.\n\"\"\"\n\nfrom __future__ import division, absolute_import, print_function\n\nimport os.path\nimport subprocess\n\nfrom beets import ui\nfrom beets import util\nfrom beets.plugins import BeetsPlugin\n\n\nclass KeyFinderPlugin(BeetsPlugin):\n\n def __init__(self):\n super(KeyFinderPlugin, self).__init__()\n self.config.add({\n u'bin': u'KeyFinder',\n u'auto': True,\n 
u'overwrite': False,\n })\n\n if self.config['auto'].get(bool):\n self.import_stages = [self.imported]\n\n def commands(self):\n cmd = ui.Subcommand('keyfinder',\n help=u'detect and add initial key from audio')\n cmd.func = self.command\n return [cmd]\n\n def command(self, lib, opts, args):\n self.find_key(lib.items(ui.decargs(args)), write=ui.should_write())\n\n def imported(self, session, task):\n self.find_key(task.imported_items())\n\n def find_key(self, items, write=False):\n overwrite = self.config['overwrite'].get(bool)\n command = [self.config['bin'].as_str()]\n # The KeyFinder GUI program needs the -f flag before the path.\n # keyfinder-cli is similar, but just wants the path with no flag.\n if 'keyfinder-cli' not in os.path.basename(command[0]).lower():\n command.append('-f')\n\n for item in items:\n if item['initial_key'] and not overwrite:\n continue\n\n try:\n output = util.command_output(command + [util.syspath(\n item.path)]).stdout\n except (subprocess.CalledProcessError, OSError) as exc:\n self._log.error(u'execution failed: {0}', exc)\n continue\n except UnicodeEncodeError:\n # Workaround for Python 2 Windows bug.\n # https://bugs.python.org/issue1759845\n self._log.error(u'execution failed for Unicode path: {0!r}',\n item.path)\n continue\n\n key_raw = output.rsplit(None, 1)[-1]\n try:\n key = util.text_string(key_raw)\n except UnicodeDecodeError:\n self._log.error(u'output is invalid UTF-8')\n continue\n\n item['initial_key'] = key\n self._log.info(u'added computed initial key {0} for {1}',\n key, util.displayable_path(item.path))\n\n if write:\n item.try_write()\n item.store()\n", "path": "beetsplug/keyfinder.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of beets.\n# Copyright 2016, Thomas Scholtes.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Uses the `KeyFinder` program to add the `initial_key` field.\n\"\"\"\n\nfrom __future__ import division, absolute_import, print_function\n\nimport os.path\nimport subprocess\n\nfrom beets import ui\nfrom beets import util\nfrom beets.plugins import BeetsPlugin\n\n\nclass KeyFinderPlugin(BeetsPlugin):\n\n def __init__(self):\n super(KeyFinderPlugin, self).__init__()\n self.config.add({\n u'bin': u'KeyFinder',\n u'auto': True,\n u'overwrite': False,\n })\n\n if self.config['auto'].get(bool):\n self.import_stages = [self.imported]\n\n def commands(self):\n cmd = ui.Subcommand('keyfinder',\n help=u'detect and add initial key from audio')\n cmd.func = self.command\n return [cmd]\n\n def command(self, lib, opts, args):\n self.find_key(lib.items(ui.decargs(args)), write=ui.should_write())\n\n def imported(self, session, task):\n self.find_key(task.imported_items())\n\n def find_key(self, items, write=False):\n overwrite = self.config['overwrite'].get(bool)\n command = [self.config['bin'].as_str()]\n # The KeyFinder GUI program needs the -f flag before the path.\n # keyfinder-cli is similar, but just wants the path with no flag.\n if 'keyfinder-cli' not 
in os.path.basename(command[0]).lower():\n command.append('-f')\n\n for item in items:\n if item['initial_key'] and not overwrite:\n continue\n\n try:\n output = util.command_output(command + [util.syspath(\n item.path)]).stdout\n except (subprocess.CalledProcessError, OSError) as exc:\n self._log.error(u'execution failed: {0}', exc)\n continue\n except UnicodeEncodeError:\n # Workaround for Python 2 Windows bug.\n # https://bugs.python.org/issue1759845\n self._log.error(u'execution failed for Unicode path: {0!r}',\n item.path)\n continue\n\n try:\n key_raw = output.rsplit(None, 1)[-1]\n except IndexError:\n # Sometimes keyfinder-cli returns 0 but with no key, usually\n # when the file is silent or corrupt, so we log and skip.\n self._log.error(u'no key returned for path: {0}', item.path)\n continue\n\n try:\n key = util.text_string(key_raw)\n except UnicodeDecodeError:\n self._log.error(u'output is invalid UTF-8')\n continue\n\n item['initial_key'] = key\n self._log.info(u'added computed initial key {0} for {1}',\n key, util.displayable_path(item.path))\n\n if write:\n item.try_write()\n item.store()\n", "path": "beetsplug/keyfinder.py"}]}
| 1,669 | 172 |
gh_patches_debug_54782
|
rasdani/github-patches
|
git_diff
|
encode__httpx-362
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Releasing 0.7.3
Hi @encode/httpx-maintainers!
It’s been 21 days since 0.7.2 was released, and we’ve got [a bunch of features](https://github.com/encode/httpx/compare/0.7.2...HEAD) ready for 0.7.3 already, eg:
- Digest auth
- SSLKEYLOGFILE
- Response.elapsed
- A host of bug fixes
So regardless of what gets merged until then I think it’s time to release the next version. :)
As suggested by @sethmlarson I-cant-remember-where I’d like to take on this release. I’ll probably take the opportunity to document the release process as well - #313. 👍
Probably will do tonight.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `httpx/__version__.py`
Content:
```
1 __title__ = "httpx"
2 __description__ = "A next generation HTTP client, for Python 3."
3 __version__ = "0.7.2"
4
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/httpx/__version__.py b/httpx/__version__.py
--- a/httpx/__version__.py
+++ b/httpx/__version__.py
@@ -1,3 +1,3 @@
__title__ = "httpx"
__description__ = "A next generation HTTP client, for Python 3."
-__version__ = "0.7.2"
+__version__ = "0.7.3"
|
{"golden_diff": "diff --git a/httpx/__version__.py b/httpx/__version__.py\n--- a/httpx/__version__.py\n+++ b/httpx/__version__.py\n@@ -1,3 +1,3 @@\n __title__ = \"httpx\"\n __description__ = \"A next generation HTTP client, for Python 3.\"\n-__version__ = \"0.7.2\"\n+__version__ = \"0.7.3\"\n", "issue": "Releasing 0.7.3\nHi @encode/httpx-maintainers!\r\n\r\nIt\u2019s been 21 days since 0.7.2 was released, and we\u2019ve got [a bunch of features](https://github.com/encode/httpx/compare/0.7.2...HEAD) ready for 0.7.3 already, eg:\r\n\r\n- Digest auth\r\n- SSLKEYLOGFILE\r\n- Response.elapsed\r\n- A host of bug fixes\r\n\r\nSo regardless of what gets merged until then I think it\u2019s time to release the next version. :)\r\n\r\nAs suggested by @sethmlarson I-cant-remember-where I\u2019d like to take on this release. I\u2019ll probably take the opportunity to document the release process as well - #313. \ud83d\udc4d\r\n\r\nProbably will do tonight.\r\n\r\n\n", "before_files": [{"content": "__title__ = \"httpx\"\n__description__ = \"A next generation HTTP client, for Python 3.\"\n__version__ = \"0.7.2\"\n", "path": "httpx/__version__.py"}], "after_files": [{"content": "__title__ = \"httpx\"\n__description__ = \"A next generation HTTP client, for Python 3.\"\n__version__ = \"0.7.3\"\n", "path": "httpx/__version__.py"}]}
| 472 | 94 |
gh_patches_debug_19838
|
rasdani/github-patches
|
git_diff
|
napari__napari-649
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
numpy.pad() issues on version 1.16
## 🐛 Bug
I had numpy version 1.16 and the `test_viewer.py` tests were failing because `np.pad()` was missing the `mode` positional argument (see below). I noticed that in [v1.17, `mode` is optional](https://docs.scipy.org/doc/numpy/reference/generated/numpy.pad.html) and when I upgraded numpy (1.17.3), the tests passed.
## To Reproduce
Steps to reproduce the behavior:
1. Install numpy version 1.16
2. run the tests in `/napari/tests/test_viewer.py`
3. Receive the following error
```python
self = <napari._vispy.vispy_shapes_layer.VispyShapesLayer object at 0x149450f28>
def _on_data_change(self):
faces = self.layer._data_view._mesh.displayed_triangles
colors = self.layer._data_view._mesh.displayed_triangles_colors
vertices = self.layer._data_view._mesh.vertices
# Note that the indicies of the vertices need to be resversed to
# go from numpy style to xyz
if vertices is not None:
vertices = vertices[:, ::-1] + 0.5
if len(vertices) == 0 or len(faces) == 0:
vertices = np.zeros((3, self.layer.dims.ndisplay))
faces = np.array([[0, 1, 2]])
colors = np.array([[0, 0, 0, 0]])
if self.layer.dims.ndisplay == 3 and self.layer.dims.ndim == 2:
> vertices = np.pad(vertices, ((0, 0), (0, 1)))
E TypeError: pad() missing 1 required positional argument: 'mode'
../_vispy/vispy_shapes_layer.py:47: TypeError
```
## Expected behavior
Currently, the requirements specify numpy >= 1.10.0, so I think we should bump to 1.17 (or the oldest version that passes).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `napari/_vispy/vispy_vectors_layer.py`
Content:
```
1 from vispy.scene.visuals import Mesh as MeshNode
2 from .vispy_base_layer import VispyBaseLayer
3 import numpy as np
4
5
6 class VispyVectorsLayer(VispyBaseLayer):
7 def __init__(self, layer):
8 node = MeshNode()
9 super().__init__(layer, node)
10
11 self.layer.events.edge_color.connect(lambda e: self._on_data_change())
12
13 self._reset_base()
14 self._on_data_change()
15
16 def _on_data_change(self):
17 if (
18 len(self.layer._view_vertices) == 0
19 or len(self.layer._view_faces) == 0
20 ):
21 vertices = np.zeros((3, self.layer.dims.ndisplay))
22 faces = np.array([[0, 1, 2]])
23 else:
24 vertices = self.layer._view_vertices[:, ::-1] + 0.5
25 faces = self.layer._view_faces
26
27 if self.layer.dims.ndisplay == 3 and self.layer.dims.ndim == 2:
28 vertices = np.pad(vertices, ((0, 0), (0, 1)))
29
30 self.node.set_data(
31 vertices=vertices, faces=faces, color=self.layer.edge_color
32 )
33 self.node.update()
34
```
Path: `napari/_vispy/vispy_shapes_layer.py`
Content:
```
1 from vispy.scene.visuals import Line, Mesh, Compound
2 from .markers import Markers
3 from .vispy_base_layer import VispyBaseLayer
4 import numpy as np
5
6
7 class VispyShapesLayer(VispyBaseLayer):
8 def __init__(self, layer):
9 # Create a compound visual with the following four subvisuals:
10 # Markers: corresponding to the vertices of the interaction box or the
11 # shapes that are used for highlights.
12 # Lines: The lines of the interaction box used for highlights.
13 # Mesh: The mesh of the outlines for each shape used for highlights.
14 # Mesh: The actual meshes of the shape faces and edges
15 node = Compound([Mesh(), Mesh(), Line(), Markers()])
16
17 super().__init__(layer, node)
18
19 self.layer.events.edge_width.connect(lambda e: self._on_data_change())
20 self.layer.events.edge_color.connect(lambda e: self._on_data_change())
21 self.layer.events.face_color.connect(lambda e: self._on_data_change())
22 self.layer.events.opacity.connect(lambda e: self._on_data_change())
23 self.layer.events.highlight.connect(
24 lambda e: self._on_highlight_change()
25 )
26
27 self._reset_base()
28 self._on_data_change()
29 self._on_highlight_change()
30
31 def _on_data_change(self):
32 faces = self.layer._data_view._mesh.displayed_triangles
33 colors = self.layer._data_view._mesh.displayed_triangles_colors
34 vertices = self.layer._data_view._mesh.vertices
35
36 # Note that the indicies of the vertices need to be resversed to
37 # go from numpy style to xyz
38 if vertices is not None:
39 vertices = vertices[:, ::-1] + 0.5
40
41 if len(vertices) == 0 or len(faces) == 0:
42 vertices = np.zeros((3, self.layer.dims.ndisplay))
43 faces = np.array([[0, 1, 2]])
44 colors = np.array([[0, 0, 0, 0]])
45
46 if self.layer.dims.ndisplay == 3 and self.layer.dims.ndim == 2:
47 vertices = np.pad(vertices, ((0, 0), (0, 1)))
48
49 self.node._subvisuals[0].set_data(
50 vertices=vertices, faces=faces, face_colors=colors
51 )
52 self.node.update()
53
54 def _on_highlight_change(self):
55 # Compute the vertices and faces of any shape outlines
56 vertices, faces = self.layer._outline_shapes()
57
58 if vertices is None or len(vertices) == 0 or len(faces) == 0:
59 vertices = np.zeros((3, self.layer.dims.ndisplay))
60 faces = np.array([[0, 1, 2]])
61 else:
62 vertices = vertices + 0.5
63
64 self.node._subvisuals[1].set_data(
65 vertices=vertices, faces=faces, color=self.layer._highlight_color
66 )
67
68 # Compute the location and properties of the vertices and box that
69 # need to get rendered
70 (
71 vertices,
72 face_color,
73 edge_color,
74 pos,
75 width,
76 ) = self.layer._compute_vertices_and_box()
77
78 if vertices is None or len(vertices) == 0:
79 vertices = np.zeros((1, self.layer.dims.ndisplay))
80 size = 0
81 else:
82 vertices = vertices + 0.5
83 size = self.layer._vertex_size
84
85 self.node._subvisuals[3].set_data(
86 vertices,
87 size=size,
88 face_color=face_color,
89 edge_color=edge_color,
90 edge_width=1.5,
91 symbol='square',
92 scaling=False,
93 )
94
95 if pos is None or len(pos) == 0:
96 pos = np.zeros((1, self.layer.dims.ndisplay))
97 width = 0
98 else:
99 pos = pos + 0.5
100
101 self.node._subvisuals[2].set_data(
102 pos=pos, color=edge_color, width=width
103 )
104
105 def _on_opacity_change(self):
106 pass
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/napari/_vispy/vispy_shapes_layer.py b/napari/_vispy/vispy_shapes_layer.py
--- a/napari/_vispy/vispy_shapes_layer.py
+++ b/napari/_vispy/vispy_shapes_layer.py
@@ -44,7 +44,7 @@
colors = np.array([[0, 0, 0, 0]])
if self.layer.dims.ndisplay == 3 and self.layer.dims.ndim == 2:
- vertices = np.pad(vertices, ((0, 0), (0, 1)))
+ vertices = np.pad(vertices, ((0, 0), (0, 1)), mode='constant')
self.node._subvisuals[0].set_data(
vertices=vertices, faces=faces, face_colors=colors
diff --git a/napari/_vispy/vispy_vectors_layer.py b/napari/_vispy/vispy_vectors_layer.py
--- a/napari/_vispy/vispy_vectors_layer.py
+++ b/napari/_vispy/vispy_vectors_layer.py
@@ -25,7 +25,7 @@
faces = self.layer._view_faces
if self.layer.dims.ndisplay == 3 and self.layer.dims.ndim == 2:
- vertices = np.pad(vertices, ((0, 0), (0, 1)))
+ vertices = np.pad(vertices, ((0, 0), (0, 1)), mode='constant')
self.node.set_data(
vertices=vertices, faces=faces, color=self.layer.edge_color
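The change is simply to pass `mode` explicitly, which every NumPy release accepts; the argument only became optional (defaulting to 'constant') in 1.17. A standalone sketch of the padding call, independent of the record's code:
```python
import numpy as np

vertices = np.zeros((3, 2))

# Explicit mode works on old and new NumPy alike; omitting it raises
# "TypeError: pad() missing 1 required positional argument: 'mode'" on <=1.16.
padded = np.pad(vertices, ((0, 0), (0, 1)), mode="constant")
print(padded.shape)  # (3, 3)
```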
|
{"golden_diff": "diff --git a/napari/_vispy/vispy_shapes_layer.py b/napari/_vispy/vispy_shapes_layer.py\n--- a/napari/_vispy/vispy_shapes_layer.py\n+++ b/napari/_vispy/vispy_shapes_layer.py\n@@ -44,7 +44,7 @@\n colors = np.array([[0, 0, 0, 0]])\n \n if self.layer.dims.ndisplay == 3 and self.layer.dims.ndim == 2:\n- vertices = np.pad(vertices, ((0, 0), (0, 1)))\n+ vertices = np.pad(vertices, ((0, 0), (0, 1)), mode='constant')\n \n self.node._subvisuals[0].set_data(\n vertices=vertices, faces=faces, face_colors=colors\ndiff --git a/napari/_vispy/vispy_vectors_layer.py b/napari/_vispy/vispy_vectors_layer.py\n--- a/napari/_vispy/vispy_vectors_layer.py\n+++ b/napari/_vispy/vispy_vectors_layer.py\n@@ -25,7 +25,7 @@\n faces = self.layer._view_faces\n \n if self.layer.dims.ndisplay == 3 and self.layer.dims.ndim == 2:\n- vertices = np.pad(vertices, ((0, 0), (0, 1)))\n+ vertices = np.pad(vertices, ((0, 0), (0, 1)), mode='constant')\n \n self.node.set_data(\n vertices=vertices, faces=faces, color=self.layer.edge_color\n", "issue": "numpy.pad() issues on version 1.16\n## \ud83d\udc1b Bug\r\n\r\nI had numpy version 1.16 and the `test_viewer.py` tests were failing because `np.pad()` was missing the `mode` positional argument (see below). I noticed that in [v1.17, `mode` is optional](https://docs.scipy.org/doc/numpy/reference/generated/numpy.pad.html) and when I upgraded numpy (1.17.3), the tests passed.\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n1. Install numpy version 1.16\r\n2. run the tests in `/napari/tests/test_viewer.py`\r\n3. Receive the following error\r\n\r\n```python\r\nself = <napari._vispy.vispy_shapes_layer.VispyShapesLayer object at 0x149450f28>\r\n\r\n def _on_data_change(self):\r\n faces = self.layer._data_view._mesh.displayed_triangles\r\n colors = self.layer._data_view._mesh.displayed_triangles_colors\r\n vertices = self.layer._data_view._mesh.vertices\r\n\r\n # Note that the indicies of the vertices need to be resversed to\r\n # go from numpy style to xyz\r\n if vertices is not None:\r\n vertices = vertices[:, ::-1] + 0.5\r\n\r\n if len(vertices) == 0 or len(faces) == 0:\r\n vertices = np.zeros((3, self.layer.dims.ndisplay))\r\n faces = np.array([[0, 1, 2]])\r\n colors = np.array([[0, 0, 0, 0]])\r\n\r\n if self.layer.dims.ndisplay == 3 and self.layer.dims.ndim == 2:\r\n> vertices = np.pad(vertices, ((0, 0), (0, 1)))\r\nE TypeError: pad() missing 1 required positional argument: 'mode'\r\n\r\n../_vispy/vispy_shapes_layer.py:47: TypeError\r\n```\r\n\r\n## Expected behavior\r\n\r\nCurrently, the requirements specifty numpy >= 1.10.0, so I think we should bump to 1.17 (or the oldest version that passes).\r\n\r\n\n", "before_files": [{"content": "from vispy.scene.visuals import Mesh as MeshNode\nfrom .vispy_base_layer import VispyBaseLayer\nimport numpy as np\n\n\nclass VispyVectorsLayer(VispyBaseLayer):\n def __init__(self, layer):\n node = MeshNode()\n super().__init__(layer, node)\n\n self.layer.events.edge_color.connect(lambda e: self._on_data_change())\n\n self._reset_base()\n self._on_data_change()\n\n def _on_data_change(self):\n if (\n len(self.layer._view_vertices) == 0\n or len(self.layer._view_faces) == 0\n ):\n vertices = np.zeros((3, self.layer.dims.ndisplay))\n faces = np.array([[0, 1, 2]])\n else:\n vertices = self.layer._view_vertices[:, ::-1] + 0.5\n faces = self.layer._view_faces\n\n if self.layer.dims.ndisplay == 3 and self.layer.dims.ndim == 2:\n vertices = np.pad(vertices, ((0, 0), (0, 1)))\n\n self.node.set_data(\n vertices=vertices, 
faces=faces, color=self.layer.edge_color\n )\n self.node.update()\n", "path": "napari/_vispy/vispy_vectors_layer.py"}, {"content": "from vispy.scene.visuals import Line, Mesh, Compound\nfrom .markers import Markers\nfrom .vispy_base_layer import VispyBaseLayer\nimport numpy as np\n\n\nclass VispyShapesLayer(VispyBaseLayer):\n def __init__(self, layer):\n # Create a compound visual with the following four subvisuals:\n # Markers: corresponding to the vertices of the interaction box or the\n # shapes that are used for highlights.\n # Lines: The lines of the interaction box used for highlights.\n # Mesh: The mesh of the outlines for each shape used for highlights.\n # Mesh: The actual meshes of the shape faces and edges\n node = Compound([Mesh(), Mesh(), Line(), Markers()])\n\n super().__init__(layer, node)\n\n self.layer.events.edge_width.connect(lambda e: self._on_data_change())\n self.layer.events.edge_color.connect(lambda e: self._on_data_change())\n self.layer.events.face_color.connect(lambda e: self._on_data_change())\n self.layer.events.opacity.connect(lambda e: self._on_data_change())\n self.layer.events.highlight.connect(\n lambda e: self._on_highlight_change()\n )\n\n self._reset_base()\n self._on_data_change()\n self._on_highlight_change()\n\n def _on_data_change(self):\n faces = self.layer._data_view._mesh.displayed_triangles\n colors = self.layer._data_view._mesh.displayed_triangles_colors\n vertices = self.layer._data_view._mesh.vertices\n\n # Note that the indicies of the vertices need to be resversed to\n # go from numpy style to xyz\n if vertices is not None:\n vertices = vertices[:, ::-1] + 0.5\n\n if len(vertices) == 0 or len(faces) == 0:\n vertices = np.zeros((3, self.layer.dims.ndisplay))\n faces = np.array([[0, 1, 2]])\n colors = np.array([[0, 0, 0, 0]])\n\n if self.layer.dims.ndisplay == 3 and self.layer.dims.ndim == 2:\n vertices = np.pad(vertices, ((0, 0), (0, 1)))\n\n self.node._subvisuals[0].set_data(\n vertices=vertices, faces=faces, face_colors=colors\n )\n self.node.update()\n\n def _on_highlight_change(self):\n # Compute the vertices and faces of any shape outlines\n vertices, faces = self.layer._outline_shapes()\n\n if vertices is None or len(vertices) == 0 or len(faces) == 0:\n vertices = np.zeros((3, self.layer.dims.ndisplay))\n faces = np.array([[0, 1, 2]])\n else:\n vertices = vertices + 0.5\n\n self.node._subvisuals[1].set_data(\n vertices=vertices, faces=faces, color=self.layer._highlight_color\n )\n\n # Compute the location and properties of the vertices and box that\n # need to get rendered\n (\n vertices,\n face_color,\n edge_color,\n pos,\n width,\n ) = self.layer._compute_vertices_and_box()\n\n if vertices is None or len(vertices) == 0:\n vertices = np.zeros((1, self.layer.dims.ndisplay))\n size = 0\n else:\n vertices = vertices + 0.5\n size = self.layer._vertex_size\n\n self.node._subvisuals[3].set_data(\n vertices,\n size=size,\n face_color=face_color,\n edge_color=edge_color,\n edge_width=1.5,\n symbol='square',\n scaling=False,\n )\n\n if pos is None or len(pos) == 0:\n pos = np.zeros((1, self.layer.dims.ndisplay))\n width = 0\n else:\n pos = pos + 0.5\n\n self.node._subvisuals[2].set_data(\n pos=pos, color=edge_color, width=width\n )\n\n def _on_opacity_change(self):\n pass\n", "path": "napari/_vispy/vispy_shapes_layer.py"}], "after_files": [{"content": "from vispy.scene.visuals import Mesh as MeshNode\nfrom .vispy_base_layer import VispyBaseLayer\nimport numpy as np\n\n\nclass VispyVectorsLayer(VispyBaseLayer):\n def __init__(self, layer):\n 
node = MeshNode()\n super().__init__(layer, node)\n\n self.layer.events.edge_color.connect(lambda e: self._on_data_change())\n\n self._reset_base()\n self._on_data_change()\n\n def _on_data_change(self):\n if (\n len(self.layer._view_vertices) == 0\n or len(self.layer._view_faces) == 0\n ):\n vertices = np.zeros((3, self.layer.dims.ndisplay))\n faces = np.array([[0, 1, 2]])\n else:\n vertices = self.layer._view_vertices[:, ::-1] + 0.5\n faces = self.layer._view_faces\n\n if self.layer.dims.ndisplay == 3 and self.layer.dims.ndim == 2:\n vertices = np.pad(vertices, ((0, 0), (0, 1)), mode='constant')\n\n self.node.set_data(\n vertices=vertices, faces=faces, color=self.layer.edge_color\n )\n self.node.update()\n", "path": "napari/_vispy/vispy_vectors_layer.py"}, {"content": "from vispy.scene.visuals import Line, Mesh, Compound\nfrom .markers import Markers\nfrom .vispy_base_layer import VispyBaseLayer\nimport numpy as np\n\n\nclass VispyShapesLayer(VispyBaseLayer):\n def __init__(self, layer):\n # Create a compound visual with the following four subvisuals:\n # Markers: corresponding to the vertices of the interaction box or the\n # shapes that are used for highlights.\n # Lines: The lines of the interaction box used for highlights.\n # Mesh: The mesh of the outlines for each shape used for highlights.\n # Mesh: The actual meshes of the shape faces and edges\n node = Compound([Mesh(), Mesh(), Line(), Markers()])\n\n super().__init__(layer, node)\n\n self.layer.events.edge_width.connect(lambda e: self._on_data_change())\n self.layer.events.edge_color.connect(lambda e: self._on_data_change())\n self.layer.events.face_color.connect(lambda e: self._on_data_change())\n self.layer.events.opacity.connect(lambda e: self._on_data_change())\n self.layer.events.highlight.connect(\n lambda e: self._on_highlight_change()\n )\n\n self._reset_base()\n self._on_data_change()\n self._on_highlight_change()\n\n def _on_data_change(self):\n faces = self.layer._data_view._mesh.displayed_triangles\n colors = self.layer._data_view._mesh.displayed_triangles_colors\n vertices = self.layer._data_view._mesh.vertices\n\n # Note that the indicies of the vertices need to be resversed to\n # go from numpy style to xyz\n if vertices is not None:\n vertices = vertices[:, ::-1] + 0.5\n\n if len(vertices) == 0 or len(faces) == 0:\n vertices = np.zeros((3, self.layer.dims.ndisplay))\n faces = np.array([[0, 1, 2]])\n colors = np.array([[0, 0, 0, 0]])\n\n if self.layer.dims.ndisplay == 3 and self.layer.dims.ndim == 2:\n vertices = np.pad(vertices, ((0, 0), (0, 1)), mode='constant')\n\n self.node._subvisuals[0].set_data(\n vertices=vertices, faces=faces, face_colors=colors\n )\n self.node.update()\n\n def _on_highlight_change(self):\n # Compute the vertices and faces of any shape outlines\n vertices, faces = self.layer._outline_shapes()\n\n if vertices is None or len(vertices) == 0 or len(faces) == 0:\n vertices = np.zeros((3, self.layer.dims.ndisplay))\n faces = np.array([[0, 1, 2]])\n else:\n vertices = vertices + 0.5\n\n self.node._subvisuals[1].set_data(\n vertices=vertices, faces=faces, color=self.layer._highlight_color\n )\n\n # Compute the location and properties of the vertices and box that\n # need to get rendered\n (\n vertices,\n face_color,\n edge_color,\n pos,\n width,\n ) = self.layer._compute_vertices_and_box()\n\n if vertices is None or len(vertices) == 0:\n vertices = np.zeros((1, self.layer.dims.ndisplay))\n size = 0\n else:\n vertices = vertices + 0.5\n size = self.layer._vertex_size\n\n 
self.node._subvisuals[3].set_data(\n vertices,\n size=size,\n face_color=face_color,\n edge_color=edge_color,\n edge_width=1.5,\n symbol='square',\n scaling=False,\n )\n\n if pos is None or len(pos) == 0:\n pos = np.zeros((1, self.layer.dims.ndisplay))\n width = 0\n else:\n pos = pos + 0.5\n\n self.node._subvisuals[2].set_data(\n pos=pos, color=edge_color, width=width\n )\n\n def _on_opacity_change(self):\n pass\n", "path": "napari/_vispy/vispy_shapes_layer.py"}]}
| 2,177 | 351 |
gh_patches_debug_22190
|
rasdani/github-patches
|
git_diff
|
readthedocs__readthedocs.org-11421
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Build: expose `ubuntu-24.04` as an option for `build.os`
We are close to Ubuntu 24.04 release. We should expose it to our users.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `readthedocs/builds/constants_docker.py`
Content:
```
1 """
2 Define constants here to allow import them without any external dependency.
3
4 There are situations where we want to have access to these values without Django installed
5 (e.g. common/dockerfiles/tasks.py)
6
7 Note these constants where previously defined as Django settings in ``readthedocs/settings/base.py``.
8 """
9
10 DOCKER_DEFAULT_IMAGE = "readthedocs/build"
11
12 # When adding a new tool/version to this setting, you should:
13 #
14 # - Add a mapping between the expected version in the config file, to the full
15 # version installed via asdf (found via ``asdf list all <tool>``).
16 # - Run the script ``./scripts/compile_version_upload.sh`` in
17 # development to compile and cache the new tool/version.
18 # - Update the CircleCI job on the ``readthedocs-docker-images`` repository with the new versions at
19 # https://github.com/rtfd/readthedocs-docker-images/blob/d2760526abdfe27001946614b749abf8011b7f90/.circleci/config.yml#L38-L44.
20 # - Update the latest aliases for OS and tools (below this setting).
21 # - Update readthedocs/rtd_tests/fixtures/spec/v2/schema.json.
22 # - Update the documentation in ``docs/user/config-file/v2.rst``.
23 RTD_DOCKER_BUILD_SETTINGS = {
24 # Mapping of build.os options to docker image.
25 "os": {
26 "ubuntu-20.04": f"{DOCKER_DEFAULT_IMAGE}:ubuntu-20.04",
27 "ubuntu-22.04": f"{DOCKER_DEFAULT_IMAGE}:ubuntu-22.04",
28 },
29 # Mapping of build.tools options to specific versions.
30 "tools": {
31 "python": {
32 "2.7": "2.7.18",
33 "3.6": "3.6.15",
34 "3.7": "3.7.17",
35 "3.8": "3.8.19",
36 "3.9": "3.9.19",
37 "3.10": "3.10.14",
38 "3.11": "3.11.9",
39 "3.12": "3.12.3",
40 "miniconda3-4.7": "miniconda3-4.7.12",
41 "miniconda3-3.12-24.1": "miniconda3-3.12-24.1.2-0",
42 "mambaforge-4.10": "mambaforge-4.10.3-10",
43 "mambaforge-22.9": "mambaforge-22.9.0-3",
44 "mambaforge-23.11": "mambaforge-23.11.0-0",
45 },
46 "nodejs": {
47 "14": "14.20.1",
48 "16": "16.18.1",
49 "18": "18.16.1",
50 "19": "19.0.1",
51 "20": "20.14.0", # LTS
52 },
53 "ruby": {
54 "3.3": "3.3.2",
55 },
56 "rust": {
57 "1.55": "1.55.0",
58 "1.61": "1.61.0",
59 "1.64": "1.64.0",
60 "1.70": "1.70.0",
61 "1.75": "1.75.0",
62 "1.78": "1.78.0",
63 },
64 "golang": {
65 "1.17": "1.17.13",
66 "1.18": "1.18.10",
67 "1.19": "1.19.13",
68 "1.20": "1.20.14",
69 "1.21": "1.21.11",
70 "1.22": "1.22.4",
71 },
72 },
73 }
74
75 # Set latest aliases for OS and tools.
76 _OS = RTD_DOCKER_BUILD_SETTINGS["os"]
77 _TOOLS = RTD_DOCKER_BUILD_SETTINGS["tools"]
78 _OS["ubuntu-lts-latest"] = _OS["ubuntu-22.04"]
79 _TOOLS["python"]["3"] = _TOOLS["python"]["3.12"]
80 _TOOLS["python"]["latest"] = _TOOLS["python"]["3"]
81 _TOOLS["python"]["miniconda-latest"] = _TOOLS["python"]["miniconda3-3.12-24.1"]
82 _TOOLS["python"]["mambaforge-latest"] = _TOOLS["python"]["mambaforge-23.11"]
83 _TOOLS["nodejs"]["latest"] = _TOOLS["nodejs"]["20"]
84 _TOOLS["ruby"]["latest"] = _TOOLS["ruby"]["3.3"]
85 _TOOLS["rust"]["latest"] = _TOOLS["rust"]["1.78"]
86 _TOOLS["golang"]["latest"] = _TOOLS["golang"]["1.22"]
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/readthedocs/builds/constants_docker.py b/readthedocs/builds/constants_docker.py
--- a/readthedocs/builds/constants_docker.py
+++ b/readthedocs/builds/constants_docker.py
@@ -25,6 +25,7 @@
"os": {
"ubuntu-20.04": f"{DOCKER_DEFAULT_IMAGE}:ubuntu-20.04",
"ubuntu-22.04": f"{DOCKER_DEFAULT_IMAGE}:ubuntu-22.04",
+ "ubuntu-24.04": f"{DOCKER_DEFAULT_IMAGE}:ubuntu-24.04",
},
# Mapping of build.tools options to specific versions.
"tools": {
@@ -75,7 +76,11 @@
# Set latest aliases for OS and tools.
_OS = RTD_DOCKER_BUILD_SETTINGS["os"]
_TOOLS = RTD_DOCKER_BUILD_SETTINGS["tools"]
+
+# TODO: point ``ubuntu-lts-latest`` to Ubuntu 24.04 LTS once we have tested it
+# in production after some weeks
_OS["ubuntu-lts-latest"] = _OS["ubuntu-22.04"]
+
_TOOLS["python"]["3"] = _TOOLS["python"]["3.12"]
_TOOLS["python"]["latest"] = _TOOLS["python"]["3"]
_TOOLS["python"]["miniconda-latest"] = _TOOLS["python"]["miniconda3-3.12-24.1"]
|
{"golden_diff": "diff --git a/readthedocs/builds/constants_docker.py b/readthedocs/builds/constants_docker.py\n--- a/readthedocs/builds/constants_docker.py\n+++ b/readthedocs/builds/constants_docker.py\n@@ -25,6 +25,7 @@\n \"os\": {\n \"ubuntu-20.04\": f\"{DOCKER_DEFAULT_IMAGE}:ubuntu-20.04\",\n \"ubuntu-22.04\": f\"{DOCKER_DEFAULT_IMAGE}:ubuntu-22.04\",\n+ \"ubuntu-24.04\": f\"{DOCKER_DEFAULT_IMAGE}:ubuntu-24.04\",\n },\n # Mapping of build.tools options to specific versions.\n \"tools\": {\n@@ -75,7 +76,11 @@\n # Set latest aliases for OS and tools.\n _OS = RTD_DOCKER_BUILD_SETTINGS[\"os\"]\n _TOOLS = RTD_DOCKER_BUILD_SETTINGS[\"tools\"]\n+\n+# TODO: point ``ubuntu-lts-latest`` to Ubuntu 24.04 LTS once we have tested it\n+# in production after some weeks\n _OS[\"ubuntu-lts-latest\"] = _OS[\"ubuntu-22.04\"]\n+\n _TOOLS[\"python\"][\"3\"] = _TOOLS[\"python\"][\"3.12\"]\n _TOOLS[\"python\"][\"latest\"] = _TOOLS[\"python\"][\"3\"]\n _TOOLS[\"python\"][\"miniconda-latest\"] = _TOOLS[\"python\"][\"miniconda3-3.12-24.1\"]\n", "issue": "Build: expose `ubuntu-24.04` as an option for `build.os`\nWe are close to Ubuntu 24.04 release. We should expose it to our users.\n", "before_files": [{"content": "\"\"\"\nDefine constants here to allow import them without any external dependency.\n\nThere are situations where we want to have access to these values without Django installed\n(e.g. common/dockerfiles/tasks.py)\n\nNote these constants where previously defined as Django settings in ``readthedocs/settings/base.py``.\n\"\"\"\n\nDOCKER_DEFAULT_IMAGE = \"readthedocs/build\"\n\n# When adding a new tool/version to this setting, you should:\n#\n# - Add a mapping between the expected version in the config file, to the full\n# version installed via asdf (found via ``asdf list all <tool>``).\n# - Run the script ``./scripts/compile_version_upload.sh`` in\n# development to compile and cache the new tool/version.\n# - Update the CircleCI job on the ``readthedocs-docker-images`` repository with the new versions at\n# https://github.com/rtfd/readthedocs-docker-images/blob/d2760526abdfe27001946614b749abf8011b7f90/.circleci/config.yml#L38-L44.\n# - Update the latest aliases for OS and tools (below this setting).\n# - Update readthedocs/rtd_tests/fixtures/spec/v2/schema.json.\n# - Update the documentation in ``docs/user/config-file/v2.rst``.\nRTD_DOCKER_BUILD_SETTINGS = {\n # Mapping of build.os options to docker image.\n \"os\": {\n \"ubuntu-20.04\": f\"{DOCKER_DEFAULT_IMAGE}:ubuntu-20.04\",\n \"ubuntu-22.04\": f\"{DOCKER_DEFAULT_IMAGE}:ubuntu-22.04\",\n },\n # Mapping of build.tools options to specific versions.\n \"tools\": {\n \"python\": {\n \"2.7\": \"2.7.18\",\n \"3.6\": \"3.6.15\",\n \"3.7\": \"3.7.17\",\n \"3.8\": \"3.8.19\",\n \"3.9\": \"3.9.19\",\n \"3.10\": \"3.10.14\",\n \"3.11\": \"3.11.9\",\n \"3.12\": \"3.12.3\",\n \"miniconda3-4.7\": \"miniconda3-4.7.12\",\n \"miniconda3-3.12-24.1\": \"miniconda3-3.12-24.1.2-0\",\n \"mambaforge-4.10\": \"mambaforge-4.10.3-10\",\n \"mambaforge-22.9\": \"mambaforge-22.9.0-3\",\n \"mambaforge-23.11\": \"mambaforge-23.11.0-0\",\n },\n \"nodejs\": {\n \"14\": \"14.20.1\",\n \"16\": \"16.18.1\",\n \"18\": \"18.16.1\",\n \"19\": \"19.0.1\",\n \"20\": \"20.14.0\", # LTS\n },\n \"ruby\": {\n \"3.3\": \"3.3.2\",\n },\n \"rust\": {\n \"1.55\": \"1.55.0\",\n \"1.61\": \"1.61.0\",\n \"1.64\": \"1.64.0\",\n \"1.70\": \"1.70.0\",\n \"1.75\": \"1.75.0\",\n \"1.78\": \"1.78.0\",\n },\n \"golang\": {\n \"1.17\": \"1.17.13\",\n \"1.18\": \"1.18.10\",\n \"1.19\": \"1.19.13\",\n \"1.20\": 
\"1.20.14\",\n \"1.21\": \"1.21.11\",\n \"1.22\": \"1.22.4\",\n },\n },\n}\n\n# Set latest aliases for OS and tools.\n_OS = RTD_DOCKER_BUILD_SETTINGS[\"os\"]\n_TOOLS = RTD_DOCKER_BUILD_SETTINGS[\"tools\"]\n_OS[\"ubuntu-lts-latest\"] = _OS[\"ubuntu-22.04\"]\n_TOOLS[\"python\"][\"3\"] = _TOOLS[\"python\"][\"3.12\"]\n_TOOLS[\"python\"][\"latest\"] = _TOOLS[\"python\"][\"3\"]\n_TOOLS[\"python\"][\"miniconda-latest\"] = _TOOLS[\"python\"][\"miniconda3-3.12-24.1\"]\n_TOOLS[\"python\"][\"mambaforge-latest\"] = _TOOLS[\"python\"][\"mambaforge-23.11\"]\n_TOOLS[\"nodejs\"][\"latest\"] = _TOOLS[\"nodejs\"][\"20\"]\n_TOOLS[\"ruby\"][\"latest\"] = _TOOLS[\"ruby\"][\"3.3\"]\n_TOOLS[\"rust\"][\"latest\"] = _TOOLS[\"rust\"][\"1.78\"]\n_TOOLS[\"golang\"][\"latest\"] = _TOOLS[\"golang\"][\"1.22\"]\n", "path": "readthedocs/builds/constants_docker.py"}], "after_files": [{"content": "\"\"\"\nDefine constants here to allow import them without any external dependency.\n\nThere are situations where we want to have access to these values without Django installed\n(e.g. common/dockerfiles/tasks.py)\n\nNote these constants where previously defined as Django settings in ``readthedocs/settings/base.py``.\n\"\"\"\n\nDOCKER_DEFAULT_IMAGE = \"readthedocs/build\"\n\n# When adding a new tool/version to this setting, you should:\n#\n# - Add a mapping between the expected version in the config file, to the full\n# version installed via asdf (found via ``asdf list all <tool>``).\n# - Run the script ``./scripts/compile_version_upload.sh`` in\n# development to compile and cache the new tool/version.\n# - Update the CircleCI job on the ``readthedocs-docker-images`` repository with the new versions at\n# https://github.com/rtfd/readthedocs-docker-images/blob/d2760526abdfe27001946614b749abf8011b7f90/.circleci/config.yml#L38-L44.\n# - Update the latest aliases for OS and tools (below this setting).\n# - Update readthedocs/rtd_tests/fixtures/spec/v2/schema.json.\n# - Update the documentation in ``docs/user/config-file/v2.rst``.\nRTD_DOCKER_BUILD_SETTINGS = {\n # Mapping of build.os options to docker image.\n \"os\": {\n \"ubuntu-20.04\": f\"{DOCKER_DEFAULT_IMAGE}:ubuntu-20.04\",\n \"ubuntu-22.04\": f\"{DOCKER_DEFAULT_IMAGE}:ubuntu-22.04\",\n \"ubuntu-24.04\": f\"{DOCKER_DEFAULT_IMAGE}:ubuntu-24.04\",\n },\n # Mapping of build.tools options to specific versions.\n \"tools\": {\n \"python\": {\n \"2.7\": \"2.7.18\",\n \"3.6\": \"3.6.15\",\n \"3.7\": \"3.7.17\",\n \"3.8\": \"3.8.19\",\n \"3.9\": \"3.9.19\",\n \"3.10\": \"3.10.14\",\n \"3.11\": \"3.11.9\",\n \"3.12\": \"3.12.3\",\n \"miniconda3-4.7\": \"miniconda3-4.7.12\",\n \"miniconda3-3.12-24.1\": \"miniconda3-3.12-24.1.2-0\",\n \"mambaforge-4.10\": \"mambaforge-4.10.3-10\",\n \"mambaforge-22.9\": \"mambaforge-22.9.0-3\",\n \"mambaforge-23.11\": \"mambaforge-23.11.0-0\",\n },\n \"nodejs\": {\n \"14\": \"14.20.1\",\n \"16\": \"16.18.1\",\n \"18\": \"18.16.1\",\n \"19\": \"19.0.1\",\n \"20\": \"20.14.0\", # LTS\n },\n \"ruby\": {\n \"3.3\": \"3.3.2\",\n },\n \"rust\": {\n \"1.55\": \"1.55.0\",\n \"1.61\": \"1.61.0\",\n \"1.64\": \"1.64.0\",\n \"1.70\": \"1.70.0\",\n \"1.75\": \"1.75.0\",\n \"1.78\": \"1.78.0\",\n },\n \"golang\": {\n \"1.17\": \"1.17.13\",\n \"1.18\": \"1.18.10\",\n \"1.19\": \"1.19.13\",\n \"1.20\": \"1.20.14\",\n \"1.21\": \"1.21.11\",\n \"1.22\": \"1.22.4\",\n },\n },\n}\n\n# Set latest aliases for OS and tools.\n_OS = RTD_DOCKER_BUILD_SETTINGS[\"os\"]\n_TOOLS = RTD_DOCKER_BUILD_SETTINGS[\"tools\"]\n\n# TODO: point ``ubuntu-lts-latest`` to Ubuntu 24.04 LTS once we 
have tested it\n# in production after some weeks\n_OS[\"ubuntu-lts-latest\"] = _OS[\"ubuntu-22.04\"]\n\n_TOOLS[\"python\"][\"3\"] = _TOOLS[\"python\"][\"3.12\"]\n_TOOLS[\"python\"][\"latest\"] = _TOOLS[\"python\"][\"3\"]\n_TOOLS[\"python\"][\"miniconda-latest\"] = _TOOLS[\"python\"][\"miniconda3-3.12-24.1\"]\n_TOOLS[\"python\"][\"mambaforge-latest\"] = _TOOLS[\"python\"][\"mambaforge-23.11\"]\n_TOOLS[\"nodejs\"][\"latest\"] = _TOOLS[\"nodejs\"][\"20\"]\n_TOOLS[\"ruby\"][\"latest\"] = _TOOLS[\"ruby\"][\"3.3\"]\n_TOOLS[\"rust\"][\"latest\"] = _TOOLS[\"rust\"][\"1.78\"]\n_TOOLS[\"golang\"][\"latest\"] = _TOOLS[\"golang\"][\"1.22\"]\n", "path": "readthedocs/builds/constants_docker.py"}]}
| 1,639 | 325 |
gh_patches_debug_13461
|
rasdani/github-patches
|
git_diff
|
huggingface__optimum-217
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add Support for DeBERTaV2
I would like to use DeBERTaV2 for sequence classification as a quantized model. Please let me know what needs to be done to open a PR to add this support!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `optimum/onnxruntime/utils.py`
Content:
```
1 # Copyright 2021 The HuggingFace Team. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from pathlib import Path
15 from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
16
17 import torch
18 from transformers.onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
19 from transformers.utils import logging
20
21 import onnx
22 import onnxruntime as ort
23
24 from ..onnx import OnnxConfigWithLoss, OnnxConfigWithPastAndLoss, OnnxSeq2SeqConfigWithPastAndLoss
25
26
27 logger = logging.get_logger(__name__)
28
29 ONNX_WEIGHTS_NAME = "model.onnx"
30 OPTIMIZED_ONNX_WEIGHTS_NAME = "optimized_model.onnx"
31 QUANTIZED_ONNX_WEIGHTS_NAME = "q8_model.onnx"
32
33
34 def _is_gpu_available():
35 """
36 checks if a gpu is available.
37 """
38 available_providers = ort.get_available_providers()
39 if "CUDAExecutionProvider" in available_providers and torch.cuda.is_available():
40 return True
41 else:
42 return False
43
44
45 class ORTConfigManager:
46 """
47 A class that contains all the information needed by ONNX Runtime optimization for a given model type.
48
49 Attributes:
50 _conf (`Dict[str, tuple]`):
51 A dictionary mapping each supported model type to a tuple containing the number of attention heads
52 and the hidden size model config attribute names as well as the corresponding ONNX Runtime model type.
53 """
54
55 _conf = {
56 "bert": ("num_attention_heads", "hidden_size", "bert"),
57 "albert": ("num_attention_heads", "hidden_size", "bert"),
58 "camembert": ("num_attention_heads", "hidden_size", "bert"),
59 "distilbert": ("n_heads", "dim", "bert"),
60 "electra": ("num_attention_heads", "hidden_size", "bert"),
61 "roberta": ("num_attention_heads", "hidden_size", "bert"),
62 "bart": ("encoder_attention_heads", "d_model", "bart"),
63 "gpt2": ("n_head", "n_embd", "gpt2"),
64 "gpt_neo": ("num_heads", "hidden_size", "gpt2"),
65 }
66
67 @classmethod
68 def get_num_heads_name(cls, model_type: str) -> str:
69 num_heads = "num_attention_heads"
70 try:
71 num_heads = cls._conf[model_type][0]
72 except KeyError:
73 logger.warning(
74 f"{model_type} is not supported yet. Only {list(cls._conf.keys())} are supported. The default value to "
75 f"access the number of heads defined in the config is set to `{num_heads}`."
76 )
77 return num_heads
78
79 @classmethod
80 def get_hidden_size_name(cls, model_type: str) -> str:
81 hidden_size = "hidden_size"
82 try:
83 hidden_size = cls._conf[model_type][1]
84 except KeyError:
85 logger.warning(
86 f"{model_type} is not supported yet. Only {list(cls._conf.keys())} are supported. The default value to "
87 f"access the hidden size defined in the config is set to `{hidden_size}`."
88 )
89 return hidden_size
90
91 @classmethod
92 def get_model_ort_type(cls, model_type: str) -> str:
93 try:
94 model_type = cls._conf[model_type][2]
95 except KeyError:
96 logger.warning(f"{model_type} is not supported yet. Only {list(cls._conf.keys())} are supported.")
97 return model_type
98
99 @classmethod
100 def check_supported_model_or_raise(cls, model_type: str) -> bool:
101 if model_type not in cls._conf:
102 raise KeyError(
103 f"{model_type} model type is not supported yet. Only {list(cls._conf.keys())} are supported. "
104 f"If you want to support {model_type} please propose a PR or open up an issue."
105 )
106
107
108 def generate_identified_filename(filename, identifier):
109 return filename.parent.joinpath(filename.stem + identifier).with_suffix(filename.suffix)
110
111
112 def fix_atenops_to_gather(model_path):
113 # Fix broken ATenOp nodes back to Gather nodes.
114 model = onnx.load(model_path)
115 onnx.checker.check_model(model)
116
117 nodes = model.graph.node
118
119 for node in nodes:
120 if node.op_type in ["ATenOp", "ATen"]:
121 logger.info(f"----Start fixing node: {node.name}----")
122 op_num = node.name.split("_")[-1]
123 new_node = onnx.helper.make_node(
124 "Gather",
125 name="Gather_" + op_num,
126 inputs=[node.input[0], node.input[1]],
127 outputs=node.output,
128 )
129
130 model.graph.node.remove(node)
131 model.graph.node.insert(int(op_num), new_node)
132
133 onnx.checker.check_model(model)
134 onnx.save(model, model_path)
135
136
137 def wrap_onnx_config_for_loss(onnx_config: OnnxConfig) -> OnnxConfig:
138 if isinstance(onnx_config, OnnxSeq2SeqConfigWithPast):
139 return OnnxSeq2SeqConfigWithPastAndLoss(onnx_config)
140 elif isinstance(onnx_config, OnnxConfigWithPast):
141 return OnnxConfigWithPastAndLoss(onnx_config)
142 else:
143 return OnnxConfigWithLoss(onnx_config)
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/optimum/onnxruntime/utils.py b/optimum/onnxruntime/utils.py
--- a/optimum/onnxruntime/utils.py
+++ b/optimum/onnxruntime/utils.py
@@ -57,6 +57,8 @@
"albert": ("num_attention_heads", "hidden_size", "bert"),
"camembert": ("num_attention_heads", "hidden_size", "bert"),
"distilbert": ("n_heads", "dim", "bert"),
+ "deberta": ("num_attention_heads", "hidden_size", "bert"),
+ "deberta-v2": ("num_attention_heads", "hidden_size", "bert"),
"electra": ("num_attention_heads", "hidden_size", "bert"),
"roberta": ("num_attention_heads", "hidden_size", "bert"),
"bart": ("encoder_attention_heads", "d_model", "bart"),
|
{"golden_diff": "diff --git a/optimum/onnxruntime/utils.py b/optimum/onnxruntime/utils.py\n--- a/optimum/onnxruntime/utils.py\n+++ b/optimum/onnxruntime/utils.py\n@@ -57,6 +57,8 @@\n \"albert\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"camembert\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"distilbert\": (\"n_heads\", \"dim\", \"bert\"),\n+ \"deberta\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n+ \"deberta-v2\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"electra\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"roberta\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"bart\": (\"encoder_attention_heads\", \"d_model\", \"bart\"),\n", "issue": "Add Support for DeBERTaV2\nI would like to use DeBERTaV2 for sequence classification as a quantized model. Please let me know what needs to be done to open a PR to add this support!\n", "before_files": [{"content": "# Copyright 2021 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union\n\nimport torch\nfrom transformers.onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast\nfrom transformers.utils import logging\n\nimport onnx\nimport onnxruntime as ort\n\nfrom ..onnx import OnnxConfigWithLoss, OnnxConfigWithPastAndLoss, OnnxSeq2SeqConfigWithPastAndLoss\n\n\nlogger = logging.get_logger(__name__)\n\nONNX_WEIGHTS_NAME = \"model.onnx\"\nOPTIMIZED_ONNX_WEIGHTS_NAME = \"optimized_model.onnx\"\nQUANTIZED_ONNX_WEIGHTS_NAME = \"q8_model.onnx\"\n\n\ndef _is_gpu_available():\n \"\"\"\n checks if a gpu is available.\n \"\"\"\n available_providers = ort.get_available_providers()\n if \"CUDAExecutionProvider\" in available_providers and torch.cuda.is_available():\n return True\n else:\n return False\n\n\nclass ORTConfigManager:\n \"\"\"\n A class that contains all the information needed by ONNX Runtime optimization for a given model type.\n\n Attributes:\n _conf (`Dict[str, tuple]`):\n A dictionary mapping each supported model type to a tuple containing the number of attention heads\n and the hidden size model config attribute names as well as the corresponding ONNX Runtime model type.\n \"\"\"\n\n _conf = {\n \"bert\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"albert\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"camembert\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"distilbert\": (\"n_heads\", \"dim\", \"bert\"),\n \"electra\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"roberta\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"bart\": (\"encoder_attention_heads\", \"d_model\", \"bart\"),\n \"gpt2\": (\"n_head\", \"n_embd\", \"gpt2\"),\n \"gpt_neo\": (\"num_heads\", \"hidden_size\", \"gpt2\"),\n }\n\n @classmethod\n def get_num_heads_name(cls, model_type: str) -> str:\n num_heads = \"num_attention_heads\"\n try:\n num_heads = 
cls._conf[model_type][0]\n except KeyError:\n logger.warning(\n f\"{model_type} is not supported yet. Only {list(cls._conf.keys())} are supported. The default value to \"\n f\"access the number of heads defined in the config is set to `{num_heads}`.\"\n )\n return num_heads\n\n @classmethod\n def get_hidden_size_name(cls, model_type: str) -> str:\n hidden_size = \"hidden_size\"\n try:\n hidden_size = cls._conf[model_type][1]\n except KeyError:\n logger.warning(\n f\"{model_type} is not supported yet. Only {list(cls._conf.keys())} are supported. The default value to \"\n f\"access the hidden size defined in the config is set to `{hidden_size}`.\"\n )\n return hidden_size\n\n @classmethod\n def get_model_ort_type(cls, model_type: str) -> str:\n try:\n model_type = cls._conf[model_type][2]\n except KeyError:\n logger.warning(f\"{model_type} is not supported yet. Only {list(cls._conf.keys())} are supported.\")\n return model_type\n\n @classmethod\n def check_supported_model_or_raise(cls, model_type: str) -> bool:\n if model_type not in cls._conf:\n raise KeyError(\n f\"{model_type} model type is not supported yet. Only {list(cls._conf.keys())} are supported. \"\n f\"If you want to support {model_type} please propose a PR or open up an issue.\"\n )\n\n\ndef generate_identified_filename(filename, identifier):\n return filename.parent.joinpath(filename.stem + identifier).with_suffix(filename.suffix)\n\n\ndef fix_atenops_to_gather(model_path):\n # Fix broken ATenOp nodes back to Gather nodes.\n model = onnx.load(model_path)\n onnx.checker.check_model(model)\n\n nodes = model.graph.node\n\n for node in nodes:\n if node.op_type in [\"ATenOp\", \"ATen\"]:\n logger.info(f\"----Start fixing node: {node.name}----\")\n op_num = node.name.split(\"_\")[-1]\n new_node = onnx.helper.make_node(\n \"Gather\",\n name=\"Gather_\" + op_num,\n inputs=[node.input[0], node.input[1]],\n outputs=node.output,\n )\n\n model.graph.node.remove(node)\n model.graph.node.insert(int(op_num), new_node)\n\n onnx.checker.check_model(model)\n onnx.save(model, model_path)\n\n\ndef wrap_onnx_config_for_loss(onnx_config: OnnxConfig) -> OnnxConfig:\n if isinstance(onnx_config, OnnxSeq2SeqConfigWithPast):\n return OnnxSeq2SeqConfigWithPastAndLoss(onnx_config)\n elif isinstance(onnx_config, OnnxConfigWithPast):\n return OnnxConfigWithPastAndLoss(onnx_config)\n else:\n return OnnxConfigWithLoss(onnx_config)\n", "path": "optimum/onnxruntime/utils.py"}], "after_files": [{"content": "# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union\n\nimport torch\nfrom transformers.onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast\nfrom transformers.utils import logging\n\nimport onnx\nimport onnxruntime as ort\n\nfrom ..onnx import OnnxConfigWithLoss, OnnxConfigWithPastAndLoss, OnnxSeq2SeqConfigWithPastAndLoss\n\n\nlogger = logging.get_logger(__name__)\n\nONNX_WEIGHTS_NAME = \"model.onnx\"\nOPTIMIZED_ONNX_WEIGHTS_NAME = \"optimized_model.onnx\"\nQUANTIZED_ONNX_WEIGHTS_NAME = \"q8_model.onnx\"\n\n\ndef _is_gpu_available():\n \"\"\"\n checks if a gpu is available.\n \"\"\"\n available_providers = ort.get_available_providers()\n if \"CUDAExecutionProvider\" in available_providers and torch.cuda.is_available():\n return True\n else:\n return False\n\n\nclass ORTConfigManager:\n \"\"\"\n A class that contains all the information needed by ONNX Runtime optimization for a given model type.\n\n Attributes:\n _conf (`Dict[str, tuple]`):\n A dictionary mapping each supported model type to a tuple containing the number of attention heads\n and the hidden size model config attribute names as well as the corresponding ONNX Runtime model type.\n \"\"\"\n\n _conf = {\n \"bert\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"albert\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"camembert\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"distilbert\": (\"n_heads\", \"dim\", \"bert\"),\n \"deberta\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"deberta-v2\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"electra\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"roberta\": (\"num_attention_heads\", \"hidden_size\", \"bert\"),\n \"bart\": (\"encoder_attention_heads\", \"d_model\", \"bart\"),\n \"gpt2\": (\"n_head\", \"n_embd\", \"gpt2\"),\n \"gpt_neo\": (\"num_heads\", \"hidden_size\", \"gpt2\"),\n }\n\n @classmethod\n def get_num_heads_name(cls, model_type: str) -> str:\n num_heads = \"num_attention_heads\"\n try:\n num_heads = cls._conf[model_type][0]\n except KeyError:\n logger.warning(\n f\"{model_type} is not supported yet. Only {list(cls._conf.keys())} are supported. The default value to \"\n f\"access the number of heads defined in the config is set to `{num_heads}`.\"\n )\n return num_heads\n\n @classmethod\n def get_hidden_size_name(cls, model_type: str) -> str:\n hidden_size = \"hidden_size\"\n try:\n hidden_size = cls._conf[model_type][1]\n except KeyError:\n logger.warning(\n f\"{model_type} is not supported yet. Only {list(cls._conf.keys())} are supported. The default value to \"\n f\"access the hidden size defined in the config is set to `{hidden_size}`.\"\n )\n return hidden_size\n\n @classmethod\n def get_model_ort_type(cls, model_type: str) -> str:\n try:\n model_type = cls._conf[model_type][2]\n except KeyError:\n logger.warning(f\"{model_type} is not supported yet. 
Only {list(cls._conf.keys())} are supported.\")\n return model_type\n\n @classmethod\n def check_supported_model_or_raise(cls, model_type: str) -> bool:\n if model_type not in cls._conf:\n raise KeyError(\n f\"{model_type} model type is not supported yet. Only {list(cls._conf.keys())} are supported. \"\n f\"If you want to support {model_type} please propose a PR or open up an issue.\"\n )\n\n\ndef generate_identified_filename(filename, identifier):\n return filename.parent.joinpath(filename.stem + identifier).with_suffix(filename.suffix)\n\n\ndef fix_atenops_to_gather(model_path):\n # Fix broken ATenOp nodes back to Gather nodes.\n model = onnx.load(model_path)\n onnx.checker.check_model(model)\n\n nodes = model.graph.node\n\n for node in nodes:\n if node.op_type in [\"ATenOp\", \"ATen\"]:\n logger.info(f\"----Start fixing node: {node.name}----\")\n op_num = node.name.split(\"_\")[-1]\n new_node = onnx.helper.make_node(\n \"Gather\",\n name=\"Gather_\" + op_num,\n inputs=[node.input[0], node.input[1]],\n outputs=node.output,\n )\n\n model.graph.node.remove(node)\n model.graph.node.insert(int(op_num), new_node)\n\n onnx.checker.check_model(model)\n onnx.save(model, model_path)\n\n\ndef wrap_onnx_config_for_loss(onnx_config: OnnxConfig) -> OnnxConfig:\n if isinstance(onnx_config, OnnxSeq2SeqConfigWithPast):\n return OnnxSeq2SeqConfigWithPastAndLoss(onnx_config)\n elif isinstance(onnx_config, OnnxConfigWithPast):\n return OnnxConfigWithPastAndLoss(onnx_config)\n else:\n return OnnxConfigWithLoss(onnx_config)\n", "path": "optimum/onnxruntime/utils.py"}]}
| 1,921 | 194 |
gh_patches_debug_26700
|
rasdani/github-patches
|
git_diff
|
pytorch__ignite-2563
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SSIM update does not work for the last batch (if batch size is different)
## 🐛 Bug description
<!-- A clear and concise description of what the bug is. -->
<!-- Please, add steps on how to reproduce it. -->
<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->
<!-- A clear and concise description of what you expected to happen. -->
## Environment
- PyTorch Version: 1.10.1
- Ignite Version: 0.4.8
- OS: Ubuntu
- How you installed Ignite (`conda`, `pip`, source):
- Python version: 3.9
- Any other relevant information:
If the previous batch and the current batch have different sizes, then it throws the error. Attaching the screenshot of the error.
Screenshot of the basic code is also attached.
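A minimal sketch of such a reproduction (our assumption of roughly what the attached code showed, using ignite 0.4.8 and default SSIM settings):

```python
import torch
from ignite.metrics import SSIM

metric = SSIM(data_range=1.0)

# first batch: 4 samples
preds = torch.rand(4, 3, 16, 16)
metric.update((preds, preds * 0.75))

# last batch: only 2 samples; the running batch-wise sum kept the shape (4,)
# of the first batch, so this second update() raises a RuntimeError on 0.4.8
preds = torch.rand(2, 3, 16, 16)
metric.update((preds, preds * 0.75))

print(metric.compute())
```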
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ignite/metrics/ssim.py`
Content:
```
1 from typing import Callable, Sequence, Union
2
3 import torch
4 import torch.nn.functional as F
5
6 from ignite.exceptions import NotComputableError
7 from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
8
9 __all__ = ["SSIM"]
10
11
12 class SSIM(Metric):
13 """
14 Computes Structual Similarity Index Measure
15
16 Args:
17 data_range: Range of the image. Typically, ``1.0`` or ``255``.
18 kernel_size: Size of the kernel. Default: (11, 11)
19 sigma: Standard deviation of the gaussian kernel.
20 Argument is used if ``gaussian=True``. Default: (1.5, 1.5)
21 k1: Parameter of SSIM. Default: 0.01
22 k2: Parameter of SSIM. Default: 0.03
23 gaussian: ``True`` to use gaussian kernel, ``False`` to use uniform kernel
24 output_transform: A callable that is used to transform the
25 :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
26 form expected by the metric.
27 device: specifies which device updates are accumulated on. Setting the metric's
28 device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
29 default, CPU.
30
31 Examples:
32 To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
33 The output of the engine's ``process_function`` needs to be in the format of
34 ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.
35
36 ``y_pred`` and ``y`` can be un-normalized or normalized image tensors. Depending on that, the user might need
37 to adjust ``data_range``. ``y_pred`` and ``y`` should have the same shape.
38
39 For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
40
41 .. include:: defaults.rst
42 :start-after: :orphan:
43
44 .. testcode::
45
46 metric = SSIM(data_range=1.0)
47 metric.attach(default_evaluator, 'ssim')
48 preds = torch.rand([4, 3, 16, 16])
49 target = preds * 0.75
50 state = default_evaluator.run([[preds, target]])
51 print(state.metrics['ssim'])
52
53 .. testoutput::
54
55 0.9218971...
56
57 .. versionadded:: 0.4.2
58 """
59
60 def __init__(
61 self,
62 data_range: Union[int, float],
63 kernel_size: Union[int, Sequence[int]] = (11, 11),
64 sigma: Union[float, Sequence[float]] = (1.5, 1.5),
65 k1: float = 0.01,
66 k2: float = 0.03,
67 gaussian: bool = True,
68 output_transform: Callable = lambda x: x,
69 device: Union[str, torch.device] = torch.device("cpu"),
70 ):
71 if isinstance(kernel_size, int):
72 self.kernel_size = [kernel_size, kernel_size] # type: Sequence[int]
73 elif isinstance(kernel_size, Sequence):
74 self.kernel_size = kernel_size
75 else:
76 raise ValueError("Argument kernel_size should be either int or a sequence of int.")
77
78 if isinstance(sigma, float):
79 self.sigma = [sigma, sigma] # type: Sequence[float]
80 elif isinstance(sigma, Sequence):
81 self.sigma = sigma
82 else:
83 raise ValueError("Argument sigma should be either float or a sequence of float.")
84
85 if any(x % 2 == 0 or x <= 0 for x in self.kernel_size):
86 raise ValueError(f"Expected kernel_size to have odd positive number. Got {kernel_size}.")
87
88 if any(y <= 0 for y in self.sigma):
89 raise ValueError(f"Expected sigma to have positive number. Got {sigma}.")
90
91 super(SSIM, self).__init__(output_transform=output_transform, device=device)
92 self.gaussian = gaussian
93 self.c1 = (k1 * data_range) ** 2
94 self.c2 = (k2 * data_range) ** 2
95 self.pad_h = (self.kernel_size[0] - 1) // 2
96 self.pad_w = (self.kernel_size[1] - 1) // 2
97 self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)
98
99 @reinit__is_reduced
100 def reset(self) -> None:
101 # Not a tensor because batch size is not known in advance.
102 self._sum_of_batchwise_ssim = 0.0 # type: Union[float, torch.Tensor]
103 self._num_examples = 0
104 self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)
105
106 def _uniform(self, kernel_size: int) -> torch.Tensor:
107 max, min = 2.5, -2.5
108 ksize_half = (kernel_size - 1) * 0.5
109 kernel = torch.linspace(-ksize_half, ksize_half, steps=kernel_size, device=self._device)
110 for i, j in enumerate(kernel):
111 if min <= j <= max:
112 kernel[i] = 1 / (max - min)
113 else:
114 kernel[i] = 0
115
116 return kernel.unsqueeze(dim=0) # (1, kernel_size)
117
118 def _gaussian(self, kernel_size: int, sigma: float) -> torch.Tensor:
119 ksize_half = (kernel_size - 1) * 0.5
120 kernel = torch.linspace(-ksize_half, ksize_half, steps=kernel_size, device=self._device)
121 gauss = torch.exp(-0.5 * (kernel / sigma).pow(2))
122 return (gauss / gauss.sum()).unsqueeze(dim=0) # (1, kernel_size)
123
124 def _gaussian_or_uniform_kernel(self, kernel_size: Sequence[int], sigma: Sequence[float]) -> torch.Tensor:
125 if self.gaussian:
126 kernel_x = self._gaussian(kernel_size[0], sigma[0])
127 kernel_y = self._gaussian(kernel_size[1], sigma[1])
128 else:
129 kernel_x = self._uniform(kernel_size[0])
130 kernel_y = self._uniform(kernel_size[1])
131
132 return torch.matmul(kernel_x.t(), kernel_y) # (kernel_size, 1) * (1, kernel_size)
133
134 @reinit__is_reduced
135 def update(self, output: Sequence[torch.Tensor]) -> None:
136 y_pred, y = output[0].detach(), output[1].detach()
137
138 if y_pred.dtype != y.dtype:
139 raise TypeError(
140 f"Expected y_pred and y to have the same data type. Got y_pred: {y_pred.dtype} and y: {y.dtype}."
141 )
142
143 if y_pred.shape != y.shape:
144 raise ValueError(
145 f"Expected y_pred and y to have the same shape. Got y_pred: {y_pred.shape} and y: {y.shape}."
146 )
147
148 if len(y_pred.shape) != 4 or len(y.shape) != 4:
149 raise ValueError(
150 f"Expected y_pred and y to have BxCxHxW shape. Got y_pred: {y_pred.shape} and y: {y.shape}."
151 )
152
153 channel = y_pred.size(1)
154 if len(self._kernel.shape) < 4:
155 self._kernel = self._kernel.expand(channel, 1, -1, -1).to(device=y_pred.device)
156
157 y_pred = F.pad(y_pred, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode="reflect")
158 y = F.pad(y, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode="reflect")
159
160 input_list = torch.cat([y_pred, y, y_pred * y_pred, y * y, y_pred * y])
161 outputs = F.conv2d(input_list, self._kernel, groups=channel)
162
163 output_list = [outputs[x * y_pred.size(0) : (x + 1) * y_pred.size(0)] for x in range(len(outputs))]
164
165 mu_pred_sq = output_list[0].pow(2)
166 mu_target_sq = output_list[1].pow(2)
167 mu_pred_target = output_list[0] * output_list[1]
168
169 sigma_pred_sq = output_list[2] - mu_pred_sq
170 sigma_target_sq = output_list[3] - mu_target_sq
171 sigma_pred_target = output_list[4] - mu_pred_target
172
173 a1 = 2 * mu_pred_target + self.c1
174 a2 = 2 * sigma_pred_target + self.c2
175 b1 = mu_pred_sq + mu_target_sq + self.c1
176 b2 = sigma_pred_sq + sigma_target_sq + self.c2
177
178 ssim_idx = (a1 * a2) / (b1 * b2)
179 self._sum_of_batchwise_ssim += torch.mean(ssim_idx, (1, 2, 3), dtype=torch.float64).to(self._device)
180 self._num_examples += y.shape[0]
181
182 @sync_all_reduce("_sum_of_batchwise_ssim", "_num_examples")
183 def compute(self) -> torch.Tensor:
184 if self._num_examples == 0:
185 raise NotComputableError("SSIM must have at least one example before it can be computed.")
186 return torch.sum(self._sum_of_batchwise_ssim / self._num_examples) # type: ignore[arg-type]
187
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ignite/metrics/ssim.py b/ignite/metrics/ssim.py
--- a/ignite/metrics/ssim.py
+++ b/ignite/metrics/ssim.py
@@ -98,8 +98,7 @@
@reinit__is_reduced
def reset(self) -> None:
- # Not a tensor because batch size is not known in advance.
- self._sum_of_batchwise_ssim = 0.0 # type: Union[float, torch.Tensor]
+ self._sum_of_ssim = torch.tensor(0.0, device=self._device)
self._num_examples = 0
self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)
@@ -176,11 +175,12 @@
b2 = sigma_pred_sq + sigma_target_sq + self.c2
ssim_idx = (a1 * a2) / (b1 * b2)
- self._sum_of_batchwise_ssim += torch.mean(ssim_idx, (1, 2, 3), dtype=torch.float64).to(self._device)
+ self._sum_of_ssim += torch.mean(ssim_idx, (1, 2, 3), dtype=torch.float64).sum().to(self._device)
+
self._num_examples += y.shape[0]
- @sync_all_reduce("_sum_of_batchwise_ssim", "_num_examples")
+ @sync_all_reduce("_sum_of_ssim", "_num_examples")
def compute(self) -> torch.Tensor:
if self._num_examples == 0:
raise NotComputableError("SSIM must have at least one example before it can be computed.")
- return torch.sum(self._sum_of_batchwise_ssim / self._num_examples) # type: ignore[arg-type]
+ return self._sum_of_ssim / self._num_examples
|
{"golden_diff": "diff --git a/ignite/metrics/ssim.py b/ignite/metrics/ssim.py\n--- a/ignite/metrics/ssim.py\n+++ b/ignite/metrics/ssim.py\n@@ -98,8 +98,7 @@\n \n @reinit__is_reduced\n def reset(self) -> None:\n- # Not a tensor because batch size is not known in advance.\n- self._sum_of_batchwise_ssim = 0.0 # type: Union[float, torch.Tensor]\n+ self._sum_of_ssim = torch.tensor(0.0, device=self._device)\n self._num_examples = 0\n self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)\n \n@@ -176,11 +175,12 @@\n b2 = sigma_pred_sq + sigma_target_sq + self.c2\n \n ssim_idx = (a1 * a2) / (b1 * b2)\n- self._sum_of_batchwise_ssim += torch.mean(ssim_idx, (1, 2, 3), dtype=torch.float64).to(self._device)\n+ self._sum_of_ssim += torch.mean(ssim_idx, (1, 2, 3), dtype=torch.float64).sum().to(self._device)\n+\n self._num_examples += y.shape[0]\n \n- @sync_all_reduce(\"_sum_of_batchwise_ssim\", \"_num_examples\")\n+ @sync_all_reduce(\"_sum_of_ssim\", \"_num_examples\")\n def compute(self) -> torch.Tensor:\n if self._num_examples == 0:\n raise NotComputableError(\"SSIM must have at least one example before it can be computed.\")\n- return torch.sum(self._sum_of_batchwise_ssim / self._num_examples) # type: ignore[arg-type]\n+ return self._sum_of_ssim / self._num_examples\n", "issue": "SSIM update does not work for the last batch (if batch size is different)\n## \ud83d\udc1b Bug description\r\n\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\n<!-- Please, add steps on how to reproduce it. -->\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n## Environment\r\n\r\n- PyTorch Version :1.10.1\r\n- Ignite Version ():0.4.8\r\n- OS : Ubuntu\r\n- How you installed Ignite (`conda`, `pip`, source): \r\n- Python version: 3.9\r\n- Any other relevant information:\r\n\r\nIf the previous batch and the current batch are different then it throws the error. Attaching the screen shot of error. \r\n\r\nScreenshot of the basic code is also attached.\r\n\r\n\n", "before_files": [{"content": "from typing import Callable, Sequence, Union\n\nimport torch\nimport torch.nn.functional as F\n\nfrom ignite.exceptions import NotComputableError\nfrom ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce\n\n__all__ = [\"SSIM\"]\n\n\nclass SSIM(Metric):\n \"\"\"\n Computes Structual Similarity Index Measure\n\n Args:\n data_range: Range of the image. Typically, ``1.0`` or ``255``.\n kernel_size: Size of the kernel. Default: (11, 11)\n sigma: Standard deviation of the gaussian kernel.\n Argument is used if ``gaussian=True``. Default: (1.5, 1.5)\n k1: Parameter of SSIM. Default: 0.01\n k2: Parameter of SSIM. Default: 0.03\n gaussian: ``True`` to use gaussian kernel, ``False`` to use uniform kernel\n output_transform: A callable that is used to transform the\n :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the\n form expected by the metric.\n device: specifies which device updates are accumulated on. Setting the metric's\n device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. 
By\n default, CPU.\n\n Examples:\n To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.\n The output of the engine's ``process_function`` needs to be in the format of\n ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.\n\n ``y_pred`` and ``y`` can be un-normalized or normalized image tensors. Depending on that, the user might need\n to adjust ``data_range``. ``y_pred`` and ``y`` should have the same shape.\n\n For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.\n\n .. include:: defaults.rst\n :start-after: :orphan:\n\n .. testcode::\n\n metric = SSIM(data_range=1.0)\n metric.attach(default_evaluator, 'ssim')\n preds = torch.rand([4, 3, 16, 16])\n target = preds * 0.75\n state = default_evaluator.run([[preds, target]])\n print(state.metrics['ssim'])\n\n .. testoutput::\n\n 0.9218971...\n\n .. versionadded:: 0.4.2\n \"\"\"\n\n def __init__(\n self,\n data_range: Union[int, float],\n kernel_size: Union[int, Sequence[int]] = (11, 11),\n sigma: Union[float, Sequence[float]] = (1.5, 1.5),\n k1: float = 0.01,\n k2: float = 0.03,\n gaussian: bool = True,\n output_transform: Callable = lambda x: x,\n device: Union[str, torch.device] = torch.device(\"cpu\"),\n ):\n if isinstance(kernel_size, int):\n self.kernel_size = [kernel_size, kernel_size] # type: Sequence[int]\n elif isinstance(kernel_size, Sequence):\n self.kernel_size = kernel_size\n else:\n raise ValueError(\"Argument kernel_size should be either int or a sequence of int.\")\n\n if isinstance(sigma, float):\n self.sigma = [sigma, sigma] # type: Sequence[float]\n elif isinstance(sigma, Sequence):\n self.sigma = sigma\n else:\n raise ValueError(\"Argument sigma should be either float or a sequence of float.\")\n\n if any(x % 2 == 0 or x <= 0 for x in self.kernel_size):\n raise ValueError(f\"Expected kernel_size to have odd positive number. Got {kernel_size}.\")\n\n if any(y <= 0 for y in self.sigma):\n raise ValueError(f\"Expected sigma to have positive number. 
Got {sigma}.\")\n\n super(SSIM, self).__init__(output_transform=output_transform, device=device)\n self.gaussian = gaussian\n self.c1 = (k1 * data_range) ** 2\n self.c2 = (k2 * data_range) ** 2\n self.pad_h = (self.kernel_size[0] - 1) // 2\n self.pad_w = (self.kernel_size[1] - 1) // 2\n self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)\n\n @reinit__is_reduced\n def reset(self) -> None:\n # Not a tensor because batch size is not known in advance.\n self._sum_of_batchwise_ssim = 0.0 # type: Union[float, torch.Tensor]\n self._num_examples = 0\n self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)\n\n def _uniform(self, kernel_size: int) -> torch.Tensor:\n max, min = 2.5, -2.5\n ksize_half = (kernel_size - 1) * 0.5\n kernel = torch.linspace(-ksize_half, ksize_half, steps=kernel_size, device=self._device)\n for i, j in enumerate(kernel):\n if min <= j <= max:\n kernel[i] = 1 / (max - min)\n else:\n kernel[i] = 0\n\n return kernel.unsqueeze(dim=0) # (1, kernel_size)\n\n def _gaussian(self, kernel_size: int, sigma: float) -> torch.Tensor:\n ksize_half = (kernel_size - 1) * 0.5\n kernel = torch.linspace(-ksize_half, ksize_half, steps=kernel_size, device=self._device)\n gauss = torch.exp(-0.5 * (kernel / sigma).pow(2))\n return (gauss / gauss.sum()).unsqueeze(dim=0) # (1, kernel_size)\n\n def _gaussian_or_uniform_kernel(self, kernel_size: Sequence[int], sigma: Sequence[float]) -> torch.Tensor:\n if self.gaussian:\n kernel_x = self._gaussian(kernel_size[0], sigma[0])\n kernel_y = self._gaussian(kernel_size[1], sigma[1])\n else:\n kernel_x = self._uniform(kernel_size[0])\n kernel_y = self._uniform(kernel_size[1])\n\n return torch.matmul(kernel_x.t(), kernel_y) # (kernel_size, 1) * (1, kernel_size)\n\n @reinit__is_reduced\n def update(self, output: Sequence[torch.Tensor]) -> None:\n y_pred, y = output[0].detach(), output[1].detach()\n\n if y_pred.dtype != y.dtype:\n raise TypeError(\n f\"Expected y_pred and y to have the same data type. Got y_pred: {y_pred.dtype} and y: {y.dtype}.\"\n )\n\n if y_pred.shape != y.shape:\n raise ValueError(\n f\"Expected y_pred and y to have the same shape. Got y_pred: {y_pred.shape} and y: {y.shape}.\"\n )\n\n if len(y_pred.shape) != 4 or len(y.shape) != 4:\n raise ValueError(\n f\"Expected y_pred and y to have BxCxHxW shape. 
Got y_pred: {y_pred.shape} and y: {y.shape}.\"\n )\n\n channel = y_pred.size(1)\n if len(self._kernel.shape) < 4:\n self._kernel = self._kernel.expand(channel, 1, -1, -1).to(device=y_pred.device)\n\n y_pred = F.pad(y_pred, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode=\"reflect\")\n y = F.pad(y, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode=\"reflect\")\n\n input_list = torch.cat([y_pred, y, y_pred * y_pred, y * y, y_pred * y])\n outputs = F.conv2d(input_list, self._kernel, groups=channel)\n\n output_list = [outputs[x * y_pred.size(0) : (x + 1) * y_pred.size(0)] for x in range(len(outputs))]\n\n mu_pred_sq = output_list[0].pow(2)\n mu_target_sq = output_list[1].pow(2)\n mu_pred_target = output_list[0] * output_list[1]\n\n sigma_pred_sq = output_list[2] - mu_pred_sq\n sigma_target_sq = output_list[3] - mu_target_sq\n sigma_pred_target = output_list[4] - mu_pred_target\n\n a1 = 2 * mu_pred_target + self.c1\n a2 = 2 * sigma_pred_target + self.c2\n b1 = mu_pred_sq + mu_target_sq + self.c1\n b2 = sigma_pred_sq + sigma_target_sq + self.c2\n\n ssim_idx = (a1 * a2) / (b1 * b2)\n self._sum_of_batchwise_ssim += torch.mean(ssim_idx, (1, 2, 3), dtype=torch.float64).to(self._device)\n self._num_examples += y.shape[0]\n\n @sync_all_reduce(\"_sum_of_batchwise_ssim\", \"_num_examples\")\n def compute(self) -> torch.Tensor:\n if self._num_examples == 0:\n raise NotComputableError(\"SSIM must have at least one example before it can be computed.\")\n return torch.sum(self._sum_of_batchwise_ssim / self._num_examples) # type: ignore[arg-type]\n", "path": "ignite/metrics/ssim.py"}], "after_files": [{"content": "from typing import Callable, Sequence, Union\n\nimport torch\nimport torch.nn.functional as F\n\nfrom ignite.exceptions import NotComputableError\nfrom ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce\n\n__all__ = [\"SSIM\"]\n\n\nclass SSIM(Metric):\n \"\"\"\n Computes Structual Similarity Index Measure\n\n Args:\n data_range: Range of the image. Typically, ``1.0`` or ``255``.\n kernel_size: Size of the kernel. Default: (11, 11)\n sigma: Standard deviation of the gaussian kernel.\n Argument is used if ``gaussian=True``. Default: (1.5, 1.5)\n k1: Parameter of SSIM. Default: 0.01\n k2: Parameter of SSIM. Default: 0.03\n gaussian: ``True`` to use gaussian kernel, ``False`` to use uniform kernel\n output_transform: A callable that is used to transform the\n :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the\n form expected by the metric.\n device: specifies which device updates are accumulated on. Setting the metric's\n device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By\n default, CPU.\n\n Examples:\n To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.\n The output of the engine's ``process_function`` needs to be in the format of\n ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``.\n\n ``y_pred`` and ``y`` can be un-normalized or normalized image tensors. Depending on that, the user might need\n to adjust ``data_range``. ``y_pred`` and ``y`` should have the same shape.\n\n For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.\n\n .. include:: defaults.rst\n :start-after: :orphan:\n\n .. 
testcode::\n\n metric = SSIM(data_range=1.0)\n metric.attach(default_evaluator, 'ssim')\n preds = torch.rand([4, 3, 16, 16])\n target = preds * 0.75\n state = default_evaluator.run([[preds, target]])\n print(state.metrics['ssim'])\n\n .. testoutput::\n\n 0.9218971...\n\n .. versionadded:: 0.4.2\n \"\"\"\n\n def __init__(\n self,\n data_range: Union[int, float],\n kernel_size: Union[int, Sequence[int]] = (11, 11),\n sigma: Union[float, Sequence[float]] = (1.5, 1.5),\n k1: float = 0.01,\n k2: float = 0.03,\n gaussian: bool = True,\n output_transform: Callable = lambda x: x,\n device: Union[str, torch.device] = torch.device(\"cpu\"),\n ):\n if isinstance(kernel_size, int):\n self.kernel_size = [kernel_size, kernel_size] # type: Sequence[int]\n elif isinstance(kernel_size, Sequence):\n self.kernel_size = kernel_size\n else:\n raise ValueError(\"Argument kernel_size should be either int or a sequence of int.\")\n\n if isinstance(sigma, float):\n self.sigma = [sigma, sigma] # type: Sequence[float]\n elif isinstance(sigma, Sequence):\n self.sigma = sigma\n else:\n raise ValueError(\"Argument sigma should be either float or a sequence of float.\")\n\n if any(x % 2 == 0 or x <= 0 for x in self.kernel_size):\n raise ValueError(f\"Expected kernel_size to have odd positive number. Got {kernel_size}.\")\n\n if any(y <= 0 for y in self.sigma):\n raise ValueError(f\"Expected sigma to have positive number. Got {sigma}.\")\n\n super(SSIM, self).__init__(output_transform=output_transform, device=device)\n self.gaussian = gaussian\n self.c1 = (k1 * data_range) ** 2\n self.c2 = (k2 * data_range) ** 2\n self.pad_h = (self.kernel_size[0] - 1) // 2\n self.pad_w = (self.kernel_size[1] - 1) // 2\n self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)\n\n @reinit__is_reduced\n def reset(self) -> None:\n self._sum_of_ssim = torch.tensor(0.0, device=self._device)\n self._num_examples = 0\n self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)\n\n def _uniform(self, kernel_size: int) -> torch.Tensor:\n max, min = 2.5, -2.5\n ksize_half = (kernel_size - 1) * 0.5\n kernel = torch.linspace(-ksize_half, ksize_half, steps=kernel_size, device=self._device)\n for i, j in enumerate(kernel):\n if min <= j <= max:\n kernel[i] = 1 / (max - min)\n else:\n kernel[i] = 0\n\n return kernel.unsqueeze(dim=0) # (1, kernel_size)\n\n def _gaussian(self, kernel_size: int, sigma: float) -> torch.Tensor:\n ksize_half = (kernel_size - 1) * 0.5\n kernel = torch.linspace(-ksize_half, ksize_half, steps=kernel_size, device=self._device)\n gauss = torch.exp(-0.5 * (kernel / sigma).pow(2))\n return (gauss / gauss.sum()).unsqueeze(dim=0) # (1, kernel_size)\n\n def _gaussian_or_uniform_kernel(self, kernel_size: Sequence[int], sigma: Sequence[float]) -> torch.Tensor:\n if self.gaussian:\n kernel_x = self._gaussian(kernel_size[0], sigma[0])\n kernel_y = self._gaussian(kernel_size[1], sigma[1])\n else:\n kernel_x = self._uniform(kernel_size[0])\n kernel_y = self._uniform(kernel_size[1])\n\n return torch.matmul(kernel_x.t(), kernel_y) # (kernel_size, 1) * (1, kernel_size)\n\n @reinit__is_reduced\n def update(self, output: Sequence[torch.Tensor]) -> None:\n y_pred, y = output[0].detach(), output[1].detach()\n\n if y_pred.dtype != y.dtype:\n raise TypeError(\n f\"Expected y_pred and y to have the same data type. Got y_pred: {y_pred.dtype} and y: {y.dtype}.\"\n )\n\n if y_pred.shape != y.shape:\n raise ValueError(\n f\"Expected y_pred and y to have the same shape. 
Got y_pred: {y_pred.shape} and y: {y.shape}.\"\n )\n\n if len(y_pred.shape) != 4 or len(y.shape) != 4:\n raise ValueError(\n f\"Expected y_pred and y to have BxCxHxW shape. Got y_pred: {y_pred.shape} and y: {y.shape}.\"\n )\n\n channel = y_pred.size(1)\n if len(self._kernel.shape) < 4:\n self._kernel = self._kernel.expand(channel, 1, -1, -1).to(device=y_pred.device)\n\n y_pred = F.pad(y_pred, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode=\"reflect\")\n y = F.pad(y, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode=\"reflect\")\n\n input_list = torch.cat([y_pred, y, y_pred * y_pred, y * y, y_pred * y])\n outputs = F.conv2d(input_list, self._kernel, groups=channel)\n\n output_list = [outputs[x * y_pred.size(0) : (x + 1) * y_pred.size(0)] for x in range(len(outputs))]\n\n mu_pred_sq = output_list[0].pow(2)\n mu_target_sq = output_list[1].pow(2)\n mu_pred_target = output_list[0] * output_list[1]\n\n sigma_pred_sq = output_list[2] - mu_pred_sq\n sigma_target_sq = output_list[3] - mu_target_sq\n sigma_pred_target = output_list[4] - mu_pred_target\n\n a1 = 2 * mu_pred_target + self.c1\n a2 = 2 * sigma_pred_target + self.c2\n b1 = mu_pred_sq + mu_target_sq + self.c1\n b2 = sigma_pred_sq + sigma_target_sq + self.c2\n\n ssim_idx = (a1 * a2) / (b1 * b2)\n self._sum_of_ssim += torch.mean(ssim_idx, (1, 2, 3), dtype=torch.float64).sum().to(self._device)\n\n self._num_examples += y.shape[0]\n\n @sync_all_reduce(\"_sum_of_ssim\", \"_num_examples\")\n def compute(self) -> torch.Tensor:\n if self._num_examples == 0:\n raise NotComputableError(\"SSIM must have at least one example before it can be computed.\")\n return self._sum_of_ssim / self._num_examples\n", "path": "ignite/metrics/ssim.py"}]}
| 3,200 | 418 |
gh_patches_debug_21461
|
rasdani/github-patches
|
git_diff
|
opsdroid__opsdroid-1902
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unhandled exception in wit.ai matcher (wrong wit.ai response parsing)
# Description
Opsdroid doesn't understand wit.ai time range response
## Steps to Reproduce
Please also include relevant information and steps to reproduce the bug/issue.
1. Connect with wit.ai app
2. Create a new intent
3. Train the intent to understand `wit/datetime`
4. Send a message like `aws cost since december`
5. Opsdroid will fail to understand the wit.ai API response
```
INFO opsdroid.parsers.witai.call_witai(): wit.ai response - {"_text": "aws cost since december", "entities": {"intent": [{"confidence": 0.99986626506986, "value": "aws_cost"}], "datetime": [{"confidence": 0.9995, "type": "interval", "from": {"grain": "month", "value": "2022-12-01T00:00:00.000-08:00"}, "values": [{"type": "interval", "from": {"grain": "month", "value": "2022-12-01T00:00:00.000-08:00"}}, {"type": "interval", "from": {"grain": "month", "value": "2023-12-01T00:00:00.000-08:00"}}, {"type": "interval", "from": {"grain": "month", "value": "2024-12-01T00:00:00.000-08:00"}}]}]}, "WARNING": "DEPRECATED", "msg_id": "0eanvH01TmiwU0Era"}.
ERROR aiohttp.server.log_exception(): Error handling request
Traceback (most recent call last):
File "C:\Users\jakub\Documents\venvs\chatops\lib\site-packages\aiohttp\web_protocol.py", line 435, in _handle_request
resp = await request_handler(request)
File "C:\Users\jakub\Documents\venvs\chatops\lib\site-packages\aiohttp\web_app.py", line 504, in _handle
resp = await handler(request)
File "C:\Users\jakub\Documents\venvs\chatops\lib\site-packages\opsdroid\connector\websocket\__init__.py", line 99, in websocket_handler
await self.opsdroid.parse(message)
File "C:\Users\jakub\Documents\venvs\chatops\lib\site-packages\opsdroid\core.py", line 627, in parse
ranked_skills = await self.get_ranked_skills(unconstrained_skills, event)
File "C:\Users\jakub\Documents\venvs\chatops\lib\site-packages\opsdroid\core.py", line 516, in get_ranked_skills
ranked_skills += await parse_witai(self, skills, message, witai)
File "C:\Users\jakub\Documents\venvs\chatops\lib\site-packages\opsdroid\parsers\witai.py", line 72, in parse_witai
key, entity[0]["value"], entity[0]["confidence"]
KeyError: 'value'
```
## Expected Functionality
There should be no exception
## Experienced Functionality
Thrown exception
## Versions
- **Opsdroid version:** 0.25.0
- **Python version:** 3.9.5
- **OS/Docker version:** Windows 10
## Configuration File
Please include your version of the configuration file below.
```yaml
# Your code goes here.
parsers:
- name: witai
access-token: [REDACTED]
min-score: 0.6
```
## Additional Details
Raw wit.ai response for `aws cost since december`
```json
{
"_text": "aws cost since december",
"entities": {
"intent": [
{
"confidence": 0.99965322126667,
"value": "aws_cost"
}
],
"datetime": [
{
"confidence": 0.9995,
"type": "interval",
"from": {
"grain": "month",
"value": "2022-12-01T00:00:00.000-08:00"
},
"values": [
{
"type": "interval",
"from": {
"grain": "month",
"value": "2022-12-01T00:00:00.000-08:00"
}
},
{
"type": "interval",
"from": {
"grain": "month",
"value": "2023-12-01T00:00:00.000-08:00"
}
},
{
"type": "interval",
"from": {
"grain": "month",
"value": "2024-12-01T00:00:00.000-08:00"
}
}
]
}
]
},
"WARNING": "DEPRECATED",
"msg_id": "051qg0BBGn4O7xZDj"
}
```
Here we can see that wit.ai sends `values` in the `datetime[0]` dict.
Opsdroid expects it to be `value` (without the **s**):
https://github.com/opsdroid/opsdroid/blob/c5dad210fe3d9068c75cd4fac9762fcc353335d3/opsdroid/parsers/witai.py#L69-L73
That code is fine for a simple response whose only entity is the intent (which the `if` on L70 excludes):
```json
{
"_text": "aws",
"entities": {
"intent": [
{
"confidence": 0.99692494474705,
"value": "aws_cost"
}
]
},
"WARNING": "DEPRECATED",
"msg_id": "0lbTZJcwDL5RoT2Wi"
}
```
For the simple query `aws cost today`, `values` holds only one entry, and wit.ai additionally exposes it through the shorthand `value` field:
```json
{
"_text": "aws cost today",
"entities": {
"intent": [
{
"confidence": 0.99965536553564,
"value": "aws_cost"
}
],
"datetime": [
{
"confidence": 0.9995,
"type": "value",
"grain": "day",
"value": "2022-04-16T00:00:00.000-07:00",
"values": [
{
"type": "value",
"grain": "day",
"value": "2022-04-16T00:00:00.000-07:00"
}
]
}
]
},
"WARNING": "DEPRECATED",
"msg_id": "05vACT9WHhDUmAy9u"
}
```
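
To make the `value`/`values` discrepancy concrete, here is a minimal, hypothetical sketch (the helper name is made up and this is not opsdroid code) of an extractor that tolerates both payload shapes:

```python
def extract_entity_value(entity):
    """Return the shorthand 'value' if wit.ai provides it, else the 'values' list."""
    if "value" in entity:
        return entity["value"]
    if "values" in entity:
        # Interval entities only ship the 'values' candidates, so return the
        # whole list and let the caller pick the candidate it cares about.
        return entity["values"]
    return None


interval_entity = {"confidence": 0.9995, "type": "interval",
                   "values": [{"type": "interval", "from": {"grain": "month"}}]}
simple_entity = {"confidence": 0.9995, "type": "value",
                 "value": "2022-04-16T00:00:00.000-07:00"}

print(extract_entity_value(interval_entity))  # list of interval candidates
print(extract_entity_value(simple_entity))    # ISO timestamp string
```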
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opsdroid/parsers/witai.py`
Content:
```
1 """A helper function for parsing and executing wit.ai skills."""
2
3 import logging
4 import json
5
6 import aiohttp
7
8 from voluptuous import Required
9
10 from opsdroid.const import WITAI_DEFAULT_VERSION
11 from opsdroid.const import WITAI_API_ENDPOINT
12
13 _LOGGER = logging.getLogger(__name__)
14 CONFIG_SCHEMA = {Required("token"): str, "min-score": float}
15
16
17 async def call_witai(message, config):
18 """Call the wit.ai api and return the response."""
19 async with aiohttp.ClientSession(trust_env=True) as session:
20 headers = {"Authorization": "Bearer " + config["token"]}
21 payload = {"v": WITAI_DEFAULT_VERSION, "q": message.text}
22 resp = await session.get(
23 WITAI_API_ENDPOINT + "v={}&q={}".format(payload["v"], payload["q"]),
24 headers=headers,
25 )
26 result = await resp.json()
27 _LOGGER.info(_("wit.ai response - %s."), json.dumps(result))
28 return result
29
30
31 async def parse_witai(opsdroid, skills, message, config):
32 """Parse a message against all witai skills."""
33 matched_skills = []
34 if "token" in config:
35 try:
36 result = await call_witai(message, config)
37 except aiohttp.ClientOSError:
38 _LOGGER.error(_("No response from wit.ai, check your network."))
39 return matched_skills
40
41 if "code" in result:
42 _LOGGER.error(
43 _("wit.ai error - %s %s"), str(result["code"]), str(result["error"])
44 )
45 return matched_skills
46
47 if result["entities"] == {}:
48 _LOGGER.error(
49 _("wit.ai error - No intent found. Did you forget to create one?")
50 )
51 return matched_skills
52
53 try:
54 confidence = result["entities"]["intent"][0]["confidence"]
55 except KeyError:
56 confidence = 0.0
57 if "min-score" in config and confidence < config["min-score"]:
58 _LOGGER.info(_("wit.ai score lower than min-score."))
59 return matched_skills
60
61 if result:
62 for skill in skills:
63 for matcher in skill.matchers:
64 if "witai_intent" in matcher:
65 if matcher["witai_intent"] in [
66 i["value"] for i in result["entities"]["intent"]
67 ]:
68 message.witai = result
69 for key, entity in result["entities"].items():
70 if key != "intent":
71 message.update_entity(
72 key, entity[0]["value"], entity[0]["confidence"]
73 )
74 matched_skills.append(
75 {
76 "score": confidence,
77 "skill": skill,
78 "config": skill.config,
79 "message": message,
80 }
81 )
82 return matched_skills
83
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/opsdroid/parsers/witai.py b/opsdroid/parsers/witai.py
--- a/opsdroid/parsers/witai.py
+++ b/opsdroid/parsers/witai.py
@@ -68,8 +68,16 @@
message.witai = result
for key, entity in result["entities"].items():
if key != "intent":
+ witai_entity_value = ""
+ if "value" in entity[0]:
+ witai_entity_value = entity[0]["value"]
+ elif "values" in entity[0]:
+ # we never know which data are important for user,
+ # so we return list with all values
+ witai_entity_value = entity[0]["values"]
+
message.update_entity(
- key, entity[0]["value"], entity[0]["confidence"]
+ key, witai_entity_value, entity[0]["confidence"]
)
matched_skills.append(
{
|
{"golden_diff": "diff --git a/opsdroid/parsers/witai.py b/opsdroid/parsers/witai.py\n--- a/opsdroid/parsers/witai.py\n+++ b/opsdroid/parsers/witai.py\n@@ -68,8 +68,16 @@\n message.witai = result\n for key, entity in result[\"entities\"].items():\n if key != \"intent\":\n+ witai_entity_value = \"\"\n+ if \"value\" in entity[0]:\n+ witai_entity_value = entity[0][\"value\"]\n+ elif \"values\" in entity[0]:\n+ # we never know which data are important for user,\n+ # so we return list with all values\n+ witai_entity_value = entity[0][\"values\"]\n+\n message.update_entity(\n- key, entity[0][\"value\"], entity[0][\"confidence\"]\n+ key, witai_entity_value, entity[0][\"confidence\"]\n )\n matched_skills.append(\n {\n", "issue": "Unhandled exception in wit.ai matcher (wrong wit.ai response parsing)\n# Description\r\nOpsdroid doesn't understand wit.ai time range response\r\n\r\n\r\n## Steps to Reproduce\r\nPlease also include relevant information and steps to reproduce the bug/issue.\r\n1. Connect with wit.ai app\r\n2. Create new indent\r\n3. Train indent to understand `wit/datetime`\r\n4. Send message like `aws cost since december`\r\n5. Opsdroid will fail to understand wit.ai api response\r\n\r\n```\r\nINFO opsdroid.parsers.witai.call_witai(): wit.ai response - {\"_text\": \"aws cost since december\", \"entities\": {\"intent\": [{\"confidence\": 0.99986626506986, \"value\": \"aws_cost\"}], \"datetime\": [{\"confidence\": 0.9995, \"type\": \"interval\", \"from\": {\"grain\": \"month\", \"value\": \"2022-12-01T00:00:00.000-08:00\"}, \"values\": [{\"type\": \"interval\", \"from\": {\"grain\": \"month\", \"value\": \"2022-12-01T00:00:00.000-08:00\"}}, {\"type\": \"interval\", \"from\": {\"grain\": \"month\", \"value\": \"2023-12-01T00:00:00.000-08:00\"}}, {\"type\": \"interval\", \"from\": {\"grain\": \"month\", \"value\": \"2024-12-01T00:00:00.000-08:00\"}}]}]}, \"WARNING\": \"DEPRECATED\", \"msg_id\": \"0eanvH01TmiwU0Era\"}.\r\nERROR aiohttp.server.log_exception(): Error handling request\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\jakub\\Documents\\venvs\\chatops\\lib\\site-packages\\aiohttp\\web_protocol.py\", line 435, in _handle_request\r\n resp = await request_handler(request)\r\n File \"C:\\Users\\jakub\\Documents\\venvs\\chatops\\lib\\site-packages\\aiohttp\\web_app.py\", line 504, in _handle\r\n resp = await handler(request)\r\n File \"C:\\Users\\jakub\\Documents\\venvs\\chatops\\lib\\site-packages\\opsdroid\\connector\\websocket\\__init__.py\", line 99, in websocket_handler\r\n await self.opsdroid.parse(message)\r\n File \"C:\\Users\\jakub\\Documents\\venvs\\chatops\\lib\\site-packages\\opsdroid\\core.py\", line 627, in parse\r\n ranked_skills = await self.get_ranked_skills(unconstrained_skills, event)\r\n File \"C:\\Users\\jakub\\Documents\\venvs\\chatops\\lib\\site-packages\\opsdroid\\core.py\", line 516, in get_ranked_skills\r\n ranked_skills += await parse_witai(self, skills, message, witai)\r\n File \"C:\\Users\\jakub\\Documents\\venvs\\chatops\\lib\\site-packages\\opsdroid\\parsers\\witai.py\", line 72, in parse_witai\r\n key, entity[0][\"value\"], entity[0][\"confidence\"]\r\nKeyError: 'value'\r\n```\r\n\r\n## Expected Functionality\r\nThere should be no exception\r\n\r\n\r\n## Experienced Functionality\r\nThrown exception\r\n\r\n## Versions\r\n- **Opsdroid version:** 0.25.0\r\n- **Python version:** 3.9.5 \r\n- **OS/Docker version:** Windows 10\r\n\r\n## Configuration File\r\nPlease include your version of the configuration file below.\r\n\r\n```yaml\r\n# Your code 
goes here.\r\nparsers:\r\n - name: witai\r\n access-token: [REDACTED]\r\n min-score: 0.6\r\n```\r\n\r\n## Additional Details\r\n\r\nRaw wit.ai response for `aws cost since december`\r\n\r\n```json\r\n{\r\n \"_text\": \"aws cost since december\",\r\n \"entities\": {\r\n \"intent\": [\r\n {\r\n \"confidence\": 0.99965322126667,\r\n \"value\": \"aws_cost\"\r\n }\r\n ],\r\n \"datetime\": [\r\n {\r\n \"confidence\": 0.9995,\r\n \"type\": \"interval\",\r\n \"from\": {\r\n \"grain\": \"month\",\r\n \"value\": \"2022-12-01T00:00:00.000-08:00\"\r\n },\r\n \"values\": [\r\n {\r\n \"type\": \"interval\",\r\n \"from\": {\r\n \"grain\": \"month\",\r\n \"value\": \"2022-12-01T00:00:00.000-08:00\"\r\n }\r\n },\r\n {\r\n \"type\": \"interval\",\r\n \"from\": {\r\n \"grain\": \"month\",\r\n \"value\": \"2023-12-01T00:00:00.000-08:00\"\r\n }\r\n },\r\n {\r\n \"type\": \"interval\",\r\n \"from\": {\r\n \"grain\": \"month\",\r\n \"value\": \"2024-12-01T00:00:00.000-08:00\"\r\n }\r\n }\r\n ]\r\n }\r\n ]\r\n },\r\n \"WARNING\": \"DEPRECATED\",\r\n \"msg_id\": \"051qg0BBGn4O7xZDj\"\r\n}\r\n\r\n```\r\n\r\nHere we can see that wit.ai sends `values` in `datetime[0]` dict. \r\n\r\nOpsdroid expects it to be `value` (without **s**):\r\n\r\nhttps://github.com/opsdroid/opsdroid/blob/c5dad210fe3d9068c75cd4fac9762fcc353335d3/opsdroid/parsers/witai.py#L69-L73\r\n\r\nWhich is fine for simple response without any matched intents (excluded by `if` in L70):\r\n\r\n```json\r\n{\r\n \"_text\": \"aws\",\r\n \"entities\": {\r\n \"intent\": [\r\n {\r\n \"confidence\": 0.99692494474705,\r\n \"value\": \"aws_cost\"\r\n }\r\n ]\r\n },\r\n \"WARNING\": \"DEPRECATED\",\r\n \"msg_id\": \"0lbTZJcwDL5RoT2Wi\"\r\n}\r\n```\r\n\r\nSimple query `aws cost today`. If there is only one field in `values` wit.ai will rewrite it to short version to `values`\r\n\r\n```json\r\n{\r\n \"_text\": \"aws cost today\",\r\n \"entities\": {\r\n \"intent\": [\r\n {\r\n \"confidence\": 0.99965536553564,\r\n \"value\": \"aws_cost\"\r\n }\r\n ],\r\n \"datetime\": [\r\n {\r\n \"confidence\": 0.9995,\r\n \"type\": \"value\",\r\n \"grain\": \"day\",\r\n \"value\": \"2022-04-16T00:00:00.000-07:00\",\r\n \"values\": [\r\n {\r\n \"type\": \"value\",\r\n \"grain\": \"day\",\r\n \"value\": \"2022-04-16T00:00:00.000-07:00\"\r\n }\r\n ]\r\n }\r\n ]\r\n },\r\n \"WARNING\": \"DEPRECATED\",\r\n \"msg_id\": \"05vACT9WHhDUmAy9u\"\r\n}\r\n```\r\n\r\n\n", "before_files": [{"content": "\"\"\"A helper function for parsing and executing wit.ai skills.\"\"\"\n\nimport logging\nimport json\n\nimport aiohttp\n\nfrom voluptuous import Required\n\nfrom opsdroid.const import WITAI_DEFAULT_VERSION\nfrom opsdroid.const import WITAI_API_ENDPOINT\n\n_LOGGER = logging.getLogger(__name__)\nCONFIG_SCHEMA = {Required(\"token\"): str, \"min-score\": float}\n\n\nasync def call_witai(message, config):\n \"\"\"Call the wit.ai api and return the response.\"\"\"\n async with aiohttp.ClientSession(trust_env=True) as session:\n headers = {\"Authorization\": \"Bearer \" + config[\"token\"]}\n payload = {\"v\": WITAI_DEFAULT_VERSION, \"q\": message.text}\n resp = await session.get(\n WITAI_API_ENDPOINT + \"v={}&q={}\".format(payload[\"v\"], payload[\"q\"]),\n headers=headers,\n )\n result = await resp.json()\n _LOGGER.info(_(\"wit.ai response - %s.\"), json.dumps(result))\n return result\n\n\nasync def parse_witai(opsdroid, skills, message, config):\n \"\"\"Parse a message against all witai skills.\"\"\"\n matched_skills = []\n if \"token\" in config:\n try:\n result = await call_witai(message, config)\n 
except aiohttp.ClientOSError:\n _LOGGER.error(_(\"No response from wit.ai, check your network.\"))\n return matched_skills\n\n if \"code\" in result:\n _LOGGER.error(\n _(\"wit.ai error - %s %s\"), str(result[\"code\"]), str(result[\"error\"])\n )\n return matched_skills\n\n if result[\"entities\"] == {}:\n _LOGGER.error(\n _(\"wit.ai error - No intent found. Did you forget to create one?\")\n )\n return matched_skills\n\n try:\n confidence = result[\"entities\"][\"intent\"][0][\"confidence\"]\n except KeyError:\n confidence = 0.0\n if \"min-score\" in config and confidence < config[\"min-score\"]:\n _LOGGER.info(_(\"wit.ai score lower than min-score.\"))\n return matched_skills\n\n if result:\n for skill in skills:\n for matcher in skill.matchers:\n if \"witai_intent\" in matcher:\n if matcher[\"witai_intent\"] in [\n i[\"value\"] for i in result[\"entities\"][\"intent\"]\n ]:\n message.witai = result\n for key, entity in result[\"entities\"].items():\n if key != \"intent\":\n message.update_entity(\n key, entity[0][\"value\"], entity[0][\"confidence\"]\n )\n matched_skills.append(\n {\n \"score\": confidence,\n \"skill\": skill,\n \"config\": skill.config,\n \"message\": message,\n }\n )\n return matched_skills\n", "path": "opsdroid/parsers/witai.py"}], "after_files": [{"content": "\"\"\"A helper function for parsing and executing wit.ai skills.\"\"\"\n\nimport logging\nimport json\n\nimport aiohttp\n\nfrom voluptuous import Required\n\nfrom opsdroid.const import WITAI_DEFAULT_VERSION\nfrom opsdroid.const import WITAI_API_ENDPOINT\n\n_LOGGER = logging.getLogger(__name__)\nCONFIG_SCHEMA = {Required(\"token\"): str, \"min-score\": float}\n\n\nasync def call_witai(message, config):\n \"\"\"Call the wit.ai api and return the response.\"\"\"\n async with aiohttp.ClientSession(trust_env=True) as session:\n headers = {\"Authorization\": \"Bearer \" + config[\"token\"]}\n payload = {\"v\": WITAI_DEFAULT_VERSION, \"q\": message.text}\n resp = await session.get(\n WITAI_API_ENDPOINT + \"v={}&q={}\".format(payload[\"v\"], payload[\"q\"]),\n headers=headers,\n )\n result = await resp.json()\n _LOGGER.info(_(\"wit.ai response - %s.\"), json.dumps(result))\n return result\n\n\nasync def parse_witai(opsdroid, skills, message, config):\n \"\"\"Parse a message against all witai skills.\"\"\"\n matched_skills = []\n if \"token\" in config:\n try:\n result = await call_witai(message, config)\n except aiohttp.ClientOSError:\n _LOGGER.error(_(\"No response from wit.ai, check your network.\"))\n return matched_skills\n\n if \"code\" in result:\n _LOGGER.error(\n _(\"wit.ai error - %s %s\"), str(result[\"code\"]), str(result[\"error\"])\n )\n return matched_skills\n\n if result[\"entities\"] == {}:\n _LOGGER.error(\n _(\"wit.ai error - No intent found. 
Did you forget to create one?\")\n )\n return matched_skills\n\n try:\n confidence = result[\"entities\"][\"intent\"][0][\"confidence\"]\n except KeyError:\n confidence = 0.0\n if \"min-score\" in config and confidence < config[\"min-score\"]:\n _LOGGER.info(_(\"wit.ai score lower than min-score.\"))\n return matched_skills\n\n if result:\n for skill in skills:\n for matcher in skill.matchers:\n if \"witai_intent\" in matcher:\n if matcher[\"witai_intent\"] in [\n i[\"value\"] for i in result[\"entities\"][\"intent\"]\n ]:\n message.witai = result\n for key, entity in result[\"entities\"].items():\n if key != \"intent\":\n witai_entity_value = \"\"\n if \"value\" in entity[0]:\n witai_entity_value = entity[0][\"value\"]\n elif \"values\" in entity[0]:\n # we never know which data are important for user,\n # so we return list with all values\n witai_entity_value = entity[0][\"values\"]\n\n message.update_entity(\n key, witai_entity_value, entity[0][\"confidence\"]\n )\n matched_skills.append(\n {\n \"score\": confidence,\n \"skill\": skill,\n \"config\": skill.config,\n \"message\": message,\n }\n )\n return matched_skills\n", "path": "opsdroid/parsers/witai.py"}]}
| 2,734 | 218 |
gh_patches_debug_8374
|
rasdani/github-patches
|
git_diff
|
cocotb__cocotb-1810
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
setup.py uses https://cocotb.org which doesn't work (yet)
We have
https://github.com/cocotb/cocotb/blob/e74d508e30027c16778b95ef0985b6bcbc5207c2/setup.py#L90
which doesn't work (yet). Do we also want to use https://docs.cocotb.org temporarily, as we already do on GitHub?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 ###############################################################################
3 # Copyright (c) 2013 Potential Ventures Ltd
4 # Copyright (c) 2013 SolarFlare Communications Inc
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are met:
9 # * Redistributions of source code must retain the above copyright
10 # notice, this list of conditions and the following disclaimer.
11 # * Redistributions in binary form must reproduce the above copyright
12 # notice, this list of conditions and the following disclaimer in the
13 # documentation and/or other materials provided with the distribution.
14 # * Neither the name of Potential Ventures Ltd,
15 # SolarFlare Communications Inc nor the
16 # names of its contributors may be used to endorse or promote products
17 # derived from this software without specific prior written permission.
18 #
19 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
20 # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
21 # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 # DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
23 # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
24 # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 ###############################################################################
30
31 import sys
32 if sys.version_info[:2] < (3, 5):
33 msg = [
34 "This version of cocotb requires at least Python 3.5,",
35 "you are running Python %d.%d.%d." % (
36 sys.version_info[0], sys.version_info[1], sys.version_info[2])
37 ]
38 if sys.version_info[0] == 2:
39 msg += [
40 "If you have Python 3 installed on your machine try ",
41 "using 'python3 -m pip' instead of 'pip' to install cocotb."
42 ]
43 msg += [
44 "For more information please refer to the documentation at ",
45 "https://cocotb.readthedocs.io."
46 ]
47
48 raise SystemExit("\n".join(msg))
49
50 import logging
51 from setuptools import setup
52 from setuptools import find_packages
53 from os import path, walk
54 from io import StringIO
55
56 # Note: cocotb is not installed properly yet and is missing dependencies and binaries
57 # We can still import other files next to setup.py, as long as they're in MANIFEST.in
58 from cocotb_build_libs import get_ext, build_ext
59
60
61 def read_file(fname):
62 with open(path.join(path.dirname(__file__), fname), encoding='utf8') as f:
63 return f.read()
64
65
66 def package_files(directory):
67 paths = []
68 for (fpath, directories, filenames) in walk(directory):
69 for filename in filenames:
70 paths.append(path.join('..', fpath, filename))
71 return paths
72
73
74 # this sets the __version__ variable
75 exec(read_file(path.join('cocotb', '_version.py')))
76
77 # store log from build_libs and display at the end in verbose mode
78 # see https://github.com/pypa/pip/issues/6634
79 log_stream = StringIO()
80 handler = logging.StreamHandler(log_stream)
81 log = logging.getLogger("cocotb._build_libs")
82 log.setLevel(logging.INFO)
83 log.addHandler(handler)
84
85 setup(
86 name='cocotb',
87 cmdclass={'build_ext': build_ext},
88 version=__version__, # noqa: F821
89 description='cocotb is a coroutine based cosimulation library for writing VHDL and Verilog testbenches in Python.',
90 url='https://cocotb.org',
91 license='BSD',
92 long_description=read_file('README.md'),
93 long_description_content_type='text/markdown',
94 author='Chris Higgs, Stuart Hodgson',
95 maintainer='cocotb contributors',
96 maintainer_email='[email protected]',
97 install_requires=[],
98 python_requires='>=3.5',
99 packages=find_packages(),
100 include_package_data=True,
101 package_data={'cocotb': package_files('cocotb/share')},
102 ext_modules=get_ext(),
103 entry_points={
104 'console_scripts': [
105 'cocotb-config=cocotb.config:main',
106 ]
107 },
108 platforms='any',
109 classifiers=[
110 "Programming Language :: Python :: 3",
111 "Programming Language :: Python :: 3.5",
112 "Programming Language :: Python :: 3.6",
113 "Programming Language :: Python :: 3.7",
114 "Programming Language :: Python :: 3.8",
115 "License :: OSI Approved :: BSD License",
116 "Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)",
117 ],
118
119 # these appear in the sidebar on PyPI
120 project_urls={
121 "Bug Tracker": "https://github.com/cocotb/cocotb/issues",
122 "Source Code": "https://github.com/cocotb/cocotb",
123 "Documentation": "https://docs.cocotb.org",
124 },
125 )
126
127 print(log_stream.getvalue())
128
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -87,7 +87,7 @@
cmdclass={'build_ext': build_ext},
version=__version__, # noqa: F821
description='cocotb is a coroutine based cosimulation library for writing VHDL and Verilog testbenches in Python.',
- url='https://cocotb.org',
+ url='https://docs.cocotb.org',
license='BSD',
long_description=read_file('README.md'),
long_description_content_type='text/markdown',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -87,7 +87,7 @@\n cmdclass={'build_ext': build_ext},\n version=__version__, # noqa: F821\n description='cocotb is a coroutine based cosimulation library for writing VHDL and Verilog testbenches in Python.',\n- url='https://cocotb.org',\n+ url='https://docs.cocotb.org',\n license='BSD',\n long_description=read_file('README.md'),\n long_description_content_type='text/markdown',\n", "issue": "setup.py uses https://cocotb.org which doesn't work (yet)\nWe have \r\nhttps://github.com/cocotb/cocotb/blob/e74d508e30027c16778b95ef0985b6bcbc5207c2/setup.py#L90\r\nwhich doesn't work (yet). Do we also want to use https://docs.cocotb.org temporarily like in GitHub?\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n###############################################################################\n# Copyright (c) 2013 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\nimport sys\nif sys.version_info[:2] < (3, 5):\n msg = [\n \"This version of cocotb requires at least Python 3.5,\",\n \"you are running Python %d.%d.%d.\" % (\n sys.version_info[0], sys.version_info[1], sys.version_info[2])\n ]\n if sys.version_info[0] == 2:\n msg += [\n \"If you have Python 3 installed on your machine try \",\n \"using 'python3 -m pip' instead of 'pip' to install cocotb.\"\n ]\n msg += [\n \"For more information please refer to the documentation at \",\n \"https://cocotb.readthedocs.io.\"\n ]\n\n raise SystemExit(\"\\n\".join(msg))\n\nimport logging\nfrom setuptools import setup\nfrom setuptools import find_packages\nfrom os import path, walk\nfrom io import StringIO\n\n# Note: cocotb is not installed properly yet and is missing dependencies and binaries\n# We can still import other files next to setup.py, as long as they're in MANIFEST.in\nfrom cocotb_build_libs import get_ext, build_ext\n\n\ndef read_file(fname):\n with open(path.join(path.dirname(__file__), fname), encoding='utf8') as f:\n return f.read()\n\n\ndef package_files(directory):\n paths = []\n for (fpath, directories, filenames) in walk(directory):\n for filename in filenames:\n paths.append(path.join('..', fpath, filename))\n return paths\n\n\n# this sets the __version__ variable\nexec(read_file(path.join('cocotb', '_version.py')))\n\n# store log from build_libs and display at the end in verbose mode\n# see https://github.com/pypa/pip/issues/6634\nlog_stream = StringIO()\nhandler = logging.StreamHandler(log_stream)\nlog = logging.getLogger(\"cocotb._build_libs\")\nlog.setLevel(logging.INFO)\nlog.addHandler(handler)\n\nsetup(\n name='cocotb',\n cmdclass={'build_ext': build_ext},\n version=__version__, # noqa: F821\n description='cocotb is a coroutine based cosimulation library for writing VHDL and Verilog testbenches in Python.',\n url='https://cocotb.org',\n license='BSD',\n long_description=read_file('README.md'),\n long_description_content_type='text/markdown',\n author='Chris Higgs, Stuart Hodgson',\n maintainer='cocotb contributors',\n maintainer_email='[email protected]',\n install_requires=[],\n python_requires='>=3.5',\n packages=find_packages(),\n include_package_data=True,\n package_data={'cocotb': package_files('cocotb/share')},\n ext_modules=get_ext(),\n entry_points={\n 'console_scripts': [\n 'cocotb-config=cocotb.config:main',\n ]\n },\n platforms='any',\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"License :: OSI Approved :: BSD License\",\n \"Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)\",\n ],\n\n # these appear in the sidebar on PyPI\n project_urls={\n \"Bug Tracker\": \"https://github.com/cocotb/cocotb/issues\",\n \"Source Code\": \"https://github.com/cocotb/cocotb\",\n 
\"Documentation\": \"https://docs.cocotb.org\",\n },\n)\n\nprint(log_stream.getvalue())\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n###############################################################################\n# Copyright (c) 2013 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\nimport sys\nif sys.version_info[:2] < (3, 5):\n msg = [\n \"This version of cocotb requires at least Python 3.5,\",\n \"you are running Python %d.%d.%d.\" % (\n sys.version_info[0], sys.version_info[1], sys.version_info[2])\n ]\n if sys.version_info[0] == 2:\n msg += [\n \"If you have Python 3 installed on your machine try \",\n \"using 'python3 -m pip' instead of 'pip' to install cocotb.\"\n ]\n msg += [\n \"For more information please refer to the documentation at \",\n \"https://cocotb.readthedocs.io.\"\n ]\n\n raise SystemExit(\"\\n\".join(msg))\n\nimport logging\nfrom setuptools import setup\nfrom setuptools import find_packages\nfrom os import path, walk\nfrom io import StringIO\n\n# Note: cocotb is not installed properly yet and is missing dependencies and binaries\n# We can still import other files next to setup.py, as long as they're in MANIFEST.in\nfrom cocotb_build_libs import get_ext, build_ext\n\n\ndef read_file(fname):\n with open(path.join(path.dirname(__file__), fname), encoding='utf8') as f:\n return f.read()\n\n\ndef package_files(directory):\n paths = []\n for (fpath, directories, filenames) in walk(directory):\n for filename in filenames:\n paths.append(path.join('..', fpath, filename))\n return paths\n\n\n# this sets the __version__ variable\nexec(read_file(path.join('cocotb', '_version.py')))\n\n# store log from build_libs and display at the end in verbose mode\n# see https://github.com/pypa/pip/issues/6634\nlog_stream = StringIO()\nhandler = logging.StreamHandler(log_stream)\nlog = 
logging.getLogger(\"cocotb._build_libs\")\nlog.setLevel(logging.INFO)\nlog.addHandler(handler)\n\nsetup(\n name='cocotb',\n cmdclass={'build_ext': build_ext},\n version=__version__, # noqa: F821\n description='cocotb is a coroutine based cosimulation library for writing VHDL and Verilog testbenches in Python.',\n url='https://docs.cocotb.org',\n license='BSD',\n long_description=read_file('README.md'),\n long_description_content_type='text/markdown',\n author='Chris Higgs, Stuart Hodgson',\n maintainer='cocotb contributors',\n maintainer_email='[email protected]',\n install_requires=[],\n python_requires='>=3.5',\n packages=find_packages(),\n include_package_data=True,\n package_data={'cocotb': package_files('cocotb/share')},\n ext_modules=get_ext(),\n entry_points={\n 'console_scripts': [\n 'cocotb-config=cocotb.config:main',\n ]\n },\n platforms='any',\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"License :: OSI Approved :: BSD License\",\n \"Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)\",\n ],\n\n # these appear in the sidebar on PyPI\n project_urls={\n \"Bug Tracker\": \"https://github.com/cocotb/cocotb/issues\",\n \"Source Code\": \"https://github.com/cocotb/cocotb\",\n \"Documentation\": \"https://docs.cocotb.org\",\n },\n)\n\nprint(log_stream.getvalue())\n", "path": "setup.py"}]}
| 1,788 | 130 |
gh_patches_debug_35091
|
rasdani/github-patches
|
git_diff
|
mathesar-foundation__mathesar-2330
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
common_data should contain the current user data
## Description
`common_data` should contain the information of the currently logged-in user.
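
As a rough sketch of what this could look like (reusing the existing `UserSerializer`; the helper name and exact wiring are assumptions, not a final implementation), the view layer could serialize `request.user` and expose it under a `user` key in `common_data`:

```python
from mathesar.api.ui.serializers.users import UserSerializer


def get_user_data(request):
    # Serialize the currently authenticated user for the common_data payload.
    return UserSerializer(request.user, context={'request': request}).data


# Inside get_common_data(request, database, schema=None), the payload would
# then gain an entry such as:
#     'user': get_user_data(request),
```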
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mathesar/views.py`
Content:
```
1 from django.conf import settings
2 from django.contrib.auth.decorators import login_required
3 from django.shortcuts import render, redirect, get_object_or_404
4 from rest_framework import status
5 from rest_framework.decorators import api_view
6 from rest_framework.response import Response
7
8 from mathesar.api.serializers.databases import DatabaseSerializer, TypeSerializer
9 from mathesar.api.serializers.schemas import SchemaSerializer
10 from mathesar.api.serializers.tables import TableSerializer
11 from mathesar.api.serializers.queries import QuerySerializer
12 from mathesar.database.types import UIType
13 from mathesar.models.base import Database, Schema, Table
14 from mathesar.models.query import UIQuery
15 from mathesar.state import reset_reflection
16
17
18 def get_schema_list(request, database):
19 schema_serializer = SchemaSerializer(
20 Schema.objects.filter(database=database),
21 many=True,
22 context={'request': request}
23 )
24 return schema_serializer.data
25
26
27 def get_database_list(request):
28 database_serializer = DatabaseSerializer(
29 Database.objects.all(),
30 many=True,
31 context={'request': request}
32 )
33 return database_serializer.data
34
35
36 def get_table_list(request, schema):
37 if schema is None:
38 return []
39 table_serializer = TableSerializer(
40 Table.objects.filter(schema=schema),
41 many=True,
42 context={'request': request}
43 )
44 return table_serializer.data
45
46
47 def get_queries_list(request, schema):
48 if schema is None:
49 return []
50 query_serializer = QuerySerializer(
51 UIQuery.objects.filter(base_table__schema=schema),
52 many=True,
53 context={'request': request}
54 )
55 return query_serializer.data
56
57
58 def get_ui_type_list(request, database):
59 if database is None:
60 return []
61 type_serializer = TypeSerializer(
62 UIType,
63 many=True,
64 context={'request': request}
65 )
66 return type_serializer.data
67
68
69 def get_common_data(request, database, schema=None):
70 return {
71 'current_db': database.name if database else None,
72 'current_schema': schema.id if schema else None,
73 'schemas': get_schema_list(request, database),
74 'databases': get_database_list(request),
75 'tables': get_table_list(request, schema),
76 'queries': get_queries_list(request, schema),
77 'abstract_types': get_ui_type_list(request, database),
78 'live_demo_mode': getattr(settings, 'MATHESAR_LIVE_DEMO', False),
79 }
80
81
82 def get_current_database(request, db_name):
83 """Get database from passed name, with fall back behavior."""
84 if db_name is not None:
85 current_database = get_object_or_404(Database, name=db_name)
86 else:
87 request_database_name = request.GET.get('database')
88 try:
89 if request_database_name is not None:
90 # Try to get the database named specified in the request
91 current_database = Database.objects.get(name=request_database_name)
92 else:
93 # Try to get the first database available
94 current_database = Database.objects.order_by('id').first()
95 except Database.DoesNotExist:
96 current_database = None
97 return current_database
98
99
100 def get_current_schema(request, schema_id, database):
101 # if there's a schema ID passed in, try to retrieve the schema, or return a 404 error.
102 if schema_id is not None:
103 return get_object_or_404(Schema, id=schema_id)
104 else:
105 try:
106 # Try to get the first schema in the DB
107 return Schema.objects.filter(database=database).order_by('id').first()
108 except Schema.DoesNotExist:
109 return None
110
111
112 def render_schema(request, database, schema):
113 # if there's no schema available, redirect to the schemas page.
114 if not schema:
115 return redirect('schemas', db_name=database.name)
116 else:
117 # We are redirecting so that the correct URL is passed to the frontend.
118 return redirect('schema_home', db_name=database.name, schema_id=schema.id)
119
120
121 @login_required
122 @api_view(['POST'])
123 def reflect_all(_):
124 reset_reflection()
125 return Response(status=status.HTTP_200_OK)
126
127
128 @login_required
129 def home(request):
130 database = get_current_database(request, None)
131 return redirect('schemas', db_name=database.name)
132
133
134 @login_required
135 def schema_home(request, db_name, schema_id, **kwargs):
136 database = get_current_database(request, db_name)
137 schema = get_current_schema(request, schema_id, database)
138 return render(request, 'mathesar/index.html', {
139 'common_data': get_common_data(request, database, schema)
140 })
141
142
143 @login_required
144 def schemas(request, db_name):
145 database = get_current_database(request, db_name)
146 return render(request, 'mathesar/index.html', {
147 'common_data': get_common_data(request, database, None)
148 })
149
```
Path: `mathesar/api/ui/viewsets/users.py`
Content:
```
1 from django.contrib.auth import get_user_model
2 from rest_access_policy import AccessViewSetMixin
3 from rest_framework import status, viewsets
4 from rest_framework.decorators import action
5 from rest_framework.exceptions import MethodNotAllowed
6 from rest_framework.generics import get_object_or_404
7 from rest_framework.response import Response
8
9 from mathesar.api.ui.permissions.database_role import DatabaseRoleAccessPolicy
10 from mathesar.api.ui.permissions.schema_role import SchemaRoleAccessPolicy
11 from mathesar.api.ui.serializers.users import (
12 ChangePasswordSerializer, PasswordResetSerializer, UserSerializer, DatabaseRoleSerializer,
13 SchemaRoleSerializer,
14 )
15 from mathesar.api.pagination import DefaultLimitOffsetPagination
16 from mathesar.api.ui.permissions.users import UserAccessPolicy
17 from mathesar.models.users import User, DatabaseRole, SchemaRole
18
19
20 class UserViewSet(AccessViewSetMixin, viewsets.ModelViewSet):
21 queryset = User.objects.all().order_by('id')
22 serializer_class = UserSerializer
23 pagination_class = DefaultLimitOffsetPagination
24 access_policy = UserAccessPolicy
25
26 @action(methods=['post'], detail=True)
27 def password_reset(self, request, pk=None):
28 serializer = PasswordResetSerializer(data=request.data, context={'request': request})
29 serializer.is_valid(raise_exception=True)
30 user = get_object_or_404(get_user_model(), pk=pk)
31 password = serializer.validated_data["password"]
32 user.set_password(password)
33 # Make sure we redirect user to change password set by the admin on login
34 user.password_change_needed = True
35 user.save()
36 return Response(status=status.HTTP_200_OK)
37
38 @action(methods=['post'], detail=False)
39 def password_change(self, request):
40 serializer = ChangePasswordSerializer(
41 instance=request.user,
42 data=request.data,
43 context={'request': request}
44 )
45 serializer.is_valid(raise_exception=True)
46 serializer.save()
47 return Response(status=status.HTTP_200_OK)
48
49
50 class DatabaseRoleViewSet(AccessViewSetMixin, viewsets.ModelViewSet):
51 queryset = DatabaseRole.objects.all().order_by('id')
52 serializer_class = DatabaseRoleSerializer
53 pagination_class = DefaultLimitOffsetPagination
54 access_policy = DatabaseRoleAccessPolicy
55
56 def get_queryset(self):
57 return self.access_policy.scope_queryset(
58 self.request, super().get_queryset()
59 )
60
61 def update(self, request, pk=None):
62 raise MethodNotAllowed(request.method)
63
64 def partial_update(self, request, pk=None):
65 raise MethodNotAllowed(request.method)
66
67
68 class SchemaRoleViewSet(AccessViewSetMixin, viewsets.ModelViewSet):
69 queryset = SchemaRole.objects.all().order_by('id')
70 serializer_class = SchemaRoleSerializer
71 pagination_class = DefaultLimitOffsetPagination
72 access_policy = SchemaRoleAccessPolicy
73
74 def get_queryset(self):
75 return self.access_policy.scope_queryset(
76 self.request, super().get_queryset()
77 )
78
79 def update(self, request, pk=None):
80 raise MethodNotAllowed(request.method)
81
82 def partial_update(self, request, pk=None):
83 raise MethodNotAllowed(request.method)
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mathesar/api/ui/viewsets/users.py b/mathesar/api/ui/viewsets/users.py
--- a/mathesar/api/ui/viewsets/users.py
+++ b/mathesar/api/ui/viewsets/users.py
@@ -14,11 +14,11 @@
)
from mathesar.api.pagination import DefaultLimitOffsetPagination
from mathesar.api.ui.permissions.users import UserAccessPolicy
-from mathesar.models.users import User, DatabaseRole, SchemaRole
+from mathesar.models.users import DatabaseRole, SchemaRole
class UserViewSet(AccessViewSetMixin, viewsets.ModelViewSet):
- queryset = User.objects.all().order_by('id')
+ queryset = get_user_model().objects.all().order_by('id')
serializer_class = UserSerializer
pagination_class = DefaultLimitOffsetPagination
access_policy = UserAccessPolicy
diff --git a/mathesar/views.py b/mathesar/views.py
--- a/mathesar/views.py
+++ b/mathesar/views.py
@@ -9,6 +9,7 @@
from mathesar.api.serializers.schemas import SchemaSerializer
from mathesar.api.serializers.tables import TableSerializer
from mathesar.api.serializers.queries import QuerySerializer
+from mathesar.api.ui.serializers.users import UserSerializer
from mathesar.database.types import UIType
from mathesar.models.base import Database, Schema, Table
from mathesar.models.query import UIQuery
@@ -66,6 +67,15 @@
return type_serializer.data
+def get_user_data(request):
+ user_serializer = UserSerializer(
+ request.user,
+ many=False,
+ context={'request': request}
+ )
+ return user_serializer.data
+
+
def get_common_data(request, database, schema=None):
return {
'current_db': database.name if database else None,
@@ -75,6 +85,7 @@
'tables': get_table_list(request, schema),
'queries': get_queries_list(request, schema),
'abstract_types': get_ui_type_list(request, database),
+ 'user': get_user_data(request),
'live_demo_mode': getattr(settings, 'MATHESAR_LIVE_DEMO', False),
}
|
{"golden_diff": "diff --git a/mathesar/api/ui/viewsets/users.py b/mathesar/api/ui/viewsets/users.py\n--- a/mathesar/api/ui/viewsets/users.py\n+++ b/mathesar/api/ui/viewsets/users.py\n@@ -14,11 +14,11 @@\n )\n from mathesar.api.pagination import DefaultLimitOffsetPagination\n from mathesar.api.ui.permissions.users import UserAccessPolicy\n-from mathesar.models.users import User, DatabaseRole, SchemaRole\n+from mathesar.models.users import DatabaseRole, SchemaRole\n \n \n class UserViewSet(AccessViewSetMixin, viewsets.ModelViewSet):\n- queryset = User.objects.all().order_by('id')\n+ queryset = get_user_model().objects.all().order_by('id')\n serializer_class = UserSerializer\n pagination_class = DefaultLimitOffsetPagination\n access_policy = UserAccessPolicy\ndiff --git a/mathesar/views.py b/mathesar/views.py\n--- a/mathesar/views.py\n+++ b/mathesar/views.py\n@@ -9,6 +9,7 @@\n from mathesar.api.serializers.schemas import SchemaSerializer\n from mathesar.api.serializers.tables import TableSerializer\n from mathesar.api.serializers.queries import QuerySerializer\n+from mathesar.api.ui.serializers.users import UserSerializer\n from mathesar.database.types import UIType\n from mathesar.models.base import Database, Schema, Table\n from mathesar.models.query import UIQuery\n@@ -66,6 +67,15 @@\n return type_serializer.data\n \n \n+def get_user_data(request):\n+ user_serializer = UserSerializer(\n+ request.user,\n+ many=False,\n+ context={'request': request}\n+ )\n+ return user_serializer.data\n+\n+\n def get_common_data(request, database, schema=None):\n return {\n 'current_db': database.name if database else None,\n@@ -75,6 +85,7 @@\n 'tables': get_table_list(request, schema),\n 'queries': get_queries_list(request, schema),\n 'abstract_types': get_ui_type_list(request, database),\n+ 'user': get_user_data(request),\n 'live_demo_mode': getattr(settings, 'MATHESAR_LIVE_DEMO', False),\n }\n", "issue": "common_data should contain the current user data\n## Description\r\n`common_data` should contain the information of the current logged in user.\n", "before_files": [{"content": "from django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom mathesar.api.serializers.databases import DatabaseSerializer, TypeSerializer\nfrom mathesar.api.serializers.schemas import SchemaSerializer\nfrom mathesar.api.serializers.tables import TableSerializer\nfrom mathesar.api.serializers.queries import QuerySerializer\nfrom mathesar.database.types import UIType\nfrom mathesar.models.base import Database, Schema, Table\nfrom mathesar.models.query import UIQuery\nfrom mathesar.state import reset_reflection\n\n\ndef get_schema_list(request, database):\n schema_serializer = SchemaSerializer(\n Schema.objects.filter(database=database),\n many=True,\n context={'request': request}\n )\n return schema_serializer.data\n\n\ndef get_database_list(request):\n database_serializer = DatabaseSerializer(\n Database.objects.all(),\n many=True,\n context={'request': request}\n )\n return database_serializer.data\n\n\ndef get_table_list(request, schema):\n if schema is None:\n return []\n table_serializer = TableSerializer(\n Table.objects.filter(schema=schema),\n many=True,\n context={'request': request}\n )\n return table_serializer.data\n\n\ndef get_queries_list(request, schema):\n if schema is None:\n return 
[]\n query_serializer = QuerySerializer(\n UIQuery.objects.filter(base_table__schema=schema),\n many=True,\n context={'request': request}\n )\n return query_serializer.data\n\n\ndef get_ui_type_list(request, database):\n if database is None:\n return []\n type_serializer = TypeSerializer(\n UIType,\n many=True,\n context={'request': request}\n )\n return type_serializer.data\n\n\ndef get_common_data(request, database, schema=None):\n return {\n 'current_db': database.name if database else None,\n 'current_schema': schema.id if schema else None,\n 'schemas': get_schema_list(request, database),\n 'databases': get_database_list(request),\n 'tables': get_table_list(request, schema),\n 'queries': get_queries_list(request, schema),\n 'abstract_types': get_ui_type_list(request, database),\n 'live_demo_mode': getattr(settings, 'MATHESAR_LIVE_DEMO', False),\n }\n\n\ndef get_current_database(request, db_name):\n \"\"\"Get database from passed name, with fall back behavior.\"\"\"\n if db_name is not None:\n current_database = get_object_or_404(Database, name=db_name)\n else:\n request_database_name = request.GET.get('database')\n try:\n if request_database_name is not None:\n # Try to get the database named specified in the request\n current_database = Database.objects.get(name=request_database_name)\n else:\n # Try to get the first database available\n current_database = Database.objects.order_by('id').first()\n except Database.DoesNotExist:\n current_database = None\n return current_database\n\n\ndef get_current_schema(request, schema_id, database):\n # if there's a schema ID passed in, try to retrieve the schema, or return a 404 error.\n if schema_id is not None:\n return get_object_or_404(Schema, id=schema_id)\n else:\n try:\n # Try to get the first schema in the DB\n return Schema.objects.filter(database=database).order_by('id').first()\n except Schema.DoesNotExist:\n return None\n\n\ndef render_schema(request, database, schema):\n # if there's no schema available, redirect to the schemas page.\n if not schema:\n return redirect('schemas', db_name=database.name)\n else:\n # We are redirecting so that the correct URL is passed to the frontend.\n return redirect('schema_home', db_name=database.name, schema_id=schema.id)\n\n\n@login_required\n@api_view(['POST'])\ndef reflect_all(_):\n reset_reflection()\n return Response(status=status.HTTP_200_OK)\n\n\n@login_required\ndef home(request):\n database = get_current_database(request, None)\n return redirect('schemas', db_name=database.name)\n\n\n@login_required\ndef schema_home(request, db_name, schema_id, **kwargs):\n database = get_current_database(request, db_name)\n schema = get_current_schema(request, schema_id, database)\n return render(request, 'mathesar/index.html', {\n 'common_data': get_common_data(request, database, schema)\n })\n\n\n@login_required\ndef schemas(request, db_name):\n database = get_current_database(request, db_name)\n return render(request, 'mathesar/index.html', {\n 'common_data': get_common_data(request, database, None)\n })\n", "path": "mathesar/views.py"}, {"content": "from django.contrib.auth import get_user_model\nfrom rest_access_policy import AccessViewSetMixin\nfrom rest_framework import status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import MethodNotAllowed\nfrom rest_framework.generics import get_object_or_404\nfrom rest_framework.response import Response\n\nfrom mathesar.api.ui.permissions.database_role import DatabaseRoleAccessPolicy\nfrom 
mathesar.api.ui.permissions.schema_role import SchemaRoleAccessPolicy\nfrom mathesar.api.ui.serializers.users import (\n ChangePasswordSerializer, PasswordResetSerializer, UserSerializer, DatabaseRoleSerializer,\n SchemaRoleSerializer,\n)\nfrom mathesar.api.pagination import DefaultLimitOffsetPagination\nfrom mathesar.api.ui.permissions.users import UserAccessPolicy\nfrom mathesar.models.users import User, DatabaseRole, SchemaRole\n\n\nclass UserViewSet(AccessViewSetMixin, viewsets.ModelViewSet):\n queryset = User.objects.all().order_by('id')\n serializer_class = UserSerializer\n pagination_class = DefaultLimitOffsetPagination\n access_policy = UserAccessPolicy\n\n @action(methods=['post'], detail=True)\n def password_reset(self, request, pk=None):\n serializer = PasswordResetSerializer(data=request.data, context={'request': request})\n serializer.is_valid(raise_exception=True)\n user = get_object_or_404(get_user_model(), pk=pk)\n password = serializer.validated_data[\"password\"]\n user.set_password(password)\n # Make sure we redirect user to change password set by the admin on login\n user.password_change_needed = True\n user.save()\n return Response(status=status.HTTP_200_OK)\n\n @action(methods=['post'], detail=False)\n def password_change(self, request):\n serializer = ChangePasswordSerializer(\n instance=request.user,\n data=request.data,\n context={'request': request}\n )\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(status=status.HTTP_200_OK)\n\n\nclass DatabaseRoleViewSet(AccessViewSetMixin, viewsets.ModelViewSet):\n queryset = DatabaseRole.objects.all().order_by('id')\n serializer_class = DatabaseRoleSerializer\n pagination_class = DefaultLimitOffsetPagination\n access_policy = DatabaseRoleAccessPolicy\n\n def get_queryset(self):\n return self.access_policy.scope_queryset(\n self.request, super().get_queryset()\n )\n\n def update(self, request, pk=None):\n raise MethodNotAllowed(request.method)\n\n def partial_update(self, request, pk=None):\n raise MethodNotAllowed(request.method)\n\n\nclass SchemaRoleViewSet(AccessViewSetMixin, viewsets.ModelViewSet):\n queryset = SchemaRole.objects.all().order_by('id')\n serializer_class = SchemaRoleSerializer\n pagination_class = DefaultLimitOffsetPagination\n access_policy = SchemaRoleAccessPolicy\n\n def get_queryset(self):\n return self.access_policy.scope_queryset(\n self.request, super().get_queryset()\n )\n\n def update(self, request, pk=None):\n raise MethodNotAllowed(request.method)\n\n def partial_update(self, request, pk=None):\n raise MethodNotAllowed(request.method)\n", "path": "mathesar/api/ui/viewsets/users.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom mathesar.api.serializers.databases import DatabaseSerializer, TypeSerializer\nfrom mathesar.api.serializers.schemas import SchemaSerializer\nfrom mathesar.api.serializers.tables import TableSerializer\nfrom mathesar.api.serializers.queries import QuerySerializer\nfrom mathesar.api.ui.serializers.users import UserSerializer\nfrom mathesar.database.types import UIType\nfrom mathesar.models.base import Database, Schema, Table\nfrom mathesar.models.query import UIQuery\nfrom mathesar.state import reset_reflection\n\n\ndef get_schema_list(request, database):\n 
schema_serializer = SchemaSerializer(\n Schema.objects.filter(database=database),\n many=True,\n context={'request': request}\n )\n return schema_serializer.data\n\n\ndef get_database_list(request):\n database_serializer = DatabaseSerializer(\n Database.objects.all(),\n many=True,\n context={'request': request}\n )\n return database_serializer.data\n\n\ndef get_table_list(request, schema):\n if schema is None:\n return []\n table_serializer = TableSerializer(\n Table.objects.filter(schema=schema),\n many=True,\n context={'request': request}\n )\n return table_serializer.data\n\n\ndef get_queries_list(request, schema):\n if schema is None:\n return []\n query_serializer = QuerySerializer(\n UIQuery.objects.filter(base_table__schema=schema),\n many=True,\n context={'request': request}\n )\n return query_serializer.data\n\n\ndef get_ui_type_list(request, database):\n if database is None:\n return []\n type_serializer = TypeSerializer(\n UIType,\n many=True,\n context={'request': request}\n )\n return type_serializer.data\n\n\ndef get_user_data(request):\n user_serializer = UserSerializer(\n request.user,\n many=False,\n context={'request': request}\n )\n return user_serializer.data\n\n\ndef get_common_data(request, database, schema=None):\n return {\n 'current_db': database.name if database else None,\n 'current_schema': schema.id if schema else None,\n 'schemas': get_schema_list(request, database),\n 'databases': get_database_list(request),\n 'tables': get_table_list(request, schema),\n 'queries': get_queries_list(request, schema),\n 'abstract_types': get_ui_type_list(request, database),\n 'user': get_user_data(request),\n 'live_demo_mode': getattr(settings, 'MATHESAR_LIVE_DEMO', False),\n }\n\n\ndef get_current_database(request, db_name):\n \"\"\"Get database from passed name, with fall back behavior.\"\"\"\n if db_name is not None:\n current_database = get_object_or_404(Database, name=db_name)\n else:\n request_database_name = request.GET.get('database')\n try:\n if request_database_name is not None:\n # Try to get the database named specified in the request\n current_database = Database.objects.get(name=request_database_name)\n else:\n # Try to get the first database available\n current_database = Database.objects.order_by('id').first()\n except Database.DoesNotExist:\n current_database = None\n return current_database\n\n\ndef get_current_schema(request, schema_id, database):\n # if there's a schema ID passed in, try to retrieve the schema, or return a 404 error.\n if schema_id is not None:\n return get_object_or_404(Schema, id=schema_id)\n else:\n try:\n # Try to get the first schema in the DB\n return Schema.objects.filter(database=database).order_by('id').first()\n except Schema.DoesNotExist:\n return None\n\n\ndef render_schema(request, database, schema):\n # if there's no schema available, redirect to the schemas page.\n if not schema:\n return redirect('schemas', db_name=database.name)\n else:\n # We are redirecting so that the correct URL is passed to the frontend.\n return redirect('schema_home', db_name=database.name, schema_id=schema.id)\n\n\n@login_required\n@api_view(['POST'])\ndef reflect_all(_):\n reset_reflection()\n return Response(status=status.HTTP_200_OK)\n\n\n@login_required\ndef home(request):\n database = get_current_database(request, None)\n return redirect('schemas', db_name=database.name)\n\n\n@login_required\ndef schema_home(request, db_name, schema_id, **kwargs):\n database = get_current_database(request, db_name)\n schema = get_current_schema(request, 
schema_id, database)\n return render(request, 'mathesar/index.html', {\n 'common_data': get_common_data(request, database, schema)\n })\n\n\n@login_required\ndef schemas(request, db_name):\n database = get_current_database(request, db_name)\n return render(request, 'mathesar/index.html', {\n 'common_data': get_common_data(request, database, None)\n })\n", "path": "mathesar/views.py"}, {"content": "from django.contrib.auth import get_user_model\nfrom rest_access_policy import AccessViewSetMixin\nfrom rest_framework import status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import MethodNotAllowed\nfrom rest_framework.generics import get_object_or_404\nfrom rest_framework.response import Response\n\nfrom mathesar.api.ui.permissions.database_role import DatabaseRoleAccessPolicy\nfrom mathesar.api.ui.permissions.schema_role import SchemaRoleAccessPolicy\nfrom mathesar.api.ui.serializers.users import (\n ChangePasswordSerializer, PasswordResetSerializer, UserSerializer, DatabaseRoleSerializer,\n SchemaRoleSerializer,\n)\nfrom mathesar.api.pagination import DefaultLimitOffsetPagination\nfrom mathesar.api.ui.permissions.users import UserAccessPolicy\nfrom mathesar.models.users import DatabaseRole, SchemaRole\n\n\nclass UserViewSet(AccessViewSetMixin, viewsets.ModelViewSet):\n queryset = get_user_model().objects.all().order_by('id')\n serializer_class = UserSerializer\n pagination_class = DefaultLimitOffsetPagination\n access_policy = UserAccessPolicy\n\n @action(methods=['post'], detail=True)\n def password_reset(self, request, pk=None):\n serializer = PasswordResetSerializer(data=request.data, context={'request': request})\n serializer.is_valid(raise_exception=True)\n user = get_object_or_404(get_user_model(), pk=pk)\n password = serializer.validated_data[\"password\"]\n user.set_password(password)\n # Make sure we redirect user to change password set by the admin on login\n user.password_change_needed = True\n user.save()\n return Response(status=status.HTTP_200_OK)\n\n @action(methods=['post'], detail=False)\n def password_change(self, request):\n serializer = ChangePasswordSerializer(\n instance=request.user,\n data=request.data,\n context={'request': request}\n )\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response(status=status.HTTP_200_OK)\n\n\nclass DatabaseRoleViewSet(AccessViewSetMixin, viewsets.ModelViewSet):\n queryset = DatabaseRole.objects.all().order_by('id')\n serializer_class = DatabaseRoleSerializer\n pagination_class = DefaultLimitOffsetPagination\n access_policy = DatabaseRoleAccessPolicy\n\n def get_queryset(self):\n return self.access_policy.scope_queryset(\n self.request, super().get_queryset()\n )\n\n def update(self, request, pk=None):\n raise MethodNotAllowed(request.method)\n\n def partial_update(self, request, pk=None):\n raise MethodNotAllowed(request.method)\n\n\nclass SchemaRoleViewSet(AccessViewSetMixin, viewsets.ModelViewSet):\n queryset = SchemaRole.objects.all().order_by('id')\n serializer_class = SchemaRoleSerializer\n pagination_class = DefaultLimitOffsetPagination\n access_policy = SchemaRoleAccessPolicy\n\n def get_queryset(self):\n return self.access_policy.scope_queryset(\n self.request, super().get_queryset()\n )\n\n def update(self, request, pk=None):\n raise MethodNotAllowed(request.method)\n\n def partial_update(self, request, pk=None):\n raise MethodNotAllowed(request.method)\n", "path": "mathesar/api/ui/viewsets/users.py"}]}
| 2,468 | 463 |
gh_patches_debug_28171
|
rasdani/github-patches
|
git_diff
|
fossasia__open-event-server-5129
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Correct exception for quantity < max order in tickets schema
**Describe the bug**
<!-- A clear and concise description of what the bug is. -->
Validation check message for quantity to be more than max-order needs to be corrected.
**To Reproduce**
A validation check for quantity and max-order is:
```
if 'quantity' in data and 'max_order' in data:
if data['quantity'] < data['max_order']:
raise UnprocessableEntity({'pointer': '/data/attributes/quantity'},
"quantity should be lesser than max-order")
```
**Expected behavior**
<!-- A clear and concise description of what you expected to happen. -->
The message should be: "quantity should not be lesser than max-order".
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/schema/tickets.py`
Content:
```
1 from marshmallow import validates_schema
2 from marshmallow_jsonapi import fields
3 from marshmallow_jsonapi.flask import Relationship
4 from sqlalchemy.orm.exc import NoResultFound
5
6 from app.api.helpers.exceptions import UnprocessableEntity
7 from app.api.helpers.utilities import dasherize
8 from app.api.schema.base import SoftDeletionSchema
9 from app.models.discount_code import DiscountCode
10 from app.models.ticket import Ticket
11 from utils.common import use_defaults
12
13
14 @use_defaults()
15 class TicketSchemaPublic(SoftDeletionSchema):
16 class Meta:
17 type_ = 'ticket'
18 self_view = 'v1.ticket_detail'
19 self_view_kwargs = {'id': '<id>'}
20 inflect = dasherize
21
22 @validates_schema(pass_original=True)
23 def validate_date(self, data, original_data):
24 if 'id' in original_data['data']:
25 ticket = Ticket.query.filter_by(id=original_data['data']['id']).one()
26
27 if 'sales_starts_at' not in data:
28 data['sales_starts_at'] = ticket.sales_starts_at
29
30 if 'sales_ends_at' not in data:
31 data['sales_ends_at'] = ticket.sales_ends_at
32
33 if data['sales_starts_at'] >= data['sales_ends_at']:
34 raise UnprocessableEntity({'pointer': '/data/attributes/sales-ends-at'},
35 "sales-ends-at should be after sales-starts-at")
36
37 @validates_schema
38 def validate_quantity(self, data):
39 if 'max_order' in data and 'min_order' in data:
40 if data['max_order'] < data['min_order']:
41 raise UnprocessableEntity({'pointer': '/data/attributes/max-order'},
42 "max-order should be greater than min-order")
43
44 if 'quantity' in data and 'min_order' in data:
45 if data['quantity'] < data['min_order']:
46 raise UnprocessableEntity({'pointer': '/data/attributes/quantity'},
47 "quantity should be greater than min-order")
48
49 if 'quantity' in data and 'max_order' in data:
50 if data['quantity'] < data['max_order']:
51 raise UnprocessableEntity({'pointer': '/data/attributes/quantity'},
52 "quantity should be lesser than max-order")
53
54 @validates_schema(pass_original=True)
55 def validate_discount_code(self, data, original_data):
56 if 'relationships' in original_data and 'discount-codes' in original_data['data']['relationships']:
57 discount_codes = original_data['data']['relationships']['discount-codes']
58 for code in discount_codes['data']:
59 try:
60 DiscountCode.query.filter_by(id=code['id']).one()
61 except NoResultFound:
62 raise UnprocessableEntity(
63 {'pointer': '/data/relationships/discount-codes'}, "Discount code does not exist")
64
65 id = fields.Str(dump_only=True)
66 name = fields.Str(required=True)
67 description = fields.Str(allow_none=True)
68 type = fields.Str(required=True)
69 price = fields.Float(validate=lambda n: n >= 0, allow_none=True)
70 quantity = fields.Integer(validate=lambda n: n >= 0, allow_none=True)
71 is_description_visible = fields.Boolean(default=False)
72 position = fields.Integer(allow_none=True)
73 is_fee_absorbed = fields.Boolean()
74 sales_starts_at = fields.DateTime(required=True)
75 sales_ends_at = fields.DateTime(required=True)
76 is_hidden = fields.Boolean(default=False)
77 min_order = fields.Integer(validate=lambda n: n >= 0, allow_none=True)
78 max_order = fields.Integer(validate=lambda n: n >= 0, allow_none=True)
79 is_checkin_restricted = fields.Boolean(default=True)
80 auto_checkin_enabled = fields.Boolean(default=False)
81 event = Relationship(attribute='event',
82 self_view='v1.ticket_event',
83 self_view_kwargs={'id': '<id>'},
84 related_view='v1.event_detail',
85 related_view_kwargs={'ticket_id': '<id>'},
86 schema='EventSchemaPublic',
87 type_='event')
88
89 ticket_tags = Relationship(attribute='tags',
90 self_view='v1.ticket_ticket_tag',
91 self_view_kwargs={'id': '<id>'},
92 related_view='v1.ticket_tag_list',
93 related_view_kwargs={'ticket_id': '<id>'},
94 schema='TicketTagSchema',
95 many=True,
96 type_='ticket-tag')
97
98 discount_codes = Relationship(
99 attribute='discount_codes',
100 self_view='v1.ticket_discount_codes',
101 self_view_kwargs={'id': '<id>'},
102 related_view='v1.discount_code_list',
103 related_view_kwargs={'ticket_id': '<id>'},
104 schema='DiscountCodeSchemaTicket',
105 many=True,
106 type_='discount-code')
107
108
109 class TicketSchema(TicketSchemaPublic):
110 class Meta:
111 type_ = 'ticket'
112 self_view = 'v1.ticket_detail'
113 self_view_kwargs = {'id': '<id>'}
114 inflect = dasherize
115
116 access_codes = Relationship(attribute='access_codes',
117 self_view='v1.ticket_access_code',
118 self_view_kwargs={'id': '<id>'},
119 related_view='v1.access_code_list',
120 related_view_kwargs={'ticket_id': '<id>'},
121 schema='AccessCodeSchema',
122 many=True,
123 type_='access-code')
124 attendees = Relationship(attribute='ticket_holders',
125 self_view='v1.ticket_attendees',
126 self_view_kwargs={'id': '<id>'},
127 related_view='v1.attendee_list_post',
128 related_view_kwargs={'ticket_id': '<id>'},
129 schema='AttendeeSchema',
130 many=True,
131 type_='attendee')
132
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/app/api/schema/tickets.py b/app/api/schema/tickets.py
--- a/app/api/schema/tickets.py
+++ b/app/api/schema/tickets.py
@@ -39,17 +39,17 @@
if 'max_order' in data and 'min_order' in data:
if data['max_order'] < data['min_order']:
raise UnprocessableEntity({'pointer': '/data/attributes/max-order'},
- "max-order should be greater than min-order")
+ "max-order should be greater than or equal to min-order")
if 'quantity' in data and 'min_order' in data:
if data['quantity'] < data['min_order']:
raise UnprocessableEntity({'pointer': '/data/attributes/quantity'},
- "quantity should be greater than min-order")
+ "quantity should be greater than or equal to min-order")
if 'quantity' in data and 'max_order' in data:
if data['quantity'] < data['max_order']:
raise UnprocessableEntity({'pointer': '/data/attributes/quantity'},
- "quantity should be lesser than max-order")
+ "quantity should be greater than or equal to max-order")
@validates_schema(pass_original=True)
def validate_discount_code(self, data, original_data):
|
{"golden_diff": "diff --git a/app/api/schema/tickets.py b/app/api/schema/tickets.py\n--- a/app/api/schema/tickets.py\n+++ b/app/api/schema/tickets.py\n@@ -39,17 +39,17 @@\n if 'max_order' in data and 'min_order' in data:\n if data['max_order'] < data['min_order']:\n raise UnprocessableEntity({'pointer': '/data/attributes/max-order'},\n- \"max-order should be greater than min-order\")\n+ \"max-order should be greater than or equal to min-order\")\n \n if 'quantity' in data and 'min_order' in data:\n if data['quantity'] < data['min_order']:\n raise UnprocessableEntity({'pointer': '/data/attributes/quantity'},\n- \"quantity should be greater than min-order\")\n+ \"quantity should be greater than or equal to min-order\")\n \n if 'quantity' in data and 'max_order' in data:\n if data['quantity'] < data['max_order']:\n raise UnprocessableEntity({'pointer': '/data/attributes/quantity'},\n- \"quantity should be lesser than max-order\")\n+ \"quantity should be greater than or equal to max-order\")\n \n @validates_schema(pass_original=True)\n def validate_discount_code(self, data, original_data):\n", "issue": "Correct exception for quantity < max order in tickets schema\n**Describe the bug**\r\n<!-- A clear and concise description of what the bug is. -->\r\nValidation check message for quantity to be more than max-order needs to be corrected.\r\n\r\n**To Reproduce**\r\nA validation check for quantity and max-order is:\r\n```\r\n if 'quantity' in data and 'max_order' in data:\r\n if data['quantity'] < data['max_order']:\r\n raise UnprocessableEntity({'pointer': '/data/attributes/quantity'},\r\n \"quantity should be lesser than max-order\")\r\n```\r\n\r\n**Expected behavior**\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nThe message should be: \"quantity should not be lesser than max-order\".\n", "before_files": [{"content": "from marshmallow import validates_schema\nfrom marshmallow_jsonapi import fields\nfrom marshmallow_jsonapi.flask import Relationship\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom app.api.helpers.exceptions import UnprocessableEntity\nfrom app.api.helpers.utilities import dasherize\nfrom app.api.schema.base import SoftDeletionSchema\nfrom app.models.discount_code import DiscountCode\nfrom app.models.ticket import Ticket\nfrom utils.common import use_defaults\n\n\n@use_defaults()\nclass TicketSchemaPublic(SoftDeletionSchema):\n class Meta:\n type_ = 'ticket'\n self_view = 'v1.ticket_detail'\n self_view_kwargs = {'id': '<id>'}\n inflect = dasherize\n\n @validates_schema(pass_original=True)\n def validate_date(self, data, original_data):\n if 'id' in original_data['data']:\n ticket = Ticket.query.filter_by(id=original_data['data']['id']).one()\n\n if 'sales_starts_at' not in data:\n data['sales_starts_at'] = ticket.sales_starts_at\n\n if 'sales_ends_at' not in data:\n data['sales_ends_at'] = ticket.sales_ends_at\n\n if data['sales_starts_at'] >= data['sales_ends_at']:\n raise UnprocessableEntity({'pointer': '/data/attributes/sales-ends-at'},\n \"sales-ends-at should be after sales-starts-at\")\n\n @validates_schema\n def validate_quantity(self, data):\n if 'max_order' in data and 'min_order' in data:\n if data['max_order'] < data['min_order']:\n raise UnprocessableEntity({'pointer': '/data/attributes/max-order'},\n \"max-order should be greater than min-order\")\n\n if 'quantity' in data and 'min_order' in data:\n if data['quantity'] < data['min_order']:\n raise UnprocessableEntity({'pointer': '/data/attributes/quantity'},\n \"quantity should 
be greater than min-order\")\n\n if 'quantity' in data and 'max_order' in data:\n if data['quantity'] < data['max_order']:\n raise UnprocessableEntity({'pointer': '/data/attributes/quantity'},\n \"quantity should be lesser than max-order\")\n\n @validates_schema(pass_original=True)\n def validate_discount_code(self, data, original_data):\n if 'relationships' in original_data and 'discount-codes' in original_data['data']['relationships']:\n discount_codes = original_data['data']['relationships']['discount-codes']\n for code in discount_codes['data']:\n try:\n DiscountCode.query.filter_by(id=code['id']).one()\n except NoResultFound:\n raise UnprocessableEntity(\n {'pointer': '/data/relationships/discount-codes'}, \"Discount code does not exist\")\n\n id = fields.Str(dump_only=True)\n name = fields.Str(required=True)\n description = fields.Str(allow_none=True)\n type = fields.Str(required=True)\n price = fields.Float(validate=lambda n: n >= 0, allow_none=True)\n quantity = fields.Integer(validate=lambda n: n >= 0, allow_none=True)\n is_description_visible = fields.Boolean(default=False)\n position = fields.Integer(allow_none=True)\n is_fee_absorbed = fields.Boolean()\n sales_starts_at = fields.DateTime(required=True)\n sales_ends_at = fields.DateTime(required=True)\n is_hidden = fields.Boolean(default=False)\n min_order = fields.Integer(validate=lambda n: n >= 0, allow_none=True)\n max_order = fields.Integer(validate=lambda n: n >= 0, allow_none=True)\n is_checkin_restricted = fields.Boolean(default=True)\n auto_checkin_enabled = fields.Boolean(default=False)\n event = Relationship(attribute='event',\n self_view='v1.ticket_event',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.event_detail',\n related_view_kwargs={'ticket_id': '<id>'},\n schema='EventSchemaPublic',\n type_='event')\n\n ticket_tags = Relationship(attribute='tags',\n self_view='v1.ticket_ticket_tag',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.ticket_tag_list',\n related_view_kwargs={'ticket_id': '<id>'},\n schema='TicketTagSchema',\n many=True,\n type_='ticket-tag')\n\n discount_codes = Relationship(\n attribute='discount_codes',\n self_view='v1.ticket_discount_codes',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.discount_code_list',\n related_view_kwargs={'ticket_id': '<id>'},\n schema='DiscountCodeSchemaTicket',\n many=True,\n type_='discount-code')\n\n\nclass TicketSchema(TicketSchemaPublic):\n class Meta:\n type_ = 'ticket'\n self_view = 'v1.ticket_detail'\n self_view_kwargs = {'id': '<id>'}\n inflect = dasherize\n\n access_codes = Relationship(attribute='access_codes',\n self_view='v1.ticket_access_code',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.access_code_list',\n related_view_kwargs={'ticket_id': '<id>'},\n schema='AccessCodeSchema',\n many=True,\n type_='access-code')\n attendees = Relationship(attribute='ticket_holders',\n self_view='v1.ticket_attendees',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.attendee_list_post',\n related_view_kwargs={'ticket_id': '<id>'},\n schema='AttendeeSchema',\n many=True,\n type_='attendee')\n", "path": "app/api/schema/tickets.py"}], "after_files": [{"content": "from marshmallow import validates_schema\nfrom marshmallow_jsonapi import fields\nfrom marshmallow_jsonapi.flask import Relationship\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom app.api.helpers.exceptions import UnprocessableEntity\nfrom app.api.helpers.utilities import dasherize\nfrom app.api.schema.base import SoftDeletionSchema\nfrom app.models.discount_code import 
DiscountCode\nfrom app.models.ticket import Ticket\nfrom utils.common import use_defaults\n\n\n@use_defaults()\nclass TicketSchemaPublic(SoftDeletionSchema):\n class Meta:\n type_ = 'ticket'\n self_view = 'v1.ticket_detail'\n self_view_kwargs = {'id': '<id>'}\n inflect = dasherize\n\n @validates_schema(pass_original=True)\n def validate_date(self, data, original_data):\n if 'id' in original_data['data']:\n ticket = Ticket.query.filter_by(id=original_data['data']['id']).one()\n\n if 'sales_starts_at' not in data:\n data['sales_starts_at'] = ticket.sales_starts_at\n\n if 'sales_ends_at' not in data:\n data['sales_ends_at'] = ticket.sales_ends_at\n\n if data['sales_starts_at'] >= data['sales_ends_at']:\n raise UnprocessableEntity({'pointer': '/data/attributes/sales-ends-at'},\n \"sales-ends-at should be after sales-starts-at\")\n\n @validates_schema\n def validate_quantity(self, data):\n if 'max_order' in data and 'min_order' in data:\n if data['max_order'] < data['min_order']:\n raise UnprocessableEntity({'pointer': '/data/attributes/max-order'},\n \"max-order should be greater than or equal to min-order\")\n\n if 'quantity' in data and 'min_order' in data:\n if data['quantity'] < data['min_order']:\n raise UnprocessableEntity({'pointer': '/data/attributes/quantity'},\n \"quantity should be greater than or equal to min-order\")\n\n if 'quantity' in data and 'max_order' in data:\n if data['quantity'] < data['max_order']:\n raise UnprocessableEntity({'pointer': '/data/attributes/quantity'},\n \"quantity should be greater than or equal to max-order\")\n\n @validates_schema(pass_original=True)\n def validate_discount_code(self, data, original_data):\n if 'relationships' in original_data and 'discount-codes' in original_data['data']['relationships']:\n discount_codes = original_data['data']['relationships']['discount-codes']\n for code in discount_codes['data']:\n try:\n DiscountCode.query.filter_by(id=code['id']).one()\n except NoResultFound:\n raise UnprocessableEntity(\n {'pointer': '/data/relationships/discount-codes'}, \"Discount code does not exist\")\n\n id = fields.Str(dump_only=True)\n name = fields.Str(required=True)\n description = fields.Str(allow_none=True)\n type = fields.Str(required=True)\n price = fields.Float(validate=lambda n: n >= 0, allow_none=True)\n quantity = fields.Integer(validate=lambda n: n >= 0, allow_none=True)\n is_description_visible = fields.Boolean(default=False)\n position = fields.Integer(allow_none=True)\n is_fee_absorbed = fields.Boolean()\n sales_starts_at = fields.DateTime(required=True)\n sales_ends_at = fields.DateTime(required=True)\n is_hidden = fields.Boolean(default=False)\n min_order = fields.Integer(validate=lambda n: n >= 0, allow_none=True)\n max_order = fields.Integer(validate=lambda n: n >= 0, allow_none=True)\n is_checkin_restricted = fields.Boolean(default=True)\n auto_checkin_enabled = fields.Boolean(default=False)\n event = Relationship(attribute='event',\n self_view='v1.ticket_event',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.event_detail',\n related_view_kwargs={'ticket_id': '<id>'},\n schema='EventSchemaPublic',\n type_='event')\n\n ticket_tags = Relationship(attribute='tags',\n self_view='v1.ticket_ticket_tag',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.ticket_tag_list',\n related_view_kwargs={'ticket_id': '<id>'},\n schema='TicketTagSchema',\n many=True,\n type_='ticket-tag')\n\n discount_codes = Relationship(\n attribute='discount_codes',\n self_view='v1.ticket_discount_codes',\n self_view_kwargs={'id': 
'<id>'},\n related_view='v1.discount_code_list',\n related_view_kwargs={'ticket_id': '<id>'},\n schema='DiscountCodeSchemaTicket',\n many=True,\n type_='discount-code')\n\n\nclass TicketSchema(TicketSchemaPublic):\n class Meta:\n type_ = 'ticket'\n self_view = 'v1.ticket_detail'\n self_view_kwargs = {'id': '<id>'}\n inflect = dasherize\n\n access_codes = Relationship(attribute='access_codes',\n self_view='v1.ticket_access_code',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.access_code_list',\n related_view_kwargs={'ticket_id': '<id>'},\n schema='AccessCodeSchema',\n many=True,\n type_='access-code')\n attendees = Relationship(attribute='ticket_holders',\n self_view='v1.ticket_attendees',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.attendee_list_post',\n related_view_kwargs={'ticket_id': '<id>'},\n schema='AttendeeSchema',\n many=True,\n type_='attendee')\n", "path": "app/api/schema/tickets.py"}]}
| 1,878 | 279 |
gh_patches_debug_55113
|
rasdani/github-patches
|
git_diff
|
netbox-community__netbox-9819
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Circuit termination on interface not working when accessed from interface table
### NetBox version
v3.3-beta1
### Python version
3.9
### Steps to Reproduce
1. Create device with interfaces
2. Create circuit with Z-side on same site as device
3. Try to connect interface to circuit termination from interface table
4. Select B Side Circuit
5. Try to select Side for circuit
### Expected Behavior
On B Side you are able to select side for circuit
### Observed Behavior
Side option menu is empty.
This is caused because $ is missing for `$termination_{cable_end}_circuit`
https://github.com/netbox-community/netbox/blob/abfa6a325af17d65038304feed2536330d8c60a7/netbox/dcim/forms/connections.py#L141
Circuit termination on interface not working when accessed from interface table
### NetBox version
v3.3-beta1
### Python version
3.9
### Steps to Reproduce
1. Create device with interfaces
2. Create circuit with Z-side on same site as device
3. Try to connect interface to circuit termination from interface table
4. Select B Side Circuit
5. Try to select Side for circuit
### Expected Behavior
On B Side you are able to select side for circuit
### Observed Behavior
Side option menu is empty.
This is caused because $ is missing for `$termination_{cable_end}_circuit`
https://github.com/netbox-community/netbox/blob/abfa6a325af17d65038304feed2536330d8c60a7/netbox/dcim/forms/connections.py#L141
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netbox/dcim/forms/connections.py`
Content:
```
1 from django import forms
2
3 from circuits.models import Circuit, CircuitTermination, Provider
4 from dcim.models import *
5 from utilities.forms import DynamicModelChoiceField, DynamicModelMultipleChoiceField
6 from .models import CableForm
7
8
9 def get_cable_form(a_type, b_type):
10
11 class FormMetaclass(forms.models.ModelFormMetaclass):
12
13 def __new__(mcs, name, bases, attrs):
14
15 for cable_end, term_cls in (('a', a_type), ('b', b_type)):
16
17 attrs[f'termination_{cable_end}_region'] = DynamicModelChoiceField(
18 queryset=Region.objects.all(),
19 label='Region',
20 required=False,
21 initial_params={
22 'sites': f'$termination_{cable_end}_site'
23 }
24 )
25 attrs[f'termination_{cable_end}_sitegroup'] = DynamicModelChoiceField(
26 queryset=SiteGroup.objects.all(),
27 label='Site group',
28 required=False,
29 initial_params={
30 'sites': f'$termination_{cable_end}_site'
31 }
32 )
33 attrs[f'termination_{cable_end}_site'] = DynamicModelChoiceField(
34 queryset=Site.objects.all(),
35 label='Site',
36 required=False,
37 query_params={
38 'region_id': f'$termination_{cable_end}_region',
39 'group_id': f'$termination_{cable_end}_sitegroup',
40 }
41 )
42 attrs[f'termination_{cable_end}_location'] = DynamicModelChoiceField(
43 queryset=Location.objects.all(),
44 label='Location',
45 required=False,
46 null_option='None',
47 query_params={
48 'site_id': f'$termination_{cable_end}_site'
49 }
50 )
51
52 # Device component
53 if hasattr(term_cls, 'device'):
54
55 attrs[f'termination_{cable_end}_rack'] = DynamicModelChoiceField(
56 queryset=Rack.objects.all(),
57 label='Rack',
58 required=False,
59 null_option='None',
60 initial_params={
61 'devices': f'$termination_{cable_end}_device'
62 },
63 query_params={
64 'site_id': f'$termination_{cable_end}_site',
65 'location_id': f'$termination_{cable_end}_location',
66 }
67 )
68 attrs[f'termination_{cable_end}_device'] = DynamicModelChoiceField(
69 queryset=Device.objects.all(),
70 label='Device',
71 required=False,
72 initial_params={
73 f'{term_cls._meta.model_name}s__in': f'${cable_end}_terminations'
74 },
75 query_params={
76 'site_id': f'$termination_{cable_end}_site',
77 'location_id': f'$termination_{cable_end}_location',
78 'rack_id': f'$termination_{cable_end}_rack',
79 }
80 )
81 attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(
82 queryset=term_cls.objects.all(),
83 label=term_cls._meta.verbose_name.title(),
84 disabled_indicator='_occupied',
85 query_params={
86 'device_id': f'$termination_{cable_end}_device',
87 }
88 )
89
90 # PowerFeed
91 elif term_cls == PowerFeed:
92
93 attrs[f'termination_{cable_end}_powerpanel'] = DynamicModelChoiceField(
94 queryset=PowerPanel.objects.all(),
95 label='Power Panel',
96 required=False,
97 initial_params={
98 'powerfeeds__in': f'${cable_end}_terminations'
99 },
100 query_params={
101 'site_id': f'$termination_{cable_end}_site',
102 'location_id': f'$termination_{cable_end}_location',
103 }
104 )
105 attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(
106 queryset=term_cls.objects.all(),
107 label='Power Feed',
108 disabled_indicator='_occupied',
109 query_params={
110 'powerpanel_id': f'$termination_{cable_end}_powerpanel',
111 }
112 )
113
114 # CircuitTermination
115 elif term_cls == CircuitTermination:
116
117 attrs[f'termination_{cable_end}_provider'] = DynamicModelChoiceField(
118 queryset=Provider.objects.all(),
119 label='Provider',
120 initial_params={
121 'circuits': f'$termination_{cable_end}_circuit'
122 },
123 required=False
124 )
125 attrs[f'termination_{cable_end}_circuit'] = DynamicModelChoiceField(
126 queryset=Circuit.objects.all(),
127 label='Circuit',
128 initial_params={
129 'terminations__in': f'${cable_end}_terminations'
130 },
131 query_params={
132 'provider_id': f'$termination_{cable_end}_provider',
133 'site_id': f'$termination_{cable_end}_site',
134 }
135 )
136 attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(
137 queryset=term_cls.objects.all(),
138 label='Side',
139 disabled_indicator='_occupied',
140 query_params={
141 'circuit_id': f'termination_{cable_end}_circuit',
142 }
143 )
144
145 return super().__new__(mcs, name, bases, attrs)
146
147 class _CableForm(CableForm, metaclass=FormMetaclass):
148
149 def __init__(self, *args, **kwargs):
150
151 # TODO: Temporary hack to work around list handling limitations with utils.normalize_querydict()
152 for field_name in ('a_terminations', 'b_terminations'):
153 if field_name in kwargs.get('initial', {}) and type(kwargs['initial'][field_name]) is not list:
154 kwargs['initial'][field_name] = [kwargs['initial'][field_name]]
155
156 super().__init__(*args, **kwargs)
157
158 if self.instance and self.instance.pk:
159 # Initialize A/B terminations when modifying an existing Cable instance
160 self.initial['a_terminations'] = self.instance.a_terminations
161 self.initial['b_terminations'] = self.instance.b_terminations
162
163 def clean(self):
164 super().clean()
165
166 # Set the A/B terminations on the Cable instance
167 self.instance.a_terminations = self.cleaned_data['a_terminations']
168 self.instance.b_terminations = self.cleaned_data['b_terminations']
169
170 return _CableForm
171
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/netbox/dcim/forms/connections.py b/netbox/dcim/forms/connections.py
--- a/netbox/dcim/forms/connections.py
+++ b/netbox/dcim/forms/connections.py
@@ -138,7 +138,7 @@
label='Side',
disabled_indicator='_occupied',
query_params={
- 'circuit_id': f'termination_{cable_end}_circuit',
+ 'circuit_id': f'$termination_{cable_end}_circuit',
}
)
|
{"golden_diff": "diff --git a/netbox/dcim/forms/connections.py b/netbox/dcim/forms/connections.py\n--- a/netbox/dcim/forms/connections.py\n+++ b/netbox/dcim/forms/connections.py\n@@ -138,7 +138,7 @@\n label='Side',\n disabled_indicator='_occupied',\n query_params={\n- 'circuit_id': f'termination_{cable_end}_circuit',\n+ 'circuit_id': f'$termination_{cable_end}_circuit',\n }\n )\n", "issue": "Circuit termination on interface not working when accesssed from interface table\n### NetBox version\n\nv3.3-beta1\n\n### Python version\n\n3.9\n\n### Steps to Reproduce\n\n1. Create device with interfaces\r\n2. Create circuit with Z-side on same site as device\r\n3. Try to connect interface to circuit termination from interface table\r\n4. Select B Side Cicuit\r\n5. Try to select Side for circuit\n\n### Expected Behavior\n\nOn B Side you are able to select side for circuit\n\n### Observed Behavior\n\nSide option menu is empty because.\r\n\r\nThis is caused because $ is missing for `$termination_{cable_end}_circuit`\r\n\r\nhttps://github.com/netbox-community/netbox/blob/abfa6a325af17d65038304feed2536330d8c60a7/netbox/dcim/forms/connections.py#L141\nCircuit termination on interface not working when accesssed from interface table\n### NetBox version\n\nv3.3-beta1\n\n### Python version\n\n3.9\n\n### Steps to Reproduce\n\n1. Create device with interfaces\r\n2. Create circuit with Z-side on same site as device\r\n3. Try to connect interface to circuit termination from interface table\r\n4. Select B Side Cicuit\r\n5. Try to select Side for circuit\n\n### Expected Behavior\n\nOn B Side you are able to select side for circuit\n\n### Observed Behavior\n\nSide option menu is empty because.\r\n\r\nThis is caused because $ is missing for `$termination_{cable_end}_circuit`\r\n\r\nhttps://github.com/netbox-community/netbox/blob/abfa6a325af17d65038304feed2536330d8c60a7/netbox/dcim/forms/connections.py#L141\n", "before_files": [{"content": "from django import forms\n\nfrom circuits.models import Circuit, CircuitTermination, Provider\nfrom dcim.models import *\nfrom utilities.forms import DynamicModelChoiceField, DynamicModelMultipleChoiceField\nfrom .models import CableForm\n\n\ndef get_cable_form(a_type, b_type):\n\n class FormMetaclass(forms.models.ModelFormMetaclass):\n\n def __new__(mcs, name, bases, attrs):\n\n for cable_end, term_cls in (('a', a_type), ('b', b_type)):\n\n attrs[f'termination_{cable_end}_region'] = DynamicModelChoiceField(\n queryset=Region.objects.all(),\n label='Region',\n required=False,\n initial_params={\n 'sites': f'$termination_{cable_end}_site'\n }\n )\n attrs[f'termination_{cable_end}_sitegroup'] = DynamicModelChoiceField(\n queryset=SiteGroup.objects.all(),\n label='Site group',\n required=False,\n initial_params={\n 'sites': f'$termination_{cable_end}_site'\n }\n )\n attrs[f'termination_{cable_end}_site'] = DynamicModelChoiceField(\n queryset=Site.objects.all(),\n label='Site',\n required=False,\n query_params={\n 'region_id': f'$termination_{cable_end}_region',\n 'group_id': f'$termination_{cable_end}_sitegroup',\n }\n )\n attrs[f'termination_{cable_end}_location'] = DynamicModelChoiceField(\n queryset=Location.objects.all(),\n label='Location',\n required=False,\n null_option='None',\n query_params={\n 'site_id': f'$termination_{cable_end}_site'\n }\n )\n\n # Device component\n if hasattr(term_cls, 'device'):\n\n attrs[f'termination_{cable_end}_rack'] = DynamicModelChoiceField(\n queryset=Rack.objects.all(),\n label='Rack',\n required=False,\n null_option='None',\n initial_params={\n 
'devices': f'$termination_{cable_end}_device'\n },\n query_params={\n 'site_id': f'$termination_{cable_end}_site',\n 'location_id': f'$termination_{cable_end}_location',\n }\n )\n attrs[f'termination_{cable_end}_device'] = DynamicModelChoiceField(\n queryset=Device.objects.all(),\n label='Device',\n required=False,\n initial_params={\n f'{term_cls._meta.model_name}s__in': f'${cable_end}_terminations'\n },\n query_params={\n 'site_id': f'$termination_{cable_end}_site',\n 'location_id': f'$termination_{cable_end}_location',\n 'rack_id': f'$termination_{cable_end}_rack',\n }\n )\n attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(\n queryset=term_cls.objects.all(),\n label=term_cls._meta.verbose_name.title(),\n disabled_indicator='_occupied',\n query_params={\n 'device_id': f'$termination_{cable_end}_device',\n }\n )\n\n # PowerFeed\n elif term_cls == PowerFeed:\n\n attrs[f'termination_{cable_end}_powerpanel'] = DynamicModelChoiceField(\n queryset=PowerPanel.objects.all(),\n label='Power Panel',\n required=False,\n initial_params={\n 'powerfeeds__in': f'${cable_end}_terminations'\n },\n query_params={\n 'site_id': f'$termination_{cable_end}_site',\n 'location_id': f'$termination_{cable_end}_location',\n }\n )\n attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(\n queryset=term_cls.objects.all(),\n label='Power Feed',\n disabled_indicator='_occupied',\n query_params={\n 'powerpanel_id': f'$termination_{cable_end}_powerpanel',\n }\n )\n\n # CircuitTermination\n elif term_cls == CircuitTermination:\n\n attrs[f'termination_{cable_end}_provider'] = DynamicModelChoiceField(\n queryset=Provider.objects.all(),\n label='Provider',\n initial_params={\n 'circuits': f'$termination_{cable_end}_circuit'\n },\n required=False\n )\n attrs[f'termination_{cable_end}_circuit'] = DynamicModelChoiceField(\n queryset=Circuit.objects.all(),\n label='Circuit',\n initial_params={\n 'terminations__in': f'${cable_end}_terminations'\n },\n query_params={\n 'provider_id': f'$termination_{cable_end}_provider',\n 'site_id': f'$termination_{cable_end}_site',\n }\n )\n attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(\n queryset=term_cls.objects.all(),\n label='Side',\n disabled_indicator='_occupied',\n query_params={\n 'circuit_id': f'termination_{cable_end}_circuit',\n }\n )\n\n return super().__new__(mcs, name, bases, attrs)\n\n class _CableForm(CableForm, metaclass=FormMetaclass):\n\n def __init__(self, *args, **kwargs):\n\n # TODO: Temporary hack to work around list handling limitations with utils.normalize_querydict()\n for field_name in ('a_terminations', 'b_terminations'):\n if field_name in kwargs.get('initial', {}) and type(kwargs['initial'][field_name]) is not list:\n kwargs['initial'][field_name] = [kwargs['initial'][field_name]]\n\n super().__init__(*args, **kwargs)\n\n if self.instance and self.instance.pk:\n # Initialize A/B terminations when modifying an existing Cable instance\n self.initial['a_terminations'] = self.instance.a_terminations\n self.initial['b_terminations'] = self.instance.b_terminations\n\n def clean(self):\n super().clean()\n\n # Set the A/B terminations on the Cable instance\n self.instance.a_terminations = self.cleaned_data['a_terminations']\n self.instance.b_terminations = self.cleaned_data['b_terminations']\n\n return _CableForm\n", "path": "netbox/dcim/forms/connections.py"}], "after_files": [{"content": "from django import forms\n\nfrom circuits.models import Circuit, CircuitTermination, Provider\nfrom dcim.models import *\nfrom 
utilities.forms import DynamicModelChoiceField, DynamicModelMultipleChoiceField\nfrom .models import CableForm\n\n\ndef get_cable_form(a_type, b_type):\n\n class FormMetaclass(forms.models.ModelFormMetaclass):\n\n def __new__(mcs, name, bases, attrs):\n\n for cable_end, term_cls in (('a', a_type), ('b', b_type)):\n\n attrs[f'termination_{cable_end}_region'] = DynamicModelChoiceField(\n queryset=Region.objects.all(),\n label='Region',\n required=False,\n initial_params={\n 'sites': f'$termination_{cable_end}_site'\n }\n )\n attrs[f'termination_{cable_end}_sitegroup'] = DynamicModelChoiceField(\n queryset=SiteGroup.objects.all(),\n label='Site group',\n required=False,\n initial_params={\n 'sites': f'$termination_{cable_end}_site'\n }\n )\n attrs[f'termination_{cable_end}_site'] = DynamicModelChoiceField(\n queryset=Site.objects.all(),\n label='Site',\n required=False,\n query_params={\n 'region_id': f'$termination_{cable_end}_region',\n 'group_id': f'$termination_{cable_end}_sitegroup',\n }\n )\n attrs[f'termination_{cable_end}_location'] = DynamicModelChoiceField(\n queryset=Location.objects.all(),\n label='Location',\n required=False,\n null_option='None',\n query_params={\n 'site_id': f'$termination_{cable_end}_site'\n }\n )\n\n # Device component\n if hasattr(term_cls, 'device'):\n\n attrs[f'termination_{cable_end}_rack'] = DynamicModelChoiceField(\n queryset=Rack.objects.all(),\n label='Rack',\n required=False,\n null_option='None',\n initial_params={\n 'devices': f'$termination_{cable_end}_device'\n },\n query_params={\n 'site_id': f'$termination_{cable_end}_site',\n 'location_id': f'$termination_{cable_end}_location',\n }\n )\n attrs[f'termination_{cable_end}_device'] = DynamicModelChoiceField(\n queryset=Device.objects.all(),\n label='Device',\n required=False,\n initial_params={\n f'{term_cls._meta.model_name}s__in': f'${cable_end}_terminations'\n },\n query_params={\n 'site_id': f'$termination_{cable_end}_site',\n 'location_id': f'$termination_{cable_end}_location',\n 'rack_id': f'$termination_{cable_end}_rack',\n }\n )\n attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(\n queryset=term_cls.objects.all(),\n label=term_cls._meta.verbose_name.title(),\n disabled_indicator='_occupied',\n query_params={\n 'device_id': f'$termination_{cable_end}_device',\n }\n )\n\n # PowerFeed\n elif term_cls == PowerFeed:\n\n attrs[f'termination_{cable_end}_powerpanel'] = DynamicModelChoiceField(\n queryset=PowerPanel.objects.all(),\n label='Power Panel',\n required=False,\n initial_params={\n 'powerfeeds__in': f'${cable_end}_terminations'\n },\n query_params={\n 'site_id': f'$termination_{cable_end}_site',\n 'location_id': f'$termination_{cable_end}_location',\n }\n )\n attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(\n queryset=term_cls.objects.all(),\n label='Power Feed',\n disabled_indicator='_occupied',\n query_params={\n 'powerpanel_id': f'$termination_{cable_end}_powerpanel',\n }\n )\n\n # CircuitTermination\n elif term_cls == CircuitTermination:\n\n attrs[f'termination_{cable_end}_provider'] = DynamicModelChoiceField(\n queryset=Provider.objects.all(),\n label='Provider',\n initial_params={\n 'circuits': f'$termination_{cable_end}_circuit'\n },\n required=False\n )\n attrs[f'termination_{cable_end}_circuit'] = DynamicModelChoiceField(\n queryset=Circuit.objects.all(),\n label='Circuit',\n initial_params={\n 'terminations__in': f'${cable_end}_terminations'\n },\n query_params={\n 'provider_id': f'$termination_{cable_end}_provider',\n 'site_id': 
f'$termination_{cable_end}_site',\n }\n )\n attrs[f'{cable_end}_terminations'] = DynamicModelMultipleChoiceField(\n queryset=term_cls.objects.all(),\n label='Side',\n disabled_indicator='_occupied',\n query_params={\n 'circuit_id': f'$termination_{cable_end}_circuit',\n }\n )\n\n return super().__new__(mcs, name, bases, attrs)\n\n class _CableForm(CableForm, metaclass=FormMetaclass):\n\n def __init__(self, *args, **kwargs):\n\n # TODO: Temporary hack to work around list handling limitations with utils.normalize_querydict()\n for field_name in ('a_terminations', 'b_terminations'):\n if field_name in kwargs.get('initial', {}) and type(kwargs['initial'][field_name]) is not list:\n kwargs['initial'][field_name] = [kwargs['initial'][field_name]]\n\n super().__init__(*args, **kwargs)\n\n if self.instance and self.instance.pk:\n # Initialize A/B terminations when modifying an existing Cable instance\n self.initial['a_terminations'] = self.instance.a_terminations\n self.initial['b_terminations'] = self.instance.b_terminations\n\n def clean(self):\n super().clean()\n\n # Set the A/B terminations on the Cable instance\n self.instance.a_terminations = self.cleaned_data['a_terminations']\n self.instance.b_terminations = self.cleaned_data['b_terminations']\n\n return _CableForm\n", "path": "netbox/dcim/forms/connections.py"}]}
| 2,392 | 114 |
gh_patches_debug_37978
|
rasdani/github-patches
|
git_diff
|
AnalogJ__lexicon-336
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Memset provider: TypeError: string indices must be integers
Hi,
When using the Memset provider with the default table formatting I get this error:
```bash
$ lexicon memset create example.com TXT --name _acme-challenge.example.com --content BLAH --ttl 300
Traceback (most recent call last):
File "/usr/local/bin/lexicon", line 11, in <module>
sys.exit(main())
File "/usr/local/lib/python2.7/dist-packages/lexicon/__main__.py", line 133, in main
handle_output(results, parsed_args.output)
File "/usr/local/lib/python2.7/dist-packages/lexicon/__main__.py", line 109, in handle_output
table = generate_table_result(logger, results, output_type == 'TABLE-NO-HEADER')
File "/usr/local/lib/python2.7/dist-packages/lexicon/__main__.py", line 75, in generate_table_result
array = [[row['id'], row['type'], row['name'], row['content'], row['ttl']] for row in output]
TypeError: string indices must be integers
```
I think this is because `output` is a string not an array - when I added `print output` I got a string like `969f9caabe19859c11249333dd80aa15`.
When I use `--output JSON` I get the same ID plus quotes:
```bash
$ lexicon memset create example.com TXT --name _acme-challenge.example.com --content BLAH --ttl 300 --output JSON
"969f9caabe19859c11249333dd80aa15"
```
I know Memset's not public so if you need any help to test it just let me know. For now I'll work around it with `--output QUIET` since I don't really care about the output here.
Thanks!
Dave
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lexicon/cli.py`
Content:
```
1 #!/usr/bin/env python
2 """Module for Lexicon command-line interface"""
3 from __future__ import absolute_import, print_function
4 import json
5 import logging
6 import os
7 import sys
8
9 from lexicon.client import Client
10 from lexicon.config import ConfigResolver
11 from lexicon.parser import generate_cli_main_parser
12
13
14 logger = logging.getLogger(__name__) # pylint: disable=C0103
15
16
17 def generate_table_result(lexicon_logger, output=None, without_header=None):
18 """Convert returned JSON into a nice table for command line usage"""
19 try:
20 _ = (entry for entry in output)
21 except TypeError:
22 lexicon_logger.debug('Command output is not iterable, and then cannot '
23 'be printed with --quiet parameter not enabled.')
24 return None
25
26 array = [[
27 row.get('id', ''),
28 row.get('type', ''),
29 row.get('name', ''),
30 row.get('content', ''),
31 row.get('ttl', '')] for row in output]
32
33 # Insert header (insert before calculating the max width of each column
34 # to take headers size into account)
35 if not without_header:
36 headers = ['ID', 'TYPE', 'NAME', 'CONTENT', 'TTL']
37 array.insert(0, headers)
38
39 column_widths = [0, 0, 0, 0, 0]
40 # Find max width for each column
41 for row in array:
42 for idx, col in enumerate(row):
43 width = len(str(col))
44 if width > column_widths[idx]:
45 column_widths[idx] = width
46
47 # Add a 'nice' separator
48 if not without_header:
49 array.insert(1, ['-' * column_widths[idx]
50 for idx in range(len(column_widths))])
51
52 # Construct table to be printed
53 table = []
54 for row in array:
55 row_list = []
56 for idx, col in enumerate(row):
57 row_list.append(str(col).ljust(column_widths[idx]))
58 table.append(' '.join(row_list))
59
60 # Return table
61 return '\n'.join(table)
62
63
64 def handle_output(results, output_type):
65 """Print the relevant output for given output_type"""
66 if not output_type == 'QUIET':
67 if not output_type == 'JSON':
68 table = generate_table_result(
69 logger, results, output_type == 'TABLE-NO-HEADER')
70 if table:
71 print(table)
72 else:
73 try:
74 _ = (entry for entry in results)
75 json_str = json.dumps(results)
76 if json_str:
77 print(json_str)
78 except TypeError:
79 logger.debug('Output is not a JSON, and then cannot '
80 'be printed with --output=JSON parameter.')
81
82
83 def main():
84 """Main function of Lexicon."""
85 # Dynamically determine all the providers available and gather command line arguments.
86 parsed_args = generate_cli_main_parser().parse_args()
87
88 log_level = logging.getLevelName(parsed_args.log_level)
89 logging.basicConfig(stream=sys.stdout, level=log_level,
90 format='%(message)s')
91 logger.debug('Arguments: %s', parsed_args)
92
93 # In the CLI context, will get configuration interactively:
94 # * from the command line
95 # * from the environment variables
96 # * from lexicon configuration files in working directory
97 config = ConfigResolver()
98 config.with_args(parsed_args).with_env().with_config_dir(os.getcwd())
99
100 client = Client(config)
101
102 results = client.execute()
103
104 handle_output(results, parsed_args.output)
105
106
107 if __name__ == '__main__':
108 main()
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lexicon/cli.py b/lexicon/cli.py
--- a/lexicon/cli.py
+++ b/lexicon/cli.py
@@ -14,12 +14,10 @@
logger = logging.getLogger(__name__) # pylint: disable=C0103
-def generate_table_result(lexicon_logger, output=None, without_header=None):
- """Convert returned JSON into a nice table for command line usage"""
- try:
- _ = (entry for entry in output)
- except TypeError:
- lexicon_logger.debug('Command output is not iterable, and then cannot '
+def generate_list_table_result(lexicon_logger, output=None, without_header=None):
+ """Convert returned data from list actions into a nice table for command line usage"""
+ if not isinstance(output, list):
+ lexicon_logger.debug('Command output is not a list, and then cannot '
'be printed with --quiet parameter not enabled.')
return None
@@ -58,26 +56,43 @@
table.append(' '.join(row_list))
# Return table
- return '\n'.join(table)
+ return os.linesep.join(table)
-def handle_output(results, output_type):
+def generate_table_results(output=None, without_header=None):
+ """Convert returned data from non-list actions into a nice table for command line usage"""
+ array = []
+ str_output = str(output)
+
+ if not without_header:
+ array.append('RESULT')
+ array.append('-' * max(6, len(str_output)))
+
+ array.append(str_output)
+ return os.linesep.join(array)
+
+
+def handle_output(results, output_type, action):
"""Print the relevant output for given output_type"""
- if not output_type == 'QUIET':
- if not output_type == 'JSON':
- table = generate_table_result(
+ if output_type == 'QUIET':
+ return
+
+ if not output_type == 'JSON':
+ if action == 'list':
+ table = generate_list_table_result(
logger, results, output_type == 'TABLE-NO-HEADER')
- if table:
- print(table)
else:
- try:
- _ = (entry for entry in results)
- json_str = json.dumps(results)
- if json_str:
- print(json_str)
- except TypeError:
- logger.debug('Output is not a JSON, and then cannot '
- 'be printed with --output=JSON parameter.')
+ table = generate_table_results(results, output_type == 'TABLE-NO-HEADER')
+ if table:
+ print(table)
+ else:
+ try:
+ json_str = json.dumps(results)
+ if json_str:
+ print(json_str)
+ except TypeError:
+ logger.debug('Output is not JSON serializable, and then cannot '
+ 'be printed with --output=JSON parameter.')
def main():
@@ -101,7 +116,7 @@
results = client.execute()
- handle_output(results, parsed_args.output)
+ handle_output(results, parsed_args.output, config.resolve('lexicon:action'))
if __name__ == '__main__':
|
{"golden_diff": "diff --git a/lexicon/cli.py b/lexicon/cli.py\n--- a/lexicon/cli.py\n+++ b/lexicon/cli.py\n@@ -14,12 +14,10 @@\n logger = logging.getLogger(__name__) # pylint: disable=C0103\n \n \n-def generate_table_result(lexicon_logger, output=None, without_header=None):\n- \"\"\"Convert returned JSON into a nice table for command line usage\"\"\"\n- try:\n- _ = (entry for entry in output)\n- except TypeError:\n- lexicon_logger.debug('Command output is not iterable, and then cannot '\n+def generate_list_table_result(lexicon_logger, output=None, without_header=None):\n+ \"\"\"Convert returned data from list actions into a nice table for command line usage\"\"\"\n+ if not isinstance(output, list):\n+ lexicon_logger.debug('Command output is not a list, and then cannot '\n 'be printed with --quiet parameter not enabled.')\n return None\n \n@@ -58,26 +56,43 @@\n table.append(' '.join(row_list))\n \n # Return table\n- return '\\n'.join(table)\n+ return os.linesep.join(table)\n \n \n-def handle_output(results, output_type):\n+def generate_table_results(output=None, without_header=None):\n+ \"\"\"Convert returned data from non-list actions into a nice table for command line usage\"\"\"\n+ array = []\n+ str_output = str(output)\n+\n+ if not without_header:\n+ array.append('RESULT')\n+ array.append('-' * max(6, len(str_output)))\n+\n+ array.append(str_output)\n+ return os.linesep.join(array)\n+\n+\n+def handle_output(results, output_type, action):\n \"\"\"Print the relevant output for given output_type\"\"\"\n- if not output_type == 'QUIET':\n- if not output_type == 'JSON':\n- table = generate_table_result(\n+ if output_type == 'QUIET':\n+ return\n+\n+ if not output_type == 'JSON':\n+ if action == 'list':\n+ table = generate_list_table_result(\n logger, results, output_type == 'TABLE-NO-HEADER')\n- if table:\n- print(table)\n else:\n- try:\n- _ = (entry for entry in results)\n- json_str = json.dumps(results)\n- if json_str:\n- print(json_str)\n- except TypeError:\n- logger.debug('Output is not a JSON, and then cannot '\n- 'be printed with --output=JSON parameter.')\n+ table = generate_table_results(results, output_type == 'TABLE-NO-HEADER')\n+ if table:\n+ print(table)\n+ else:\n+ try:\n+ json_str = json.dumps(results)\n+ if json_str:\n+ print(json_str)\n+ except TypeError:\n+ logger.debug('Output is not JSON serializable, and then cannot '\n+ 'be printed with --output=JSON parameter.')\n \n \n def main():\n@@ -101,7 +116,7 @@\n \n results = client.execute()\n \n- handle_output(results, parsed_args.output)\n+ handle_output(results, parsed_args.output, config.resolve('lexicon:action'))\n \n \n if __name__ == '__main__':\n", "issue": "Memset provider: TypeError: string indices must be integers\nHi,\r\n\r\nWhen using the Memset provider with the default table formatting I get this error:\r\n\r\n```bash\r\n$ lexicon memset create example.com TXT --name _acme-challenge.example.com --content BLAH --ttl 300\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/lexicon\", line 11, in <module>\r\n sys.exit(main())\r\n File \"/usr/local/lib/python2.7/dist-packages/lexicon/__main__.py\", line 133, in main\r\n handle_output(results, parsed_args.output)\r\n File \"/usr/local/lib/python2.7/dist-packages/lexicon/__main__.py\", line 109, in handle_output\r\n table = generate_table_result(logger, results, output_type == 'TABLE-NO-HEADER')\r\n File \"/usr/local/lib/python2.7/dist-packages/lexicon/__main__.py\", line 75, in generate_table_result\r\n array = [[row['id'], row['type'], row['name'], 
row['content'], row['ttl']] for row in output]\r\nTypeError: string indices must be integers\r\n```\r\n\r\nI think this is because `output` is a string not an array - when I added `print output` I got a string like `969f9caabe19859c11249333dd80aa15`.\r\n\r\nWhen I use `--output JSON` I get the same ID plus quotes:\r\n\r\n```bash\r\n$ lexicon memset create example.com TXT --name _acme-challenge.example.com --content BLAH --ttl 300 --output JSON\r\n\"969f9caabe19859c11249333dd80aa15\"\r\n```\r\n\r\nI know Memset's not public so if you need any help to test it just let me know. For now I'll work around it with `--output QUIET` since I don't really care about the output here.\r\n\r\nThanks!\r\nDave\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"Module for Lexicon command-line interface\"\"\"\nfrom __future__ import absolute_import, print_function\nimport json\nimport logging\nimport os\nimport sys\n\nfrom lexicon.client import Client\nfrom lexicon.config import ConfigResolver\nfrom lexicon.parser import generate_cli_main_parser\n\n\nlogger = logging.getLogger(__name__) # pylint: disable=C0103\n\n\ndef generate_table_result(lexicon_logger, output=None, without_header=None):\n \"\"\"Convert returned JSON into a nice table for command line usage\"\"\"\n try:\n _ = (entry for entry in output)\n except TypeError:\n lexicon_logger.debug('Command output is not iterable, and then cannot '\n 'be printed with --quiet parameter not enabled.')\n return None\n\n array = [[\n row.get('id', ''),\n row.get('type', ''),\n row.get('name', ''),\n row.get('content', ''),\n row.get('ttl', '')] for row in output]\n\n # Insert header (insert before calculating the max width of each column\n # to take headers size into account)\n if not without_header:\n headers = ['ID', 'TYPE', 'NAME', 'CONTENT', 'TTL']\n array.insert(0, headers)\n\n column_widths = [0, 0, 0, 0, 0]\n # Find max width for each column\n for row in array:\n for idx, col in enumerate(row):\n width = len(str(col))\n if width > column_widths[idx]:\n column_widths[idx] = width\n\n # Add a 'nice' separator\n if not without_header:\n array.insert(1, ['-' * column_widths[idx]\n for idx in range(len(column_widths))])\n\n # Construct table to be printed\n table = []\n for row in array:\n row_list = []\n for idx, col in enumerate(row):\n row_list.append(str(col).ljust(column_widths[idx]))\n table.append(' '.join(row_list))\n\n # Return table\n return '\\n'.join(table)\n\n\ndef handle_output(results, output_type):\n \"\"\"Print the relevant output for given output_type\"\"\"\n if not output_type == 'QUIET':\n if not output_type == 'JSON':\n table = generate_table_result(\n logger, results, output_type == 'TABLE-NO-HEADER')\n if table:\n print(table)\n else:\n try:\n _ = (entry for entry in results)\n json_str = json.dumps(results)\n if json_str:\n print(json_str)\n except TypeError:\n logger.debug('Output is not a JSON, and then cannot '\n 'be printed with --output=JSON parameter.')\n\n\ndef main():\n \"\"\"Main function of Lexicon.\"\"\"\n # Dynamically determine all the providers available and gather command line arguments.\n parsed_args = generate_cli_main_parser().parse_args()\n\n log_level = logging.getLevelName(parsed_args.log_level)\n logging.basicConfig(stream=sys.stdout, level=log_level,\n format='%(message)s')\n logger.debug('Arguments: %s', parsed_args)\n\n # In the CLI context, will get configuration interactively:\n # * from the command line\n # * from the environment variables\n # * from lexicon configuration files in working 
directory\n config = ConfigResolver()\n config.with_args(parsed_args).with_env().with_config_dir(os.getcwd())\n\n client = Client(config)\n\n results = client.execute()\n\n handle_output(results, parsed_args.output)\n\n\nif __name__ == '__main__':\n main()\n", "path": "lexicon/cli.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"Module for Lexicon command-line interface\"\"\"\nfrom __future__ import absolute_import, print_function\nimport json\nimport logging\nimport os\nimport sys\n\nfrom lexicon.client import Client\nfrom lexicon.config import ConfigResolver\nfrom lexicon.parser import generate_cli_main_parser\n\n\nlogger = logging.getLogger(__name__) # pylint: disable=C0103\n\n\ndef generate_list_table_result(lexicon_logger, output=None, without_header=None):\n \"\"\"Convert returned data from list actions into a nice table for command line usage\"\"\"\n if not isinstance(output, list):\n lexicon_logger.debug('Command output is not a list, and then cannot '\n 'be printed with --quiet parameter not enabled.')\n return None\n\n array = [[\n row.get('id', ''),\n row.get('type', ''),\n row.get('name', ''),\n row.get('content', ''),\n row.get('ttl', '')] for row in output]\n\n # Insert header (insert before calculating the max width of each column\n # to take headers size into account)\n if not without_header:\n headers = ['ID', 'TYPE', 'NAME', 'CONTENT', 'TTL']\n array.insert(0, headers)\n\n column_widths = [0, 0, 0, 0, 0]\n # Find max width for each column\n for row in array:\n for idx, col in enumerate(row):\n width = len(str(col))\n if width > column_widths[idx]:\n column_widths[idx] = width\n\n # Add a 'nice' separator\n if not without_header:\n array.insert(1, ['-' * column_widths[idx]\n for idx in range(len(column_widths))])\n\n # Construct table to be printed\n table = []\n for row in array:\n row_list = []\n for idx, col in enumerate(row):\n row_list.append(str(col).ljust(column_widths[idx]))\n table.append(' '.join(row_list))\n\n # Return table\n return os.linesep.join(table)\n\n\ndef generate_table_results(output=None, without_header=None):\n \"\"\"Convert returned data from non-list actions into a nice table for command line usage\"\"\"\n array = []\n str_output = str(output)\n\n if not without_header:\n array.append('RESULT')\n array.append('-' * max(6, len(str_output)))\n\n array.append(str_output)\n return os.linesep.join(array)\n\n\ndef handle_output(results, output_type, action):\n \"\"\"Print the relevant output for given output_type\"\"\"\n if output_type == 'QUIET':\n return\n\n if not output_type == 'JSON':\n if action == 'list':\n table = generate_list_table_result(\n logger, results, output_type == 'TABLE-NO-HEADER')\n else:\n table = generate_table_results(results, output_type == 'TABLE-NO-HEADER')\n if table:\n print(table)\n else:\n try:\n json_str = json.dumps(results)\n if json_str:\n print(json_str)\n except TypeError:\n logger.debug('Output is not JSON serializable, and then cannot '\n 'be printed with --output=JSON parameter.')\n\n\ndef main():\n \"\"\"Main function of Lexicon.\"\"\"\n # Dynamically determine all the providers available and gather command line arguments.\n parsed_args = generate_cli_main_parser().parse_args()\n\n log_level = logging.getLevelName(parsed_args.log_level)\n logging.basicConfig(stream=sys.stdout, level=log_level,\n format='%(message)s')\n logger.debug('Arguments: %s', parsed_args)\n\n # In the CLI context, will get configuration interactively:\n # * from the command line\n # * from the environment variables\n # * 
from lexicon configuration files in working directory\n config = ConfigResolver()\n config.with_args(parsed_args).with_env().with_config_dir(os.getcwd())\n\n client = Client(config)\n\n results = client.execute()\n\n handle_output(results, parsed_args.output, config.resolve('lexicon:action'))\n\n\nif __name__ == '__main__':\n main()\n", "path": "lexicon/cli.py"}]}
| 1,687 | 703 |
gh_patches_debug_31143
|
rasdani/github-patches
|
git_diff
|
pantsbuild__pants-17490
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Coarsened target calculation for `pylint` pulls in more files than it should on v2.15.0a0
**Describe the bug**
While testing `v2.15.0a0`, I saw that `./pants lint` would consistently freeze / be OOM killed. Through `py-spy` and logging I found that:
* Much time was being spent merging together source digests when setting up `pylint` runs
* The number of source digests in each of ^^^ setups was much larger than expected (tens of thousands of files for an input batch size of a few hundred)
* `pylint` batches of different sizes always ended up with the same number of source digests
While looking through the `pylint` changes in v2.15, I found that coarsened target calculation is currently running in the "partitioner" rule (see [here](https://github.com/pantsbuild/pants/blob/main/src/python/pants/backend/python/lint/pylint/rules.py#L89-L92)). This will result in too many targets being associated with each `pylint` batch, because the partitions returned by that rule are re-batched into smaller chunks according to `[lint].batch_size`, and there's no support in the re-batching logic for subsetting the partition metadata.
We should push the calculation of coarsened targets into the "runner" rule for `pylint`, so we only compute & hydrate the transitive dependencies that are relevant for the specific inputs used in each batch.
**Pants version**
v2.15.0a0
**OS**
Both
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/python/pants/backend/python/lint/pylint/rules.py`
Content:
```
1 # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from __future__ import annotations
5
6 from dataclasses import dataclass
7 from typing import Tuple
8
9 from pants.backend.python.lint.pylint.subsystem import (
10 Pylint,
11 PylintFieldSet,
12 PylintFirstPartyPlugins,
13 )
14 from pants.backend.python.subsystems.setup import PythonSetup
15 from pants.backend.python.util_rules import pex_from_targets
16 from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
17 from pants.backend.python.util_rules.partition import (
18 _partition_by_interpreter_constraints_and_resolve,
19 )
20 from pants.backend.python.util_rules.pex import (
21 Pex,
22 PexRequest,
23 VenvPex,
24 VenvPexProcess,
25 VenvPexRequest,
26 )
27 from pants.backend.python.util_rules.pex_from_targets import RequirementsPexRequest
28 from pants.backend.python.util_rules.python_sources import (
29 PythonSourceFiles,
30 PythonSourceFilesRequest,
31 )
32 from pants.core.goals.lint import REPORT_DIR, LintResult, LintTargetsRequest, Partitions
33 from pants.core.util_rules.config_files import ConfigFiles, ConfigFilesRequest
34 from pants.core.util_rules.partitions import Partition
35 from pants.engine.fs import CreateDigest, Digest, Directory, MergeDigests, RemovePrefix
36 from pants.engine.process import FallibleProcessResult
37 from pants.engine.rules import Get, MultiGet, collect_rules, rule
38 from pants.engine.target import CoarsenedTargets, CoarsenedTargetsRequest
39 from pants.util.logging import LogLevel
40 from pants.util.strutil import pluralize
41
42
43 @dataclass(frozen=True)
44 class PartitionMetadata:
45 coarsened_targets: CoarsenedTargets
46 # NB: These are the same across every element in a partition
47 resolve_description: str | None
48 interpreter_constraints: InterpreterConstraints
49
50 @property
51 def description(self) -> str:
52 ics = str(sorted(str(c) for c in self.interpreter_constraints))
53 return f"{self.resolve_description}, {ics}" if self.resolve_description else ics
54
55
56 class PylintRequest(LintTargetsRequest):
57 field_set_type = PylintFieldSet
58 tool_subsystem = Pylint
59
60
61 def generate_argv(field_sets: tuple[PylintFieldSet, ...], pylint: Pylint) -> Tuple[str, ...]:
62 args = []
63 if pylint.config is not None:
64 args.append(f"--rcfile={pylint.config}")
65 args.append("--jobs={pants_concurrency}")
66 args.extend(pylint.args)
67 args.extend(field_set.source.file_path for field_set in field_sets)
68 return tuple(args)
69
70
71 @rule(desc="Determine if necessary to partition Pylint input", level=LogLevel.DEBUG)
72 async def partition_pylint(
73 request: PylintRequest.PartitionRequest[PylintFieldSet],
74 pylint: Pylint,
75 python_setup: PythonSetup,
76 first_party_plugins: PylintFirstPartyPlugins,
77 ) -> Partitions[PylintFieldSet, PartitionMetadata]:
78 if pylint.skip:
79 return Partitions()
80
81 first_party_ics = InterpreterConstraints.create_from_compatibility_fields(
82 first_party_plugins.interpreter_constraints_fields, python_setup
83 )
84
85 resolve_and_interpreter_constraints_to_field_sets = (
86 _partition_by_interpreter_constraints_and_resolve(request.field_sets, python_setup)
87 )
88
89 coarsened_targets = await Get(
90 CoarsenedTargets,
91 CoarsenedTargetsRequest(field_set.address for field_set in request.field_sets),
92 )
93 coarsened_targets_by_address = coarsened_targets.by_address()
94
95 return Partitions(
96 Partition(
97 tuple(field_sets),
98 PartitionMetadata(
99 CoarsenedTargets(
100 coarsened_targets_by_address[field_set.address] for field_set in field_sets
101 ),
102 resolve if len(python_setup.resolves) > 1 else None,
103 InterpreterConstraints.merge((interpreter_constraints, first_party_ics)),
104 ),
105 )
106 for (
107 resolve,
108 interpreter_constraints,
109 ), field_sets, in resolve_and_interpreter_constraints_to_field_sets.items()
110 )
111
112
113 @rule(desc="Lint using Pylint", level=LogLevel.DEBUG)
114 async def run_pylint(
115 request: PylintRequest.Batch[PylintFieldSet, PartitionMetadata],
116 pylint: Pylint,
117 first_party_plugins: PylintFirstPartyPlugins,
118 ) -> LintResult:
119 assert request.partition_metadata is not None
120
121 requirements_pex_get = Get(
122 Pex,
123 RequirementsPexRequest(
124 (target.address for target in request.partition_metadata.coarsened_targets.closure()),
125 # NB: These constraints must be identical to the other PEXes. Otherwise, we risk using
126 # a different version for the requirements than the other two PEXes, which can result
127 # in a PEX runtime error about missing dependencies.
128 hardcoded_interpreter_constraints=request.partition_metadata.interpreter_constraints,
129 ),
130 )
131
132 pylint_pex_get = Get(
133 Pex,
134 PexRequest,
135 pylint.to_pex_request(
136 interpreter_constraints=request.partition_metadata.interpreter_constraints,
137 extra_requirements=first_party_plugins.requirement_strings,
138 ),
139 )
140
141 sources_get = Get(
142 PythonSourceFiles,
143 PythonSourceFilesRequest(request.partition_metadata.coarsened_targets.closure()),
144 )
145 # Ensure that the empty report dir exists.
146 report_directory_digest_get = Get(Digest, CreateDigest([Directory(REPORT_DIR)]))
147
148 (pylint_pex, requirements_pex, sources, report_directory,) = await MultiGet(
149 pylint_pex_get,
150 requirements_pex_get,
151 sources_get,
152 report_directory_digest_get,
153 )
154
155 pylint_runner_pex, config_files = await MultiGet(
156 Get(
157 VenvPex,
158 VenvPexRequest(
159 PexRequest(
160 output_filename="pylint_runner.pex",
161 interpreter_constraints=request.partition_metadata.interpreter_constraints,
162 main=pylint.main,
163 internal_only=True,
164 pex_path=[pylint_pex, requirements_pex],
165 ),
166 # TODO(John Sirois): Remove this (change to the default of symlinks) when we can
167 # upgrade to a version of Pylint with https://github.com/PyCQA/pylint/issues/1470
168 # resolved.
169 site_packages_copies=True,
170 ),
171 ),
172 Get(
173 ConfigFiles,
174 ConfigFilesRequest,
175 pylint.config_request(sources.source_files.snapshot.dirs),
176 ),
177 )
178
179 pythonpath = list(sources.source_roots)
180 if first_party_plugins:
181 pythonpath.append(first_party_plugins.PREFIX)
182
183 input_digest = await Get(
184 Digest,
185 MergeDigests(
186 (
187 config_files.snapshot.digest,
188 first_party_plugins.sources_digest,
189 sources.source_files.snapshot.digest,
190 report_directory,
191 )
192 ),
193 )
194
195 result = await Get(
196 FallibleProcessResult,
197 VenvPexProcess(
198 pylint_runner_pex,
199 argv=generate_argv(request.elements, pylint),
200 input_digest=input_digest,
201 output_directories=(REPORT_DIR,),
202 extra_env={"PEX_EXTRA_SYS_PATH": ":".join(pythonpath)},
203 concurrency_available=len(request.elements),
204 description=f"Run Pylint on {pluralize(len(request.elements), 'target')}.",
205 level=LogLevel.DEBUG,
206 ),
207 )
208 report = await Get(Digest, RemovePrefix(result.output_digest, REPORT_DIR))
209 return LintResult.create(request, result, report=report)
210
211
212 def rules():
213 return [
214 *collect_rules(),
215 *PylintRequest.rules(),
216 *pex_from_targets.rules(),
217 ]
218
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/python/pants/backend/python/lint/pylint/rules.py b/src/python/pants/backend/python/lint/pylint/rules.py
--- a/src/python/pants/backend/python/lint/pylint/rules.py
+++ b/src/python/pants/backend/python/lint/pylint/rules.py
@@ -118,10 +118,20 @@
) -> LintResult:
assert request.partition_metadata is not None
+ # The coarsened targets in the incoming request are for all targets in the request's original
+ # partition. Since the core `lint` logic re-batches inputs according to `[lint].batch_size`,
+ # this could be many more targets than are actually needed to lint the specific batch of files
+ # received by this rule. Subset the CTs one more time here to only those that are relevant.
+ all_coarsened_targets_by_address = request.partition_metadata.coarsened_targets.by_address()
+ coarsened_targets = CoarsenedTargets(
+ all_coarsened_targets_by_address[field_set.address] for field_set in request.elements
+ )
+ coarsened_closure = tuple(coarsened_targets.closure())
+
requirements_pex_get = Get(
Pex,
RequirementsPexRequest(
- (target.address for target in request.partition_metadata.coarsened_targets.closure()),
+ (target.address for target in coarsened_closure),
# NB: These constraints must be identical to the other PEXes. Otherwise, we risk using
# a different version for the requirements than the other two PEXes, which can result
# in a PEX runtime error about missing dependencies.
@@ -140,7 +150,7 @@
sources_get = Get(
PythonSourceFiles,
- PythonSourceFilesRequest(request.partition_metadata.coarsened_targets.closure()),
+ PythonSourceFilesRequest(coarsened_closure),
)
# Ensure that the empty report dir exists.
report_directory_digest_get = Get(Digest, CreateDigest([Directory(REPORT_DIR)]))
|
{"golden_diff": "diff --git a/src/python/pants/backend/python/lint/pylint/rules.py b/src/python/pants/backend/python/lint/pylint/rules.py\n--- a/src/python/pants/backend/python/lint/pylint/rules.py\n+++ b/src/python/pants/backend/python/lint/pylint/rules.py\n@@ -118,10 +118,20 @@\n ) -> LintResult:\n assert request.partition_metadata is not None\n \n+ # The coarsened targets in the incoming request are for all targets in the request's original\n+ # partition. Since the core `lint` logic re-batches inputs according to `[lint].batch_size`,\n+ # this could be many more targets than are actually needed to lint the specific batch of files\n+ # received by this rule. Subset the CTs one more time here to only those that are relevant.\n+ all_coarsened_targets_by_address = request.partition_metadata.coarsened_targets.by_address()\n+ coarsened_targets = CoarsenedTargets(\n+ all_coarsened_targets_by_address[field_set.address] for field_set in request.elements\n+ )\n+ coarsened_closure = tuple(coarsened_targets.closure())\n+\n requirements_pex_get = Get(\n Pex,\n RequirementsPexRequest(\n- (target.address for target in request.partition_metadata.coarsened_targets.closure()),\n+ (target.address for target in coarsened_closure),\n # NB: These constraints must be identical to the other PEXes. Otherwise, we risk using\n # a different version for the requirements than the other two PEXes, which can result\n # in a PEX runtime error about missing dependencies.\n@@ -140,7 +150,7 @@\n \n sources_get = Get(\n PythonSourceFiles,\n- PythonSourceFilesRequest(request.partition_metadata.coarsened_targets.closure()),\n+ PythonSourceFilesRequest(coarsened_closure),\n )\n # Ensure that the empty report dir exists.\n report_directory_digest_get = Get(Digest, CreateDigest([Directory(REPORT_DIR)]))\n", "issue": "Coarsened target calculation for `pylint` pulls in more files than it should on v2.15.0a0\n**Describe the bug**\r\n\r\nWhile testing `v2.15.0a0`, I saw that `./pants lint` would consistently freeze / be OOM killed. Through `py-spy` and logging I found that:\r\n\r\n* Much time was being spent merging together source digests when setting up `pylint` runs\r\n* The number of source digests in each of ^^^ setups was much larger than expected (tens of thousands of files for an input batch size of a few hundred)\r\n* `pylint` batches of different sizes always ended up with the same number of source digests\r\n\r\nWhile looking through the `pylint` changes in v2.15, I found that coarsened target calculation is currently running in the \"partitioner\" rule (see [here](https://github.com/pantsbuild/pants/blob/main/src/python/pants/backend/python/lint/pylint/rules.py#L89-L92)). 
This will result in too many targets being associated with each `pylint` batch, because the partitions returned by that rule are re-batched into smaller chunks according to `[lint].batch_size`, and there's no support in the re-batching logic for subsetting the partition metadata.\r\n\r\nWe should push the calculation of coarsened targets into the \"runner\" rule for `pylint`, so we only compute & hydrate the transitive dependencies that are relevant for the specific inputs used in each batch.\r\n\r\n**Pants version**\r\n\r\nv2.15.0a0\r\n\r\n**OS**\r\n\r\nBoth\n", "before_files": [{"content": "# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import Tuple\n\nfrom pants.backend.python.lint.pylint.subsystem import (\n Pylint,\n PylintFieldSet,\n PylintFirstPartyPlugins,\n)\nfrom pants.backend.python.subsystems.setup import PythonSetup\nfrom pants.backend.python.util_rules import pex_from_targets\nfrom pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints\nfrom pants.backend.python.util_rules.partition import (\n _partition_by_interpreter_constraints_and_resolve,\n)\nfrom pants.backend.python.util_rules.pex import (\n Pex,\n PexRequest,\n VenvPex,\n VenvPexProcess,\n VenvPexRequest,\n)\nfrom pants.backend.python.util_rules.pex_from_targets import RequirementsPexRequest\nfrom pants.backend.python.util_rules.python_sources import (\n PythonSourceFiles,\n PythonSourceFilesRequest,\n)\nfrom pants.core.goals.lint import REPORT_DIR, LintResult, LintTargetsRequest, Partitions\nfrom pants.core.util_rules.config_files import ConfigFiles, ConfigFilesRequest\nfrom pants.core.util_rules.partitions import Partition\nfrom pants.engine.fs import CreateDigest, Digest, Directory, MergeDigests, RemovePrefix\nfrom pants.engine.process import FallibleProcessResult\nfrom pants.engine.rules import Get, MultiGet, collect_rules, rule\nfrom pants.engine.target import CoarsenedTargets, CoarsenedTargetsRequest\nfrom pants.util.logging import LogLevel\nfrom pants.util.strutil import pluralize\n\n\n@dataclass(frozen=True)\nclass PartitionMetadata:\n coarsened_targets: CoarsenedTargets\n # NB: These are the same across every element in a partition\n resolve_description: str | None\n interpreter_constraints: InterpreterConstraints\n\n @property\n def description(self) -> str:\n ics = str(sorted(str(c) for c in self.interpreter_constraints))\n return f\"{self.resolve_description}, {ics}\" if self.resolve_description else ics\n\n\nclass PylintRequest(LintTargetsRequest):\n field_set_type = PylintFieldSet\n tool_subsystem = Pylint\n\n\ndef generate_argv(field_sets: tuple[PylintFieldSet, ...], pylint: Pylint) -> Tuple[str, ...]:\n args = []\n if pylint.config is not None:\n args.append(f\"--rcfile={pylint.config}\")\n args.append(\"--jobs={pants_concurrency}\")\n args.extend(pylint.args)\n args.extend(field_set.source.file_path for field_set in field_sets)\n return tuple(args)\n\n\n@rule(desc=\"Determine if necessary to partition Pylint input\", level=LogLevel.DEBUG)\nasync def partition_pylint(\n request: PylintRequest.PartitionRequest[PylintFieldSet],\n pylint: Pylint,\n python_setup: PythonSetup,\n first_party_plugins: PylintFirstPartyPlugins,\n) -> Partitions[PylintFieldSet, PartitionMetadata]:\n if pylint.skip:\n return Partitions()\n\n first_party_ics = InterpreterConstraints.create_from_compatibility_fields(\n 
first_party_plugins.interpreter_constraints_fields, python_setup\n )\n\n resolve_and_interpreter_constraints_to_field_sets = (\n _partition_by_interpreter_constraints_and_resolve(request.field_sets, python_setup)\n )\n\n coarsened_targets = await Get(\n CoarsenedTargets,\n CoarsenedTargetsRequest(field_set.address for field_set in request.field_sets),\n )\n coarsened_targets_by_address = coarsened_targets.by_address()\n\n return Partitions(\n Partition(\n tuple(field_sets),\n PartitionMetadata(\n CoarsenedTargets(\n coarsened_targets_by_address[field_set.address] for field_set in field_sets\n ),\n resolve if len(python_setup.resolves) > 1 else None,\n InterpreterConstraints.merge((interpreter_constraints, first_party_ics)),\n ),\n )\n for (\n resolve,\n interpreter_constraints,\n ), field_sets, in resolve_and_interpreter_constraints_to_field_sets.items()\n )\n\n\n@rule(desc=\"Lint using Pylint\", level=LogLevel.DEBUG)\nasync def run_pylint(\n request: PylintRequest.Batch[PylintFieldSet, PartitionMetadata],\n pylint: Pylint,\n first_party_plugins: PylintFirstPartyPlugins,\n) -> LintResult:\n assert request.partition_metadata is not None\n\n requirements_pex_get = Get(\n Pex,\n RequirementsPexRequest(\n (target.address for target in request.partition_metadata.coarsened_targets.closure()),\n # NB: These constraints must be identical to the other PEXes. Otherwise, we risk using\n # a different version for the requirements than the other two PEXes, which can result\n # in a PEX runtime error about missing dependencies.\n hardcoded_interpreter_constraints=request.partition_metadata.interpreter_constraints,\n ),\n )\n\n pylint_pex_get = Get(\n Pex,\n PexRequest,\n pylint.to_pex_request(\n interpreter_constraints=request.partition_metadata.interpreter_constraints,\n extra_requirements=first_party_plugins.requirement_strings,\n ),\n )\n\n sources_get = Get(\n PythonSourceFiles,\n PythonSourceFilesRequest(request.partition_metadata.coarsened_targets.closure()),\n )\n # Ensure that the empty report dir exists.\n report_directory_digest_get = Get(Digest, CreateDigest([Directory(REPORT_DIR)]))\n\n (pylint_pex, requirements_pex, sources, report_directory,) = await MultiGet(\n pylint_pex_get,\n requirements_pex_get,\n sources_get,\n report_directory_digest_get,\n )\n\n pylint_runner_pex, config_files = await MultiGet(\n Get(\n VenvPex,\n VenvPexRequest(\n PexRequest(\n output_filename=\"pylint_runner.pex\",\n interpreter_constraints=request.partition_metadata.interpreter_constraints,\n main=pylint.main,\n internal_only=True,\n pex_path=[pylint_pex, requirements_pex],\n ),\n # TODO(John Sirois): Remove this (change to the default of symlinks) when we can\n # upgrade to a version of Pylint with https://github.com/PyCQA/pylint/issues/1470\n # resolved.\n site_packages_copies=True,\n ),\n ),\n Get(\n ConfigFiles,\n ConfigFilesRequest,\n pylint.config_request(sources.source_files.snapshot.dirs),\n ),\n )\n\n pythonpath = list(sources.source_roots)\n if first_party_plugins:\n pythonpath.append(first_party_plugins.PREFIX)\n\n input_digest = await Get(\n Digest,\n MergeDigests(\n (\n config_files.snapshot.digest,\n first_party_plugins.sources_digest,\n sources.source_files.snapshot.digest,\n report_directory,\n )\n ),\n )\n\n result = await Get(\n FallibleProcessResult,\n VenvPexProcess(\n pylint_runner_pex,\n argv=generate_argv(request.elements, pylint),\n input_digest=input_digest,\n output_directories=(REPORT_DIR,),\n extra_env={\"PEX_EXTRA_SYS_PATH\": \":\".join(pythonpath)},\n 
concurrency_available=len(request.elements),\n description=f\"Run Pylint on {pluralize(len(request.elements), 'target')}.\",\n level=LogLevel.DEBUG,\n ),\n )\n report = await Get(Digest, RemovePrefix(result.output_digest, REPORT_DIR))\n return LintResult.create(request, result, report=report)\n\n\ndef rules():\n return [\n *collect_rules(),\n *PylintRequest.rules(),\n *pex_from_targets.rules(),\n ]\n", "path": "src/python/pants/backend/python/lint/pylint/rules.py"}], "after_files": [{"content": "# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import Tuple\n\nfrom pants.backend.python.lint.pylint.subsystem import (\n Pylint,\n PylintFieldSet,\n PylintFirstPartyPlugins,\n)\nfrom pants.backend.python.subsystems.setup import PythonSetup\nfrom pants.backend.python.util_rules import pex_from_targets\nfrom pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints\nfrom pants.backend.python.util_rules.partition import (\n _partition_by_interpreter_constraints_and_resolve,\n)\nfrom pants.backend.python.util_rules.pex import (\n Pex,\n PexRequest,\n VenvPex,\n VenvPexProcess,\n VenvPexRequest,\n)\nfrom pants.backend.python.util_rules.pex_from_targets import RequirementsPexRequest\nfrom pants.backend.python.util_rules.python_sources import (\n PythonSourceFiles,\n PythonSourceFilesRequest,\n)\nfrom pants.core.goals.lint import REPORT_DIR, LintResult, LintTargetsRequest, Partitions\nfrom pants.core.util_rules.config_files import ConfigFiles, ConfigFilesRequest\nfrom pants.core.util_rules.partitions import Partition\nfrom pants.engine.fs import CreateDigest, Digest, Directory, MergeDigests, RemovePrefix\nfrom pants.engine.process import FallibleProcessResult\nfrom pants.engine.rules import Get, MultiGet, collect_rules, rule\nfrom pants.engine.target import CoarsenedTargets, CoarsenedTargetsRequest\nfrom pants.util.logging import LogLevel\nfrom pants.util.strutil import pluralize\n\n\n@dataclass(frozen=True)\nclass PartitionMetadata:\n coarsened_targets: CoarsenedTargets\n # NB: These are the same across every element in a partition\n resolve_description: str | None\n interpreter_constraints: InterpreterConstraints\n\n @property\n def description(self) -> str:\n ics = str(sorted(str(c) for c in self.interpreter_constraints))\n return f\"{self.resolve_description}, {ics}\" if self.resolve_description else ics\n\n\nclass PylintRequest(LintTargetsRequest):\n field_set_type = PylintFieldSet\n tool_subsystem = Pylint\n\n\ndef generate_argv(field_sets: tuple[PylintFieldSet, ...], pylint: Pylint) -> Tuple[str, ...]:\n args = []\n if pylint.config is not None:\n args.append(f\"--rcfile={pylint.config}\")\n args.append(\"--jobs={pants_concurrency}\")\n args.extend(pylint.args)\n args.extend(field_set.source.file_path for field_set in field_sets)\n return tuple(args)\n\n\n@rule(desc=\"Determine if necessary to partition Pylint input\", level=LogLevel.DEBUG)\nasync def partition_pylint(\n request: PylintRequest.PartitionRequest[PylintFieldSet],\n pylint: Pylint,\n python_setup: PythonSetup,\n first_party_plugins: PylintFirstPartyPlugins,\n) -> Partitions[PylintFieldSet, PartitionMetadata]:\n if pylint.skip:\n return Partitions()\n\n first_party_ics = InterpreterConstraints.create_from_compatibility_fields(\n first_party_plugins.interpreter_constraints_fields, python_setup\n )\n\n 
resolve_and_interpreter_constraints_to_field_sets = (\n _partition_by_interpreter_constraints_and_resolve(request.field_sets, python_setup)\n )\n\n coarsened_targets = await Get(\n CoarsenedTargets,\n CoarsenedTargetsRequest(field_set.address for field_set in request.field_sets),\n )\n coarsened_targets_by_address = coarsened_targets.by_address()\n\n return Partitions(\n Partition(\n tuple(field_sets),\n PartitionMetadata(\n CoarsenedTargets(\n coarsened_targets_by_address[field_set.address] for field_set in field_sets\n ),\n resolve if len(python_setup.resolves) > 1 else None,\n InterpreterConstraints.merge((interpreter_constraints, first_party_ics)),\n ),\n )\n for (\n resolve,\n interpreter_constraints,\n ), field_sets, in resolve_and_interpreter_constraints_to_field_sets.items()\n )\n\n\n@rule(desc=\"Lint using Pylint\", level=LogLevel.DEBUG)\nasync def run_pylint(\n request: PylintRequest.Batch[PylintFieldSet, PartitionMetadata],\n pylint: Pylint,\n first_party_plugins: PylintFirstPartyPlugins,\n) -> LintResult:\n assert request.partition_metadata is not None\n\n # The coarsened targets in the incoming request are for all targets in the request's original\n # partition. Since the core `lint` logic re-batches inputs according to `[lint].batch_size`,\n # this could be many more targets than are actually needed to lint the specific batch of files\n # received by this rule. Subset the CTs one more time here to only those that are relevant.\n all_coarsened_targets_by_address = request.partition_metadata.coarsened_targets.by_address()\n coarsened_targets = CoarsenedTargets(\n all_coarsened_targets_by_address[field_set.address] for field_set in request.elements\n )\n coarsened_closure = tuple(coarsened_targets.closure())\n\n requirements_pex_get = Get(\n Pex,\n RequirementsPexRequest(\n (target.address for target in coarsened_closure),\n # NB: These constraints must be identical to the other PEXes. 
Otherwise, we risk using\n # a different version for the requirements than the other two PEXes, which can result\n # in a PEX runtime error about missing dependencies.\n hardcoded_interpreter_constraints=request.partition_metadata.interpreter_constraints,\n ),\n )\n\n pylint_pex_get = Get(\n Pex,\n PexRequest,\n pylint.to_pex_request(\n interpreter_constraints=request.partition_metadata.interpreter_constraints,\n extra_requirements=first_party_plugins.requirement_strings,\n ),\n )\n\n sources_get = Get(\n PythonSourceFiles,\n PythonSourceFilesRequest(coarsened_closure),\n )\n # Ensure that the empty report dir exists.\n report_directory_digest_get = Get(Digest, CreateDigest([Directory(REPORT_DIR)]))\n\n (pylint_pex, requirements_pex, sources, report_directory,) = await MultiGet(\n pylint_pex_get,\n requirements_pex_get,\n sources_get,\n report_directory_digest_get,\n )\n\n pylint_runner_pex, config_files = await MultiGet(\n Get(\n VenvPex,\n VenvPexRequest(\n PexRequest(\n output_filename=\"pylint_runner.pex\",\n interpreter_constraints=request.partition_metadata.interpreter_constraints,\n main=pylint.main,\n internal_only=True,\n pex_path=[pylint_pex, requirements_pex],\n ),\n # TODO(John Sirois): Remove this (change to the default of symlinks) when we can\n # upgrade to a version of Pylint with https://github.com/PyCQA/pylint/issues/1470\n # resolved.\n site_packages_copies=True,\n ),\n ),\n Get(\n ConfigFiles,\n ConfigFilesRequest,\n pylint.config_request(sources.source_files.snapshot.dirs),\n ),\n )\n\n pythonpath = list(sources.source_roots)\n if first_party_plugins:\n pythonpath.append(first_party_plugins.PREFIX)\n\n input_digest = await Get(\n Digest,\n MergeDigests(\n (\n config_files.snapshot.digest,\n first_party_plugins.sources_digest,\n sources.source_files.snapshot.digest,\n report_directory,\n )\n ),\n )\n\n result = await Get(\n FallibleProcessResult,\n VenvPexProcess(\n pylint_runner_pex,\n argv=generate_argv(request.elements, pylint),\n input_digest=input_digest,\n output_directories=(REPORT_DIR,),\n extra_env={\"PEX_EXTRA_SYS_PATH\": \":\".join(pythonpath)},\n concurrency_available=len(request.elements),\n description=f\"Run Pylint on {pluralize(len(request.elements), 'target')}.\",\n level=LogLevel.DEBUG,\n ),\n )\n report = await Get(Digest, RemovePrefix(result.output_digest, REPORT_DIR))\n return LintResult.create(request, result, report=report)\n\n\ndef rules():\n return [\n *collect_rules(),\n *PylintRequest.rules(),\n *pex_from_targets.rules(),\n ]\n", "path": "src/python/pants/backend/python/lint/pylint/rules.py"}]}
| 2,795 | 439 |
gh_patches_debug_3258
|
rasdani/github-patches
|
git_diff
|
ManimCommunity__manim-755
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"manim -" is not working
I broke this when revamping the config system. Thanks @naveen521kk for reporting
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `manim/utils/module_ops.py`
Content:
```
1 from .. import constants, logger, console, config
2 import importlib.util
3 import inspect
4 import os
5 from pathlib import Path
6 import sys
7 import types
8 import re
9
10
11 def get_module(file_name):
12 if file_name == "-":
13 module = types.ModuleType("input_scenes")
14 logger.info(
15 "Enter the animation's code & end with an EOF (CTRL+D on Linux/Unix, CTRL+Z on Windows):"
16 )
17 code = sys.stdin.read()
18 if not code.startswith("from manim import"):
19 logger.warn(
20 "Didn't find an import statement for Manim. Importing automatically..."
21 )
22 code = "from manim import *\n" + code
23 logger.info("Rendering animation from typed code...")
24 try:
25 exec(code, module.__dict__)
26 return module
27 except Exception as e:
28 logger.error(f"Failed to render scene: {str(e)}")
29 sys.exit(2)
30 else:
31 if Path(file_name).exists():
32 ext = file_name.suffix
33 if ext != ".py":
34 raise ValueError(f"{file_name} is not a valid Manim python script.")
35 module_name = ext.replace(os.sep, ".").split(".")[-1]
36 spec = importlib.util.spec_from_file_location(module_name, file_name)
37 module = importlib.util.module_from_spec(spec)
38 sys.modules[module_name] = module
39 spec.loader.exec_module(module)
40 return module
41 else:
42 raise FileNotFoundError(f"{file_name} not found")
43
44
45 def get_scene_classes_from_module(module):
46 from ..scene.scene import Scene
47
48 def is_child_scene(obj, module):
49 return (
50 inspect.isclass(obj)
51 and issubclass(obj, Scene)
52 and obj != Scene
53 and obj.__module__.startswith(module.__name__)
54 )
55
56 return [
57 member[1]
58 for member in inspect.getmembers(module, lambda x: is_child_scene(x, module))
59 ]
60
61
62 def get_scenes_to_render(scene_classes):
63 if not scene_classes:
64 logger.error(constants.NO_SCENE_MESSAGE)
65 return []
66 if config["write_all"]:
67 return scene_classes
68 result = []
69 for scene_name in config["scene_names"]:
70 found = False
71 for scene_class in scene_classes:
72 if scene_class.__name__ == scene_name:
73 result.append(scene_class)
74 found = True
75 break
76 if not found and (scene_name != ""):
77 logger.error(constants.SCENE_NOT_FOUND_MESSAGE.format(scene_name))
78 if result:
79 return result
80 return (
81 [scene_classes[0]]
82 if len(scene_classes) == 1
83 else prompt_user_for_choice(scene_classes)
84 )
85
86
87 def prompt_user_for_choice(scene_classes):
88 num_to_class = {}
89 for count, scene_class in enumerate(scene_classes):
90 count += 1 # start with 1 instead of 0
91 name = scene_class.__name__
92 console.print(f"{count}: {name}", style="logging.level.info")
93 num_to_class[count] = scene_class
94 try:
95 user_input = console.input(
96 f"[log.message] {constants.CHOOSE_NUMBER_MESSAGE} [/log.message]"
97 )
98 return [
99 num_to_class[int(num_str)]
100 for num_str in re.split(r"\s*,\s*", user_input.strip())
101 ]
102 except KeyError:
103 logger.error(constants.INVALID_NUMBER_MESSAGE)
104 sys.exit(2)
105 except EOFError:
106 sys.exit(1)
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/manim/utils/module_ops.py b/manim/utils/module_ops.py
--- a/manim/utils/module_ops.py
+++ b/manim/utils/module_ops.py
@@ -9,7 +9,7 @@
def get_module(file_name):
- if file_name == "-":
+ if str(file_name) == "-":
module = types.ModuleType("input_scenes")
logger.info(
"Enter the animation's code & end with an EOF (CTRL+D on Linux/Unix, CTRL+Z on Windows):"
|
{"golden_diff": "diff --git a/manim/utils/module_ops.py b/manim/utils/module_ops.py\n--- a/manim/utils/module_ops.py\n+++ b/manim/utils/module_ops.py\n@@ -9,7 +9,7 @@\n \n \n def get_module(file_name):\n- if file_name == \"-\":\n+ if str(file_name) == \"-\":\n module = types.ModuleType(\"input_scenes\")\n logger.info(\n \"Enter the animation's code & end with an EOF (CTRL+D on Linux/Unix, CTRL+Z on Windows):\"\n", "issue": "\"manim -\" is not working\nI broke this when revamping the config system. Thanks @naveen521kk for reporting\n", "before_files": [{"content": "from .. import constants, logger, console, config\nimport importlib.util\nimport inspect\nimport os\nfrom pathlib import Path\nimport sys\nimport types\nimport re\n\n\ndef get_module(file_name):\n if file_name == \"-\":\n module = types.ModuleType(\"input_scenes\")\n logger.info(\n \"Enter the animation's code & end with an EOF (CTRL+D on Linux/Unix, CTRL+Z on Windows):\"\n )\n code = sys.stdin.read()\n if not code.startswith(\"from manim import\"):\n logger.warn(\n \"Didn't find an import statement for Manim. Importing automatically...\"\n )\n code = \"from manim import *\\n\" + code\n logger.info(\"Rendering animation from typed code...\")\n try:\n exec(code, module.__dict__)\n return module\n except Exception as e:\n logger.error(f\"Failed to render scene: {str(e)}\")\n sys.exit(2)\n else:\n if Path(file_name).exists():\n ext = file_name.suffix\n if ext != \".py\":\n raise ValueError(f\"{file_name} is not a valid Manim python script.\")\n module_name = ext.replace(os.sep, \".\").split(\".\")[-1]\n spec = importlib.util.spec_from_file_location(module_name, file_name)\n module = importlib.util.module_from_spec(spec)\n sys.modules[module_name] = module\n spec.loader.exec_module(module)\n return module\n else:\n raise FileNotFoundError(f\"{file_name} not found\")\n\n\ndef get_scene_classes_from_module(module):\n from ..scene.scene import Scene\n\n def is_child_scene(obj, module):\n return (\n inspect.isclass(obj)\n and issubclass(obj, Scene)\n and obj != Scene\n and obj.__module__.startswith(module.__name__)\n )\n\n return [\n member[1]\n for member in inspect.getmembers(module, lambda x: is_child_scene(x, module))\n ]\n\n\ndef get_scenes_to_render(scene_classes):\n if not scene_classes:\n logger.error(constants.NO_SCENE_MESSAGE)\n return []\n if config[\"write_all\"]:\n return scene_classes\n result = []\n for scene_name in config[\"scene_names\"]:\n found = False\n for scene_class in scene_classes:\n if scene_class.__name__ == scene_name:\n result.append(scene_class)\n found = True\n break\n if not found and (scene_name != \"\"):\n logger.error(constants.SCENE_NOT_FOUND_MESSAGE.format(scene_name))\n if result:\n return result\n return (\n [scene_classes[0]]\n if len(scene_classes) == 1\n else prompt_user_for_choice(scene_classes)\n )\n\n\ndef prompt_user_for_choice(scene_classes):\n num_to_class = {}\n for count, scene_class in enumerate(scene_classes):\n count += 1 # start with 1 instead of 0\n name = scene_class.__name__\n console.print(f\"{count}: {name}\", style=\"logging.level.info\")\n num_to_class[count] = scene_class\n try:\n user_input = console.input(\n f\"[log.message] {constants.CHOOSE_NUMBER_MESSAGE} [/log.message]\"\n )\n return [\n num_to_class[int(num_str)]\n for num_str in re.split(r\"\\s*,\\s*\", user_input.strip())\n ]\n except KeyError:\n logger.error(constants.INVALID_NUMBER_MESSAGE)\n sys.exit(2)\n except EOFError:\n sys.exit(1)\n", "path": "manim/utils/module_ops.py"}], "after_files": [{"content": "from .. 
import constants, logger, console, config\nimport importlib.util\nimport inspect\nimport os\nfrom pathlib import Path\nimport sys\nimport types\nimport re\n\n\ndef get_module(file_name):\n if str(file_name) == \"-\":\n module = types.ModuleType(\"input_scenes\")\n logger.info(\n \"Enter the animation's code & end with an EOF (CTRL+D on Linux/Unix, CTRL+Z on Windows):\"\n )\n code = sys.stdin.read()\n if not code.startswith(\"from manim import\"):\n logger.warn(\n \"Didn't find an import statement for Manim. Importing automatically...\"\n )\n code = \"from manim import *\\n\" + code\n logger.info(\"Rendering animation from typed code...\")\n try:\n exec(code, module.__dict__)\n return module\n except Exception as e:\n logger.error(f\"Failed to render scene: {str(e)}\")\n sys.exit(2)\n else:\n if Path(file_name).exists():\n ext = file_name.suffix\n if ext != \".py\":\n raise ValueError(f\"{file_name} is not a valid Manim python script.\")\n module_name = ext.replace(os.sep, \".\").split(\".\")[-1]\n spec = importlib.util.spec_from_file_location(module_name, file_name)\n module = importlib.util.module_from_spec(spec)\n sys.modules[module_name] = module\n spec.loader.exec_module(module)\n return module\n else:\n raise FileNotFoundError(f\"{file_name} not found\")\n\n\ndef get_scene_classes_from_module(module):\n from ..scene.scene import Scene\n\n def is_child_scene(obj, module):\n return (\n inspect.isclass(obj)\n and issubclass(obj, Scene)\n and obj != Scene\n and obj.__module__.startswith(module.__name__)\n )\n\n return [\n member[1]\n for member in inspect.getmembers(module, lambda x: is_child_scene(x, module))\n ]\n\n\ndef get_scenes_to_render(scene_classes):\n if not scene_classes:\n logger.error(constants.NO_SCENE_MESSAGE)\n return []\n if config[\"write_all\"]:\n return scene_classes\n result = []\n for scene_name in config[\"scene_names\"]:\n found = False\n for scene_class in scene_classes:\n if scene_class.__name__ == scene_name:\n result.append(scene_class)\n found = True\n break\n if not found and (scene_name != \"\"):\n logger.error(constants.SCENE_NOT_FOUND_MESSAGE.format(scene_name))\n if result:\n return result\n return (\n [scene_classes[0]]\n if len(scene_classes) == 1\n else prompt_user_for_choice(scene_classes)\n )\n\n\ndef prompt_user_for_choice(scene_classes):\n num_to_class = {}\n for count, scene_class in enumerate(scene_classes):\n count += 1 # start with 1 instead of 0\n name = scene_class.__name__\n console.print(f\"{count}: {name}\", style=\"logging.level.info\")\n num_to_class[count] = scene_class\n try:\n user_input = console.input(\n f\"[log.message] {constants.CHOOSE_NUMBER_MESSAGE} [/log.message]\"\n )\n return [\n num_to_class[int(num_str)]\n for num_str in re.split(r\"\\s*,\\s*\", user_input.strip())\n ]\n except KeyError:\n logger.error(constants.INVALID_NUMBER_MESSAGE)\n sys.exit(2)\n except EOFError:\n sys.exit(1)\n", "path": "manim/utils/module_ops.py"}]}
| 1,242 | 114 |
gh_patches_debug_12818
|
rasdani/github-patches
|
git_diff
|
replicate__cog-620
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
What should `cog predict` do if you don't pass an input name?
The syntax of `cog predict` is this:
cog predict -i [email protected]
But you can also do this:
cog predict -i @image.jpg
Which implicitly means an input name of `input`. This is a neat shorthand but a bit weird for a few reasons:
- `input` is a Python built-in, so we should really not be encouraging using that as a variable name.
- It is a magic name.
- For a sufficiently complex model, you probably don't want to call it `input`.
What could we do that is better here? Maybe if you don't pass a name, it defaults to the _first_ input defined, rather than a magic name? This is vaguely backwards compatible, which is neat.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pkg/cli/init-templates/predict.py`
Content:
```
1 # Prediction interface for Cog ⚙️
2 # https://github.com/replicate/cog/blob/main/docs/python.md
3
4 from cog import BasePredictor, Input, Path
5
6
7 class Predictor(BasePredictor):
8 def setup(self):
9 """Load the model into memory to make running multiple predictions efficient"""
10 # self.model = torch.load("./weights.pth")
11
12 def predict(
13 self,
14 input: Path = Input(description="Grayscale input image"),
15 scale: float = Input(
16 description="Factor to scale image by", ge=0, le=10, default=1.5
17 ),
18 ) -> Path:
19 """Run a single prediction on the model"""
20 # processed_input = preprocess(input)
21 # output = self.model(processed_input, scale)
22 # return postprocess(output)
23
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pkg/cli/init-templates/predict.py b/pkg/cli/init-templates/predict.py
--- a/pkg/cli/init-templates/predict.py
+++ b/pkg/cli/init-templates/predict.py
@@ -11,12 +11,12 @@
def predict(
self,
- input: Path = Input(description="Grayscale input image"),
+ image: Path = Input(description="Grayscale input image"),
scale: float = Input(
description="Factor to scale image by", ge=0, le=10, default=1.5
),
) -> Path:
"""Run a single prediction on the model"""
- # processed_input = preprocess(input)
- # output = self.model(processed_input, scale)
+ # processed_input = preprocess(image)
+ # output = self.model(processed_image, scale)
# return postprocess(output)
|
{"golden_diff": "diff --git a/pkg/cli/init-templates/predict.py b/pkg/cli/init-templates/predict.py\n--- a/pkg/cli/init-templates/predict.py\n+++ b/pkg/cli/init-templates/predict.py\n@@ -11,12 +11,12 @@\n \n def predict(\n self,\n- input: Path = Input(description=\"Grayscale input image\"),\n+ image: Path = Input(description=\"Grayscale input image\"),\n scale: float = Input(\n description=\"Factor to scale image by\", ge=0, le=10, default=1.5\n ),\n ) -> Path:\n \"\"\"Run a single prediction on the model\"\"\"\n- # processed_input = preprocess(input)\n- # output = self.model(processed_input, scale)\n+ # processed_input = preprocess(image)\n+ # output = self.model(processed_image, scale)\n # return postprocess(output)\n", "issue": "What should `cog predict` do if you don't pass an input name?\nThe syntax of `cog predict` is this:\r\n\r\n cog predict -i [email protected]\r\n\r\nBut you can also do this:\r\n\r\n cog predict -i @image.jpg\r\n\r\nWhich implicitly means an input name of `input`. This is a neat short hand but a bit weird for a few reasons:\r\n\r\n- `input` is a Python built-in, so we should really be encouraging using that as a variable name.\r\n- It is a magic name.\r\n- For a sufficiently complex model, you probably don't want to call it `input`.\r\n\r\nWhat could we do that is better here? Maybe if you don't pass a name, it defaults to the _first_ input defined, rather than a magic name? This is vaguely backwards compatible, which is neat.\n", "before_files": [{"content": "# Prediction interface for Cog \u2699\ufe0f\n# https://github.com/replicate/cog/blob/main/docs/python.md\n\nfrom cog import BasePredictor, Input, Path\n\n\nclass Predictor(BasePredictor):\n def setup(self):\n \"\"\"Load the model into memory to make running multiple predictions efficient\"\"\"\n # self.model = torch.load(\"./weights.pth\")\n\n def predict(\n self,\n input: Path = Input(description=\"Grayscale input image\"),\n scale: float = Input(\n description=\"Factor to scale image by\", ge=0, le=10, default=1.5\n ),\n ) -> Path:\n \"\"\"Run a single prediction on the model\"\"\"\n # processed_input = preprocess(input)\n # output = self.model(processed_input, scale)\n # return postprocess(output)\n", "path": "pkg/cli/init-templates/predict.py"}], "after_files": [{"content": "# Prediction interface for Cog \u2699\ufe0f\n# https://github.com/replicate/cog/blob/main/docs/python.md\n\nfrom cog import BasePredictor, Input, Path\n\n\nclass Predictor(BasePredictor):\n def setup(self):\n \"\"\"Load the model into memory to make running multiple predictions efficient\"\"\"\n # self.model = torch.load(\"./weights.pth\")\n\n def predict(\n self,\n image: Path = Input(description=\"Grayscale input image\"),\n scale: float = Input(\n description=\"Factor to scale image by\", ge=0, le=10, default=1.5\n ),\n ) -> Path:\n \"\"\"Run a single prediction on the model\"\"\"\n # processed_input = preprocess(image)\n # output = self.model(processed_image, scale)\n # return postprocess(output)\n", "path": "pkg/cli/init-templates/predict.py"}]}
| 644 | 193 |
gh_patches_debug_40554
|
rasdani/github-patches
|
git_diff
|
ESMCI__cime-886
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bless_test_results - can not find create_test command
I am using tag:
https://github.com/CESM-Development/cime/tags/cime5.2.0-alpha.10
When I issue the command:
/glade/u/home/cacraig/cam5_4_94/cime/scripts/Tools/bless_test_results -t '' -c '' -r /glade/scratch/cacraig/aux_cam_20161123172411 -b cam5_4_94-copy6 -f
I get the following error:
FAILED TO BLESS TEST: SMS_Lm13.f09_f09.F2000_DEV.yellowstone_intel.cam-outfrq1m_clm5, reason Namelist regen failed: '/bin/sh: create_test: command not found'
The baseline directory created by this script has the netCDF files in it, but no namelists are copied. I've tried running this command from various locations including the cime/scripts directory where create_test resides, and cannot eliminate this error.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `utils/python/CIME/case_cmpgen_namelists.py`
Content:
```
1 """
2 Library for case.cmpgen_namelists.
3 """
4
5 from CIME.XML.standard_module_setup import *
6
7 from CIME.preview_namelists import create_namelists
8 from CIME.compare_namelists import is_namelist_file, compare_namelist_files
9 from CIME.simple_compare import compare_files
10 from CIME.utils import get_current_branch, append_status
11 from CIME.test_status import *
12
13 import os, shutil, traceback, stat, glob
14
15 logger = logging.getLogger(__name__)
16
17 def _do_full_nl_comp(case, test, compare_name):
18 test_dir = case.get_value("CASEROOT")
19 casedoc_dir = os.path.join(test_dir, "CaseDocs")
20 baseline_root = case.get_value("BASELINE_ROOT")
21
22 all_match = True
23 baseline_dir = os.path.join(baseline_root, compare_name, test)
24 baseline_casedocs = os.path.join(baseline_dir, "CaseDocs")
25
26 # Start off by comparing everything in CaseDocs except a few arbitrary files (ugh!)
27 # TODO: Namelist files should have consistent suffix
28 all_items_to_compare = [item for item in glob.glob("%s/*" % casedoc_dir)\
29 if "README" not in os.path.basename(item)\
30 and not item.endswith("doc")\
31 and not item.endswith("prescribed")\
32 and not os.path.basename(item).startswith(".")] + \
33 glob.glob("%s/*user_nl*" % test_dir)
34
35 comments = ""
36 for item in all_items_to_compare:
37 baseline_counterpart = os.path.join(baseline_casedocs \
38 if os.path.dirname(item).endswith("CaseDocs") \
39 else baseline_dir,os.path.basename(item))
40 if not os.path.exists(baseline_counterpart):
41 comments += "Missing baseline namelist '%s'\n" % baseline_counterpart
42 all_match = False
43 else:
44 if is_namelist_file(item):
45 success, current_comments = compare_namelist_files(baseline_counterpart, item, test)
46 else:
47 success, current_comments = compare_files(baseline_counterpart, item, test)
48
49 all_match &= success
50 comments += current_comments
51
52 logging.info(comments)
53 return all_match, comments
54
55 def _do_full_nl_gen(case, test, generate_name):
56 test_dir = case.get_value("CASEROOT")
57 casedoc_dir = os.path.join(test_dir, "CaseDocs")
58 baseline_root = case.get_value("BASELINE_ROOT")
59
60 baseline_dir = os.path.join(baseline_root, generate_name, test)
61 baseline_casedocs = os.path.join(baseline_dir, "CaseDocs")
62
63 if not os.path.isdir(baseline_dir):
64 os.makedirs(baseline_dir, stat.S_IRWXU | stat.S_IRWXG | stat.S_IXOTH | stat.S_IROTH)
65
66 if os.path.isdir(baseline_casedocs):
67 shutil.rmtree(baseline_casedocs)
68
69 shutil.copytree(casedoc_dir, baseline_casedocs)
70 os.chmod(baseline_casedocs, stat.S_IRWXU | stat.S_IRWXG | stat.S_IXOTH | stat.S_IROTH)
71 for item in glob.glob("%s/*" % baseline_casedocs):
72 os.chmod(item, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP)
73
74 for item in glob.glob(os.path.join(test_dir, "user_nl*")):
75 preexisting_baseline = os.path.join(baseline_dir, os.path.basename(item))
76 if (os.path.exists(preexisting_baseline)):
77 os.remove(preexisting_baseline)
78
79 shutil.copy2(item, baseline_dir)
80 os.chmod(preexisting_baseline, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP)
81
82 def case_cmpgen_namelists(case, compare=False, generate=False, compare_name=None, generate_name=None):
83 expect(case.get_value("TEST"), "Only makes sense to run this for a test case")
84
85 caseroot, casebaseid = case.get_value("CASEROOT"), case.get_value("CASEBASEID")
86
87 if not compare:
88 compare = case.get_value("COMPARE_BASELINE")
89 if not generate:
90 generate = case.get_value("GENERATE_BASELINE")
91
92 if not compare and not generate:
93 logging.info("Nothing to do")
94 return True
95
96 # create namelists for case if they haven't been already
97 casedocs = os.path.join(caseroot, "CaseDocs")
98 if not os.path.exists(os.path.join(casedocs, "drv_in")):
99 create_namelists(case)
100
101 test_name = casebaseid if casebaseid is not None else case.get_value("CASE")
102 with TestStatus(test_dir=caseroot, test_name=test_name) as ts:
103 try:
104 # Inside this try are where we catch non-fatal errors, IE errors involving
105 # baseline operations which may not directly impact the functioning of the viability of this case
106 if compare and not compare_name:
107 compare_name = case.get_value("BASELINE_NAME_CMP")
108 compare_name = get_current_branch() if compare_name is None else compare_name
109 expect(compare_name, "Was asked to do baseline compare but unable to determine baseline name")
110 logging.info("Comparing namelists with baselines '%s'" % compare_name)
111 if generate and not generate_name:
112 generate_name = case.get_value("BASELINE_NAME_GEN")
113 generate_name = get_current_branch() if generate_name is None else generate_name
114 expect(generate_name, "Was asked to do baseline generation but unable to determine baseline name")
115 logging.info("Generating namelists to baselines '%s'" % generate_name)
116
117 success = True
118 output = ""
119 if compare:
120 success, output = _do_full_nl_comp(case, test_name, compare_name)
121 if generate:
122 _do_full_nl_gen(case, test_name, generate_name)
123 except:
124 ts.set_status(NAMELIST_PHASE, TEST_FAIL_STATUS)
125 success = False
126 warn = "Exception during namelist operations:\n%s\n%s" % (sys.exc_info()[1], traceback.format_exc())
127 output += warn
128 logging.warning(warn)
129 finally:
130 ts.set_status(NAMELIST_PHASE, TEST_PASS_STATUS if success else TEST_FAIL_STATUS)
131 append_status(output, caseroot=caseroot, sfile="TestStatus.log")
132
133 return success
134
135
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/utils/python/CIME/case_cmpgen_namelists.py b/utils/python/CIME/case_cmpgen_namelists.py
--- a/utils/python/CIME/case_cmpgen_namelists.py
+++ b/utils/python/CIME/case_cmpgen_namelists.py
@@ -14,10 +14,10 @@
logger = logging.getLogger(__name__)
-def _do_full_nl_comp(case, test, compare_name):
+def _do_full_nl_comp(case, test, compare_name, baseline_root=None):
test_dir = case.get_value("CASEROOT")
casedoc_dir = os.path.join(test_dir, "CaseDocs")
- baseline_root = case.get_value("BASELINE_ROOT")
+ baseline_root = case.get_value("BASELINE_ROOT") if baseline_root is None else baseline_root
all_match = True
baseline_dir = os.path.join(baseline_root, compare_name, test)
@@ -52,10 +52,10 @@
logging.info(comments)
return all_match, comments
-def _do_full_nl_gen(case, test, generate_name):
+def _do_full_nl_gen(case, test, generate_name, baseline_root=None):
test_dir = case.get_value("CASEROOT")
casedoc_dir = os.path.join(test_dir, "CaseDocs")
- baseline_root = case.get_value("BASELINE_ROOT")
+ baseline_root = case.get_value("BASELINE_ROOT") if baseline_root is None else baseline_root
baseline_dir = os.path.join(baseline_root, generate_name, test)
baseline_casedocs = os.path.join(baseline_dir, "CaseDocs")
@@ -79,7 +79,7 @@
shutil.copy2(item, baseline_dir)
os.chmod(preexisting_baseline, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP)
-def case_cmpgen_namelists(case, compare=False, generate=False, compare_name=None, generate_name=None):
+def case_cmpgen_namelists(case, compare=False, generate=False, compare_name=None, generate_name=None, baseline_root=None, logfile_name="TestStatus.log"):
expect(case.get_value("TEST"), "Only makes sense to run this for a test case")
caseroot, casebaseid = case.get_value("CASEROOT"), case.get_value("CASEBASEID")
@@ -117,9 +117,9 @@
success = True
output = ""
if compare:
- success, output = _do_full_nl_comp(case, test_name, compare_name)
+ success, output = _do_full_nl_comp(case, test_name, compare_name, baseline_root)
if generate:
- _do_full_nl_gen(case, test_name, generate_name)
+ _do_full_nl_gen(case, test_name, generate_name, baseline_root)
except:
ts.set_status(NAMELIST_PHASE, TEST_FAIL_STATUS)
success = False
@@ -128,7 +128,7 @@
logging.warning(warn)
finally:
ts.set_status(NAMELIST_PHASE, TEST_PASS_STATUS if success else TEST_FAIL_STATUS)
- append_status(output, caseroot=caseroot, sfile="TestStatus.log")
+ append_status(output, caseroot=caseroot, sfile=logfile_name)
return success
|
{"golden_diff": "diff --git a/utils/python/CIME/case_cmpgen_namelists.py b/utils/python/CIME/case_cmpgen_namelists.py\n--- a/utils/python/CIME/case_cmpgen_namelists.py\n+++ b/utils/python/CIME/case_cmpgen_namelists.py\n@@ -14,10 +14,10 @@\n \n logger = logging.getLogger(__name__)\n \n-def _do_full_nl_comp(case, test, compare_name):\n+def _do_full_nl_comp(case, test, compare_name, baseline_root=None):\n test_dir = case.get_value(\"CASEROOT\")\n casedoc_dir = os.path.join(test_dir, \"CaseDocs\")\n- baseline_root = case.get_value(\"BASELINE_ROOT\")\n+ baseline_root = case.get_value(\"BASELINE_ROOT\") if baseline_root is None else baseline_root\n \n all_match = True\n baseline_dir = os.path.join(baseline_root, compare_name, test)\n@@ -52,10 +52,10 @@\n logging.info(comments)\n return all_match, comments\n \n-def _do_full_nl_gen(case, test, generate_name):\n+def _do_full_nl_gen(case, test, generate_name, baseline_root=None):\n test_dir = case.get_value(\"CASEROOT\")\n casedoc_dir = os.path.join(test_dir, \"CaseDocs\")\n- baseline_root = case.get_value(\"BASELINE_ROOT\")\n+ baseline_root = case.get_value(\"BASELINE_ROOT\") if baseline_root is None else baseline_root\n \n baseline_dir = os.path.join(baseline_root, generate_name, test)\n baseline_casedocs = os.path.join(baseline_dir, \"CaseDocs\")\n@@ -79,7 +79,7 @@\n shutil.copy2(item, baseline_dir)\n os.chmod(preexisting_baseline, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP)\n \n-def case_cmpgen_namelists(case, compare=False, generate=False, compare_name=None, generate_name=None):\n+def case_cmpgen_namelists(case, compare=False, generate=False, compare_name=None, generate_name=None, baseline_root=None, logfile_name=\"TestStatus.log\"):\n expect(case.get_value(\"TEST\"), \"Only makes sense to run this for a test case\")\n \n caseroot, casebaseid = case.get_value(\"CASEROOT\"), case.get_value(\"CASEBASEID\")\n@@ -117,9 +117,9 @@\n success = True\n output = \"\"\n if compare:\n- success, output = _do_full_nl_comp(case, test_name, compare_name)\n+ success, output = _do_full_nl_comp(case, test_name, compare_name, baseline_root)\n if generate:\n- _do_full_nl_gen(case, test_name, generate_name)\n+ _do_full_nl_gen(case, test_name, generate_name, baseline_root)\n except:\n ts.set_status(NAMELIST_PHASE, TEST_FAIL_STATUS)\n success = False\n@@ -128,7 +128,7 @@\n logging.warning(warn)\n finally:\n ts.set_status(NAMELIST_PHASE, TEST_PASS_STATUS if success else TEST_FAIL_STATUS)\n- append_status(output, caseroot=caseroot, sfile=\"TestStatus.log\")\n+ append_status(output, caseroot=caseroot, sfile=logfile_name)\n \n return success\n", "issue": "bless_test_results - can not find create_test command\nI am using tag:\r\n https://github.com/CESM-Development/cime/tags/cime5.2.0-alpha.10\r\n\r\nWhen I issue the command:\r\n /glade/u/home/cacraig/cam5_4_94/cime/scripts/Tools/bless_test_results -t '' -c '' -r /glade/scratch/cacraig/aux_cam_20161123172411 -b cam5_4_94-copy6 -f\r\n\r\nI get the following error:\r\nFAILED TO BLESS TEST: SMS_Lm13.f09_f09.F2000_DEV.yellowstone_intel.cam-outfrq1m_clm5, reason Namelist regen failed: '/bin/sh: create_test: command not found'\r\n\r\nThe baseline directory created by this script has the netCDF files in it, but no namelists are copied. 
I've tried running this command from various locations including the cime/scripts directory where create_test resides, and can not eliminate this error.\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nLibrary for case.cmpgen_namelists.\n\"\"\"\n\nfrom CIME.XML.standard_module_setup import *\n\nfrom CIME.preview_namelists import create_namelists\nfrom CIME.compare_namelists import is_namelist_file, compare_namelist_files\nfrom CIME.simple_compare import compare_files\nfrom CIME.utils import get_current_branch, append_status\nfrom CIME.test_status import *\n\nimport os, shutil, traceback, stat, glob\n\nlogger = logging.getLogger(__name__)\n\ndef _do_full_nl_comp(case, test, compare_name):\n test_dir = case.get_value(\"CASEROOT\")\n casedoc_dir = os.path.join(test_dir, \"CaseDocs\")\n baseline_root = case.get_value(\"BASELINE_ROOT\")\n\n all_match = True\n baseline_dir = os.path.join(baseline_root, compare_name, test)\n baseline_casedocs = os.path.join(baseline_dir, \"CaseDocs\")\n\n # Start off by comparing everything in CaseDocs except a few arbitrary files (ugh!)\n # TODO: Namelist files should have consistent suffix\n all_items_to_compare = [item for item in glob.glob(\"%s/*\" % casedoc_dir)\\\n if \"README\" not in os.path.basename(item)\\\n and not item.endswith(\"doc\")\\\n and not item.endswith(\"prescribed\")\\\n and not os.path.basename(item).startswith(\".\")] + \\\n glob.glob(\"%s/*user_nl*\" % test_dir)\n\n comments = \"\"\n for item in all_items_to_compare:\n baseline_counterpart = os.path.join(baseline_casedocs \\\n if os.path.dirname(item).endswith(\"CaseDocs\") \\\n else baseline_dir,os.path.basename(item))\n if not os.path.exists(baseline_counterpart):\n comments += \"Missing baseline namelist '%s'\\n\" % baseline_counterpart\n all_match = False\n else:\n if is_namelist_file(item):\n success, current_comments = compare_namelist_files(baseline_counterpart, item, test)\n else:\n success, current_comments = compare_files(baseline_counterpart, item, test)\n\n all_match &= success\n comments += current_comments\n\n logging.info(comments)\n return all_match, comments\n\ndef _do_full_nl_gen(case, test, generate_name):\n test_dir = case.get_value(\"CASEROOT\")\n casedoc_dir = os.path.join(test_dir, \"CaseDocs\")\n baseline_root = case.get_value(\"BASELINE_ROOT\")\n\n baseline_dir = os.path.join(baseline_root, generate_name, test)\n baseline_casedocs = os.path.join(baseline_dir, \"CaseDocs\")\n\n if not os.path.isdir(baseline_dir):\n os.makedirs(baseline_dir, stat.S_IRWXU | stat.S_IRWXG | stat.S_IXOTH | stat.S_IROTH)\n\n if os.path.isdir(baseline_casedocs):\n shutil.rmtree(baseline_casedocs)\n\n shutil.copytree(casedoc_dir, baseline_casedocs)\n os.chmod(baseline_casedocs, stat.S_IRWXU | stat.S_IRWXG | stat.S_IXOTH | stat.S_IROTH)\n for item in glob.glob(\"%s/*\" % baseline_casedocs):\n os.chmod(item, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP)\n\n for item in glob.glob(os.path.join(test_dir, \"user_nl*\")):\n preexisting_baseline = os.path.join(baseline_dir, os.path.basename(item))\n if (os.path.exists(preexisting_baseline)):\n os.remove(preexisting_baseline)\n\n shutil.copy2(item, baseline_dir)\n os.chmod(preexisting_baseline, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP)\n\ndef case_cmpgen_namelists(case, compare=False, generate=False, compare_name=None, generate_name=None):\n expect(case.get_value(\"TEST\"), \"Only makes sense to run this for a test case\")\n\n caseroot, casebaseid = case.get_value(\"CASEROOT\"), case.get_value(\"CASEBASEID\")\n\n if 
not compare:\n compare = case.get_value(\"COMPARE_BASELINE\")\n if not generate:\n generate = case.get_value(\"GENERATE_BASELINE\")\n\n if not compare and not generate:\n logging.info(\"Nothing to do\")\n return True\n\n # create namelists for case if they haven't been already\n casedocs = os.path.join(caseroot, \"CaseDocs\")\n if not os.path.exists(os.path.join(casedocs, \"drv_in\")):\n create_namelists(case)\n\n test_name = casebaseid if casebaseid is not None else case.get_value(\"CASE\")\n with TestStatus(test_dir=caseroot, test_name=test_name) as ts:\n try:\n # Inside this try are where we catch non-fatal errors, IE errors involving\n # baseline operations which may not directly impact the functioning of the viability of this case\n if compare and not compare_name:\n compare_name = case.get_value(\"BASELINE_NAME_CMP\")\n compare_name = get_current_branch() if compare_name is None else compare_name\n expect(compare_name, \"Was asked to do baseline compare but unable to determine baseline name\")\n logging.info(\"Comparing namelists with baselines '%s'\" % compare_name)\n if generate and not generate_name:\n generate_name = case.get_value(\"BASELINE_NAME_GEN\")\n generate_name = get_current_branch() if generate_name is None else generate_name\n expect(generate_name, \"Was asked to do baseline generation but unable to determine baseline name\")\n logging.info(\"Generating namelists to baselines '%s'\" % generate_name)\n\n success = True\n output = \"\"\n if compare:\n success, output = _do_full_nl_comp(case, test_name, compare_name)\n if generate:\n _do_full_nl_gen(case, test_name, generate_name)\n except:\n ts.set_status(NAMELIST_PHASE, TEST_FAIL_STATUS)\n success = False\n warn = \"Exception during namelist operations:\\n%s\\n%s\" % (sys.exc_info()[1], traceback.format_exc())\n output += warn\n logging.warning(warn)\n finally:\n ts.set_status(NAMELIST_PHASE, TEST_PASS_STATUS if success else TEST_FAIL_STATUS)\n append_status(output, caseroot=caseroot, sfile=\"TestStatus.log\")\n\n return success\n\n", "path": "utils/python/CIME/case_cmpgen_namelists.py"}], "after_files": [{"content": "\"\"\"\nLibrary for case.cmpgen_namelists.\n\"\"\"\n\nfrom CIME.XML.standard_module_setup import *\n\nfrom CIME.preview_namelists import create_namelists\nfrom CIME.compare_namelists import is_namelist_file, compare_namelist_files\nfrom CIME.simple_compare import compare_files\nfrom CIME.utils import get_current_branch, append_status\nfrom CIME.test_status import *\n\nimport os, shutil, traceback, stat, glob\n\nlogger = logging.getLogger(__name__)\n\ndef _do_full_nl_comp(case, test, compare_name, baseline_root=None):\n test_dir = case.get_value(\"CASEROOT\")\n casedoc_dir = os.path.join(test_dir, \"CaseDocs\")\n baseline_root = case.get_value(\"BASELINE_ROOT\") if baseline_root is None else baseline_root\n\n all_match = True\n baseline_dir = os.path.join(baseline_root, compare_name, test)\n baseline_casedocs = os.path.join(baseline_dir, \"CaseDocs\")\n\n # Start off by comparing everything in CaseDocs except a few arbitrary files (ugh!)\n # TODO: Namelist files should have consistent suffix\n all_items_to_compare = [item for item in glob.glob(\"%s/*\" % casedoc_dir)\\\n if \"README\" not in os.path.basename(item)\\\n and not item.endswith(\"doc\")\\\n and not item.endswith(\"prescribed\")\\\n and not os.path.basename(item).startswith(\".\")] + \\\n glob.glob(\"%s/*user_nl*\" % test_dir)\n\n comments = \"\"\n for item in all_items_to_compare:\n baseline_counterpart = os.path.join(baseline_casedocs \\\n if 
os.path.dirname(item).endswith(\"CaseDocs\") \\\n else baseline_dir,os.path.basename(item))\n if not os.path.exists(baseline_counterpart):\n comments += \"Missing baseline namelist '%s'\\n\" % baseline_counterpart\n all_match = False\n else:\n if is_namelist_file(item):\n success, current_comments = compare_namelist_files(baseline_counterpart, item, test)\n else:\n success, current_comments = compare_files(baseline_counterpart, item, test)\n\n all_match &= success\n comments += current_comments\n\n logging.info(comments)\n return all_match, comments\n\ndef _do_full_nl_gen(case, test, generate_name, baseline_root=None):\n test_dir = case.get_value(\"CASEROOT\")\n casedoc_dir = os.path.join(test_dir, \"CaseDocs\")\n baseline_root = case.get_value(\"BASELINE_ROOT\") if baseline_root is None else baseline_root\n\n baseline_dir = os.path.join(baseline_root, generate_name, test)\n baseline_casedocs = os.path.join(baseline_dir, \"CaseDocs\")\n\n if not os.path.isdir(baseline_dir):\n os.makedirs(baseline_dir, stat.S_IRWXU | stat.S_IRWXG | stat.S_IXOTH | stat.S_IROTH)\n\n if os.path.isdir(baseline_casedocs):\n shutil.rmtree(baseline_casedocs)\n\n shutil.copytree(casedoc_dir, baseline_casedocs)\n os.chmod(baseline_casedocs, stat.S_IRWXU | stat.S_IRWXG | stat.S_IXOTH | stat.S_IROTH)\n for item in glob.glob(\"%s/*\" % baseline_casedocs):\n os.chmod(item, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP)\n\n for item in glob.glob(os.path.join(test_dir, \"user_nl*\")):\n preexisting_baseline = os.path.join(baseline_dir, os.path.basename(item))\n if (os.path.exists(preexisting_baseline)):\n os.remove(preexisting_baseline)\n\n shutil.copy2(item, baseline_dir)\n os.chmod(preexisting_baseline, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP)\n\ndef case_cmpgen_namelists(case, compare=False, generate=False, compare_name=None, generate_name=None, baseline_root=None, logfile_name=\"TestStatus.log\"):\n expect(case.get_value(\"TEST\"), \"Only makes sense to run this for a test case\")\n\n caseroot, casebaseid = case.get_value(\"CASEROOT\"), case.get_value(\"CASEBASEID\")\n\n if not compare:\n compare = case.get_value(\"COMPARE_BASELINE\")\n if not generate:\n generate = case.get_value(\"GENERATE_BASELINE\")\n\n if not compare and not generate:\n logging.info(\"Nothing to do\")\n return True\n\n # create namelists for case if they haven't been already\n casedocs = os.path.join(caseroot, \"CaseDocs\")\n if not os.path.exists(os.path.join(casedocs, \"drv_in\")):\n create_namelists(case)\n\n test_name = casebaseid if casebaseid is not None else case.get_value(\"CASE\")\n with TestStatus(test_dir=caseroot, test_name=test_name) as ts:\n try:\n # Inside this try are where we catch non-fatal errors, IE errors involving\n # baseline operations which may not directly impact the functioning of the viability of this case\n if compare and not compare_name:\n compare_name = case.get_value(\"BASELINE_NAME_CMP\")\n compare_name = get_current_branch() if compare_name is None else compare_name\n expect(compare_name, \"Was asked to do baseline compare but unable to determine baseline name\")\n logging.info(\"Comparing namelists with baselines '%s'\" % compare_name)\n if generate and not generate_name:\n generate_name = case.get_value(\"BASELINE_NAME_GEN\")\n generate_name = get_current_branch() if generate_name is None else generate_name\n expect(generate_name, \"Was asked to do baseline generation but unable to determine baseline name\")\n logging.info(\"Generating namelists to baselines '%s'\" % 
generate_name)\n\n success = True\n output = \"\"\n if compare:\n success, output = _do_full_nl_comp(case, test_name, compare_name, baseline_root)\n if generate:\n _do_full_nl_gen(case, test_name, generate_name, baseline_root)\n except:\n ts.set_status(NAMELIST_PHASE, TEST_FAIL_STATUS)\n success = False\n warn = \"Exception during namelist operations:\\n%s\\n%s\" % (sys.exc_info()[1], traceback.format_exc())\n output += warn\n logging.warning(warn)\n finally:\n ts.set_status(NAMELIST_PHASE, TEST_PASS_STATUS if success else TEST_FAIL_STATUS)\n append_status(output, caseroot=caseroot, sfile=logfile_name)\n\n return success\n\n", "path": "utils/python/CIME/case_cmpgen_namelists.py"}]}
| 2,182 | 728 |
gh_patches_debug_31994
|
rasdani/github-patches
|
git_diff
|
scikit-image__scikit-image-6109
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Reduce the wheel size relative to 0.19.0 for upcoming releases
## Description
Continuing the discussion from https://github.com/scikit-image/scikit-image/issues/6086#issuecomment-986063471
We have seen a recent increase in wheel size by nearly a factor of two for 0.19.0 vs. 0.18.3. In order to be a good citizen, preserving PyPI and CI resources we should try to reduce this if possible.
So far, one difference I noticed was that the 0.19.0 wheels have various `.c`, `.cpp` and `.pyx` files that should not need to be in the wheel.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #! /usr/bin/env python
2
3 import os
4 import sys
5 import tempfile
6 import shutil
7 import builtins
8 import textwrap
9 from numpy.distutils.command.build_ext import build_ext as npy_build_ext
10
11 import setuptools
12 from setuptools.command.build_py import build_py
13 from setuptools.command.sdist import sdist
14 try:
15 from setuptools.errors import CompileError, LinkError
16 except ImportError:
17 # can remove this except case once we require setuptools>=59.0
18 from distutils.errors import CompileError, LinkError
19
20 from pythran.dist import PythranBuildExt as pythran_build_ext
21
22 DISTNAME = 'scikit-image'
23 DESCRIPTION = 'Image processing in Python'
24 MAINTAINER = 'Stefan van der Walt'
25 MAINTAINER_EMAIL = '[email protected]'
26 URL = 'https://scikit-image.org'
27 LICENSE = 'Modified BSD'
28 DOWNLOAD_URL = 'https://scikit-image.org/docs/stable/install.html'
29 PROJECT_URLS = {
30 "Bug Tracker": 'https://github.com/scikit-image/scikit-image/issues',
31 "Documentation": 'https://scikit-image.org/docs/stable/',
32 "Source Code": 'https://github.com/scikit-image/scikit-image'
33 }
34
35 with open('README.md', encoding='utf-8') as f:
36 LONG_DESCRIPTION = f.read()
37
38 if sys.version_info < (3, 8):
39
40 error = """Python {py} detected.
41
42 scikit-image supports only Python 3.8 and above.
43
44 For Python 2.7, please install the 0.14.x Long Term Support release using:
45
46 $ pip install 'scikit-image<0.15'
47 """.format(py='.'.join([str(v) for v in sys.version_info[:3]]))
48
49 sys.stderr.write(error + "\n")
50 sys.exit(1)
51
52 # This is a bit (!) hackish: we are setting a global variable so that the main
53 # skimage __init__ can detect if it is being loaded by the setup routine, to
54 # avoid attempting to load components that aren't built yet:
55 # the numpy distutils extensions that are used by scikit-image to recursively
56 # build the compiled extensions in sub-packages is based on the Python import
57 # machinery.
58 builtins.__SKIMAGE_SETUP__ = True
59
60
61 # Support for openmp
62
63 class ConditionalOpenMP(pythran_build_ext[npy_build_ext]):
64
65 def can_compile_link(self, compile_flags, link_flags):
66
67 if "PYODIDE_PACKAGE_ABI" in os.environ:
68 # pyodide doesn't support OpenMP
69 return False
70
71 cc = self.compiler
72 fname = 'test.c'
73 cwd = os.getcwd()
74 tmpdir = tempfile.mkdtemp()
75
76 code = ("#include <omp.h>"
77 "int main(int argc, char** argv) { return(0); }")
78
79 if self.compiler.compiler_type == "msvc":
80 # make sure we build a DLL on Windows
81 local_link_flags = link_flags + ["/DLL"]
82 else:
83 local_link_flags = link_flags
84
85 try:
86 os.chdir(tmpdir)
87 with open(fname, 'wt') as fobj:
88 fobj.write(code)
89 try:
90 objects = cc.compile([fname],
91 extra_postargs=compile_flags)
92 except CompileError:
93 return False
94 try:
95 # Link shared lib rather then executable to avoid
96 # http://bugs.python.org/issue4431 with MSVC 10+
97 cc.link_shared_lib(objects, "testlib",
98 extra_postargs=local_link_flags)
99 except (LinkError, TypeError):
100 return False
101 finally:
102 os.chdir(cwd)
103 shutil.rmtree(tmpdir)
104 return True
105
106 def build_extensions(self):
107 """ Hook into extension building to set compiler flags """
108
109 compile_flags = list()
110 link_flags = list()
111
112 # check which compiler is being used
113 if self.compiler.compiler_type == "msvc":
114 # '-fopenmp' is called '/openmp' in msvc
115 compile_flags += ['/openmp']
116 else:
117 compile_flags += ['-fopenmp']
118 link_flags += ['-fopenmp']
119
120 if self.can_compile_link(compile_flags, link_flags):
121 for ext in self.extensions:
122 ext.extra_compile_args += compile_flags
123 ext.extra_link_args += link_flags
124
125 super(ConditionalOpenMP, self).build_extensions()
126
127
128 with open('skimage/__init__.py', encoding='utf-8') as fid:
129 for line in fid:
130 if line.startswith('__version__'):
131 VERSION = line.strip().split()[-1][1:-1]
132 break
133
134
135 def parse_requirements_file(filename):
136 with open(filename, encoding='utf-8') as fid:
137 requires = [line.strip() for line in fid.readlines() if line]
138
139 return requires
140
141
142 INSTALL_REQUIRES = parse_requirements_file('requirements/default.txt')
143 # The `requirements/extras.txt` file is explicitely omitted because
144 # it contains requirements that do not have wheels uploaded to pip
145 # for the platforms we wish to support.
146 extras_require = {
147 dep: parse_requirements_file('requirements/' + dep + '.txt')
148 for dep in ['docs', 'optional', 'test', 'data']
149 }
150
151
152 def configuration(parent_package='', top_path=None):
153 if os.path.exists('MANIFEST'):
154 os.remove('MANIFEST')
155
156 from numpy.distutils.misc_util import Configuration
157 config = Configuration(None, parent_package, top_path)
158
159 config.set_options(
160 ignore_setup_xxx_py=True,
161 assume_default_configuration=True,
162 delegate_options_to_subpackages=True,
163 quiet=True)
164
165 config.add_subpackage('skimage')
166
167 return config
168
169
170 if __name__ == "__main__":
171 cmdclass = {'build_py': build_py,
172 'sdist': sdist}
173 try:
174 # test if build dependencies exist.
175 # if not, some commands are still viable.
176 # note: this must be kept in sync with pyproject.toml
177 from numpy.distutils.core import setup
178 import cython
179 extra = {'configuration': configuration}
180 cmdclass['build_ext'] = ConditionalOpenMP
181 except ImportError:
182 if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
183 sys.argv[1] in ('--help-commands',
184 '--version',
185 'clean',
186 'egg_info',
187 'install_egg_info',
188 'rotate',
189 'sdist')):
190 # For these actions, compilation is not required.
191 #
192 # They are required to succeed for example when pip is
193 # used to install scikit-image when Numpy/cython are not
194 # yet present in the system.
195 from setuptools import setup
196 extra = {}
197 else:
198 print(textwrap.dedent("""
199 To install scikit-image from source, you will need NumPy
200 and Cython.
201 Install NumPy, Cython with your python package manager.
202 If you are using pip, the commands are:
203
204 pip install numpy cython pythran
205
206 For more details, see:
207
208 https://scikit-image.org/docs/stable/install.html
209 """))
210 sys.exit(1)
211
212 setup(
213 name=DISTNAME,
214 description=DESCRIPTION,
215 long_description=LONG_DESCRIPTION,
216 long_description_content_type="text/markdown",
217 maintainer=MAINTAINER,
218 maintainer_email=MAINTAINER_EMAIL,
219 url=URL,
220 license=LICENSE,
221 download_url=DOWNLOAD_URL,
222 project_urls=PROJECT_URLS,
223 version=VERSION,
224 classifiers=[
225 'Development Status :: 4 - Beta',
226 'Environment :: Console',
227 'Intended Audience :: Developers',
228 'Intended Audience :: Science/Research',
229 'License :: OSI Approved :: BSD License',
230 'Programming Language :: C',
231 'Programming Language :: Python',
232 'Programming Language :: Python :: 3',
233 'Programming Language :: Python :: 3.8',
234 'Programming Language :: Python :: 3.9',
235 'Programming Language :: Python :: 3.10',
236 'Programming Language :: Python :: 3 :: Only',
237 'Topic :: Scientific/Engineering',
238 'Operating System :: Microsoft :: Windows',
239 'Operating System :: POSIX',
240 'Operating System :: Unix',
241 'Operating System :: MacOS',
242 ],
243 install_requires=INSTALL_REQUIRES,
244 extras_require=extras_require,
245 python_requires='>=3.8',
246 packages=setuptools.find_packages(exclude=['doc', 'benchmarks']),
247 include_package_data=True,
248 zip_safe=False, # the package can run out of an .egg file
249 entry_points={
250 'console_scripts': ['skivi = skimage.scripts.skivi:main'],
251 },
252 cmdclass=cmdclass,
253 **extra
254 )
255
```
Path: `skimage/_build.py`
Content:
```
1 import sys
2 import os
3 from packaging import version
4 from multiprocessing import cpu_count
5
6 CYTHON_VERSION = '0.23.4'
7
8 # WindowsError is not defined on unix systems
9 try:
10 WindowsError
11 except NameError:
12 class WindowsError(Exception):
13 pass
14
15
16 def _compiled_filename(f):
17 """Check for the presence of a .pyx[.in] file as a .c or .cpp."""
18 basename = f.replace('.in', '').replace('.pyx', '')
19 for ext in ('.c', '.cpp'):
20 filename = basename + ext
21 if os.path.exists(filename):
22 return filename
23 else:
24 raise RuntimeError('Cython >= %s is required to build '
25 'scikit-image from git checkout' %
26 CYTHON_VERSION)
27
28
29 def cython(pyx_files, working_path=''):
30 """Use Cython to convert the given files to C.
31
32 Parameters
33 ----------
34 pyx_files : list of str
35 The input .pyx files.
36
37 """
38 # Do not build cython files if target is clean
39 if len(sys.argv) >= 2 and sys.argv[1] == 'clean':
40 return
41
42 try:
43 from Cython import __version__
44 if version.parse(__version__) < version.parse(CYTHON_VERSION):
45 raise RuntimeError('Cython >= %s needed to build scikit-image' % CYTHON_VERSION)
46
47 from Cython.Build import cythonize
48 except ImportError:
49 # If cython is not found, the build will make use of
50 # the distributed .c or .cpp files if present
51 c_files_used = [_compiled_filename(os.path.join(working_path, f))
52 for f in pyx_files]
53
54 print("Cython >= %s not found; falling back to pre-built %s" \
55 % (CYTHON_VERSION, " ".join(c_files_used)))
56 else:
57 pyx_files = [os.path.join(working_path, f) for f in pyx_files]
58 for i, pyxfile in enumerate(pyx_files):
59 if pyxfile.endswith('.pyx.in'):
60 process_tempita_pyx(pyxfile)
61 pyx_files[i] = pyxfile.replace('.pyx.in', '.pyx')
62
63 # Cython doesn't automatically choose a number of threads > 1
64 # https://github.com/cython/cython/blob/a0bbb940c847dfe92cac446c8784c34c28c92836/Cython/Build/Dependencies.py#L923-L925
65 cythonize(pyx_files, nthreads=cpu_count(),
66 compiler_directives={'language_level': 3})
67
68
69 def process_tempita_pyx(fromfile):
70 try:
71 try:
72 from Cython import Tempita as tempita
73 except ImportError:
74 import tempita
75 except ImportError:
76 raise Exception('Building requires Tempita: '
77 'pip install --user Tempita')
78 template = tempita.Template.from_filename(fromfile,
79 encoding=sys.getdefaultencoding())
80 pyxcontent = template.substitute()
81 if not fromfile.endswith('.pyx.in'):
82 raise ValueError("Unexpected extension of %s." % fromfile)
83
84 pyxfile = os.path.splitext(fromfile)[0] # split off the .in ending
85 with open(pyxfile, "w") as f:
86 f.write(pyxcontent)
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -116,6 +116,8 @@
else:
compile_flags += ['-fopenmp']
link_flags += ['-fopenmp']
+ if 'SKIMAGE_LINK_FLAGS' in os.environ:
+ link_flags += [os.environ['SKIMAGE_LINK_FLAGS']]
if self.can_compile_link(compile_flags, link_flags):
for ext in self.extensions:
@@ -244,7 +246,7 @@
extras_require=extras_require,
python_requires='>=3.8',
packages=setuptools.find_packages(exclude=['doc', 'benchmarks']),
- include_package_data=True,
+ include_package_data=False,
zip_safe=False, # the package can run out of an .egg file
entry_points={
'console_scripts': ['skivi = skimage.scripts.skivi:main'],
diff --git a/skimage/_build.py b/skimage/_build.py
--- a/skimage/_build.py
+++ b/skimage/_build.py
@@ -60,10 +60,13 @@
process_tempita_pyx(pyxfile)
pyx_files[i] = pyxfile.replace('.pyx.in', '.pyx')
- # Cython doesn't automatically choose a number of threads > 1
- # https://github.com/cython/cython/blob/a0bbb940c847dfe92cac446c8784c34c28c92836/Cython/Build/Dependencies.py#L923-L925
- cythonize(pyx_files, nthreads=cpu_count(),
- compiler_directives={'language_level': 3})
+ # skip cythonize when creating an sdist
+ # (we do not want the large cython-generated sources to be included)
+ if 'sdist' not in sys.argv:
+ # Cython doesn't automatically choose a number of threads > 1
+ # https://github.com/cython/cython/blob/a0bbb940c847dfe92cac446c8784c34c28c92836/Cython/Build/Dependencies.py#L923-L925
+ cythonize(pyx_files, nthreads=cpu_count(),
+ compiler_directives={'language_level': 3})
def process_tempita_pyx(fromfile):
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -116,6 +116,8 @@\n else:\n compile_flags += ['-fopenmp']\n link_flags += ['-fopenmp']\n+ if 'SKIMAGE_LINK_FLAGS' in os.environ:\n+ link_flags += [os.environ['SKIMAGE_LINK_FLAGS']]\n \n if self.can_compile_link(compile_flags, link_flags):\n for ext in self.extensions:\n@@ -244,7 +246,7 @@\n extras_require=extras_require,\n python_requires='>=3.8',\n packages=setuptools.find_packages(exclude=['doc', 'benchmarks']),\n- include_package_data=True,\n+ include_package_data=False,\n zip_safe=False, # the package can run out of an .egg file\n entry_points={\n 'console_scripts': ['skivi = skimage.scripts.skivi:main'],\ndiff --git a/skimage/_build.py b/skimage/_build.py\n--- a/skimage/_build.py\n+++ b/skimage/_build.py\n@@ -60,10 +60,13 @@\n process_tempita_pyx(pyxfile)\n pyx_files[i] = pyxfile.replace('.pyx.in', '.pyx')\n \n- # Cython doesn't automatically choose a number of threads > 1\n- # https://github.com/cython/cython/blob/a0bbb940c847dfe92cac446c8784c34c28c92836/Cython/Build/Dependencies.py#L923-L925\n- cythonize(pyx_files, nthreads=cpu_count(),\n- compiler_directives={'language_level': 3})\n+ # skip cythonize when creating an sdist\n+ # (we do not want the large cython-generated sources to be included)\n+ if 'sdist' not in sys.argv:\n+ # Cython doesn't automatically choose a number of threads > 1\n+ # https://github.com/cython/cython/blob/a0bbb940c847dfe92cac446c8784c34c28c92836/Cython/Build/Dependencies.py#L923-L925\n+ cythonize(pyx_files, nthreads=cpu_count(),\n+ compiler_directives={'language_level': 3})\n \n \n def process_tempita_pyx(fromfile):\n", "issue": "Reduce the wheel size relative to 0.19.0 for upcoming releases\n## Description\r\n\r\nContinuing the discussion from https://github.com/scikit-image/scikit-image/issues/6086#issuecomment-986063471\r\n\r\nWe have seen a recent increase in wheel size by nearly a factor of two for 0.19.0 vs. 0.18.3. In order to be a good citizen, preserving PyPI and CI resources we should try to reduce this if possible.\r\n\r\nSo far, one difference I noticed was that the 0.19.0 wheels have various `.c`, `.cpp` and `.pyx` files that should not need to be in the wheel. \r\n\r\n\n", "before_files": [{"content": "#! 
/usr/bin/env python\n\nimport os\nimport sys\nimport tempfile\nimport shutil\nimport builtins\nimport textwrap\nfrom numpy.distutils.command.build_ext import build_ext as npy_build_ext\n\nimport setuptools\nfrom setuptools.command.build_py import build_py\nfrom setuptools.command.sdist import sdist\ntry:\n from setuptools.errors import CompileError, LinkError\nexcept ImportError:\n # can remove this except case once we require setuptools>=59.0\n from distutils.errors import CompileError, LinkError\n\nfrom pythran.dist import PythranBuildExt as pythran_build_ext\n\nDISTNAME = 'scikit-image'\nDESCRIPTION = 'Image processing in Python'\nMAINTAINER = 'Stefan van der Walt'\nMAINTAINER_EMAIL = '[email protected]'\nURL = 'https://scikit-image.org'\nLICENSE = 'Modified BSD'\nDOWNLOAD_URL = 'https://scikit-image.org/docs/stable/install.html'\nPROJECT_URLS = {\n \"Bug Tracker\": 'https://github.com/scikit-image/scikit-image/issues',\n \"Documentation\": 'https://scikit-image.org/docs/stable/',\n \"Source Code\": 'https://github.com/scikit-image/scikit-image'\n}\n\nwith open('README.md', encoding='utf-8') as f:\n LONG_DESCRIPTION = f.read()\n\nif sys.version_info < (3, 8):\n\n error = \"\"\"Python {py} detected.\n\nscikit-image supports only Python 3.8 and above.\n\nFor Python 2.7, please install the 0.14.x Long Term Support release using:\n\n $ pip install 'scikit-image<0.15'\n\"\"\".format(py='.'.join([str(v) for v in sys.version_info[:3]]))\n\n sys.stderr.write(error + \"\\n\")\n sys.exit(1)\n\n# This is a bit (!) hackish: we are setting a global variable so that the main\n# skimage __init__ can detect if it is being loaded by the setup routine, to\n# avoid attempting to load components that aren't built yet:\n# the numpy distutils extensions that are used by scikit-image to recursively\n# build the compiled extensions in sub-packages is based on the Python import\n# machinery.\nbuiltins.__SKIMAGE_SETUP__ = True\n\n\n# Support for openmp\n\nclass ConditionalOpenMP(pythran_build_ext[npy_build_ext]):\n\n def can_compile_link(self, compile_flags, link_flags):\n\n if \"PYODIDE_PACKAGE_ABI\" in os.environ:\n # pyodide doesn't support OpenMP\n return False\n\n cc = self.compiler\n fname = 'test.c'\n cwd = os.getcwd()\n tmpdir = tempfile.mkdtemp()\n\n code = (\"#include <omp.h>\"\n \"int main(int argc, char** argv) { return(0); }\")\n\n if self.compiler.compiler_type == \"msvc\":\n # make sure we build a DLL on Windows\n local_link_flags = link_flags + [\"/DLL\"]\n else:\n local_link_flags = link_flags\n\n try:\n os.chdir(tmpdir)\n with open(fname, 'wt') as fobj:\n fobj.write(code)\n try:\n objects = cc.compile([fname],\n extra_postargs=compile_flags)\n except CompileError:\n return False\n try:\n # Link shared lib rather then executable to avoid\n # http://bugs.python.org/issue4431 with MSVC 10+\n cc.link_shared_lib(objects, \"testlib\",\n extra_postargs=local_link_flags)\n except (LinkError, TypeError):\n return False\n finally:\n os.chdir(cwd)\n shutil.rmtree(tmpdir)\n return True\n\n def build_extensions(self):\n \"\"\" Hook into extension building to set compiler flags \"\"\"\n\n compile_flags = list()\n link_flags = list()\n\n # check which compiler is being used\n if self.compiler.compiler_type == \"msvc\":\n # '-fopenmp' is called '/openmp' in msvc\n compile_flags += ['/openmp']\n else:\n compile_flags += ['-fopenmp']\n link_flags += ['-fopenmp']\n\n if self.can_compile_link(compile_flags, link_flags):\n for ext in self.extensions:\n ext.extra_compile_args += compile_flags\n ext.extra_link_args 
+= link_flags\n\n super(ConditionalOpenMP, self).build_extensions()\n\n\nwith open('skimage/__init__.py', encoding='utf-8') as fid:\n for line in fid:\n if line.startswith('__version__'):\n VERSION = line.strip().split()[-1][1:-1]\n break\n\n\ndef parse_requirements_file(filename):\n with open(filename, encoding='utf-8') as fid:\n requires = [line.strip() for line in fid.readlines() if line]\n\n return requires\n\n\nINSTALL_REQUIRES = parse_requirements_file('requirements/default.txt')\n# The `requirements/extras.txt` file is explicitely omitted because\n# it contains requirements that do not have wheels uploaded to pip\n# for the platforms we wish to support.\nextras_require = {\n dep: parse_requirements_file('requirements/' + dep + '.txt')\n for dep in ['docs', 'optional', 'test', 'data']\n}\n\n\ndef configuration(parent_package='', top_path=None):\n if os.path.exists('MANIFEST'):\n os.remove('MANIFEST')\n\n from numpy.distutils.misc_util import Configuration\n config = Configuration(None, parent_package, top_path)\n\n config.set_options(\n ignore_setup_xxx_py=True,\n assume_default_configuration=True,\n delegate_options_to_subpackages=True,\n quiet=True)\n\n config.add_subpackage('skimage')\n\n return config\n\n\nif __name__ == \"__main__\":\n cmdclass = {'build_py': build_py,\n 'sdist': sdist}\n try:\n # test if build dependencies exist.\n # if not, some commands are still viable.\n # note: this must be kept in sync with pyproject.toml\n from numpy.distutils.core import setup\n import cython\n extra = {'configuration': configuration}\n cmdclass['build_ext'] = ConditionalOpenMP\n except ImportError:\n if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or\n sys.argv[1] in ('--help-commands',\n '--version',\n 'clean',\n 'egg_info',\n 'install_egg_info',\n 'rotate',\n 'sdist')):\n # For these actions, compilation is not required.\n #\n # They are required to succeed for example when pip is\n # used to install scikit-image when Numpy/cython are not\n # yet present in the system.\n from setuptools import setup\n extra = {}\n else:\n print(textwrap.dedent(\"\"\"\n To install scikit-image from source, you will need NumPy\n and Cython.\n Install NumPy, Cython with your python package manager.\n If you are using pip, the commands are:\n\n pip install numpy cython pythran\n\n For more details, see:\n\n https://scikit-image.org/docs/stable/install.html\n \"\"\"))\n sys.exit(1)\n\n setup(\n name=DISTNAME,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n maintainer=MAINTAINER,\n maintainer_email=MAINTAINER_EMAIL,\n url=URL,\n license=LICENSE,\n download_url=DOWNLOAD_URL,\n project_urls=PROJECT_URLS,\n version=VERSION,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: C',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n ],\n install_requires=INSTALL_REQUIRES,\n extras_require=extras_require,\n python_requires='>=3.8',\n packages=setuptools.find_packages(exclude=['doc', 
'benchmarks']),\n include_package_data=True,\n zip_safe=False, # the package can run out of an .egg file\n entry_points={\n 'console_scripts': ['skivi = skimage.scripts.skivi:main'],\n },\n cmdclass=cmdclass,\n **extra\n )\n", "path": "setup.py"}, {"content": "import sys\nimport os\nfrom packaging import version\nfrom multiprocessing import cpu_count\n\nCYTHON_VERSION = '0.23.4'\n\n# WindowsError is not defined on unix systems\ntry:\n WindowsError\nexcept NameError:\n class WindowsError(Exception):\n pass\n\n\ndef _compiled_filename(f):\n \"\"\"Check for the presence of a .pyx[.in] file as a .c or .cpp.\"\"\"\n basename = f.replace('.in', '').replace('.pyx', '')\n for ext in ('.c', '.cpp'):\n filename = basename + ext\n if os.path.exists(filename):\n return filename\n else:\n raise RuntimeError('Cython >= %s is required to build '\n 'scikit-image from git checkout' %\n CYTHON_VERSION)\n\n\ndef cython(pyx_files, working_path=''):\n \"\"\"Use Cython to convert the given files to C.\n\n Parameters\n ----------\n pyx_files : list of str\n The input .pyx files.\n\n \"\"\"\n # Do not build cython files if target is clean\n if len(sys.argv) >= 2 and sys.argv[1] == 'clean':\n return\n\n try:\n from Cython import __version__\n if version.parse(__version__) < version.parse(CYTHON_VERSION):\n raise RuntimeError('Cython >= %s needed to build scikit-image' % CYTHON_VERSION)\n\n from Cython.Build import cythonize\n except ImportError:\n # If cython is not found, the build will make use of\n # the distributed .c or .cpp files if present\n c_files_used = [_compiled_filename(os.path.join(working_path, f))\n for f in pyx_files]\n\n print(\"Cython >= %s not found; falling back to pre-built %s\" \\\n % (CYTHON_VERSION, \" \".join(c_files_used)))\n else:\n pyx_files = [os.path.join(working_path, f) for f in pyx_files]\n for i, pyxfile in enumerate(pyx_files):\n if pyxfile.endswith('.pyx.in'):\n process_tempita_pyx(pyxfile)\n pyx_files[i] = pyxfile.replace('.pyx.in', '.pyx')\n\n # Cython doesn't automatically choose a number of threads > 1\n # https://github.com/cython/cython/blob/a0bbb940c847dfe92cac446c8784c34c28c92836/Cython/Build/Dependencies.py#L923-L925\n cythonize(pyx_files, nthreads=cpu_count(),\n compiler_directives={'language_level': 3})\n\n\ndef process_tempita_pyx(fromfile):\n try:\n try:\n from Cython import Tempita as tempita\n except ImportError:\n import tempita\n except ImportError:\n raise Exception('Building requires Tempita: '\n 'pip install --user Tempita')\n template = tempita.Template.from_filename(fromfile,\n encoding=sys.getdefaultencoding())\n pyxcontent = template.substitute()\n if not fromfile.endswith('.pyx.in'):\n raise ValueError(\"Unexpected extension of %s.\" % fromfile)\n\n pyxfile = os.path.splitext(fromfile)[0] # split off the .in ending\n with open(pyxfile, \"w\") as f:\n f.write(pyxcontent)\n", "path": "skimage/_build.py"}], "after_files": [{"content": "#! 
/usr/bin/env python\n\nimport os\nimport sys\nimport tempfile\nimport shutil\nimport builtins\nimport textwrap\nfrom numpy.distutils.command.build_ext import build_ext as npy_build_ext\n\nimport setuptools\nfrom setuptools.command.build_py import build_py\nfrom setuptools.command.sdist import sdist\ntry:\n from setuptools.errors import CompileError, LinkError\nexcept ImportError:\n # can remove this except case once we require setuptools>=59.0\n from distutils.errors import CompileError, LinkError\n\nfrom pythran.dist import PythranBuildExt as pythran_build_ext\n\nDISTNAME = 'scikit-image'\nDESCRIPTION = 'Image processing in Python'\nMAINTAINER = 'Stefan van der Walt'\nMAINTAINER_EMAIL = '[email protected]'\nURL = 'https://scikit-image.org'\nLICENSE = 'Modified BSD'\nDOWNLOAD_URL = 'https://scikit-image.org/docs/stable/install.html'\nPROJECT_URLS = {\n \"Bug Tracker\": 'https://github.com/scikit-image/scikit-image/issues',\n \"Documentation\": 'https://scikit-image.org/docs/stable/',\n \"Source Code\": 'https://github.com/scikit-image/scikit-image'\n}\n\nwith open('README.md', encoding='utf-8') as f:\n LONG_DESCRIPTION = f.read()\n\nif sys.version_info < (3, 8):\n\n error = \"\"\"Python {py} detected.\n\nscikit-image supports only Python 3.8 and above.\n\nFor Python 2.7, please install the 0.14.x Long Term Support release using:\n\n $ pip install 'scikit-image<0.15'\n\"\"\".format(py='.'.join([str(v) for v in sys.version_info[:3]]))\n\n sys.stderr.write(error + \"\\n\")\n sys.exit(1)\n\n# This is a bit (!) hackish: we are setting a global variable so that the main\n# skimage __init__ can detect if it is being loaded by the setup routine, to\n# avoid attempting to load components that aren't built yet:\n# the numpy distutils extensions that are used by scikit-image to recursively\n# build the compiled extensions in sub-packages is based on the Python import\n# machinery.\nbuiltins.__SKIMAGE_SETUP__ = True\n\n\n# Support for openmp\n\nclass ConditionalOpenMP(pythran_build_ext[npy_build_ext]):\n\n def can_compile_link(self, compile_flags, link_flags):\n\n if \"PYODIDE_PACKAGE_ABI\" in os.environ:\n # pyodide doesn't support OpenMP\n return False\n\n cc = self.compiler\n fname = 'test.c'\n cwd = os.getcwd()\n tmpdir = tempfile.mkdtemp()\n\n code = (\"#include <omp.h>\"\n \"int main(int argc, char** argv) { return(0); }\")\n\n if self.compiler.compiler_type == \"msvc\":\n # make sure we build a DLL on Windows\n local_link_flags = link_flags + [\"/DLL\"]\n else:\n local_link_flags = link_flags\n\n try:\n os.chdir(tmpdir)\n with open(fname, 'wt') as fobj:\n fobj.write(code)\n try:\n objects = cc.compile([fname],\n extra_postargs=compile_flags)\n except CompileError:\n return False\n try:\n # Link shared lib rather then executable to avoid\n # http://bugs.python.org/issue4431 with MSVC 10+\n cc.link_shared_lib(objects, \"testlib\",\n extra_postargs=local_link_flags)\n except (LinkError, TypeError):\n return False\n finally:\n os.chdir(cwd)\n shutil.rmtree(tmpdir)\n return True\n\n def build_extensions(self):\n \"\"\" Hook into extension building to set compiler flags \"\"\"\n\n compile_flags = list()\n link_flags = list()\n\n # check which compiler is being used\n if self.compiler.compiler_type == \"msvc\":\n # '-fopenmp' is called '/openmp' in msvc\n compile_flags += ['/openmp']\n else:\n compile_flags += ['-fopenmp']\n link_flags += ['-fopenmp']\n if 'SKIMAGE_LINK_FLAGS' in os.environ:\n link_flags += [os.environ['SKIMAGE_LINK_FLAGS']]\n\n if self.can_compile_link(compile_flags, link_flags):\n 
for ext in self.extensions:\n ext.extra_compile_args += compile_flags\n ext.extra_link_args += link_flags\n\n super(ConditionalOpenMP, self).build_extensions()\n\n\nwith open('skimage/__init__.py', encoding='utf-8') as fid:\n for line in fid:\n if line.startswith('__version__'):\n VERSION = line.strip().split()[-1][1:-1]\n break\n\n\ndef parse_requirements_file(filename):\n with open(filename, encoding='utf-8') as fid:\n requires = [line.strip() for line in fid.readlines() if line]\n\n return requires\n\n\nINSTALL_REQUIRES = parse_requirements_file('requirements/default.txt')\n# The `requirements/extras.txt` file is explicitely omitted because\n# it contains requirements that do not have wheels uploaded to pip\n# for the platforms we wish to support.\nextras_require = {\n dep: parse_requirements_file('requirements/' + dep + '.txt')\n for dep in ['docs', 'optional', 'test', 'data']\n}\n\n\ndef configuration(parent_package='', top_path=None):\n if os.path.exists('MANIFEST'):\n os.remove('MANIFEST')\n\n from numpy.distutils.misc_util import Configuration\n config = Configuration(None, parent_package, top_path)\n\n config.set_options(\n ignore_setup_xxx_py=True,\n assume_default_configuration=True,\n delegate_options_to_subpackages=True,\n quiet=True)\n\n config.add_subpackage('skimage')\n\n return config\n\n\nif __name__ == \"__main__\":\n cmdclass = {'build_py': build_py,\n 'sdist': sdist}\n try:\n # test if build dependencies exist.\n # if not, some commands are still viable.\n # note: this must be kept in sync with pyproject.toml\n from numpy.distutils.core import setup\n import cython\n extra = {'configuration': configuration}\n cmdclass['build_ext'] = ConditionalOpenMP\n except ImportError:\n if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or\n sys.argv[1] in ('--help-commands',\n '--version',\n 'clean',\n 'egg_info',\n 'install_egg_info',\n 'rotate',\n 'sdist')):\n # For these actions, compilation is not required.\n #\n # They are required to succeed for example when pip is\n # used to install scikit-image when Numpy/cython are not\n # yet present in the system.\n from setuptools import setup\n extra = {}\n else:\n print(textwrap.dedent(\"\"\"\n To install scikit-image from source, you will need NumPy\n and Cython.\n Install NumPy, Cython with your python package manager.\n If you are using pip, the commands are:\n\n pip install numpy cython pythran\n\n For more details, see:\n\n https://scikit-image.org/docs/stable/install.html\n \"\"\"))\n sys.exit(1)\n\n setup(\n name=DISTNAME,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n maintainer=MAINTAINER,\n maintainer_email=MAINTAINER_EMAIL,\n url=URL,\n license=LICENSE,\n download_url=DOWNLOAD_URL,\n project_urls=PROJECT_URLS,\n version=VERSION,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: C',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n ],\n install_requires=INSTALL_REQUIRES,\n 
extras_require=extras_require,\n python_requires='>=3.8',\n packages=setuptools.find_packages(exclude=['doc', 'benchmarks']),\n include_package_data=False,\n zip_safe=False, # the package can run out of an .egg file\n entry_points={\n 'console_scripts': ['skivi = skimage.scripts.skivi:main'],\n },\n cmdclass=cmdclass,\n **extra\n )\n", "path": "setup.py"}, {"content": "import sys\nimport os\nfrom packaging import version\nfrom multiprocessing import cpu_count\n\nCYTHON_VERSION = '0.23.4'\n\n# WindowsError is not defined on unix systems\ntry:\n WindowsError\nexcept NameError:\n class WindowsError(Exception):\n pass\n\n\ndef _compiled_filename(f):\n \"\"\"Check for the presence of a .pyx[.in] file as a .c or .cpp.\"\"\"\n basename = f.replace('.in', '').replace('.pyx', '')\n for ext in ('.c', '.cpp'):\n filename = basename + ext\n if os.path.exists(filename):\n return filename\n else:\n raise RuntimeError('Cython >= %s is required to build '\n 'scikit-image from git checkout' %\n CYTHON_VERSION)\n\n\ndef cython(pyx_files, working_path=''):\n \"\"\"Use Cython to convert the given files to C.\n\n Parameters\n ----------\n pyx_files : list of str\n The input .pyx files.\n\n \"\"\"\n # Do not build cython files if target is clean\n if len(sys.argv) >= 2 and sys.argv[1] == 'clean':\n return\n\n try:\n from Cython import __version__\n if version.parse(__version__) < version.parse(CYTHON_VERSION):\n raise RuntimeError('Cython >= %s needed to build scikit-image' % CYTHON_VERSION)\n\n from Cython.Build import cythonize\n except ImportError:\n # If cython is not found, the build will make use of\n # the distributed .c or .cpp files if present\n c_files_used = [_compiled_filename(os.path.join(working_path, f))\n for f in pyx_files]\n\n print(\"Cython >= %s not found; falling back to pre-built %s\" \\\n % (CYTHON_VERSION, \" \".join(c_files_used)))\n else:\n pyx_files = [os.path.join(working_path, f) for f in pyx_files]\n for i, pyxfile in enumerate(pyx_files):\n if pyxfile.endswith('.pyx.in'):\n process_tempita_pyx(pyxfile)\n pyx_files[i] = pyxfile.replace('.pyx.in', '.pyx')\n\n # skip cythonize when creating an sdist\n # (we do not want the large cython-generated sources to be included)\n if 'sdist' not in sys.argv:\n # Cython doesn't automatically choose a number of threads > 1\n # https://github.com/cython/cython/blob/a0bbb940c847dfe92cac446c8784c34c28c92836/Cython/Build/Dependencies.py#L923-L925\n cythonize(pyx_files, nthreads=cpu_count(),\n compiler_directives={'language_level': 3})\n\n\ndef process_tempita_pyx(fromfile):\n try:\n try:\n from Cython import Tempita as tempita\n except ImportError:\n import tempita\n except ImportError:\n raise Exception('Building requires Tempita: '\n 'pip install --user Tempita')\n template = tempita.Template.from_filename(fromfile,\n encoding=sys.getdefaultencoding())\n pyxcontent = template.substitute()\n if not fromfile.endswith('.pyx.in'):\n raise ValueError(\"Unexpected extension of %s.\" % fromfile)\n\n pyxfile = os.path.splitext(fromfile)[0] # split off the .in ending\n with open(pyxfile, \"w\") as f:\n f.write(pyxcontent)\n", "path": "skimage/_build.py"}]}
| 3,886 | 549 |
gh_patches_debug_57235
|
rasdani/github-patches
|
git_diff
|
pymodbus-dev__pymodbus-411
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use pycryptodome instead of pycrypto.
<!--
Please use the Pymodbus gitter channel at https://gitter.im/pymodbus_dev/Lobby or Stack Overflow(tag [pymodbus](https://stackoverflow.com/questions/tagged/pymodbus) for
support questions.
Before opening a new issue, make sure you do the following:
* check that your issue isn't already filed: https://github.com/riptideio/pymodbus/issues
* prepare a short, runnable example that reproduce the issue with the latest development version of Pymodbus
-->
### Versions
* Python: 2.7.12
* OS: Ubuntu 18.04
* Pymodbus: 2.1.0 [twisted]
* Modbus Hardware (if used):
### Pymodbus Specific
* Server: tcp - async
### Description
I am trying to use a Modbus server over TCP, but when I installed pymodbus I saw that it installed pycrypto, which is deprecated and dead software.
I already have pycryptodome installed in my application, which conflicts with pycrypto;
we can't have both pycrypto and pycryptodome at the same time.
Can we have a pymodbus[twisted] release that uses pycryptodome instead of pycrypto?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 """
3 Installs pymodbus using distutils
4
5 Run:
6 python setup.py install
7 to install the package from the source archive.
8
9 For information about setuptools
10 http://peak.telecommunity.com/DevCenter/setuptools#new-and-changed-setup-keywords
11 """
12
13 # --------------------------------------------------------------------------- #
14 # initialization
15 # --------------------------------------------------------------------------- #
16 try: # if not installed, install and proceed
17 from setuptools import setup, find_packages
18 except ImportError:
19 from ez_setup import use_setuptools
20 use_setuptools()
21 from setuptools import setup, find_packages
22
23 try:
24 from setup_commands import command_classes
25 except ImportError:
26 command_classes={}
27 from pymodbus import __version__, __author__, __maintainer__
28
29 with open('requirements.txt') as reqs:
30 install_requires = [
31 line for line in reqs.read().split('\n')
32 if (line and not line.startswith('--'))
33 ]
34 install_requires.append("pyserial >= 3.4")
35 # --------------------------------------------------------------------------- #
36 # configuration
37 # --------------------------------------------------------------------------- #
38 setup(
39 name="pymodbus",
40 version=__version__,
41 description="A fully featured modbus protocol stack in python",
42 long_description="""
43 Pymodbus aims to be a fully implemented modbus protocol stack
44 implemented using twisted/asyncio/tornado.
45 Its orignal goal was to allow simulation of thousands of modbus devices
46 on a single machine for monitoring software testing.
47 """,
48 classifiers=[
49 'Development Status :: 4 - Beta',
50 'Environment :: Console',
51 'Environment :: X11 Applications :: GTK',
52 'Framework :: Twisted',
53 'Intended Audience :: Developers',
54 'License :: OSI Approved :: BSD License',
55 'Operating System :: POSIX :: Linux',
56 'Operating System :: Unix',
57 'Programming Language :: Python',
58 'Topic :: System :: Networking',
59 'Topic :: Utilities'
60 ],
61 keywords='modbus, twisted, scada',
62 author=__author__,
63 author_email='[email protected]',
64 maintainer=__maintainer__,
65 maintainer_email='[email protected]',
66 url='https://github.com/riptideio/pymodbus/',
67 license='BSD',
68 packages=find_packages(exclude=['examples', 'test']),
69 exclude_package_data={'': ['examples', 'test', 'tools', 'doc']},
70 py_modules=['ez_setup'],
71 platforms=['Linux', 'Mac OS X', 'Win'],
72 include_package_data=True,
73 zip_safe=True,
74 install_requires=install_requires,
75 extras_require={
76 'quality': [
77 'coverage >= 3.5.3',
78 'nose >= 1.2.1',
79 'mock >= 1.0.0',
80 'pep8 >= 1.3.3'
81 ],
82 'documents': ['sphinx >= 1.1.3',
83 'sphinx_rtd_theme',
84 'humanfriendly'],
85 'twisted': [
86 'twisted >= 12.2.0',
87 'pyasn1 >= 0.1.4',
88 'pycrypto >= 2.6'
89 ],
90 'tornado': [
91 'tornado >= 4.5.3'
92 ],
93 'repl': [
94 'click>=6.7',
95 'prompt-toolkit==2.0.4',
96 'pygments==2.2.0'
97 ]
98 },
99 entry_points={
100 'console_scripts': ['pymodbus.console=pymodbus.repl.main:main'],
101 },
102 test_suite='nose.collector',
103 cmdclass=command_classes,
104 )
105
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -85,7 +85,6 @@
'twisted': [
'twisted >= 12.2.0',
'pyasn1 >= 0.1.4',
- 'pycrypto >= 2.6'
],
'tornado': [
'tornado >= 4.5.3'
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -85,7 +85,6 @@\n 'twisted': [\n 'twisted >= 12.2.0',\n 'pyasn1 >= 0.1.4',\n- 'pycrypto >= 2.6'\n ],\n 'tornado': [\n 'tornado >= 4.5.3'\n", "issue": "Use pycryptodome instead of pycrypto.\n<!--\r\nPlease use the Pymodbus gitter channel at https://gitter.im/pymodbus_dev/Lobby or Stack Overflow(tag [pymodbus](https://stackoverflow.com/questions/tagged/pymodbus) for\r\nsupport questions.\r\n\r\nBefore opening a new issue, make sure you do the following:\r\n * check that your issue isn't already filed: https://github.com/riptideio/pymodbus/issues\r\n * prepare a short, runnable example that reproduce the issue with the latest development version of Pymodbus\r\n-->\r\n\r\n### Versions\r\n\r\n* Python: 2.7.12\r\n* OS: Ubuntu 18.04\r\n* Pymodbus: 2.1.0 [twisted]\r\n* Modbus Hardware (if used): \r\n\r\n### Pymodbus Specific\r\n* Server: tcp - async\r\n\r\n### Description\r\n\r\nI am trying to use Mod bus server on TCP protocol, but when I installed pymodbus and I saw it's installed pycrypto, which is deprecated and dead software. \r\n\r\nI already have installed pycryptodome in my application, which is a conflict with pycrypto, \r\nwe can't have both pycrypto and pycryptodome at the same time,\r\n\r\nCan we have pymodbus[twisted] release which can use pycryptodome instead of pycrypto?\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nInstalls pymodbus using distutils\n\nRun:\n python setup.py install\nto install the package from the source archive.\n\nFor information about setuptools\nhttp://peak.telecommunity.com/DevCenter/setuptools#new-and-changed-setup-keywords\n\"\"\"\n\n# --------------------------------------------------------------------------- #\n# initialization\n# --------------------------------------------------------------------------- #\ntry: # if not installed, install and proceed\n from setuptools import setup, find_packages\nexcept ImportError:\n from ez_setup import use_setuptools\n use_setuptools()\n from setuptools import setup, find_packages\n\ntry:\n from setup_commands import command_classes\nexcept ImportError:\n command_classes={}\nfrom pymodbus import __version__, __author__, __maintainer__\n\nwith open('requirements.txt') as reqs:\n install_requires = [\n line for line in reqs.read().split('\\n')\n if (line and not line.startswith('--'))\n ]\n install_requires.append(\"pyserial >= 3.4\")\n# --------------------------------------------------------------------------- #\n# configuration\n# --------------------------------------------------------------------------- #\nsetup(\n name=\"pymodbus\",\n version=__version__,\n description=\"A fully featured modbus protocol stack in python\",\n long_description=\"\"\"\n Pymodbus aims to be a fully implemented modbus protocol stack \n implemented using twisted/asyncio/tornado. 
\n Its orignal goal was to allow simulation of thousands of modbus devices\n on a single machine for monitoring software testing.\n \"\"\",\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Environment :: X11 Applications :: GTK',\n 'Framework :: Twisted',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: Unix',\n 'Programming Language :: Python',\n 'Topic :: System :: Networking',\n 'Topic :: Utilities'\n ],\n keywords='modbus, twisted, scada',\n author=__author__,\n author_email='[email protected]',\n maintainer=__maintainer__,\n maintainer_email='[email protected]',\n url='https://github.com/riptideio/pymodbus/',\n license='BSD',\n packages=find_packages(exclude=['examples', 'test']),\n exclude_package_data={'': ['examples', 'test', 'tools', 'doc']},\n py_modules=['ez_setup'],\n platforms=['Linux', 'Mac OS X', 'Win'],\n include_package_data=True,\n zip_safe=True,\n install_requires=install_requires,\n extras_require={\n 'quality': [\n 'coverage >= 3.5.3',\n 'nose >= 1.2.1',\n 'mock >= 1.0.0',\n 'pep8 >= 1.3.3'\n ],\n 'documents': ['sphinx >= 1.1.3',\n 'sphinx_rtd_theme',\n 'humanfriendly'],\n 'twisted': [\n 'twisted >= 12.2.0',\n 'pyasn1 >= 0.1.4',\n 'pycrypto >= 2.6'\n ],\n 'tornado': [\n 'tornado >= 4.5.3'\n ],\n 'repl': [\n 'click>=6.7',\n 'prompt-toolkit==2.0.4',\n 'pygments==2.2.0'\n ]\n },\n entry_points={\n 'console_scripts': ['pymodbus.console=pymodbus.repl.main:main'],\n },\n test_suite='nose.collector',\n cmdclass=command_classes,\n)\n\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nInstalls pymodbus using distutils\n\nRun:\n python setup.py install\nto install the package from the source archive.\n\nFor information about setuptools\nhttp://peak.telecommunity.com/DevCenter/setuptools#new-and-changed-setup-keywords\n\"\"\"\n\n# --------------------------------------------------------------------------- #\n# initialization\n# --------------------------------------------------------------------------- #\ntry: # if not installed, install and proceed\n from setuptools import setup, find_packages\nexcept ImportError:\n from ez_setup import use_setuptools\n use_setuptools()\n from setuptools import setup, find_packages\n\ntry:\n from setup_commands import command_classes\nexcept ImportError:\n command_classes={}\nfrom pymodbus import __version__, __author__, __maintainer__\n\nwith open('requirements.txt') as reqs:\n install_requires = [\n line for line in reqs.read().split('\\n')\n if (line and not line.startswith('--'))\n ]\n install_requires.append(\"pyserial >= 3.4\")\n# --------------------------------------------------------------------------- #\n# configuration\n# --------------------------------------------------------------------------- #\nsetup(\n name=\"pymodbus\",\n version=__version__,\n description=\"A fully featured modbus protocol stack in python\",\n long_description=\"\"\"\n Pymodbus aims to be a fully implemented modbus protocol stack \n implemented using twisted/asyncio/tornado. 
\n Its orignal goal was to allow simulation of thousands of modbus devices\n on a single machine for monitoring software testing.\n \"\"\",\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Environment :: X11 Applications :: GTK',\n 'Framework :: Twisted',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: Unix',\n 'Programming Language :: Python',\n 'Topic :: System :: Networking',\n 'Topic :: Utilities'\n ],\n keywords='modbus, twisted, scada',\n author=__author__,\n author_email='[email protected]',\n maintainer=__maintainer__,\n maintainer_email='[email protected]',\n url='https://github.com/riptideio/pymodbus/',\n license='BSD',\n packages=find_packages(exclude=['examples', 'test']),\n exclude_package_data={'': ['examples', 'test', 'tools', 'doc']},\n py_modules=['ez_setup'],\n platforms=['Linux', 'Mac OS X', 'Win'],\n include_package_data=True,\n zip_safe=True,\n install_requires=install_requires,\n extras_require={\n 'quality': [\n 'coverage >= 3.5.3',\n 'nose >= 1.2.1',\n 'mock >= 1.0.0',\n 'pep8 >= 1.3.3'\n ],\n 'documents': ['sphinx >= 1.1.3',\n 'sphinx_rtd_theme',\n 'humanfriendly'],\n 'twisted': [\n 'twisted >= 12.2.0',\n 'pyasn1 >= 0.1.4',\n ],\n 'tornado': [\n 'tornado >= 4.5.3'\n ],\n 'repl': [\n 'click>=6.7',\n 'prompt-toolkit==2.0.4',\n 'pygments==2.2.0'\n ]\n },\n entry_points={\n 'console_scripts': ['pymodbus.console=pymodbus.repl.main:main'],\n },\n test_suite='nose.collector',\n cmdclass=command_classes,\n)\n\n", "path": "setup.py"}]}
| 1,535 | 95 |
gh_patches_debug_15259
|
rasdani/github-patches
|
git_diff
|
facebookresearch__Mephisto-832
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make URLs in terminal output clickable on launch
<img width="1028" alt="CleanShot 2022-07-15 at 10 43 57@2x" src="https://user-images.githubusercontent.com/425059/179247049-927a78f7-d6fd-414c-8d60-5732cc6393a3.png">
It's annoying to have to copy and paste the URLs from the terminal output into a browser on task launch.
```
# change:
localhost:3000/?worker_id=x&assignment_id=1
# to:
http://localhost:3000/?worker_id=x&assignment_id=1
```
Adding a protocol (http:// or https://) before the URL will make it easy to simply click on it to open (in some terminals). We should add this.
---
Note: I'm not sure if we need to decide between http and https based on certain scenarios.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mephisto/abstractions/providers/mock/mock_unit.py`
Content:
```
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6
7 from mephisto.data_model.unit import Unit
8 from mephisto.data_model.constants.assignment_state import AssignmentState
9 from mephisto.abstractions.blueprint import AgentState
10
11 from mephisto.abstractions.providers.mock.provider_type import PROVIDER_TYPE
12 from typing import List, Optional, Tuple, Dict, Mapping, Any, Type, TYPE_CHECKING
13
14 if TYPE_CHECKING:
15 from mephisto.abstractions.database import MephistoDB
16 from mephisto.data_model.assignment import Assignment
17 from mephisto.abstractions.providers.mock.mock_datastore import MockDatastore
18
19 from mephisto.utils.logger_core import get_logger
20
21 logger = get_logger(name=__name__)
22
23
24 class MockUnit(Unit):
25 """
26 This class tracks the status of an individual worker's contribution to a
27 higher level assignment. It is the smallest 'unit' of work to complete
28 the assignment, and this class is only responsible for checking
29 the status of that work itself being done.
30
31 It should be extended for usage with a specific crowd provider
32 """
33
34 def __init__(
35 self,
36 db: "MephistoDB",
37 db_id: str,
38 row: Optional[Mapping[str, Any]] = None,
39 _used_new_call: bool = False,
40 ):
41 super().__init__(db, db_id, row=row, _used_new_call=_used_new_call)
42 self.datastore: "MockDatastore" = db.get_datastore_for_provider(PROVIDER_TYPE)
43
44 def launch(self, task_url: str) -> None:
45 """Mock launches do nothing right now beyond updating state"""
46 self.set_db_status(status=AssignmentState.LAUNCHED)
47
48 # TODO(OWN) get this link to the frontend
49 port = task_url.split(":")[1].split("/")[0]
50 print(task_url)
51 print(
52 f"Mock task launched: localhost:{port} for preview, "
53 f"localhost:{port}/?worker_id=x&assignment_id={self.db_id}"
54 )
55 logger.info(
56 f"Mock task launched: localhost:{port} for preview, "
57 f"localhost:{port}/?worker_id=x&assignment_id={self.db_id} for assignment {self.assignment_id}"
58 )
59
60 return None
61
62 def expire(self) -> float:
63 """Expiration is immediate on Mocks"""
64 if self.get_status() not in [
65 AssignmentState.EXPIRED,
66 AssignmentState.COMPLETED,
67 ]:
68 self.set_db_status(AssignmentState.EXPIRED)
69 self.datastore.set_unit_expired(self.db_id, True)
70 return 0.0
71
72 def is_expired(self) -> bool:
73 """Determine if this unit is expired as according to the vendor."""
74 return self.datastore.get_unit_expired(self.db_id)
75
76 @staticmethod
77 def new(
78 db: "MephistoDB", assignment: "Assignment", index: int, pay_amount: float
79 ) -> "Unit":
80 """Create a Unit for the given assignment"""
81 return MockUnit._register_unit(db, assignment, index, pay_amount, PROVIDER_TYPE)
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mephisto/abstractions/providers/mock/mock_unit.py b/mephisto/abstractions/providers/mock/mock_unit.py
--- a/mephisto/abstractions/providers/mock/mock_unit.py
+++ b/mephisto/abstractions/providers/mock/mock_unit.py
@@ -49,12 +49,12 @@
port = task_url.split(":")[1].split("/")[0]
print(task_url)
print(
- f"Mock task launched: localhost:{port} for preview, "
- f"localhost:{port}/?worker_id=x&assignment_id={self.db_id}"
+ f"Mock task launched: http://localhost:{port} for preview, "
+ f"http://localhost:{port}/?worker_id=x&assignment_id={self.db_id}"
)
logger.info(
- f"Mock task launched: localhost:{port} for preview, "
- f"localhost:{port}/?worker_id=x&assignment_id={self.db_id} for assignment {self.assignment_id}"
+ f"Mock task launched: http://localhost:{port} for preview, "
+ f"http://localhost:{port}/?worker_id=x&assignment_id={self.db_id} for assignment {self.assignment_id}"
)
return None
|
{"golden_diff": "diff --git a/mephisto/abstractions/providers/mock/mock_unit.py b/mephisto/abstractions/providers/mock/mock_unit.py\n--- a/mephisto/abstractions/providers/mock/mock_unit.py\n+++ b/mephisto/abstractions/providers/mock/mock_unit.py\n@@ -49,12 +49,12 @@\n port = task_url.split(\":\")[1].split(\"/\")[0]\n print(task_url)\n print(\n- f\"Mock task launched: localhost:{port} for preview, \"\n- f\"localhost:{port}/?worker_id=x&assignment_id={self.db_id}\"\n+ f\"Mock task launched: http://localhost:{port} for preview, \"\n+ f\"http://localhost:{port}/?worker_id=x&assignment_id={self.db_id}\"\n )\n logger.info(\n- f\"Mock task launched: localhost:{port} for preview, \"\n- f\"localhost:{port}/?worker_id=x&assignment_id={self.db_id} for assignment {self.assignment_id}\"\n+ f\"Mock task launched: http://localhost:{port} for preview, \"\n+ f\"http://localhost:{port}/?worker_id=x&assignment_id={self.db_id} for assignment {self.assignment_id}\"\n )\n \n return None\n", "issue": "Make URLs in terminal output clickable on launch\n<img width=\"1028\" alt=\"CleanShot 2022-07-15 at 10 43 57@2x\" src=\"https://user-images.githubusercontent.com/425059/179247049-927a78f7-d6fd-414c-8d60-5732cc6393a3.png\">\r\n\r\nIt's annoying to have to copy and paste the URLs from the terminal output into a browesr on task launch.\r\n\r\n```\r\n# change:\r\n\r\nlocalhost:3000/?worker_id=x&assignment_id=1\r\n\r\n# to:\r\n\r\nhttp://localhost:3000/?worker_id=x&assignment_id=1\r\n```\r\n\r\nAdding a protocol (http: / https://) before the URL will make it easy to simply click on them to open (in some terminals). We should add this.\r\n\r\n---\r\n\r\nNote: I'm not sure if we need to decide between http or https based on certain scenarios\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom mephisto.data_model.unit import Unit\nfrom mephisto.data_model.constants.assignment_state import AssignmentState\nfrom mephisto.abstractions.blueprint import AgentState\n\nfrom mephisto.abstractions.providers.mock.provider_type import PROVIDER_TYPE\nfrom typing import List, Optional, Tuple, Dict, Mapping, Any, Type, TYPE_CHECKING\n\nif TYPE_CHECKING:\n from mephisto.abstractions.database import MephistoDB\n from mephisto.data_model.assignment import Assignment\n from mephisto.abstractions.providers.mock.mock_datastore import MockDatastore\n\nfrom mephisto.utils.logger_core import get_logger\n\nlogger = get_logger(name=__name__)\n\n\nclass MockUnit(Unit):\n \"\"\"\n This class tracks the status of an individual worker's contribution to a\n higher level assignment. 
It is the smallest 'unit' of work to complete\n the assignment, and this class is only responsible for checking\n the status of that work itself being done.\n\n It should be extended for usage with a specific crowd provider\n \"\"\"\n\n def __init__(\n self,\n db: \"MephistoDB\",\n db_id: str,\n row: Optional[Mapping[str, Any]] = None,\n _used_new_call: bool = False,\n ):\n super().__init__(db, db_id, row=row, _used_new_call=_used_new_call)\n self.datastore: \"MockDatastore\" = db.get_datastore_for_provider(PROVIDER_TYPE)\n\n def launch(self, task_url: str) -> None:\n \"\"\"Mock launches do nothing right now beyond updating state\"\"\"\n self.set_db_status(status=AssignmentState.LAUNCHED)\n\n # TODO(OWN) get this link to the frontend\n port = task_url.split(\":\")[1].split(\"/\")[0]\n print(task_url)\n print(\n f\"Mock task launched: localhost:{port} for preview, \"\n f\"localhost:{port}/?worker_id=x&assignment_id={self.db_id}\"\n )\n logger.info(\n f\"Mock task launched: localhost:{port} for preview, \"\n f\"localhost:{port}/?worker_id=x&assignment_id={self.db_id} for assignment {self.assignment_id}\"\n )\n\n return None\n\n def expire(self) -> float:\n \"\"\"Expiration is immediate on Mocks\"\"\"\n if self.get_status() not in [\n AssignmentState.EXPIRED,\n AssignmentState.COMPLETED,\n ]:\n self.set_db_status(AssignmentState.EXPIRED)\n self.datastore.set_unit_expired(self.db_id, True)\n return 0.0\n\n def is_expired(self) -> bool:\n \"\"\"Determine if this unit is expired as according to the vendor.\"\"\"\n return self.datastore.get_unit_expired(self.db_id)\n\n @staticmethod\n def new(\n db: \"MephistoDB\", assignment: \"Assignment\", index: int, pay_amount: float\n ) -> \"Unit\":\n \"\"\"Create a Unit for the given assignment\"\"\"\n return MockUnit._register_unit(db, assignment, index, pay_amount, PROVIDER_TYPE)\n", "path": "mephisto/abstractions/providers/mock/mock_unit.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom mephisto.data_model.unit import Unit\nfrom mephisto.data_model.constants.assignment_state import AssignmentState\nfrom mephisto.abstractions.blueprint import AgentState\n\nfrom mephisto.abstractions.providers.mock.provider_type import PROVIDER_TYPE\nfrom typing import List, Optional, Tuple, Dict, Mapping, Any, Type, TYPE_CHECKING\n\nif TYPE_CHECKING:\n from mephisto.abstractions.database import MephistoDB\n from mephisto.data_model.assignment import Assignment\n from mephisto.abstractions.providers.mock.mock_datastore import MockDatastore\n\nfrom mephisto.utils.logger_core import get_logger\n\nlogger = get_logger(name=__name__)\n\n\nclass MockUnit(Unit):\n \"\"\"\n This class tracks the status of an individual worker's contribution to a\n higher level assignment. 
It is the smallest 'unit' of work to complete\n the assignment, and this class is only responsible for checking\n the status of that work itself being done.\n\n It should be extended for usage with a specific crowd provider\n \"\"\"\n\n def __init__(\n self,\n db: \"MephistoDB\",\n db_id: str,\n row: Optional[Mapping[str, Any]] = None,\n _used_new_call: bool = False,\n ):\n super().__init__(db, db_id, row=row, _used_new_call=_used_new_call)\n self.datastore: \"MockDatastore\" = db.get_datastore_for_provider(PROVIDER_TYPE)\n\n def launch(self, task_url: str) -> None:\n \"\"\"Mock launches do nothing right now beyond updating state\"\"\"\n self.set_db_status(status=AssignmentState.LAUNCHED)\n\n # TODO(OWN) get this link to the frontend\n port = task_url.split(\":\")[1].split(\"/\")[0]\n print(task_url)\n print(\n f\"Mock task launched: http://localhost:{port} for preview, \"\n f\"http://localhost:{port}/?worker_id=x&assignment_id={self.db_id}\"\n )\n logger.info(\n f\"Mock task launched: http://localhost:{port} for preview, \"\n f\"http://localhost:{port}/?worker_id=x&assignment_id={self.db_id} for assignment {self.assignment_id}\"\n )\n\n return None\n\n def expire(self) -> float:\n \"\"\"Expiration is immediate on Mocks\"\"\"\n if self.get_status() not in [\n AssignmentState.EXPIRED,\n AssignmentState.COMPLETED,\n ]:\n self.set_db_status(AssignmentState.EXPIRED)\n self.datastore.set_unit_expired(self.db_id, True)\n return 0.0\n\n def is_expired(self) -> bool:\n \"\"\"Determine if this unit is expired as according to the vendor.\"\"\"\n return self.datastore.get_unit_expired(self.db_id)\n\n @staticmethod\n def new(\n db: \"MephistoDB\", assignment: \"Assignment\", index: int, pay_amount: float\n ) -> \"Unit\":\n \"\"\"Create a Unit for the given assignment\"\"\"\n return MockUnit._register_unit(db, assignment, index, pay_amount, PROVIDER_TYPE)\n", "path": "mephisto/abstractions/providers/mock/mock_unit.py"}]}
| 1,371 | 274 |
gh_patches_debug_35951
|
rasdani/github-patches
|
git_diff
|
cisagov__manage.get.gov-1997
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Django Admin: Indicate whether `creator` has other domains
### Issue description
Problem: an analyst would like to know whether the ~~**submitter**~~ **creator** is already associated with other domains in the system. This is an important indicator as to whether this submitter is trustworthy and established.
Solution: add an indicator near the **creator** name or email address, which shows how many other domains they are associated with in our system. This will be enhanced with a list of other requests or domains in a future ticket.
### Acceptance criteria
- [ ] a labeled indicator near the creator name, shows how many "Ready" domains they are a domain manager for, based on the user domain roles.
- [ ] a labeled indicator near the creator name, shows how many domain requests they have in progress.
- [ ] Labels are in plain English, as shown in Additional context.
### Additional context
Approved domains: nn
Active Requests: nn
Rejected or Ineligible: nn
Active requests will not include requests in the following states: started, approved, withdrawn.
Approved domains will not include deleted domains.
### Links to other issues
Blocked by #1852.
Related to: #1875 (1850 should be done first)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/registrar/models/contact.py`
Content:
```
1 from django.db import models
2
3 from .utility.time_stamped_model import TimeStampedModel
4
5 from phonenumber_field.modelfields import PhoneNumberField # type: ignore
6
7
8 class Contact(TimeStampedModel):
9 """Contact information follows a similar pattern for each contact."""
10
11 user = models.OneToOneField(
12 "registrar.User",
13 null=True,
14 blank=True,
15 on_delete=models.SET_NULL,
16 )
17
18 first_name = models.CharField(
19 null=True,
20 blank=True,
21 verbose_name="first name / given name",
22 db_index=True,
23 )
24 middle_name = models.CharField(
25 null=True,
26 blank=True,
27 )
28 last_name = models.CharField(
29 null=True,
30 blank=True,
31 verbose_name="last name / family name",
32 db_index=True,
33 )
34 title = models.CharField(
35 null=True,
36 blank=True,
37 verbose_name="title or role in your organization",
38 )
39 email = models.EmailField(
40 null=True,
41 blank=True,
42 db_index=True,
43 max_length=320,
44 )
45 phone = PhoneNumberField(
46 null=True,
47 blank=True,
48 db_index=True,
49 )
50
51 def _get_all_relations(self):
52 """Returns an array of all fields which are relations"""
53 return [f.name for f in self._meta.get_fields() if f.is_relation]
54
55 def has_more_than_one_join(self, expected_relation):
56 """Helper for finding whether an object is joined more than once.
57 expected_relation is the one relation with one expected join"""
58 # all_relations is the list of all_relations (from contact) to be checked for existing joins
59 all_relations = self._get_all_relations()
60 return any(self._has_more_than_one_join_per_relation(rel, expected_relation) for rel in all_relations)
61
62 def _has_more_than_one_join_per_relation(self, relation, expected_relation):
63 """Helper for finding whether an object is joined more than once."""
64 # threshold is the number of related objects that are acceptable
65 # when determining if related objects exist. threshold is 0 for most
66 # relationships. if the relationship is expected_relation, we know that
67 # there is already exactly 1 acceptable relationship (the one we are
68 # attempting to delete), so the threshold is 1
69 threshold = 1 if relation == expected_relation else 0
70
71 # Raise a KeyError if rel is not a defined field on the db_obj model
72 # This will help catch any errors in relation passed.
73 if relation not in [field.name for field in self._meta.get_fields()]:
74 raise KeyError(f"{relation} is not a defined field on the {self._meta.model_name} model.")
75
76 # if attr rel in db_obj is not None, then test if reference object(s) exist
77 if getattr(self, relation) is not None:
78 field = self._meta.get_field(relation)
79 if isinstance(field, models.OneToOneField):
80 # if the rel field is a OneToOne field, then we have already
81 # determined that the object exists (is not None)
82 # so return True unless the relation being tested is the expected_relation
83 is_not_expected_relation = relation != expected_relation
84 return is_not_expected_relation
85 elif isinstance(field, models.ForeignObjectRel):
86 # if the rel field is a ManyToOne or ManyToMany, then we need
87 # to determine if the count of related objects is greater than
88 # the threshold
89 return getattr(self, relation).count() > threshold
90 return False
91
92 def get_formatted_name(self):
93 """Returns the contact's name in Western order."""
94 names = [n for n in [self.first_name, self.middle_name, self.last_name] if n]
95 return " ".join(names) if names else "Unknown"
96
97 def save(self, *args, **kwargs):
98 # Call the parent class's save method to perform the actual save
99 super().save(*args, **kwargs)
100
101 # Update the related User object's first_name and last_name
102 if self.user and (not self.user.first_name or not self.user.last_name):
103 self.user.first_name = self.first_name
104 self.user.last_name = self.last_name
105 self.user.save()
106
107 def __str__(self):
108 if self.first_name or self.last_name:
109 return self.get_formatted_name()
110 elif self.email:
111 return self.email
112 elif self.pk:
113 return str(self.pk)
114 else:
115 return ""
116
```
Path: `src/registrar/models/user.py`
Content:
```
1 import logging
2
3 from django.contrib.auth.models import AbstractUser
4 from django.db import models
5
6 from registrar.models.user_domain_role import UserDomainRole
7
8 from .domain_invitation import DomainInvitation
9 from .transition_domain import TransitionDomain
10 from .verified_by_staff import VerifiedByStaff
11 from .domain import Domain
12
13 from phonenumber_field.modelfields import PhoneNumberField # type: ignore
14
15
16 logger = logging.getLogger(__name__)
17
18
19 class User(AbstractUser):
20 """
21 A custom user model that performs identically to the default user model
22 but can be customized later.
23 """
24
25 # #### Constants for choice fields ####
26 RESTRICTED = "restricted"
27 STATUS_CHOICES = ((RESTRICTED, RESTRICTED),)
28
29 status = models.CharField(
30 max_length=10,
31 choices=STATUS_CHOICES,
32 default=None, # Set the default value to None
33 null=True, # Allow the field to be null
34 blank=True, # Allow the field to be blank
35 )
36
37 domains = models.ManyToManyField(
38 "registrar.Domain",
39 through="registrar.UserDomainRole",
40 related_name="users",
41 )
42
43 phone = PhoneNumberField(
44 null=True,
45 blank=True,
46 help_text="Phone",
47 db_index=True,
48 )
49
50 def __str__(self):
51 # this info is pulled from Login.gov
52 if self.first_name or self.last_name:
53 return f"{self.first_name or ''} {self.last_name or ''} {self.email or ''}"
54 elif self.email:
55 return self.email
56 else:
57 return self.username
58
59 def restrict_user(self):
60 self.status = self.RESTRICTED
61 self.save()
62
63 def unrestrict_user(self):
64 self.status = None
65 self.save()
66
67 def is_restricted(self):
68 return self.status == self.RESTRICTED
69
70 @classmethod
71 def needs_identity_verification(cls, email, uuid):
72 """A method used by our oidc classes to test whether a user needs email/uuid verification
73 or the full identity PII verification"""
74
75 # An existing user who is a domain manager of a domain (that is,
76 # they have an entry in UserDomainRole for their User)
77 try:
78 existing_user = cls.objects.get(username=uuid)
79 if existing_user and UserDomainRole.objects.filter(user=existing_user).exists():
80 return False
81 except cls.DoesNotExist:
82 # Do nothing when the user is not found, as we're checking for existence.
83 pass
84 except Exception as err:
85 raise err
86
87 # A new incoming user who is a domain manager for one of the domains
88 # that we inputted from Verisign (that is, their email address appears
89 # in the username field of a TransitionDomain)
90 if TransitionDomain.objects.filter(username=email).exists():
91 return False
92
93 # New users flagged by Staff to bypass ial2
94 if VerifiedByStaff.objects.filter(email=email).exists():
95 return False
96
97 # A new incoming user who is being invited to be a domain manager (that is,
98 # their email address is in DomainInvitation for an invitation that is not yet "retrieved").
99 invited = DomainInvitation.DomainInvitationStatus.INVITED
100 if DomainInvitation.objects.filter(email=email, status=invited).exists():
101 return False
102
103 return True
104
105 def check_domain_invitations_on_login(self):
106 """When a user first arrives on the site, we need to retrieve any domain
107 invitations that match their email address."""
108 for invitation in DomainInvitation.objects.filter(
109 email__iexact=self.email, status=DomainInvitation.DomainInvitationStatus.INVITED
110 ):
111 try:
112 invitation.retrieve()
113 invitation.save()
114 except RuntimeError:
115 # retrieving should not fail because of a missing user, but
116 # if it does fail, log the error so a new user can continue
117 # logging in
118 logger.warn("Failed to retrieve invitation %s", invitation, exc_info=True)
119
120 def create_domain_and_invite(self, transition_domain: TransitionDomain):
121 transition_domain_name = transition_domain.domain_name
122 transition_domain_status = transition_domain.status
123 transition_domain_email = transition_domain.username
124
125 # type safety check. name should never be none
126 if transition_domain_name is not None:
127 new_domain = Domain(name=transition_domain_name, state=transition_domain_status)
128 new_domain.save()
129 # check that a domain invitation doesn't already
130 # exist for this e-mail / Domain pair
131 domain_email_already_in_domain_invites = DomainInvitation.objects.filter(
132 email=transition_domain_email.lower(), domain=new_domain
133 ).exists()
134 if not domain_email_already_in_domain_invites:
135 # Create new domain invitation
136 new_domain_invitation = DomainInvitation(email=transition_domain_email.lower(), domain=new_domain)
137 new_domain_invitation.save()
138
139 def on_each_login(self):
140 """Callback each time the user is authenticated.
141
142 When a user arrives on the site each time, we need to retrieve any domain
143 invitations that match their email address.
144
145 We also need to check if they are logging in with the same e-mail
146 as a transition domain and update our domainInfo objects accordingly.
147 """
148
149 self.check_domain_invitations_on_login()
150
151 class Meta:
152 permissions = [
153 ("analyst_access_permission", "Analyst Access Permission"),
154 ("full_access_permission", "Full Access Permission"),
155 ]
156
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/registrar/models/contact.py b/src/registrar/models/contact.py
--- a/src/registrar/models/contact.py
+++ b/src/registrar/models/contact.py
@@ -94,6 +94,9 @@
names = [n for n in [self.first_name, self.middle_name, self.last_name] if n]
return " ".join(names) if names else "Unknown"
+ def has_contact_info(self):
+ return bool(self.title or self.email or self.phone)
+
def save(self, *args, **kwargs):
# Call the parent class's save method to perform the actual save
super().save(*args, **kwargs)
diff --git a/src/registrar/models/user.py b/src/registrar/models/user.py
--- a/src/registrar/models/user.py
+++ b/src/registrar/models/user.py
@@ -9,6 +9,7 @@
from .transition_domain import TransitionDomain
from .verified_by_staff import VerifiedByStaff
from .domain import Domain
+from .domain_request import DomainRequest
from phonenumber_field.modelfields import PhoneNumberField # type: ignore
@@ -67,6 +68,33 @@
def is_restricted(self):
return self.status == self.RESTRICTED
+ def get_approved_domains_count(self):
+ """Return count of approved domains"""
+ allowed_states = [Domain.State.UNKNOWN, Domain.State.DNS_NEEDED, Domain.State.READY, Domain.State.ON_HOLD]
+ approved_domains_count = self.domains.filter(state__in=allowed_states).count()
+ return approved_domains_count
+
+ def get_active_requests_count(self):
+ """Return count of active requests"""
+ allowed_states = [
+ DomainRequest.DomainRequestStatus.SUBMITTED,
+ DomainRequest.DomainRequestStatus.IN_REVIEW,
+ DomainRequest.DomainRequestStatus.ACTION_NEEDED,
+ ]
+ active_requests_count = self.domain_requests_created.filter(status__in=allowed_states).count()
+ return active_requests_count
+
+ def get_rejected_requests_count(self):
+ """Return count of rejected requests"""
+ return self.domain_requests_created.filter(status=DomainRequest.DomainRequestStatus.REJECTED).count()
+
+ def get_ineligible_requests_count(self):
+ """Return count of ineligible requests"""
+ return self.domain_requests_created.filter(status=DomainRequest.DomainRequestStatus.INELIGIBLE).count()
+
+ def has_contact_info(self):
+ return bool(self.contact.title or self.contact.email or self.contact.phone)
+
@classmethod
def needs_identity_verification(cls, email, uuid):
"""A method used by our oidc classes to test whether a user needs email/uuid verification
|
{"golden_diff": "diff --git a/src/registrar/models/contact.py b/src/registrar/models/contact.py\n--- a/src/registrar/models/contact.py\n+++ b/src/registrar/models/contact.py\n@@ -94,6 +94,9 @@\n names = [n for n in [self.first_name, self.middle_name, self.last_name] if n]\n return \" \".join(names) if names else \"Unknown\"\n \n+ def has_contact_info(self):\n+ return bool(self.title or self.email or self.phone)\n+\n def save(self, *args, **kwargs):\n # Call the parent class's save method to perform the actual save\n super().save(*args, **kwargs)\ndiff --git a/src/registrar/models/user.py b/src/registrar/models/user.py\n--- a/src/registrar/models/user.py\n+++ b/src/registrar/models/user.py\n@@ -9,6 +9,7 @@\n from .transition_domain import TransitionDomain\n from .verified_by_staff import VerifiedByStaff\n from .domain import Domain\n+from .domain_request import DomainRequest\n \n from phonenumber_field.modelfields import PhoneNumberField # type: ignore\n \n@@ -67,6 +68,33 @@\n def is_restricted(self):\n return self.status == self.RESTRICTED\n \n+ def get_approved_domains_count(self):\n+ \"\"\"Return count of approved domains\"\"\"\n+ allowed_states = [Domain.State.UNKNOWN, Domain.State.DNS_NEEDED, Domain.State.READY, Domain.State.ON_HOLD]\n+ approved_domains_count = self.domains.filter(state__in=allowed_states).count()\n+ return approved_domains_count\n+\n+ def get_active_requests_count(self):\n+ \"\"\"Return count of active requests\"\"\"\n+ allowed_states = [\n+ DomainRequest.DomainRequestStatus.SUBMITTED,\n+ DomainRequest.DomainRequestStatus.IN_REVIEW,\n+ DomainRequest.DomainRequestStatus.ACTION_NEEDED,\n+ ]\n+ active_requests_count = self.domain_requests_created.filter(status__in=allowed_states).count()\n+ return active_requests_count\n+\n+ def get_rejected_requests_count(self):\n+ \"\"\"Return count of rejected requests\"\"\"\n+ return self.domain_requests_created.filter(status=DomainRequest.DomainRequestStatus.REJECTED).count()\n+\n+ def get_ineligible_requests_count(self):\n+ \"\"\"Return count of ineligible requests\"\"\"\n+ return self.domain_requests_created.filter(status=DomainRequest.DomainRequestStatus.INELIGIBLE).count()\n+\n+ def has_contact_info(self):\n+ return bool(self.contact.title or self.contact.email or self.contact.phone)\n+\n @classmethod\n def needs_identity_verification(cls, email, uuid):\n \"\"\"A method used by our oidc classes to test whether a user needs email/uuid verification\n", "issue": "Django Admin: Indicate whether `creator` has other domains\n### Issue description\n\nProblem: an analyst would like to know whether the ~~**submitter**~~ **creator** is already associated with other domains in the system. This is an important indicator as to whether this submitter is trustworthy and established.\n\nSolution: add an indicator near the **creator** name or email address, which shows how many other domains they are associated with in our system. 
This will be enhanced with a list of other requests or domains in a future ticket.\n\n\n### Acceptance criteria\n\n- [ ] a labeled indicator near the creator name, shows how many \"Ready\" domains they are a domain manager for, based on the user domain roles.\n- [ ] a labeled indicator near the creator name, shows how many domain requests they have in progress.\n- [ ] Labels are in plain english, as shown in Additional context.\n\n\n### Additional context\nApproved domains: nn\nActive Requests : nn \nRejected or Ineligible: nn\n\nactive requests will not include requests in the following states:started, approved, withdrawn\napproved domains will not include deleted domains\n\n### Links to other issues\n\nBlocked by #1852.\nRelated to: #1875 (1850 should be done first)\n", "before_files": [{"content": "from django.db import models\n\nfrom .utility.time_stamped_model import TimeStampedModel\n\nfrom phonenumber_field.modelfields import PhoneNumberField # type: ignore\n\n\nclass Contact(TimeStampedModel):\n \"\"\"Contact information follows a similar pattern for each contact.\"\"\"\n\n user = models.OneToOneField(\n \"registrar.User\",\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n )\n\n first_name = models.CharField(\n null=True,\n blank=True,\n verbose_name=\"first name / given name\",\n db_index=True,\n )\n middle_name = models.CharField(\n null=True,\n blank=True,\n )\n last_name = models.CharField(\n null=True,\n blank=True,\n verbose_name=\"last name / family name\",\n db_index=True,\n )\n title = models.CharField(\n null=True,\n blank=True,\n verbose_name=\"title or role in your organization\",\n )\n email = models.EmailField(\n null=True,\n blank=True,\n db_index=True,\n max_length=320,\n )\n phone = PhoneNumberField(\n null=True,\n blank=True,\n db_index=True,\n )\n\n def _get_all_relations(self):\n \"\"\"Returns an array of all fields which are relations\"\"\"\n return [f.name for f in self._meta.get_fields() if f.is_relation]\n\n def has_more_than_one_join(self, expected_relation):\n \"\"\"Helper for finding whether an object is joined more than once.\n expected_relation is the one relation with one expected join\"\"\"\n # all_relations is the list of all_relations (from contact) to be checked for existing joins\n all_relations = self._get_all_relations()\n return any(self._has_more_than_one_join_per_relation(rel, expected_relation) for rel in all_relations)\n\n def _has_more_than_one_join_per_relation(self, relation, expected_relation):\n \"\"\"Helper for finding whether an object is joined more than once.\"\"\"\n # threshold is the number of related objects that are acceptable\n # when determining if related objects exist. threshold is 0 for most\n # relationships. 
if the relationship is expected_relation, we know that\n # there is already exactly 1 acceptable relationship (the one we are\n # attempting to delete), so the threshold is 1\n threshold = 1 if relation == expected_relation else 0\n\n # Raise a KeyError if rel is not a defined field on the db_obj model\n # This will help catch any errors in relation passed.\n if relation not in [field.name for field in self._meta.get_fields()]:\n raise KeyError(f\"{relation} is not a defined field on the {self._meta.model_name} model.\")\n\n # if attr rel in db_obj is not None, then test if reference object(s) exist\n if getattr(self, relation) is not None:\n field = self._meta.get_field(relation)\n if isinstance(field, models.OneToOneField):\n # if the rel field is a OneToOne field, then we have already\n # determined that the object exists (is not None)\n # so return True unless the relation being tested is the expected_relation\n is_not_expected_relation = relation != expected_relation\n return is_not_expected_relation\n elif isinstance(field, models.ForeignObjectRel):\n # if the rel field is a ManyToOne or ManyToMany, then we need\n # to determine if the count of related objects is greater than\n # the threshold\n return getattr(self, relation).count() > threshold\n return False\n\n def get_formatted_name(self):\n \"\"\"Returns the contact's name in Western order.\"\"\"\n names = [n for n in [self.first_name, self.middle_name, self.last_name] if n]\n return \" \".join(names) if names else \"Unknown\"\n\n def save(self, *args, **kwargs):\n # Call the parent class's save method to perform the actual save\n super().save(*args, **kwargs)\n\n # Update the related User object's first_name and last_name\n if self.user and (not self.user.first_name or not self.user.last_name):\n self.user.first_name = self.first_name\n self.user.last_name = self.last_name\n self.user.save()\n\n def __str__(self):\n if self.first_name or self.last_name:\n return self.get_formatted_name()\n elif self.email:\n return self.email\n elif self.pk:\n return str(self.pk)\n else:\n return \"\"\n", "path": "src/registrar/models/contact.py"}, {"content": "import logging\n\nfrom django.contrib.auth.models import AbstractUser\nfrom django.db import models\n\nfrom registrar.models.user_domain_role import UserDomainRole\n\nfrom .domain_invitation import DomainInvitation\nfrom .transition_domain import TransitionDomain\nfrom .verified_by_staff import VerifiedByStaff\nfrom .domain import Domain\n\nfrom phonenumber_field.modelfields import PhoneNumberField # type: ignore\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass User(AbstractUser):\n \"\"\"\n A custom user model that performs identically to the default user model\n but can be customized later.\n \"\"\"\n\n # #### Constants for choice fields ####\n RESTRICTED = \"restricted\"\n STATUS_CHOICES = ((RESTRICTED, RESTRICTED),)\n\n status = models.CharField(\n max_length=10,\n choices=STATUS_CHOICES,\n default=None, # Set the default value to None\n null=True, # Allow the field to be null\n blank=True, # Allow the field to be blank\n )\n\n domains = models.ManyToManyField(\n \"registrar.Domain\",\n through=\"registrar.UserDomainRole\",\n related_name=\"users\",\n )\n\n phone = PhoneNumberField(\n null=True,\n blank=True,\n help_text=\"Phone\",\n db_index=True,\n )\n\n def __str__(self):\n # this info is pulled from Login.gov\n if self.first_name or self.last_name:\n return f\"{self.first_name or ''} {self.last_name or ''} {self.email or ''}\"\n elif self.email:\n return self.email\n else:\n 
return self.username\n\n def restrict_user(self):\n self.status = self.RESTRICTED\n self.save()\n\n def unrestrict_user(self):\n self.status = None\n self.save()\n\n def is_restricted(self):\n return self.status == self.RESTRICTED\n\n @classmethod\n def needs_identity_verification(cls, email, uuid):\n \"\"\"A method used by our oidc classes to test whether a user needs email/uuid verification\n or the full identity PII verification\"\"\"\n\n # An existing user who is a domain manager of a domain (that is,\n # they have an entry in UserDomainRole for their User)\n try:\n existing_user = cls.objects.get(username=uuid)\n if existing_user and UserDomainRole.objects.filter(user=existing_user).exists():\n return False\n except cls.DoesNotExist:\n # Do nothing when the user is not found, as we're checking for existence.\n pass\n except Exception as err:\n raise err\n\n # A new incoming user who is a domain manager for one of the domains\n # that we inputted from Verisign (that is, their email address appears\n # in the username field of a TransitionDomain)\n if TransitionDomain.objects.filter(username=email).exists():\n return False\n\n # New users flagged by Staff to bypass ial2\n if VerifiedByStaff.objects.filter(email=email).exists():\n return False\n\n # A new incoming user who is being invited to be a domain manager (that is,\n # their email address is in DomainInvitation for an invitation that is not yet \"retrieved\").\n invited = DomainInvitation.DomainInvitationStatus.INVITED\n if DomainInvitation.objects.filter(email=email, status=invited).exists():\n return False\n\n return True\n\n def check_domain_invitations_on_login(self):\n \"\"\"When a user first arrives on the site, we need to retrieve any domain\n invitations that match their email address.\"\"\"\n for invitation in DomainInvitation.objects.filter(\n email__iexact=self.email, status=DomainInvitation.DomainInvitationStatus.INVITED\n ):\n try:\n invitation.retrieve()\n invitation.save()\n except RuntimeError:\n # retrieving should not fail because of a missing user, but\n # if it does fail, log the error so a new user can continue\n # logging in\n logger.warn(\"Failed to retrieve invitation %s\", invitation, exc_info=True)\n\n def create_domain_and_invite(self, transition_domain: TransitionDomain):\n transition_domain_name = transition_domain.domain_name\n transition_domain_status = transition_domain.status\n transition_domain_email = transition_domain.username\n\n # type safety check. 
name should never be none\n if transition_domain_name is not None:\n new_domain = Domain(name=transition_domain_name, state=transition_domain_status)\n new_domain.save()\n # check that a domain invitation doesn't already\n # exist for this e-mail / Domain pair\n domain_email_already_in_domain_invites = DomainInvitation.objects.filter(\n email=transition_domain_email.lower(), domain=new_domain\n ).exists()\n if not domain_email_already_in_domain_invites:\n # Create new domain invitation\n new_domain_invitation = DomainInvitation(email=transition_domain_email.lower(), domain=new_domain)\n new_domain_invitation.save()\n\n def on_each_login(self):\n \"\"\"Callback each time the user is authenticated.\n\n When a user arrives on the site each time, we need to retrieve any domain\n invitations that match their email address.\n\n We also need to check if they are logging in with the same e-mail\n as a transition domain and update our domainInfo objects accordingly.\n \"\"\"\n\n self.check_domain_invitations_on_login()\n\n class Meta:\n permissions = [\n (\"analyst_access_permission\", \"Analyst Access Permission\"),\n (\"full_access_permission\", \"Full Access Permission\"),\n ]\n", "path": "src/registrar/models/user.py"}], "after_files": [{"content": "from django.db import models\n\nfrom .utility.time_stamped_model import TimeStampedModel\n\nfrom phonenumber_field.modelfields import PhoneNumberField # type: ignore\n\n\nclass Contact(TimeStampedModel):\n \"\"\"Contact information follows a similar pattern for each contact.\"\"\"\n\n user = models.OneToOneField(\n \"registrar.User\",\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n )\n\n first_name = models.CharField(\n null=True,\n blank=True,\n verbose_name=\"first name / given name\",\n db_index=True,\n )\n middle_name = models.CharField(\n null=True,\n blank=True,\n )\n last_name = models.CharField(\n null=True,\n blank=True,\n verbose_name=\"last name / family name\",\n db_index=True,\n )\n title = models.CharField(\n null=True,\n blank=True,\n verbose_name=\"title or role in your organization\",\n )\n email = models.EmailField(\n null=True,\n blank=True,\n db_index=True,\n max_length=320,\n )\n phone = PhoneNumberField(\n null=True,\n blank=True,\n db_index=True,\n )\n\n def _get_all_relations(self):\n \"\"\"Returns an array of all fields which are relations\"\"\"\n return [f.name for f in self._meta.get_fields() if f.is_relation]\n\n def has_more_than_one_join(self, expected_relation):\n \"\"\"Helper for finding whether an object is joined more than once.\n expected_relation is the one relation with one expected join\"\"\"\n # all_relations is the list of all_relations (from contact) to be checked for existing joins\n all_relations = self._get_all_relations()\n return any(self._has_more_than_one_join_per_relation(rel, expected_relation) for rel in all_relations)\n\n def _has_more_than_one_join_per_relation(self, relation, expected_relation):\n \"\"\"Helper for finding whether an object is joined more than once.\"\"\"\n # threshold is the number of related objects that are acceptable\n # when determining if related objects exist. threshold is 0 for most\n # relationships. 
if the relationship is expected_relation, we know that\n # there is already exactly 1 acceptable relationship (the one we are\n # attempting to delete), so the threshold is 1\n threshold = 1 if relation == expected_relation else 0\n\n # Raise a KeyError if rel is not a defined field on the db_obj model\n # This will help catch any errors in relation passed.\n if relation not in [field.name for field in self._meta.get_fields()]:\n raise KeyError(f\"{relation} is not a defined field on the {self._meta.model_name} model.\")\n\n # if attr rel in db_obj is not None, then test if reference object(s) exist\n if getattr(self, relation) is not None:\n field = self._meta.get_field(relation)\n if isinstance(field, models.OneToOneField):\n # if the rel field is a OneToOne field, then we have already\n # determined that the object exists (is not None)\n # so return True unless the relation being tested is the expected_relation\n is_not_expected_relation = relation != expected_relation\n return is_not_expected_relation\n elif isinstance(field, models.ForeignObjectRel):\n # if the rel field is a ManyToOne or ManyToMany, then we need\n # to determine if the count of related objects is greater than\n # the threshold\n return getattr(self, relation).count() > threshold\n return False\n\n def get_formatted_name(self):\n \"\"\"Returns the contact's name in Western order.\"\"\"\n names = [n for n in [self.first_name, self.middle_name, self.last_name] if n]\n return \" \".join(names) if names else \"Unknown\"\n\n def has_contact_info(self):\n return bool(self.title or self.email or self.phone)\n\n def save(self, *args, **kwargs):\n # Call the parent class's save method to perform the actual save\n super().save(*args, **kwargs)\n\n # Update the related User object's first_name and last_name\n if self.user and (not self.user.first_name or not self.user.last_name):\n self.user.first_name = self.first_name\n self.user.last_name = self.last_name\n self.user.save()\n\n def __str__(self):\n if self.first_name or self.last_name:\n return self.get_formatted_name()\n elif self.email:\n return self.email\n elif self.pk:\n return str(self.pk)\n else:\n return \"\"\n", "path": "src/registrar/models/contact.py"}, {"content": "import logging\n\nfrom django.contrib.auth.models import AbstractUser\nfrom django.db import models\n\nfrom registrar.models.user_domain_role import UserDomainRole\n\nfrom .domain_invitation import DomainInvitation\nfrom .transition_domain import TransitionDomain\nfrom .verified_by_staff import VerifiedByStaff\nfrom .domain import Domain\nfrom .domain_request import DomainRequest\n\nfrom phonenumber_field.modelfields import PhoneNumberField # type: ignore\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass User(AbstractUser):\n \"\"\"\n A custom user model that performs identically to the default user model\n but can be customized later.\n \"\"\"\n\n # #### Constants for choice fields ####\n RESTRICTED = \"restricted\"\n STATUS_CHOICES = ((RESTRICTED, RESTRICTED),)\n\n status = models.CharField(\n max_length=10,\n choices=STATUS_CHOICES,\n default=None, # Set the default value to None\n null=True, # Allow the field to be null\n blank=True, # Allow the field to be blank\n )\n\n domains = models.ManyToManyField(\n \"registrar.Domain\",\n through=\"registrar.UserDomainRole\",\n related_name=\"users\",\n )\n\n phone = PhoneNumberField(\n null=True,\n blank=True,\n help_text=\"Phone\",\n db_index=True,\n )\n\n def __str__(self):\n # this info is pulled from Login.gov\n if self.first_name or 
self.last_name:\n return f\"{self.first_name or ''} {self.last_name or ''} {self.email or ''}\"\n elif self.email:\n return self.email\n else:\n return self.username\n\n def restrict_user(self):\n self.status = self.RESTRICTED\n self.save()\n\n def unrestrict_user(self):\n self.status = None\n self.save()\n\n def is_restricted(self):\n return self.status == self.RESTRICTED\n\n def get_approved_domains_count(self):\n \"\"\"Return count of approved domains\"\"\"\n allowed_states = [Domain.State.UNKNOWN, Domain.State.DNS_NEEDED, Domain.State.READY, Domain.State.ON_HOLD]\n approved_domains_count = self.domains.filter(state__in=allowed_states).count()\n return approved_domains_count\n\n def get_active_requests_count(self):\n \"\"\"Return count of active requests\"\"\"\n allowed_states = [\n DomainRequest.DomainRequestStatus.SUBMITTED,\n DomainRequest.DomainRequestStatus.IN_REVIEW,\n DomainRequest.DomainRequestStatus.ACTION_NEEDED,\n ]\n active_requests_count = self.domain_requests_created.filter(status__in=allowed_states).count()\n return active_requests_count\n\n def get_rejected_requests_count(self):\n \"\"\"Return count of rejected requests\"\"\"\n return self.domain_requests_created.filter(status=DomainRequest.DomainRequestStatus.REJECTED).count()\n\n def get_ineligible_requests_count(self):\n \"\"\"Return count of ineligible requests\"\"\"\n return self.domain_requests_created.filter(status=DomainRequest.DomainRequestStatus.INELIGIBLE).count()\n\n def has_contact_info(self):\n return bool(self.contact.title or self.contact.email or self.contact.phone)\n\n @classmethod\n def needs_identity_verification(cls, email, uuid):\n \"\"\"A method used by our oidc classes to test whether a user needs email/uuid verification\n or the full identity PII verification\"\"\"\n\n # An existing user who is a domain manager of a domain (that is,\n # they have an entry in UserDomainRole for their User)\n try:\n existing_user = cls.objects.get(username=uuid)\n if existing_user and UserDomainRole.objects.filter(user=existing_user).exists():\n return False\n except cls.DoesNotExist:\n # Do nothing when the user is not found, as we're checking for existence.\n pass\n except Exception as err:\n raise err\n\n # A new incoming user who is a domain manager for one of the domains\n # that we inputted from Verisign (that is, their email address appears\n # in the username field of a TransitionDomain)\n if TransitionDomain.objects.filter(username=email).exists():\n return False\n\n # New users flagged by Staff to bypass ial2\n if VerifiedByStaff.objects.filter(email=email).exists():\n return False\n\n # A new incoming user who is being invited to be a domain manager (that is,\n # their email address is in DomainInvitation for an invitation that is not yet \"retrieved\").\n invited = DomainInvitation.DomainInvitationStatus.INVITED\n if DomainInvitation.objects.filter(email=email, status=invited).exists():\n return False\n\n return True\n\n def check_domain_invitations_on_login(self):\n \"\"\"When a user first arrives on the site, we need to retrieve any domain\n invitations that match their email address.\"\"\"\n for invitation in DomainInvitation.objects.filter(\n email__iexact=self.email, status=DomainInvitation.DomainInvitationStatus.INVITED\n ):\n try:\n invitation.retrieve()\n invitation.save()\n except RuntimeError:\n # retrieving should not fail because of a missing user, but\n # if it does fail, log the error so a new user can continue\n # logging in\n logger.warn(\"Failed to retrieve invitation %s\", invitation, 
exc_info=True)\n\n def create_domain_and_invite(self, transition_domain: TransitionDomain):\n transition_domain_name = transition_domain.domain_name\n transition_domain_status = transition_domain.status\n transition_domain_email = transition_domain.username\n\n # type safety check. name should never be none\n if transition_domain_name is not None:\n new_domain = Domain(name=transition_domain_name, state=transition_domain_status)\n new_domain.save()\n # check that a domain invitation doesn't already\n # exist for this e-mail / Domain pair\n domain_email_already_in_domain_invites = DomainInvitation.objects.filter(\n email=transition_domain_email.lower(), domain=new_domain\n ).exists()\n if not domain_email_already_in_domain_invites:\n # Create new domain invitation\n new_domain_invitation = DomainInvitation(email=transition_domain_email.lower(), domain=new_domain)\n new_domain_invitation.save()\n\n def on_each_login(self):\n \"\"\"Callback each time the user is authenticated.\n\n When a user arrives on the site each time, we need to retrieve any domain\n invitations that match their email address.\n\n We also need to check if they are logging in with the same e-mail\n as a transition domain and update our domainInfo objects accordingly.\n \"\"\"\n\n self.check_domain_invitations_on_login()\n\n class Meta:\n permissions = [\n (\"analyst_access_permission\", \"Analyst Access Permission\"),\n (\"full_access_permission\", \"Full Access Permission\"),\n ]\n", "path": "src/registrar/models/user.py"}]}
| 3,265 | 572 |
gh_patches_debug_6216
|
rasdani/github-patches
|
git_diff
|
interlegis__sapl-2075
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Legal norms search
Hello, I am testing SAPL 3.1 and noticed that the legal norms search does not return any results when the digit "0" is added before the norm's number.
--- END ISSUE ---
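The reported symptom is consistent with the norm number being compared as a raw string during the search, so a query of "015" never matches a stored "15". The sketch below only illustrates that failure mode under that assumption; the field name and the normalising helper are hypothetical and are not taken from the SAPL code included in this task.
```python
# Illustration only (assumed cause): exact string comparison on the norm number
# fails when the user types a leading zero, while normalising both sides matches.

def exact_match(stored_numero: str, query: str) -> bool:
    # Mirrors a naive filter(numero=query) that compares the raw strings.
    return stored_numero == query


def normalized_match(stored_numero: str, query: str) -> bool:
    # Hypothetical fix: strip leading zeros (or cast both sides to int) first.
    return stored_numero.lstrip("0") == query.lstrip("0")


if __name__ == "__main__":
    stored = "15"   # how the norm number is stored
    typed = "015"   # what the user typed in the search form
    print(exact_match(stored, typed))       # False -> "no results found"
    print(normalized_match(stored, typed))  # True
```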
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sapl/comissoes/forms.py`
Content:
```
1 from django import forms
2 from django.contrib.contenttypes.models import ContentType
3 from django.core.exceptions import ValidationError
4 from django.db import transaction
5 from django.db.models import Q
6 from django.forms import ModelForm
7 from django.utils.translation import ugettext_lazy as _
8
9 from sapl.base.models import Autor, TipoAutor
10 from sapl.comissoes.models import (Comissao, Composicao, DocumentoAcessorio,
11 Participacao, Reuniao, Periodo)
12 from sapl.parlamentares.models import Legislatura, Mandato, Parlamentar
13
14 class ComposicaoForm(forms.ModelForm):
15
16 comissao = forms.CharField(required=False, label='Comissao', widget=forms.HiddenInput())
17
18 class Meta:
19 model = Composicao
20 exclude = []
21
22 def __init__(self, user=None, **kwargs):
23 super(ComposicaoForm, self).__init__(**kwargs)
24 self.fields['comissao'].widget.attrs['disabled'] = 'disabled'
25
26 def clean(self):
27 cleaned_data = super(ComposicaoForm, self).clean()
28
29 if not self.is_valid():
30 return cleaned_data
31
32 periodo = cleaned_data['periodo']
33 comissao_pk = self.initial['comissao'].id
34 intersecao_periodo = Composicao.objects.filter(
35 Q(periodo__data_inicio__lte=periodo.data_fim,
36 periodo__data_fim__gte=periodo.data_fim) |
37 Q(periodo__data_inicio__gte=periodo.data_inicio,
38 periodo__data_fim__lte=periodo.data_inicio),
39 comissao_id=comissao_pk)
40
41 if intersecao_periodo:
42 raise ValidationError('O período informado '
43 'choca com períodos já '
44 'cadastrados para esta comissão')
45
46 return cleaned_data
47
48 class PeriodoForm(forms.ModelForm):
49
50 class Meta:
51 model = Periodo
52 exclude = []
53
54 def clean(self):
55 cleaned_data = super(PeriodoForm, self).clean()
56
57 if not self.is_valid():
58 return cleaned_data
59
60 data_inicio = cleaned_data['data_inicio']
61 data_fim = cleaned_data['data_fim']
62
63 if data_fim and data_fim < data_inicio:
64 raise ValidationError('A Data Final não pode ser menor que '
65 'a Data Inicial')
66 return cleaned_data
67
68
69 class ParticipacaoCreateForm(forms.ModelForm):
70
71 parent_pk = forms.CharField(required=False) # widget=forms.HiddenInput())
72
73 class Meta:
74 model = Participacao
75 fields = '__all__'
76 exclude = ['composicao']
77
78 def __init__(self, user=None, **kwargs):
79 super(ParticipacaoCreateForm, self).__init__(**kwargs)
80
81 if self.instance:
82 comissao = kwargs['initial']
83 comissao_pk = int(comissao['parent_pk'])
84 composicao = Composicao.objects.get(id=comissao_pk)
85 participantes = composicao.participacao_set.all()
86 id_part = [p.parlamentar.id for p in participantes]
87 else:
88 id_part = []
89
90 qs = self.create_participacao()
91
92 parlamentares = Mandato.objects.filter(qs,
93 parlamentar__ativo=True
94 ).prefetch_related('parlamentar').\
95 values_list('parlamentar',
96 flat=True
97 ).distinct()
98
99 qs = Parlamentar.objects.filter(id__in=parlamentares).distinct().\
100 exclude(id__in=id_part)
101 eligible = self.verifica()
102 result = list(set(qs) & set(eligible))
103 if not cmp(result, eligible): # se igual a 0 significa que o qs e o eli são iguais!
104 self.fields['parlamentar'].queryset = qs
105 else:
106 ids = [e.id for e in eligible]
107 qs = Parlamentar.objects.filter(id__in=ids)
108 self.fields['parlamentar'].queryset = qs
109
110
111 def clean(self):
112 cleaned_data = super(ParticipacaoCreateForm, self).clean()
113
114 if not self.is_valid():
115 return cleaned_data
116
117 data_designacao = cleaned_data['data_designacao']
118 data_desligamento = cleaned_data['data_desligamento']
119
120 if data_desligamento and \
121 data_designacao > data_desligamento:
122 raise ValidationError(_('Data de designação não pode ser superior '
123 'à data de desligamento'))
124
125 composicao = Composicao.objects.get(id=self.initial['parent_pk'])
126 cargos_unicos = [c.cargo.nome for c in composicao.participacao_set.filter(cargo__unico=True)]
127
128 if cleaned_data['cargo'].nome in cargos_unicos:
129 msg = _('Este cargo é único para esta Comissão.')
130 raise ValidationError(msg)
131 return cleaned_data
132
133
134 def create_participacao(self):
135 composicao = Composicao.objects.get(id=self.initial['parent_pk'])
136 data_inicio_comissao = composicao.periodo.data_inicio
137 data_fim_comissao = composicao.periodo.data_fim
138 q1 = Q(data_fim_mandato__isnull=False,
139 data_fim_mandato__gte=data_inicio_comissao)
140 q2 = Q(data_inicio_mandato__gte=data_inicio_comissao) \
141 & Q(data_inicio_mandato__lte=data_fim_comissao)
142 q3 = Q(data_fim_mandato__isnull=True,
143 data_inicio_mandato__lte=data_inicio_comissao)
144 qs = q1 | q2 | q3
145 return qs
146
147 def verifica(self):
148 composicao = Composicao.objects.get(id=self.initial['parent_pk'])
149 participantes = composicao.participacao_set.all()
150 participantes_id = [p.parlamentar.id for p in participantes]
151 parlamentares = Parlamentar.objects.all().exclude(
152 id__in=participantes_id).order_by('nome_completo')
153 parlamentares = [p for p in parlamentares if p.ativo]
154
155 lista = []
156
157 for p in parlamentares:
158 mandatos = p.mandato_set.all()
159 for m in mandatos:
160 data_inicio = m.data_inicio_mandato
161 data_fim = m.data_fim_mandato
162 comp_data_inicio = composicao.periodo.data_inicio
163 comp_data_fim = composicao.periodo.data_fim
164 if (data_fim and data_fim >= comp_data_inicio)\
165 or (data_inicio >= comp_data_inicio and data_inicio <= comp_data_fim)\
166 or (data_fim is None and data_inicio <= comp_data_inicio):
167 lista.append(p)
168
169 lista = list(set(lista))
170
171 return lista
172
173
174 class ParticipacaoEditForm(forms.ModelForm):
175
176 parent_pk = forms.CharField(required=False) # widget=forms.HiddenInput())
177 nome_parlamentar = forms.CharField(required=False, label='Parlamentar')
178
179 class Meta:
180 model = Participacao
181 fields = ['nome_parlamentar', 'parlamentar', 'cargo', 'titular',
182 'data_designacao', 'data_desligamento',
183 'motivo_desligamento', 'observacao']
184 widgets = {
185 'parlamentar': forms.HiddenInput(),
186 }
187
188 def __init__(self, user=None, **kwargs):
189 super(ParticipacaoEditForm, self).__init__(**kwargs)
190 self.initial['nome_parlamentar'] = Parlamentar.objects.get(
191 id=self.initial['parlamentar']).nome_parlamentar
192 self.fields['nome_parlamentar'].widget.attrs['disabled'] = 'disabled'
193
194 def clean(self):
195 cleaned_data = super(ParticipacaoEditForm, self).clean()
196
197 if not self.is_valid():
198 return cleaned_data
199
200 data_designacao = cleaned_data['data_designacao']
201 data_desligamento = cleaned_data['data_desligamento']
202
203 if data_desligamento and \
204 data_designacao > data_desligamento:
205 raise ValidationError(_('Data de designação não pode ser superior '
206 'à data de desligamento'))
207
208 composicao_id = self.instance.composicao_id
209
210 composicao = Composicao.objects.get(id=composicao_id)
211 cargos_unicos = [c.cargo.nome for c in composicao.participacao_set.filter(cargo__unico=True)]
212
213 if cleaned_data['cargo'].nome in cargos_unicos:
214 msg = _('Este cargo é único para esta Comissão.')
215 raise ValidationError(msg)
216
217 return cleaned_data
218
219
220 class ComissaoForm(forms.ModelForm):
221
222 class Meta:
223 model = Comissao
224 fields = '__all__'
225
226 def __init__(self, user=None, **kwargs):
227 super(ComissaoForm, self).__init__(**kwargs)
228 inst = self.instance
229 if inst.pk:
230 if inst.tipo.natureza == 'P':
231 self.fields['apelido_temp'].widget.attrs['disabled'] = 'disabled'
232 self.fields['data_instalacao_temp'].widget.attrs['disabled'] = 'disabled'
233 self.fields['data_final_prevista_temp'].widget.attrs['disabled'] = 'disabled'
234 self.fields['data_prorrogada_temp'].widget.attrs['disabled'] = 'disabled'
235 self.fields['data_fim_comissao'].widget.attrs['disabled'] = 'disabled'
236
237
238
239 def clean(self):
240 super(ComissaoForm, self).clean()
241
242 if not self.is_valid():
243 return self.cleaned_data
244
245 if len(self.cleaned_data['nome']) > 50:
246 msg = _('Nome da Comissão deve ter no máximo 50 caracteres.')
247 raise ValidationError(msg)
248 if self.cleaned_data['data_extincao']:
249 if (self.cleaned_data['data_extincao'] <
250 self.cleaned_data['data_criacao']):
251 msg = _('Data de extinção não pode ser menor que a de criação')
252 raise ValidationError(msg)
253 return self.cleaned_data
254
255 @transaction.atomic
256 def save(self, commit=True):
257 inst = self.instance
258 if not inst.pk:
259 comissao = super(ComissaoForm, self).save(commit)
260 content_type = ContentType.objects.get_for_model(Comissao)
261 object_id = comissao.pk
262 tipo = TipoAutor.objects.get(descricao__icontains='Comiss')
263 nome = comissao.sigla + ' - ' + comissao.nome
264 Autor.objects.create(
265 content_type=content_type,
266 object_id=object_id,
267 tipo=tipo,
268 nome=nome
269 )
270 return comissao
271 else:
272 comissao = super(ComissaoForm, self).save(commit)
273 return comissao
274
275
276 class ReuniaoForm(ModelForm):
277
278 comissao = forms.ModelChoiceField(queryset=Comissao.objects.all(),
279 widget=forms.HiddenInput())
280
281 class Meta:
282 model = Reuniao
283 exclude = ['cod_andamento_reuniao']
284
285 def clean(self):
286 super(ReuniaoForm, self).clean()
287
288 if not self.is_valid():
289 return self.cleaned_data
290
291 if self.cleaned_data['hora_fim']:
292 if (self.cleaned_data['hora_fim'] <
293 self.cleaned_data['hora_inicio']):
294 msg = _('A hora de término da reunião não pode ser menor que a de início')
295 raise ValidationError(msg)
296 return self.cleaned_data
297
298 class DocumentoAcessorioCreateForm(forms.ModelForm):
299
300 parent_pk = forms.CharField(required=False) # widget=forms.HiddenInput())
301
302 class Meta:
303 model = DocumentoAcessorio
304 exclude = ['reuniao']
305
306 def __init__(self, user=None, **kwargs):
307 super(DocumentoAcessorioCreateForm, self).__init__(**kwargs)
308
309 if self.instance:
310 reuniao = Reuniao.objects.get(id=self.initial['parent_pk'])
311 comissao = reuniao.comissao
312 comissao_pk = comissao.id
313 documentos = reuniao.documentoacessorio_set.all()
314 return self.create_documentoacessorio()
315
316
317 def create_documentoacessorio(self):
318 reuniao = Reuniao.objects.get(id=self.initial['parent_pk'])
319
320
321 class DocumentoAcessorioEditForm(forms.ModelForm):
322
323 parent_pk = forms.CharField(required=False) # widget=forms.HiddenInput())
324
325 class Meta:
326 model = DocumentoAcessorio
327 fields = ['nome', 'data', 'autor', 'ementa',
328 'indexacao', 'arquivo']
329
330 def __init__(self, user=None, **kwargs):
331 super(DocumentoAcessorioEditForm, self).__init__(**kwargs)
332
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sapl/comissoes/forms.py b/sapl/comissoes/forms.py
--- a/sapl/comissoes/forms.py
+++ b/sapl/comissoes/forms.py
@@ -100,7 +100,7 @@
exclude(id__in=id_part)
eligible = self.verifica()
result = list(set(qs) & set(eligible))
- if not cmp(result, eligible): # se igual a 0 significa que o qs e o eli são iguais!
+ if result == eligible:
self.fields['parlamentar'].queryset = qs
else:
ids = [e.id for e in eligible]
|
{"golden_diff": "diff --git a/sapl/comissoes/forms.py b/sapl/comissoes/forms.py\n--- a/sapl/comissoes/forms.py\n+++ b/sapl/comissoes/forms.py\n@@ -100,7 +100,7 @@\n exclude(id__in=id_part)\n eligible = self.verifica()\n result = list(set(qs) & set(eligible))\n- if not cmp(result, eligible): # se igual a 0 significa que o qs e o eli s\u00e3o iguais!\n+ if result == eligible:\n self.fields['parlamentar'].queryset = qs\n else:\n ids = [e.id for e in eligible]\n", "issue": "Pesquisa de normas jur\u00eddicas\nOl\u00e1, estou testando o SAPL 3.1 e percebi que a pesquisa de normas jur\u00eddicas n\u00e3o retorna resultado caso seja acrescentado o n\u00famero \"0\" antes da numera\u00e7\u00e3o da norma. \r\n\n", "before_files": [{"content": "from django import forms\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ValidationError\nfrom django.db import transaction\nfrom django.db.models import Q\nfrom django.forms import ModelForm\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom sapl.base.models import Autor, TipoAutor\nfrom sapl.comissoes.models import (Comissao, Composicao, DocumentoAcessorio,\n Participacao, Reuniao, Periodo)\nfrom sapl.parlamentares.models import Legislatura, Mandato, Parlamentar\n\nclass ComposicaoForm(forms.ModelForm):\n\n comissao = forms.CharField(required=False, label='Comissao', widget=forms.HiddenInput())\n\n class Meta:\n model = Composicao\n exclude = []\n\n def __init__(self, user=None, **kwargs):\n super(ComposicaoForm, self).__init__(**kwargs)\n self.fields['comissao'].widget.attrs['disabled'] = 'disabled'\n\n def clean(self):\n cleaned_data = super(ComposicaoForm, self).clean()\n\n if not self.is_valid():\n return cleaned_data\n\n periodo = cleaned_data['periodo']\n comissao_pk = self.initial['comissao'].id\n intersecao_periodo = Composicao.objects.filter(\n Q(periodo__data_inicio__lte=periodo.data_fim,\n periodo__data_fim__gte=periodo.data_fim) |\n Q(periodo__data_inicio__gte=periodo.data_inicio,\n periodo__data_fim__lte=periodo.data_inicio),\n comissao_id=comissao_pk)\n\n if intersecao_periodo:\n raise ValidationError('O per\u00edodo informado '\n 'choca com per\u00edodos j\u00e1 '\n 'cadastrados para esta comiss\u00e3o')\n\n return cleaned_data\n\nclass PeriodoForm(forms.ModelForm):\n\n class Meta:\n model = Periodo\n exclude = []\n\n def clean(self):\n cleaned_data = super(PeriodoForm, self).clean()\n\n if not self.is_valid():\n return cleaned_data\n\n data_inicio = cleaned_data['data_inicio']\n data_fim = cleaned_data['data_fim']\n\n if data_fim and data_fim < data_inicio:\n raise ValidationError('A Data Final n\u00e3o pode ser menor que '\n 'a Data Inicial')\n return cleaned_data\n\n\nclass ParticipacaoCreateForm(forms.ModelForm):\n\n parent_pk = forms.CharField(required=False) # widget=forms.HiddenInput())\n\n class Meta:\n model = Participacao\n fields = '__all__'\n exclude = ['composicao']\n\n def __init__(self, user=None, **kwargs):\n super(ParticipacaoCreateForm, self).__init__(**kwargs)\n\n if self.instance:\n comissao = kwargs['initial']\n comissao_pk = int(comissao['parent_pk'])\n composicao = Composicao.objects.get(id=comissao_pk)\n participantes = composicao.participacao_set.all()\n id_part = [p.parlamentar.id for p in participantes]\n else:\n id_part = []\n\n qs = self.create_participacao()\n\n parlamentares = Mandato.objects.filter(qs,\n parlamentar__ativo=True\n ).prefetch_related('parlamentar').\\\n values_list('parlamentar',\n flat=True\n ).distinct()\n\n qs = 
Parlamentar.objects.filter(id__in=parlamentares).distinct().\\\n exclude(id__in=id_part)\n eligible = self.verifica()\n result = list(set(qs) & set(eligible))\n if not cmp(result, eligible): # se igual a 0 significa que o qs e o eli s\u00e3o iguais!\n self.fields['parlamentar'].queryset = qs\n else:\n ids = [e.id for e in eligible]\n qs = Parlamentar.objects.filter(id__in=ids)\n self.fields['parlamentar'].queryset = qs\n\n\n def clean(self):\n cleaned_data = super(ParticipacaoCreateForm, self).clean()\n\n if not self.is_valid():\n return cleaned_data\n\n data_designacao = cleaned_data['data_designacao']\n data_desligamento = cleaned_data['data_desligamento']\n\n if data_desligamento and \\\n data_designacao > data_desligamento:\n raise ValidationError(_('Data de designa\u00e7\u00e3o n\u00e3o pode ser superior '\n '\u00e0 data de desligamento'))\n\n composicao = Composicao.objects.get(id=self.initial['parent_pk'])\n cargos_unicos = [c.cargo.nome for c in composicao.participacao_set.filter(cargo__unico=True)]\n\n if cleaned_data['cargo'].nome in cargos_unicos:\n msg = _('Este cargo \u00e9 \u00fanico para esta Comiss\u00e3o.')\n raise ValidationError(msg)\n return cleaned_data\n\n\n def create_participacao(self):\n composicao = Composicao.objects.get(id=self.initial['parent_pk'])\n data_inicio_comissao = composicao.periodo.data_inicio\n data_fim_comissao = composicao.periodo.data_fim\n q1 = Q(data_fim_mandato__isnull=False,\n data_fim_mandato__gte=data_inicio_comissao)\n q2 = Q(data_inicio_mandato__gte=data_inicio_comissao) \\\n & Q(data_inicio_mandato__lte=data_fim_comissao)\n q3 = Q(data_fim_mandato__isnull=True,\n data_inicio_mandato__lte=data_inicio_comissao)\n qs = q1 | q2 | q3\n return qs\n\n def verifica(self):\n composicao = Composicao.objects.get(id=self.initial['parent_pk'])\n participantes = composicao.participacao_set.all()\n participantes_id = [p.parlamentar.id for p in participantes]\n parlamentares = Parlamentar.objects.all().exclude(\n id__in=participantes_id).order_by('nome_completo')\n parlamentares = [p for p in parlamentares if p.ativo]\n\n lista = []\n\n for p in parlamentares:\n mandatos = p.mandato_set.all()\n for m in mandatos:\n data_inicio = m.data_inicio_mandato\n data_fim = m.data_fim_mandato\n comp_data_inicio = composicao.periodo.data_inicio\n comp_data_fim = composicao.periodo.data_fim\n if (data_fim and data_fim >= comp_data_inicio)\\\n or (data_inicio >= comp_data_inicio and data_inicio <= comp_data_fim)\\\n or (data_fim is None and data_inicio <= comp_data_inicio):\n lista.append(p)\n\n lista = list(set(lista))\n\n return lista\n\n\nclass ParticipacaoEditForm(forms.ModelForm):\n\n parent_pk = forms.CharField(required=False) # widget=forms.HiddenInput())\n nome_parlamentar = forms.CharField(required=False, label='Parlamentar')\n\n class Meta:\n model = Participacao\n fields = ['nome_parlamentar', 'parlamentar', 'cargo', 'titular',\n 'data_designacao', 'data_desligamento',\n 'motivo_desligamento', 'observacao']\n widgets = {\n 'parlamentar': forms.HiddenInput(),\n }\n\n def __init__(self, user=None, **kwargs):\n super(ParticipacaoEditForm, self).__init__(**kwargs)\n self.initial['nome_parlamentar'] = Parlamentar.objects.get(\n id=self.initial['parlamentar']).nome_parlamentar\n self.fields['nome_parlamentar'].widget.attrs['disabled'] = 'disabled'\n\n def clean(self):\n cleaned_data = super(ParticipacaoEditForm, self).clean()\n\n if not self.is_valid():\n return cleaned_data\n\n data_designacao = cleaned_data['data_designacao']\n data_desligamento = 
cleaned_data['data_desligamento']\n\n if data_desligamento and \\\n data_designacao > data_desligamento:\n raise ValidationError(_('Data de designa\u00e7\u00e3o n\u00e3o pode ser superior '\n '\u00e0 data de desligamento'))\n\n composicao_id = self.instance.composicao_id\n\n composicao = Composicao.objects.get(id=composicao_id)\n cargos_unicos = [c.cargo.nome for c in composicao.participacao_set.filter(cargo__unico=True)]\n\n if cleaned_data['cargo'].nome in cargos_unicos:\n msg = _('Este cargo \u00e9 \u00fanico para esta Comiss\u00e3o.')\n raise ValidationError(msg)\n\n return cleaned_data\n\n\nclass ComissaoForm(forms.ModelForm):\n\n class Meta:\n model = Comissao\n fields = '__all__'\n\n def __init__(self, user=None, **kwargs):\n super(ComissaoForm, self).__init__(**kwargs)\n inst = self.instance\n if inst.pk:\n if inst.tipo.natureza == 'P':\n self.fields['apelido_temp'].widget.attrs['disabled'] = 'disabled'\n self.fields['data_instalacao_temp'].widget.attrs['disabled'] = 'disabled'\n self.fields['data_final_prevista_temp'].widget.attrs['disabled'] = 'disabled'\n self.fields['data_prorrogada_temp'].widget.attrs['disabled'] = 'disabled'\n self.fields['data_fim_comissao'].widget.attrs['disabled'] = 'disabled'\n\n\n\n def clean(self):\n super(ComissaoForm, self).clean()\n\n if not self.is_valid():\n return self.cleaned_data\n\n if len(self.cleaned_data['nome']) > 50:\n msg = _('Nome da Comiss\u00e3o deve ter no m\u00e1ximo 50 caracteres.')\n raise ValidationError(msg)\n if self.cleaned_data['data_extincao']:\n if (self.cleaned_data['data_extincao'] <\n self.cleaned_data['data_criacao']):\n msg = _('Data de extin\u00e7\u00e3o n\u00e3o pode ser menor que a de cria\u00e7\u00e3o')\n raise ValidationError(msg)\n return self.cleaned_data\n\n @transaction.atomic\n def save(self, commit=True):\n inst = self.instance\n if not inst.pk:\n comissao = super(ComissaoForm, self).save(commit)\n content_type = ContentType.objects.get_for_model(Comissao)\n object_id = comissao.pk\n tipo = TipoAutor.objects.get(descricao__icontains='Comiss')\n nome = comissao.sigla + ' - ' + comissao.nome\n Autor.objects.create(\n content_type=content_type,\n object_id=object_id,\n tipo=tipo,\n nome=nome\n )\n return comissao\n else:\n comissao = super(ComissaoForm, self).save(commit)\n return comissao\n\n\nclass ReuniaoForm(ModelForm):\n\n comissao = forms.ModelChoiceField(queryset=Comissao.objects.all(),\n widget=forms.HiddenInput())\n\n class Meta:\n model = Reuniao\n exclude = ['cod_andamento_reuniao']\n\n def clean(self):\n super(ReuniaoForm, self).clean()\n\n if not self.is_valid():\n return self.cleaned_data\n\n if self.cleaned_data['hora_fim']:\n if (self.cleaned_data['hora_fim'] <\n self.cleaned_data['hora_inicio']):\n msg = _('A hora de t\u00e9rmino da reuni\u00e3o n\u00e3o pode ser menor que a de in\u00edcio')\n raise ValidationError(msg)\n return self.cleaned_data\n\nclass DocumentoAcessorioCreateForm(forms.ModelForm):\n\n parent_pk = forms.CharField(required=False) # widget=forms.HiddenInput())\n\n class Meta:\n model = DocumentoAcessorio\n exclude = ['reuniao']\n\n def __init__(self, user=None, **kwargs):\n super(DocumentoAcessorioCreateForm, self).__init__(**kwargs)\n\n if self.instance:\n reuniao = Reuniao.objects.get(id=self.initial['parent_pk'])\n comissao = reuniao.comissao\n comissao_pk = comissao.id\n documentos = reuniao.documentoacessorio_set.all()\n return self.create_documentoacessorio()\n\n\n def create_documentoacessorio(self):\n reuniao = 
Reuniao.objects.get(id=self.initial['parent_pk'])\n\n\nclass DocumentoAcessorioEditForm(forms.ModelForm):\n\n parent_pk = forms.CharField(required=False) # widget=forms.HiddenInput())\n\n class Meta:\n model = DocumentoAcessorio\n fields = ['nome', 'data', 'autor', 'ementa',\n 'indexacao', 'arquivo']\n\n def __init__(self, user=None, **kwargs):\n super(DocumentoAcessorioEditForm, self).__init__(**kwargs)\n", "path": "sapl/comissoes/forms.py"}], "after_files": [{"content": "from django import forms\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ValidationError\nfrom django.db import transaction\nfrom django.db.models import Q\nfrom django.forms import ModelForm\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom sapl.base.models import Autor, TipoAutor\nfrom sapl.comissoes.models import (Comissao, Composicao, DocumentoAcessorio,\n Participacao, Reuniao, Periodo)\nfrom sapl.parlamentares.models import Legislatura, Mandato, Parlamentar\n\nclass ComposicaoForm(forms.ModelForm):\n\n comissao = forms.CharField(required=False, label='Comissao', widget=forms.HiddenInput())\n\n class Meta:\n model = Composicao\n exclude = []\n\n def __init__(self, user=None, **kwargs):\n super(ComposicaoForm, self).__init__(**kwargs)\n self.fields['comissao'].widget.attrs['disabled'] = 'disabled'\n\n def clean(self):\n cleaned_data = super(ComposicaoForm, self).clean()\n\n if not self.is_valid():\n return cleaned_data\n\n periodo = cleaned_data['periodo']\n comissao_pk = self.initial['comissao'].id\n intersecao_periodo = Composicao.objects.filter(\n Q(periodo__data_inicio__lte=periodo.data_fim,\n periodo__data_fim__gte=periodo.data_fim) |\n Q(periodo__data_inicio__gte=periodo.data_inicio,\n periodo__data_fim__lte=periodo.data_inicio),\n comissao_id=comissao_pk)\n\n if intersecao_periodo:\n raise ValidationError('O per\u00edodo informado '\n 'choca com per\u00edodos j\u00e1 '\n 'cadastrados para esta comiss\u00e3o')\n\n return cleaned_data\n\nclass PeriodoForm(forms.ModelForm):\n\n class Meta:\n model = Periodo\n exclude = []\n\n def clean(self):\n cleaned_data = super(PeriodoForm, self).clean()\n\n if not self.is_valid():\n return cleaned_data\n\n data_inicio = cleaned_data['data_inicio']\n data_fim = cleaned_data['data_fim']\n\n if data_fim and data_fim < data_inicio:\n raise ValidationError('A Data Final n\u00e3o pode ser menor que '\n 'a Data Inicial')\n return cleaned_data\n\n\nclass ParticipacaoCreateForm(forms.ModelForm):\n\n parent_pk = forms.CharField(required=False) # widget=forms.HiddenInput())\n\n class Meta:\n model = Participacao\n fields = '__all__'\n exclude = ['composicao']\n\n def __init__(self, user=None, **kwargs):\n super(ParticipacaoCreateForm, self).__init__(**kwargs)\n\n if self.instance:\n comissao = kwargs['initial']\n comissao_pk = int(comissao['parent_pk'])\n composicao = Composicao.objects.get(id=comissao_pk)\n participantes = composicao.participacao_set.all()\n id_part = [p.parlamentar.id for p in participantes]\n else:\n id_part = []\n\n qs = self.create_participacao()\n\n parlamentares = Mandato.objects.filter(qs,\n parlamentar__ativo=True\n ).prefetch_related('parlamentar').\\\n values_list('parlamentar',\n flat=True\n ).distinct()\n\n qs = Parlamentar.objects.filter(id__in=parlamentares).distinct().\\\n exclude(id__in=id_part)\n eligible = self.verifica()\n result = list(set(qs) & set(eligible))\n if result == eligible:\n self.fields['parlamentar'].queryset = qs\n else:\n ids = [e.id for e in eligible]\n qs = 
Parlamentar.objects.filter(id__in=ids)\n self.fields['parlamentar'].queryset = qs\n\n\n def clean(self):\n cleaned_data = super(ParticipacaoCreateForm, self).clean()\n\n if not self.is_valid():\n return cleaned_data\n\n data_designacao = cleaned_data['data_designacao']\n data_desligamento = cleaned_data['data_desligamento']\n\n if data_desligamento and \\\n data_designacao > data_desligamento:\n raise ValidationError(_('Data de designa\u00e7\u00e3o n\u00e3o pode ser superior '\n '\u00e0 data de desligamento'))\n\n composicao = Composicao.objects.get(id=self.initial['parent_pk'])\n cargos_unicos = [c.cargo.nome for c in composicao.participacao_set.filter(cargo__unico=True)]\n\n if cleaned_data['cargo'].nome in cargos_unicos:\n msg = _('Este cargo \u00e9 \u00fanico para esta Comiss\u00e3o.')\n raise ValidationError(msg)\n return cleaned_data\n\n\n def create_participacao(self):\n composicao = Composicao.objects.get(id=self.initial['parent_pk'])\n data_inicio_comissao = composicao.periodo.data_inicio\n data_fim_comissao = composicao.periodo.data_fim\n q1 = Q(data_fim_mandato__isnull=False,\n data_fim_mandato__gte=data_inicio_comissao)\n q2 = Q(data_inicio_mandato__gte=data_inicio_comissao) \\\n & Q(data_inicio_mandato__lte=data_fim_comissao)\n q3 = Q(data_fim_mandato__isnull=True,\n data_inicio_mandato__lte=data_inicio_comissao)\n qs = q1 | q2 | q3\n return qs\n\n def verifica(self):\n composicao = Composicao.objects.get(id=self.initial['parent_pk'])\n participantes = composicao.participacao_set.all()\n participantes_id = [p.parlamentar.id for p in participantes]\n parlamentares = Parlamentar.objects.all().exclude(\n id__in=participantes_id).order_by('nome_completo')\n parlamentares = [p for p in parlamentares if p.ativo]\n\n lista = []\n\n for p in parlamentares:\n mandatos = p.mandato_set.all()\n for m in mandatos:\n data_inicio = m.data_inicio_mandato\n data_fim = m.data_fim_mandato\n comp_data_inicio = composicao.periodo.data_inicio\n comp_data_fim = composicao.periodo.data_fim\n if (data_fim and data_fim >= comp_data_inicio)\\\n or (data_inicio >= comp_data_inicio and data_inicio <= comp_data_fim)\\\n or (data_fim is None and data_inicio <= comp_data_inicio):\n lista.append(p)\n\n lista = list(set(lista))\n\n return lista\n\n\nclass ParticipacaoEditForm(forms.ModelForm):\n\n parent_pk = forms.CharField(required=False) # widget=forms.HiddenInput())\n nome_parlamentar = forms.CharField(required=False, label='Parlamentar')\n\n class Meta:\n model = Participacao\n fields = ['nome_parlamentar', 'parlamentar', 'cargo', 'titular',\n 'data_designacao', 'data_desligamento',\n 'motivo_desligamento', 'observacao']\n widgets = {\n 'parlamentar': forms.HiddenInput(),\n }\n\n def __init__(self, user=None, **kwargs):\n super(ParticipacaoEditForm, self).__init__(**kwargs)\n self.initial['nome_parlamentar'] = Parlamentar.objects.get(\n id=self.initial['parlamentar']).nome_parlamentar\n self.fields['nome_parlamentar'].widget.attrs['disabled'] = 'disabled'\n\n def clean(self):\n cleaned_data = super(ParticipacaoEditForm, self).clean()\n\n if not self.is_valid():\n return cleaned_data\n\n data_designacao = cleaned_data['data_designacao']\n data_desligamento = cleaned_data['data_desligamento']\n\n if data_desligamento and \\\n data_designacao > data_desligamento:\n raise ValidationError(_('Data de designa\u00e7\u00e3o n\u00e3o pode ser superior '\n '\u00e0 data de desligamento'))\n\n composicao_id = self.instance.composicao_id\n\n composicao = Composicao.objects.get(id=composicao_id)\n cargos_unicos = 
[c.cargo.nome for c in composicao.participacao_set.filter(cargo__unico=True)]\n\n if cleaned_data['cargo'].nome in cargos_unicos:\n msg = _('Este cargo \u00e9 \u00fanico para esta Comiss\u00e3o.')\n raise ValidationError(msg)\n\n return cleaned_data\n\n\nclass ComissaoForm(forms.ModelForm):\n\n class Meta:\n model = Comissao\n fields = '__all__'\n\n def __init__(self, user=None, **kwargs):\n super(ComissaoForm, self).__init__(**kwargs)\n inst = self.instance\n if inst.pk:\n if inst.tipo.natureza == 'P':\n self.fields['apelido_temp'].widget.attrs['disabled'] = 'disabled'\n self.fields['data_instalacao_temp'].widget.attrs['disabled'] = 'disabled'\n self.fields['data_final_prevista_temp'].widget.attrs['disabled'] = 'disabled'\n self.fields['data_prorrogada_temp'].widget.attrs['disabled'] = 'disabled'\n self.fields['data_fim_comissao'].widget.attrs['disabled'] = 'disabled'\n\n\n\n def clean(self):\n super(ComissaoForm, self).clean()\n\n if not self.is_valid():\n return self.cleaned_data\n\n if len(self.cleaned_data['nome']) > 50:\n msg = _('Nome da Comiss\u00e3o deve ter no m\u00e1ximo 50 caracteres.')\n raise ValidationError(msg)\n if self.cleaned_data['data_extincao']:\n if (self.cleaned_data['data_extincao'] <\n self.cleaned_data['data_criacao']):\n msg = _('Data de extin\u00e7\u00e3o n\u00e3o pode ser menor que a de cria\u00e7\u00e3o')\n raise ValidationError(msg)\n return self.cleaned_data\n\n @transaction.atomic\n def save(self, commit=True):\n inst = self.instance\n if not inst.pk:\n comissao = super(ComissaoForm, self).save(commit)\n content_type = ContentType.objects.get_for_model(Comissao)\n object_id = comissao.pk\n tipo = TipoAutor.objects.get(descricao__icontains='Comiss')\n nome = comissao.sigla + ' - ' + comissao.nome\n Autor.objects.create(\n content_type=content_type,\n object_id=object_id,\n tipo=tipo,\n nome=nome\n )\n return comissao\n else:\n comissao = super(ComissaoForm, self).save(commit)\n return comissao\n\n\nclass ReuniaoForm(ModelForm):\n\n comissao = forms.ModelChoiceField(queryset=Comissao.objects.all(),\n widget=forms.HiddenInput())\n\n class Meta:\n model = Reuniao\n exclude = ['cod_andamento_reuniao']\n\n def clean(self):\n super(ReuniaoForm, self).clean()\n\n if not self.is_valid():\n return self.cleaned_data\n\n if self.cleaned_data['hora_fim']:\n if (self.cleaned_data['hora_fim'] <\n self.cleaned_data['hora_inicio']):\n msg = _('A hora de t\u00e9rmino da reuni\u00e3o n\u00e3o pode ser menor que a de in\u00edcio')\n raise ValidationError(msg)\n return self.cleaned_data\n\nclass DocumentoAcessorioCreateForm(forms.ModelForm):\n\n parent_pk = forms.CharField(required=False) # widget=forms.HiddenInput())\n\n class Meta:\n model = DocumentoAcessorio\n exclude = ['reuniao']\n\n def __init__(self, user=None, **kwargs):\n super(DocumentoAcessorioCreateForm, self).__init__(**kwargs)\n\n if self.instance:\n reuniao = Reuniao.objects.get(id=self.initial['parent_pk'])\n comissao = reuniao.comissao\n comissao_pk = comissao.id\n documentos = reuniao.documentoacessorio_set.all()\n return self.create_documentoacessorio()\n\n\n def create_documentoacessorio(self):\n reuniao = Reuniao.objects.get(id=self.initial['parent_pk'])\n\n\nclass DocumentoAcessorioEditForm(forms.ModelForm):\n\n parent_pk = forms.CharField(required=False) # widget=forms.HiddenInput())\n\n class Meta:\n model = DocumentoAcessorio\n fields = ['nome', 'data', 'autor', 'ementa',\n 'indexacao', 'arquivo']\n\n def __init__(self, user=None, **kwargs):\n super(DocumentoAcessorioEditForm, 
self).__init__(**kwargs)\n", "path": "sapl/comissoes/forms.py"}]}
| 3,997 | 150 |
gh_patches_debug_7617
|
rasdani/github-patches
|
git_diff
|
larq__larq-39
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add test coverage report to Azure Pipelines
https://docs.microsoft.com/en-us/azure/devops/pipelines/languages/python?view=azure-devops#test-with-pytest-and-collect-coverage-metrics-with-pytest-cov
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup, find_packages
2
3
4 def readme():
5 with open("README.md", "r") as f:
6 return f.read()
7
8
9 setup(
10 name="pl-xquant",
11 version="0.0.0",
12 author="Plumerai",
13 author_email="[email protected]",
14 description="An Open Source Machine Learning Framework for Training Extreme Quantized Neural Networks",
15 long_description=readme(),
16 long_description_content_type="text/markdown",
17 url="https://github.com/lgeiger/xquant",
18 packages=find_packages(),
19 license="Apache 2.0",
20 install_requires=["numpy >= 1.15.4, < 2.0"],
21 extras_require={
22 "tensorflow": ["tensorflow>=1.13.1"],
23 "tensorflow_gpu": ["tensorflow-gpu>=1.13.1"],
24 "test": ["absl-py>=0.7.0", "pytest>=4.3.1"],
25 "docs": [
26 "pydoc-markdown@https://github.com/lgeiger/pydoc-markdown/archive/master.zip",
27 "mkdocs-material>=4.1.0",
28 "pymdown-extensions>=6.0",
29 "mknotebooks>=0.1.5",
30 ],
31 },
32 classifiers=[
33 "Development Status :: 2 - Pre-Alpha",
34 "Intended Audience :: Developers",
35 "Intended Audience :: Education",
36 "Intended Audience :: Science/Research",
37 "License :: OSI Approved :: Apache Software License",
38 "Programming Language :: Python :: 3",
39 "Programming Language :: Python :: 3 :: Only",
40 "Programming Language :: Python :: 3.6",
41 "Programming Language :: Python :: 3.7",
42 "Topic :: Scientific/Engineering",
43 "Topic :: Scientific/Engineering :: Mathematics",
44 "Topic :: Scientific/Engineering :: Artificial Intelligence",
45 "Topic :: Software Development",
46 "Topic :: Software Development :: Libraries",
47 "Topic :: Software Development :: Libraries :: Python Modules",
48 ],
49 )
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -21,7 +21,7 @@
extras_require={
"tensorflow": ["tensorflow>=1.13.1"],
"tensorflow_gpu": ["tensorflow-gpu>=1.13.1"],
- "test": ["absl-py>=0.7.0", "pytest>=4.3.1"],
+ "test": ["absl-py>=0.7.0", "pytest>=4.3.1", "pytest-cov>=2.6.1"],
"docs": [
"pydoc-markdown@https://github.com/lgeiger/pydoc-markdown/archive/master.zip",
"mkdocs-material>=4.1.0",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -21,7 +21,7 @@\n extras_require={\n \"tensorflow\": [\"tensorflow>=1.13.1\"],\n \"tensorflow_gpu\": [\"tensorflow-gpu>=1.13.1\"],\n- \"test\": [\"absl-py>=0.7.0\", \"pytest>=4.3.1\"],\n+ \"test\": [\"absl-py>=0.7.0\", \"pytest>=4.3.1\", \"pytest-cov>=2.6.1\"],\n \"docs\": [\n \"pydoc-markdown@https://github.com/lgeiger/pydoc-markdown/archive/master.zip\",\n \"mkdocs-material>=4.1.0\",\n", "issue": "Add test coverage report to Azure Pipelines\nhttps://docs.microsoft.com/en-us/azure/devops/pipelines/languages/python?view=azure-devops#test-with-pytest-and-collect-coverage-metrics-with-pytest-cov\n", "before_files": [{"content": "from setuptools import setup, find_packages\n\n\ndef readme():\n with open(\"README.md\", \"r\") as f:\n return f.read()\n\n\nsetup(\n name=\"pl-xquant\",\n version=\"0.0.0\",\n author=\"Plumerai\",\n author_email=\"[email protected]\",\n description=\"An Open Source Machine Learning Framework for Training Extreme Quantized Neural Networks\",\n long_description=readme(),\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/lgeiger/xquant\",\n packages=find_packages(),\n license=\"Apache 2.0\",\n install_requires=[\"numpy >= 1.15.4, < 2.0\"],\n extras_require={\n \"tensorflow\": [\"tensorflow>=1.13.1\"],\n \"tensorflow_gpu\": [\"tensorflow-gpu>=1.13.1\"],\n \"test\": [\"absl-py>=0.7.0\", \"pytest>=4.3.1\"],\n \"docs\": [\n \"pydoc-markdown@https://github.com/lgeiger/pydoc-markdown/archive/master.zip\",\n \"mkdocs-material>=4.1.0\",\n \"pymdown-extensions>=6.0\",\n \"mknotebooks>=0.1.5\",\n ],\n },\n classifiers=[\n \"Development Status :: 2 - Pre-Alpha\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Libraries\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\n\n\ndef readme():\n with open(\"README.md\", \"r\") as f:\n return f.read()\n\n\nsetup(\n name=\"pl-xquant\",\n version=\"0.0.0\",\n author=\"Plumerai\",\n author_email=\"[email protected]\",\n description=\"An Open Source Machine Learning Framework for Training Extreme Quantized Neural Networks\",\n long_description=readme(),\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/lgeiger/xquant\",\n packages=find_packages(),\n license=\"Apache 2.0\",\n install_requires=[\"numpy >= 1.15.4, < 2.0\"],\n extras_require={\n \"tensorflow\": [\"tensorflow>=1.13.1\"],\n \"tensorflow_gpu\": [\"tensorflow-gpu>=1.13.1\"],\n \"test\": [\"absl-py>=0.7.0\", \"pytest>=4.3.1\", \"pytest-cov>=2.6.1\"],\n \"docs\": [\n \"pydoc-markdown@https://github.com/lgeiger/pydoc-markdown/archive/master.zip\",\n \"mkdocs-material>=4.1.0\",\n \"pymdown-extensions>=6.0\",\n \"mknotebooks>=0.1.5\",\n ],\n },\n classifiers=[\n \"Development Status :: 2 - Pre-Alpha\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n 
\"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Libraries\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n)\n", "path": "setup.py"}]}
| 835 | 169 |
gh_patches_debug_15952
|
rasdani/github-patches
|
git_diff
|
kevoreilly__CAPEv2-935
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CAPA rule path does not exist
# Expected Behavior
CAPA is able to load its rules.
# Current Behavior
CAPA is unable to load rules. Splits the file path into a list and loops through each character. Enumerates the / directory then moves to the next character o at which point it errors as the path does not exist:
OSError: rule path o does not exist or cannot be accessed
# Failure Information (for bugs)
The argument capa.main.RULES_PATH_DEFAULT_STRING passed to get_rules is a string, but should be a list of file paths:
https://github.com/kevoreilly/CAPEv2/blob/d9124712cabe5bf9a7a3a98da93cbdbd37a53da3/lib/cuckoo/common/integrations/capa.py#L44
## Steps to Reproduce
Enable CAPA in the config and run web server.
## Context
commit 1bd0bf62055fc3741ea19a85d510d54052dbf431
Ubuntu 20.04.04 LTS
## Failure Logs
File "/opt/CAPEv2/web/../lib/cuckoo/common/integrations/parse_pe.py", line 74, in <module>
from lib.cuckoo.common.integrations.capa import HAVE_FLARE_CAPA, flare_capa_details
File "/opt/CAPEv2/web/../lib/cuckoo/common/integrations/capa.py", line 48, in <module>
rules = capa.main.get_rules(capa.main.RULES_PATH_DEFAULT_STRING, disable_progress=True)
File "/usr/local/lib/python3.8/dist-packages/capa/main.py", line 580, in get_rules
raise IOError("rule path %s does not exist or cannot be accessed" % rule_path)
OSError: rule path o does not exist or cannot be accessed
--- END ISSUE ---
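The `o` in the traceback is simply the second character of the rules directory path: when a single path string is handed to code that expects a list of paths, the `for` loop walks the string one character at a time, `/` resolves (the filesystem root) and `o` does not. The sketch below illustrates that failure mode; `get_rules` here is an illustrative stand-in rather than capa's real implementation, and the directory value is assumed from the traceback above.
```python
import os


def get_rules(rule_paths):
    """Illustrative stand-in for a loader that expects a *list* of rule paths."""
    for rule_path in rule_paths:  # iterating a plain string yields single characters
        if not os.path.exists(rule_path):
            raise OSError(f"rule path {rule_path} does not exist or cannot be accessed")


rules_dir = "/opt/CAPEv2/data/capa-rules"  # assumed CUCKOO_ROOT/data/capa-rules

# What the buggy call effectively iterates over:
print(list(rules_dir)[:4])  # ['/', 'o', 'p', 't'] -> the 'o' seen in the traceback

# The intended call wraps the path in a list so the loop sees whole paths:
# get_rules([rules_dir])
```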
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/cuckoo/common/integrations/capa.py`
Content:
```
1 # Copyright (C) 2010-2015 Cuckoo Foundation.
2 # This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
3 # See the file 'docs/LICENSE' for copying permission.
4
5 import collections
6 import logging
7 import os
8 from typing import Any, Dict, List
9
10 from lib.cuckoo.common.config import Config
11 from lib.cuckoo.common.constants import CUCKOO_ROOT
12
13 log = logging.getLogger(__name__)
14
15 processing_conf = Config("processing")
16
17 """
18 from lib.cuckoo.common.integrations.capa import flare_capa_details, HAVE_FLARE_CAPA
19 path = "/opt/CAPEv2/storage/binaries/da034c11f0c396f6cd11d22f833f9501dc75a33047ba3bd5870ff79e479bc004"
20 details = flare_capa_details(path, "static", on_demand=True)
21 """
22
23 HAVE_FLARE_CAPA = False
24 if processing_conf.flare_capa.enabled:
25 try:
26 from capa.version import __version__ as capa_version
27
28 if capa_version[0] != "3":
29 print("FLARE-CAPA missed, pip3 install -U flare-capa")
30 else:
31 import capa.main
32 import capa.render.utils as rutils
33 import capa.rules
34 from capa.main import UnsupportedRuntimeError
35 from capa.render.result_document import (
36 convert_capabilities_to_result_document as capa_convert_capabilities_to_result_document,
37 )
38 from capa.rules import InvalidRuleSet, InvalidRuleWithPath
39
40 rules_path = os.path.join(CUCKOO_ROOT, "data", "capa-rules")
41 if os.path.exists(rules_path):
42 capa.main.RULES_PATH_DEFAULT_STRING = os.path.join(CUCKOO_ROOT, "data", "capa-rules")
43 try:
44 rules = capa.main.get_rules(capa.main.RULES_PATH_DEFAULT_STRING, disable_progress=True)
45 rules = capa.rules.RuleSet(rules)
46 HAVE_FLARE_CAPA = True
47 except InvalidRuleWithPath:
48 print("FLARE_CAPA InvalidRuleWithPath")
49 HAVE_FLARE_CAPA = False
50 except InvalidRuleSet:
51 print("FLARE_CAPA InvalidRuleSet")
52 HAVE_FLARE_CAPA = False
53 else:
54 print("FLARE CAPA rules missed! You can download them using python3 community.py -cr")
55 HAVE_FLARE_CAPA = False
56
57 signatures_path = os.path.join(CUCKOO_ROOT, "data", "capa-signatures")
58 if os.path.exists(signatures_path):
59 capa.main.SIGNATURES_PATH_DEFAULT_STRING = os.path.join(CUCKOO_ROOT, "data", "capa-signatures")
60 try:
61 signatures = capa.main.get_signatures(capa.main.SIGNATURES_PATH_DEFAULT_STRING)
62 HAVE_FLARE_CAPA = True
63 except IOError:
64 print("FLARE_CAPA InvalidSignatures")
65 else:
66 print("FLARE CAPA rules missed! You can download them using python3 community.py -cr")
67 HAVE_FLARE_CAPA = False
68 except ImportError as e:
69 HAVE_FLARE_CAPA = False
70 print(e)
71 print("FLARE-CAPA missed, pip3 install -U flare-capa")
72
73
74 def render_meta(doc: Dict[str, Any]) -> Dict[str, Any]:
75 return {
76 "md5": doc["meta"]["sample"]["md5"],
77 "sha1": doc["meta"]["sample"]["sha1"],
78 "sha256": doc["meta"]["sample"]["sha256"],
79 "path": doc["meta"]["sample"]["path"],
80 }
81
82
83 def find_subrule_matches(doc: Dict[str, Any]) -> set:
84 """
85 collect the rule names that have been matched as a subrule match.
86 this way we can avoid displaying entries for things that are too specific.
87 """
88
89 def rec(node: dict) -> set:
90 matches = set()
91 if not node["success"]:
92 # there's probably a bug here for rules that do `not: match: ...`
93 # but we don't have any examples of this yet
94 return
95
96 elif node["node"]["type"] == "statement":
97 for child in node["children"]:
98 rec(child)
99
100 elif node["node"]["type"] == "feature":
101 if node["node"]["feature"]["type"] == "match":
102 matches.add(node["node"]["feature"]["match"])
103 return matches
104
105 matches = set()
106
107 for rule in rutils.capability_rules(doc):
108 for node in rule["matches"].values():
109 matches = matches.union(rec(node))
110
111 return matches
112
113
114 def render_capabilities(doc: Dict[str, Any]) -> Dict[str, List[str]]:
115 """
116 example::
117 {'accept command line arguments': ['host-interaction/cli'],
118 'allocate thread local storage (2 matches)': ['host-interaction/process'],
119 'check for time delay via GetTickCount': ['anti-analysis/anti-debugging/debugger-detection'],
120 'check if process is running under wine': ['anti-analysis/anti-emulation/wine'],
121 'contain a resource (.rsrc) section': ['executable/pe/section/rsrc'],
122 'write file (3 matches)': ['host-interaction/file-system/write']
123 }
124 """
125 subrule_matches = find_subrule_matches(doc)
126
127 capability_dict = {}
128 for rule in rutils.capability_rules(doc):
129 if rule["meta"]["name"] in subrule_matches:
130 # rules that are also matched by other rules should not get rendered by default.
131 # this cuts down on the amount of output while giving approx the same detail.
132 # see #224
133 continue
134
135 count = len(rule["matches"])
136 if count == 1:
137 capability = rule["meta"]["name"]
138 else:
139 capability = f"{rule['meta']['name']} ({count} matches)"
140
141 capability_dict.setdefault(rule["meta"]["namespace"], []).append(capability)
142 return capability_dict
143
144
145 def render_attack(doc: Dict[str, Any]) -> Dict[str, List[str]]:
146 """
147 example::
148 {'COLLECTION': ['Input Capture::Keylogging [T1056.001]'],
149 'DEFENSE EVASION': ['Obfuscated Files or Information [T1027]',
150 'Virtualization/Sandbox Evasion::System Checks '
151 '[T1497.001]'],
152 'DISCOVERY': ['File and Directory Discovery [T1083]',
153 'Query Registry [T1012]',
154 'System Information Discovery [T1082]'],
155 'EXECUTION': ['Shared Modules [T1129]']
156 }
157 """
158 attck_dict = {}
159 tactics = collections.defaultdict(set)
160 for rule in rutils.capability_rules(doc):
161 for attack in rule["meta"].get("att&ck", {}):
162 tactics[attack["tactic"]].add((attack["technique"], attack.get("subtechnique"), attack["id"]))
163
164 for tactic, techniques in sorted(tactics.items()):
165 inner_rows = []
166 for technique, subtechnique, id in sorted(techniques):
167 if subtechnique is None:
168 inner_rows.append(f"{technique} {id}")
169 else:
170 inner_rows.append(f"{technique}::{subtechnique} {id}")
171 attck_dict.setdefault(tactic.upper(), inner_rows)
172 return attck_dict
173
174
175 def render_mbc(doc: Dict[str, Any]) -> Dict[str, List[str]]:
176 """
177 example::
178 {'ANTI-BEHAVIORAL ANALYSIS': ['Debugger Detection::Timing/Delay Check '
179 'GetTickCount [B0001.032]',
180 'Emulator Detection [B0004]',
181 'Virtual Machine Detection::Instruction '
182 'Testing [B0009.029]',
183 'Virtual Machine Detection [B0009]'],
184 'COLLECTION': ['Keylogging::Polling [F0002.002]'],
185 'CRYPTOGRAPHY': ['Encrypt Data::RC4 [C0027.009]',
186 'Generate Pseudo-random Sequence::RC4 PRGA '
187 '[C0021.004]']
188 }
189 """
190 mbc_dict = {}
191 objectives = collections.defaultdict(set)
192 for rule in rutils.capability_rules(doc):
193 for mbc in rule["meta"].get("mbc", {}):
194 objectives[mbc["objective"]].add((mbc["behavior"], mbc.get("method"), mbc["id"]))
195
196 for objective, behaviors in sorted(objectives.items()):
197 inner_rows = []
198 for behavior, method, id in sorted(behaviors):
199 if method is None:
200 inner_rows.append(f"{behavior} [{id}]")
201 else:
202 inner_rows.append(f"{behavior}::{method} [{id}]")
203 mbc_dict.setdefault(objective.upper(), inner_rows)
204 return mbc_dict
205
206
207 def render_dictionary(doc: Dict[str, Any]) -> Dict[str, Any]:
208 ostream = render_meta(doc)
209 ostream["ATTCK"] = render_attack(doc)
210 ostream["MBC"] = render_mbc(doc)
211 ostream["CAPABILITY"] = render_capabilities(doc)
212
213 return ostream
214
215
216 # ===== CAPA END
217 def flare_capa_details(file_path: str, category: str = False, on_demand=False, disable_progress=True) -> Dict[str, Any]:
218 capa_dictionary = {}
219 if (
220 HAVE_FLARE_CAPA
221 and processing_conf.flare_capa.enabled
222 and processing_conf.flare_capa.get(category, False)
223 and not processing_conf.flare_capa.on_demand
224 or on_demand
225 ):
226 try:
227 extractor = capa.main.get_extractor(
228 file_path, "auto", capa.main.BACKEND_VIV, signatures, disable_progress=disable_progress
229 )
230 meta = capa.main.collect_metadata("", file_path, capa.main.RULES_PATH_DEFAULT_STRING, extractor)
231 capabilities, counts = capa.main.find_capabilities(rules, extractor, disable_progress=True)
232 meta["analysis"].update(counts)
233 doc = capa_convert_capabilities_to_result_document(meta, rules, capabilities)
234 capa_dictionary = render_dictionary(doc)
235 except MemoryError:
236 log.warning("FLARE CAPA -> MemoryError")
237 except UnsupportedRuntimeError:
238 log.error("FLARE CAPA -> UnsupportedRuntimeError")
239 except Exception as e:
240 log.error(e, exc_info=True)
241 return capa_dictionary
242
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lib/cuckoo/common/integrations/capa.py b/lib/cuckoo/common/integrations/capa.py
--- a/lib/cuckoo/common/integrations/capa.py
+++ b/lib/cuckoo/common/integrations/capa.py
@@ -40,8 +40,10 @@
rules_path = os.path.join(CUCKOO_ROOT, "data", "capa-rules")
if os.path.exists(rules_path):
capa.main.RULES_PATH_DEFAULT_STRING = os.path.join(CUCKOO_ROOT, "data", "capa-rules")
+ rules_list = []
+ rules_list.append(rules_path)
try:
- rules = capa.main.get_rules(capa.main.RULES_PATH_DEFAULT_STRING, disable_progress=True)
+ rules = capa.main.get_rules(rules_list, disable_progress=True)
rules = capa.rules.RuleSet(rules)
HAVE_FLARE_CAPA = True
except InvalidRuleWithPath:
|
{"golden_diff": "diff --git a/lib/cuckoo/common/integrations/capa.py b/lib/cuckoo/common/integrations/capa.py\n--- a/lib/cuckoo/common/integrations/capa.py\n+++ b/lib/cuckoo/common/integrations/capa.py\n@@ -40,8 +40,10 @@\n rules_path = os.path.join(CUCKOO_ROOT, \"data\", \"capa-rules\")\n if os.path.exists(rules_path):\n capa.main.RULES_PATH_DEFAULT_STRING = os.path.join(CUCKOO_ROOT, \"data\", \"capa-rules\")\n+ rules_list = []\n+ rules_list.append(rules_path)\n try:\n- rules = capa.main.get_rules(capa.main.RULES_PATH_DEFAULT_STRING, disable_progress=True)\n+ rules = capa.main.get_rules(rules_list, disable_progress=True)\n rules = capa.rules.RuleSet(rules)\n HAVE_FLARE_CAPA = True\n except InvalidRuleWithPath:\n", "issue": "CAPA rule path does not exist\n# Expected Behavior\r\n\r\nCAPA is able to load its rules.\r\n\r\n# Current Behavior\r\n\r\nCAPA is unable to load rules. Splits the file path into a list and loops through each character. Enumerates the / directory then moves to the next character o at which point it errors as the path does not exist:\r\nOSError: rule path o does not exist or cannot be accessed\r\n\r\n# Failure Information (for bugs)\r\n\r\nThe argument capa.main.RULES_PATH_DEFAULT_STRING passed to get_rules is a string, but should be a list of file paths:\r\nhttps://github.com/kevoreilly/CAPEv2/blob/d9124712cabe5bf9a7a3a98da93cbdbd37a53da3/lib/cuckoo/common/integrations/capa.py#L44\r\n\r\n## Steps to Reproduce\r\n\r\nEnable CAPA in the config and run web server.\r\n\r\n## Context\r\n\r\ncommit 1bd0bf62055fc3741ea19a85d510d54052dbf431\r\nUbuntu 20.04.04 LTS\r\n\r\n## Failure Logs\r\n\r\n File \"/opt/CAPEv2/web/../lib/cuckoo/common/integrations/parse_pe.py\", line 74, in <module>\r\n from lib.cuckoo.common.integrations.capa import HAVE_FLARE_CAPA, flare_capa_details\r\n File \"/opt/CAPEv2/web/../lib/cuckoo/common/integrations/capa.py\", line 48, in <module>\r\n rules = capa.main.get_rules(capa.main.RULES_PATH_DEFAULT_STRING, disable_progress=True)\r\n File \"/usr/local/lib/python3.8/dist-packages/capa/main.py\", line 580, in get_rules\r\n raise IOError(\"rule path %s does not exist or cannot be accessed\" % rule_path)\r\nOSError: rule path o does not exist or cannot be accessed\n", "before_files": [{"content": "# Copyright (C) 2010-2015 Cuckoo Foundation.\n# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org\n# See the file 'docs/LICENSE' for copying permission.\n\nimport collections\nimport logging\nimport os\nfrom typing import Any, Dict, List\n\nfrom lib.cuckoo.common.config import Config\nfrom lib.cuckoo.common.constants import CUCKOO_ROOT\n\nlog = logging.getLogger(__name__)\n\nprocessing_conf = Config(\"processing\")\n\n\"\"\"\nfrom lib.cuckoo.common.integrations.capa import flare_capa_details, HAVE_FLARE_CAPA\npath = \"/opt/CAPEv2/storage/binaries/da034c11f0c396f6cd11d22f833f9501dc75a33047ba3bd5870ff79e479bc004\"\ndetails = flare_capa_details(path, \"static\", on_demand=True)\n\"\"\"\n\nHAVE_FLARE_CAPA = False\nif processing_conf.flare_capa.enabled:\n try:\n from capa.version import __version__ as capa_version\n\n if capa_version[0] != \"3\":\n print(\"FLARE-CAPA missed, pip3 install -U flare-capa\")\n else:\n import capa.main\n import capa.render.utils as rutils\n import capa.rules\n from capa.main import UnsupportedRuntimeError\n from capa.render.result_document import (\n convert_capabilities_to_result_document as capa_convert_capabilities_to_result_document,\n )\n from capa.rules import InvalidRuleSet, InvalidRuleWithPath\n\n rules_path = 
os.path.join(CUCKOO_ROOT, \"data\", \"capa-rules\")\n if os.path.exists(rules_path):\n capa.main.RULES_PATH_DEFAULT_STRING = os.path.join(CUCKOO_ROOT, \"data\", \"capa-rules\")\n try:\n rules = capa.main.get_rules(capa.main.RULES_PATH_DEFAULT_STRING, disable_progress=True)\n rules = capa.rules.RuleSet(rules)\n HAVE_FLARE_CAPA = True\n except InvalidRuleWithPath:\n print(\"FLARE_CAPA InvalidRuleWithPath\")\n HAVE_FLARE_CAPA = False\n except InvalidRuleSet:\n print(\"FLARE_CAPA InvalidRuleSet\")\n HAVE_FLARE_CAPA = False\n else:\n print(\"FLARE CAPA rules missed! You can download them using python3 community.py -cr\")\n HAVE_FLARE_CAPA = False\n\n signatures_path = os.path.join(CUCKOO_ROOT, \"data\", \"capa-signatures\")\n if os.path.exists(signatures_path):\n capa.main.SIGNATURES_PATH_DEFAULT_STRING = os.path.join(CUCKOO_ROOT, \"data\", \"capa-signatures\")\n try:\n signatures = capa.main.get_signatures(capa.main.SIGNATURES_PATH_DEFAULT_STRING)\n HAVE_FLARE_CAPA = True\n except IOError:\n print(\"FLARE_CAPA InvalidSignatures\")\n else:\n print(\"FLARE CAPA rules missed! You can download them using python3 community.py -cr\")\n HAVE_FLARE_CAPA = False\n except ImportError as e:\n HAVE_FLARE_CAPA = False\n print(e)\n print(\"FLARE-CAPA missed, pip3 install -U flare-capa\")\n\n\ndef render_meta(doc: Dict[str, Any]) -> Dict[str, Any]:\n return {\n \"md5\": doc[\"meta\"][\"sample\"][\"md5\"],\n \"sha1\": doc[\"meta\"][\"sample\"][\"sha1\"],\n \"sha256\": doc[\"meta\"][\"sample\"][\"sha256\"],\n \"path\": doc[\"meta\"][\"sample\"][\"path\"],\n }\n\n\ndef find_subrule_matches(doc: Dict[str, Any]) -> set:\n \"\"\"\n collect the rule names that have been matched as a subrule match.\n this way we can avoid displaying entries for things that are too specific.\n \"\"\"\n\n def rec(node: dict) -> set:\n matches = set()\n if not node[\"success\"]:\n # there's probably a bug here for rules that do `not: match: ...`\n # but we don't have any examples of this yet\n return\n\n elif node[\"node\"][\"type\"] == \"statement\":\n for child in node[\"children\"]:\n rec(child)\n\n elif node[\"node\"][\"type\"] == \"feature\":\n if node[\"node\"][\"feature\"][\"type\"] == \"match\":\n matches.add(node[\"node\"][\"feature\"][\"match\"])\n return matches\n\n matches = set()\n\n for rule in rutils.capability_rules(doc):\n for node in rule[\"matches\"].values():\n matches = matches.union(rec(node))\n\n return matches\n\n\ndef render_capabilities(doc: Dict[str, Any]) -> Dict[str, List[str]]:\n \"\"\"\n example::\n {'accept command line arguments': ['host-interaction/cli'],\n 'allocate thread local storage (2 matches)': ['host-interaction/process'],\n 'check for time delay via GetTickCount': ['anti-analysis/anti-debugging/debugger-detection'],\n 'check if process is running under wine': ['anti-analysis/anti-emulation/wine'],\n 'contain a resource (.rsrc) section': ['executable/pe/section/rsrc'],\n 'write file (3 matches)': ['host-interaction/file-system/write']\n }\n \"\"\"\n subrule_matches = find_subrule_matches(doc)\n\n capability_dict = {}\n for rule in rutils.capability_rules(doc):\n if rule[\"meta\"][\"name\"] in subrule_matches:\n # rules that are also matched by other rules should not get rendered by default.\n # this cuts down on the amount of output while giving approx the same detail.\n # see #224\n continue\n\n count = len(rule[\"matches\"])\n if count == 1:\n capability = rule[\"meta\"][\"name\"]\n else:\n capability = f\"{rule['meta']['name']} ({count} matches)\"\n\n 
capability_dict.setdefault(rule[\"meta\"][\"namespace\"], []).append(capability)\n return capability_dict\n\n\ndef render_attack(doc: Dict[str, Any]) -> Dict[str, List[str]]:\n \"\"\"\n example::\n {'COLLECTION': ['Input Capture::Keylogging [T1056.001]'],\n 'DEFENSE EVASION': ['Obfuscated Files or Information [T1027]',\n 'Virtualization/Sandbox Evasion::System Checks '\n '[T1497.001]'],\n 'DISCOVERY': ['File and Directory Discovery [T1083]',\n 'Query Registry [T1012]',\n 'System Information Discovery [T1082]'],\n 'EXECUTION': ['Shared Modules [T1129]']\n }\n \"\"\"\n attck_dict = {}\n tactics = collections.defaultdict(set)\n for rule in rutils.capability_rules(doc):\n for attack in rule[\"meta\"].get(\"att&ck\", {}):\n tactics[attack[\"tactic\"]].add((attack[\"technique\"], attack.get(\"subtechnique\"), attack[\"id\"]))\n\n for tactic, techniques in sorted(tactics.items()):\n inner_rows = []\n for technique, subtechnique, id in sorted(techniques):\n if subtechnique is None:\n inner_rows.append(f\"{technique} {id}\")\n else:\n inner_rows.append(f\"{technique}::{subtechnique} {id}\")\n attck_dict.setdefault(tactic.upper(), inner_rows)\n return attck_dict\n\n\ndef render_mbc(doc: Dict[str, Any]) -> Dict[str, List[str]]:\n \"\"\"\n example::\n {'ANTI-BEHAVIORAL ANALYSIS': ['Debugger Detection::Timing/Delay Check '\n 'GetTickCount [B0001.032]',\n 'Emulator Detection [B0004]',\n 'Virtual Machine Detection::Instruction '\n 'Testing [B0009.029]',\n 'Virtual Machine Detection [B0009]'],\n 'COLLECTION': ['Keylogging::Polling [F0002.002]'],\n 'CRYPTOGRAPHY': ['Encrypt Data::RC4 [C0027.009]',\n 'Generate Pseudo-random Sequence::RC4 PRGA '\n '[C0021.004]']\n }\n \"\"\"\n mbc_dict = {}\n objectives = collections.defaultdict(set)\n for rule in rutils.capability_rules(doc):\n for mbc in rule[\"meta\"].get(\"mbc\", {}):\n objectives[mbc[\"objective\"]].add((mbc[\"behavior\"], mbc.get(\"method\"), mbc[\"id\"]))\n\n for objective, behaviors in sorted(objectives.items()):\n inner_rows = []\n for behavior, method, id in sorted(behaviors):\n if method is None:\n inner_rows.append(f\"{behavior} [{id}]\")\n else:\n inner_rows.append(f\"{behavior}::{method} [{id}]\")\n mbc_dict.setdefault(objective.upper(), inner_rows)\n return mbc_dict\n\n\ndef render_dictionary(doc: Dict[str, Any]) -> Dict[str, Any]:\n ostream = render_meta(doc)\n ostream[\"ATTCK\"] = render_attack(doc)\n ostream[\"MBC\"] = render_mbc(doc)\n ostream[\"CAPABILITY\"] = render_capabilities(doc)\n\n return ostream\n\n\n# ===== CAPA END\ndef flare_capa_details(file_path: str, category: str = False, on_demand=False, disable_progress=True) -> Dict[str, Any]:\n capa_dictionary = {}\n if (\n HAVE_FLARE_CAPA\n and processing_conf.flare_capa.enabled\n and processing_conf.flare_capa.get(category, False)\n and not processing_conf.flare_capa.on_demand\n or on_demand\n ):\n try:\n extractor = capa.main.get_extractor(\n file_path, \"auto\", capa.main.BACKEND_VIV, signatures, disable_progress=disable_progress\n )\n meta = capa.main.collect_metadata(\"\", file_path, capa.main.RULES_PATH_DEFAULT_STRING, extractor)\n capabilities, counts = capa.main.find_capabilities(rules, extractor, disable_progress=True)\n meta[\"analysis\"].update(counts)\n doc = capa_convert_capabilities_to_result_document(meta, rules, capabilities)\n capa_dictionary = render_dictionary(doc)\n except MemoryError:\n log.warning(\"FLARE CAPA -> MemoryError\")\n except UnsupportedRuntimeError:\n log.error(\"FLARE CAPA -> UnsupportedRuntimeError\")\n except Exception as e:\n log.error(e, 
exc_info=True)\n return capa_dictionary\n", "path": "lib/cuckoo/common/integrations/capa.py"}], "after_files": [{"content": "# Copyright (C) 2010-2015 Cuckoo Foundation.\n# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org\n# See the file 'docs/LICENSE' for copying permission.\n\nimport collections\nimport logging\nimport os\nfrom typing import Any, Dict, List\n\nfrom lib.cuckoo.common.config import Config\nfrom lib.cuckoo.common.constants import CUCKOO_ROOT\n\nlog = logging.getLogger(__name__)\n\nprocessing_conf = Config(\"processing\")\n\n\"\"\"\nfrom lib.cuckoo.common.integrations.capa import flare_capa_details, HAVE_FLARE_CAPA\npath = \"/opt/CAPEv2/storage/binaries/da034c11f0c396f6cd11d22f833f9501dc75a33047ba3bd5870ff79e479bc004\"\ndetails = flare_capa_details(path, \"static\", on_demand=True)\n\"\"\"\n\nHAVE_FLARE_CAPA = False\nif processing_conf.flare_capa.enabled:\n try:\n from capa.version import __version__ as capa_version\n\n if capa_version[0] != \"3\":\n print(\"FLARE-CAPA missed, pip3 install -U flare-capa\")\n else:\n import capa.main\n import capa.render.utils as rutils\n import capa.rules\n from capa.main import UnsupportedRuntimeError\n from capa.render.result_document import (\n convert_capabilities_to_result_document as capa_convert_capabilities_to_result_document,\n )\n from capa.rules import InvalidRuleSet, InvalidRuleWithPath\n\n rules_path = os.path.join(CUCKOO_ROOT, \"data\", \"capa-rules\")\n if os.path.exists(rules_path):\n capa.main.RULES_PATH_DEFAULT_STRING = os.path.join(CUCKOO_ROOT, \"data\", \"capa-rules\")\n rules_list = []\n rules_list.append(rules_path)\n try:\n rules = capa.main.get_rules(rules_list, disable_progress=True)\n rules = capa.rules.RuleSet(rules)\n HAVE_FLARE_CAPA = True\n except InvalidRuleWithPath:\n print(\"FLARE_CAPA InvalidRuleWithPath\")\n HAVE_FLARE_CAPA = False\n except InvalidRuleSet:\n print(\"FLARE_CAPA InvalidRuleSet\")\n HAVE_FLARE_CAPA = False\n else:\n print(\"FLARE CAPA rules missed! You can download them using python3 community.py -cr\")\n HAVE_FLARE_CAPA = False\n\n signatures_path = os.path.join(CUCKOO_ROOT, \"data\", \"capa-signatures\")\n if os.path.exists(signatures_path):\n capa.main.SIGNATURES_PATH_DEFAULT_STRING = os.path.join(CUCKOO_ROOT, \"data\", \"capa-signatures\")\n try:\n signatures = capa.main.get_signatures(capa.main.SIGNATURES_PATH_DEFAULT_STRING)\n HAVE_FLARE_CAPA = True\n except IOError:\n print(\"FLARE_CAPA InvalidSignatures\")\n else:\n print(\"FLARE CAPA rules missed! 
You can download them using python3 community.py -cr\")\n HAVE_FLARE_CAPA = False\n except ImportError as e:\n HAVE_FLARE_CAPA = False\n print(e)\n print(\"FLARE-CAPA missed, pip3 install -U flare-capa\")\n\n\ndef render_meta(doc: Dict[str, Any]) -> Dict[str, Any]:\n return {\n \"md5\": doc[\"meta\"][\"sample\"][\"md5\"],\n \"sha1\": doc[\"meta\"][\"sample\"][\"sha1\"],\n \"sha256\": doc[\"meta\"][\"sample\"][\"sha256\"],\n \"path\": doc[\"meta\"][\"sample\"][\"path\"],\n }\n\n\ndef find_subrule_matches(doc: Dict[str, Any]) -> set:\n \"\"\"\n collect the rule names that have been matched as a subrule match.\n this way we can avoid displaying entries for things that are too specific.\n \"\"\"\n\n def rec(node: dict) -> set:\n matches = set()\n if not node[\"success\"]:\n # there's probably a bug here for rules that do `not: match: ...`\n # but we don't have any examples of this yet\n return\n\n elif node[\"node\"][\"type\"] == \"statement\":\n for child in node[\"children\"]:\n rec(child)\n\n elif node[\"node\"][\"type\"] == \"feature\":\n if node[\"node\"][\"feature\"][\"type\"] == \"match\":\n matches.add(node[\"node\"][\"feature\"][\"match\"])\n return matches\n\n matches = set()\n\n for rule in rutils.capability_rules(doc):\n for node in rule[\"matches\"].values():\n matches = matches.union(rec(node))\n\n return matches\n\n\ndef render_capabilities(doc: Dict[str, Any]) -> Dict[str, List[str]]:\n \"\"\"\n example::\n {'accept command line arguments': ['host-interaction/cli'],\n 'allocate thread local storage (2 matches)': ['host-interaction/process'],\n 'check for time delay via GetTickCount': ['anti-analysis/anti-debugging/debugger-detection'],\n 'check if process is running under wine': ['anti-analysis/anti-emulation/wine'],\n 'contain a resource (.rsrc) section': ['executable/pe/section/rsrc'],\n 'write file (3 matches)': ['host-interaction/file-system/write']\n }\n \"\"\"\n subrule_matches = find_subrule_matches(doc)\n\n capability_dict = {}\n for rule in rutils.capability_rules(doc):\n if rule[\"meta\"][\"name\"] in subrule_matches:\n # rules that are also matched by other rules should not get rendered by default.\n # this cuts down on the amount of output while giving approx the same detail.\n # see #224\n continue\n\n count = len(rule[\"matches\"])\n if count == 1:\n capability = rule[\"meta\"][\"name\"]\n else:\n capability = f\"{rule['meta']['name']} ({count} matches)\"\n\n capability_dict.setdefault(rule[\"meta\"][\"namespace\"], []).append(capability)\n return capability_dict\n\n\ndef render_attack(doc: Dict[str, Any]) -> Dict[str, List[str]]:\n \"\"\"\n example::\n {'COLLECTION': ['Input Capture::Keylogging [T1056.001]'],\n 'DEFENSE EVASION': ['Obfuscated Files or Information [T1027]',\n 'Virtualization/Sandbox Evasion::System Checks '\n '[T1497.001]'],\n 'DISCOVERY': ['File and Directory Discovery [T1083]',\n 'Query Registry [T1012]',\n 'System Information Discovery [T1082]'],\n 'EXECUTION': ['Shared Modules [T1129]']\n }\n \"\"\"\n attck_dict = {}\n tactics = collections.defaultdict(set)\n for rule in rutils.capability_rules(doc):\n for attack in rule[\"meta\"].get(\"att&ck\", {}):\n tactics[attack[\"tactic\"]].add((attack[\"technique\"], attack.get(\"subtechnique\"), attack[\"id\"]))\n\n for tactic, techniques in sorted(tactics.items()):\n inner_rows = []\n for technique, subtechnique, id in sorted(techniques):\n if subtechnique is None:\n inner_rows.append(f\"{technique} {id}\")\n else:\n inner_rows.append(f\"{technique}::{subtechnique} {id}\")\n 
attck_dict.setdefault(tactic.upper(), inner_rows)\n return attck_dict\n\n\ndef render_mbc(doc: Dict[str, Any]) -> Dict[str, List[str]]:\n \"\"\"\n example::\n {'ANTI-BEHAVIORAL ANALYSIS': ['Debugger Detection::Timing/Delay Check '\n 'GetTickCount [B0001.032]',\n 'Emulator Detection [B0004]',\n 'Virtual Machine Detection::Instruction '\n 'Testing [B0009.029]',\n 'Virtual Machine Detection [B0009]'],\n 'COLLECTION': ['Keylogging::Polling [F0002.002]'],\n 'CRYPTOGRAPHY': ['Encrypt Data::RC4 [C0027.009]',\n 'Generate Pseudo-random Sequence::RC4 PRGA '\n '[C0021.004]']\n }\n \"\"\"\n mbc_dict = {}\n objectives = collections.defaultdict(set)\n for rule in rutils.capability_rules(doc):\n for mbc in rule[\"meta\"].get(\"mbc\", {}):\n objectives[mbc[\"objective\"]].add((mbc[\"behavior\"], mbc.get(\"method\"), mbc[\"id\"]))\n\n for objective, behaviors in sorted(objectives.items()):\n inner_rows = []\n for behavior, method, id in sorted(behaviors):\n if method is None:\n inner_rows.append(f\"{behavior} [{id}]\")\n else:\n inner_rows.append(f\"{behavior}::{method} [{id}]\")\n mbc_dict.setdefault(objective.upper(), inner_rows)\n return mbc_dict\n\n\ndef render_dictionary(doc: Dict[str, Any]) -> Dict[str, Any]:\n ostream = render_meta(doc)\n ostream[\"ATTCK\"] = render_attack(doc)\n ostream[\"MBC\"] = render_mbc(doc)\n ostream[\"CAPABILITY\"] = render_capabilities(doc)\n\n return ostream\n\n\n# ===== CAPA END\ndef flare_capa_details(file_path: str, category: str = False, on_demand=False, disable_progress=True) -> Dict[str, Any]:\n capa_dictionary = {}\n if (\n HAVE_FLARE_CAPA\n and processing_conf.flare_capa.enabled\n and processing_conf.flare_capa.get(category, False)\n and not processing_conf.flare_capa.on_demand\n or on_demand\n ):\n try:\n extractor = capa.main.get_extractor(\n file_path, \"auto\", capa.main.BACKEND_VIV, signatures, disable_progress=disable_progress\n )\n meta = capa.main.collect_metadata(\"\", file_path, capa.main.RULES_PATH_DEFAULT_STRING, extractor)\n capabilities, counts = capa.main.find_capabilities(rules, extractor, disable_progress=True)\n meta[\"analysis\"].update(counts)\n doc = capa_convert_capabilities_to_result_document(meta, rules, capabilities)\n capa_dictionary = render_dictionary(doc)\n except MemoryError:\n log.warning(\"FLARE CAPA -> MemoryError\")\n except UnsupportedRuntimeError:\n log.error(\"FLARE CAPA -> UnsupportedRuntimeError\")\n except Exception as e:\n log.error(e, exc_info=True)\n return capa_dictionary\n", "path": "lib/cuckoo/common/integrations/capa.py"}]}
| 3,601 | 207 |
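The capa record above turns on a single type mismatch: `capa.main.get_rules()` expects a list of rule paths, and handing it a bare string makes Python iterate the string character by character. A minimal illustration of that failure mode follows; `load_rule_paths` is a hypothetical stand-in, not the real capa API.

```python
# Hypothetical loader that iterates its argument, mirroring the reported behaviour.
def load_rule_paths(rule_paths):
    loaded = []
    for p in rule_paths:  # a plain str yields one character per iteration
        loaded.append(p)
    return loaded

print(load_rule_paths("/opt/capa-rules"))    # ['/', 'o', 'p', 't', ...] -> "rule path o does not exist"
print(load_rule_paths(["/opt/capa-rules"]))  # ['/opt/capa-rules'] -> the intended single path
```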
gh_patches_debug_21826
|
rasdani/github-patches
|
git_diff
|
docker__docker-py-1802
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Check resource error in container network API
```
docker python client v2.4.2
python v2.7.12
docker v17.03.1-ce
Ubuntu 16.04
```
PR #1649 updated the `check_resource` decorator to handle different resource names. Container network API functions `connect_container_to_network()` and `disconnect_container_from_network()` check 'image' as resource ID and not 'container'.
Reproduce using the following snippet:
```python
import docker
cli = docker.APIClient(base_url='unix:///var/run/docker.sock')
cli.pull(repository='ubuntu', tag='latest')
name = 'my_ubuntu'
container = cli.create_container(image='ubuntu:latest', name=name)
cli.connect_container_to_network(container=name, net_id='bridge')
```
This causes:
```
Traceback (most recent call last):
File "test.py", line 8, in <module>
cli.connect_container_to_network(container=name, net_id='bridge')
File "/home/mberry/scratch/virtualenv/docker_py/local/lib/python2.7/site-packages/docker/utils/decorators.py", line 17, in wrapped
'Resource ID was not provided'
docker.errors.NullResource: Resource ID was not provided
```
client.networks.create check_duplicates docs not reflective of behavior
Docs say it does, but it's actually set to `None`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/version.py`
Content:
```
1 version = "2.6.0"
2 version_info = tuple([int(d) for d in version.split("-")[0].split(".")])
3
```
Path: `docker/transport/unixconn.py`
Content:
```
1 import six
2 import requests.adapters
3 import socket
4
5 from .. import constants
6
7 if six.PY3:
8 import http.client as httplib
9 else:
10 import httplib
11
12 try:
13 import requests.packages.urllib3 as urllib3
14 except ImportError:
15 import urllib3
16
17
18 RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
19
20
21 class UnixHTTPResponse(httplib.HTTPResponse, object):
22 def __init__(self, sock, *args, **kwargs):
23 disable_buffering = kwargs.pop('disable_buffering', False)
24 super(UnixHTTPResponse, self).__init__(sock, *args, **kwargs)
25 if disable_buffering is True:
26 # We must first create a new pointer then close the old one
27 # to avoid closing the underlying socket.
28 new_fp = sock.makefile('rb', 0)
29 self.fp.close()
30 self.fp = new_fp
31
32
33 class UnixHTTPConnection(httplib.HTTPConnection, object):
34
35 def __init__(self, base_url, unix_socket, timeout=60):
36 super(UnixHTTPConnection, self).__init__(
37 'localhost', timeout=timeout
38 )
39 self.base_url = base_url
40 self.unix_socket = unix_socket
41 self.timeout = timeout
42 self.disable_buffering = False
43
44 def connect(self):
45 sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
46 sock.settimeout(self.timeout)
47 sock.connect(self.unix_socket)
48 self.sock = sock
49
50 def putheader(self, header, *values):
51 super(UnixHTTPConnection, self).putheader(header, *values)
52 if header == 'Connection' and 'Upgrade' in values:
53 self.disable_buffering = True
54
55 def response_class(self, sock, *args, **kwargs):
56 if self.disable_buffering:
57 kwargs['disable_buffering'] = True
58
59 return UnixHTTPResponse(sock, *args, **kwargs)
60
61
62 class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
63 def __init__(self, base_url, socket_path, timeout=60, maxsize=10):
64 super(UnixHTTPConnectionPool, self).__init__(
65 'localhost', timeout=timeout, maxsize=maxsize
66 )
67 self.base_url = base_url
68 self.socket_path = socket_path
69 self.timeout = timeout
70
71 def _new_conn(self):
72 return UnixHTTPConnection(
73 self.base_url, self.socket_path, self.timeout
74 )
75
76
77 class UnixAdapter(requests.adapters.HTTPAdapter):
78
79 __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['pools',
80 'socket_path',
81 'timeout']
82
83 def __init__(self, socket_url, timeout=60,
84 pool_connections=constants.DEFAULT_NUM_POOLS):
85 socket_path = socket_url.replace('http+unix://', '')
86 if not socket_path.startswith('/'):
87 socket_path = '/' + socket_path
88 self.socket_path = socket_path
89 self.timeout = timeout
90 self.pools = RecentlyUsedContainer(
91 pool_connections, dispose_func=lambda p: p.close()
92 )
93 super(UnixAdapter, self).__init__()
94
95 def get_connection(self, url, proxies=None):
96 with self.pools.lock:
97 pool = self.pools.get(url)
98 if pool:
99 return pool
100
101 pool = UnixHTTPConnectionPool(
102 url, self.socket_path, self.timeout
103 )
104 self.pools[url] = pool
105
106 return pool
107
108 def request_url(self, request, proxies):
109 # The select_proxy utility in requests errors out when the provided URL
110 # doesn't have a hostname, like is the case when using a UNIX socket.
111 # Since proxies are an irrelevant notion in the case of UNIX sockets
112 # anyway, we simply return the path URL directly.
113 # See also: https://github.com/docker/docker-py/issues/811
114 return request.path_url
115
116 def close(self):
117 self.pools.clear()
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docker/transport/unixconn.py b/docker/transport/unixconn.py
--- a/docker/transport/unixconn.py
+++ b/docker/transport/unixconn.py
@@ -21,13 +21,12 @@
class UnixHTTPResponse(httplib.HTTPResponse, object):
def __init__(self, sock, *args, **kwargs):
disable_buffering = kwargs.pop('disable_buffering', False)
+ if six.PY2:
+ # FIXME: We may need to disable buffering on Py3 as well,
+ # but there's no clear way to do it at the moment. See:
+ # https://github.com/docker/docker-py/issues/1799
+ kwargs['buffering'] = not disable_buffering
super(UnixHTTPResponse, self).__init__(sock, *args, **kwargs)
- if disable_buffering is True:
- # We must first create a new pointer then close the old one
- # to avoid closing the underlying socket.
- new_fp = sock.makefile('rb', 0)
- self.fp.close()
- self.fp = new_fp
class UnixHTTPConnection(httplib.HTTPConnection, object):
diff --git a/docker/version.py b/docker/version.py
--- a/docker/version.py
+++ b/docker/version.py
@@ -1,2 +1,2 @@
-version = "2.6.0"
+version = "2.6.1"
version_info = tuple([int(d) for d in version.split("-")[0].split(".")])
|
{"golden_diff": "diff --git a/docker/transport/unixconn.py b/docker/transport/unixconn.py\n--- a/docker/transport/unixconn.py\n+++ b/docker/transport/unixconn.py\n@@ -21,13 +21,12 @@\n class UnixHTTPResponse(httplib.HTTPResponse, object):\n def __init__(self, sock, *args, **kwargs):\n disable_buffering = kwargs.pop('disable_buffering', False)\n+ if six.PY2:\n+ # FIXME: We may need to disable buffering on Py3 as well,\n+ # but there's no clear way to do it at the moment. See:\n+ # https://github.com/docker/docker-py/issues/1799\n+ kwargs['buffering'] = not disable_buffering\n super(UnixHTTPResponse, self).__init__(sock, *args, **kwargs)\n- if disable_buffering is True:\n- # We must first create a new pointer then close the old one\n- # to avoid closing the underlying socket.\n- new_fp = sock.makefile('rb', 0)\n- self.fp.close()\n- self.fp = new_fp\n \n \n class UnixHTTPConnection(httplib.HTTPConnection, object):\ndiff --git a/docker/version.py b/docker/version.py\n--- a/docker/version.py\n+++ b/docker/version.py\n@@ -1,2 +1,2 @@\n-version = \"2.6.0\"\n+version = \"2.6.1\"\n version_info = tuple([int(d) for d in version.split(\"-\")[0].split(\".\")])\n", "issue": "Check resource error in container network API\n```\r\ndocker python client v2.4.2\r\npython v2.7.12\r\ndocker v17.03.1-ce\r\nUbuntu 16.04\r\n```\r\n\r\nPR #1649 updated the `check_resource` decorator to handle different resource names. Container network API functions `connect_container_to_network()` and `disconnect_container_from_network()` check 'image' as resource ID and not 'container'.\r\n\r\nReproduce using the following snippet:\r\n```python\r\nimport docker\r\n\r\ncli = docker.APIClient(base_url='unix:///var/run/docker.sock')\r\ncli.pull(repository='ubuntu', tag='latest')\r\n\r\nname = 'my_ubuntu'\r\ncontainer = cli.create_container(image='ubuntu:latest', name=name)\r\ncli.connect_container_to_network(container=name, net_id='bridge')\r\n```\r\n\r\nThis causes:\r\n```\r\nTraceback (most recent call last):\r\n File \"test.py\", line 8, in <module>\r\n cli.connect_container_to_network(container=name, net_id='bridge')\r\n File \"/home/mberry/scratch/virtualenv/docker_py/local/lib/python2.7/site-packages/docker/utils/decorators.py\", line 17, in wrapped\r\n 'Resource ID was not provided'\r\ndocker.errors.NullResource: Resource ID was not provided\r\n```\nclient.networks.create check_duplicates docs not reflective of behavior\nDocs say it does, but it's actually set to `None`.\n", "before_files": [{"content": "version = \"2.6.0\"\nversion_info = tuple([int(d) for d in version.split(\"-\")[0].split(\".\")])\n", "path": "docker/version.py"}, {"content": "import six\nimport requests.adapters\nimport socket\n\nfrom .. 
import constants\n\nif six.PY3:\n import http.client as httplib\nelse:\n import httplib\n\ntry:\n import requests.packages.urllib3 as urllib3\nexcept ImportError:\n import urllib3\n\n\nRecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer\n\n\nclass UnixHTTPResponse(httplib.HTTPResponse, object):\n def __init__(self, sock, *args, **kwargs):\n disable_buffering = kwargs.pop('disable_buffering', False)\n super(UnixHTTPResponse, self).__init__(sock, *args, **kwargs)\n if disable_buffering is True:\n # We must first create a new pointer then close the old one\n # to avoid closing the underlying socket.\n new_fp = sock.makefile('rb', 0)\n self.fp.close()\n self.fp = new_fp\n\n\nclass UnixHTTPConnection(httplib.HTTPConnection, object):\n\n def __init__(self, base_url, unix_socket, timeout=60):\n super(UnixHTTPConnection, self).__init__(\n 'localhost', timeout=timeout\n )\n self.base_url = base_url\n self.unix_socket = unix_socket\n self.timeout = timeout\n self.disable_buffering = False\n\n def connect(self):\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n sock.settimeout(self.timeout)\n sock.connect(self.unix_socket)\n self.sock = sock\n\n def putheader(self, header, *values):\n super(UnixHTTPConnection, self).putheader(header, *values)\n if header == 'Connection' and 'Upgrade' in values:\n self.disable_buffering = True\n\n def response_class(self, sock, *args, **kwargs):\n if self.disable_buffering:\n kwargs['disable_buffering'] = True\n\n return UnixHTTPResponse(sock, *args, **kwargs)\n\n\nclass UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):\n def __init__(self, base_url, socket_path, timeout=60, maxsize=10):\n super(UnixHTTPConnectionPool, self).__init__(\n 'localhost', timeout=timeout, maxsize=maxsize\n )\n self.base_url = base_url\n self.socket_path = socket_path\n self.timeout = timeout\n\n def _new_conn(self):\n return UnixHTTPConnection(\n self.base_url, self.socket_path, self.timeout\n )\n\n\nclass UnixAdapter(requests.adapters.HTTPAdapter):\n\n __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['pools',\n 'socket_path',\n 'timeout']\n\n def __init__(self, socket_url, timeout=60,\n pool_connections=constants.DEFAULT_NUM_POOLS):\n socket_path = socket_url.replace('http+unix://', '')\n if not socket_path.startswith('/'):\n socket_path = '/' + socket_path\n self.socket_path = socket_path\n self.timeout = timeout\n self.pools = RecentlyUsedContainer(\n pool_connections, dispose_func=lambda p: p.close()\n )\n super(UnixAdapter, self).__init__()\n\n def get_connection(self, url, proxies=None):\n with self.pools.lock:\n pool = self.pools.get(url)\n if pool:\n return pool\n\n pool = UnixHTTPConnectionPool(\n url, self.socket_path, self.timeout\n )\n self.pools[url] = pool\n\n return pool\n\n def request_url(self, request, proxies):\n # The select_proxy utility in requests errors out when the provided URL\n # doesn't have a hostname, like is the case when using a UNIX socket.\n # Since proxies are an irrelevant notion in the case of UNIX sockets\n # anyway, we simply return the path URL directly.\n # See also: https://github.com/docker/docker-py/issues/811\n return request.path_url\n\n def close(self):\n self.pools.clear()\n", "path": "docker/transport/unixconn.py"}], "after_files": [{"content": "version = \"2.6.1\"\nversion_info = tuple([int(d) for d in version.split(\"-\")[0].split(\".\")])\n", "path": "docker/version.py"}, {"content": "import six\nimport requests.adapters\nimport socket\n\nfrom .. 
import constants\n\nif six.PY3:\n import http.client as httplib\nelse:\n import httplib\n\ntry:\n import requests.packages.urllib3 as urllib3\nexcept ImportError:\n import urllib3\n\n\nRecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer\n\n\nclass UnixHTTPResponse(httplib.HTTPResponse, object):\n def __init__(self, sock, *args, **kwargs):\n disable_buffering = kwargs.pop('disable_buffering', False)\n if six.PY2:\n # FIXME: We may need to disable buffering on Py3 as well,\n # but there's no clear way to do it at the moment. See:\n # https://github.com/docker/docker-py/issues/1799\n kwargs['buffering'] = not disable_buffering\n super(UnixHTTPResponse, self).__init__(sock, *args, **kwargs)\n\n\nclass UnixHTTPConnection(httplib.HTTPConnection, object):\n\n def __init__(self, base_url, unix_socket, timeout=60):\n super(UnixHTTPConnection, self).__init__(\n 'localhost', timeout=timeout\n )\n self.base_url = base_url\n self.unix_socket = unix_socket\n self.timeout = timeout\n self.disable_buffering = False\n\n def connect(self):\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n sock.settimeout(self.timeout)\n sock.connect(self.unix_socket)\n self.sock = sock\n\n def putheader(self, header, *values):\n super(UnixHTTPConnection, self).putheader(header, *values)\n if header == 'Connection' and 'Upgrade' in values:\n self.disable_buffering = True\n\n def response_class(self, sock, *args, **kwargs):\n if self.disable_buffering:\n kwargs['disable_buffering'] = True\n\n return UnixHTTPResponse(sock, *args, **kwargs)\n\n\nclass UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):\n def __init__(self, base_url, socket_path, timeout=60, maxsize=10):\n super(UnixHTTPConnectionPool, self).__init__(\n 'localhost', timeout=timeout, maxsize=maxsize\n )\n self.base_url = base_url\n self.socket_path = socket_path\n self.timeout = timeout\n\n def _new_conn(self):\n return UnixHTTPConnection(\n self.base_url, self.socket_path, self.timeout\n )\n\n\nclass UnixAdapter(requests.adapters.HTTPAdapter):\n\n __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['pools',\n 'socket_path',\n 'timeout']\n\n def __init__(self, socket_url, timeout=60,\n pool_connections=constants.DEFAULT_NUM_POOLS):\n socket_path = socket_url.replace('http+unix://', '')\n if not socket_path.startswith('/'):\n socket_path = '/' + socket_path\n self.socket_path = socket_path\n self.timeout = timeout\n self.pools = RecentlyUsedContainer(\n pool_connections, dispose_func=lambda p: p.close()\n )\n super(UnixAdapter, self).__init__()\n\n def get_connection(self, url, proxies=None):\n with self.pools.lock:\n pool = self.pools.get(url)\n if pool:\n return pool\n\n pool = UnixHTTPConnectionPool(\n url, self.socket_path, self.timeout\n )\n self.pools[url] = pool\n\n return pool\n\n def request_url(self, request, proxies):\n # The select_proxy utility in requests errors out when the provided URL\n # doesn't have a hostname, like is the case when using a UNIX socket.\n # Since proxies are an irrelevant notion in the case of UNIX sockets\n # anyway, we simply return the path URL directly.\n # See also: https://github.com/docker/docker-py/issues/811\n return request.path_url\n\n def close(self):\n self.pools.clear()\n", "path": "docker/transport/unixconn.py"}]}
| 1,706 | 335 |
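The docker-py record above describes a decorator that validates a named resource argument; the failing network functions checked 'image' where they should have checked 'container'. The sketch below is a hypothetical reimplementation of that pattern, not docker-py's actual `check_resource`, to show why the wrong parameter name raises `NullResource` even when a container is supplied.

```python
import functools

class NullResource(Exception):
    pass

def check_resource(resource_name):
    """Reject calls where the named resource argument is missing or empty."""
    def decorator(fn):
        @functools.wraps(fn)
        def wrapped(self, *args, **kwargs):
            resource_id = kwargs.get(resource_name) or (args[0] if args else None)
            if not resource_id:
                raise NullResource('Resource ID was not provided')
            return fn(self, *args, **kwargs)
        return wrapped
    return decorator

class Client:
    # decorating with 'image' instead of 'container' reproduces the reported error
    @check_resource('container')
    def connect_container_to_network(self, container=None, net_id=None):
        return container, net_id

print(Client().connect_container_to_network(container='my_ubuntu', net_id='bridge'))
```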
gh_patches_debug_17479
|
rasdani/github-patches
|
git_diff
|
doccano__doccano-1557
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Metadata column repeated when exported as csv
Hi I have recently come across a bug when you export data as csv
* Operating System:MacOS 10.14
* Python Version Used: 3.9.5
* Doccano installed through pip3 install doccano
I have created a DocumentClassification project and have imported some json data.
The json data is in the format of
```bash
{"text":"The ravioli was excellent" , "hidden":"The FOOD was excellent"}
```
When these sentences are imported, the "hidden" : "The FOOD was excellent" becomes part of the Metadata. I have quite a few of these sentences and have labelled them with my own labels
The issue is when I export the dataset as csv, the Metadata column repeats. For example if I have 10 labelled sentences, the Metadata column is repeated 10 times per row of data in excel.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend/api/views/download/data.py`
Content:
```
1 from typing import Any, Dict, List
2
3
4 class Record:
5
6 def __init__(self,
7 id: int,
8 data: str,
9 label: List[Any],
10 user: str,
11 metadata: Dict[Any, Any]):
12 self.id = id
13 self.data = data
14 self.label = label
15 self.user = user
16 self.metadata = metadata
17
18 def __str__(self):
19 return f'{self.data}\t{self.label}'
20
```
Path: `backend/api/views/download/writer.py`
Content:
```
1 import abc
2 import csv
3 import itertools
4 import json
5 import os
6 import uuid
7 import zipfile
8 from collections import defaultdict
9 from typing import Dict, Iterable, Iterator, List
10
11 from .data import Record
12
13
14 class BaseWriter:
15
16 def __init__(self, tmpdir: str):
17 self.tmpdir = tmpdir
18
19 @abc.abstractmethod
20 def write(self, records: Iterator[Record]) -> str:
21 raise NotImplementedError()
22
23 def write_zip(self, filenames: Iterable):
24 save_file = '{}.zip'.format(os.path.join(self.tmpdir, str(uuid.uuid4())))
25 with zipfile.ZipFile(save_file, 'w', compression=zipfile.ZIP_DEFLATED) as zf:
26 for file in filenames:
27 zf.write(filename=file, arcname=os.path.basename(file))
28 return save_file
29
30
31 class LineWriter(BaseWriter):
32 extension = 'txt'
33
34 def write(self, records: Iterator[Record]) -> str:
35 files = {}
36 for record in records:
37 filename = os.path.join(self.tmpdir, f'{record.user}.{self.extension}')
38 if filename not in files:
39 f = open(filename, mode='a')
40 files[filename] = f
41 f = files[filename]
42 line = self.create_line(record)
43 f.write(f'{line}\n')
44 for f in files.values():
45 f.close()
46 save_file = self.write_zip(files)
47 for file in files:
48 os.remove(file)
49 return save_file
50
51 @abc.abstractmethod
52 def create_line(self, record) -> str:
53 raise NotImplementedError()
54
55
56 class CsvWriter(BaseWriter):
57 extension = 'csv'
58
59 def write(self, records: Iterator[Record]) -> str:
60 writers = {}
61 file_handlers = set()
62 records = list(records)
63 header = self.create_header(records)
64 for record in records:
65 filename = os.path.join(self.tmpdir, f'{record.user}.{self.extension}')
66 if filename not in writers:
67 f = open(filename, mode='a', encoding='utf-8')
68 writer = csv.DictWriter(f, header)
69 writer.writeheader()
70 writers[filename] = writer
71 file_handlers.add(f)
72 writer = writers[filename]
73 line = self.create_line(record)
74 writer.writerow(line)
75
76 for f in file_handlers:
77 f.close()
78 save_file = self.write_zip(writers)
79 for file in writers:
80 os.remove(file)
81 return save_file
82
83 def create_line(self, record) -> Dict:
84 return {
85 'id': record.id,
86 'data': record.data,
87 'label': '#'.join(record.label),
88 **record.metadata
89 }
90
91 def create_header(self, records: List[Record]) -> Iterable[str]:
92 header = ['id', 'data', 'label']
93 header += list(itertools.chain(*[r.metadata.keys() for r in records]))
94 return header
95
96
97 class JSONWriter(BaseWriter):
98 extension = 'json'
99
100 def write(self, records: Iterator[Record]) -> str:
101 writers = {}
102 contents = defaultdict(list)
103 for record in records:
104 filename = os.path.join(self.tmpdir, f'{record.user}.{self.extension}')
105 if filename not in writers:
106 f = open(filename, mode='a', encoding='utf-8')
107 writers[filename] = f
108 line = self.create_line(record)
109 contents[filename].append(line)
110
111 for filename, f in writers.items():
112 content = contents[filename]
113 json.dump(content, f, ensure_ascii=False)
114 f.close()
115
116 save_file = self.write_zip(writers)
117 for file in writers:
118 os.remove(file)
119 return save_file
120
121 def create_line(self, record) -> Dict:
122 return {
123 'id': record.id,
124 'data': record.data,
125 'label': record.label,
126 **record.metadata
127 }
128
129
130 class JSONLWriter(LineWriter):
131 extension = 'jsonl'
132
133 def create_line(self, record):
134 return json.dumps({
135 'id': record.id,
136 'data': record.data,
137 'label': record.label,
138 **record.metadata
139 }, ensure_ascii=False)
140
141
142 class FastTextWriter(LineWriter):
143 extension = 'txt'
144
145 def create_line(self, record):
146 line = [f'__label__{label}' for label in record.label]
147 line.append(record.data)
148 line = ' '.join(line)
149 return line
150
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/backend/api/views/download/data.py b/backend/api/views/download/data.py
--- a/backend/api/views/download/data.py
+++ b/backend/api/views/download/data.py
@@ -1,3 +1,4 @@
+import json
from typing import Any, Dict, List
@@ -16,4 +17,10 @@
self.metadata = metadata
def __str__(self):
- return f'{self.data}\t{self.label}'
+ return json.dumps({
+ 'id': self.id,
+ 'data': self.data,
+ 'label': self.label,
+ 'user': self.user,
+ 'metadata': self.metadata
+ })
diff --git a/backend/api/views/download/writer.py b/backend/api/views/download/writer.py
--- a/backend/api/views/download/writer.py
+++ b/backend/api/views/download/writer.py
@@ -90,7 +90,7 @@
def create_header(self, records: List[Record]) -> Iterable[str]:
header = ['id', 'data', 'label']
- header += list(itertools.chain(*[r.metadata.keys() for r in records]))
+ header += sorted(set(itertools.chain(*[r.metadata.keys() for r in records])))
return header
|
{"golden_diff": "diff --git a/backend/api/views/download/data.py b/backend/api/views/download/data.py\n--- a/backend/api/views/download/data.py\n+++ b/backend/api/views/download/data.py\n@@ -1,3 +1,4 @@\n+import json\n from typing import Any, Dict, List\n \n \n@@ -16,4 +17,10 @@\n self.metadata = metadata\n \n def __str__(self):\n- return f'{self.data}\\t{self.label}'\n+ return json.dumps({\n+ 'id': self.id,\n+ 'data': self.data,\n+ 'label': self.label,\n+ 'user': self.user,\n+ 'metadata': self.metadata\n+ })\ndiff --git a/backend/api/views/download/writer.py b/backend/api/views/download/writer.py\n--- a/backend/api/views/download/writer.py\n+++ b/backend/api/views/download/writer.py\n@@ -90,7 +90,7 @@\n \n def create_header(self, records: List[Record]) -> Iterable[str]:\n header = ['id', 'data', 'label']\n- header += list(itertools.chain(*[r.metadata.keys() for r in records]))\n+ header += sorted(set(itertools.chain(*[r.metadata.keys() for r in records])))\n return header\n", "issue": "Metadata column repeated when exported as csv\nHi I have recently come across a bug when you export data as csv\r\n<environment.-->\r\n*\u00a0\u00a0 Operating System:MacOS 10.14\r\n*\u00a0\u00a0 Python Version Used: 3.9.5\r\n*\u00a0\u00a0 Doccano installed through pip3 install\u00a0\u00a0\u00a0\u00a0 doccano \r\n\r\nI have created a DocumentClassification project and have imported some json data. \r\n\r\nThe json data is in the format of \r\n\r\n```bash\r\n{\"text\":\"The ravioli was excellent\" , \"hidden\":\"The FOOD was excellent\"} \r\n```\r\n\r\nWhen these sentences are imported, the \"hidden\" : \"The FOOD was excellent\" becomes part of the Metadata. I have quite a few of these sentences and have labelled them with my own labels \r\n\r\nThe issue is when I export the dataset as csv, the Metadata column repeats. For example if I have 10 labelled sentences, the Metadata column is repeated 10 times per row of data in excel. 
\n", "before_files": [{"content": "from typing import Any, Dict, List\n\n\nclass Record:\n\n def __init__(self,\n id: int,\n data: str,\n label: List[Any],\n user: str,\n metadata: Dict[Any, Any]):\n self.id = id\n self.data = data\n self.label = label\n self.user = user\n self.metadata = metadata\n\n def __str__(self):\n return f'{self.data}\\t{self.label}'\n", "path": "backend/api/views/download/data.py"}, {"content": "import abc\nimport csv\nimport itertools\nimport json\nimport os\nimport uuid\nimport zipfile\nfrom collections import defaultdict\nfrom typing import Dict, Iterable, Iterator, List\n\nfrom .data import Record\n\n\nclass BaseWriter:\n\n def __init__(self, tmpdir: str):\n self.tmpdir = tmpdir\n\n @abc.abstractmethod\n def write(self, records: Iterator[Record]) -> str:\n raise NotImplementedError()\n\n def write_zip(self, filenames: Iterable):\n save_file = '{}.zip'.format(os.path.join(self.tmpdir, str(uuid.uuid4())))\n with zipfile.ZipFile(save_file, 'w', compression=zipfile.ZIP_DEFLATED) as zf:\n for file in filenames:\n zf.write(filename=file, arcname=os.path.basename(file))\n return save_file\n\n\nclass LineWriter(BaseWriter):\n extension = 'txt'\n\n def write(self, records: Iterator[Record]) -> str:\n files = {}\n for record in records:\n filename = os.path.join(self.tmpdir, f'{record.user}.{self.extension}')\n if filename not in files:\n f = open(filename, mode='a')\n files[filename] = f\n f = files[filename]\n line = self.create_line(record)\n f.write(f'{line}\\n')\n for f in files.values():\n f.close()\n save_file = self.write_zip(files)\n for file in files:\n os.remove(file)\n return save_file\n\n @abc.abstractmethod\n def create_line(self, record) -> str:\n raise NotImplementedError()\n\n\nclass CsvWriter(BaseWriter):\n extension = 'csv'\n\n def write(self, records: Iterator[Record]) -> str:\n writers = {}\n file_handlers = set()\n records = list(records)\n header = self.create_header(records)\n for record in records:\n filename = os.path.join(self.tmpdir, f'{record.user}.{self.extension}')\n if filename not in writers:\n f = open(filename, mode='a', encoding='utf-8')\n writer = csv.DictWriter(f, header)\n writer.writeheader()\n writers[filename] = writer\n file_handlers.add(f)\n writer = writers[filename]\n line = self.create_line(record)\n writer.writerow(line)\n\n for f in file_handlers:\n f.close()\n save_file = self.write_zip(writers)\n for file in writers:\n os.remove(file)\n return save_file\n\n def create_line(self, record) -> Dict:\n return {\n 'id': record.id,\n 'data': record.data,\n 'label': '#'.join(record.label),\n **record.metadata\n }\n\n def create_header(self, records: List[Record]) -> Iterable[str]:\n header = ['id', 'data', 'label']\n header += list(itertools.chain(*[r.metadata.keys() for r in records]))\n return header\n\n\nclass JSONWriter(BaseWriter):\n extension = 'json'\n\n def write(self, records: Iterator[Record]) -> str:\n writers = {}\n contents = defaultdict(list)\n for record in records:\n filename = os.path.join(self.tmpdir, f'{record.user}.{self.extension}')\n if filename not in writers:\n f = open(filename, mode='a', encoding='utf-8')\n writers[filename] = f\n line = self.create_line(record)\n contents[filename].append(line)\n\n for filename, f in writers.items():\n content = contents[filename]\n json.dump(content, f, ensure_ascii=False)\n f.close()\n\n save_file = self.write_zip(writers)\n for file in writers:\n os.remove(file)\n return save_file\n\n def create_line(self, record) -> Dict:\n return {\n 'id': record.id,\n 'data': 
record.data,\n 'label': record.label,\n **record.metadata\n }\n\n\nclass JSONLWriter(LineWriter):\n extension = 'jsonl'\n\n def create_line(self, record):\n return json.dumps({\n 'id': record.id,\n 'data': record.data,\n 'label': record.label,\n **record.metadata\n }, ensure_ascii=False)\n\n\nclass FastTextWriter(LineWriter):\n extension = 'txt'\n\n def create_line(self, record):\n line = [f'__label__{label}' for label in record.label]\n line.append(record.data)\n line = ' '.join(line)\n return line\n", "path": "backend/api/views/download/writer.py"}], "after_files": [{"content": "import json\nfrom typing import Any, Dict, List\n\n\nclass Record:\n\n def __init__(self,\n id: int,\n data: str,\n label: List[Any],\n user: str,\n metadata: Dict[Any, Any]):\n self.id = id\n self.data = data\n self.label = label\n self.user = user\n self.metadata = metadata\n\n def __str__(self):\n return json.dumps({\n 'id': self.id,\n 'data': self.data,\n 'label': self.label,\n 'user': self.user,\n 'metadata': self.metadata\n })\n", "path": "backend/api/views/download/data.py"}, {"content": "import abc\nimport csv\nimport itertools\nimport json\nimport os\nimport uuid\nimport zipfile\nfrom collections import defaultdict\nfrom typing import Dict, Iterable, Iterator, List\n\nfrom .data import Record\n\n\nclass BaseWriter:\n\n def __init__(self, tmpdir: str):\n self.tmpdir = tmpdir\n\n @abc.abstractmethod\n def write(self, records: Iterator[Record]) -> str:\n raise NotImplementedError()\n\n def write_zip(self, filenames: Iterable):\n save_file = '{}.zip'.format(os.path.join(self.tmpdir, str(uuid.uuid4())))\n with zipfile.ZipFile(save_file, 'w', compression=zipfile.ZIP_DEFLATED) as zf:\n for file in filenames:\n zf.write(filename=file, arcname=os.path.basename(file))\n return save_file\n\n\nclass LineWriter(BaseWriter):\n extension = 'txt'\n\n def write(self, records: Iterator[Record]) -> str:\n files = {}\n for record in records:\n filename = os.path.join(self.tmpdir, f'{record.user}.{self.extension}')\n if filename not in files:\n f = open(filename, mode='a')\n files[filename] = f\n f = files[filename]\n line = self.create_line(record)\n f.write(f'{line}\\n')\n for f in files.values():\n f.close()\n save_file = self.write_zip(files)\n for file in files:\n os.remove(file)\n return save_file\n\n @abc.abstractmethod\n def create_line(self, record) -> str:\n raise NotImplementedError()\n\n\nclass CsvWriter(BaseWriter):\n extension = 'csv'\n\n def write(self, records: Iterator[Record]) -> str:\n writers = {}\n file_handlers = set()\n records = list(records)\n header = self.create_header(records)\n for record in records:\n filename = os.path.join(self.tmpdir, f'{record.user}.{self.extension}')\n if filename not in writers:\n f = open(filename, mode='a', encoding='utf-8')\n writer = csv.DictWriter(f, header)\n writer.writeheader()\n writers[filename] = writer\n file_handlers.add(f)\n writer = writers[filename]\n line = self.create_line(record)\n writer.writerow(line)\n\n for f in file_handlers:\n f.close()\n save_file = self.write_zip(writers)\n for file in writers:\n os.remove(file)\n return save_file\n\n def create_line(self, record) -> Dict:\n return {\n 'id': record.id,\n 'data': record.data,\n 'label': '#'.join(record.label),\n **record.metadata\n }\n\n def create_header(self, records: List[Record]) -> Iterable[str]:\n header = ['id', 'data', 'label']\n header += sorted(set(itertools.chain(*[r.metadata.keys() for r in records])))\n return header\n\n\nclass JSONWriter(BaseWriter):\n extension = 'json'\n\n def 
write(self, records: Iterator[Record]) -> str:\n writers = {}\n contents = defaultdict(list)\n for record in records:\n filename = os.path.join(self.tmpdir, f'{record.user}.{self.extension}')\n if filename not in writers:\n f = open(filename, mode='a', encoding='utf-8')\n writers[filename] = f\n line = self.create_line(record)\n contents[filename].append(line)\n\n for filename, f in writers.items():\n content = contents[filename]\n json.dump(content, f, ensure_ascii=False)\n f.close()\n\n save_file = self.write_zip(writers)\n for file in writers:\n os.remove(file)\n return save_file\n\n def create_line(self, record) -> Dict:\n return {\n 'id': record.id,\n 'data': record.data,\n 'label': record.label,\n **record.metadata\n }\n\n\nclass JSONLWriter(LineWriter):\n extension = 'jsonl'\n\n def create_line(self, record):\n return json.dumps({\n 'id': record.id,\n 'data': record.data,\n 'label': record.label,\n **record.metadata\n }, ensure_ascii=False)\n\n\nclass FastTextWriter(LineWriter):\n extension = 'txt'\n\n def create_line(self, record):\n line = [f'__label__{label}' for label in record.label]\n line.append(record.data)\n line = ' '.join(line)\n return line\n", "path": "backend/api/views/download/writer.py"}]}
| 1,909 | 272 |
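The doccano record above pins the duplicated CSV columns on `CsvWriter.create_header`, which chains every record's metadata keys without deduplication, so a key shared by N rows appears N times in the header. The golden diff wraps the chain in `sorted(set(...))`; the short sketch below reproduces both behaviours outside doccano.

```python
import itertools

metadata_per_record = [{"hidden": "A"}, {"hidden": "B"}, {"hidden": "C"}]

# Chaining keys row by row repeats 'hidden' once per record.
repeated = ['id', 'data', 'label'] + list(
    itertools.chain(*[m.keys() for m in metadata_per_record]))
print(repeated)  # ['id', 'data', 'label', 'hidden', 'hidden', 'hidden']

# Deduplicating the chained keys, as the golden diff does, yields one column per key.
deduped = ['id', 'data', 'label'] + sorted(
    set(itertools.chain(*[m.keys() for m in metadata_per_record])))
print(deduped)   # ['id', 'data', 'label', 'hidden']
```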
gh_patches_debug_25715
|
rasdani/github-patches
|
git_diff
|
dotkom__onlineweb4-704
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Minor issues in feedback
- We have said that rating questions go up to 5, they actually go to 6. This must be either changed, or the committees must be informed so questions can be altered.
- Norwegian dates in the mail sent to users
- The domain used for the links should be https://online.ntnu.no, not morgan.online.ntnu.no
- Online linjeforening should be Linjeforeningen Online
- Errors on required fields should stand out more, see appendix 1 for current.
"Hei, vi ønsker tilbakemelding på "Kurs med Steria" som du var med på den 23. January:
morgan.online.ntnu.no/feedback/events/event/23/2/
Fristen for å svare på skjema er 30. January innen kl 23:59.
Vær oppmerksom på at du får prikk dersom du ikke svarer på disse spørsmålene innen fristen.
Eventuelle spørsmål sendes til [email protected]
Mvh
Online linjeforening"
Appendix 1

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/feedback/mommy.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import datetime
3 import socket
4
5 from django.utils import timezone
6 from django.contrib.contenttypes.models import ContentType
7 from django.conf import settings
8 from django.core.mail import EmailMessage
9
10 from apps.events.models import Event, AttendanceEvent, Attendee
11 from apps.feedback.models import FeedbackRelation
12 from apps.marks.models import Mark, UserEntry
13 from apps.mommy import Task, schedule
14
15 class FeedbackMail(Task):
16
17 @staticmethod
18 def run():
19 active_feedbacks = FeedbackRelation.objects.filter(active=True)
20
21 for feedback in active_feedbacks:
22 message = FeedbackMail.generate_message(feedback)
23
24 if message.send:
25 EmailMessage(message.subject, unicode(message), message.committee_mail, [], message.attended_mails).send()
26
27 if message.results_message:
28 EmailMessage("Feedback resultat", message.results_message,"[email protected]", [message.committee_mail]).send()
29
30 @staticmethod
31 def generate_message(feedback):
32 today = timezone.now().date()
33 yesterday = today + datetime.timedelta(days=-1)
34 not_responded = FeedbackMail.get_users(feedback)
35 message = Message()
36
37 #return if everyone has answered
38 if not not_responded:
39 return message
40
41 message.attended_mails = FeedbackMail.get_user_mails(not_responded)
42
43 message.committee_mail = FeedbackMail.get_committee_email(feedback)
44 deadline = feedback.deadline.strftime("%d. %B").encode("utf-8")
45 title = str(FeedbackMail.get_title(feedback)).encode("utf-8")
46 message.link = str(u"\n\n" + FeedbackMail.get_link(feedback)).encode("utf-8")
47 results_link = str(FeedbackMail.get_link(feedback) + "results").encode("utf-8")
48
49 start_date = feedback.get_start_date()
50 deadline_diff = (feedback.deadline - today).days
51
52 message.subject = u"Feedback: %s" % (title)
53 message.intro = u"Hei, vi ønsker tilbakemelding på \"%s\"" % (title)
54 message.mark = FeedbackMail.mark_message(feedback)
55 message.contact = u"\n\nEventuelle spørsmål sendes til %s " % (message.committee_mail)
56 message.start_date = FeedbackMail.start_date_message(start_date)
57
58 if deadline_diff < 0: #Deadline passed
59 feedback.active = False
60 feedback.save()
61
62 if feedback.gives_mark:
63 FeedbackMail.set_marks(title, not_responded)
64
65 message.intro = u"Fristen for å svare på \"%s\" har gått ut og du har fått en prikk." % (title)
66 message.mark = ""
67 message.start_date = ""
68 message.link = ""
69 message.send = True
70
71 elif deadline_diff < 1: #Last warning
72 message.deadline = u"\n\nI dag innen 23:59 er siste frist til å svare på skjemaet."
73
74 message.results_message = u"Hei, siste purremail på feedback skjema har blitt sendt til alle " \
75 u"gjenværende deltagere på \"%s\".\nDere kan se feedback-resultatene på:\n%s\n" % \
76 (title, results_link)
77 message.send = True
78 elif deadline_diff < 3 and feedback.gives_mark: # 3 days from the deadline
79 message.deadline = u"\n\nFristen for å svare på skjema er %s innen kl 23:59." % (deadline)
80 message.send = True
81 elif FeedbackMail.send_first_notification(feedback): #Day after the event or feedback creation
82 message.deadline = u"\n\nFristen for å svare på skjema er %s innen kl 23:59." % (deadline)
83
84 message.results_message = u"Hei, nå har feedbackmail blitt sendt til alle " \
85 u"deltagere på \"%s\".\nDere kan se feedback-resultatene på:\n%s\n" % \
86 (title, results_link)
87 message.send = True
88
89 return message
90
91 @staticmethod
92 def send_first_notification(feedback):
93 start_date = FeedbackMail.start_date(feedback)
94
95 #The object that requires feedback doesnt have a start date
96 if not start_date:
97 yesterday = timezone.now().date() - datetime.timedelta(days=1)
98 if feedback.created_date == yesterday.date():
99 #Send the first notification the day after the feedback relation was created
100 return True
101 else:
102 day_after_event = start_date + datetime.timedelta(1)
103 if day_after_event == datetime.datetime.date(timezone.now()):
104 #Send the first notification the day after the event
105 return True
106 return False
107
108 @staticmethod
109 def start_date(feedback):
110 start_date = feedback.get_start_date()
111
112 if start_date:
113 return start_date.date()
114 else:
115 return False
116
117 @staticmethod
118 def start_date_message(start_date):
119 #If the object(event) doesnt have start date it will send
120 #the first notification the day after the feedbackrelation is made
121 if start_date:
122 start_date_string = start_date.strftime("%d. %B").encode("utf-8")
123 message_start_date = u"som du var med på den %s:" % (start_date_string)
124 else:
125 message_start_date = ""
126
127 return message_start_date
128
129 @staticmethod
130 def get_users(feedback):
131 return feedback.get_slackers()
132
133 @staticmethod
134 def get_user_mails(not_responded):
135 return [user.email for user in not_responded]
136
137 @staticmethod
138 def get_link(feedback):
139 hostname = socket.getfqdn()
140 return str(hostname + feedback.get_absolute_url())
141
142 @staticmethod
143 def get_title(feedback):
144 return feedback.get_title()
145
146 @staticmethod
147 def get_committee_email(feedback):
148 return feedback.get_email()
149
150 @staticmethod
151 def mark_message(feedback):
152 if feedback.gives_mark:
153 return u"\nVær oppmerksom på at du får prikk dersom du ikke svarer " \
154 u"på disse spørsmålene innen fristen."
155 else:
156 return ""
157
158 @staticmethod
159 def set_marks(title, not_responded):
160 mark = Mark()
161 mark.title = u"Manglende tilbakemelding på %s" % (title)
162 mark.category = 4 #Missed feedback
163 mark.description = u"Du har fått en prikk fordi du ikke har levert tilbakemelding."
164 mark.save()
165
166 for user in not_responded:
167 user_entry = UserEntry()
168 user_entry.user = user
169 user_entry.mark = mark
170 user_entry.save()
171
172 class Message():
173 subject = ""
174 intro = ""
175 start_date = ""
176 deadline = ""
177 mark = ""
178 contact = ""
179 link = ""
180 send = False
181 end = u"\n\nMvh\nOnline linjeforening"
182 results_message = False
183
184 committee_mail = ""
185 attended_mails = False
186
187
188 def __unicode__(self):
189 message = "%s %s %s %s %s %s %s" % (
190 self.intro,
191 self.start_date,
192 self.link,
193 self.deadline,
194 self.mark,
195 self.contact,
196 self.end)
197 return message
198
199 schedule.register(FeedbackMail, day_of_week='mon-sun', hour=8, minute=0)
200
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/apps/feedback/mommy.py b/apps/feedback/mommy.py
--- a/apps/feedback/mommy.py
+++ b/apps/feedback/mommy.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
import datetime
import socket
+import locale
from django.utils import timezone
from django.contrib.contenttypes.models import ContentType
@@ -16,9 +17,10 @@
@staticmethod
def run():
- active_feedbacks = FeedbackRelation.objects.filter(active=True)
+ locale.setlocale(locale.LC_ALL, "nb_NO.UTF-8")
+ active_feedbacks = FeedbackRelation.objects.filter(active=True)
- for feedback in active_feedbacks:
+ for feedback in active_feedbacks:
message = FeedbackMail.generate_message(feedback)
if message.send:
@@ -178,7 +180,7 @@
contact = ""
link = ""
send = False
- end = u"\n\nMvh\nOnline linjeforening"
+ end = u"\n\nMvh\nLinjeforeningen Online"
results_message = False
committee_mail = ""
@@ -196,4 +198,4 @@
self.end)
return message
-schedule.register(FeedbackMail, day_of_week='mon-sun', hour=8, minute=0)
+schedule.register(FeedbackMail, day_of_week='mon-sun', hour=8, minute=00)
|
{"golden_diff": "diff --git a/apps/feedback/mommy.py b/apps/feedback/mommy.py\n--- a/apps/feedback/mommy.py\n+++ b/apps/feedback/mommy.py\n@@ -1,6 +1,7 @@\n # -*- coding: utf-8 -*-\n import datetime\n import socket\n+import locale\n \n from django.utils import timezone\n from django.contrib.contenttypes.models import ContentType\n@@ -16,9 +17,10 @@\n \n @staticmethod\n def run():\n- active_feedbacks = FeedbackRelation.objects.filter(active=True)\n+ locale.setlocale(locale.LC_ALL, \"nb_NO.UTF-8\")\n+ active_feedbacks = FeedbackRelation.objects.filter(active=True)\n \n- for feedback in active_feedbacks:\n+ for feedback in active_feedbacks:\n message = FeedbackMail.generate_message(feedback)\n \n if message.send:\n@@ -178,7 +180,7 @@\n contact = \"\"\n link = \"\"\n send = False\n- end = u\"\\n\\nMvh\\nOnline linjeforening\"\n+ end = u\"\\n\\nMvh\\nLinjeforeningen Online\"\n results_message = False\n \n committee_mail = \"\"\n@@ -196,4 +198,4 @@\n self.end)\n return message\n \n-schedule.register(FeedbackMail, day_of_week='mon-sun', hour=8, minute=0)\n+schedule.register(FeedbackMail, day_of_week='mon-sun', hour=8, minute=00)\n", "issue": "Minor issues in feedback\n- We have said that rating questions go up to 5, they actually go to 6. This must be either changed, or the committees must be informed so questions can be altered.\n- Norwegian dates in the mail sent to users\n- The domain used for the links should be https://online.ntnu.no, not morgan.online.ntnu.no\n- Online linjeforening should be Linjeforeningen Online\n- Errors on required fields should stand out more, see appendix 1 for current.\n\n\"Hei, vi \u00f8nsker tilbakemelding p\u00e5 \"Kurs med Steria\" som du var med p\u00e5 den 23. January:\n\nmorgan.online.ntnu.no/feedback/events/event/23/2/\n\nFristen for \u00e5 svare p\u00e5 skjema er 30. January innen kl 23:59.\nV\u00e6r oppmerksom p\u00e5 at du f\u00e5r prikk dersom du ikke svarer p\u00e5 disse sp\u00f8rsm\u00e5lene innen fristen.\n\nEventuelle sp\u00f8rsm\u00e5l sendes til [email protected]\n\nMvh\nOnline linjeforening\"\n\nAppendix 1\n\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport datetime\nimport socket\n\nfrom django.utils import timezone\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.conf import settings\nfrom django.core.mail import EmailMessage\n\nfrom apps.events.models import Event, AttendanceEvent, Attendee\nfrom apps.feedback.models import FeedbackRelation\nfrom apps.marks.models import Mark, UserEntry\nfrom apps.mommy import Task, schedule\n\nclass FeedbackMail(Task):\n\n @staticmethod\n def run():\n active_feedbacks = FeedbackRelation.objects.filter(active=True)\n \n for feedback in active_feedbacks:\n message = FeedbackMail.generate_message(feedback)\n\n if message.send:\n EmailMessage(message.subject, unicode(message), message.committee_mail, [], message.attended_mails).send()\n\n if message.results_message:\n EmailMessage(\"Feedback resultat\", message.results_message,\"[email protected]\", [message.committee_mail]).send() \n\n @staticmethod\n def generate_message(feedback):\n today = timezone.now().date()\n yesterday = today + datetime.timedelta(days=-1)\n not_responded = FeedbackMail.get_users(feedback)\n message = Message()\n\n #return if everyone has answered\n if not not_responded:\n return message\n \n message.attended_mails = FeedbackMail.get_user_mails(not_responded)\n\n message.committee_mail = FeedbackMail.get_committee_email(feedback)\n deadline = feedback.deadline.strftime(\"%d. 
%B\").encode(\"utf-8\")\n title = str(FeedbackMail.get_title(feedback)).encode(\"utf-8\")\n message.link = str(u\"\\n\\n\" + FeedbackMail.get_link(feedback)).encode(\"utf-8\")\n results_link = str(FeedbackMail.get_link(feedback) + \"results\").encode(\"utf-8\")\n \n start_date = feedback.get_start_date()\n deadline_diff = (feedback.deadline - today).days\n\n message.subject = u\"Feedback: %s\" % (title)\n message.intro = u\"Hei, vi \u00f8nsker tilbakemelding p\u00e5 \\\"%s\\\"\" % (title)\n message.mark = FeedbackMail.mark_message(feedback)\n message.contact = u\"\\n\\nEventuelle sp\u00f8rsm\u00e5l sendes til %s \" % (message.committee_mail)\n message.start_date = FeedbackMail.start_date_message(start_date)\n\n if deadline_diff < 0: #Deadline passed\n feedback.active = False\n feedback.save()\n\n if feedback.gives_mark:\n FeedbackMail.set_marks(title, not_responded) \n \n message.intro = u\"Fristen for \u00e5 svare p\u00e5 \\\"%s\\\" har g\u00e5tt ut og du har f\u00e5tt en prikk.\" % (title)\n message.mark = \"\"\n message.start_date = \"\"\n message.link = \"\"\n message.send = True\n\n elif deadline_diff < 1: #Last warning\n message.deadline = u\"\\n\\nI dag innen 23:59 er siste frist til \u00e5 svare p\u00e5 skjemaet.\"\n \n message.results_message = u\"Hei, siste purremail p\u00e5 feedback skjema har blitt sendt til alle \" \\\n u\"gjenv\u00e6rende deltagere p\u00e5 \\\"%s\\\".\\nDere kan se feedback-resultatene p\u00e5:\\n%s\\n\" % \\\n (title, results_link)\n message.send = True\n elif deadline_diff < 3 and feedback.gives_mark: # 3 days from the deadline\n message.deadline = u\"\\n\\nFristen for \u00e5 svare p\u00e5 skjema er %s innen kl 23:59.\" % (deadline)\n message.send = True\n elif FeedbackMail.send_first_notification(feedback): #Day after the event or feedback creation \n message.deadline = u\"\\n\\nFristen for \u00e5 svare p\u00e5 skjema er %s innen kl 23:59.\" % (deadline)\n \n message.results_message = u\"Hei, n\u00e5 har feedbackmail blitt sendt til alle \" \\\n u\"deltagere p\u00e5 \\\"%s\\\".\\nDere kan se feedback-resultatene p\u00e5:\\n%s\\n\" % \\\n (title, results_link)\n message.send = True\n\n return message\n \n @staticmethod\n def send_first_notification(feedback):\n start_date = FeedbackMail.start_date(feedback)\n\n #The object that requires feedback doesnt have a start date\n if not start_date:\n yesterday = timezone.now().date() - datetime.timedelta(days=1)\n if feedback.created_date == yesterday.date():\n #Send the first notification the day after the feedback relation was created\n return True\n else:\n day_after_event = start_date + datetime.timedelta(1)\n if day_after_event == datetime.datetime.date(timezone.now()):\n #Send the first notification the day after the event\n return True\n return False\n\n @staticmethod\n def start_date(feedback):\n start_date = feedback.get_start_date()\n \n if start_date:\n return start_date.date()\n else:\n return False\n\n @staticmethod\n def start_date_message(start_date):\n #If the object(event) doesnt have start date it will send \n #the first notification the day after the feedbackrelation is made\n if start_date:\n start_date_string = start_date.strftime(\"%d. 
%B\").encode(\"utf-8\")\n message_start_date = u\"som du var med p\u00e5 den %s:\" % (start_date_string)\n else:\n message_start_date = \"\"\n \n return message_start_date \n\n @staticmethod\n def get_users(feedback):\n return feedback.get_slackers()\n\n @staticmethod\n def get_user_mails(not_responded):\n return [user.email for user in not_responded]\n\n @staticmethod\n def get_link(feedback):\n hostname = socket.getfqdn()\n return str(hostname + feedback.get_absolute_url())\n\n @staticmethod\n def get_title(feedback):\n return feedback.get_title()\n\n @staticmethod\n def get_committee_email(feedback):\n return feedback.get_email()\n\n @staticmethod\n def mark_message(feedback):\n if feedback.gives_mark:\n return u\"\\nV\u00e6r oppmerksom p\u00e5 at du f\u00e5r prikk dersom du ikke svarer \" \\\n u\"p\u00e5 disse sp\u00f8rsm\u00e5lene innen fristen.\"\n else:\n return \"\"\n\n @staticmethod\n def set_marks(title, not_responded):\n mark = Mark()\n mark.title = u\"Manglende tilbakemelding p\u00e5 %s\" % (title)\n mark.category = 4 #Missed feedback\n mark.description = u\"Du har f\u00e5tt en prikk fordi du ikke har levert tilbakemelding.\"\n mark.save()\n \n for user in not_responded:\n user_entry = UserEntry()\n user_entry.user = user\n user_entry.mark = mark\n user_entry.save()\n \nclass Message():\n subject = \"\"\n intro = \"\"\n start_date = \"\"\n deadline = \"\"\n mark = \"\"\n contact = \"\"\n link = \"\"\n send = False\n end = u\"\\n\\nMvh\\nOnline linjeforening\"\n results_message = False\n\n committee_mail = \"\"\n attended_mails = False\n\n\n def __unicode__(self):\n message = \"%s %s %s %s %s %s %s\" % (\n self.intro, \n self.start_date, \n self.link, \n self.deadline, \n self.mark, \n self.contact, \n self.end)\n return message\n\nschedule.register(FeedbackMail, day_of_week='mon-sun', hour=8, minute=0)\n", "path": "apps/feedback/mommy.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport datetime\nimport socket\nimport locale\n\nfrom django.utils import timezone\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.conf import settings\nfrom django.core.mail import EmailMessage\n\nfrom apps.events.models import Event, AttendanceEvent, Attendee\nfrom apps.feedback.models import FeedbackRelation\nfrom apps.marks.models import Mark, UserEntry\nfrom apps.mommy import Task, schedule\n\nclass FeedbackMail(Task):\n\n @staticmethod\n def run():\n locale.setlocale(locale.LC_ALL, \"nb_NO.UTF-8\")\n active_feedbacks = FeedbackRelation.objects.filter(active=True)\n \n for feedback in active_feedbacks:\n message = FeedbackMail.generate_message(feedback)\n\n if message.send:\n EmailMessage(message.subject, unicode(message), message.committee_mail, [], message.attended_mails).send()\n\n if message.results_message:\n EmailMessage(\"Feedback resultat\", message.results_message,\"[email protected]\", [message.committee_mail]).send() \n\n @staticmethod\n def generate_message(feedback):\n today = timezone.now().date()\n yesterday = today + datetime.timedelta(days=-1)\n not_responded = FeedbackMail.get_users(feedback)\n message = Message()\n\n #return if everyone has answered\n if not not_responded:\n return message\n \n message.attended_mails = FeedbackMail.get_user_mails(not_responded)\n\n message.committee_mail = FeedbackMail.get_committee_email(feedback)\n deadline = feedback.deadline.strftime(\"%d. 
%B\").encode(\"utf-8\")\n title = str(FeedbackMail.get_title(feedback)).encode(\"utf-8\")\n message.link = str(u\"\\n\\n\" + FeedbackMail.get_link(feedback)).encode(\"utf-8\")\n results_link = str(FeedbackMail.get_link(feedback) + \"results\").encode(\"utf-8\")\n \n start_date = feedback.get_start_date()\n deadline_diff = (feedback.deadline - today).days\n\n message.subject = u\"Feedback: %s\" % (title)\n message.intro = u\"Hei, vi \u00f8nsker tilbakemelding p\u00e5 \\\"%s\\\"\" % (title)\n message.mark = FeedbackMail.mark_message(feedback)\n message.contact = u\"\\n\\nEventuelle sp\u00f8rsm\u00e5l sendes til %s \" % (message.committee_mail)\n message.start_date = FeedbackMail.start_date_message(start_date)\n\n if deadline_diff < 0: #Deadline passed\n feedback.active = False\n feedback.save()\n\n if feedback.gives_mark:\n FeedbackMail.set_marks(title, not_responded) \n \n message.intro = u\"Fristen for \u00e5 svare p\u00e5 \\\"%s\\\" har g\u00e5tt ut og du har f\u00e5tt en prikk.\" % (title)\n message.mark = \"\"\n message.start_date = \"\"\n message.link = \"\"\n message.send = True\n\n elif deadline_diff < 1: #Last warning\n message.deadline = u\"\\n\\nI dag innen 23:59 er siste frist til \u00e5 svare p\u00e5 skjemaet.\"\n \n message.results_message = u\"Hei, siste purremail p\u00e5 feedback skjema har blitt sendt til alle \" \\\n u\"gjenv\u00e6rende deltagere p\u00e5 \\\"%s\\\".\\nDere kan se feedback-resultatene p\u00e5:\\n%s\\n\" % \\\n (title, results_link)\n message.send = True\n elif deadline_diff < 3 and feedback.gives_mark: # 3 days from the deadline\n message.deadline = u\"\\n\\nFristen for \u00e5 svare p\u00e5 skjema er %s innen kl 23:59.\" % (deadline)\n message.send = True\n elif FeedbackMail.send_first_notification(feedback): #Day after the event or feedback creation \n message.deadline = u\"\\n\\nFristen for \u00e5 svare p\u00e5 skjema er %s innen kl 23:59.\" % (deadline)\n \n message.results_message = u\"Hei, n\u00e5 har feedbackmail blitt sendt til alle \" \\\n u\"deltagere p\u00e5 \\\"%s\\\".\\nDere kan se feedback-resultatene p\u00e5:\\n%s\\n\" % \\\n (title, results_link)\n message.send = True\n\n return message\n \n @staticmethod\n def send_first_notification(feedback):\n start_date = FeedbackMail.start_date(feedback)\n\n #The object that requires feedback doesnt have a start date\n if not start_date:\n yesterday = timezone.now().date() - datetime.timedelta(days=1)\n if feedback.created_date == yesterday.date():\n #Send the first notification the day after the feedback relation was created\n return True\n else:\n day_after_event = start_date + datetime.timedelta(1)\n if day_after_event == datetime.datetime.date(timezone.now()):\n #Send the first notification the day after the event\n return True\n return False\n\n @staticmethod\n def start_date(feedback):\n start_date = feedback.get_start_date()\n \n if start_date:\n return start_date.date()\n else:\n return False\n\n @staticmethod\n def start_date_message(start_date):\n #If the object(event) doesnt have start date it will send \n #the first notification the day after the feedbackrelation is made\n if start_date:\n start_date_string = start_date.strftime(\"%d. 
%B\").encode(\"utf-8\")\n message_start_date = u\"som du var med p\u00e5 den %s:\" % (start_date_string)\n else:\n message_start_date = \"\"\n \n return message_start_date \n\n @staticmethod\n def get_users(feedback):\n return feedback.get_slackers()\n\n @staticmethod\n def get_user_mails(not_responded):\n return [user.email for user in not_responded]\n\n @staticmethod\n def get_link(feedback):\n hostname = socket.getfqdn()\n return str(hostname + feedback.get_absolute_url())\n\n @staticmethod\n def get_title(feedback):\n return feedback.get_title()\n\n @staticmethod\n def get_committee_email(feedback):\n return feedback.get_email()\n\n @staticmethod\n def mark_message(feedback):\n if feedback.gives_mark:\n return u\"\\nV\u00e6r oppmerksom p\u00e5 at du f\u00e5r prikk dersom du ikke svarer \" \\\n u\"p\u00e5 disse sp\u00f8rsm\u00e5lene innen fristen.\"\n else:\n return \"\"\n\n @staticmethod\n def set_marks(title, not_responded):\n mark = Mark()\n mark.title = u\"Manglende tilbakemelding p\u00e5 %s\" % (title)\n mark.category = 4 #Missed feedback\n mark.description = u\"Du har f\u00e5tt en prikk fordi du ikke har levert tilbakemelding.\"\n mark.save()\n \n for user in not_responded:\n user_entry = UserEntry()\n user_entry.user = user\n user_entry.mark = mark\n user_entry.save()\n \nclass Message():\n subject = \"\"\n intro = \"\"\n start_date = \"\"\n deadline = \"\"\n mark = \"\"\n contact = \"\"\n link = \"\"\n send = False\n end = u\"\\n\\nMvh\\nLinjeforeningen Online\"\n results_message = False\n\n committee_mail = \"\"\n attended_mails = False\n\n\n def __unicode__(self):\n message = \"%s %s %s %s %s %s %s\" % (\n self.intro, \n self.start_date, \n self.link, \n self.deadline, \n self.mark, \n self.contact, \n self.end)\n return message\n\nschedule.register(FeedbackMail, day_of_week='mon-sun', hour=8, minute=00)\n", "path": "apps/feedback/mommy.py"}]}
| 2,717 | 324 |
| gh_patches_debug_16388 | rasdani/github-patches | git_diff | wagtail__wagtail-8385 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use Full URL for ImageRenditionField.
### Is your proposal related to a problem?
<!--
Provide a clear and concise description of what the problem is.
For example, "I'm always frustrated when..."
-->
I'm a big fan of the new `full_url` field that images have and would like them to be easily used in the API.
Assuming one's frontend app is living on a different domain to the Wagtail API then the default relative URLs aren't as useful.
### Describe the solution you'd like
<!--
Provide a clear and concise description of what you want to happen.
-->
Add `full_url` to the output of `ImageRenditionField`.
I propose it just replace the `url` field altogether, but both could be returned.
### Describe alternatives you've considered
<!--
Let us know about other solutions you've tried or researched.
-->
I've been extending the `ImageRenditionField` for use in my own projects
### Additional context
<!--
Is there anything else you can add about the proposal?
You might want to link to related issues here, if you haven't already.
-->
(Write your answer here.)
Use Full URL for ImageRenditionField.
### Is your proposal related to a problem?
<!--
Provide a clear and concise description of what the problem is.
For example, "I'm always frustrated when..."
-->
I'm a big fan of the new `full_url` field that images have and would like them to be easily used in the API.
Assuming one's frontend app is living on a different domain to the Wagtail API then the default relative URLs aren't as useful.
### Describe the solution you'd like
<!--
Provide a clear and concise description of what you want to happen.
-->
Add `full_url` to the output of `ImageRenditionField`.
I propose it just replace the `url` field altogether, but both could be returned.
### Describe alternatives you've considered
<!--
Let us know about other solutions you've tried or researched.
-->
I've been extending the `ImageRenditionField` for use in my own projects
### Additional context
<!--
Is there anything else you can add about the proposal?
You might want to link to related issues here, if you haven't already.
-->
(Write your answer here.)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/images/api/fields.py`
Content:
```
1 from collections import OrderedDict
2
3 from rest_framework.fields import Field
4
5 from ..models import SourceImageIOError
6
7
8 class ImageRenditionField(Field):
9 """
10 A field that generates a rendition with the specified filter spec, and serialises
11 details of that rendition.
12
13 Example:
14 "thumbnail": {
15 "url": "/media/images/myimage.max-165x165.jpg",
16 "width": 165,
17 "height": 100,
18 "alt": "Image alt text"
19 }
20
21 If there is an error with the source image. The dict will only contain a single
22 key, "error", indicating this error:
23
24 "thumbnail": {
25 "error": "SourceImageIOError"
26 }
27 """
28
29 def __init__(self, filter_spec, *args, **kwargs):
30 self.filter_spec = filter_spec
31 super().__init__(*args, **kwargs)
32
33 def to_representation(self, image):
34 try:
35 thumbnail = image.get_rendition(self.filter_spec)
36
37 return OrderedDict(
38 [
39 ("url", thumbnail.url),
40 ("width", thumbnail.width),
41 ("height", thumbnail.height),
42 ("alt", thumbnail.alt),
43 ]
44 )
45 except SourceImageIOError:
46 return OrderedDict(
47 [
48 ("error", "SourceImageIOError"),
49 ]
50 )
51
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/wagtail/images/api/fields.py b/wagtail/images/api/fields.py
--- a/wagtail/images/api/fields.py
+++ b/wagtail/images/api/fields.py
@@ -13,6 +13,7 @@
Example:
"thumbnail": {
"url": "/media/images/myimage.max-165x165.jpg",
+ "full_url": "https://media.example.com/media/images/myimage.max-165x165.jpg",
"width": 165,
"height": 100,
"alt": "Image alt text"
@@ -37,6 +38,7 @@
return OrderedDict(
[
("url", thumbnail.url),
+ ("full_url", thumbnail.full_url),
("width", thumbnail.width),
("height", thumbnail.height),
("alt", thumbnail.alt),
|
{"golden_diff": "diff --git a/wagtail/images/api/fields.py b/wagtail/images/api/fields.py\n--- a/wagtail/images/api/fields.py\n+++ b/wagtail/images/api/fields.py\n@@ -13,6 +13,7 @@\n Example:\n \"thumbnail\": {\n \"url\": \"/media/images/myimage.max-165x165.jpg\",\n+ \"full_url\": \"https://media.example.com/media/images/myimage.max-165x165.jpg\",\n \"width\": 165,\n \"height\": 100,\n \"alt\": \"Image alt text\"\n@@ -37,6 +38,7 @@\n return OrderedDict(\n [\n (\"url\", thumbnail.url),\n+ (\"full_url\", thumbnail.full_url),\n (\"width\", thumbnail.width),\n (\"height\", thumbnail.height),\n (\"alt\", thumbnail.alt),\n", "issue": "Use Full URL for ImageRenditionField.\n### Is your proposal related to a problem?\r\n\r\n<!--\r\n Provide a clear and concise description of what the problem is.\r\n For example, \"I'm always frustrated when...\"\r\n-->\r\n\r\nI'm a big fan of the new `full_url` field that images have and would like them to be easily used in the API.\r\n\r\nAssuming one's frontend app is living on a different domain to the Wagtail API then the default relative URLs aren't as useful.\r\n\r\n### Describe the solution you'd like\r\n\r\n<!--\r\n Provide a clear and concise description of what you want to happen.\r\n-->\r\n\r\nAdd `full_url` to the output of `ImageRenditionField`.\r\n\r\nI propose it just replace the `url` field altogether, but both could be returned.\r\n\r\n### Describe alternatives you've considered\r\n\r\n<!--\r\n Let us know about other solutions you've tried or researched.\r\n-->\r\n\r\nI've been extending the `ImageRenditionField` for use in my own projects\r\n\r\n### Additional context\r\n\r\n<!--\r\n Is there anything else you can add about the proposal?\r\n You might want to link to related issues here, if you haven't already.\r\n-->\r\n\r\n(Write your answer here.)\r\n\nUse Full URL for ImageRenditionField.\n### Is your proposal related to a problem?\r\n\r\n<!--\r\n Provide a clear and concise description of what the problem is.\r\n For example, \"I'm always frustrated when...\"\r\n-->\r\n\r\nI'm a big fan of the new `full_url` field that images have and would like them to be easily used in the API.\r\n\r\nAssuming one's frontend app is living on a different domain to the Wagtail API then the default relative URLs aren't as useful.\r\n\r\n### Describe the solution you'd like\r\n\r\n<!--\r\n Provide a clear and concise description of what you want to happen.\r\n-->\r\n\r\nAdd `full_url` to the output of `ImageRenditionField`.\r\n\r\nI propose it just replace the `url` field altogether, but both could be returned.\r\n\r\n### Describe alternatives you've considered\r\n\r\n<!--\r\n Let us know about other solutions you've tried or researched.\r\n-->\r\n\r\nI've been extending the `ImageRenditionField` for use in my own projects\r\n\r\n### Additional context\r\n\r\n<!--\r\n Is there anything else you can add about the proposal?\r\n You might want to link to related issues here, if you haven't already.\r\n-->\r\n\r\n(Write your answer here.)\r\n\n", "before_files": [{"content": "from collections import OrderedDict\n\nfrom rest_framework.fields import Field\n\nfrom ..models import SourceImageIOError\n\n\nclass ImageRenditionField(Field):\n \"\"\"\n A field that generates a rendition with the specified filter spec, and serialises\n details of that rendition.\n\n Example:\n \"thumbnail\": {\n \"url\": \"/media/images/myimage.max-165x165.jpg\",\n \"width\": 165,\n \"height\": 100,\n \"alt\": \"Image alt text\"\n }\n\n If there is an error with the source image. 
The dict will only contain a single\n key, \"error\", indicating this error:\n\n \"thumbnail\": {\n \"error\": \"SourceImageIOError\"\n }\n \"\"\"\n\n def __init__(self, filter_spec, *args, **kwargs):\n self.filter_spec = filter_spec\n super().__init__(*args, **kwargs)\n\n def to_representation(self, image):\n try:\n thumbnail = image.get_rendition(self.filter_spec)\n\n return OrderedDict(\n [\n (\"url\", thumbnail.url),\n (\"width\", thumbnail.width),\n (\"height\", thumbnail.height),\n (\"alt\", thumbnail.alt),\n ]\n )\n except SourceImageIOError:\n return OrderedDict(\n [\n (\"error\", \"SourceImageIOError\"),\n ]\n )\n", "path": "wagtail/images/api/fields.py"}], "after_files": [{"content": "from collections import OrderedDict\n\nfrom rest_framework.fields import Field\n\nfrom ..models import SourceImageIOError\n\n\nclass ImageRenditionField(Field):\n \"\"\"\n A field that generates a rendition with the specified filter spec, and serialises\n details of that rendition.\n\n Example:\n \"thumbnail\": {\n \"url\": \"/media/images/myimage.max-165x165.jpg\",\n \"full_url\": \"https://media.example.com/media/images/myimage.max-165x165.jpg\",\n \"width\": 165,\n \"height\": 100,\n \"alt\": \"Image alt text\"\n }\n\n If there is an error with the source image. The dict will only contain a single\n key, \"error\", indicating this error:\n\n \"thumbnail\": {\n \"error\": \"SourceImageIOError\"\n }\n \"\"\"\n\n def __init__(self, filter_spec, *args, **kwargs):\n self.filter_spec = filter_spec\n super().__init__(*args, **kwargs)\n\n def to_representation(self, image):\n try:\n thumbnail = image.get_rendition(self.filter_spec)\n\n return OrderedDict(\n [\n (\"url\", thumbnail.url),\n (\"full_url\", thumbnail.full_url),\n (\"width\", thumbnail.width),\n (\"height\", thumbnail.height),\n (\"alt\", thumbnail.alt),\n ]\n )\n except SourceImageIOError:\n return OrderedDict(\n [\n (\"error\", \"SourceImageIOError\"),\n ]\n )\n", "path": "wagtail/images/api/fields.py"}]}
| 1,151 | 194 |
| gh_patches_debug_7296 | rasdani/github-patches | git_diff | LMFDB__lmfdb-5669 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
L-function of Dirichlet character does not link to Dirichlet character page
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lmfdb/utils/names_and_urls.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from lmfdb.utils.utilities import key_for_numerically_sort
3 from flask import url_for
4 #######################################################################
5 # Functions for interacting with web structure
6 #######################################################################
7
8 # TODO This needs to be able to handle any sort of object
9 # There should probably be a more relevant field
10 # in the database, instead of trying to extract this from a URL
11 def name_and_object_from_url(url, check_existence=False):
12 # the import is here to avoid circular imports
13 from lmfdb import db
14 url_split = url.rstrip('/').lstrip('/').split("/")
15 name = '??'
16 obj_exists = False
17
18 if url_split[0] == "EllipticCurve":
19 # every EC instance was added from EC
20 obj_exists = True
21 if url_split[1] == 'Q':
22 if len(url_split) == 4: # isogeny class
23 # EllipticCurve/Q/341641/a
24 label_isogeny_class = ".".join(url_split[-2:])
25 if check_existence:
26 obj_exists = db.ec_curvedata.exists({"lmfdb_iso": label_isogeny_class})
27 elif len(url_split) == 5: # curve
28 # EllipticCurve/Q/48/a/6
29 label_curve = ".".join(url_split[-3:-1]) + url_split[-1]
30 if check_existence:
31 obj_exists = db.ec_curvedata.exists({"lmfdb_label": label_curve})
32 else:
33 raise NotImplementedError
34 else:
35 if len(url_split) == 4: # isogeny class
36 # EllipticCurve/2.2.140.1/14.1/a
37 field, cond, isog = url_split[-3:]
38 label_isogeny_class = "-".join([field, cond, isog])
39 if check_existence:
40 obj_exists = db.ec_nfcurves.exists({"class_label": label_isogeny_class})
41 elif len(url_split) == 5: # curve
42 # EllipticCurve/2.0.4.1/1250.3/a/3
43 field, cond, isog, ind = url_split[-4:]
44 label_curve = "-".join([field, cond, isog]) + ind
45 if check_existence:
46 obj_exists = db.ec_nfcurves.exists({"label": label_curve})
47 if len(url_split) == 4: # isogeny class
48 #name = 'Isogeny class ' + label_isogeny_class
49 name = 'Elliptic curve ' + label_isogeny_class
50 elif len(url_split) == 5: # curve
51 #name = 'Curve ' + label_curve
52 name = 'Elliptic curve ' + label_curve
53
54 elif url_split[0] == "Genus2Curve":
55 obj_exists = True
56 assert url_split[1] == 'Q'
57 if len(url_split) == 4: # isog class
58 # Genus2Curve/Q/310329/a
59 label_isogeny_class = ".".join(url_split[-2:])
60 if check_existence:
61 obj_exists = db.g2c_curves.exists({"class": label_isogeny_class})
62 #name = 'Isogeny class ' + label_isogeny_class
63 name = 'Genus 2 curve ' + label_isogeny_class
64 if len(url_split) == 6: # curve
65 # Genus2Curve/Q/1728/b/442368/1
66 label_curve = ".".join(url_split[-4:])
67 if check_existence:
68 obj_exists = db.g2c_curves.exists({"label": label_curve})
69 #name = 'Curve ' + label_curve
70 name = 'Genus 2 curve ' + label_curve
71
72 elif url_split[0] == "ModularForm":
73 if url_split[1] == 'GL2':
74 if url_split[2] == 'Q' and url_split[3] == 'holomorphic':
75 if len(url_split) == 10:
76 # ModularForm/GL2/Q/holomorphic/24/2/f/a/11/2
77 newform_label = ".".join(url_split[-6:-2])
78 conrey_newform_label = ".".join(url_split[-6:])
79 name = 'Modular form ' + conrey_newform_label
80 obj_exists = True
81 if check_existence:
82 obj_exists = db.mf_newforms.label_exists(newform_label)
83 elif len(url_split) == 8:
84 # ModularForm/GL2/Q/holomorphic/24/2/f/a
85 newform_label = ".".join(url_split[-4:])
86 name = 'Modular form ' + newform_label
87 obj_exists = True
88 if check_existence:
89 obj_exists = db.mf_newforms.label_exists(newform_label)
90
91 elif url_split[2] == 'TotallyReal':
92 # ModularForm/GL2/TotallyReal/2.2.140.1/holomorphic/2.2.140.1-14.1-a
93 label = url_split[-1]
94 name = 'Hilbert modular form ' + label
95 obj_exists = True
96 if check_existence:
97 obj_exists = db.hmf_forms.label_exists(label)
98
99 elif url_split[2] == 'ImaginaryQuadratic':
100 # ModularForm/GL2/ImaginaryQuadratic/2.0.4.1/98.1/a
101 label = '-'.join(url_split[-3:])
102 name = 'Bianchi modular form ' + label
103 obj_exists = 'CM' not in label
104 if check_existence:
105 obj_exists = db.bmf_forms.label_exists(label)
106 elif url_split[0] == "ArtinRepresentation":
107 label = url_split[1]
108 name = 'Artin representation ' + label
109 obj_exists = True
110 if check_existence:
111 obj_exists = db.artin_reps.label_exists(label.split('c')[0])
112 elif url_split[0] == "NumberField":
113 from lmfdb.number_fields.web_number_field import field_pretty
114 label = url_split[1]
115 name = 'Number field ' + field_pretty(label)
116 obj_exists = True
117 if check_existence:
118 obj_exists = db.number_fields.label_exists(label)
119 elif url_split[0] == "SatoTateGroup":
120 from lmfdb.sato_tate_groups.main import st_name
121 name, label = st_name(url_split[1])
122 if name is None:
123 name = label
124 obj_exists = False
125 else:
126 name = 'Sato Tate group $%s$' % name
127 obj_exists = True
128 else:
129 # FIXME
130 #print("unknown url", url)
131 pass
132
133 return name, obj_exists
134
135
136 def names_and_urls(instances, exclude={}):
137 res = []
138 names = set()
139 urls = set()
140 exclude = set(exclude)
141 root = url_for('index')
142
143 # remove duplicate urls
144 for instance in instances:
145 if not isinstance(instance, str):
146 instance = instance['url']
147 if instance not in exclude and '|' not in instance:
148 urls.add(instance)
149
150 for url in urls:
151 name, obj_exists = name_and_object_from_url(url)
152 if not name:
153 name = ''
154 if obj_exists:
155 url = root + url
156 else:
157 # do not display unknown objects
158 continue
159 name = '(%s)' % (name)
160 url = ""
161 # avoid duplicates that might have arise from different instances
162 if name not in names:
163 res.append((name, url))
164 names.add(name)
165 # sort based on name + label
166 res.sort(key=lambda x: key_for_numerically_sort(x[0]))
167 return res
168
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lmfdb/utils/names_and_urls.py b/lmfdb/utils/names_and_urls.py
--- a/lmfdb/utils/names_and_urls.py
+++ b/lmfdb/utils/names_and_urls.py
@@ -125,10 +125,14 @@
else:
name = 'Sato Tate group $%s$' % name
obj_exists = True
+ elif url_split[:2] == ["Character", "Dirichlet"]:
+ modulus = int(url_split[2])
+ conrey = int(url_split[3])
+ name = "Character $\chi_{%d}(%d, \cdot)$" % (modulus, conrey)
+ obj_exists = True
else:
# FIXME
- #print("unknown url", url)
- pass
+ assert False, url
return name, obj_exists
|
{"golden_diff": "diff --git a/lmfdb/utils/names_and_urls.py b/lmfdb/utils/names_and_urls.py\n--- a/lmfdb/utils/names_and_urls.py\n+++ b/lmfdb/utils/names_and_urls.py\n@@ -125,10 +125,14 @@\n else:\n name = 'Sato Tate group $%s$' % name\n obj_exists = True\n+ elif url_split[:2] == [\"Character\", \"Dirichlet\"]:\n+ modulus = int(url_split[2])\n+ conrey = int(url_split[3])\n+ name = \"Character $\\chi_{%d}(%d, \\cdot)$\" % (modulus, conrey)\n+ obj_exists = True\n else:\n # FIXME\n- #print(\"unknown url\", url)\n- pass\n+ assert False, url\n \n return name, obj_exists\n", "issue": "L-function of Dirichlet character does not link to Dirichlet character page\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom lmfdb.utils.utilities import key_for_numerically_sort\nfrom flask import url_for\n#######################################################################\n# Functions for interacting with web structure\n#######################################################################\n\n# TODO This needs to be able to handle any sort of object\n# There should probably be a more relevant field\n# in the database, instead of trying to extract this from a URL\ndef name_and_object_from_url(url, check_existence=False):\n # the import is here to avoid circular imports\n from lmfdb import db\n url_split = url.rstrip('/').lstrip('/').split(\"/\")\n name = '??'\n obj_exists = False\n\n if url_split[0] == \"EllipticCurve\":\n # every EC instance was added from EC\n obj_exists = True\n if url_split[1] == 'Q':\n if len(url_split) == 4: # isogeny class\n # EllipticCurve/Q/341641/a\n label_isogeny_class = \".\".join(url_split[-2:])\n if check_existence:\n obj_exists = db.ec_curvedata.exists({\"lmfdb_iso\": label_isogeny_class})\n elif len(url_split) == 5: # curve\n # EllipticCurve/Q/48/a/6\n label_curve = \".\".join(url_split[-3:-1]) + url_split[-1]\n if check_existence:\n obj_exists = db.ec_curvedata.exists({\"lmfdb_label\": label_curve})\n else:\n raise NotImplementedError\n else:\n if len(url_split) == 4: # isogeny class\n # EllipticCurve/2.2.140.1/14.1/a\n field, cond, isog = url_split[-3:]\n label_isogeny_class = \"-\".join([field, cond, isog])\n if check_existence:\n obj_exists = db.ec_nfcurves.exists({\"class_label\": label_isogeny_class})\n elif len(url_split) == 5: # curve\n # EllipticCurve/2.0.4.1/1250.3/a/3\n field, cond, isog, ind = url_split[-4:]\n label_curve = \"-\".join([field, cond, isog]) + ind\n if check_existence:\n obj_exists = db.ec_nfcurves.exists({\"label\": label_curve})\n if len(url_split) == 4: # isogeny class\n #name = 'Isogeny class ' + label_isogeny_class\n name = 'Elliptic curve ' + label_isogeny_class\n elif len(url_split) == 5: # curve\n #name = 'Curve ' + label_curve\n name = 'Elliptic curve ' + label_curve\n\n elif url_split[0] == \"Genus2Curve\":\n obj_exists = True\n assert url_split[1] == 'Q'\n if len(url_split) == 4: # isog class\n # Genus2Curve/Q/310329/a\n label_isogeny_class = \".\".join(url_split[-2:])\n if check_existence:\n obj_exists = db.g2c_curves.exists({\"class\": label_isogeny_class})\n #name = 'Isogeny class ' + label_isogeny_class\n name = 'Genus 2 curve ' + label_isogeny_class\n if len(url_split) == 6: # curve\n # Genus2Curve/Q/1728/b/442368/1\n label_curve = \".\".join(url_split[-4:])\n if check_existence:\n obj_exists = db.g2c_curves.exists({\"label\": label_curve})\n #name = 'Curve ' + label_curve\n name = 'Genus 2 curve ' + label_curve\n\n elif url_split[0] == \"ModularForm\":\n if url_split[1] == 'GL2':\n if url_split[2] == 'Q' and 
url_split[3] == 'holomorphic':\n if len(url_split) == 10:\n # ModularForm/GL2/Q/holomorphic/24/2/f/a/11/2\n newform_label = \".\".join(url_split[-6:-2])\n conrey_newform_label = \".\".join(url_split[-6:])\n name = 'Modular form ' + conrey_newform_label\n obj_exists = True\n if check_existence:\n obj_exists = db.mf_newforms.label_exists(newform_label)\n elif len(url_split) == 8:\n # ModularForm/GL2/Q/holomorphic/24/2/f/a\n newform_label = \".\".join(url_split[-4:])\n name = 'Modular form ' + newform_label\n obj_exists = True\n if check_existence:\n obj_exists = db.mf_newforms.label_exists(newform_label)\n\n elif url_split[2] == 'TotallyReal':\n # ModularForm/GL2/TotallyReal/2.2.140.1/holomorphic/2.2.140.1-14.1-a\n label = url_split[-1]\n name = 'Hilbert modular form ' + label\n obj_exists = True\n if check_existence:\n obj_exists = db.hmf_forms.label_exists(label)\n\n elif url_split[2] == 'ImaginaryQuadratic':\n # ModularForm/GL2/ImaginaryQuadratic/2.0.4.1/98.1/a\n label = '-'.join(url_split[-3:])\n name = 'Bianchi modular form ' + label\n obj_exists = 'CM' not in label\n if check_existence:\n obj_exists = db.bmf_forms.label_exists(label)\n elif url_split[0] == \"ArtinRepresentation\":\n label = url_split[1]\n name = 'Artin representation ' + label\n obj_exists = True\n if check_existence:\n obj_exists = db.artin_reps.label_exists(label.split('c')[0])\n elif url_split[0] == \"NumberField\":\n from lmfdb.number_fields.web_number_field import field_pretty\n label = url_split[1]\n name = 'Number field ' + field_pretty(label)\n obj_exists = True\n if check_existence:\n obj_exists = db.number_fields.label_exists(label)\n elif url_split[0] == \"SatoTateGroup\":\n from lmfdb.sato_tate_groups.main import st_name\n name, label = st_name(url_split[1])\n if name is None:\n name = label\n obj_exists = False\n else:\n name = 'Sato Tate group $%s$' % name\n obj_exists = True\n else:\n # FIXME\n #print(\"unknown url\", url)\n pass\n\n return name, obj_exists\n\n\ndef names_and_urls(instances, exclude={}):\n res = []\n names = set()\n urls = set()\n exclude = set(exclude)\n root = url_for('index')\n\n # remove duplicate urls\n for instance in instances:\n if not isinstance(instance, str):\n instance = instance['url']\n if instance not in exclude and '|' not in instance:\n urls.add(instance)\n\n for url in urls:\n name, obj_exists = name_and_object_from_url(url)\n if not name:\n name = ''\n if obj_exists:\n url = root + url\n else:\n # do not display unknown objects\n continue\n name = '(%s)' % (name)\n url = \"\"\n # avoid duplicates that might have arise from different instances\n if name not in names:\n res.append((name, url))\n names.add(name)\n # sort based on name + label\n res.sort(key=lambda x: key_for_numerically_sort(x[0]))\n return res\n", "path": "lmfdb/utils/names_and_urls.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom lmfdb.utils.utilities import key_for_numerically_sort\nfrom flask import url_for\n#######################################################################\n# Functions for interacting with web structure\n#######################################################################\n\n# TODO This needs to be able to handle any sort of object\n# There should probably be a more relevant field\n# in the database, instead of trying to extract this from a URL\ndef name_and_object_from_url(url, check_existence=False):\n # the import is here to avoid circular imports\n from lmfdb import db\n url_split = url.rstrip('/').lstrip('/').split(\"/\")\n name = '??'\n obj_exists = 
False\n\n if url_split[0] == \"EllipticCurve\":\n # every EC instance was added from EC\n obj_exists = True\n if url_split[1] == 'Q':\n if len(url_split) == 4: # isogeny class\n # EllipticCurve/Q/341641/a\n label_isogeny_class = \".\".join(url_split[-2:])\n if check_existence:\n obj_exists = db.ec_curvedata.exists({\"lmfdb_iso\": label_isogeny_class})\n elif len(url_split) == 5: # curve\n # EllipticCurve/Q/48/a/6\n label_curve = \".\".join(url_split[-3:-1]) + url_split[-1]\n if check_existence:\n obj_exists = db.ec_curvedata.exists({\"lmfdb_label\": label_curve})\n else:\n raise NotImplementedError\n else:\n if len(url_split) == 4: # isogeny class\n # EllipticCurve/2.2.140.1/14.1/a\n field, cond, isog = url_split[-3:]\n label_isogeny_class = \"-\".join([field, cond, isog])\n if check_existence:\n obj_exists = db.ec_nfcurves.exists({\"class_label\": label_isogeny_class})\n elif len(url_split) == 5: # curve\n # EllipticCurve/2.0.4.1/1250.3/a/3\n field, cond, isog, ind = url_split[-4:]\n label_curve = \"-\".join([field, cond, isog]) + ind\n if check_existence:\n obj_exists = db.ec_nfcurves.exists({\"label\": label_curve})\n if len(url_split) == 4: # isogeny class\n #name = 'Isogeny class ' + label_isogeny_class\n name = 'Elliptic curve ' + label_isogeny_class\n elif len(url_split) == 5: # curve\n #name = 'Curve ' + label_curve\n name = 'Elliptic curve ' + label_curve\n\n elif url_split[0] == \"Genus2Curve\":\n obj_exists = True\n assert url_split[1] == 'Q'\n if len(url_split) == 4: # isog class\n # Genus2Curve/Q/310329/a\n label_isogeny_class = \".\".join(url_split[-2:])\n if check_existence:\n obj_exists = db.g2c_curves.exists({\"class\": label_isogeny_class})\n #name = 'Isogeny class ' + label_isogeny_class\n name = 'Genus 2 curve ' + label_isogeny_class\n if len(url_split) == 6: # curve\n # Genus2Curve/Q/1728/b/442368/1\n label_curve = \".\".join(url_split[-4:])\n if check_existence:\n obj_exists = db.g2c_curves.exists({\"label\": label_curve})\n #name = 'Curve ' + label_curve\n name = 'Genus 2 curve ' + label_curve\n\n elif url_split[0] == \"ModularForm\":\n if url_split[1] == 'GL2':\n if url_split[2] == 'Q' and url_split[3] == 'holomorphic':\n if len(url_split) == 10:\n # ModularForm/GL2/Q/holomorphic/24/2/f/a/11/2\n newform_label = \".\".join(url_split[-6:-2])\n conrey_newform_label = \".\".join(url_split[-6:])\n name = 'Modular form ' + conrey_newform_label\n obj_exists = True\n if check_existence:\n obj_exists = db.mf_newforms.label_exists(newform_label)\n elif len(url_split) == 8:\n # ModularForm/GL2/Q/holomorphic/24/2/f/a\n newform_label = \".\".join(url_split[-4:])\n name = 'Modular form ' + newform_label\n obj_exists = True\n if check_existence:\n obj_exists = db.mf_newforms.label_exists(newform_label)\n\n elif url_split[2] == 'TotallyReal':\n # ModularForm/GL2/TotallyReal/2.2.140.1/holomorphic/2.2.140.1-14.1-a\n label = url_split[-1]\n name = 'Hilbert modular form ' + label\n obj_exists = True\n if check_existence:\n obj_exists = db.hmf_forms.label_exists(label)\n\n elif url_split[2] == 'ImaginaryQuadratic':\n # ModularForm/GL2/ImaginaryQuadratic/2.0.4.1/98.1/a\n label = '-'.join(url_split[-3:])\n name = 'Bianchi modular form ' + label\n obj_exists = 'CM' not in label\n if check_existence:\n obj_exists = db.bmf_forms.label_exists(label)\n elif url_split[0] == \"ArtinRepresentation\":\n label = url_split[1]\n name = 'Artin representation ' + label\n obj_exists = True\n if check_existence:\n obj_exists = db.artin_reps.label_exists(label.split('c')[0])\n elif url_split[0] == 
\"NumberField\":\n from lmfdb.number_fields.web_number_field import field_pretty\n label = url_split[1]\n name = 'Number field ' + field_pretty(label)\n obj_exists = True\n if check_existence:\n obj_exists = db.number_fields.label_exists(label)\n elif url_split[0] == \"SatoTateGroup\":\n from lmfdb.sato_tate_groups.main import st_name\n name, label = st_name(url_split[1])\n if name is None:\n name = label\n obj_exists = False\n else:\n name = 'Sato Tate group $%s$' % name\n obj_exists = True\n elif url_split[:2] == [\"Character\", \"Dirichlet\"]:\n modulus = int(url_split[2])\n conrey = int(url_split[3])\n name = \"Character $\\chi_{%d}(%d, \\cdot)$\" % (modulus, conrey)\n obj_exists = True\n else:\n # FIXME\n assert False, url\n\n return name, obj_exists\n\n\ndef names_and_urls(instances, exclude={}):\n res = []\n names = set()\n urls = set()\n exclude = set(exclude)\n root = url_for('index')\n\n # remove duplicate urls\n for instance in instances:\n if not isinstance(instance, str):\n instance = instance['url']\n if instance not in exclude and '|' not in instance:\n urls.add(instance)\n\n for url in urls:\n name, obj_exists = name_and_object_from_url(url)\n if not name:\n name = ''\n if obj_exists:\n url = root + url\n else:\n # do not display unknown objects\n continue\n name = '(%s)' % (name)\n url = \"\"\n # avoid duplicates that might have arise from different instances\n if name not in names:\n res.append((name, url))\n names.add(name)\n # sort based on name + label\n res.sort(key=lambda x: key_for_numerically_sort(x[0]))\n return res\n", "path": "lmfdb/utils/names_and_urls.py"}]}
| 2,410 | 196 |
| gh_patches_debug_36634 | rasdani/github-patches | git_diff | bridgecrewio__checkov-4289 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Check: CKV_GCP_19: "Ensure GKE basic auth is disabled"
**Describe the issue**
The default for this is disabled yet the alert keeps firing.
**Examples**
Please share an example code sample (in the IaC of your choice) + the expected outcomes.
**Version (please complete the following information):**
- 2.2.255
**Additional context**
Add any other context about the problem here.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/checks/resource/gcp/GKEBasicAuth.py`
Content:
```
1 from checkov.common.models.enums import CheckResult, CheckCategories
2 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
3 from typing import List
4
5
6 class GKEBasicAuth(BaseResourceCheck):
7 def __init__(self):
8 name = "Ensure GKE basic auth is disabled"
9 id = "CKV_GCP_19"
10 supported_resources = ['google_container_cluster']
11 categories = [CheckCategories.KUBERNETES]
12 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
13
14 def scan_resource_conf(self, conf):
15 """
16 Looks for password configuration at azure_instance:
17 https://www.terraform.io/docs/providers/google/r/compute_ssl_policy.html
18 :param conf: google_compute_ssl_policy configuration
19 :return: <CheckResult>
20 """
21 if 'master_auth' in conf.keys():
22 username = conf['master_auth'][0].get('username')
23 password = conf['master_auth'][0].get('password')
24 if username or password:
25 # only if both are set to the empty string it is fine
26 # https://www.terraform.io/docs/providers/google/r/container_cluster.html
27 if username and password:
28 if username[0] == '' and password[0] == '':
29 return CheckResult.PASSED
30 return CheckResult.FAILED
31 return CheckResult.PASSED
32 return CheckResult.FAILED
33
34 def get_evaluated_keys(self) -> List[str]:
35 return ['master_auth/[0]/username', 'master_auth/[0]/password']
36
37
38 check = GKEBasicAuth()
39
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/checkov/terraform/checks/resource/gcp/GKEBasicAuth.py b/checkov/terraform/checks/resource/gcp/GKEBasicAuth.py
--- a/checkov/terraform/checks/resource/gcp/GKEBasicAuth.py
+++ b/checkov/terraform/checks/resource/gcp/GKEBasicAuth.py
@@ -1,37 +1,36 @@
+from __future__ import annotations
+
+from typing import Any
+
from checkov.common.models.enums import CheckResult, CheckCategories
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
-from typing import List
class GKEBasicAuth(BaseResourceCheck):
- def __init__(self):
+ def __init__(self) -> None:
name = "Ensure GKE basic auth is disabled"
id = "CKV_GCP_19"
- supported_resources = ['google_container_cluster']
- categories = [CheckCategories.KUBERNETES]
+ supported_resources = ('google_container_cluster',)
+ categories = (CheckCategories.KUBERNETES,)
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
- def scan_resource_conf(self, conf):
- """
- Looks for password configuration at azure_instance:
- https://www.terraform.io/docs/providers/google/r/compute_ssl_policy.html
- :param conf: google_compute_ssl_policy configuration
- :return: <CheckResult>
- """
- if 'master_auth' in conf.keys():
- username = conf['master_auth'][0].get('username')
- password = conf['master_auth'][0].get('password')
+ def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:
+ # since GKE 1.19 the usage of basic auth is deprecated and in the provider version 4+ removed
+ master_auth = conf.get("master_auth")
+ if master_auth and isinstance(master_auth, list):
+ username = master_auth[0].get('username')
+ password = master_auth[0].get('password')
if username or password:
# only if both are set to the empty string it is fine
- # https://www.terraform.io/docs/providers/google/r/container_cluster.html
+ # https://registry.terraform.io/providers/hashicorp/google/3.90.1/docs/resources/container_cluster.html
if username and password:
if username[0] == '' and password[0] == '':
return CheckResult.PASSED
return CheckResult.FAILED
- return CheckResult.PASSED
- return CheckResult.FAILED
- def get_evaluated_keys(self) -> List[str]:
+ return CheckResult.PASSED
+
+ def get_evaluated_keys(self) -> list[str]:
return ['master_auth/[0]/username', 'master_auth/[0]/password']
|
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/gcp/GKEBasicAuth.py b/checkov/terraform/checks/resource/gcp/GKEBasicAuth.py\n--- a/checkov/terraform/checks/resource/gcp/GKEBasicAuth.py\n+++ b/checkov/terraform/checks/resource/gcp/GKEBasicAuth.py\n@@ -1,37 +1,36 @@\n+from __future__ import annotations\n+\n+from typing import Any\n+\n from checkov.common.models.enums import CheckResult, CheckCategories\n from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n-from typing import List\n \n \n class GKEBasicAuth(BaseResourceCheck):\n- def __init__(self):\n+ def __init__(self) -> None:\n name = \"Ensure GKE basic auth is disabled\"\n id = \"CKV_GCP_19\"\n- supported_resources = ['google_container_cluster']\n- categories = [CheckCategories.KUBERNETES]\n+ supported_resources = ('google_container_cluster',)\n+ categories = (CheckCategories.KUBERNETES,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n- def scan_resource_conf(self, conf):\n- \"\"\"\n- Looks for password configuration at azure_instance:\n- https://www.terraform.io/docs/providers/google/r/compute_ssl_policy.html\n- :param conf: google_compute_ssl_policy configuration\n- :return: <CheckResult>\n- \"\"\"\n- if 'master_auth' in conf.keys():\n- username = conf['master_auth'][0].get('username')\n- password = conf['master_auth'][0].get('password')\n+ def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:\n+ # since GKE 1.19 the usage of basic auth is deprecated and in the provider version 4+ removed\n+ master_auth = conf.get(\"master_auth\")\n+ if master_auth and isinstance(master_auth, list):\n+ username = master_auth[0].get('username')\n+ password = master_auth[0].get('password')\n if username or password:\n # only if both are set to the empty string it is fine\n- # https://www.terraform.io/docs/providers/google/r/container_cluster.html\n+ # https://registry.terraform.io/providers/hashicorp/google/3.90.1/docs/resources/container_cluster.html\n if username and password:\n if username[0] == '' and password[0] == '':\n return CheckResult.PASSED\n return CheckResult.FAILED\n- return CheckResult.PASSED\n- return CheckResult.FAILED\n \n- def get_evaluated_keys(self) -> List[str]:\n+ return CheckResult.PASSED\n+\n+ def get_evaluated_keys(self) -> list[str]:\n return ['master_auth/[0]/username', 'master_auth/[0]/password']\n", "issue": "Check: CKV_GCP_19: \"Ensure GKE basic auth is disabled\"\n**Describe the issue**\r\nThe default for this is disabled yet the alert keeps firing. 
\r\n\r\n**Examples**\r\nPlease share an example code sample (in the IaC of your choice) + the expected outcomes.\r\n\r\n**Version (please complete the following information):**\r\n- 2.2.255\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\nfrom typing import List\n\n\nclass GKEBasicAuth(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure GKE basic auth is disabled\"\n id = \"CKV_GCP_19\"\n supported_resources = ['google_container_cluster']\n categories = [CheckCategories.KUBERNETES]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n \"\"\"\n Looks for password configuration at azure_instance:\n https://www.terraform.io/docs/providers/google/r/compute_ssl_policy.html\n :param conf: google_compute_ssl_policy configuration\n :return: <CheckResult>\n \"\"\"\n if 'master_auth' in conf.keys():\n username = conf['master_auth'][0].get('username')\n password = conf['master_auth'][0].get('password')\n if username or password:\n # only if both are set to the empty string it is fine\n # https://www.terraform.io/docs/providers/google/r/container_cluster.html\n if username and password:\n if username[0] == '' and password[0] == '':\n return CheckResult.PASSED\n return CheckResult.FAILED\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n def get_evaluated_keys(self) -> List[str]:\n return ['master_auth/[0]/username', 'master_auth/[0]/password']\n\n\ncheck = GKEBasicAuth()\n", "path": "checkov/terraform/checks/resource/gcp/GKEBasicAuth.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any\n\nfrom checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass GKEBasicAuth(BaseResourceCheck):\n def __init__(self) -> None:\n name = \"Ensure GKE basic auth is disabled\"\n id = \"CKV_GCP_19\"\n supported_resources = ('google_container_cluster',)\n categories = (CheckCategories.KUBERNETES,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:\n # since GKE 1.19 the usage of basic auth is deprecated and in the provider version 4+ removed\n master_auth = conf.get(\"master_auth\")\n if master_auth and isinstance(master_auth, list):\n username = master_auth[0].get('username')\n password = master_auth[0].get('password')\n if username or password:\n # only if both are set to the empty string it is fine\n # https://registry.terraform.io/providers/hashicorp/google/3.90.1/docs/resources/container_cluster.html\n if username and password:\n if username[0] == '' and password[0] == '':\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n return CheckResult.PASSED\n\n def get_evaluated_keys(self) -> list[str]:\n return ['master_auth/[0]/username', 'master_auth/[0]/password']\n\n\ncheck = GKEBasicAuth()\n", "path": "checkov/terraform/checks/resource/gcp/GKEBasicAuth.py"}]}
| 778 | 628 |
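A quick way to sanity-check the golden diff in the record above is to trace its branching outside of checkov. The sketch below is not checkov code — `gke_basic_auth_result` and the plain `"PASSED"`/`"FAILED"` strings are stand-ins for the real `CheckResult` enum — but it follows the same decision path on dict inputs shaped like checkov's parsed HCL, where attribute values arrive wrapped in single-element lists.

```python
# Simplified re-implementation of the patched check's branching, for tracing
# only. Real checkov returns CheckResult.PASSED / CheckResult.FAILED.
def gke_basic_auth_result(conf: dict) -> str:
    master_auth = conf.get("master_auth")
    if master_auth and isinstance(master_auth, list):
        username = master_auth[0].get("username")
        password = master_auth[0].get("password")
        if username or password:
            # Only an explicitly empty username/password pair disables basic auth.
            if username and password and username[0] == "" and password[0] == "":
                return "PASSED"
            return "FAILED"
    # No master_auth block (or an empty one): nothing enables basic auth.
    return "PASSED"


if __name__ == "__main__":
    # Attribute values are wrapped in lists, like checkov's parsed HCL.
    print(gke_basic_auth_result({}))  # PASSED
    print(gke_basic_auth_result({"master_auth": [{"username": [""], "password": [""]}]}))  # PASSED
    print(gke_basic_auth_result({"master_auth": [{"username": ["admin"], "password": ["pw"]}]}))  # FAILED
```

The behavioural change that addresses the issue is the final fall-through: a cluster with no `master_auth` block now passes, matching newer GKE/provider versions where basic auth has been removed entirely.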
gh_patches_debug_21720
|
rasdani/github-patches
|
git_diff
|
deeppavlov__DeepPavlov-676
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make ROOT_PATH, MODELS_PATH and DOWNLOADS_PATH environment variables
All config files I've seen so far have the following variables:
```
"ROOT_PATH": "~/.deeppavlov",
"DOWNLOADS_PATH": "{ROOT_PATH}/downloads",
"MODELS_PATH": "{ROOT_PATH}/models"
```
Should we make them environment variables?
This would be better for the following reasons:
1. No need to define the same variables across all configs
1. No need to redefine variables for your system. For example, I do not store source code and downloads/models at same place (cause of their huge size), so I need to change configs for me and change them _back_ to make PR (that is kind of ridiculous). If these variables were in the environment, I'd have to change them only one time - after deeppavlov installation.
1. Make configs working directory independent (no '~/'-paths)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `deeppavlov/core/commands/utils.py`
Content:
```
1 # Copyright 2017 Neural Networks and Deep Learning lab, MIPT
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from pathlib import Path
15 from typing import Union, Dict, TypeVar
16
17 from deeppavlov.core.common.file import read_json, find_config
18
19 # noinspection PyShadowingBuiltins
20 _T = TypeVar('_T', str, float, bool, list, dict)
21
22
23 def _parse_config_property(item: _T, variables: Dict[str, Union[str, Path, float, bool, None]]) -> _T:
24 """Recursively apply config's variables values to its property"""
25 if isinstance(item, str):
26 return item.format(**variables)
27 elif isinstance(item, list):
28 return [_parse_config_property(item, variables) for item in item]
29 elif isinstance(item, dict):
30 return {k: _parse_config_property(v, variables) for k, v in item.items()}
31 else:
32 return item
33
34
35 def parse_config(config: Union[str, Path, dict]) -> dict:
36 """Read config's variables and apply their values to all its properties"""
37 if isinstance(config, (str, Path)):
38 config = read_json(find_config(config))
39
40 variables = {
41 'DEEPPAVLOV_PATH': Path(__file__).parent.parent.parent
42 }
43 for name, value in config.get('metadata', {}).get('variables', {}).items():
44 variables[name] = value.format(**variables)
45
46 return _parse_config_property(config, variables)
47
48
49 def expand_path(path: Union[str, Path]) -> Path:
50 """Convert relative paths to absolute with resolving user directory."""
51 return Path(path).expanduser().resolve()
52
53
54 def import_packages(packages: list) -> None:
55 """Import packages from list to execute their code."""
56 for package in packages:
57 __import__(package)
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/deeppavlov/core/commands/utils.py b/deeppavlov/core/commands/utils.py
--- a/deeppavlov/core/commands/utils.py
+++ b/deeppavlov/core/commands/utils.py
@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+import os
from pathlib import Path
from typing import Union, Dict, TypeVar
@@ -38,9 +39,12 @@
config = read_json(find_config(config))
variables = {
- 'DEEPPAVLOV_PATH': Path(__file__).parent.parent.parent
+ 'DEEPPAVLOV_PATH': os.getenv(f'DP_DEEPPAVLOV_PATH', Path(__file__).parent.parent.parent)
}
for name, value in config.get('metadata', {}).get('variables', {}).items():
+ env_name = f'DP_{name}'
+ if env_name in os.environ:
+ value = os.getenv(env_name)
variables[name] = value.format(**variables)
return _parse_config_property(config, variables)
|
{"golden_diff": "diff --git a/deeppavlov/core/commands/utils.py b/deeppavlov/core/commands/utils.py\n--- a/deeppavlov/core/commands/utils.py\n+++ b/deeppavlov/core/commands/utils.py\n@@ -11,6 +11,7 @@\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n+import os\n from pathlib import Path\n from typing import Union, Dict, TypeVar\n \n@@ -38,9 +39,12 @@\n config = read_json(find_config(config))\n \n variables = {\n- 'DEEPPAVLOV_PATH': Path(__file__).parent.parent.parent\n+ 'DEEPPAVLOV_PATH': os.getenv(f'DP_DEEPPAVLOV_PATH', Path(__file__).parent.parent.parent)\n }\n for name, value in config.get('metadata', {}).get('variables', {}).items():\n+ env_name = f'DP_{name}'\n+ if env_name in os.environ:\n+ value = os.getenv(env_name)\n variables[name] = value.format(**variables)\n \n return _parse_config_property(config, variables)\n", "issue": "Make ROOT_PATH, MODELS_PATH and DOWNLOADS_PATH environment variables\nAll config files I've seen so far have the following variables:\r\n```\r\n\"ROOT_PATH\": \"~/.deeppavlov\",\r\n\"DOWNLOADS_PATH\": \"{ROOT_PATH}/downloads\",\r\n\"MODELS_PATH\": \"{ROOT_PATH}/models\"\r\n```\r\nShould we make them environment variables?\r\nThis would be better for the following reasons:\r\n1. No need to define the same variables across all configs\r\n1. No need to redefine variables for your system. For example, I do not store source code and downloads/models at same place (cause of their huge size), so I need to change configs for me and change them _back_ to make PR (that is kind of ridiculous). If these variables were in the environment, I'd have to change them only one time - after deeppavlov installation.\r\n1. 
Make configs working directory independent (no '~/'-paths)\n", "before_files": [{"content": "# Copyright 2017 Neural Networks and Deep Learning lab, MIPT\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom pathlib import Path\nfrom typing import Union, Dict, TypeVar\n\nfrom deeppavlov.core.common.file import read_json, find_config\n\n# noinspection PyShadowingBuiltins\n_T = TypeVar('_T', str, float, bool, list, dict)\n\n\ndef _parse_config_property(item: _T, variables: Dict[str, Union[str, Path, float, bool, None]]) -> _T:\n \"\"\"Recursively apply config's variables values to its property\"\"\"\n if isinstance(item, str):\n return item.format(**variables)\n elif isinstance(item, list):\n return [_parse_config_property(item, variables) for item in item]\n elif isinstance(item, dict):\n return {k: _parse_config_property(v, variables) for k, v in item.items()}\n else:\n return item\n\n\ndef parse_config(config: Union[str, Path, dict]) -> dict:\n \"\"\"Read config's variables and apply their values to all its properties\"\"\"\n if isinstance(config, (str, Path)):\n config = read_json(find_config(config))\n\n variables = {\n 'DEEPPAVLOV_PATH': Path(__file__).parent.parent.parent\n }\n for name, value in config.get('metadata', {}).get('variables', {}).items():\n variables[name] = value.format(**variables)\n\n return _parse_config_property(config, variables)\n\n\ndef expand_path(path: Union[str, Path]) -> Path:\n \"\"\"Convert relative paths to absolute with resolving user directory.\"\"\"\n return Path(path).expanduser().resolve()\n\n\ndef import_packages(packages: list) -> None:\n \"\"\"Import packages from list to execute their code.\"\"\"\n for package in packages:\n __import__(package)\n", "path": "deeppavlov/core/commands/utils.py"}], "after_files": [{"content": "# Copyright 2017 Neural Networks and Deep Learning lab, MIPT\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nfrom pathlib import Path\nfrom typing import Union, Dict, TypeVar\n\nfrom deeppavlov.core.common.file import read_json, find_config\n\n# noinspection PyShadowingBuiltins\n_T = TypeVar('_T', str, float, bool, list, dict)\n\n\ndef _parse_config_property(item: _T, variables: Dict[str, Union[str, Path, float, bool, None]]) -> _T:\n \"\"\"Recursively apply config's variables values to its property\"\"\"\n if isinstance(item, str):\n return item.format(**variables)\n elif isinstance(item, list):\n return [_parse_config_property(item, variables) for item in item]\n elif isinstance(item, dict):\n return {k: 
_parse_config_property(v, variables) for k, v in item.items()}\n else:\n return item\n\n\ndef parse_config(config: Union[str, Path, dict]) -> dict:\n \"\"\"Read config's variables and apply their values to all its properties\"\"\"\n if isinstance(config, (str, Path)):\n config = read_json(find_config(config))\n\n variables = {\n 'DEEPPAVLOV_PATH': os.getenv(f'DP_DEEPPAVLOV_PATH', Path(__file__).parent.parent.parent)\n }\n for name, value in config.get('metadata', {}).get('variables', {}).items():\n env_name = f'DP_{name}'\n if env_name in os.environ:\n value = os.getenv(env_name)\n variables[name] = value.format(**variables)\n\n return _parse_config_property(config, variables)\n\n\ndef expand_path(path: Union[str, Path]) -> Path:\n \"\"\"Convert relative paths to absolute with resolving user directory.\"\"\"\n return Path(path).expanduser().resolve()\n\n\ndef import_packages(packages: list) -> None:\n \"\"\"Import packages from list to execute their code.\"\"\"\n for package in packages:\n __import__(package)\n", "path": "deeppavlov/core/commands/utils.py"}]}
| 1,060 | 264 |
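To see how the environment-variable override added by the golden diff above behaves end to end, here is a minimal, self-contained sketch of the resolution order. It is not DeepPavlov code: `resolve_variables` and the `/opt/deeppavlov` default are illustrative (the real patch falls back to the package directory for `DEEPPAVLOV_PATH`), but the `DP_<NAME>` lookup and the `str.format` chaining mirror the diff.

```python
import os


def resolve_variables(config_variables: dict) -> dict:
    """Resolve config variables, letting DP_<NAME> env vars take precedence."""
    # The real patch defaults DEEPPAVLOV_PATH to the installed package path;
    # a literal path is used here purely so the example runs standalone.
    resolved = {"DEEPPAVLOV_PATH": os.getenv("DP_DEEPPAVLOV_PATH", "/opt/deeppavlov")}
    for name, value in config_variables.items():
        env_name = f"DP_{name}"
        if env_name in os.environ:
            value = os.environ[env_name]
        resolved[name] = value.format(**resolved)
    return resolved


if __name__ == "__main__":
    os.environ["DP_ROOT_PATH"] = "/data/dp"  # simulate a user override
    print(resolve_variables({
        "ROOT_PATH": "~/.deeppavlov",
        "DOWNLOADS_PATH": "{ROOT_PATH}/downloads",
        "MODELS_PATH": "{ROOT_PATH}/models",
    }))
    # ROOT_PATH comes from the environment, and the derived paths follow it.
```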
gh_patches_debug_37231
|
rasdani/github-patches
|
git_diff
|
opendatacube__datacube-core-982
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Extra files in source distribution packaging
`python setup.py sdist` builds source distribution with docs and tests and bunch of random files in it, that should not be included. Strangely `bdist_wheel` does not include those and generates proper release file.
https://github.com/conda-forge/datacube-feedstock/issues/25
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `datacube/utils/xarray_geoextensions.py`
Content:
```
1 """
2 Add geometric extensions to :class:`xarray.Dataset` and :class:`xarray.DataArray` for use
3 with Data Cube by Monkey Patching those classes.
4
5 This extension is reliant on an `xarray` object having a `.crs` property of type
6 :class:`datacube.utils.geometry.CRS`. This is used to inspect the spatial dimensions of the
7 :class:`Dataset` or :class:`DataArray`, and provide new attributes for accessing a
8 :class:`datacube.utils.geometry.GeoBox`, affine transform and extent for the dataset as
9 `.geobox`, `.affine` and `.extent` respectively.
10
11 """
12
13 import xarray
14
15 from datacube.utils import geometry, spatial_dims
16 from datacube.utils.math import affine_from_axis
17
18
19 def _norm_crs(crs):
20 if crs is None or isinstance(crs, geometry.CRS):
21 return crs
22 elif isinstance(crs, str):
23 return geometry.CRS(crs)
24 else:
25 raise ValueError('Can not interpret {} as CRS'.format(type(crs)))
26
27
28 def _get_crs_from_attrs(obj, sdims):
29 """ Looks for attribute named `crs` containing CRS string
30 1. Checks spatials coords attrs
31 2. Checks data variable attrs
32 3. Checks dataset attrs
33
34 Returns
35 =======
36 Content for `.attrs[crs]` usually it's a string
37 None if not present in any of the places listed above
38 """
39 if isinstance(obj, xarray.Dataset):
40 if len(obj.data_vars) > 0:
41 data_array = next(iter(obj.data_vars.values()))
42 else:
43 # fall back option
44 return obj.attrs.get('crs', None)
45 else:
46 data_array = obj
47
48 crs_set = set(data_array[d].attrs.get('crs', None) for d in sdims)
49 crs = None
50 if len(crs_set) > 1:
51 raise ValueError('Spatial dimensions have different crs.')
52 elif len(crs_set) == 1:
53 crs = crs_set.pop()
54
55 if crs is None:
56 # fall back option
57 crs = data_array.attrs.get('crs', None) or obj.attrs.get('crs', None)
58 return crs
59
60
61 def _get_crs_from_coord(obj, mode='strict'):
62 """ Looks for dimensionless coordinate with `spatial_ref` attribute.
63
64 obj: Dataset | DataArray
65 mode: strict|any|all
66 strict -- raise Error if multiple candidates
67 any -- return first one
68 all -- return a list of all found CRSs
69
70 Returns
71 =======
72 None - if none found
73 crs:str - if found one
74 crs:str - if found several but mode is any
75
76 (crs: str, crs: str) - if found several and mode=all
77 """
78 grid_mapping = obj.attrs.get('grid_mapping', None)
79
80 # First check CF convention "pointer"
81 if grid_mapping is not None and grid_mapping in obj.coords:
82 coord = obj.coords[grid_mapping]
83 spatial_ref = coord.attrs.get('spatial_ref', None)
84 if spatial_ref is not None:
85 return spatial_ref
86 else:
87 raise ValueError(f"Coordinate '{grid_mapping}' has no `spatial_ref` attribute")
88
89 # No explicit `grid_mapping` find some "CRS" coordinate
90 candidates = tuple(coord.attrs['spatial_ref'] for coord in obj.coords.values()
91 if coord.ndim == 0 and 'spatial_ref' in coord.attrs)
92
93 if len(candidates) == 0:
94 return None
95 if len(candidates) == 1:
96 return candidates[0]
97
98 if mode == 'strict':
99 raise ValueError("Too many candidates when looking for CRS")
100 elif mode == 'all':
101 return candidates
102 elif mode == 'any':
103 return candidates[0]
104 else:
105 raise ValueError(f"Mode needs to be: strict|any|all got {mode}")
106
107
108 def _xarray_affine_impl(obj):
109 sdims = spatial_dims(obj, relaxed=True)
110 if sdims is None:
111 return None, None
112
113 yy, xx = (obj[dim] for dim in sdims)
114 fallback_res = (coord.attrs.get('resolution', None) for coord in (xx, yy))
115
116 return affine_from_axis(xx.values, yy.values, fallback_res), sdims
117
118
119 def _xarray_affine(obj):
120 transform, _ = _xarray_affine_impl(obj)
121 return transform
122
123
124 def _xarray_extent(obj):
125 geobox = obj.geobox
126 return None if geobox is None else geobox.extent
127
128
129 def _xarray_geobox(obj):
130 transform, sdims = _xarray_affine_impl(obj)
131 if sdims is None:
132 return None
133
134 crs = None
135 try:
136 crs = _get_crs_from_coord(obj)
137 except ValueError:
138 pass
139
140 if crs is None:
141 try:
142 crs = _get_crs_from_attrs(obj, sdims)
143 except ValueError:
144 pass
145
146 if crs is None:
147 return None
148
149 try:
150 crs = _norm_crs(crs)
151 except ValueError:
152 return None
153
154 h, w = (obj.coords[dim].size for dim in sdims)
155
156 return geometry.GeoBox(w, h, transform, crs)
157
158
159 xarray.Dataset.geobox = property(_xarray_geobox) # type: ignore
160 xarray.Dataset.affine = property(_xarray_affine) # type: ignore
161 xarray.Dataset.extent = property(_xarray_extent) # type: ignore
162 xarray.DataArray.geobox = property(_xarray_geobox) # type: ignore
163 xarray.DataArray.affine = property(_xarray_affine) # type: ignore
164 xarray.DataArray.extent = property(_xarray_extent) # type: ignore
165
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/datacube/utils/xarray_geoextensions.py b/datacube/utils/xarray_geoextensions.py
--- a/datacube/utils/xarray_geoextensions.py
+++ b/datacube/utils/xarray_geoextensions.py
@@ -9,9 +9,8 @@
`.geobox`, `.affine` and `.extent` respectively.
"""
-
+import warnings
import xarray
-
from datacube.utils import geometry, spatial_dims
from datacube.utils.math import affine_from_axis
@@ -27,34 +26,48 @@
def _get_crs_from_attrs(obj, sdims):
""" Looks for attribute named `crs` containing CRS string
- 1. Checks spatials coords attrs
- 2. Checks data variable attrs
- 3. Checks dataset attrs
+ - Checks spatials coords attrs
+ - Checks data variable attrs
+ - Checks dataset attrs
Returns
=======
Content for `.attrs[crs]` usually it's a string
None if not present in any of the places listed above
"""
- if isinstance(obj, xarray.Dataset):
- if len(obj.data_vars) > 0:
- data_array = next(iter(obj.data_vars.values()))
+ crs_set = set()
+
+ def _add_candidate(crs):
+ if crs is None:
+ return
+ if isinstance(crs, str):
+ crs_set.add(crs)
else:
- # fall back option
- return obj.attrs.get('crs', None)
+ warnings.warn(f"Ignoring crs attribute of type: {type(crs)}")
+
+ def process_attrs(attrs):
+ _add_candidate(attrs.get('crs', None))
+ _add_candidate(attrs.get('crs_wkt', None))
+
+ def process_datavar(x):
+ process_attrs(x.attrs)
+ for dim in sdims:
+ if dim in x.coords:
+ process_attrs(x.coords[dim].attrs)
+
+ if isinstance(obj, xarray.Dataset):
+ process_attrs(obj.attrs)
+ for dv in obj.data_vars.values():
+ process_datavar(dv)
else:
- data_array = obj
+ process_datavar(obj)
- crs_set = set(data_array[d].attrs.get('crs', None) for d in sdims)
crs = None
if len(crs_set) > 1:
raise ValueError('Spatial dimensions have different crs.')
elif len(crs_set) == 1:
crs = crs_set.pop()
- if crs is None:
- # fall back option
- crs = data_array.attrs.get('crs', None) or obj.attrs.get('crs', None)
return crs
@@ -148,7 +161,8 @@
try:
crs = _norm_crs(crs)
- except ValueError:
+ except (ValueError, geometry.CRSError):
+ warnings.warn(f"Encountered malformed CRS: {crs}")
return None
h, w = (obj.coords[dim].size for dim in sdims)
|
{"golden_diff": "diff --git a/datacube/utils/xarray_geoextensions.py b/datacube/utils/xarray_geoextensions.py\n--- a/datacube/utils/xarray_geoextensions.py\n+++ b/datacube/utils/xarray_geoextensions.py\n@@ -9,9 +9,8 @@\n `.geobox`, `.affine` and `.extent` respectively.\n \n \"\"\"\n-\n+import warnings\n import xarray\n-\n from datacube.utils import geometry, spatial_dims\n from datacube.utils.math import affine_from_axis\n \n@@ -27,34 +26,48 @@\n \n def _get_crs_from_attrs(obj, sdims):\n \"\"\" Looks for attribute named `crs` containing CRS string\n- 1. Checks spatials coords attrs\n- 2. Checks data variable attrs\n- 3. Checks dataset attrs\n+ - Checks spatials coords attrs\n+ - Checks data variable attrs\n+ - Checks dataset attrs\n \n Returns\n =======\n Content for `.attrs[crs]` usually it's a string\n None if not present in any of the places listed above\n \"\"\"\n- if isinstance(obj, xarray.Dataset):\n- if len(obj.data_vars) > 0:\n- data_array = next(iter(obj.data_vars.values()))\n+ crs_set = set()\n+\n+ def _add_candidate(crs):\n+ if crs is None:\n+ return\n+ if isinstance(crs, str):\n+ crs_set.add(crs)\n else:\n- # fall back option\n- return obj.attrs.get('crs', None)\n+ warnings.warn(f\"Ignoring crs attribute of type: {type(crs)}\")\n+\n+ def process_attrs(attrs):\n+ _add_candidate(attrs.get('crs', None))\n+ _add_candidate(attrs.get('crs_wkt', None))\n+\n+ def process_datavar(x):\n+ process_attrs(x.attrs)\n+ for dim in sdims:\n+ if dim in x.coords:\n+ process_attrs(x.coords[dim].attrs)\n+\n+ if isinstance(obj, xarray.Dataset):\n+ process_attrs(obj.attrs)\n+ for dv in obj.data_vars.values():\n+ process_datavar(dv)\n else:\n- data_array = obj\n+ process_datavar(obj)\n \n- crs_set = set(data_array[d].attrs.get('crs', None) for d in sdims)\n crs = None\n if len(crs_set) > 1:\n raise ValueError('Spatial dimensions have different crs.')\n elif len(crs_set) == 1:\n crs = crs_set.pop()\n \n- if crs is None:\n- # fall back option\n- crs = data_array.attrs.get('crs', None) or obj.attrs.get('crs', None)\n return crs\n \n \n@@ -148,7 +161,8 @@\n \n try:\n crs = _norm_crs(crs)\n- except ValueError:\n+ except (ValueError, geometry.CRSError):\n+ warnings.warn(f\"Encountered malformed CRS: {crs}\")\n return None\n \n h, w = (obj.coords[dim].size for dim in sdims)\n", "issue": "Extra files in source distribution packaging\n`python setup.py sdist` builds source distribution with docs and tests and bunch of random files in it, that should not be included. Strangely `bdist_wheel` does not include those and generates proper release file.\r\n\r\nhttps://github.com/conda-forge/datacube-feedstock/issues/25\n", "before_files": [{"content": "\"\"\"\nAdd geometric extensions to :class:`xarray.Dataset` and :class:`xarray.DataArray` for use\nwith Data Cube by Monkey Patching those classes.\n\nThis extension is reliant on an `xarray` object having a `.crs` property of type\n:class:`datacube.utils.geometry.CRS`. 
This is used to inspect the spatial dimensions of the\n:class:`Dataset` or :class:`DataArray`, and provide new attributes for accessing a\n:class:`datacube.utils.geometry.GeoBox`, affine transform and extent for the dataset as\n`.geobox`, `.affine` and `.extent` respectively.\n\n\"\"\"\n\nimport xarray\n\nfrom datacube.utils import geometry, spatial_dims\nfrom datacube.utils.math import affine_from_axis\n\n\ndef _norm_crs(crs):\n if crs is None or isinstance(crs, geometry.CRS):\n return crs\n elif isinstance(crs, str):\n return geometry.CRS(crs)\n else:\n raise ValueError('Can not interpret {} as CRS'.format(type(crs)))\n\n\ndef _get_crs_from_attrs(obj, sdims):\n \"\"\" Looks for attribute named `crs` containing CRS string\n 1. Checks spatials coords attrs\n 2. Checks data variable attrs\n 3. Checks dataset attrs\n\n Returns\n =======\n Content for `.attrs[crs]` usually it's a string\n None if not present in any of the places listed above\n \"\"\"\n if isinstance(obj, xarray.Dataset):\n if len(obj.data_vars) > 0:\n data_array = next(iter(obj.data_vars.values()))\n else:\n # fall back option\n return obj.attrs.get('crs', None)\n else:\n data_array = obj\n\n crs_set = set(data_array[d].attrs.get('crs', None) for d in sdims)\n crs = None\n if len(crs_set) > 1:\n raise ValueError('Spatial dimensions have different crs.')\n elif len(crs_set) == 1:\n crs = crs_set.pop()\n\n if crs is None:\n # fall back option\n crs = data_array.attrs.get('crs', None) or obj.attrs.get('crs', None)\n return crs\n\n\ndef _get_crs_from_coord(obj, mode='strict'):\n \"\"\" Looks for dimensionless coordinate with `spatial_ref` attribute.\n\n obj: Dataset | DataArray\n mode: strict|any|all\n strict -- raise Error if multiple candidates\n any -- return first one\n all -- return a list of all found CRSs\n\n Returns\n =======\n None - if none found\n crs:str - if found one\n crs:str - if found several but mode is any\n\n (crs: str, crs: str) - if found several and mode=all\n \"\"\"\n grid_mapping = obj.attrs.get('grid_mapping', None)\n\n # First check CF convention \"pointer\"\n if grid_mapping is not None and grid_mapping in obj.coords:\n coord = obj.coords[grid_mapping]\n spatial_ref = coord.attrs.get('spatial_ref', None)\n if spatial_ref is not None:\n return spatial_ref\n else:\n raise ValueError(f\"Coordinate '{grid_mapping}' has no `spatial_ref` attribute\")\n\n # No explicit `grid_mapping` find some \"CRS\" coordinate\n candidates = tuple(coord.attrs['spatial_ref'] for coord in obj.coords.values()\n if coord.ndim == 0 and 'spatial_ref' in coord.attrs)\n\n if len(candidates) == 0:\n return None\n if len(candidates) == 1:\n return candidates[0]\n\n if mode == 'strict':\n raise ValueError(\"Too many candidates when looking for CRS\")\n elif mode == 'all':\n return candidates\n elif mode == 'any':\n return candidates[0]\n else:\n raise ValueError(f\"Mode needs to be: strict|any|all got {mode}\")\n\n\ndef _xarray_affine_impl(obj):\n sdims = spatial_dims(obj, relaxed=True)\n if sdims is None:\n return None, None\n\n yy, xx = (obj[dim] for dim in sdims)\n fallback_res = (coord.attrs.get('resolution', None) for coord in (xx, yy))\n\n return affine_from_axis(xx.values, yy.values, fallback_res), sdims\n\n\ndef _xarray_affine(obj):\n transform, _ = _xarray_affine_impl(obj)\n return transform\n\n\ndef _xarray_extent(obj):\n geobox = obj.geobox\n return None if geobox is None else geobox.extent\n\n\ndef _xarray_geobox(obj):\n transform, sdims = _xarray_affine_impl(obj)\n if sdims is None:\n return None\n\n crs = None\n try:\n 
crs = _get_crs_from_coord(obj)\n except ValueError:\n pass\n\n if crs is None:\n try:\n crs = _get_crs_from_attrs(obj, sdims)\n except ValueError:\n pass\n\n if crs is None:\n return None\n\n try:\n crs = _norm_crs(crs)\n except ValueError:\n return None\n\n h, w = (obj.coords[dim].size for dim in sdims)\n\n return geometry.GeoBox(w, h, transform, crs)\n\n\nxarray.Dataset.geobox = property(_xarray_geobox) # type: ignore\nxarray.Dataset.affine = property(_xarray_affine) # type: ignore\nxarray.Dataset.extent = property(_xarray_extent) # type: ignore\nxarray.DataArray.geobox = property(_xarray_geobox) # type: ignore\nxarray.DataArray.affine = property(_xarray_affine) # type: ignore\nxarray.DataArray.extent = property(_xarray_extent) # type: ignore\n", "path": "datacube/utils/xarray_geoextensions.py"}], "after_files": [{"content": "\"\"\"\nAdd geometric extensions to :class:`xarray.Dataset` and :class:`xarray.DataArray` for use\nwith Data Cube by Monkey Patching those classes.\n\nThis extension is reliant on an `xarray` object having a `.crs` property of type\n:class:`datacube.utils.geometry.CRS`. This is used to inspect the spatial dimensions of the\n:class:`Dataset` or :class:`DataArray`, and provide new attributes for accessing a\n:class:`datacube.utils.geometry.GeoBox`, affine transform and extent for the dataset as\n`.geobox`, `.affine` and `.extent` respectively.\n\n\"\"\"\nimport warnings\nimport xarray\nfrom datacube.utils import geometry, spatial_dims\nfrom datacube.utils.math import affine_from_axis\n\n\ndef _norm_crs(crs):\n if crs is None or isinstance(crs, geometry.CRS):\n return crs\n elif isinstance(crs, str):\n return geometry.CRS(crs)\n else:\n raise ValueError('Can not interpret {} as CRS'.format(type(crs)))\n\n\ndef _get_crs_from_attrs(obj, sdims):\n \"\"\" Looks for attribute named `crs` containing CRS string\n - Checks spatials coords attrs\n - Checks data variable attrs\n - Checks dataset attrs\n\n Returns\n =======\n Content for `.attrs[crs]` usually it's a string\n None if not present in any of the places listed above\n \"\"\"\n crs_set = set()\n\n def _add_candidate(crs):\n if crs is None:\n return\n if isinstance(crs, str):\n crs_set.add(crs)\n else:\n warnings.warn(f\"Ignoring crs attribute of type: {type(crs)}\")\n\n def process_attrs(attrs):\n _add_candidate(attrs.get('crs', None))\n _add_candidate(attrs.get('crs_wkt', None))\n\n def process_datavar(x):\n process_attrs(x.attrs)\n for dim in sdims:\n if dim in x.coords:\n process_attrs(x.coords[dim].attrs)\n\n if isinstance(obj, xarray.Dataset):\n process_attrs(obj.attrs)\n for dv in obj.data_vars.values():\n process_datavar(dv)\n else:\n process_datavar(obj)\n\n crs = None\n if len(crs_set) > 1:\n raise ValueError('Spatial dimensions have different crs.')\n elif len(crs_set) == 1:\n crs = crs_set.pop()\n\n return crs\n\n\ndef _get_crs_from_coord(obj, mode='strict'):\n \"\"\" Looks for dimensionless coordinate with `spatial_ref` attribute.\n\n obj: Dataset | DataArray\n mode: strict|any|all\n strict -- raise Error if multiple candidates\n any -- return first one\n all -- return a list of all found CRSs\n\n Returns\n =======\n None - if none found\n crs:str - if found one\n crs:str - if found several but mode is any\n\n (crs: str, crs: str) - if found several and mode=all\n \"\"\"\n grid_mapping = obj.attrs.get('grid_mapping', None)\n\n # First check CF convention \"pointer\"\n if grid_mapping is not None and grid_mapping in obj.coords:\n coord = obj.coords[grid_mapping]\n spatial_ref = coord.attrs.get('spatial_ref', 
None)\n if spatial_ref is not None:\n return spatial_ref\n else:\n raise ValueError(f\"Coordinate '{grid_mapping}' has no `spatial_ref` attribute\")\n\n # No explicit `grid_mapping` find some \"CRS\" coordinate\n candidates = tuple(coord.attrs['spatial_ref'] for coord in obj.coords.values()\n if coord.ndim == 0 and 'spatial_ref' in coord.attrs)\n\n if len(candidates) == 0:\n return None\n if len(candidates) == 1:\n return candidates[0]\n\n if mode == 'strict':\n raise ValueError(\"Too many candidates when looking for CRS\")\n elif mode == 'all':\n return candidates\n elif mode == 'any':\n return candidates[0]\n else:\n raise ValueError(f\"Mode needs to be: strict|any|all got {mode}\")\n\n\ndef _xarray_affine_impl(obj):\n sdims = spatial_dims(obj, relaxed=True)\n if sdims is None:\n return None, None\n\n yy, xx = (obj[dim] for dim in sdims)\n fallback_res = (coord.attrs.get('resolution', None) for coord in (xx, yy))\n\n return affine_from_axis(xx.values, yy.values, fallback_res), sdims\n\n\ndef _xarray_affine(obj):\n transform, _ = _xarray_affine_impl(obj)\n return transform\n\n\ndef _xarray_extent(obj):\n geobox = obj.geobox\n return None if geobox is None else geobox.extent\n\n\ndef _xarray_geobox(obj):\n transform, sdims = _xarray_affine_impl(obj)\n if sdims is None:\n return None\n\n crs = None\n try:\n crs = _get_crs_from_coord(obj)\n except ValueError:\n pass\n\n if crs is None:\n try:\n crs = _get_crs_from_attrs(obj, sdims)\n except ValueError:\n pass\n\n if crs is None:\n return None\n\n try:\n crs = _norm_crs(crs)\n except (ValueError, geometry.CRSError):\n warnings.warn(f\"Encountered malformed CRS: {crs}\")\n return None\n\n h, w = (obj.coords[dim].size for dim in sdims)\n\n return geometry.GeoBox(w, h, transform, crs)\n\n\nxarray.Dataset.geobox = property(_xarray_geobox) # type: ignore\nxarray.Dataset.affine = property(_xarray_affine) # type: ignore\nxarray.Dataset.extent = property(_xarray_extent) # type: ignore\nxarray.DataArray.geobox = property(_xarray_geobox) # type: ignore\nxarray.DataArray.affine = property(_xarray_affine) # type: ignore\nxarray.DataArray.extent = property(_xarray_extent) # type: ignore\n", "path": "datacube/utils/xarray_geoextensions.py"}]}
| 2,026 | 690 |
gh_patches_debug_13961
|
rasdani/github-patches
|
git_diff
|
jazzband__pip-tools-2083
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[TODO][CI][pip upstream changes] Fix failing nightlies running against `pip`'s `main` branch
Failure example: https://github.com/jazzband/pip-tools/actions/runs/8794562108/job/24134206791
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `piptools/exceptions.py`
Content:
```
1 from __future__ import annotations
2
3 from typing import Iterable
4
5 from pip._internal.index.package_finder import PackageFinder
6 from pip._internal.models.candidate import InstallationCandidate
7 from pip._internal.req import InstallRequirement
8 from pip._internal.utils.misc import redact_auth_from_url
9
10
11 class PipToolsError(Exception):
12 pass
13
14
15 class NoCandidateFound(PipToolsError):
16 def __init__(
17 self,
18 ireq: InstallRequirement,
19 candidates_tried: Iterable[InstallationCandidate],
20 finder: PackageFinder,
21 ) -> None:
22 self.ireq = ireq
23 self.candidates_tried = candidates_tried
24 self.finder = finder
25
26 def __str__(self) -> str:
27 versions = []
28 pre_versions = []
29
30 for candidate in sorted(self.candidates_tried):
31 version = str(candidate.version)
32 if candidate.version.is_prerelease:
33 pre_versions.append(version)
34 else:
35 versions.append(version)
36
37 lines = [f"Could not find a version that matches {self.ireq}"]
38
39 if versions:
40 lines.append(f"Tried: {', '.join(versions)}")
41
42 if pre_versions:
43 if self.finder.allow_all_prereleases:
44 line = "Tried"
45 else:
46 line = "Skipped"
47
48 line += f" pre-versions: {', '.join(pre_versions)}"
49 lines.append(line)
50
51 if versions or pre_versions:
52 lines.append(
53 "There are incompatible versions in the resolved dependencies:"
54 )
55 source_ireqs = getattr(self.ireq, "_source_ireqs", [])
56 lines.extend(f" {ireq}" for ireq in source_ireqs)
57 else:
58 redacted_urls = tuple(
59 redact_auth_from_url(url) for url in self.finder.index_urls
60 )
61 lines.append("No versions found")
62 lines.append(
63 "{} {} reachable?".format(
64 "Were" if len(redacted_urls) > 1 else "Was",
65 " or ".join(redacted_urls),
66 )
67 )
68 return "\n".join(lines)
69
70
71 class IncompatibleRequirements(PipToolsError):
72 def __init__(self, ireq_a: InstallRequirement, ireq_b: InstallRequirement) -> None:
73 self.ireq_a = ireq_a
74 self.ireq_b = ireq_b
75
76 def __str__(self) -> str:
77 message = "Incompatible requirements found: {} and {}"
78 return message.format(self.ireq_a, self.ireq_b)
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/piptools/exceptions.py b/piptools/exceptions.py
--- a/piptools/exceptions.py
+++ b/piptools/exceptions.py
@@ -1,5 +1,6 @@
from __future__ import annotations
+import operator
from typing import Iterable
from pip._internal.index.package_finder import PackageFinder
@@ -27,7 +28,9 @@
versions = []
pre_versions = []
- for candidate in sorted(self.candidates_tried):
+ for candidate in sorted(
+ self.candidates_tried, key=operator.attrgetter("version")
+ ):
version = str(candidate.version)
if candidate.version.is_prerelease:
pre_versions.append(version)
|
{"golden_diff": "diff --git a/piptools/exceptions.py b/piptools/exceptions.py\n--- a/piptools/exceptions.py\n+++ b/piptools/exceptions.py\n@@ -1,5 +1,6 @@\n from __future__ import annotations\n \n+import operator\n from typing import Iterable\n \n from pip._internal.index.package_finder import PackageFinder\n@@ -27,7 +28,9 @@\n versions = []\n pre_versions = []\n \n- for candidate in sorted(self.candidates_tried):\n+ for candidate in sorted(\n+ self.candidates_tried, key=operator.attrgetter(\"version\")\n+ ):\n version = str(candidate.version)\n if candidate.version.is_prerelease:\n pre_versions.append(version)\n", "issue": "[TODO][CI][pip upstream changes] Fix failing nightlies running against `pip`'s `main` branch\nFailure example: https://github.com/jazzband/pip-tools/actions/runs/8794562108/job/24134206791\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Iterable\n\nfrom pip._internal.index.package_finder import PackageFinder\nfrom pip._internal.models.candidate import InstallationCandidate\nfrom pip._internal.req import InstallRequirement\nfrom pip._internal.utils.misc import redact_auth_from_url\n\n\nclass PipToolsError(Exception):\n pass\n\n\nclass NoCandidateFound(PipToolsError):\n def __init__(\n self,\n ireq: InstallRequirement,\n candidates_tried: Iterable[InstallationCandidate],\n finder: PackageFinder,\n ) -> None:\n self.ireq = ireq\n self.candidates_tried = candidates_tried\n self.finder = finder\n\n def __str__(self) -> str:\n versions = []\n pre_versions = []\n\n for candidate in sorted(self.candidates_tried):\n version = str(candidate.version)\n if candidate.version.is_prerelease:\n pre_versions.append(version)\n else:\n versions.append(version)\n\n lines = [f\"Could not find a version that matches {self.ireq}\"]\n\n if versions:\n lines.append(f\"Tried: {', '.join(versions)}\")\n\n if pre_versions:\n if self.finder.allow_all_prereleases:\n line = \"Tried\"\n else:\n line = \"Skipped\"\n\n line += f\" pre-versions: {', '.join(pre_versions)}\"\n lines.append(line)\n\n if versions or pre_versions:\n lines.append(\n \"There are incompatible versions in the resolved dependencies:\"\n )\n source_ireqs = getattr(self.ireq, \"_source_ireqs\", [])\n lines.extend(f\" {ireq}\" for ireq in source_ireqs)\n else:\n redacted_urls = tuple(\n redact_auth_from_url(url) for url in self.finder.index_urls\n )\n lines.append(\"No versions found\")\n lines.append(\n \"{} {} reachable?\".format(\n \"Were\" if len(redacted_urls) > 1 else \"Was\",\n \" or \".join(redacted_urls),\n )\n )\n return \"\\n\".join(lines)\n\n\nclass IncompatibleRequirements(PipToolsError):\n def __init__(self, ireq_a: InstallRequirement, ireq_b: InstallRequirement) -> None:\n self.ireq_a = ireq_a\n self.ireq_b = ireq_b\n\n def __str__(self) -> str:\n message = \"Incompatible requirements found: {} and {}\"\n return message.format(self.ireq_a, self.ireq_b)\n", "path": "piptools/exceptions.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport operator\nfrom typing import Iterable\n\nfrom pip._internal.index.package_finder import PackageFinder\nfrom pip._internal.models.candidate import InstallationCandidate\nfrom pip._internal.req import InstallRequirement\nfrom pip._internal.utils.misc import redact_auth_from_url\n\n\nclass PipToolsError(Exception):\n pass\n\n\nclass NoCandidateFound(PipToolsError):\n def __init__(\n self,\n ireq: InstallRequirement,\n candidates_tried: Iterable[InstallationCandidate],\n finder: PackageFinder,\n ) -> None:\n self.ireq = 
ireq\n self.candidates_tried = candidates_tried\n self.finder = finder\n\n def __str__(self) -> str:\n versions = []\n pre_versions = []\n\n for candidate in sorted(\n self.candidates_tried, key=operator.attrgetter(\"version\")\n ):\n version = str(candidate.version)\n if candidate.version.is_prerelease:\n pre_versions.append(version)\n else:\n versions.append(version)\n\n lines = [f\"Could not find a version that matches {self.ireq}\"]\n\n if versions:\n lines.append(f\"Tried: {', '.join(versions)}\")\n\n if pre_versions:\n if self.finder.allow_all_prereleases:\n line = \"Tried\"\n else:\n line = \"Skipped\"\n\n line += f\" pre-versions: {', '.join(pre_versions)}\"\n lines.append(line)\n\n if versions or pre_versions:\n lines.append(\n \"There are incompatible versions in the resolved dependencies:\"\n )\n source_ireqs = getattr(self.ireq, \"_source_ireqs\", [])\n lines.extend(f\" {ireq}\" for ireq in source_ireqs)\n else:\n redacted_urls = tuple(\n redact_auth_from_url(url) for url in self.finder.index_urls\n )\n lines.append(\"No versions found\")\n lines.append(\n \"{} {} reachable?\".format(\n \"Were\" if len(redacted_urls) > 1 else \"Was\",\n \" or \".join(redacted_urls),\n )\n )\n return \"\\n\".join(lines)\n\n\nclass IncompatibleRequirements(PipToolsError):\n def __init__(self, ireq_a: InstallRequirement, ireq_b: InstallRequirement) -> None:\n self.ireq_a = ireq_a\n self.ireq_b = ireq_b\n\n def __str__(self) -> str:\n message = \"Incompatible requirements found: {} and {}\"\n return message.format(self.ireq_a, self.ireq_b)\n", "path": "piptools/exceptions.py"}]}
| 1,024 | 155 |
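The one-line fix in the golden diff above sorts by the `version` attribute instead of relying on the candidate objects being orderable themselves (which the failing nightlies against pip's `main` suggest they no longer are). The snippet below reproduces the situation with a stand-in `Candidate` class — not pip's `InstallationCandidate` — and shows how the key-based sort sidesteps the `TypeError`.

```python
import operator
from dataclasses import dataclass


@dataclass
class Candidate:  # stand-in class with no ordering defined, unlike a plain tuple
    name: str
    version: tuple  # the real object carries a packaging.version.Version


candidates = [Candidate("pkg", (2, 0)), Candidate("pkg", (1, 4)), Candidate("pkg", (1, 10))]

# sorted(candidates) would raise:
#   TypeError: '<' not supported between instances of 'Candidate' and 'Candidate'
ordered = sorted(candidates, key=operator.attrgetter("version"))
print([c.version for c in ordered])  # [(1, 4), (1, 10), (2, 0)]
```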
gh_patches_debug_32237
|
rasdani/github-patches
|
git_diff
|
dmlc__dgl-5059
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Sparse] Create a mock implementation in mock_sparse for BSDDMM.
## 🔨Work Item
**IMPORTANT:**
* This template is only for dev team to track project progress. For feature request or bug report, please use the corresponding issue templates.
* DO NOT create a new work item if the purpose is to fix an existing issue or feature request. We will directly use the issue in the project tracker.
Project tracker: https://github.com/orgs/dmlc/projects/2
## Description
<!-- short description of the work item -->
## Depending work items or issues
<!-- what must be done before this -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/dgl/mock_sparse/sddmm.py`
Content:
```
1 """Sampled Dense-Dense Matrix Multiplication (SDDMM) operator module."""
2 import torch
3
4 from .sp_matrix import SparseMatrix
5
6 __all__ = ["sddmm"]
7
8
9 def sddmm(
10 A: SparseMatrix, mat1: torch.Tensor, mat2: torch.Tensor
11 ) -> SparseMatrix:
12 r"""Sampled-Dense-Dense Matrix Multiplication (SDDMM).
13
14 ``sddmm`` multiplies two dense matrices :attr:``mat1`` and :attr:``mat2``
15 at the nonzero locations of sparse matrix :attr:``A``. Values of :attr:``A``
16 is added to the resulting matrix.
17
18 Mathematically ``sddmm`` is formulated as:
19
20 .. math::
21 out = (mat1 @ mat2) * spy(A) + A
22
23 Parameters
24 ----------
25 A : SparseMatrix
26 Sparse matrix of shape `(M, N)`.
27 mat1 : Tensor
28 Dense matrix of shape `(M, K)`
29 mat2 : Tensor
30 Dense matrix of shape `(K, N)`
31
32 Returns
33 -------
34 SparseMatrix
35 Sparse matrix of shape `(M, N)`.
36
37 Examples
38 --------
39
40 >>> row = torch.Tensor([1, 1, 2])
41 >>> col = torch.Tensor([2, 3, 3])
42 >>> val = torch.arange(1, 4).float()
43 >>> A = SparseMatrix(row, col, val, (3, 4))
44 >>> mat1 = torch.randn(3, 5)
45 >>> mat2 = torch.randn(5, 4)
46 >>> dgl.mock_sparse.sddmm(A, mat1, mat2)
47 SparseMatrix(indices=tensor([[1, 1, 2],
48 [2, 3, 3]]),
49 values=tensor([1.8035, 2.3375, 3.1255]),
50 shape=(3, 4), nnz=3)
51 """
52 assert A.val.dim() == 1, (
53 f"Nonzero elements have values of shape ({A.val.shape[1]}). Expects "
54 "scalar values. "
55 )
56 # PyTorch's sddmm operator only supports CSR format.
57 res = torch.sparse.sampled_addmm(A.adj.to_sparse_csr(), mat1, mat2)
58 return SparseMatrix(A.row, A.col, res.values(), A.adj.shape)
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/python/dgl/mock_sparse/sddmm.py b/python/dgl/mock_sparse/sddmm.py
--- a/python/dgl/mock_sparse/sddmm.py
+++ b/python/dgl/mock_sparse/sddmm.py
@@ -1,9 +1,9 @@
"""Sampled Dense-Dense Matrix Multiplication (SDDMM) operator module."""
import torch
-from .sp_matrix import SparseMatrix
+from .sp_matrix import create_from_coo, SparseMatrix
-__all__ = ["sddmm"]
+__all__ = ["sddmm", "mock_bsddmm"]
def sddmm(
@@ -56,3 +56,56 @@
# PyTorch's sddmm operator only supports CSR format.
res = torch.sparse.sampled_addmm(A.adj.to_sparse_csr(), mat1, mat2)
return SparseMatrix(A.row, A.col, res.values(), A.adj.shape)
+
+
+def mock_bsddmm(
+ A: SparseMatrix, mat1: torch.Tensor, mat2: torch.Tensor
+) -> SparseMatrix:
+ r"""Batched Sampled-Dense-Dense Matrix Multiplication (SDDMM).
+
+ ``bsddmm`` conducts `sddmm` for each batch of the two dense matrices
+ independently.
+
+ In particular, :attr:``mat1`` and :attr:``mat2`` can be 2-D, which will be
+ reshape as `(B, M, 1)` and `(B, 1, K)` in the computation.
+
+ Parameters
+ ----------
+ A : SparseMatrix
+ Sparse matrix of shape `(M, N)`.
+ mat1 : Tensor
+ Dense matrix of shape `(B, M, K)` or `(B, M,)`
+ mat2 : Tensor
+ Dense matrix of shape `(B, K, N)` or `(B, K,)`
+
+ Returns
+ -------
+ SparseMatrix
+ Sparse matrix of shape `(M, N)` with non-zero values of `B` dimension.
+
+ Examples
+ --------
+
+ >>> row = torch.tensor([1, 1, 2])
+ >>> col = torch.tensor([2, 3, 3])
+ >>> val = torch.arange(1, 4).float()
+ >>> A = create_from_coo(row, col, val, (3, 4))
+ >>> mat1 = torch.randn(2, 3, 5)
+ >>> mat2 = torch.randn(2, 5, 4)
+ >>> dgl.mock_sparse.mock_bsddmm(A, mat1, mat2)
+ SparseMatrix(indices=tensor([[1, 1, 2],
+ [2, 3, 3]]),
+ values=tensor([[-0.6765, -0.4017],
+ [ 3.3290, 6.9016],
+ [ 4.8184, 5.8882]]),
+ shape=(3, 4), nnz=3)
+ """
+ batch_mat1 = [mat1[i, ...] for i in range(mat1.shape[0])]
+ batch_mat2 = [mat2[i, ...] for i in range(mat2.shape[0])]
+ batch_ret = [sddmm(A, lhs, rhs) for lhs, rhs in zip(batch_mat1, batch_mat2)]
+ return create_from_coo(
+ row=A.row,
+ col=A.col,
+ val=torch.stack([sp_mat.val for sp_mat in batch_ret], dim=-1),
+ shape=A.shape,
+ )
|
{"golden_diff": "diff --git a/python/dgl/mock_sparse/sddmm.py b/python/dgl/mock_sparse/sddmm.py\n--- a/python/dgl/mock_sparse/sddmm.py\n+++ b/python/dgl/mock_sparse/sddmm.py\n@@ -1,9 +1,9 @@\n \"\"\"Sampled Dense-Dense Matrix Multiplication (SDDMM) operator module.\"\"\"\n import torch\n \n-from .sp_matrix import SparseMatrix\n+from .sp_matrix import create_from_coo, SparseMatrix\n \n-__all__ = [\"sddmm\"]\n+__all__ = [\"sddmm\", \"mock_bsddmm\"]\n \n \n def sddmm(\n@@ -56,3 +56,56 @@\n # PyTorch's sddmm operator only supports CSR format.\n res = torch.sparse.sampled_addmm(A.adj.to_sparse_csr(), mat1, mat2)\n return SparseMatrix(A.row, A.col, res.values(), A.adj.shape)\n+\n+\n+def mock_bsddmm(\n+ A: SparseMatrix, mat1: torch.Tensor, mat2: torch.Tensor\n+) -> SparseMatrix:\n+ r\"\"\"Batched Sampled-Dense-Dense Matrix Multiplication (SDDMM).\n+\n+ ``bsddmm`` conducts `sddmm` for each batch of the two dense matrices\n+ independently.\n+\n+ In particular, :attr:``mat1`` and :attr:``mat2`` can be 2-D, which will be\n+ reshape as `(B, M, 1)` and `(B, 1, K)` in the computation.\n+\n+ Parameters\n+ ----------\n+ A : SparseMatrix\n+ Sparse matrix of shape `(M, N)`.\n+ mat1 : Tensor\n+ Dense matrix of shape `(B, M, K)` or `(B, M,)`\n+ mat2 : Tensor\n+ Dense matrix of shape `(B, K, N)` or `(B, K,)`\n+\n+ Returns\n+ -------\n+ SparseMatrix\n+ Sparse matrix of shape `(M, N)` with non-zero values of `B` dimension.\n+\n+ Examples\n+ --------\n+\n+ >>> row = torch.tensor([1, 1, 2])\n+ >>> col = torch.tensor([2, 3, 3])\n+ >>> val = torch.arange(1, 4).float()\n+ >>> A = create_from_coo(row, col, val, (3, 4))\n+ >>> mat1 = torch.randn(2, 3, 5)\n+ >>> mat2 = torch.randn(2, 5, 4)\n+ >>> dgl.mock_sparse.mock_bsddmm(A, mat1, mat2)\n+ SparseMatrix(indices=tensor([[1, 1, 2],\n+ [2, 3, 3]]),\n+ values=tensor([[-0.6765, -0.4017],\n+ [ 3.3290, 6.9016],\n+ [ 4.8184, 5.8882]]),\n+ shape=(3, 4), nnz=3)\n+ \"\"\"\n+ batch_mat1 = [mat1[i, ...] for i in range(mat1.shape[0])]\n+ batch_mat2 = [mat2[i, ...] for i in range(mat2.shape[0])]\n+ batch_ret = [sddmm(A, lhs, rhs) for lhs, rhs in zip(batch_mat1, batch_mat2)]\n+ return create_from_coo(\n+ row=A.row,\n+ col=A.col,\n+ val=torch.stack([sp_mat.val for sp_mat in batch_ret], dim=-1),\n+ shape=A.shape,\n+ )\n", "issue": "[Sparse] Create a mock implementation in mock_sparse for BSDDMM.\n## \ud83d\udd28Work Item\r\n\r\n**IMPORTANT:**\r\n* This template is only for dev team to track project progress. For feature request or bug report, please use the corresponding issue templates.\r\n* DO NOT create a new work item if the purpose is to fix an existing issue or feature request. We will directly use the issue in the project tracker.\r\n\r\nProject tracker: https://github.com/orgs/dmlc/projects/2\r\n\r\n## Description\r\n\r\n<!-- short description of the work item -->\r\n\r\n## Depending work items or issues\r\n\r\n<!-- what must be done before this -->\r\n\n", "before_files": [{"content": "\"\"\"Sampled Dense-Dense Matrix Multiplication (SDDMM) operator module.\"\"\"\nimport torch\n\nfrom .sp_matrix import SparseMatrix\n\n__all__ = [\"sddmm\"]\n\n\ndef sddmm(\n A: SparseMatrix, mat1: torch.Tensor, mat2: torch.Tensor\n) -> SparseMatrix:\n r\"\"\"Sampled-Dense-Dense Matrix Multiplication (SDDMM).\n\n ``sddmm`` multiplies two dense matrices :attr:``mat1`` and :attr:``mat2``\n at the nonzero locations of sparse matrix :attr:``A``. Values of :attr:``A``\n is added to the resulting matrix.\n\n Mathematically ``sddmm`` is formulated as:\n\n .. 
math::\n out = (mat1 @ mat2) * spy(A) + A\n\n Parameters\n ----------\n A : SparseMatrix\n Sparse matrix of shape `(M, N)`.\n mat1 : Tensor\n Dense matrix of shape `(M, K)`\n mat2 : Tensor\n Dense matrix of shape `(K, N)`\n\n Returns\n -------\n SparseMatrix\n Sparse matrix of shape `(M, N)`.\n\n Examples\n --------\n\n >>> row = torch.Tensor([1, 1, 2])\n >>> col = torch.Tensor([2, 3, 3])\n >>> val = torch.arange(1, 4).float()\n >>> A = SparseMatrix(row, col, val, (3, 4))\n >>> mat1 = torch.randn(3, 5)\n >>> mat2 = torch.randn(5, 4)\n >>> dgl.mock_sparse.sddmm(A, mat1, mat2)\n SparseMatrix(indices=tensor([[1, 1, 2],\n [2, 3, 3]]),\n values=tensor([1.8035, 2.3375, 3.1255]),\n shape=(3, 4), nnz=3)\n \"\"\"\n assert A.val.dim() == 1, (\n f\"Nonzero elements have values of shape ({A.val.shape[1]}). Expects \"\n \"scalar values. \"\n )\n # PyTorch's sddmm operator only supports CSR format.\n res = torch.sparse.sampled_addmm(A.adj.to_sparse_csr(), mat1, mat2)\n return SparseMatrix(A.row, A.col, res.values(), A.adj.shape)\n", "path": "python/dgl/mock_sparse/sddmm.py"}], "after_files": [{"content": "\"\"\"Sampled Dense-Dense Matrix Multiplication (SDDMM) operator module.\"\"\"\nimport torch\n\nfrom .sp_matrix import create_from_coo, SparseMatrix\n\n__all__ = [\"sddmm\", \"mock_bsddmm\"]\n\n\ndef sddmm(\n A: SparseMatrix, mat1: torch.Tensor, mat2: torch.Tensor\n) -> SparseMatrix:\n r\"\"\"Sampled-Dense-Dense Matrix Multiplication (SDDMM).\n\n ``sddmm`` multiplies two dense matrices :attr:``mat1`` and :attr:``mat2``\n at the nonzero locations of sparse matrix :attr:``A``. Values of :attr:``A``\n is added to the resulting matrix.\n\n Mathematically ``sddmm`` is formulated as:\n\n .. math::\n out = (mat1 @ mat2) * spy(A) + A\n\n Parameters\n ----------\n A : SparseMatrix\n Sparse matrix of shape `(M, N)`.\n mat1 : Tensor\n Dense matrix of shape `(M, K)`\n mat2 : Tensor\n Dense matrix of shape `(K, N)`\n\n Returns\n -------\n SparseMatrix\n Sparse matrix of shape `(M, N)`.\n\n Examples\n --------\n\n >>> row = torch.Tensor([1, 1, 2])\n >>> col = torch.Tensor([2, 3, 3])\n >>> val = torch.arange(1, 4).float()\n >>> A = SparseMatrix(row, col, val, (3, 4))\n >>> mat1 = torch.randn(3, 5)\n >>> mat2 = torch.randn(5, 4)\n >>> dgl.mock_sparse.sddmm(A, mat1, mat2)\n SparseMatrix(indices=tensor([[1, 1, 2],\n [2, 3, 3]]),\n values=tensor([1.8035, 2.3375, 3.1255]),\n shape=(3, 4), nnz=3)\n \"\"\"\n assert A.val.dim() == 1, (\n f\"Nonzero elements have values of shape ({A.val.shape[1]}). Expects \"\n \"scalar values. 
\"\n )\n # PyTorch's sddmm operator only supports CSR format.\n res = torch.sparse.sampled_addmm(A.adj.to_sparse_csr(), mat1, mat2)\n return SparseMatrix(A.row, A.col, res.values(), A.adj.shape)\n\n\ndef mock_bsddmm(\n A: SparseMatrix, mat1: torch.Tensor, mat2: torch.Tensor\n) -> SparseMatrix:\n r\"\"\"Batched Sampled-Dense-Dense Matrix Multiplication (SDDMM).\n\n ``bsddmm`` conducts `sddmm` for each batch of the two dense matrices\n independently.\n\n In particular, :attr:``mat1`` and :attr:``mat2`` can be 2-D, which will be\n reshape as `(B, M, 1)` and `(B, 1, K)` in the computation.\n\n Parameters\n ----------\n A : SparseMatrix\n Sparse matrix of shape `(M, N)`.\n mat1 : Tensor\n Dense matrix of shape `(B, M, K)` or `(B, M,)`\n mat2 : Tensor\n Dense matrix of shape `(B, K, N)` or `(B, K,)`\n\n Returns\n -------\n SparseMatrix\n Sparse matrix of shape `(M, N)` with non-zero values of `B` dimension.\n\n Examples\n --------\n\n >>> row = torch.tensor([1, 1, 2])\n >>> col = torch.tensor([2, 3, 3])\n >>> val = torch.arange(1, 4).float()\n >>> A = create_from_coo(row, col, val, (3, 4))\n >>> mat1 = torch.randn(2, 3, 5)\n >>> mat2 = torch.randn(2, 5, 4)\n >>> dgl.mock_sparse.mock_bsddmm(A, mat1, mat2)\n SparseMatrix(indices=tensor([[1, 1, 2],\n [2, 3, 3]]),\n values=tensor([[-0.6765, -0.4017],\n [ 3.3290, 6.9016],\n [ 4.8184, 5.8882]]),\n shape=(3, 4), nnz=3)\n \"\"\"\n batch_mat1 = [mat1[i, ...] for i in range(mat1.shape[0])]\n batch_mat2 = [mat2[i, ...] for i in range(mat2.shape[0])]\n batch_ret = [sddmm(A, lhs, rhs) for lhs, rhs in zip(batch_mat1, batch_mat2)]\n return create_from_coo(\n row=A.row,\n col=A.col,\n val=torch.stack([sp_mat.val for sp_mat in batch_ret], dim=-1),\n shape=A.shape,\n )\n", "path": "python/dgl/mock_sparse/sddmm.py"}]}
| 1,031 | 812 |
gh_patches_debug_26211 | rasdani/github-patches | git_diff | pytorch__pytorch-111151 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Sharded checkpointing fails on load for certain tensor sizes
### 🐛 Describe the bug
Sharded checkpointing (particularly with FSDP for the optimizer state) uses ChunkShardingSpec to save/load tensors. ChunkShardingSpec's behavior is similar to torch.chunk and will result in some chunks of size 0.
This can be reproduced by trying to save a tensor of size 6 with 4 gpus. This tensor is sharded across the first 3 gpus. The resulting size of the chunks will look like [2, 2, 2, 0]. On save, it seems like ChunkShardingSpec is aware of which gpus contain shards, so it saves the tensor with shard metadata showing the size of chunks to be [2, 2, 2].
The problem occurs when attempting to load the sharded checkpoint. ChunkShardingSpec attempts to rebuild the metadata, this time being unaware of how many gpus originally contained shards. It knows that there is a tensor of size 6 and 4 gpus though, so it generates shard metadata with chunk sizes [2, 2, 2], [skipping the last gpu since it has size 0](https://github.com/pytorch/pytorch/blob/ddbaad6d746d34b56f2d2cc0e02d7e0d0301e626/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py#L94). Then when attempting to shard the tensor, the 4th gpu has no shard metadata, so a [local_tensor is never created](https://github.com/pytorch/pytorch/blob/ddbaad6d746d34b56f2d2cc0e02d7e0d0301e626/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py#L165), resulting in an [assertion error on the 4th rank](https://github.com/pytorch/pytorch/blob/ddbaad6d746d34b56f2d2cc0e02d7e0d0301e626/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py#L172) and a [type error on all other ranks](https://github.com/pytorch/pytorch/blob/ddbaad6d746d34b56f2d2cc0e02d7e0d0301e626/torch/distributed/distributed_c10d.py#L808) because None is contained in the scatter_list.
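For context, a small illustration (editor's sketch, not code from the repository) of the chunking arithmetic described above for a size-6 tensor over 4 ranks:
```python
import math
import torch

t = torch.arange(6)
# torch.chunk only returns the non-empty chunks, matching the saved metadata [2, 2, 2].
print([c.numel() for c in torch.chunk(t, 4)])                   # [2, 2, 2]

# A ceil-based split that reserves one slot per rank leaves the last rank empty.
split = math.ceil(6 / 4)                                        # 2
print([max(min(split, 6 - split * i), 0) for i in range(4)])    # [2, 2, 2, 0]
```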
There are a couple possible solutions to this.
1. Add shard metadata for all gpus, allowing a shard to have size 0
2. Change ChunkShardingSpec to distribute a tensor evenly across gpus (e.g. [2, 2, 1, 1] instead of [2, 2, 2, 0])
I've implemented and tested both solutions and both are backwards compatible with previously saved sharded checkpoints on versions 2.0.1, 2.1.0-rc3, and 8/27 nightly (e.g. save on 2.0.1 with the old ChunkShardingSpec and load on 2.0.1 with the new ChunkShardingSpec). Both solutions are also cross-version compatible for 2.0.1->2.1.0-rc3, 2.0.1->8/27 nightly, and 8/27 nightly->2.0.1 (e.g. save on 2.0.1 with the old ChunkShardingSpec and load on 2.1.0 with the new ChunkShardingSpec). The solutions might be version/cross-version compatible for other combinations, but I haven't tested those.
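For comparison, a minimal sketch of option 2 above (even distribution across ranks); this illustrates the alternative only and is not the change that was adopted:
```python
def even_chunk_sizes(dim_size: int, ranks: int) -> list:
    # Spread the remainder over the first `dim_size % ranks` ranks.
    base, extra = divmod(dim_size, ranks)
    return [base + (1 if i < extra else 0) for i in range(ranks)]

print(even_chunk_sizes(6, 4))   # [2, 2, 1, 1] -- every rank holds a non-empty shard
```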
### Versions
This happens with pytorch 2.0.1, 2.1.0-rc3, and 8/27 nightly.
cc @mrshenli @pritamdamania87 @zhaojuanmao @satgera @rohan-varma @gqchen @aazzolini @osalpekar @jiayisuse @H-Huang @kwen2501 @awgu @penguinwu
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py`
Content:
```
1 from dataclasses import dataclass
2 import torch
3 import torch.distributed._shard.sharded_tensor.metadata as sharded_tensor_meta
4 from torch.distributed._shard.metadata import ShardMetadata
5 from torch.distributed._shard.sharded_tensor.shard import Shard
6 from torch.distributed._shard.sharded_tensor.utils import (
7 _parse_and_validate_remote_device
8 )
9 from torch.distributed._shard._utils import narrow_tensor
10 import torch.distributed as dist
11 import torch.distributed.distributed_c10d as distributed_c10d
12 from typing import List, Union, TYPE_CHECKING
13 from ._internals import (
14 get_chunked_dim_size,
15 get_split_size,
16 )
17
18 from .api import ShardingSpec
19
20 if TYPE_CHECKING:
21 # Only include ShardedTensor when do type checking, exclude it
22 # from run-time to resolve circular dependency.
23 from torch.distributed._shard.sharded_tensor import ShardedTensor
24
25 @dataclass
26 class ChunkShardingSpec(ShardingSpec):
27 """
28 This is a type of PlacementSpec that defines the placement as being sharded
29 across multiple devices. In particular, it represents sharding a Tensor
30 along a single dimension into equal chunks (similar to :meth:`torch.chunk`).
31
32 The semantics of how a tensor is partitioned is inline with
33 :meth:`torch.chunk`, where ``dim`` in torch.chunk corresponds to the
34 specified ``dim`` and ``chunks`` in torch.chunk is the number of elements
35 in the placement specified.
36
37 Args:
38 dim (int or str):
39 The dimension to shard on, could be an integer representing the
40 dimension or a string in case of named tensors where dimensions are
41 named. Note that named tensor support is not added yet.
42 placement(List[Union[_remote_device, str]]):
43 Specifies the placement of each shard of the Tensor. The size of
44 the list represents the number of shards to be created. This could
45 be a list of
46 :class:`torch.distributed._remote_device`'s. This list
47 could also contain a string which represents remote
48 device as accepted by
49 :class:`torch.distributed._remote_device`
50 """
51
52 ShardingDim = Union[int, str]
53
54 dim: ShardingDim
55 placements: List[Union[torch.distributed._remote_device, str]]
56
57 def __post_init__(self):
58 self._verify_dim(self.dim)
59 for i, remote_device in enumerate(self.placements):
60 if not isinstance(remote_device, torch.distributed._remote_device):
61 self.placements[i] = torch.distributed._remote_device(remote_device)
62
63 @staticmethod
64 def _verify_dim(dim):
65 # Validate the sharding spec.
66 # TODO: support named dimension
67 if isinstance(dim, str):
68 raise NotImplementedError(
69 "ChunkShardingSpec does not support named dimension yet!"
70 )
71
72 if not isinstance(dim, int):
73 raise ValueError(
74 f"Sharding dim needs to be an integer, found: {dim}"
75 )
76
77 def build_metadata(self,
78 tensor_sizes: torch.Size,
79 tensor_properties: sharded_tensor_meta.TensorProperties,
80 ) -> sharded_tensor_meta.ShardedTensorMetadata:
81 tensor_num_dim = len(tensor_sizes)
82
83 self._verify_dim(self.dim)
84 if self.dim >= tensor_num_dim or self.dim < -tensor_num_dim: # type: ignore[operator]
85 raise ValueError(f"Invalid sharding dim: {self.dim}")
86
87 shards_metadata = []
88 sharding_dim_size = tensor_sizes[self.dim] # type: ignore[index]
89 chunks = len(self.placements)
90 split_size = get_split_size(sharding_dim_size, chunks)
91 for idx, placement in enumerate(self.placements):
92 # generate ShardMetadata for each placement device
93 chunked_dim_size = get_chunked_dim_size(sharding_dim_size, split_size, idx)
94 if chunked_dim_size > 0:
95 shard_size = list(tensor_sizes)
96 current_offsets = [0] * tensor_num_dim
97 current_offsets[self.dim] = split_size * idx # type: ignore[index]
98 shard_size[self.dim] = chunked_dim_size # type: ignore[index]
99
100 shard_metadata = ShardMetadata(
101 shard_offsets=current_offsets,
102 shard_sizes=shard_size,
103 placement=placement,
104 )
105 shards_metadata.append(shard_metadata)
106
107 # current_offsets[self.dim] += chunked_dim_size # type: ignore[index]
108
109 return sharded_tensor_meta.ShardedTensorMetadata(
110 shards_metadata,
111 tensor_sizes,
112 tensor_properties
113 )
114
115
116 def shard(self, tensor: torch.Tensor, src_rank: int = 0, process_group=None) -> "ShardedTensor":
117 """
118 Args:
119 src_rank: group rank relative to ``process_group``
120
121 N.B. If ``process_group`` is None, ``src_rank`` is a global rank.
122 """
123 # relative imports to avoid circular dependency
124 from torch.distributed._shard.sharded_tensor import (
125 ShardedTensor
126 )
127 tensor_properties = sharded_tensor_meta.TensorProperties(
128 dtype=tensor.dtype,
129 layout=tensor.layout,
130 requires_grad=tensor.requires_grad,
131 memory_format=torch.contiguous_format,
132 pin_memory=tensor.is_pinned()
133 )
134 current_rank = dist.get_rank(process_group)
135 tensor_meta = self.build_metadata(tensor.size(), tensor_properties)
136 local_shards = []
137 local_tensor = None
138 local_metadata = None
139 tensors_to_scatter = [None] * dist.get_world_size(process_group)
140
141 sharding_dim_size = tensor.size()[self.dim] # type: ignore[index]
142 chunks = len(self.placements)
143 split_size = get_split_size(sharding_dim_size, chunks)
144 scatter_shape = list(tensor.size())
145 scatter_shape[self.dim] = split_size # type: ignore[index]
146
147 for shard_meta in tensor_meta.shards_metadata:
148 rank, device = _parse_and_validate_remote_device(process_group, shard_meta.placement)
149 if current_rank == src_rank:
150 # Reshape to get shard for this rank and we don't want autograd
151 # recording here for the narrow op and 'local_shard' should be a
152 # leaf variable in the autograd graph.
153 narrowed_tensor = narrow_tensor(tensor, shard_meta)
154 if shard_meta.shard_sizes[self.dim] < split_size: # type: ignore[index]
155 # for the last shard that might be smaller to other shards
156 # resize the narrowed tensor to the same size and use it for
157 # the scatter collective as dist.scatter requires same size
158 # inputs on every rank
159 tensor_to_scatter = narrowed_tensor.detach().clone().resize_(scatter_shape)
160 else:
161 tensor_to_scatter = narrowed_tensor.detach().clone().contiguous()
162
163 tensors_to_scatter[rank] = tensor_to_scatter
164
165 if current_rank == rank:
166 local_tensor = torch.empty(
167 scatter_shape, dtype=tensor.dtype, layout=tensor.layout, device=device)
168 local_metadata = shard_meta
169
170 # each rank should have local_tensor and local_metadata initialized if we build
171 # the metadata list in a correct way.
172 assert local_tensor is not None
173 assert local_metadata is not None
174
175 # Scatter the shards to all ranks in the pg
176 # scatter takes the global rank as ``src``
177 src_for_scatter = src_rank
178 if process_group is not None and process_group is not distributed_c10d._get_default_group():
179 src_for_scatter = distributed_c10d.get_global_rank(process_group, src_for_scatter)
180
181 dist.scatter(
182 local_tensor,
183 scatter_list=tensors_to_scatter if current_rank == src_rank else None,
184 src=src_for_scatter,
185 group=process_group
186 )
187
188 if list(local_tensor.size()) != local_metadata.shard_sizes:
189 # detach again after receiving to ensure local shards remain a leaf node
190 local_tensor = local_tensor.resize_(local_metadata.shard_sizes).detach()
191
192 # Sync requires_grad to local_shard.
193 local_tensor.requires_grad = tensor.requires_grad
194
195 local_shards.append(Shard(tensor=local_tensor, metadata=local_metadata))
196
197 st = ShardedTensor._init_from_local_shards_and_global_metadata(
198 local_shards,
199 tensor_meta,
200 process_group=process_group)
201
202 # Manually set sharding_spec
203 st._sharding_spec = self
204
205 return st
206
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py b/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py
--- a/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py
+++ b/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py
@@ -91,20 +91,17 @@
for idx, placement in enumerate(self.placements):
# generate ShardMetadata for each placement device
chunked_dim_size = get_chunked_dim_size(sharding_dim_size, split_size, idx)
- if chunked_dim_size > 0:
- shard_size = list(tensor_sizes)
- current_offsets = [0] * tensor_num_dim
- current_offsets[self.dim] = split_size * idx # type: ignore[index]
- shard_size[self.dim] = chunked_dim_size # type: ignore[index]
-
- shard_metadata = ShardMetadata(
- shard_offsets=current_offsets,
- shard_sizes=shard_size,
- placement=placement,
- )
- shards_metadata.append(shard_metadata)
-
- # current_offsets[self.dim] += chunked_dim_size # type: ignore[index]
+ shard_size = list(tensor_sizes)
+ current_offsets = [0] * tensor_num_dim
+ current_offsets[self.dim] = split_size * idx # type: ignore[index]
+ shard_size[self.dim] = chunked_dim_size # type: ignore[index]
+
+ shard_metadata = ShardMetadata(
+ shard_offsets=current_offsets,
+ shard_sizes=shard_size,
+ placement=placement,
+ )
+ shards_metadata.append(shard_metadata)
return sharded_tensor_meta.ShardedTensorMetadata(
shards_metadata,
|
{"golden_diff": "diff --git a/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py b/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py\n--- a/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py\n+++ b/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py\n@@ -91,20 +91,17 @@\n for idx, placement in enumerate(self.placements):\n # generate ShardMetadata for each placement device\n chunked_dim_size = get_chunked_dim_size(sharding_dim_size, split_size, idx)\n- if chunked_dim_size > 0:\n- shard_size = list(tensor_sizes)\n- current_offsets = [0] * tensor_num_dim\n- current_offsets[self.dim] = split_size * idx # type: ignore[index]\n- shard_size[self.dim] = chunked_dim_size # type: ignore[index]\n-\n- shard_metadata = ShardMetadata(\n- shard_offsets=current_offsets,\n- shard_sizes=shard_size,\n- placement=placement,\n- )\n- shards_metadata.append(shard_metadata)\n-\n- # current_offsets[self.dim] += chunked_dim_size # type: ignore[index]\n+ shard_size = list(tensor_sizes)\n+ current_offsets = [0] * tensor_num_dim\n+ current_offsets[self.dim] = split_size * idx # type: ignore[index]\n+ shard_size[self.dim] = chunked_dim_size # type: ignore[index]\n+\n+ shard_metadata = ShardMetadata(\n+ shard_offsets=current_offsets,\n+ shard_sizes=shard_size,\n+ placement=placement,\n+ )\n+ shards_metadata.append(shard_metadata)\n \n return sharded_tensor_meta.ShardedTensorMetadata(\n shards_metadata,\n", "issue": "Sharded checkpointing fails on load for certain tensor sizes\n### \ud83d\udc1b Describe the bug\n\nSharded checkpointing (particularly with FSDP for the optimizer state) uses ChunkShardingSpec to save/load tensors. ChunkShardingSpec's behavior is similar to torch.chunk and will result in some chunks of size 0.\r\n\r\nThis can be reproduced by trying to save a tensor of size 6 with 4 gpus. This tensor is sharded across the first 3 gpus. The resulting size of the chunks will look like [2, 2, 2, 0]. On save, it seems like ChunkShardingSpec is aware of which gpus contain shards, so it saves the tensor with shard metadata showing the size of chunks to be [2, 2, 2].\r\n\r\nThe problem occurs when attempting to load the sharded checkpoint. ChunkShardingSpec attempts to rebuild the metadata, this time being unaware of how many gpus originally contained shards. It knows that there is a tensor of size 6 and 4 gpus though, so it generates shard metadata with chunk sizes [2, 2, 2], [skipping the last gpu since it has size 0](https://github.com/pytorch/pytorch/blob/ddbaad6d746d34b56f2d2cc0e02d7e0d0301e626/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py#L94). Then when attempting to shard the tensor, the 4th gpu has no shard metadata, so a [local_tensor is never created](https://github.com/pytorch/pytorch/blob/ddbaad6d746d34b56f2d2cc0e02d7e0d0301e626/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py#L165), resulting in an [assertion error on the 4th rank](https://github.com/pytorch/pytorch/blob/ddbaad6d746d34b56f2d2cc0e02d7e0d0301e626/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py#L172) and a [type error on all other ranks](https://github.com/pytorch/pytorch/blob/ddbaad6d746d34b56f2d2cc0e02d7e0d0301e626/torch/distributed/distributed_c10d.py#L808) because None is contained in the scatter_list.\r\n\r\nThere are a couple possible solutions to this.\r\n1. Add shardmetadata for all gpus that allow for a tensor to be size 0\r\n2. Change ChunkShardingSpec to distribute a tensor evenly across gpus (e.g. 
[2, 2, 1, 1] instead of [2, 2, 2, 0])\r\n\r\nI've implemented and tested both solutions and both are backwards compatible with previously saved sharded checkpoints on versions 2.0.1, 2.1.0-rc3, and 8/27 nightly (e.g. save on 2.0.1 with the old ChunkShardingSpec and load on 2.0.1 with the new ChunkShardingSpec). Both solutions are also cross-version compatible for 2.0.1->2.1.0-rc3, 2.0.1->8/27 nightly, and 8/27 nightly->2.0.1 (e.g. save on 2.0.1 with the old ChunkShardingSpec and load on 2.1.0 with the new ChunkShardingSpec). The solutions might be version/cross-version compatible for other combinations, but I haven't tested those.\n\n### Versions\n\nThis happens with pytorch 2.0.1, 2.1.0-rc3, and 8/27 nightly.\n\ncc @mrshenli @pritamdamania87 @zhaojuanmao @satgera @rohan-varma @gqchen @aazzolini @osalpekar @jiayisuse @H-Huang @kwen2501 @awgu @penguinwu\n", "before_files": [{"content": "from dataclasses import dataclass\nimport torch\nimport torch.distributed._shard.sharded_tensor.metadata as sharded_tensor_meta\nfrom torch.distributed._shard.metadata import ShardMetadata\nfrom torch.distributed._shard.sharded_tensor.shard import Shard\nfrom torch.distributed._shard.sharded_tensor.utils import (\n _parse_and_validate_remote_device\n)\nfrom torch.distributed._shard._utils import narrow_tensor\nimport torch.distributed as dist\nimport torch.distributed.distributed_c10d as distributed_c10d\nfrom typing import List, Union, TYPE_CHECKING\nfrom ._internals import (\n get_chunked_dim_size,\n get_split_size,\n)\n\nfrom .api import ShardingSpec\n\nif TYPE_CHECKING:\n # Only include ShardedTensor when do type checking, exclude it\n # from run-time to resolve circular dependency.\n from torch.distributed._shard.sharded_tensor import ShardedTensor\n\n@dataclass\nclass ChunkShardingSpec(ShardingSpec):\n \"\"\"\n This is a type of PlacementSpec that defines the placement as being sharded\n across multiple devices. In particular, it represents sharding a Tensor\n along a single dimension into equal chunks (similar to :meth:`torch.chunk`).\n\n The semantics of how a tensor is partitioned is inline with\n :meth:`torch.chunk`, where ``dim`` in torch.chunk corresponds to the\n specified ``dim`` and ``chunks`` in torch.chunk is the number of elements\n in the placement specified.\n\n Args:\n dim (int or str):\n The dimension to shard on, could be an integer representing the\n dimension or a string in case of named tensors where dimensions are\n named. Note that named tensor support is not added yet.\n placement(List[Union[_remote_device, str]]):\n Specifies the placement of each shard of the Tensor. The size of\n the list represents the number of shards to be created. This could\n be a list of\n :class:`torch.distributed._remote_device`'s. 
This list\n could also contain a string which represents remote\n device as accepted by\n :class:`torch.distributed._remote_device`\n \"\"\"\n\n ShardingDim = Union[int, str]\n\n dim: ShardingDim\n placements: List[Union[torch.distributed._remote_device, str]]\n\n def __post_init__(self):\n self._verify_dim(self.dim)\n for i, remote_device in enumerate(self.placements):\n if not isinstance(remote_device, torch.distributed._remote_device):\n self.placements[i] = torch.distributed._remote_device(remote_device)\n\n @staticmethod\n def _verify_dim(dim):\n # Validate the sharding spec.\n # TODO: support named dimension\n if isinstance(dim, str):\n raise NotImplementedError(\n \"ChunkShardingSpec does not support named dimension yet!\"\n )\n\n if not isinstance(dim, int):\n raise ValueError(\n f\"Sharding dim needs to be an integer, found: {dim}\"\n )\n\n def build_metadata(self,\n tensor_sizes: torch.Size,\n tensor_properties: sharded_tensor_meta.TensorProperties,\n ) -> sharded_tensor_meta.ShardedTensorMetadata:\n tensor_num_dim = len(tensor_sizes)\n\n self._verify_dim(self.dim)\n if self.dim >= tensor_num_dim or self.dim < -tensor_num_dim: # type: ignore[operator]\n raise ValueError(f\"Invalid sharding dim: {self.dim}\")\n\n shards_metadata = []\n sharding_dim_size = tensor_sizes[self.dim] # type: ignore[index]\n chunks = len(self.placements)\n split_size = get_split_size(sharding_dim_size, chunks)\n for idx, placement in enumerate(self.placements):\n # generate ShardMetadata for each placement device\n chunked_dim_size = get_chunked_dim_size(sharding_dim_size, split_size, idx)\n if chunked_dim_size > 0:\n shard_size = list(tensor_sizes)\n current_offsets = [0] * tensor_num_dim\n current_offsets[self.dim] = split_size * idx # type: ignore[index]\n shard_size[self.dim] = chunked_dim_size # type: ignore[index]\n\n shard_metadata = ShardMetadata(\n shard_offsets=current_offsets,\n shard_sizes=shard_size,\n placement=placement,\n )\n shards_metadata.append(shard_metadata)\n\n # current_offsets[self.dim] += chunked_dim_size # type: ignore[index]\n\n return sharded_tensor_meta.ShardedTensorMetadata(\n shards_metadata,\n tensor_sizes,\n tensor_properties\n )\n\n\n def shard(self, tensor: torch.Tensor, src_rank: int = 0, process_group=None) -> \"ShardedTensor\":\n \"\"\"\n Args:\n src_rank: group rank relative to ``process_group``\n\n N.B. 
If ``process_group`` is None, ``src_rank`` is a global rank.\n \"\"\"\n # relative imports to avoid circular dependency\n from torch.distributed._shard.sharded_tensor import (\n ShardedTensor\n )\n tensor_properties = sharded_tensor_meta.TensorProperties(\n dtype=tensor.dtype,\n layout=tensor.layout,\n requires_grad=tensor.requires_grad,\n memory_format=torch.contiguous_format,\n pin_memory=tensor.is_pinned()\n )\n current_rank = dist.get_rank(process_group)\n tensor_meta = self.build_metadata(tensor.size(), tensor_properties)\n local_shards = []\n local_tensor = None\n local_metadata = None\n tensors_to_scatter = [None] * dist.get_world_size(process_group)\n\n sharding_dim_size = tensor.size()[self.dim] # type: ignore[index]\n chunks = len(self.placements)\n split_size = get_split_size(sharding_dim_size, chunks)\n scatter_shape = list(tensor.size())\n scatter_shape[self.dim] = split_size # type: ignore[index]\n\n for shard_meta in tensor_meta.shards_metadata:\n rank, device = _parse_and_validate_remote_device(process_group, shard_meta.placement)\n if current_rank == src_rank:\n # Reshape to get shard for this rank and we don't want autograd\n # recording here for the narrow op and 'local_shard' should be a\n # leaf variable in the autograd graph.\n narrowed_tensor = narrow_tensor(tensor, shard_meta)\n if shard_meta.shard_sizes[self.dim] < split_size: # type: ignore[index]\n # for the last shard that might be smaller to other shards\n # resize the narrowed tensor to the same size and use it for\n # the scatter collective as dist.scatter requires same size\n # inputs on every rank\n tensor_to_scatter = narrowed_tensor.detach().clone().resize_(scatter_shape)\n else:\n tensor_to_scatter = narrowed_tensor.detach().clone().contiguous()\n\n tensors_to_scatter[rank] = tensor_to_scatter\n\n if current_rank == rank:\n local_tensor = torch.empty(\n scatter_shape, dtype=tensor.dtype, layout=tensor.layout, device=device)\n local_metadata = shard_meta\n\n # each rank should have local_tensor and local_metadata initialized if we build\n # the metadata list in a correct way.\n assert local_tensor is not None\n assert local_metadata is not None\n\n # Scatter the shards to all ranks in the pg\n # scatter takes the global rank as ``src``\n src_for_scatter = src_rank\n if process_group is not None and process_group is not distributed_c10d._get_default_group():\n src_for_scatter = distributed_c10d.get_global_rank(process_group, src_for_scatter)\n\n dist.scatter(\n local_tensor,\n scatter_list=tensors_to_scatter if current_rank == src_rank else None,\n src=src_for_scatter,\n group=process_group\n )\n\n if list(local_tensor.size()) != local_metadata.shard_sizes:\n # detach again after receiving to ensure local shards remain a leaf node\n local_tensor = local_tensor.resize_(local_metadata.shard_sizes).detach()\n\n # Sync requires_grad to local_shard.\n local_tensor.requires_grad = tensor.requires_grad\n\n local_shards.append(Shard(tensor=local_tensor, metadata=local_metadata))\n\n st = ShardedTensor._init_from_local_shards_and_global_metadata(\n local_shards,\n tensor_meta,\n process_group=process_group)\n\n # Manually set sharding_spec\n st._sharding_spec = self\n\n return st\n", "path": "torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py"}], "after_files": [{"content": "from dataclasses import dataclass\nimport torch\nimport torch.distributed._shard.sharded_tensor.metadata as sharded_tensor_meta\nfrom torch.distributed._shard.metadata import ShardMetadata\nfrom 
torch.distributed._shard.sharded_tensor.shard import Shard\nfrom torch.distributed._shard.sharded_tensor.utils import (\n _parse_and_validate_remote_device\n)\nfrom torch.distributed._shard._utils import narrow_tensor\nimport torch.distributed as dist\nimport torch.distributed.distributed_c10d as distributed_c10d\nfrom typing import List, Union, TYPE_CHECKING\nfrom ._internals import (\n get_chunked_dim_size,\n get_split_size,\n)\n\nfrom .api import ShardingSpec\n\nif TYPE_CHECKING:\n # Only include ShardedTensor when do type checking, exclude it\n # from run-time to resolve circular dependency.\n from torch.distributed._shard.sharded_tensor import ShardedTensor\n\n@dataclass\nclass ChunkShardingSpec(ShardingSpec):\n \"\"\"\n This is a type of PlacementSpec that defines the placement as being sharded\n across multiple devices. In particular, it represents sharding a Tensor\n along a single dimension into equal chunks (similar to :meth:`torch.chunk`).\n\n The semantics of how a tensor is partitioned is inline with\n :meth:`torch.chunk`, where ``dim`` in torch.chunk corresponds to the\n specified ``dim`` and ``chunks`` in torch.chunk is the number of elements\n in the placement specified.\n\n Args:\n dim (int or str):\n The dimension to shard on, could be an integer representing the\n dimension or a string in case of named tensors where dimensions are\n named. Note that named tensor support is not added yet.\n placement(List[Union[_remote_device, str]]):\n Specifies the placement of each shard of the Tensor. The size of\n the list represents the number of shards to be created. This could\n be a list of\n :class:`torch.distributed._remote_device`'s. This list\n could also contain a string which represents remote\n device as accepted by\n :class:`torch.distributed._remote_device`\n \"\"\"\n\n ShardingDim = Union[int, str]\n\n dim: ShardingDim\n placements: List[Union[torch.distributed._remote_device, str]]\n\n def __post_init__(self):\n self._verify_dim(self.dim)\n for i, remote_device in enumerate(self.placements):\n if not isinstance(remote_device, torch.distributed._remote_device):\n self.placements[i] = torch.distributed._remote_device(remote_device)\n\n @staticmethod\n def _verify_dim(dim):\n # Validate the sharding spec.\n # TODO: support named dimension\n if isinstance(dim, str):\n raise NotImplementedError(\n \"ChunkShardingSpec does not support named dimension yet!\"\n )\n\n if not isinstance(dim, int):\n raise ValueError(\n f\"Sharding dim needs to be an integer, found: {dim}\"\n )\n\n def build_metadata(self,\n tensor_sizes: torch.Size,\n tensor_properties: sharded_tensor_meta.TensorProperties,\n ) -> sharded_tensor_meta.ShardedTensorMetadata:\n tensor_num_dim = len(tensor_sizes)\n\n self._verify_dim(self.dim)\n if self.dim >= tensor_num_dim or self.dim < -tensor_num_dim: # type: ignore[operator]\n raise ValueError(f\"Invalid sharding dim: {self.dim}\")\n\n shards_metadata = []\n sharding_dim_size = tensor_sizes[self.dim] # type: ignore[index]\n chunks = len(self.placements)\n split_size = get_split_size(sharding_dim_size, chunks)\n for idx, placement in enumerate(self.placements):\n # generate ShardMetadata for each placement device\n chunked_dim_size = get_chunked_dim_size(sharding_dim_size, split_size, idx)\n shard_size = list(tensor_sizes)\n current_offsets = [0] * tensor_num_dim\n current_offsets[self.dim] = split_size * idx # type: ignore[index]\n shard_size[self.dim] = chunked_dim_size # type: ignore[index]\n\n shard_metadata = ShardMetadata(\n 
shard_offsets=current_offsets,\n shard_sizes=shard_size,\n placement=placement,\n )\n shards_metadata.append(shard_metadata)\n\n return sharded_tensor_meta.ShardedTensorMetadata(\n shards_metadata,\n tensor_sizes,\n tensor_properties\n )\n\n\n def shard(self, tensor: torch.Tensor, src_rank: int = 0, process_group=None) -> \"ShardedTensor\":\n \"\"\"\n Args:\n src_rank: group rank relative to ``process_group``\n\n N.B. If ``process_group`` is None, ``src_rank`` is a global rank.\n \"\"\"\n # relative imports to avoid circular dependency\n from torch.distributed._shard.sharded_tensor import (\n ShardedTensor\n )\n tensor_properties = sharded_tensor_meta.TensorProperties(\n dtype=tensor.dtype,\n layout=tensor.layout,\n requires_grad=tensor.requires_grad,\n memory_format=torch.contiguous_format,\n pin_memory=tensor.is_pinned()\n )\n current_rank = dist.get_rank(process_group)\n tensor_meta = self.build_metadata(tensor.size(), tensor_properties)\n local_shards = []\n local_tensor = None\n local_metadata = None\n tensors_to_scatter = [None] * dist.get_world_size(process_group)\n\n sharding_dim_size = tensor.size()[self.dim] # type: ignore[index]\n chunks = len(self.placements)\n split_size = get_split_size(sharding_dim_size, chunks)\n scatter_shape = list(tensor.size())\n scatter_shape[self.dim] = split_size # type: ignore[index]\n\n for shard_meta in tensor_meta.shards_metadata:\n rank, device = _parse_and_validate_remote_device(process_group, shard_meta.placement)\n if current_rank == src_rank:\n # Reshape to get shard for this rank and we don't want autograd\n # recording here for the narrow op and 'local_shard' should be a\n # leaf variable in the autograd graph.\n narrowed_tensor = narrow_tensor(tensor, shard_meta)\n if shard_meta.shard_sizes[self.dim] < split_size: # type: ignore[index]\n # for the last shard that might be smaller to other shards\n # resize the narrowed tensor to the same size and use it for\n # the scatter collective as dist.scatter requires same size\n # inputs on every rank\n tensor_to_scatter = narrowed_tensor.detach().clone().resize_(scatter_shape)\n else:\n tensor_to_scatter = narrowed_tensor.detach().clone().contiguous()\n\n tensors_to_scatter[rank] = tensor_to_scatter\n\n if current_rank == rank:\n local_tensor = torch.empty(\n scatter_shape, dtype=tensor.dtype, layout=tensor.layout, device=device)\n local_metadata = shard_meta\n\n # each rank should have local_tensor and local_metadata initialized if we build\n # the metadata list in a correct way.\n assert local_tensor is not None\n assert local_metadata is not None\n\n # Scatter the shards to all ranks in the pg\n # scatter takes the global rank as ``src``\n src_for_scatter = src_rank\n if process_group is not None and process_group is not distributed_c10d._get_default_group():\n src_for_scatter = distributed_c10d.get_global_rank(process_group, src_for_scatter)\n\n dist.scatter(\n local_tensor,\n scatter_list=tensors_to_scatter if current_rank == src_rank else None,\n src=src_for_scatter,\n group=process_group\n )\n\n if list(local_tensor.size()) != local_metadata.shard_sizes:\n # detach again after receiving to ensure local shards remain a leaf node\n local_tensor = local_tensor.resize_(local_metadata.shard_sizes).detach()\n\n # Sync requires_grad to local_shard.\n local_tensor.requires_grad = tensor.requires_grad\n\n local_shards.append(Shard(tensor=local_tensor, metadata=local_metadata))\n\n st = ShardedTensor._init_from_local_shards_and_global_metadata(\n local_shards,\n tensor_meta,\n 
process_group=process_group)\n\n # Manually set sharding_spec\n st._sharding_spec = self\n\n return st\n", "path": "torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py"}]}
| 3,519 | 386 |
gh_patches_debug_64879 | rasdani/github-patches | git_diff | streamlit__streamlit-5396 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
protobuf error on clean streamlit install of 1.12.3
### Summary
I installed streamlit, and couldn't use it due to a reported error from protobuf. Protobuf version 3.20.2 was installed automatically along with streamlit.
### Steps to reproduce
Code snippet:
```
pip install streamlit
streamlit run something.py
```
Error
```
streamlit run app.py
Traceback (most recent call last):
File "/private/tmp/tttt/.direnv/python-3.10.3/bin/streamlit", line 5, in <module>
from streamlit.web.cli import main
File "/private/tmp/tttt/.direnv/python-3.10.3/lib/python3.10/site-packages/streamlit/__init__.py", line 48, in <module>
from streamlit.proto.RootContainer_pb2 import RootContainer
File "/private/tmp/tttt/.direnv/python-3.10.3/lib/python3.10/site-packages/streamlit/proto/RootContainer_pb2.py", line 6, in <module>
from google.protobuf import descriptor as _descriptor
File "/private/tmp/tttt/.direnv/python-3.10.3/lib/python3.10/site-packages/google/protobuf/descriptor.py", line 47, in <module>
from google.protobuf.pyext import _message
ImportError: dlopen(/private/tmp/tttt/.direnv/python-3.10.3/lib/python3.10/site-packages/google/protobuf/pyext/_message.cpython-310-darwin.so, 0x0002): symbol not found in flat namespace (__ZN6google8protobuf15FieldDescriptor12TypeOnceInitEPKS1_)
```
This was solved by downgrading protobuf
```
pip install protobuf==3.19.4
```
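As an aside (an illustration added here, not text from the original report): instead of pinning an older protobuf, a package can exclude only the broken release with a PEP 440 exclusion specifier, e.g.:
```python
# Hypothetical requirement entry that skips just the broken 3.20.2 wheel.
INSTALL_REQUIRES = [
    "protobuf<4,>=3.12,!=3.20.2",
]
```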
(Please provide a code snippet! This will help expedite us finding and solving the problem.)
If applicable, please provide the steps we should take to reproduce the bug:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
**Expected behavior:**
App opens
**Actual behavior:**
Error message
### Is this a regression?
Yes
### Debug info
- Streamlit version: (get it with `$ streamlit version`)
```
In [4]: version._get_installed_streamlit_version()
Out[4]: <Version('1.12.3.dev20220919')>
```
- Python version: (get it with `$ python --version`) 3.10.3
- Using Conda? PipEnv? PyEnv? Pex? pyenv
- OS version: OSX
- Browser version:
### Additional information
Came from https://discuss.streamlit.io/t/streamlit-hello-returns-importerror/30929/3?u=blackary
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/setup.py`
Content:
```
1 # Copyright 2018-2022 Streamlit Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import setuptools
17 import sys
18
19 from setuptools.command.install import install
20
21 VERSION = "1.12.2" # PEP-440
22
23 NAME = "streamlit"
24
25 DESCRIPTION = "The fastest way to build data apps in Python"
26
27 LONG_DESCRIPTION = (
28 "Streamlit's open-source app framework is the easiest way "
29 "for data scientists and machine learning engineers to "
30 "create beautiful, performant apps in only a few hours! "
31 "All in pure Python. All for free."
32 )
33
34 # IMPORTANT: We should try very hard *not* to add dependencies to Streamlit.
35 # And if you do add one, make the required version as general as possible.
36 # But include relevant lower bounds for any features we use from our dependencies.
37 INSTALL_REQUIRES = [
38 "altair>=3.2.0",
39 "blinker>=1.0.0",
40 "cachetools>=4.0",
41 "click>=7.0",
42 # 1.4 introduced the functionality found in python 3.8's importlib.metadata module
43 "importlib-metadata>=1.4",
44 "numpy",
45 "packaging>=14.1",
46 "pandas>=0.21.0",
47 "pillow>=6.2.0",
48 "protobuf<4,>=3.12",
49 "pyarrow>=4.0",
50 "pydeck>=0.1.dev5",
51 "pympler>=0.9",
52 "python-dateutil",
53 "requests>=2.4",
54 "rich>=10.11.0",
55 "semver",
56 "toml",
57 # 5.0 has a fix for etag header: https://github.com/tornadoweb/tornado/issues/2262
58 "tornado>=5.0",
59 "typing-extensions>=3.10.0.0",
60 "tzlocal>=1.1",
61 "validators>=0.2",
62 # Don't require watchdog on MacOS, since it'll fail without xcode tools.
63 # Without watchdog, we fallback to a polling file watcher to check for app changes.
64 "watchdog; platform_system != 'Darwin'",
65 ]
66
67 # We want to exclude some dependencies in our internal conda distribution of
68 # Streamlit.
69 CONDA_OPTIONAL_DEPENDENCIES = [
70 "gitpython!=3.1.19",
71 ]
72
73 # NOTE: ST_CONDA_BUILD is used here (even though CONDA_BUILD is set
74 # automatically when using the `conda build` command) because the
75 # `load_setup_py_data()` conda build helper function does not have the
76 # CONDA_BUILD environment variable set when it runs to generate our build
77 # recipe from meta.yaml.
78 if not os.getenv("ST_CONDA_BUILD"):
79 INSTALL_REQUIRES.extend(CONDA_OPTIONAL_DEPENDENCIES)
80
81
82 class VerifyVersionCommand(install):
83 """Custom command to verify that the git tag matches our version"""
84
85 description = "verify that the git tag matches our version"
86
87 def run(self):
88 tag = os.getenv("CIRCLE_TAG")
89
90 if tag != VERSION:
91 info = "Git tag: {0} does not match the version of this app: {1}".format(
92 tag, VERSION
93 )
94 sys.exit(info)
95
96
97 setuptools.setup(
98 name=NAME,
99 version=VERSION,
100 description=DESCRIPTION,
101 long_description=LONG_DESCRIPTION,
102 url="https://streamlit.io",
103 project_urls={
104 "Source": "https://github.com/streamlit/streamlit",
105 },
106 author="Streamlit Inc",
107 author_email="[email protected]",
108 # We exclude Python 3.9.7 from our compatible versions due to a bug in that version
109 # with typing.Protocol. See https://github.com/streamlit/streamlit/issues/5140 and
110 # https://bugs.python.org/issue45121
111 python_requires=">=3.7, !=3.9.7",
112 license="Apache 2",
113 # PEP 561: https://mypy.readthedocs.io/en/stable/installed_packages.html
114 package_data={"streamlit": ["py.typed", "hello/**/*.py"]},
115 packages=setuptools.find_packages(exclude=["tests", "tests.*"]),
116 # Requirements
117 install_requires=INSTALL_REQUIRES,
118 zip_safe=False, # install source files not egg
119 include_package_data=True, # copy html and friends
120 entry_points={"console_scripts": ["streamlit = streamlit.web.cli:main"]},
121 # For Windows so that streamlit * commands work ie.
122 # - streamlit version
123 # - streamlit hello
124 scripts=["bin/streamlit.cmd"],
125 cmdclass={
126 "verify": VerifyVersionCommand,
127 },
128 )
129
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lib/setup.py b/lib/setup.py
--- a/lib/setup.py
+++ b/lib/setup.py
@@ -45,7 +45,8 @@
"packaging>=14.1",
"pandas>=0.21.0",
"pillow>=6.2.0",
- "protobuf<4,>=3.12",
+ # protobuf 3.20.2 is broken: https://github.com/protocolbuffers/protobuf/issues/10571
+ "protobuf<4,>=3.12,!=3.20.2",
"pyarrow>=4.0",
"pydeck>=0.1.dev5",
"pympler>=0.9",
|
{"golden_diff": "diff --git a/lib/setup.py b/lib/setup.py\n--- a/lib/setup.py\n+++ b/lib/setup.py\n@@ -45,7 +45,8 @@\n \"packaging>=14.1\",\n \"pandas>=0.21.0\",\n \"pillow>=6.2.0\",\n- \"protobuf<4,>=3.12\",\n+ # protobuf 3.20.2 is broken: https://github.com/protocolbuffers/protobuf/issues/10571\n+ \"protobuf<4,>=3.12,!=3.20.2\",\n \"pyarrow>=4.0\",\n \"pydeck>=0.1.dev5\",\n \"pympler>=0.9\",\n", "issue": "protobuf error on clean streamlit install of 1.12.3\n### Summary\r\n\r\nI installed streamlit, and couldn't use it due to a reported error from protobuf. Protobuf version 3.20.2 was installed automatically along with streamlit.\r\n\r\n### Steps to reproduce\r\n\r\nCode snippet:\r\n\r\n```\r\npip install streamlit\r\nstreamlit run something.py\r\n```\r\n\r\nError\r\n```\r\nstreamlit run app.py\r\n\r\nTraceback (most recent call last):\r\n File \"/private/tmp/tttt/.direnv/python-3.10.3/bin/streamlit\", line 5, in <module>\r\n from streamlit.web.cli import main\r\n File \"/private/tmp/tttt/.direnv/python-3.10.3/lib/python3.10/site-packages/streamlit/__init__.py\", line 48, in <module>\r\n from streamlit.proto.RootContainer_pb2 import RootContainer\r\n File \"/private/tmp/tttt/.direnv/python-3.10.3/lib/python3.10/site-packages/streamlit/proto/RootContainer_pb2.py\", line 6, in <module>\r\n from google.protobuf import descriptor as _descriptor\r\n File \"/private/tmp/tttt/.direnv/python-3.10.3/lib/python3.10/site-packages/google/protobuf/descriptor.py\", line 47, in <module>\r\n from google.protobuf.pyext import _message\r\nImportError: dlopen(/private/tmp/tttt/.direnv/python-3.10.3/lib/python3.10/site-packages/google/protobuf/pyext/_message.cpython-310-darwin.so, 0x0002): symbol not found in flat namespace (__ZN6google8protobuf15FieldDescriptor12TypeOnceInitEPKS1_)\r\n```\r\n\r\nThis was solved by downgrading protobuf \r\n```\r\npip install protobuf==3.19.4\r\n```\r\n(Please provide a code snippet! This will help expedite us finding and solving the problem.)\r\n\r\nIf applicable, please provide the steps we should take to reproduce the bug:\r\n\r\n1. Go to '...'\r\n2. Click on '....'\r\n3. Scroll down to '....'\r\n\r\n**Expected behavior:**\r\n\r\nApp opens\r\n\r\n**Actual behavior:**\r\n\r\nError message\r\n\r\n### Is this a regression?\r\n\r\nYes\r\n\r\n### Debug info\r\n\r\n- Streamlit version: (get it with `$ streamlit version`) \r\n```\r\nIn [4]: version._get_installed_streamlit_version()\r\nOut[4]: <Version('1.12.3.dev20220919')>\r\n```\r\n- Python version: (get it with `$ python --version`) 3.10.3\r\n- Using Conda? PipEnv? PyEnv? Pex? 
pyenv\r\n- OS version: OSX\r\n- Browser version: \r\n\r\n### Additional information\r\n\r\nCame from https://discuss.streamlit.io/t/streamlit-hello-returns-importerror/30929/3?u=blackary\n", "before_files": [{"content": "# Copyright 2018-2022 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport setuptools\nimport sys\n\nfrom setuptools.command.install import install\n\nVERSION = \"1.12.2\" # PEP-440\n\nNAME = \"streamlit\"\n\nDESCRIPTION = \"The fastest way to build data apps in Python\"\n\nLONG_DESCRIPTION = (\n \"Streamlit's open-source app framework is the easiest way \"\n \"for data scientists and machine learning engineers to \"\n \"create beautiful, performant apps in only a few hours! \"\n \"All in pure Python. All for free.\"\n)\n\n# IMPORTANT: We should try very hard *not* to add dependencies to Streamlit.\n# And if you do add one, make the required version as general as possible.\n# But include relevant lower bounds for any features we use from our dependencies.\nINSTALL_REQUIRES = [\n \"altair>=3.2.0\",\n \"blinker>=1.0.0\",\n \"cachetools>=4.0\",\n \"click>=7.0\",\n # 1.4 introduced the functionality found in python 3.8's importlib.metadata module\n \"importlib-metadata>=1.4\",\n \"numpy\",\n \"packaging>=14.1\",\n \"pandas>=0.21.0\",\n \"pillow>=6.2.0\",\n \"protobuf<4,>=3.12\",\n \"pyarrow>=4.0\",\n \"pydeck>=0.1.dev5\",\n \"pympler>=0.9\",\n \"python-dateutil\",\n \"requests>=2.4\",\n \"rich>=10.11.0\",\n \"semver\",\n \"toml\",\n # 5.0 has a fix for etag header: https://github.com/tornadoweb/tornado/issues/2262\n \"tornado>=5.0\",\n \"typing-extensions>=3.10.0.0\",\n \"tzlocal>=1.1\",\n \"validators>=0.2\",\n # Don't require watchdog on MacOS, since it'll fail without xcode tools.\n # Without watchdog, we fallback to a polling file watcher to check for app changes.\n \"watchdog; platform_system != 'Darwin'\",\n]\n\n# We want to exclude some dependencies in our internal conda distribution of\n# Streamlit.\nCONDA_OPTIONAL_DEPENDENCIES = [\n \"gitpython!=3.1.19\",\n]\n\n# NOTE: ST_CONDA_BUILD is used here (even though CONDA_BUILD is set\n# automatically when using the `conda build` command) because the\n# `load_setup_py_data()` conda build helper function does not have the\n# CONDA_BUILD environment variable set when it runs to generate our build\n# recipe from meta.yaml.\nif not os.getenv(\"ST_CONDA_BUILD\"):\n INSTALL_REQUIRES.extend(CONDA_OPTIONAL_DEPENDENCIES)\n\n\nclass VerifyVersionCommand(install):\n \"\"\"Custom command to verify that the git tag matches our version\"\"\"\n\n description = \"verify that the git tag matches our version\"\n\n def run(self):\n tag = os.getenv(\"CIRCLE_TAG\")\n\n if tag != VERSION:\n info = \"Git tag: {0} does not match the version of this app: {1}\".format(\n tag, VERSION\n )\n sys.exit(info)\n\n\nsetuptools.setup(\n name=NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n url=\"https://streamlit.io\",\n project_urls={\n \"Source\": 
\"https://github.com/streamlit/streamlit\",\n },\n author=\"Streamlit Inc\",\n author_email=\"[email protected]\",\n # We exclude Python 3.9.7 from our compatible versions due to a bug in that version\n # with typing.Protocol. See https://github.com/streamlit/streamlit/issues/5140 and\n # https://bugs.python.org/issue45121\n python_requires=\">=3.7, !=3.9.7\",\n license=\"Apache 2\",\n # PEP 561: https://mypy.readthedocs.io/en/stable/installed_packages.html\n package_data={\"streamlit\": [\"py.typed\", \"hello/**/*.py\"]},\n packages=setuptools.find_packages(exclude=[\"tests\", \"tests.*\"]),\n # Requirements\n install_requires=INSTALL_REQUIRES,\n zip_safe=False, # install source files not egg\n include_package_data=True, # copy html and friends\n entry_points={\"console_scripts\": [\"streamlit = streamlit.web.cli:main\"]},\n # For Windows so that streamlit * commands work ie.\n # - streamlit version\n # - streamlit hello\n scripts=[\"bin/streamlit.cmd\"],\n cmdclass={\n \"verify\": VerifyVersionCommand,\n },\n)\n", "path": "lib/setup.py"}], "after_files": [{"content": "# Copyright 2018-2022 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport setuptools\nimport sys\n\nfrom setuptools.command.install import install\n\nVERSION = \"1.12.2\" # PEP-440\n\nNAME = \"streamlit\"\n\nDESCRIPTION = \"The fastest way to build data apps in Python\"\n\nLONG_DESCRIPTION = (\n \"Streamlit's open-source app framework is the easiest way \"\n \"for data scientists and machine learning engineers to \"\n \"create beautiful, performant apps in only a few hours! \"\n \"All in pure Python. 
All for free.\"\n)\n\n# IMPORTANT: We should try very hard *not* to add dependencies to Streamlit.\n# And if you do add one, make the required version as general as possible.\n# But include relevant lower bounds for any features we use from our dependencies.\nINSTALL_REQUIRES = [\n \"altair>=3.2.0\",\n \"blinker>=1.0.0\",\n \"cachetools>=4.0\",\n \"click>=7.0\",\n # 1.4 introduced the functionality found in python 3.8's importlib.metadata module\n \"importlib-metadata>=1.4\",\n \"numpy\",\n \"packaging>=14.1\",\n \"pandas>=0.21.0\",\n \"pillow>=6.2.0\",\n # protobuf 3.20.2 is broken: https://github.com/protocolbuffers/protobuf/issues/10571\n \"protobuf<4,>=3.12,!=3.20.2\",\n \"pyarrow>=4.0\",\n \"pydeck>=0.1.dev5\",\n \"pympler>=0.9\",\n \"python-dateutil\",\n \"requests>=2.4\",\n \"rich>=10.11.0\",\n \"semver\",\n \"toml\",\n # 5.0 has a fix for etag header: https://github.com/tornadoweb/tornado/issues/2262\n \"tornado>=5.0\",\n \"typing-extensions>=3.10.0.0\",\n \"tzlocal>=1.1\",\n \"validators>=0.2\",\n # Don't require watchdog on MacOS, since it'll fail without xcode tools.\n # Without watchdog, we fallback to a polling file watcher to check for app changes.\n \"watchdog; platform_system != 'Darwin'\",\n]\n\n# We want to exclude some dependencies in our internal conda distribution of\n# Streamlit.\nCONDA_OPTIONAL_DEPENDENCIES = [\n \"gitpython!=3.1.19\",\n]\n\n# NOTE: ST_CONDA_BUILD is used here (even though CONDA_BUILD is set\n# automatically when using the `conda build` command) because the\n# `load_setup_py_data()` conda build helper function does not have the\n# CONDA_BUILD environment variable set when it runs to generate our build\n# recipe from meta.yaml.\nif not os.getenv(\"ST_CONDA_BUILD\"):\n INSTALL_REQUIRES.extend(CONDA_OPTIONAL_DEPENDENCIES)\n\n\nclass VerifyVersionCommand(install):\n \"\"\"Custom command to verify that the git tag matches our version\"\"\"\n\n description = \"verify that the git tag matches our version\"\n\n def run(self):\n tag = os.getenv(\"CIRCLE_TAG\")\n\n if tag != VERSION:\n info = \"Git tag: {0} does not match the version of this app: {1}\".format(\n tag, VERSION\n )\n sys.exit(info)\n\n\nsetuptools.setup(\n name=NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n url=\"https://streamlit.io\",\n project_urls={\n \"Source\": \"https://github.com/streamlit/streamlit\",\n },\n author=\"Streamlit Inc\",\n author_email=\"[email protected]\",\n # We exclude Python 3.9.7 from our compatible versions due to a bug in that version\n # with typing.Protocol. See https://github.com/streamlit/streamlit/issues/5140 and\n # https://bugs.python.org/issue45121\n python_requires=\">=3.7, !=3.9.7\",\n license=\"Apache 2\",\n # PEP 561: https://mypy.readthedocs.io/en/stable/installed_packages.html\n package_data={\"streamlit\": [\"py.typed\", \"hello/**/*.py\"]},\n packages=setuptools.find_packages(exclude=[\"tests\", \"tests.*\"]),\n # Requirements\n install_requires=INSTALL_REQUIRES,\n zip_safe=False, # install source files not egg\n include_package_data=True, # copy html and friends\n entry_points={\"console_scripts\": [\"streamlit = streamlit.web.cli:main\"]},\n # For Windows so that streamlit * commands work ie.\n # - streamlit version\n # - streamlit hello\n scripts=[\"bin/streamlit.cmd\"],\n cmdclass={\n \"verify\": VerifyVersionCommand,\n },\n)\n", "path": "lib/setup.py"}]}
| 2,340 | 164 |
gh_patches_debug_38783 | rasdani/github-patches | git_diff | aws__aws-sam-cli-1030 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`sam build` for Gradle using gradlew does not work with --use-container
<!-- Make sure we don't have an existing Issue that reports the bug you are seeing (both open and closed).
If you do find an existing Issue, re-open or add a comment to that Issue instead of creating a new one. -->
### Description
Briefly describe the bug you are facing.
When a Gradle project uses `gradlew` scripts, this file does not get picked up with building inside a container.
### Steps to reproduce
Provide steps to replicate.
This integration test sets everything up to repro this issue - https://github.com/awslabs/aws-sam-cli/blob/develop/tests/integration/buildcmd/test_build_cmd.py#L256
### Observed result
Please provide command output with `--debug` flag set.
`gradle` installation within the container is used instead of the `gradlew` script
### Expected result
Describe what you expected.
`gradlew` script is used to build the project
### Additional environment details (Ex: Windows, Mac, Amazon Linux etc)
1. OS:
2. `sam --version`:
`Add --debug flag to command you are running`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `samcli/local/docker/lambda_build_container.py`
Content:
```
1 """
2 Represents Lambda Build Containers.
3 """
4
5 import json
6 import logging
7
8 try:
9 import pathlib
10 except ImportError:
11 import pathlib2 as pathlib
12
13 from .container import Container
14
15 LOG = logging.getLogger(__name__)
16
17
18 class LambdaBuildContainer(Container):
19 """
20 Class to manage Build containers that are capable of building AWS Lambda functions.
21 This container mounts necessary folders, issues a command to the Lambda Builder CLI,
22 and if the build was successful, copies back artifacts to the host filesystem
23 """
24
25 _IMAGE_REPO_NAME = "lambci/lambda"
26 _BUILDERS_EXECUTABLE = "lambda-builders"
27
28 def __init__(self, # pylint: disable=too-many-locals
29 protocol_version,
30 language,
31 dependency_manager,
32 application_framework,
33 source_dir,
34 manifest_path,
35 runtime,
36 optimizations=None,
37 options=None,
38 executable_search_paths=None,
39 log_level=None):
40
41 abs_manifest_path = pathlib.Path(manifest_path).resolve()
42 manifest_file_name = abs_manifest_path.name
43 manifest_dir = str(abs_manifest_path.parent)
44
45 source_dir = str(pathlib.Path(source_dir).resolve())
46
47 container_dirs = LambdaBuildContainer._get_container_dirs(source_dir, manifest_dir)
48
49 request_json = self._make_request(protocol_version,
50 language,
51 dependency_manager,
52 application_framework,
53 container_dirs,
54 manifest_file_name,
55 runtime,
56 optimizations,
57 options,
58 executable_search_paths)
59
60 image = LambdaBuildContainer._get_image(runtime)
61 entry = LambdaBuildContainer._get_entrypoint(request_json)
62 cmd = []
63
64 additional_volumes = {
65 # Manifest is mounted separately in order to support the case where manifest
66 # is outside of source directory
67 manifest_dir: {
68 "bind": container_dirs["manifest_dir"],
69 "mode": "ro"
70 }
71 }
72
73 env_vars = None
74 if log_level:
75 env_vars = {
76 "LAMBDA_BUILDERS_LOG_LEVEL": log_level
77 }
78
79 super(LambdaBuildContainer, self).__init__(
80 image,
81 cmd,
82 container_dirs["source_dir"],
83 source_dir,
84 additional_volumes=additional_volumes,
85 entrypoint=entry,
86 env_vars=env_vars)
87
88 @property
89 def executable_name(self):
90 return LambdaBuildContainer._BUILDERS_EXECUTABLE
91
92 @staticmethod
93 def _make_request(protocol_version,
94 language,
95 dependency_manager,
96 application_framework,
97 container_dirs,
98 manifest_file_name,
99 runtime,
100 optimizations,
101 options,
102 executable_search_paths):
103
104 return json.dumps({
105 "jsonschema": "2.0",
106 "id": 1,
107 "method": "LambdaBuilder.build",
108 "params": {
109 "__protocol_version": protocol_version,
110 "capability": {
111 "language": language,
112 "dependency_manager": dependency_manager,
113 "application_framework": application_framework
114 },
115 "source_dir": container_dirs["source_dir"],
116 "artifacts_dir": container_dirs["artifacts_dir"],
117 "scratch_dir": container_dirs["scratch_dir"],
118
119 # Path is always inside a Linux container. So '/' is valid
120 "manifest_path": "{}/{}".format(container_dirs["manifest_dir"], manifest_file_name),
121
122 "runtime": runtime,
123 "optimizations": optimizations,
124 "options": options,
125 "executable_search_paths": executable_search_paths
126 }
127 })
128
129 @staticmethod
130 def _get_entrypoint(request_json):
131 return [LambdaBuildContainer._BUILDERS_EXECUTABLE, request_json]
132
133 @staticmethod
134 def _get_container_dirs(source_dir, manifest_dir):
135 """
136 Provides paths to directories within the container that is required by the builder
137
138 Parameters
139 ----------
140 source_dir : str
141 Path to the function source code
142
143 manifest_dir : str
144 Path to the directory containing manifest
145
146 Returns
147 -------
148 dict
149 Contains paths to source, artifacts, scratch & manifest directories
150 """
151 base = "/tmp/samcli"
152 result = {
153 "source_dir": "{}/source".format(base),
154 "artifacts_dir": "{}/artifacts".format(base),
155 "scratch_dir": "{}/scratch".format(base),
156 "manifest_dir": "{}/manifest".format(base)
157 }
158
159 if pathlib.PurePath(source_dir) == pathlib.PurePath(manifest_dir):
160 # It is possible that the manifest resides within the source. In that case, we won't mount the manifest
161 # directory separately.
162 result["manifest_dir"] = result["source_dir"]
163
164 return result
165
166 @staticmethod
167 def _get_image(runtime):
168 return "{}:build-{}".format(LambdaBuildContainer._IMAGE_REPO_NAME, runtime)
169
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/samcli/local/docker/lambda_build_container.py b/samcli/local/docker/lambda_build_container.py
--- a/samcli/local/docker/lambda_build_container.py
+++ b/samcli/local/docker/lambda_build_container.py
@@ -46,6 +46,18 @@
container_dirs = LambdaBuildContainer._get_container_dirs(source_dir, manifest_dir)
+ # `executable_search_paths` are provided as a list of paths on the host file system that needs to passed to
+ # the builder. But these paths don't exist within the container. We use the following method to convert the
+ # host paths to container paths. But if a host path is NOT mounted within the container, we will simply ignore
+ # it. In essence, only when the path is already in the mounted path, can the path resolver within the
+ # container even find the executable.
+ executable_search_paths = LambdaBuildContainer._convert_to_container_dirs(
+ host_paths_to_convert=executable_search_paths,
+ host_to_container_path_mapping={
+ source_dir: container_dirs["source_dir"],
+ manifest_dir: container_dirs["manifest_dir"]
+ })
+
request_json = self._make_request(protocol_version,
language,
dependency_manager,
@@ -163,6 +175,54 @@
return result
+ @staticmethod
+ def _convert_to_container_dirs(host_paths_to_convert, host_to_container_path_mapping):
+ """
+ Use this method to convert a list of host paths to a list of equivalent paths within the container
+ where the given host path is mounted. This is necessary when SAM CLI needs to pass path information to
+ the Lambda Builder running within the container.
+
+ If a host path is not mounted within the container, then this method simply passes the path to the result
+ without any changes.
+
+ Ex:
+ [ "/home/foo", "/home/bar", "/home/not/mounted"] => ["/tmp/source", "/tmp/manifest", "/home/not/mounted"]
+
+ Parameters
+ ----------
+ host_paths_to_convert : list
+ List of paths in host that needs to be converted
+
+ host_to_container_path_mapping : dict
+ Mapping of paths in host to the equivalent paths within the container
+
+ Returns
+ -------
+ list
+ Equivalent paths within the container
+ """
+
+ if not host_paths_to_convert:
+ # Nothing to do
+ return host_paths_to_convert
+
+ # Make sure the key is absolute host path. Relative paths are tricky to work with because two different
+ # relative paths can point to the same directory ("../foo", "../../foo")
+ mapping = {str(pathlib.Path(p).resolve()): v for p, v in host_to_container_path_mapping.items()}
+
+ result = []
+ for original_path in host_paths_to_convert:
+ abspath = str(pathlib.Path(original_path).resolve())
+
+ if abspath in mapping:
+ result.append(mapping[abspath])
+ else:
+ result.append(original_path)
+ LOG.debug("Cannot convert host path '%s' to its equivalent path within the container. "
+ "Host path is not mounted within the container", abspath)
+
+ return result
+
@staticmethod
def _get_image(runtime):
return "{}:build-{}".format(LambdaBuildContainer._IMAGE_REPO_NAME, runtime)
|
{"golden_diff": "diff --git a/samcli/local/docker/lambda_build_container.py b/samcli/local/docker/lambda_build_container.py\n--- a/samcli/local/docker/lambda_build_container.py\n+++ b/samcli/local/docker/lambda_build_container.py\n@@ -46,6 +46,18 @@\n \n container_dirs = LambdaBuildContainer._get_container_dirs(source_dir, manifest_dir)\n \n+ # `executable_search_paths` are provided as a list of paths on the host file system that needs to passed to\n+ # the builder. But these paths don't exist within the container. We use the following method to convert the\n+ # host paths to container paths. But if a host path is NOT mounted within the container, we will simply ignore\n+ # it. In essence, only when the path is already in the mounted path, can the path resolver within the\n+ # container even find the executable.\n+ executable_search_paths = LambdaBuildContainer._convert_to_container_dirs(\n+ host_paths_to_convert=executable_search_paths,\n+ host_to_container_path_mapping={\n+ source_dir: container_dirs[\"source_dir\"],\n+ manifest_dir: container_dirs[\"manifest_dir\"]\n+ })\n+\n request_json = self._make_request(protocol_version,\n language,\n dependency_manager,\n@@ -163,6 +175,54 @@\n \n return result\n \n+ @staticmethod\n+ def _convert_to_container_dirs(host_paths_to_convert, host_to_container_path_mapping):\n+ \"\"\"\n+ Use this method to convert a list of host paths to a list of equivalent paths within the container\n+ where the given host path is mounted. This is necessary when SAM CLI needs to pass path information to\n+ the Lambda Builder running within the container.\n+\n+ If a host path is not mounted within the container, then this method simply passes the path to the result\n+ without any changes.\n+\n+ Ex:\n+ [ \"/home/foo\", \"/home/bar\", \"/home/not/mounted\"] => [\"/tmp/source\", \"/tmp/manifest\", \"/home/not/mounted\"]\n+\n+ Parameters\n+ ----------\n+ host_paths_to_convert : list\n+ List of paths in host that needs to be converted\n+\n+ host_to_container_path_mapping : dict\n+ Mapping of paths in host to the equivalent paths within the container\n+\n+ Returns\n+ -------\n+ list\n+ Equivalent paths within the container\n+ \"\"\"\n+\n+ if not host_paths_to_convert:\n+ # Nothing to do\n+ return host_paths_to_convert\n+\n+ # Make sure the key is absolute host path. Relative paths are tricky to work with because two different\n+ # relative paths can point to the same directory (\"../foo\", \"../../foo\")\n+ mapping = {str(pathlib.Path(p).resolve()): v for p, v in host_to_container_path_mapping.items()}\n+\n+ result = []\n+ for original_path in host_paths_to_convert:\n+ abspath = str(pathlib.Path(original_path).resolve())\n+\n+ if abspath in mapping:\n+ result.append(mapping[abspath])\n+ else:\n+ result.append(original_path)\n+ LOG.debug(\"Cannot convert host path '%s' to its equivalent path within the container. \"\n+ \"Host path is not mounted within the container\", abspath)\n+\n+ return result\n+\n @staticmethod\n def _get_image(runtime):\n return \"{}:build-{}\".format(LambdaBuildContainer._IMAGE_REPO_NAME, runtime)\n", "issue": "`sam build` for Gradle using gradlew does not work with --use-container\n<!-- Make sure we don't have an existing Issue that reports the bug you are seeing (both open and closed). \r\nIf you do find an existing Issue, re-open or add a comment to that Issue instead of creating a new one. 
-->\r\n\r\n### Description\r\n\r\nBriefly describe the bug you are facing.\r\nWhen a Gradle project uses `gradlew` scripts, this file does not get picked up with building inside a container.\r\n\r\n### Steps to reproduce\r\n\r\nProvide steps to replicate.\r\nThis integration test sets everything up to repro this issue - https://github.com/awslabs/aws-sam-cli/blob/develop/tests/integration/buildcmd/test_build_cmd.py#L256\r\n\r\n\r\n### Observed result\r\n\r\nPlease provide command output with `--debug` flag set.\r\n`gradle` installation within the container is used instead of the `gradlew` script\r\n\r\n### Expected result\r\n\r\nDescribe what you expected.\r\n`gradlew` script is used to build the project \r\n\r\n### Additional environment details (Ex: Windows, Mac, Amazon Linux etc)\r\n\r\n1. OS:\r\n2. `sam --version`:\r\n\r\n`Add --debug flag to command you are running`\n", "before_files": [{"content": "\"\"\"\nRepresents Lambda Build Containers.\n\"\"\"\n\nimport json\nimport logging\n\ntry:\n import pathlib\nexcept ImportError:\n import pathlib2 as pathlib\n\nfrom .container import Container\n\nLOG = logging.getLogger(__name__)\n\n\nclass LambdaBuildContainer(Container):\n \"\"\"\n Class to manage Build containers that are capable of building AWS Lambda functions.\n This container mounts necessary folders, issues a command to the Lambda Builder CLI,\n and if the build was successful, copies back artifacts to the host filesystem\n \"\"\"\n\n _IMAGE_REPO_NAME = \"lambci/lambda\"\n _BUILDERS_EXECUTABLE = \"lambda-builders\"\n\n def __init__(self, # pylint: disable=too-many-locals\n protocol_version,\n language,\n dependency_manager,\n application_framework,\n source_dir,\n manifest_path,\n runtime,\n optimizations=None,\n options=None,\n executable_search_paths=None,\n log_level=None):\n\n abs_manifest_path = pathlib.Path(manifest_path).resolve()\n manifest_file_name = abs_manifest_path.name\n manifest_dir = str(abs_manifest_path.parent)\n\n source_dir = str(pathlib.Path(source_dir).resolve())\n\n container_dirs = LambdaBuildContainer._get_container_dirs(source_dir, manifest_dir)\n\n request_json = self._make_request(protocol_version,\n language,\n dependency_manager,\n application_framework,\n container_dirs,\n manifest_file_name,\n runtime,\n optimizations,\n options,\n executable_search_paths)\n\n image = LambdaBuildContainer._get_image(runtime)\n entry = LambdaBuildContainer._get_entrypoint(request_json)\n cmd = []\n\n additional_volumes = {\n # Manifest is mounted separately in order to support the case where manifest\n # is outside of source directory\n manifest_dir: {\n \"bind\": container_dirs[\"manifest_dir\"],\n \"mode\": \"ro\"\n }\n }\n\n env_vars = None\n if log_level:\n env_vars = {\n \"LAMBDA_BUILDERS_LOG_LEVEL\": log_level\n }\n\n super(LambdaBuildContainer, self).__init__(\n image,\n cmd,\n container_dirs[\"source_dir\"],\n source_dir,\n additional_volumes=additional_volumes,\n entrypoint=entry,\n env_vars=env_vars)\n\n @property\n def executable_name(self):\n return LambdaBuildContainer._BUILDERS_EXECUTABLE\n\n @staticmethod\n def _make_request(protocol_version,\n language,\n dependency_manager,\n application_framework,\n container_dirs,\n manifest_file_name,\n runtime,\n optimizations,\n options,\n executable_search_paths):\n\n return json.dumps({\n \"jsonschema\": \"2.0\",\n \"id\": 1,\n \"method\": \"LambdaBuilder.build\",\n \"params\": {\n \"__protocol_version\": protocol_version,\n \"capability\": {\n \"language\": language,\n \"dependency_manager\": 
dependency_manager,\n \"application_framework\": application_framework\n },\n \"source_dir\": container_dirs[\"source_dir\"],\n \"artifacts_dir\": container_dirs[\"artifacts_dir\"],\n \"scratch_dir\": container_dirs[\"scratch_dir\"],\n\n # Path is always inside a Linux container. So '/' is valid\n \"manifest_path\": \"{}/{}\".format(container_dirs[\"manifest_dir\"], manifest_file_name),\n\n \"runtime\": runtime,\n \"optimizations\": optimizations,\n \"options\": options,\n \"executable_search_paths\": executable_search_paths\n }\n })\n\n @staticmethod\n def _get_entrypoint(request_json):\n return [LambdaBuildContainer._BUILDERS_EXECUTABLE, request_json]\n\n @staticmethod\n def _get_container_dirs(source_dir, manifest_dir):\n \"\"\"\n Provides paths to directories within the container that is required by the builder\n\n Parameters\n ----------\n source_dir : str\n Path to the function source code\n\n manifest_dir : str\n Path to the directory containing manifest\n\n Returns\n -------\n dict\n Contains paths to source, artifacts, scratch & manifest directories\n \"\"\"\n base = \"/tmp/samcli\"\n result = {\n \"source_dir\": \"{}/source\".format(base),\n \"artifacts_dir\": \"{}/artifacts\".format(base),\n \"scratch_dir\": \"{}/scratch\".format(base),\n \"manifest_dir\": \"{}/manifest\".format(base)\n }\n\n if pathlib.PurePath(source_dir) == pathlib.PurePath(manifest_dir):\n # It is possible that the manifest resides within the source. In that case, we won't mount the manifest\n # directory separately.\n result[\"manifest_dir\"] = result[\"source_dir\"]\n\n return result\n\n @staticmethod\n def _get_image(runtime):\n return \"{}:build-{}\".format(LambdaBuildContainer._IMAGE_REPO_NAME, runtime)\n", "path": "samcli/local/docker/lambda_build_container.py"}], "after_files": [{"content": "\"\"\"\nRepresents Lambda Build Containers.\n\"\"\"\n\nimport json\nimport logging\n\ntry:\n import pathlib\nexcept ImportError:\n import pathlib2 as pathlib\n\nfrom .container import Container\n\nLOG = logging.getLogger(__name__)\n\n\nclass LambdaBuildContainer(Container):\n \"\"\"\n Class to manage Build containers that are capable of building AWS Lambda functions.\n This container mounts necessary folders, issues a command to the Lambda Builder CLI,\n and if the build was successful, copies back artifacts to the host filesystem\n \"\"\"\n\n _IMAGE_REPO_NAME = \"lambci/lambda\"\n _BUILDERS_EXECUTABLE = \"lambda-builders\"\n\n def __init__(self, # pylint: disable=too-many-locals\n protocol_version,\n language,\n dependency_manager,\n application_framework,\n source_dir,\n manifest_path,\n runtime,\n optimizations=None,\n options=None,\n executable_search_paths=None,\n log_level=None):\n\n abs_manifest_path = pathlib.Path(manifest_path).resolve()\n manifest_file_name = abs_manifest_path.name\n manifest_dir = str(abs_manifest_path.parent)\n\n source_dir = str(pathlib.Path(source_dir).resolve())\n\n container_dirs = LambdaBuildContainer._get_container_dirs(source_dir, manifest_dir)\n\n # `executable_search_paths` are provided as a list of paths on the host file system that needs to passed to\n # the builder. But these paths don't exist within the container. We use the following method to convert the\n # host paths to container paths. But if a host path is NOT mounted within the container, we will simply ignore\n # it. 
In essence, only when the path is already in the mounted path, can the path resolver within the\n # container even find the executable.\n executable_search_paths = LambdaBuildContainer._convert_to_container_dirs(\n host_paths_to_convert=executable_search_paths,\n host_to_container_path_mapping={\n source_dir: container_dirs[\"source_dir\"],\n manifest_dir: container_dirs[\"manifest_dir\"]\n })\n\n request_json = self._make_request(protocol_version,\n language,\n dependency_manager,\n application_framework,\n container_dirs,\n manifest_file_name,\n runtime,\n optimizations,\n options,\n executable_search_paths)\n\n image = LambdaBuildContainer._get_image(runtime)\n entry = LambdaBuildContainer._get_entrypoint(request_json)\n cmd = []\n\n additional_volumes = {\n # Manifest is mounted separately in order to support the case where manifest\n # is outside of source directory\n manifest_dir: {\n \"bind\": container_dirs[\"manifest_dir\"],\n \"mode\": \"ro\"\n }\n }\n\n env_vars = None\n if log_level:\n env_vars = {\n \"LAMBDA_BUILDERS_LOG_LEVEL\": log_level\n }\n\n super(LambdaBuildContainer, self).__init__(\n image,\n cmd,\n container_dirs[\"source_dir\"],\n source_dir,\n additional_volumes=additional_volumes,\n entrypoint=entry,\n env_vars=env_vars)\n\n @property\n def executable_name(self):\n return LambdaBuildContainer._BUILDERS_EXECUTABLE\n\n @staticmethod\n def _make_request(protocol_version,\n language,\n dependency_manager,\n application_framework,\n container_dirs,\n manifest_file_name,\n runtime,\n optimizations,\n options,\n executable_search_paths):\n\n return json.dumps({\n \"jsonschema\": \"2.0\",\n \"id\": 1,\n \"method\": \"LambdaBuilder.build\",\n \"params\": {\n \"__protocol_version\": protocol_version,\n \"capability\": {\n \"language\": language,\n \"dependency_manager\": dependency_manager,\n \"application_framework\": application_framework\n },\n \"source_dir\": container_dirs[\"source_dir\"],\n \"artifacts_dir\": container_dirs[\"artifacts_dir\"],\n \"scratch_dir\": container_dirs[\"scratch_dir\"],\n\n # Path is always inside a Linux container. So '/' is valid\n \"manifest_path\": \"{}/{}\".format(container_dirs[\"manifest_dir\"], manifest_file_name),\n\n \"runtime\": runtime,\n \"optimizations\": optimizations,\n \"options\": options,\n \"executable_search_paths\": executable_search_paths\n }\n })\n\n @staticmethod\n def _get_entrypoint(request_json):\n return [LambdaBuildContainer._BUILDERS_EXECUTABLE, request_json]\n\n @staticmethod\n def _get_container_dirs(source_dir, manifest_dir):\n \"\"\"\n Provides paths to directories within the container that is required by the builder\n\n Parameters\n ----------\n source_dir : str\n Path to the function source code\n\n manifest_dir : str\n Path to the directory containing manifest\n\n Returns\n -------\n dict\n Contains paths to source, artifacts, scratch & manifest directories\n \"\"\"\n base = \"/tmp/samcli\"\n result = {\n \"source_dir\": \"{}/source\".format(base),\n \"artifacts_dir\": \"{}/artifacts\".format(base),\n \"scratch_dir\": \"{}/scratch\".format(base),\n \"manifest_dir\": \"{}/manifest\".format(base)\n }\n\n if pathlib.PurePath(source_dir) == pathlib.PurePath(manifest_dir):\n # It is possible that the manifest resides within the source. 
In that case, we won't mount the manifest\n # directory separately.\n result[\"manifest_dir\"] = result[\"source_dir\"]\n\n return result\n\n @staticmethod\n def _convert_to_container_dirs(host_paths_to_convert, host_to_container_path_mapping):\n \"\"\"\n Use this method to convert a list of host paths to a list of equivalent paths within the container\n where the given host path is mounted. This is necessary when SAM CLI needs to pass path information to\n the Lambda Builder running within the container.\n\n If a host path is not mounted within the container, then this method simply passes the path to the result\n without any changes.\n\n Ex:\n [ \"/home/foo\", \"/home/bar\", \"/home/not/mounted\"] => [\"/tmp/source\", \"/tmp/manifest\", \"/home/not/mounted\"]\n\n Parameters\n ----------\n host_paths_to_convert : list\n List of paths in host that needs to be converted\n\n host_to_container_path_mapping : dict\n Mapping of paths in host to the equivalent paths within the container\n\n Returns\n -------\n list\n Equivalent paths within the container\n \"\"\"\n\n if not host_paths_to_convert:\n # Nothing to do\n return host_paths_to_convert\n\n # Make sure the key is absolute host path. Relative paths are tricky to work with because two different\n # relative paths can point to the same directory (\"../foo\", \"../../foo\")\n mapping = {str(pathlib.Path(p).resolve()): v for p, v in host_to_container_path_mapping.items()}\n\n result = []\n for original_path in host_paths_to_convert:\n abspath = str(pathlib.Path(original_path).resolve())\n\n if abspath in mapping:\n result.append(mapping[abspath])\n else:\n result.append(original_path)\n LOG.debug(\"Cannot convert host path '%s' to its equivalent path within the container. \"\n \"Host path is not mounted within the container\", abspath)\n\n return result\n\n @staticmethod\n def _get_image(runtime):\n return \"{}:build-{}\".format(LambdaBuildContainer._IMAGE_REPO_NAME, runtime)\n", "path": "samcli/local/docker/lambda_build_container.py"}]}
| 1,920 | 754 |
gh_patches_debug_8390 | rasdani/github-patches | git_diff | pydantic__pydantic-298 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Date annotation is parsed inconsistently depending on input data.
# Bug
I've tried compare two date fields of pydantic model's instances.
But I got: `TypeError: can't compare datetime.datetime to datetime.date`
I figured out that its a bug when parsing field annotated as date providing a datetime value.
Since one of my instances is created from data stored in persistence (which is typed as "datetime") and the other instance is created from a CSV, the first is treated as a datetime and the other is treated as date.
For bugs/questions:
* OS: **Linux 64-bit **
* Python version `import sys; print(sys.version)`: **3.7.1 (default, Oct 22 2018, 10:41:28) [GCC 8.2.1 20180831]**
* Pydantic version `import pydantic; print(pydantic.VERSION)`: **0.14**
```py
from datetime import date, datetime
from pydantic import BaseModel
class DateRange(BaseModel):
dt_start: date
dt_finish: date
data = DateRange(dt_start=datetime(2017,11, 7, 12, 14), dt_finish=datetime(2017,11, 26, 12, 14))
data_ = DateRange(dt_start='2017-01-01', dt_finish='2018-01-01')
print(type(data.dt_finish), type(data.dt_start))
print(type(data_.dt_finish), type(data_.dt_start))
...
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pydantic/datetime_parse.py`
Content:
```
1 """
2 Functions to parse datetime objects.
3
4 We're using regular expressions rather than time.strptime because:
5 - They provide both validation and parsing.
6 - They're more flexible for datetimes.
7 - The date/datetime/time constructors produce friendlier error messages.
8
9 Stolen from https://raw.githubusercontent.com/django/django/master/django/utils/dateparse.py at
10 9718fa2e8abe430c3526a9278dd976443d4ae3c6
11
12 Changed to:
13 * use standard python datetime types not django.utils.timezone
14 * raise ValueError when regex doesn't match rather than returning None
15 * support parsing unix timestamps for dates and datetimes
16 """
17 import re
18 from datetime import date, datetime, time, timedelta, timezone
19 from typing import Union
20
21 from . import errors
22 from .utils import change_exception
23
24 date_re = re.compile(r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})$')
25
26 time_re = re.compile(
27 r'(?P<hour>\d{1,2}):(?P<minute>\d{1,2})' r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
28 )
29
30 datetime_re = re.compile(
31 r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})'
32 r'[T ](?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
33 r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
34 r'(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$'
35 )
36
37 standard_duration_re = re.compile(
38 r'^'
39 r'(?:(?P<days>-?\d+) (days?, )?)?'
40 r'((?:(?P<hours>-?\d+):)(?=\d+:\d+))?'
41 r'(?:(?P<minutes>-?\d+):)?'
42 r'(?P<seconds>-?\d+)'
43 r'(?:\.(?P<microseconds>\d{1,6})\d{0,6})?'
44 r'$'
45 )
46
47 # Support the sections of ISO 8601 date representation that are accepted by timedelta
48 iso8601_duration_re = re.compile(
49 r'^(?P<sign>[-+]?)'
50 r'P'
51 r'(?:(?P<days>\d+(.\d+)?)D)?'
52 r'(?:T'
53 r'(?:(?P<hours>\d+(.\d+)?)H)?'
54 r'(?:(?P<minutes>\d+(.\d+)?)M)?'
55 r'(?:(?P<seconds>\d+(.\d+)?)S)?'
56 r')?'
57 r'$'
58 )
59
60 EPOCH = datetime(1970, 1, 1)
61 MS_WATERSHED = int(1e11) # if greater than this, the number is in ms (in seconds this is 3rd March 5138)
62 StrIntFloat = Union[str, int, float]
63
64
65 def get_numeric(value: StrIntFloat):
66 if isinstance(value, (int, float)):
67 return value
68 try:
69 return int(value)
70 except ValueError:
71 pass
72 try:
73 return float(value)
74 except ValueError:
75 pass
76
77
78 def from_unix_seconds(seconds: int) -> datetime:
79 while seconds > MS_WATERSHED:
80 seconds /= 1000
81 dt = EPOCH + timedelta(seconds=seconds)
82 return dt.replace(tzinfo=timezone.utc)
83
84
85 def parse_date(value: Union[date, StrIntFloat]) -> date:
86 """
87 Parse a date/int/float/string and return a datetime.date.
88
89 Raise ValueError if the input is well formatted but not a valid date.
90 Raise ValueError if the input isn't well formatted.
91 """
92 if isinstance(value, date):
93 return value
94
95 number = get_numeric(value)
96 if number is not None:
97 return from_unix_seconds(number).date()
98
99 match = date_re.match(value)
100 if not match:
101 raise errors.DateError()
102
103 kw = {k: int(v) for k, v in match.groupdict().items()}
104
105 with change_exception(errors.DateError, ValueError):
106 return date(**kw)
107
108
109 def parse_time(value: Union[time, str]) -> time:
110 """
111 Parse a time/string and return a datetime.time.
112
113 This function doesn't support time zone offsets.
114
115 Raise ValueError if the input is well formatted but not a valid time.
116 Raise ValueError if the input isn't well formatted, in particular if it contains an offset.
117 """
118 if isinstance(value, time):
119 return value
120
121 match = time_re.match(value)
122 if not match:
123 raise errors.TimeError()
124
125 kw = match.groupdict()
126 if kw['microsecond']:
127 kw['microsecond'] = kw['microsecond'].ljust(6, '0')
128
129 kw = {k: int(v) for k, v in kw.items() if v is not None}
130
131 with change_exception(errors.TimeError, ValueError):
132 return time(**kw)
133
134
135 def parse_datetime(value: Union[datetime, StrIntFloat]) -> datetime:
136 """
137 Parse a datetime/int/float/string and return a datetime.datetime.
138
139 This function supports time zone offsets. When the input contains one,
140 the output uses a timezone with a fixed offset from UTC.
141
142 Raise ValueError if the input is well formatted but not a valid datetime.
143 Raise ValueError if the input isn't well formatted.
144 """
145 if isinstance(value, datetime):
146 return value
147
148 number = get_numeric(value)
149 if number is not None:
150 return from_unix_seconds(number)
151
152 match = datetime_re.match(value)
153 if not match:
154 raise errors.DateTimeError()
155
156 kw = match.groupdict()
157 if kw['microsecond']:
158 kw['microsecond'] = kw['microsecond'].ljust(6, '0')
159
160 tzinfo = kw.pop('tzinfo')
161 if tzinfo == 'Z':
162 tzinfo = timezone.utc
163 elif tzinfo is not None:
164 offset_mins = int(tzinfo[-2:]) if len(tzinfo) > 3 else 0
165 offset = 60 * int(tzinfo[1:3]) + offset_mins
166 if tzinfo[0] == '-':
167 offset = -offset
168 tzinfo = timezone(timedelta(minutes=offset))
169
170 kw = {k: int(v) for k, v in kw.items() if v is not None}
171 kw['tzinfo'] = tzinfo
172
173 with change_exception(errors.DateTimeError, ValueError):
174 return datetime(**kw)
175
176
177 def parse_duration(value: StrIntFloat) -> timedelta:
178 """
179 Parse a duration int/float/string and return a datetime.timedelta.
180
181 The preferred format for durations in Django is '%d %H:%M:%S.%f'.
182
183 Also supports ISO 8601 representation.
184 """
185 if isinstance(value, timedelta):
186 return value
187
188 if isinstance(value, (int, float)):
189 # bellow code requires a string
190 value = str(value)
191
192 match = standard_duration_re.match(value) or iso8601_duration_re.match(value)
193 if not match:
194 raise errors.DurationError()
195
196 kw = match.groupdict()
197 sign = -1 if kw.pop('sign', '+') == '-' else 1
198 if kw.get('microseconds'):
199 kw['microseconds'] = kw['microseconds'].ljust(6, '0')
200
201 if kw.get('seconds') and kw.get('microseconds') and kw['seconds'].startswith('-'):
202 kw['microseconds'] = '-' + kw['microseconds']
203
204 kw = {k: float(v) for k, v in kw.items() if v is not None}
205
206 return sign * timedelta(**kw)
207
```
Path: `pydantic/version.py`
Content:
```
1 from distutils.version import StrictVersion
2
3 __all__ = ['VERSION']
4
5 VERSION = StrictVersion('0.14')
6
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pydantic/datetime_parse.py b/pydantic/datetime_parse.py
--- a/pydantic/datetime_parse.py
+++ b/pydantic/datetime_parse.py
@@ -90,7 +90,10 @@
Raise ValueError if the input isn't well formatted.
"""
if isinstance(value, date):
- return value
+ if isinstance(value, datetime):
+ return value.date()
+ else:
+ return value
number = get_numeric(value)
if number is not None:
diff --git a/pydantic/version.py b/pydantic/version.py
--- a/pydantic/version.py
+++ b/pydantic/version.py
@@ -2,4 +2,4 @@
__all__ = ['VERSION']
-VERSION = StrictVersion('0.14')
+VERSION = StrictVersion('0.15a1')
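A compact sketch of the coercion the patch adds; the real `parse_date` uses the regex and pydantic's error types shown above, and this simplified stand-in only demonstrates the `date`/`datetime` branch:

```python
from datetime import date, datetime
from typing import Union


def parse_date(value: Union[date, str]) -> date:
    # datetime is a subclass of date, so it must be checked explicitly
    # and narrowed with .date(), exactly as the patched branch does.
    if isinstance(value, date):
        return value.date() if isinstance(value, datetime) else value
    year, month, day = (int(part) for part in value.split("-"))
    return date(year, month, day)


assert parse_date(datetime(2017, 11, 7, 12, 14)) == date(2017, 11, 7)
assert parse_date("2017-01-01") == date(2017, 1, 1)
# Both results are plain dates, so dt_start/dt_finish ordering comparisons no
# longer raise "TypeError: can't compare datetime.datetime to datetime.date".
```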
|
{"golden_diff": "diff --git a/pydantic/datetime_parse.py b/pydantic/datetime_parse.py\n--- a/pydantic/datetime_parse.py\n+++ b/pydantic/datetime_parse.py\n@@ -90,7 +90,10 @@\n Raise ValueError if the input isn't well formatted.\n \"\"\"\n if isinstance(value, date):\n- return value\n+ if isinstance(value, datetime):\n+ return value.date()\n+ else:\n+ return value\n \n number = get_numeric(value)\n if number is not None:\ndiff --git a/pydantic/version.py b/pydantic/version.py\n--- a/pydantic/version.py\n+++ b/pydantic/version.py\n@@ -2,4 +2,4 @@\n \n __all__ = ['VERSION']\n \n-VERSION = StrictVersion('0.14')\n+VERSION = StrictVersion('0.15a1')\n", "issue": "Date annotation is parsed inconsistently depending on input data.\n# Bug\r\nI've tried compare two date fields of pydantic model's instances.\r\nBut I got: `TypeError: can't compare datetime.datetime to datetime.date`\r\n\r\nI figured out that its a bug when parsing field annotated as date providing a datetime value.\r\n\r\nSince one of my instances is created from data stored in persistence (which is typed as \"datetime\") and the other instance is created from a CSV, the first is treated as a datetime and the other is treated as date.\r\n\r\nFor bugs/questions:\r\n* OS: **Linux 64-bit **\r\n* Python version `import sys; print(sys.version)`: **3.7.1 (default, Oct 22 2018, 10:41:28) [GCC 8.2.1 20180831]**\r\n* Pydantic version `import pydantic; print(pydantic.VERSION)`: **0.14**\r\n\r\n```py\r\nfrom datetime import date, datetime\r\n\r\nfrom pydantic import BaseModel\r\n\r\n\r\nclass DateRange(BaseModel):\r\n dt_start: date\r\n dt_finish: date\r\n\r\n\r\n\r\ndata = DateRange(dt_start=datetime(2017,11, 7, 12, 14), dt_finish=datetime(2017,11, 26, 12, 14))\r\ndata_ = DateRange(dt_start='2017-01-01', dt_finish='2018-01-01')\r\n\r\nprint(type(data.dt_finish), type(data.dt_start))\r\nprint(type(data_.dt_finish), type(data_.dt_start))\r\n...\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\nFunctions to parse datetime objects.\n\nWe're using regular expressions rather than time.strptime because:\n- They provide both validation and parsing.\n- They're more flexible for datetimes.\n- The date/datetime/time constructors produce friendlier error messages.\n\nStolen from https://raw.githubusercontent.com/django/django/master/django/utils/dateparse.py at\n9718fa2e8abe430c3526a9278dd976443d4ae3c6\n\nChanged to:\n* use standard python datetime types not django.utils.timezone\n* raise ValueError when regex doesn't match rather than returning None\n* support parsing unix timestamps for dates and datetimes\n\"\"\"\nimport re\nfrom datetime import date, datetime, time, timedelta, timezone\nfrom typing import Union\n\nfrom . 
import errors\nfrom .utils import change_exception\n\ndate_re = re.compile(r'(?P<year>\\d{4})-(?P<month>\\d{1,2})-(?P<day>\\d{1,2})$')\n\ntime_re = re.compile(\n r'(?P<hour>\\d{1,2}):(?P<minute>\\d{1,2})' r'(?::(?P<second>\\d{1,2})(?:\\.(?P<microsecond>\\d{1,6})\\d{0,6})?)?'\n)\n\ndatetime_re = re.compile(\n r'(?P<year>\\d{4})-(?P<month>\\d{1,2})-(?P<day>\\d{1,2})'\n r'[T ](?P<hour>\\d{1,2}):(?P<minute>\\d{1,2})'\n r'(?::(?P<second>\\d{1,2})(?:\\.(?P<microsecond>\\d{1,6})\\d{0,6})?)?'\n r'(?P<tzinfo>Z|[+-]\\d{2}(?::?\\d{2})?)?$'\n)\n\nstandard_duration_re = re.compile(\n r'^'\n r'(?:(?P<days>-?\\d+) (days?, )?)?'\n r'((?:(?P<hours>-?\\d+):)(?=\\d+:\\d+))?'\n r'(?:(?P<minutes>-?\\d+):)?'\n r'(?P<seconds>-?\\d+)'\n r'(?:\\.(?P<microseconds>\\d{1,6})\\d{0,6})?'\n r'$'\n)\n\n# Support the sections of ISO 8601 date representation that are accepted by timedelta\niso8601_duration_re = re.compile(\n r'^(?P<sign>[-+]?)'\n r'P'\n r'(?:(?P<days>\\d+(.\\d+)?)D)?'\n r'(?:T'\n r'(?:(?P<hours>\\d+(.\\d+)?)H)?'\n r'(?:(?P<minutes>\\d+(.\\d+)?)M)?'\n r'(?:(?P<seconds>\\d+(.\\d+)?)S)?'\n r')?'\n r'$'\n)\n\nEPOCH = datetime(1970, 1, 1)\nMS_WATERSHED = int(1e11) # if greater than this, the number is in ms (in seconds this is 3rd March 5138)\nStrIntFloat = Union[str, int, float]\n\n\ndef get_numeric(value: StrIntFloat):\n if isinstance(value, (int, float)):\n return value\n try:\n return int(value)\n except ValueError:\n pass\n try:\n return float(value)\n except ValueError:\n pass\n\n\ndef from_unix_seconds(seconds: int) -> datetime:\n while seconds > MS_WATERSHED:\n seconds /= 1000\n dt = EPOCH + timedelta(seconds=seconds)\n return dt.replace(tzinfo=timezone.utc)\n\n\ndef parse_date(value: Union[date, StrIntFloat]) -> date:\n \"\"\"\n Parse a date/int/float/string and return a datetime.date.\n\n Raise ValueError if the input is well formatted but not a valid date.\n Raise ValueError if the input isn't well formatted.\n \"\"\"\n if isinstance(value, date):\n return value\n\n number = get_numeric(value)\n if number is not None:\n return from_unix_seconds(number).date()\n\n match = date_re.match(value)\n if not match:\n raise errors.DateError()\n\n kw = {k: int(v) for k, v in match.groupdict().items()}\n\n with change_exception(errors.DateError, ValueError):\n return date(**kw)\n\n\ndef parse_time(value: Union[time, str]) -> time:\n \"\"\"\n Parse a time/string and return a datetime.time.\n\n This function doesn't support time zone offsets.\n\n Raise ValueError if the input is well formatted but not a valid time.\n Raise ValueError if the input isn't well formatted, in particular if it contains an offset.\n \"\"\"\n if isinstance(value, time):\n return value\n\n match = time_re.match(value)\n if not match:\n raise errors.TimeError()\n\n kw = match.groupdict()\n if kw['microsecond']:\n kw['microsecond'] = kw['microsecond'].ljust(6, '0')\n\n kw = {k: int(v) for k, v in kw.items() if v is not None}\n\n with change_exception(errors.TimeError, ValueError):\n return time(**kw)\n\n\ndef parse_datetime(value: Union[datetime, StrIntFloat]) -> datetime:\n \"\"\"\n Parse a datetime/int/float/string and return a datetime.datetime.\n\n This function supports time zone offsets. 
When the input contains one,\n the output uses a timezone with a fixed offset from UTC.\n\n Raise ValueError if the input is well formatted but not a valid datetime.\n Raise ValueError if the input isn't well formatted.\n \"\"\"\n if isinstance(value, datetime):\n return value\n\n number = get_numeric(value)\n if number is not None:\n return from_unix_seconds(number)\n\n match = datetime_re.match(value)\n if not match:\n raise errors.DateTimeError()\n\n kw = match.groupdict()\n if kw['microsecond']:\n kw['microsecond'] = kw['microsecond'].ljust(6, '0')\n\n tzinfo = kw.pop('tzinfo')\n if tzinfo == 'Z':\n tzinfo = timezone.utc\n elif tzinfo is not None:\n offset_mins = int(tzinfo[-2:]) if len(tzinfo) > 3 else 0\n offset = 60 * int(tzinfo[1:3]) + offset_mins\n if tzinfo[0] == '-':\n offset = -offset\n tzinfo = timezone(timedelta(minutes=offset))\n\n kw = {k: int(v) for k, v in kw.items() if v is not None}\n kw['tzinfo'] = tzinfo\n\n with change_exception(errors.DateTimeError, ValueError):\n return datetime(**kw)\n\n\ndef parse_duration(value: StrIntFloat) -> timedelta:\n \"\"\"\n Parse a duration int/float/string and return a datetime.timedelta.\n\n The preferred format for durations in Django is '%d %H:%M:%S.%f'.\n\n Also supports ISO 8601 representation.\n \"\"\"\n if isinstance(value, timedelta):\n return value\n\n if isinstance(value, (int, float)):\n # bellow code requires a string\n value = str(value)\n\n match = standard_duration_re.match(value) or iso8601_duration_re.match(value)\n if not match:\n raise errors.DurationError()\n\n kw = match.groupdict()\n sign = -1 if kw.pop('sign', '+') == '-' else 1\n if kw.get('microseconds'):\n kw['microseconds'] = kw['microseconds'].ljust(6, '0')\n\n if kw.get('seconds') and kw.get('microseconds') and kw['seconds'].startswith('-'):\n kw['microseconds'] = '-' + kw['microseconds']\n\n kw = {k: float(v) for k, v in kw.items() if v is not None}\n\n return sign * timedelta(**kw)\n", "path": "pydantic/datetime_parse.py"}, {"content": "from distutils.version import StrictVersion\n\n__all__ = ['VERSION']\n\nVERSION = StrictVersion('0.14')\n", "path": "pydantic/version.py"}], "after_files": [{"content": "\"\"\"\nFunctions to parse datetime objects.\n\nWe're using regular expressions rather than time.strptime because:\n- They provide both validation and parsing.\n- They're more flexible for datetimes.\n- The date/datetime/time constructors produce friendlier error messages.\n\nStolen from https://raw.githubusercontent.com/django/django/master/django/utils/dateparse.py at\n9718fa2e8abe430c3526a9278dd976443d4ae3c6\n\nChanged to:\n* use standard python datetime types not django.utils.timezone\n* raise ValueError when regex doesn't match rather than returning None\n* support parsing unix timestamps for dates and datetimes\n\"\"\"\nimport re\nfrom datetime import date, datetime, time, timedelta, timezone\nfrom typing import Union\n\nfrom . 
import errors\nfrom .utils import change_exception\n\ndate_re = re.compile(r'(?P<year>\\d{4})-(?P<month>\\d{1,2})-(?P<day>\\d{1,2})$')\n\ntime_re = re.compile(\n r'(?P<hour>\\d{1,2}):(?P<minute>\\d{1,2})' r'(?::(?P<second>\\d{1,2})(?:\\.(?P<microsecond>\\d{1,6})\\d{0,6})?)?'\n)\n\ndatetime_re = re.compile(\n r'(?P<year>\\d{4})-(?P<month>\\d{1,2})-(?P<day>\\d{1,2})'\n r'[T ](?P<hour>\\d{1,2}):(?P<minute>\\d{1,2})'\n r'(?::(?P<second>\\d{1,2})(?:\\.(?P<microsecond>\\d{1,6})\\d{0,6})?)?'\n r'(?P<tzinfo>Z|[+-]\\d{2}(?::?\\d{2})?)?$'\n)\n\nstandard_duration_re = re.compile(\n r'^'\n r'(?:(?P<days>-?\\d+) (days?, )?)?'\n r'((?:(?P<hours>-?\\d+):)(?=\\d+:\\d+))?'\n r'(?:(?P<minutes>-?\\d+):)?'\n r'(?P<seconds>-?\\d+)'\n r'(?:\\.(?P<microseconds>\\d{1,6})\\d{0,6})?'\n r'$'\n)\n\n# Support the sections of ISO 8601 date representation that are accepted by timedelta\niso8601_duration_re = re.compile(\n r'^(?P<sign>[-+]?)'\n r'P'\n r'(?:(?P<days>\\d+(.\\d+)?)D)?'\n r'(?:T'\n r'(?:(?P<hours>\\d+(.\\d+)?)H)?'\n r'(?:(?P<minutes>\\d+(.\\d+)?)M)?'\n r'(?:(?P<seconds>\\d+(.\\d+)?)S)?'\n r')?'\n r'$'\n)\n\nEPOCH = datetime(1970, 1, 1)\nMS_WATERSHED = int(1e11) # if greater than this, the number is in ms (in seconds this is 3rd March 5138)\nStrIntFloat = Union[str, int, float]\n\n\ndef get_numeric(value: StrIntFloat):\n if isinstance(value, (int, float)):\n return value\n try:\n return int(value)\n except ValueError:\n pass\n try:\n return float(value)\n except ValueError:\n pass\n\n\ndef from_unix_seconds(seconds: int) -> datetime:\n while seconds > MS_WATERSHED:\n seconds /= 1000\n dt = EPOCH + timedelta(seconds=seconds)\n return dt.replace(tzinfo=timezone.utc)\n\n\ndef parse_date(value: Union[date, StrIntFloat]) -> date:\n \"\"\"\n Parse a date/int/float/string and return a datetime.date.\n\n Raise ValueError if the input is well formatted but not a valid date.\n Raise ValueError if the input isn't well formatted.\n \"\"\"\n if isinstance(value, date):\n if isinstance(value, datetime):\n return value.date()\n else:\n return value\n\n number = get_numeric(value)\n if number is not None:\n return from_unix_seconds(number).date()\n\n match = date_re.match(value)\n if not match:\n raise errors.DateError()\n\n kw = {k: int(v) for k, v in match.groupdict().items()}\n\n with change_exception(errors.DateError, ValueError):\n return date(**kw)\n\n\ndef parse_time(value: Union[time, str]) -> time:\n \"\"\"\n Parse a time/string and return a datetime.time.\n\n This function doesn't support time zone offsets.\n\n Raise ValueError if the input is well formatted but not a valid time.\n Raise ValueError if the input isn't well formatted, in particular if it contains an offset.\n \"\"\"\n if isinstance(value, time):\n return value\n\n match = time_re.match(value)\n if not match:\n raise errors.TimeError()\n\n kw = match.groupdict()\n if kw['microsecond']:\n kw['microsecond'] = kw['microsecond'].ljust(6, '0')\n\n kw = {k: int(v) for k, v in kw.items() if v is not None}\n\n with change_exception(errors.TimeError, ValueError):\n return time(**kw)\n\n\ndef parse_datetime(value: Union[datetime, StrIntFloat]) -> datetime:\n \"\"\"\n Parse a datetime/int/float/string and return a datetime.datetime.\n\n This function supports time zone offsets. 
When the input contains one,\n the output uses a timezone with a fixed offset from UTC.\n\n Raise ValueError if the input is well formatted but not a valid datetime.\n Raise ValueError if the input isn't well formatted.\n \"\"\"\n if isinstance(value, datetime):\n return value\n\n number = get_numeric(value)\n if number is not None:\n return from_unix_seconds(number)\n\n match = datetime_re.match(value)\n if not match:\n raise errors.DateTimeError()\n\n kw = match.groupdict()\n if kw['microsecond']:\n kw['microsecond'] = kw['microsecond'].ljust(6, '0')\n\n tzinfo = kw.pop('tzinfo')\n if tzinfo == 'Z':\n tzinfo = timezone.utc\n elif tzinfo is not None:\n offset_mins = int(tzinfo[-2:]) if len(tzinfo) > 3 else 0\n offset = 60 * int(tzinfo[1:3]) + offset_mins\n if tzinfo[0] == '-':\n offset = -offset\n tzinfo = timezone(timedelta(minutes=offset))\n\n kw = {k: int(v) for k, v in kw.items() if v is not None}\n kw['tzinfo'] = tzinfo\n\n with change_exception(errors.DateTimeError, ValueError):\n return datetime(**kw)\n\n\ndef parse_duration(value: StrIntFloat) -> timedelta:\n \"\"\"\n Parse a duration int/float/string and return a datetime.timedelta.\n\n The preferred format for durations in Django is '%d %H:%M:%S.%f'.\n\n Also supports ISO 8601 representation.\n \"\"\"\n if isinstance(value, timedelta):\n return value\n\n if isinstance(value, (int, float)):\n # bellow code requires a string\n value = str(value)\n\n match = standard_duration_re.match(value) or iso8601_duration_re.match(value)\n if not match:\n raise errors.DurationError()\n\n kw = match.groupdict()\n sign = -1 if kw.pop('sign', '+') == '-' else 1\n if kw.get('microseconds'):\n kw['microseconds'] = kw['microseconds'].ljust(6, '0')\n\n if kw.get('seconds') and kw.get('microseconds') and kw['seconds'].startswith('-'):\n kw['microseconds'] = '-' + kw['microseconds']\n\n kw = {k: float(v) for k, v in kw.items() if v is not None}\n\n return sign * timedelta(**kw)\n", "path": "pydantic/datetime_parse.py"}, {"content": "from distutils.version import StrictVersion\n\n__all__ = ['VERSION']\n\nVERSION = StrictVersion('0.15a1')\n", "path": "pydantic/version.py"}]}
| 2,996 | 190 |
gh_patches_debug_35871 | rasdani/github-patches | git_diff | akvo__akvo-rsr-2156 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
RSS feeds give a 504
For example:
- http://rsr.akvo.org/rss/org-updates/273/
- http://rsr.akvo.org/rss/updates/788/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rsr/feeds.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 import re
8
9 from xml.sax.saxutils import XMLGenerator
10
11 from django.contrib.syndication.views import FeedDoesNotExist, Feed
12 from django.core.urlresolvers import reverse
13 from django.shortcuts import get_object_or_404
14 from django.utils.feedgenerator import Rss201rev2Feed
15 from django.utils.translation import ugettext_lazy as _
16
17 from akvo.rsr.models import Project, ProjectUpdate, Organisation
18
19
20 def __dict_replace(s, d):
21 """Replace substrings of a string using a dictionary."""
22 for key, value in d.items():
23 s = s.replace(key, value)
24 return s
25
26 def __escape(data, entities):
27 # must do ampersand first
28 data = data.replace("&", "&")
29 data = data.replace(">", ">")
30 data = data.replace("<", "<")
31 if entities:
32 data = __dict_replace(data, entities)
33 return data
34
35 def escape(data, entities={}):
36 """Modification to xml.sax.saxutils.escape to that detects CDATA blocks that are not escaped
37
38 Escape &, <, and > in a string of data.
39
40 You can escape other strings of data by passing a dictionary as
41 the optional entities parameter. The keys and values must all be
42 strings; each key will be replaced with its corresponding value.
43
44 """
45 # find character data, re.DOTALL includes linefeed in .
46 pattern = re.compile('<!\[CDATA\[.*\]\]>', re.DOTALL)
47 iterator = pattern.finditer(data)
48 start = 0
49 bits = []
50 for match in iterator:
51 #grab chunk before first match
52 bit = data[start:match.span()[0]]
53 bit = __escape(bit, entities)
54 bits.append(bit)
55 #grab match
56 bit = data[match.span()[0]:match.span()[1]]
57 bits.extend(bit)
58 start = match.span()[1]
59 # escape tail bit after last match
60 bit = data[start:]
61 bit = __escape(bit, entities)
62 bits.extend(bit)
63 data = ''.join(bits)
64 return data
65
66
67 class RSRSimplerXMLGenerator(XMLGenerator):
68 """subclassed to be able to call custom escape() function, see above
69 """
70 def characters(self, content):
71 self._write(escape(content))
72
73 def addQuickElement(self, name, contents=None, attrs=None):
74 "Convenience method for adding an element with no children"
75 if attrs is None: attrs = {}
76 self.startElement(name, attrs)
77 if contents is not None:
78 self.characters(contents)
79 self.endElement(name)
80
81
82 class RSRMediaRssFeed(Rss201rev2Feed):
83 def rss_attributes(self):
84 attrs = super(RSRMediaRssFeed, self).rss_attributes()
85 attrs['xmlns:media'] = 'http://search.yahoo.com/mrss/'
86 attrs['xmlns:atom'] = 'http://www.w3.org/2005/Atom'
87 return attrs
88
89 def add_item_elements(self, handler, item):
90 """Callback to add elements to each item (item/entry) element."""
91 super(RSRMediaRssFeed, self).add_item_elements(handler, item)
92
93 if 'media:title' in item:
94 handler.addQuickElement(u"media:title", item['title'])
95 if 'media:description' in item:
96 handler.addQuickElement(u"media:description", item['media:description'])
97 if 'media:credit' in item:
98 handler.addQuickElement(u"media:credit", item['media:credit'])
99
100 if 'content_url' in item:
101 content = dict(url=item['content_url'])
102 if 'content_width' in item:
103 content['width'] = str(item['content_width'])
104 if 'content_height' in item:
105 content['height'] = str(item['content_height'])
106 handler.addQuickElement(u"media:content", '', content)
107
108 if 'thumbnail_url' in item:
109 thumbnail = dict(url=item['thumbnail_url'])
110 if 'thumbnail_width' in item:
111 thumbnail['width'] = str(item['thumbnail_width'])
112 if 'thumbnail_height' in item:
113 thumbnail['height'] = str(item['thumbnail_height'])
114 handler.addQuickElement(u"media:thumbnail", '', thumbnail)
115
116 if 'keywords' in item:
117 handler.addQuickElement(u"media:keywords", item['keywords'])
118
119 def write(self, outfile, encoding):
120 handler = RSRSimplerXMLGenerator(outfile, encoding)
121 handler.startDocument()
122 handler.startElement(u"rss", self.rss_attributes())
123 handler.startElement(u"channel", self.root_attributes())
124 self.add_root_elements(handler)
125 self.write_items(handler)
126 self.endChannelElement(handler)
127 handler.endElement(u"rss")
128
129 class UpdateFeed(Feed):
130 """base class generating Update feeds
131 """
132 feed_type = RSRMediaRssFeed
133
134 def link(self, obj):
135 if not obj:
136 raise FeedDoesNotExist
137 return obj.get_absolute_url()
138
139 def item_link(self, item):
140 return item.get_absolute_url()
141
142 def item_title(self, item):
143 return item.title
144
145 def item_description(self, item):
146 try:
147 size = item.photo.size
148 return '<![CDATA[<p><a href="%s"><img src="%s" alt="" /></a></p><p>%s</p>]]>' % (
149 item.get_absolute_url(),
150 item.photo.thumbnail.absolute_url,
151 item.text,
152 )
153 except:
154 return item.text
155
156 def item_pubdate(self, item):
157 return item.created_at
158
159 def item_author_name(self, item):
160 return item.user.get_full_name()
161
162 def item_credit(self, item):
163 return item.photo_credit
164
165 def item_extra_kwargs(self, item):
166 """return a dictionary to the feedgenerator for each item to be added to the feed.
167 """
168 try:
169 size = item.photo.size
170 photo = item.photo
171 kwargs = {
172 'media:title': item.title,
173 'media:description': item.photo_caption,
174 'media:credit': item.photo_credit,
175 'content_url': photo.url,
176 'content_width': photo.width,
177 'content_height': photo.height,
178 'thumbnail_url': photo.thumbnail.absolute_url,
179 'thumbnail_width': photo.thumbnail.width(),
180 'thumbnail_height': photo.thumbnail.height(),
181 }
182 return kwargs
183 except:
184 return {}
185
186
187 class ProjectUpdates(UpdateFeed):
188 """RSS feed for last 50 RSR updates of a project."""
189 def get_object(self, request, project_id):
190 return Project.objects.get(pk__exact=project_id)
191
192 def title(self, obj):
193 return _(u'Akvo RSR project %(id)d: %(project_title)s') % {
194 'id': obj.id,
195 'project_title': obj.title
196 }
197
198 def description(self, obj):
199 return _(u'Project updates for project %(project_title)s') % {
200 'project_title': obj.title
201 }
202
203 def items(self, obj):
204 # Limited to 50 items to prevent gateway timeouts.
205 return ProjectUpdate.objects.filter(project__id=obj.id).order_by('-id')[:50]
206
207
208 class OrganisationUpdates(UpdateFeed):
209 """RSS feed for last 50 RSR updates of an organisation."""
210 feed_type = RSRMediaRssFeed
211
212 def get_object(self, request, org_id):
213 return get_object_or_404(Organisation, id=int(org_id))
214
215 def title(self, obj):
216 return _(u'Projects of %(org_name)s') % {'org_name':obj.name,}
217
218 def description(self, obj):
219 if obj.name == obj.long_name:
220 return _(u"Project updates for projects partnered by %(org_name)s") % {
221 'org_name': obj.name
222 }
223 else:
224 return _(
225 u"Project updates for projects partnered by %(org_name)s - %(long_name)s"
226 ) % {'org_name': obj.name, 'long_name': obj.long_name}
227
228 def items(self, obj):
229 # Limited to 50 items to prevent gateway timeouts.
230 return obj.published_projects().all_updates()[:50]
231
232 def item_title(self, item):
233 return _(
234 u'Project %(project_id)d - %(project_title)s: %(update_title)s'
235 ) % {
236 'project_id': item.project.id,
237 'project_title': item.project.title,
238 'update_title': item.title
239 }
240
241
242 class AllProjectUpdates(UpdateFeed):
243 """RSS feed for last 50 RSR updates."""
244 title = _(u'Last 50 RSR project updates')
245
246 def link(self):
247 return reverse('update-directory')
248
249 description = _(u'Project updates for all Akvo RSR projects')
250
251 def items(self):
252 # Limited to 50 items to prevent gateway timeouts.
253 return ProjectUpdate.objects.select_related().order_by('-id')[:50]
254
255 def item_title(self, item):
256 return _(
257 u'Project %(project_id)d - %(project_title)s: %(update_title)s'
258 ) % {
259 'project_id': item.project.id,
260 'project_title': item.project.title,
261 'update_title': item.title
262 }
263
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/akvo/rsr/feeds.py b/akvo/rsr/feeds.py
--- a/akvo/rsr/feeds.py
+++ b/akvo/rsr/feeds.py
@@ -185,7 +185,7 @@
class ProjectUpdates(UpdateFeed):
- """RSS feed for last 50 RSR updates of a project."""
+ """RSS feed for last 25 RSR updates of a project."""
def get_object(self, request, project_id):
return Project.objects.get(pk__exact=project_id)
@@ -201,12 +201,12 @@
}
def items(self, obj):
- # Limited to 50 items to prevent gateway timeouts.
- return ProjectUpdate.objects.filter(project__id=obj.id).order_by('-id')[:50]
+ # Limited to 25 items to prevent gateway timeouts.
+ return ProjectUpdate.objects.filter(project__id=obj.id).order_by('-id')[:25]
class OrganisationUpdates(UpdateFeed):
- """RSS feed for last 50 RSR updates of an organisation."""
+ """RSS feed for last 25 RSR updates of an organisation."""
feed_type = RSRMediaRssFeed
def get_object(self, request, org_id):
@@ -226,8 +226,8 @@
) % {'org_name': obj.name, 'long_name': obj.long_name}
def items(self, obj):
- # Limited to 50 items to prevent gateway timeouts.
- return obj.published_projects().all_updates()[:50]
+ # Limited to 25 items to prevent gateway timeouts.
+ return obj.published_projects().all_updates()[:25]
def item_title(self, item):
return _(
@@ -240,8 +240,8 @@
class AllProjectUpdates(UpdateFeed):
- """RSS feed for last 50 RSR updates."""
- title = _(u'Last 50 RSR project updates')
+ """RSS feed for last 25 RSR updates."""
+ title = _(u'Last 25 RSR project updates')
def link(self):
return reverse('update-directory')
@@ -249,8 +249,8 @@
description = _(u'Project updates for all Akvo RSR projects')
def items(self):
- # Limited to 50 items to prevent gateway timeouts.
- return ProjectUpdate.objects.select_related().order_by('-id')[:50]
+ # Limited to 25 items to prevent gateway timeouts.
+ return ProjectUpdate.objects.select_related().order_by('-id')[:25]
def item_title(self, item):
return _(
|
{"golden_diff": "diff --git a/akvo/rsr/feeds.py b/akvo/rsr/feeds.py\n--- a/akvo/rsr/feeds.py\n+++ b/akvo/rsr/feeds.py\n@@ -185,7 +185,7 @@\n \n \n class ProjectUpdates(UpdateFeed):\n- \"\"\"RSS feed for last 50 RSR updates of a project.\"\"\"\n+ \"\"\"RSS feed for last 25 RSR updates of a project.\"\"\"\n def get_object(self, request, project_id):\n return Project.objects.get(pk__exact=project_id)\n \n@@ -201,12 +201,12 @@\n }\n \n def items(self, obj):\n- # Limited to 50 items to prevent gateway timeouts.\n- return ProjectUpdate.objects.filter(project__id=obj.id).order_by('-id')[:50]\n+ # Limited to 25 items to prevent gateway timeouts.\n+ return ProjectUpdate.objects.filter(project__id=obj.id).order_by('-id')[:25]\n \n \n class OrganisationUpdates(UpdateFeed):\n- \"\"\"RSS feed for last 50 RSR updates of an organisation.\"\"\"\n+ \"\"\"RSS feed for last 25 RSR updates of an organisation.\"\"\"\n feed_type = RSRMediaRssFeed\n \n def get_object(self, request, org_id):\n@@ -226,8 +226,8 @@\n ) % {'org_name': obj.name, 'long_name': obj.long_name}\n \n def items(self, obj):\n- # Limited to 50 items to prevent gateway timeouts.\n- return obj.published_projects().all_updates()[:50]\n+ # Limited to 25 items to prevent gateway timeouts.\n+ return obj.published_projects().all_updates()[:25]\n \n def item_title(self, item):\n return _(\n@@ -240,8 +240,8 @@\n \n \n class AllProjectUpdates(UpdateFeed):\n- \"\"\"RSS feed for last 50 RSR updates.\"\"\"\n- title = _(u'Last 50 RSR project updates')\n+ \"\"\"RSS feed for last 25 RSR updates.\"\"\"\n+ title = _(u'Last 25 RSR project updates')\n \n def link(self):\n return reverse('update-directory')\n@@ -249,8 +249,8 @@\n description = _(u'Project updates for all Akvo RSR projects')\n \n def items(self):\n- # Limited to 50 items to prevent gateway timeouts.\n- return ProjectUpdate.objects.select_related().order_by('-id')[:50]\n+ # Limited to 25 items to prevent gateway timeouts.\n+ return ProjectUpdate.objects.select_related().order_by('-id')[:25]\n \n def item_title(self, item):\n return _(\n", "issue": "RSS feeds give a 504\nFor example: \n- http://rsr.akvo.org/rss/org-updates/273/\n- http://rsr.akvo.org/rss/updates/788/\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module. 
\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nimport re\n\nfrom xml.sax.saxutils import XMLGenerator\n\nfrom django.contrib.syndication.views import FeedDoesNotExist, Feed\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.feedgenerator import Rss201rev2Feed\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom akvo.rsr.models import Project, ProjectUpdate, Organisation\n\n\ndef __dict_replace(s, d):\n \"\"\"Replace substrings of a string using a dictionary.\"\"\"\n for key, value in d.items():\n s = s.replace(key, value)\n return s\n\ndef __escape(data, entities):\n # must do ampersand first\n data = data.replace(\"&\", \"&\")\n data = data.replace(\">\", \">\")\n data = data.replace(\"<\", \"<\")\n if entities:\n data = __dict_replace(data, entities)\n return data\n\ndef escape(data, entities={}):\n \"\"\"Modification to xml.sax.saxutils.escape to that detects CDATA blocks that are not escaped\n\n Escape &, <, and > in a string of data.\n\n You can escape other strings of data by passing a dictionary as\n the optional entities parameter. The keys and values must all be\n strings; each key will be replaced with its corresponding value.\n\n \"\"\"\n # find character data, re.DOTALL includes linefeed in .\n pattern = re.compile('<!\\[CDATA\\[.*\\]\\]>', re.DOTALL)\n iterator = pattern.finditer(data)\n start = 0\n bits = []\n for match in iterator:\n #grab chunk before first match\n bit = data[start:match.span()[0]]\n bit = __escape(bit, entities)\n bits.append(bit)\n #grab match\n bit = data[match.span()[0]:match.span()[1]]\n bits.extend(bit)\n start = match.span()[1]\n # escape tail bit after last match\n bit = data[start:]\n bit = __escape(bit, entities)\n bits.extend(bit)\n data = ''.join(bits)\n return data\n\n\nclass RSRSimplerXMLGenerator(XMLGenerator):\n \"\"\"subclassed to be able to call custom escape() function, see above\n \"\"\"\n def characters(self, content):\n self._write(escape(content))\n\n def addQuickElement(self, name, contents=None, attrs=None):\n \"Convenience method for adding an element with no children\"\n if attrs is None: attrs = {}\n self.startElement(name, attrs)\n if contents is not None:\n self.characters(contents)\n self.endElement(name)\n\n\nclass RSRMediaRssFeed(Rss201rev2Feed):\n def rss_attributes(self):\n attrs = super(RSRMediaRssFeed, self).rss_attributes()\n attrs['xmlns:media'] = 'http://search.yahoo.com/mrss/'\n attrs['xmlns:atom'] = 'http://www.w3.org/2005/Atom'\n return attrs\n\n def add_item_elements(self, handler, item):\n \"\"\"Callback to add elements to each item (item/entry) element.\"\"\"\n super(RSRMediaRssFeed, self).add_item_elements(handler, item)\n\n if 'media:title' in item:\n handler.addQuickElement(u\"media:title\", item['title'])\n if 'media:description' in item:\n handler.addQuickElement(u\"media:description\", item['media:description'])\n if 'media:credit' in item:\n handler.addQuickElement(u\"media:credit\", item['media:credit'])\n\n if 'content_url' in item:\n content = dict(url=item['content_url'])\n if 'content_width' in item:\n content['width'] = str(item['content_width'])\n if 'content_height' in item:\n content['height'] = str(item['content_height'])\n handler.addQuickElement(u\"media:content\", '', content)\n\n if 'thumbnail_url' in item:\n thumbnail = dict(url=item['thumbnail_url'])\n if 'thumbnail_width' in item:\n thumbnail['width'] = str(item['thumbnail_width'])\n if 'thumbnail_height' in 
item:\n thumbnail['height'] = str(item['thumbnail_height'])\n handler.addQuickElement(u\"media:thumbnail\", '', thumbnail)\n\n if 'keywords' in item:\n handler.addQuickElement(u\"media:keywords\", item['keywords'])\n\n def write(self, outfile, encoding):\n handler = RSRSimplerXMLGenerator(outfile, encoding)\n handler.startDocument()\n handler.startElement(u\"rss\", self.rss_attributes())\n handler.startElement(u\"channel\", self.root_attributes())\n self.add_root_elements(handler)\n self.write_items(handler)\n self.endChannelElement(handler)\n handler.endElement(u\"rss\")\n\nclass UpdateFeed(Feed):\n \"\"\"base class generating Update feeds\n \"\"\"\n feed_type = RSRMediaRssFeed\n\n def link(self, obj):\n if not obj:\n raise FeedDoesNotExist\n return obj.get_absolute_url()\n\n def item_link(self, item):\n return item.get_absolute_url()\n\n def item_title(self, item):\n return item.title\n\n def item_description(self, item):\n try:\n size = item.photo.size\n return '<![CDATA[<p><a href=\"%s\"><img src=\"%s\" alt=\"\" /></a></p><p>%s</p>]]>' % (\n item.get_absolute_url(),\n item.photo.thumbnail.absolute_url,\n item.text,\n )\n except:\n return item.text\n\n def item_pubdate(self, item):\n return item.created_at\n\n def item_author_name(self, item):\n return item.user.get_full_name()\n\n def item_credit(self, item):\n return item.photo_credit\n\n def item_extra_kwargs(self, item):\n \"\"\"return a dictionary to the feedgenerator for each item to be added to the feed.\n \"\"\"\n try:\n size = item.photo.size\n photo = item.photo\n kwargs = {\n 'media:title': item.title,\n 'media:description': item.photo_caption,\n 'media:credit': item.photo_credit,\n 'content_url': photo.url,\n 'content_width': photo.width,\n 'content_height': photo.height,\n 'thumbnail_url': photo.thumbnail.absolute_url,\n 'thumbnail_width': photo.thumbnail.width(),\n 'thumbnail_height': photo.thumbnail.height(),\n }\n return kwargs\n except:\n return {}\n\n\nclass ProjectUpdates(UpdateFeed):\n \"\"\"RSS feed for last 50 RSR updates of a project.\"\"\"\n def get_object(self, request, project_id):\n return Project.objects.get(pk__exact=project_id)\n\n def title(self, obj):\n return _(u'Akvo RSR project %(id)d: %(project_title)s') % {\n 'id': obj.id,\n 'project_title': obj.title\n }\n\n def description(self, obj):\n return _(u'Project updates for project %(project_title)s') % {\n 'project_title': obj.title\n }\n\n def items(self, obj):\n # Limited to 50 items to prevent gateway timeouts.\n return ProjectUpdate.objects.filter(project__id=obj.id).order_by('-id')[:50]\n\n\nclass OrganisationUpdates(UpdateFeed):\n \"\"\"RSS feed for last 50 RSR updates of an organisation.\"\"\"\n feed_type = RSRMediaRssFeed\n\n def get_object(self, request, org_id):\n return get_object_or_404(Organisation, id=int(org_id))\n\n def title(self, obj):\n return _(u'Projects of %(org_name)s') % {'org_name':obj.name,}\n\n def description(self, obj):\n if obj.name == obj.long_name:\n return _(u\"Project updates for projects partnered by %(org_name)s\") % {\n 'org_name': obj.name\n }\n else:\n return _(\n u\"Project updates for projects partnered by %(org_name)s - %(long_name)s\"\n ) % {'org_name': obj.name, 'long_name': obj.long_name}\n\n def items(self, obj):\n # Limited to 50 items to prevent gateway timeouts.\n return obj.published_projects().all_updates()[:50]\n\n def item_title(self, item):\n return _(\n u'Project %(project_id)d - %(project_title)s: %(update_title)s'\n ) % {\n 'project_id': item.project.id,\n 'project_title': item.project.title,\n 
'update_title': item.title\n }\n\n\nclass AllProjectUpdates(UpdateFeed):\n \"\"\"RSS feed for last 50 RSR updates.\"\"\"\n title = _(u'Last 50 RSR project updates')\n \n def link(self):\n return reverse('update-directory')\n\n description = _(u'Project updates for all Akvo RSR projects')\n\n def items(self):\n # Limited to 50 items to prevent gateway timeouts.\n return ProjectUpdate.objects.select_related().order_by('-id')[:50]\n\n def item_title(self, item):\n return _(\n u'Project %(project_id)d - %(project_title)s: %(update_title)s'\n ) % {\n 'project_id': item.project.id,\n 'project_title': item.project.title,\n 'update_title': item.title\n }\n", "path": "akvo/rsr/feeds.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module. \n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nimport re\n\nfrom xml.sax.saxutils import XMLGenerator\n\nfrom django.contrib.syndication.views import FeedDoesNotExist, Feed\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.feedgenerator import Rss201rev2Feed\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom akvo.rsr.models import Project, ProjectUpdate, Organisation\n\n\ndef __dict_replace(s, d):\n \"\"\"Replace substrings of a string using a dictionary.\"\"\"\n for key, value in d.items():\n s = s.replace(key, value)\n return s\n\ndef __escape(data, entities):\n # must do ampersand first\n data = data.replace(\"&\", \"&\")\n data = data.replace(\">\", \">\")\n data = data.replace(\"<\", \"<\")\n if entities:\n data = __dict_replace(data, entities)\n return data\n\ndef escape(data, entities={}):\n \"\"\"Modification to xml.sax.saxutils.escape to that detects CDATA blocks that are not escaped\n\n Escape &, <, and > in a string of data.\n\n You can escape other strings of data by passing a dictionary as\n the optional entities parameter. 
The keys and values must all be\n strings; each key will be replaced with its corresponding value.\n\n \"\"\"\n # find character data, re.DOTALL includes linefeed in .\n pattern = re.compile('<!\\[CDATA\\[.*\\]\\]>', re.DOTALL)\n iterator = pattern.finditer(data)\n start = 0\n bits = []\n for match in iterator:\n #grab chunk before first match\n bit = data[start:match.span()[0]]\n bit = __escape(bit, entities)\n bits.append(bit)\n #grab match\n bit = data[match.span()[0]:match.span()[1]]\n bits.extend(bit)\n start = match.span()[1]\n # escape tail bit after last match\n bit = data[start:]\n bit = __escape(bit, entities)\n bits.extend(bit)\n data = ''.join(bits)\n return data\n\n\nclass RSRSimplerXMLGenerator(XMLGenerator):\n \"\"\"subclassed to be able to call custom escape() function, see above\n \"\"\"\n def characters(self, content):\n self._write(escape(content))\n\n def addQuickElement(self, name, contents=None, attrs=None):\n \"Convenience method for adding an element with no children\"\n if attrs is None: attrs = {}\n self.startElement(name, attrs)\n if contents is not None:\n self.characters(contents)\n self.endElement(name)\n\n\nclass RSRMediaRssFeed(Rss201rev2Feed):\n def rss_attributes(self):\n attrs = super(RSRMediaRssFeed, self).rss_attributes()\n attrs['xmlns:media'] = 'http://search.yahoo.com/mrss/'\n attrs['xmlns:atom'] = 'http://www.w3.org/2005/Atom'\n return attrs\n\n def add_item_elements(self, handler, item):\n \"\"\"Callback to add elements to each item (item/entry) element.\"\"\"\n super(RSRMediaRssFeed, self).add_item_elements(handler, item)\n\n if 'media:title' in item:\n handler.addQuickElement(u\"media:title\", item['title'])\n if 'media:description' in item:\n handler.addQuickElement(u\"media:description\", item['media:description'])\n if 'media:credit' in item:\n handler.addQuickElement(u\"media:credit\", item['media:credit'])\n\n if 'content_url' in item:\n content = dict(url=item['content_url'])\n if 'content_width' in item:\n content['width'] = str(item['content_width'])\n if 'content_height' in item:\n content['height'] = str(item['content_height'])\n handler.addQuickElement(u\"media:content\", '', content)\n\n if 'thumbnail_url' in item:\n thumbnail = dict(url=item['thumbnail_url'])\n if 'thumbnail_width' in item:\n thumbnail['width'] = str(item['thumbnail_width'])\n if 'thumbnail_height' in item:\n thumbnail['height'] = str(item['thumbnail_height'])\n handler.addQuickElement(u\"media:thumbnail\", '', thumbnail)\n\n if 'keywords' in item:\n handler.addQuickElement(u\"media:keywords\", item['keywords'])\n\n def write(self, outfile, encoding):\n handler = RSRSimplerXMLGenerator(outfile, encoding)\n handler.startDocument()\n handler.startElement(u\"rss\", self.rss_attributes())\n handler.startElement(u\"channel\", self.root_attributes())\n self.add_root_elements(handler)\n self.write_items(handler)\n self.endChannelElement(handler)\n handler.endElement(u\"rss\")\n\nclass UpdateFeed(Feed):\n \"\"\"base class generating Update feeds\n \"\"\"\n feed_type = RSRMediaRssFeed\n\n def link(self, obj):\n if not obj:\n raise FeedDoesNotExist\n return obj.get_absolute_url()\n\n def item_link(self, item):\n return item.get_absolute_url()\n\n def item_title(self, item):\n return item.title\n\n def item_description(self, item):\n try:\n size = item.photo.size\n return '<![CDATA[<p><a href=\"%s\"><img src=\"%s\" alt=\"\" /></a></p><p>%s</p>]]>' % (\n item.get_absolute_url(),\n item.photo.thumbnail.absolute_url,\n item.text,\n )\n except:\n return item.text\n\n def 
item_pubdate(self, item):\n return item.created_at\n\n def item_author_name(self, item):\n return item.user.get_full_name()\n\n def item_credit(self, item):\n return item.photo_credit\n\n def item_extra_kwargs(self, item):\n \"\"\"return a dictionary to the feedgenerator for each item to be added to the feed.\n \"\"\"\n try:\n size = item.photo.size\n photo = item.photo\n kwargs = {\n 'media:title': item.title,\n 'media:description': item.photo_caption,\n 'media:credit': item.photo_credit,\n 'content_url': photo.url,\n 'content_width': photo.width,\n 'content_height': photo.height,\n 'thumbnail_url': photo.thumbnail.absolute_url,\n 'thumbnail_width': photo.thumbnail.width(),\n 'thumbnail_height': photo.thumbnail.height(),\n }\n return kwargs\n except:\n return {}\n\n\nclass ProjectUpdates(UpdateFeed):\n \"\"\"RSS feed for last 25 RSR updates of a project.\"\"\"\n def get_object(self, request, project_id):\n return Project.objects.get(pk__exact=project_id)\n\n def title(self, obj):\n return _(u'Akvo RSR project %(id)d: %(project_title)s') % {\n 'id': obj.id,\n 'project_title': obj.title\n }\n\n def description(self, obj):\n return _(u'Project updates for project %(project_title)s') % {\n 'project_title': obj.title\n }\n\n def items(self, obj):\n # Limited to 25 items to prevent gateway timeouts.\n return ProjectUpdate.objects.filter(project__id=obj.id).order_by('-id')[:25]\n\n\nclass OrganisationUpdates(UpdateFeed):\n \"\"\"RSS feed for last 25 RSR updates of an organisation.\"\"\"\n feed_type = RSRMediaRssFeed\n\n def get_object(self, request, org_id):\n return get_object_or_404(Organisation, id=int(org_id))\n\n def title(self, obj):\n return _(u'Projects of %(org_name)s') % {'org_name':obj.name,}\n\n def description(self, obj):\n if obj.name == obj.long_name:\n return _(u\"Project updates for projects partnered by %(org_name)s\") % {\n 'org_name': obj.name\n }\n else:\n return _(\n u\"Project updates for projects partnered by %(org_name)s - %(long_name)s\"\n ) % {'org_name': obj.name, 'long_name': obj.long_name}\n\n def items(self, obj):\n # Limited to 25 items to prevent gateway timeouts.\n return obj.published_projects().all_updates()[:25]\n\n def item_title(self, item):\n return _(\n u'Project %(project_id)d - %(project_title)s: %(update_title)s'\n ) % {\n 'project_id': item.project.id,\n 'project_title': item.project.title,\n 'update_title': item.title\n }\n\n\nclass AllProjectUpdates(UpdateFeed):\n \"\"\"RSS feed for last 25 RSR updates.\"\"\"\n title = _(u'Last 25 RSR project updates')\n \n def link(self):\n return reverse('update-directory')\n\n description = _(u'Project updates for all Akvo RSR projects')\n\n def items(self):\n # Limited to 25 items to prevent gateway timeouts.\n return ProjectUpdate.objects.select_related().order_by('-id')[:25]\n\n def item_title(self, item):\n return _(\n u'Project %(project_id)d - %(project_title)s: %(update_title)s'\n ) % {\n 'project_id': item.project.id,\n 'project_title': item.project.title,\n 'update_title': item.title\n }\n", "path": "akvo/rsr/feeds.py"}]}
| 3,062 | 604 |
gh_patches_debug_224
|
rasdani/github-patches
|
git_diff
|
TheAlgorithms__Python-7390
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[PYTEST WARNING] Horn schunk
### Feature description
@skief @poyea Please could you resolve this warning
```
computer_vision/horn_schunck.py:15
/home/runner/work/Python/Python/computer_vision/horn_schunck.py:15:
DeprecationWarning: Please use `convolve` from the `scipy.ndimage` namespace, the `scipy.ndimage.filters` namespace is deprecated.
from scipy.ndimage.filters import convolve
```
origin: #7211
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `computer_vision/horn_schunck.py`
Content:
```
1 """
2 The Horn-Schunck method estimates the optical flow for every single pixel of
3 a sequence of images.
4 It works by assuming brightness constancy between two consecutive frames
5 and smoothness in the optical flow.
6
7 Useful resources:
8 Wikipedia: https://en.wikipedia.org/wiki/Horn%E2%80%93Schunck_method
9 Paper: http://image.diku.dk/imagecanon/material/HornSchunckOptical_Flow.pdf
10 """
11
12 from typing import SupportsIndex
13
14 import numpy as np
15 from scipy.ndimage.filters import convolve
16
17
18 def warp(
19 image: np.ndarray, horizontal_flow: np.ndarray, vertical_flow: np.ndarray
20 ) -> np.ndarray:
21 """
22 Warps the pixels of an image into a new image using the horizontal and vertical
23 flows.
24 Pixels that are warped from an invalid location are set to 0.
25
26 Parameters:
27 image: Grayscale image
28 horizontal_flow: Horizontal flow
29 vertical_flow: Vertical flow
30
31 Returns: Warped image
32
33 >>> warp(np.array([[0, 1, 2], [0, 3, 0], [2, 2, 2]]), \
34 np.array([[0, 1, -1], [-1, 0, 0], [1, 1, 1]]), \
35 np.array([[0, 0, 0], [0, 1, 0], [0, 0, 1]]))
36 array([[0, 0, 0],
37 [3, 1, 0],
38 [0, 2, 3]])
39 """
40 flow = np.stack((horizontal_flow, vertical_flow), 2)
41
42 # Create a grid of all pixel coordinates and subtract the flow to get the
43 # target pixels coordinates
44 grid = np.stack(
45 np.meshgrid(np.arange(0, image.shape[1]), np.arange(0, image.shape[0])), 2
46 )
47 grid = np.round(grid - flow).astype(np.int32)
48
49 # Find the locations outside of the original image
50 invalid = (grid < 0) | (grid >= np.array([image.shape[1], image.shape[0]]))
51 grid[invalid] = 0
52
53 warped = image[grid[:, :, 1], grid[:, :, 0]]
54
55 # Set pixels at invalid locations to 0
56 warped[invalid[:, :, 0] | invalid[:, :, 1]] = 0
57
58 return warped
59
60
61 def horn_schunck(
62 image0: np.ndarray,
63 image1: np.ndarray,
64 num_iter: SupportsIndex,
65 alpha: float | None = None,
66 ) -> tuple[np.ndarray, np.ndarray]:
67 """
68 This function performs the Horn-Schunck algorithm and returns the estimated
69 optical flow. It is assumed that the input images are grayscale and
70 normalized to be in [0, 1].
71
72 Parameters:
73 image0: First image of the sequence
74 image1: Second image of the sequence
75 alpha: Regularization constant
76 num_iter: Number of iterations performed
77
78 Returns: estimated horizontal & vertical flow
79
80 >>> np.round(horn_schunck(np.array([[0, 0, 2], [0, 0, 2]]), \
81 np.array([[0, 2, 0], [0, 2, 0]]), alpha=0.1, num_iter=110)).\
82 astype(np.int32)
83 array([[[ 0, -1, -1],
84 [ 0, -1, -1]],
85 <BLANKLINE>
86 [[ 0, 0, 0],
87 [ 0, 0, 0]]], dtype=int32)
88 """
89 if alpha is None:
90 alpha = 0.1
91
92 # Initialize flow
93 horizontal_flow = np.zeros_like(image0)
94 vertical_flow = np.zeros_like(image0)
95
96 # Prepare kernels for the calculation of the derivatives and the average velocity
97 kernel_x = np.array([[-1, 1], [-1, 1]]) * 0.25
98 kernel_y = np.array([[-1, -1], [1, 1]]) * 0.25
99 kernel_t = np.array([[1, 1], [1, 1]]) * 0.25
100 kernel_laplacian = np.array(
101 [[1 / 12, 1 / 6, 1 / 12], [1 / 6, 0, 1 / 6], [1 / 12, 1 / 6, 1 / 12]]
102 )
103
104 # Iteratively refine the flow
105 for _ in range(num_iter):
106 warped_image = warp(image0, horizontal_flow, vertical_flow)
107 derivative_x = convolve(warped_image, kernel_x) + convolve(image1, kernel_x)
108 derivative_y = convolve(warped_image, kernel_y) + convolve(image1, kernel_y)
109 derivative_t = convolve(warped_image, kernel_t) + convolve(image1, -kernel_t)
110
111 avg_horizontal_velocity = convolve(horizontal_flow, kernel_laplacian)
112 avg_vertical_velocity = convolve(vertical_flow, kernel_laplacian)
113
114 # This updates the flow as proposed in the paper (Step 12)
115 update = (
116 derivative_x * avg_horizontal_velocity
117 + derivative_y * avg_vertical_velocity
118 + derivative_t
119 )
120 update = update / (alpha**2 + derivative_x**2 + derivative_y**2)
121
122 horizontal_flow = avg_horizontal_velocity - derivative_x * update
123 vertical_flow = avg_vertical_velocity - derivative_y * update
124
125 return horizontal_flow, vertical_flow
126
127
128 if __name__ == "__main__":
129 import doctest
130
131 doctest.testmod()
132
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/computer_vision/horn_schunck.py b/computer_vision/horn_schunck.py
--- a/computer_vision/horn_schunck.py
+++ b/computer_vision/horn_schunck.py
@@ -12,7 +12,7 @@
from typing import SupportsIndex
import numpy as np
-from scipy.ndimage.filters import convolve
+from scipy.ndimage import convolve
def warp(
|
{"golden_diff": "diff --git a/computer_vision/horn_schunck.py b/computer_vision/horn_schunck.py\n--- a/computer_vision/horn_schunck.py\n+++ b/computer_vision/horn_schunck.py\n@@ -12,7 +12,7 @@\n from typing import SupportsIndex\n \n import numpy as np\n-from scipy.ndimage.filters import convolve\n+from scipy.ndimage import convolve\n \n \n def warp(\n", "issue": "[PYTEST WARNING] Horn schunk\n### Feature description\r\n\r\n@skief @poyea Please could you resolve this warning\r\n```\r\ncomputer_vision/horn_schunck.py:15\r\n /home/runner/work/Python/Python/computer_vision/horn_schunck.py:15:\r\n DeprecationWarning: Please use `convolve` from the `scipy.ndimage` namespace, the `scipy.ndimage.filters` namespace is deprecated.\r\n from scipy.ndimage.filters import convolve\r\n```\r\n\r\norigin: #7211\n", "before_files": [{"content": "\"\"\"\n The Horn-Schunck method estimates the optical flow for every single pixel of\n a sequence of images.\n It works by assuming brightness constancy between two consecutive frames\n and smoothness in the optical flow.\n\n Useful resources:\n Wikipedia: https://en.wikipedia.org/wiki/Horn%E2%80%93Schunck_method\n Paper: http://image.diku.dk/imagecanon/material/HornSchunckOptical_Flow.pdf\n\"\"\"\n\nfrom typing import SupportsIndex\n\nimport numpy as np\nfrom scipy.ndimage.filters import convolve\n\n\ndef warp(\n image: np.ndarray, horizontal_flow: np.ndarray, vertical_flow: np.ndarray\n) -> np.ndarray:\n \"\"\"\n Warps the pixels of an image into a new image using the horizontal and vertical\n flows.\n Pixels that are warped from an invalid location are set to 0.\n\n Parameters:\n image: Grayscale image\n horizontal_flow: Horizontal flow\n vertical_flow: Vertical flow\n\n Returns: Warped image\n\n >>> warp(np.array([[0, 1, 2], [0, 3, 0], [2, 2, 2]]), \\\n np.array([[0, 1, -1], [-1, 0, 0], [1, 1, 1]]), \\\n np.array([[0, 0, 0], [0, 1, 0], [0, 0, 1]]))\n array([[0, 0, 0],\n [3, 1, 0],\n [0, 2, 3]])\n \"\"\"\n flow = np.stack((horizontal_flow, vertical_flow), 2)\n\n # Create a grid of all pixel coordinates and subtract the flow to get the\n # target pixels coordinates\n grid = np.stack(\n np.meshgrid(np.arange(0, image.shape[1]), np.arange(0, image.shape[0])), 2\n )\n grid = np.round(grid - flow).astype(np.int32)\n\n # Find the locations outside of the original image\n invalid = (grid < 0) | (grid >= np.array([image.shape[1], image.shape[0]]))\n grid[invalid] = 0\n\n warped = image[grid[:, :, 1], grid[:, :, 0]]\n\n # Set pixels at invalid locations to 0\n warped[invalid[:, :, 0] | invalid[:, :, 1]] = 0\n\n return warped\n\n\ndef horn_schunck(\n image0: np.ndarray,\n image1: np.ndarray,\n num_iter: SupportsIndex,\n alpha: float | None = None,\n) -> tuple[np.ndarray, np.ndarray]:\n \"\"\"\n This function performs the Horn-Schunck algorithm and returns the estimated\n optical flow. 
It is assumed that the input images are grayscale and\n normalized to be in [0, 1].\n\n Parameters:\n image0: First image of the sequence\n image1: Second image of the sequence\n alpha: Regularization constant\n num_iter: Number of iterations performed\n\n Returns: estimated horizontal & vertical flow\n\n >>> np.round(horn_schunck(np.array([[0, 0, 2], [0, 0, 2]]), \\\n np.array([[0, 2, 0], [0, 2, 0]]), alpha=0.1, num_iter=110)).\\\n astype(np.int32)\n array([[[ 0, -1, -1],\n [ 0, -1, -1]],\n <BLANKLINE>\n [[ 0, 0, 0],\n [ 0, 0, 0]]], dtype=int32)\n \"\"\"\n if alpha is None:\n alpha = 0.1\n\n # Initialize flow\n horizontal_flow = np.zeros_like(image0)\n vertical_flow = np.zeros_like(image0)\n\n # Prepare kernels for the calculation of the derivatives and the average velocity\n kernel_x = np.array([[-1, 1], [-1, 1]]) * 0.25\n kernel_y = np.array([[-1, -1], [1, 1]]) * 0.25\n kernel_t = np.array([[1, 1], [1, 1]]) * 0.25\n kernel_laplacian = np.array(\n [[1 / 12, 1 / 6, 1 / 12], [1 / 6, 0, 1 / 6], [1 / 12, 1 / 6, 1 / 12]]\n )\n\n # Iteratively refine the flow\n for _ in range(num_iter):\n warped_image = warp(image0, horizontal_flow, vertical_flow)\n derivative_x = convolve(warped_image, kernel_x) + convolve(image1, kernel_x)\n derivative_y = convolve(warped_image, kernel_y) + convolve(image1, kernel_y)\n derivative_t = convolve(warped_image, kernel_t) + convolve(image1, -kernel_t)\n\n avg_horizontal_velocity = convolve(horizontal_flow, kernel_laplacian)\n avg_vertical_velocity = convolve(vertical_flow, kernel_laplacian)\n\n # This updates the flow as proposed in the paper (Step 12)\n update = (\n derivative_x * avg_horizontal_velocity\n + derivative_y * avg_vertical_velocity\n + derivative_t\n )\n update = update / (alpha**2 + derivative_x**2 + derivative_y**2)\n\n horizontal_flow = avg_horizontal_velocity - derivative_x * update\n vertical_flow = avg_vertical_velocity - derivative_y * update\n\n return horizontal_flow, vertical_flow\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n", "path": "computer_vision/horn_schunck.py"}], "after_files": [{"content": "\"\"\"\n The Horn-Schunck method estimates the optical flow for every single pixel of\n a sequence of images.\n It works by assuming brightness constancy between two consecutive frames\n and smoothness in the optical flow.\n\n Useful resources:\n Wikipedia: https://en.wikipedia.org/wiki/Horn%E2%80%93Schunck_method\n Paper: http://image.diku.dk/imagecanon/material/HornSchunckOptical_Flow.pdf\n\"\"\"\n\nfrom typing import SupportsIndex\n\nimport numpy as np\nfrom scipy.ndimage import convolve\n\n\ndef warp(\n image: np.ndarray, horizontal_flow: np.ndarray, vertical_flow: np.ndarray\n) -> np.ndarray:\n \"\"\"\n Warps the pixels of an image into a new image using the horizontal and vertical\n flows.\n Pixels that are warped from an invalid location are set to 0.\n\n Parameters:\n image: Grayscale image\n horizontal_flow: Horizontal flow\n vertical_flow: Vertical flow\n\n Returns: Warped image\n\n >>> warp(np.array([[0, 1, 2], [0, 3, 0], [2, 2, 2]]), \\\n np.array([[0, 1, -1], [-1, 0, 0], [1, 1, 1]]), \\\n np.array([[0, 0, 0], [0, 1, 0], [0, 0, 1]]))\n array([[0, 0, 0],\n [3, 1, 0],\n [0, 2, 3]])\n \"\"\"\n flow = np.stack((horizontal_flow, vertical_flow), 2)\n\n # Create a grid of all pixel coordinates and subtract the flow to get the\n # target pixels coordinates\n grid = np.stack(\n np.meshgrid(np.arange(0, image.shape[1]), np.arange(0, image.shape[0])), 2\n )\n grid = np.round(grid - flow).astype(np.int32)\n\n # Find 
the locations outside of the original image\n invalid = (grid < 0) | (grid >= np.array([image.shape[1], image.shape[0]]))\n grid[invalid] = 0\n\n warped = image[grid[:, :, 1], grid[:, :, 0]]\n\n # Set pixels at invalid locations to 0\n warped[invalid[:, :, 0] | invalid[:, :, 1]] = 0\n\n return warped\n\n\ndef horn_schunck(\n image0: np.ndarray,\n image1: np.ndarray,\n num_iter: SupportsIndex,\n alpha: float | None = None,\n) -> tuple[np.ndarray, np.ndarray]:\n \"\"\"\n This function performs the Horn-Schunck algorithm and returns the estimated\n optical flow. It is assumed that the input images are grayscale and\n normalized to be in [0, 1].\n\n Parameters:\n image0: First image of the sequence\n image1: Second image of the sequence\n alpha: Regularization constant\n num_iter: Number of iterations performed\n\n Returns: estimated horizontal & vertical flow\n\n >>> np.round(horn_schunck(np.array([[0, 0, 2], [0, 0, 2]]), \\\n np.array([[0, 2, 0], [0, 2, 0]]), alpha=0.1, num_iter=110)).\\\n astype(np.int32)\n array([[[ 0, -1, -1],\n [ 0, -1, -1]],\n <BLANKLINE>\n [[ 0, 0, 0],\n [ 0, 0, 0]]], dtype=int32)\n \"\"\"\n if alpha is None:\n alpha = 0.1\n\n # Initialize flow\n horizontal_flow = np.zeros_like(image0)\n vertical_flow = np.zeros_like(image0)\n\n # Prepare kernels for the calculation of the derivatives and the average velocity\n kernel_x = np.array([[-1, 1], [-1, 1]]) * 0.25\n kernel_y = np.array([[-1, -1], [1, 1]]) * 0.25\n kernel_t = np.array([[1, 1], [1, 1]]) * 0.25\n kernel_laplacian = np.array(\n [[1 / 12, 1 / 6, 1 / 12], [1 / 6, 0, 1 / 6], [1 / 12, 1 / 6, 1 / 12]]\n )\n\n # Iteratively refine the flow\n for _ in range(num_iter):\n warped_image = warp(image0, horizontal_flow, vertical_flow)\n derivative_x = convolve(warped_image, kernel_x) + convolve(image1, kernel_x)\n derivative_y = convolve(warped_image, kernel_y) + convolve(image1, kernel_y)\n derivative_t = convolve(warped_image, kernel_t) + convolve(image1, -kernel_t)\n\n avg_horizontal_velocity = convolve(horizontal_flow, kernel_laplacian)\n avg_vertical_velocity = convolve(vertical_flow, kernel_laplacian)\n\n # This updates the flow as proposed in the paper (Step 12)\n update = (\n derivative_x * avg_horizontal_velocity\n + derivative_y * avg_vertical_velocity\n + derivative_t\n )\n update = update / (alpha**2 + derivative_x**2 + derivative_y**2)\n\n horizontal_flow = avg_horizontal_velocity - derivative_x * update\n vertical_flow = avg_vertical_velocity - derivative_y * update\n\n return horizontal_flow, vertical_flow\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n", "path": "computer_vision/horn_schunck.py"}]}
| 1,961 | 101 |
gh_patches_debug_42794
|
rasdani/github-patches
|
git_diff
|
projectmesa__mesa-1702
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Feature Request: Agent DataCollection Can't Handle Different Attributes in ActivationByType
**Describe the Feature**
Receive attribute error when running an agent reporter through datacollector when using RandomActivationByType
**Desired behavior**
I can collect against different attributes based on agent type
**To Reproduce**
Create a model with different agent types and attributes and try to collect against that agent attribute
**Additional context**
Feature would need to update at line 170 of datacollection.py `agent_records = map(get_reports, model.schedule.agents)`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mesa/datacollection.py`
Content:
```
1 """
2 Mesa Data Collection Module
3 ===========================
4
5 DataCollector is meant to provide a simple, standard way to collect data
6 generated by a Mesa model. It collects three types of data: model-level data,
7 agent-level data, and tables.
8
9 A DataCollector is instantiated with two dictionaries of reporter names and
10 associated variable names or functions for each, one for model-level data and
11 one for agent-level data; a third dictionary provides table names and columns.
12 Variable names are converted into functions which retrieve attributes of that
13 name.
14
15 When the collect() method is called, each model-level function is called, with
16 the model as the argument, and the results associated with the relevant
17 variable. Then the agent-level functions are called on each agent in the model
18 scheduler.
19
20 Additionally, other objects can write directly to tables by passing in an
21 appropriate dictionary object for a table row.
22
23 The DataCollector then stores the data it collects in dictionaries:
24 * model_vars maps each reporter to a list of its values
25 * tables maps each table to a dictionary, with each column as a key with a
26 list as its value.
27 * _agent_records maps each model step to a list of each agents id
28 and its values.
29
30 Finally, DataCollector can create a pandas DataFrame from each collection.
31
32 The default DataCollector here makes several assumptions:
33 * The model has a schedule object called 'schedule'
34 * The schedule has an agent list called agents
35 * For collecting agent-level variables, agents must have a unique_id
36 """
37 import itertools
38 import types
39 from operator import attrgetter
40
41 import pandas as pd
42
43
44 class DataCollector:
45 """Class for collecting data generated by a Mesa model.
46
47 A DataCollector is instantiated with dictionaries of names of model- and
48 agent-level variables to collect, associated with attribute names or
49 functions which actually collect them. When the collect(...) method is
50 called, it collects these attributes and executes these functions one by
51 one and stores the results.
52 """
53
54 def __init__(self, model_reporters=None, agent_reporters=None, tables=None):
55 """Instantiate a DataCollector with lists of model and agent reporters.
56 Both model_reporters and agent_reporters accept a dictionary mapping a
57 variable name to either an attribute name, or a method.
58 For example, if there was only one model-level reporter for number of
59 agents, it might look like:
60 {"agent_count": lambda m: m.schedule.get_agent_count() }
61 If there was only one agent-level reporter (e.g. the agent's energy),
62 it might look like this:
63 {"energy": "energy"}
64 or like this:
65 {"energy": lambda a: a.energy}
66
67 The tables arg accepts a dictionary mapping names of tables to lists of
68 columns. For example, if we want to allow agents to write their age
69 when they are destroyed (to keep track of lifespans), it might look
70 like:
71 {"Lifespan": ["unique_id", "age"]}
72
73 Args:
74 model_reporters: Dictionary of reporter names and attributes/funcs
75 agent_reporters: Dictionary of reporter names and attributes/funcs.
76 tables: Dictionary of table names to lists of column names.
77
78 Notes:
79 If you want to pickle your model you must not use lambda functions.
80 If your model includes a large number of agents, you should *only*
81 use attribute names for the agent reporter, it will be much faster.
82
83 Model reporters can take four types of arguments:
84 lambda like above:
85 {"agent_count": lambda m: m.schedule.get_agent_count() }
86 method of a class/instance:
87 {"agent_count": self.get_agent_count} # self here is a class instance
88 {"agent_count": Model.get_agent_count} # Model here is a class
89 class attributes of a model
90 {"model_attribute": "model_attribute"}
91 functions with parameters that have placed in a list
92 {"Model_Function":[function, [param_1, param_2]]}
93 """
94 self.model_reporters = {}
95 self.agent_reporters = {}
96
97 self.model_vars = {}
98 self._agent_records = {}
99 self.tables = {}
100
101 if model_reporters is not None:
102 for name, reporter in model_reporters.items():
103 self._new_model_reporter(name, reporter)
104
105 if agent_reporters is not None:
106 for name, reporter in agent_reporters.items():
107 self._new_agent_reporter(name, reporter)
108
109 if tables is not None:
110 for name, columns in tables.items():
111 self._new_table(name, columns)
112
113 def _new_model_reporter(self, name, reporter):
114 """Add a new model-level reporter to collect.
115
116 Args:
117 name: Name of the model-level variable to collect.
118 reporter: Attribute string, or function object that returns the
119 variable when given a model instance.
120 """
121 self.model_reporters[name] = reporter
122 self.model_vars[name] = []
123
124 def _new_agent_reporter(self, name, reporter):
125 """Add a new agent-level reporter to collect.
126
127 Args:
128 name: Name of the agent-level variable to collect.
129 reporter: Attribute string, or function object that returns the
130 variable when given a model instance.
131 """
132 if type(reporter) is str:
133 attribute_name = reporter
134
135 def reporter(agent):
136 return getattr(agent, attribute_name, None)
137
138 reporter.attribute_name = attribute_name
139 self.agent_reporters[name] = reporter
140
141 def _new_table(self, table_name, table_columns):
142 """Add a new table that objects can write to.
143
144 Args:
145 table_name: Name of the new table.
146 table_columns: List of columns to add to the table.
147 """
148 new_table = {column: [] for column in table_columns}
149 self.tables[table_name] = new_table
150
151 def _record_agents(self, model):
152 """Record agents data in a mapping of functions and agents."""
153 rep_funcs = self.agent_reporters.values()
154 if all(hasattr(rep, "attribute_name") for rep in rep_funcs):
155 prefix = ["model.schedule.steps", "unique_id"]
156 attributes = [func.attribute_name for func in rep_funcs]
157 get_reports = attrgetter(*prefix + attributes)
158 else:
159
160 def get_reports(agent):
161 _prefix = (agent.model.schedule.steps, agent.unique_id)
162 reports = tuple(rep(agent) for rep in rep_funcs)
163 return _prefix + reports
164
165 agent_records = map(get_reports, model.schedule.agents)
166 return agent_records
167
168 def collect(self, model):
169 """Collect all the data for the given model object."""
170 if self.model_reporters:
171 for var, reporter in self.model_reporters.items():
172 # Check if Lambda operator
173 if isinstance(reporter, types.LambdaType):
174 self.model_vars[var].append(reporter(model))
175 # Check if model attribute
176 elif isinstance(reporter, str):
177 self.model_vars[var].append(getattr(model, reporter, None))
178 # Check if function with arguments
179 elif isinstance(reporter, list):
180 self.model_vars[var].append(reporter[0](*reporter[1]))
181 # TODO: Check if method of a class, as of now it is assumed
182 # implicitly if the other checks fail.
183 else:
184 self.model_vars[var].append(reporter())
185
186 if self.agent_reporters:
187 agent_records = self._record_agents(model)
188 self._agent_records[model.schedule.steps] = list(agent_records)
189
190 def add_table_row(self, table_name, row, ignore_missing=False):
191 """Add a row dictionary to a specific table.
192
193 Args:
194 table_name: Name of the table to append a row to.
195 row: A dictionary of the form {column_name: value...}
196 ignore_missing: If True, fill any missing columns with Nones;
197 if False, throw an error if any columns are missing
198 """
199 if table_name not in self.tables:
200 raise Exception("Table does not exist.")
201
202 for column in self.tables[table_name]:
203 if column in row:
204 self.tables[table_name][column].append(row[column])
205 elif ignore_missing:
206 self.tables[table_name][column].append(None)
207 else:
208 raise Exception("Could not insert row with missing column")
209
210 def get_model_vars_dataframe(self):
211 """Create a pandas DataFrame from the model variables.
212
213 The DataFrame has one column for each model variable, and the index is
214 (implicitly) the model tick.
215 """
216 # Check if self.model_reporters dictionary is empty, if so raise warning
217 if not self.model_reporters:
218 raise UserWarning(
219 "No model reporters have been defined in the DataCollector, returning empty DataFrame."
220 )
221
222 return pd.DataFrame(self.model_vars)
223
224 def get_agent_vars_dataframe(self):
225 """Create a pandas DataFrame from the agent variables.
226
227 The DataFrame has one column for each variable, with two additional
228 columns for tick and agent_id.
229 """
230 # Check if self.agent_reporters dictionary is empty, if so raise warning
231 if not self.agent_reporters:
232 raise UserWarning(
233 "No agent reporters have been defined in the DataCollector, returning empty DataFrame."
234 )
235
236 all_records = itertools.chain.from_iterable(self._agent_records.values())
237 rep_names = list(self.agent_reporters)
238
239 df = pd.DataFrame.from_records(
240 data=all_records,
241 columns=["Step", "AgentID", *rep_names],
242 index=["Step", "AgentID"],
243 )
244 return df
245
246 def get_table_dataframe(self, table_name):
247 """Create a pandas DataFrame from a particular table.
248
249 Args:
250 table_name: The name of the table to convert.
251 """
252 if table_name not in self.tables:
253 raise Exception("No such table.")
254 return pd.DataFrame(self.tables[table_name])
255
```
Path: `mesa/model.py`
Content:
```
1 """
2 The model class for Mesa framework.
3
4 Core Objects: Model
5 """
6 # Mypy; for the `|` operator purpose
7 # Remove this __future__ import once the oldest supported Python is 3.10
8 from __future__ import annotations
9
10 import random
11
12 # mypy
13 from typing import Any
14
15 from mesa.datacollection import DataCollector
16
17
18 class Model:
19 """Base class for models."""
20
21 def __new__(cls, *args: Any, **kwargs: Any) -> Any:
22 """Create a new model object and instantiate its RNG automatically."""
23 obj = object.__new__(cls)
24 obj._seed = kwargs.get("seed", None)
25 obj.random = random.Random(obj._seed)
26 return obj
27
28 def __init__(self, *args: Any, **kwargs: Any) -> None:
29 """Create a new model. Overload this method with the actual code to
30 start the model.
31
32 Attributes:
33 schedule: schedule object
34 running: a bool indicating if the model should continue running
35 """
36
37 self.running = True
38 self.schedule = None
39 self.current_id = 0
40
41 def run_model(self) -> None:
42 """Run the model until the end condition is reached. Overload as
43 needed.
44 """
45 while self.running:
46 self.step()
47
48 def step(self) -> None:
49 """A single step. Fill in here."""
50
51 def next_id(self) -> int:
52 """Return the next unique ID for agents, increment current_id"""
53 self.current_id += 1
54 return self.current_id
55
56 def reset_randomizer(self, seed: int | None = None) -> None:
57 """Reset the model random number generator.
58
59 Args:
60 seed: A new seed for the RNG; if None, reset using the current seed
61 """
62
63 if seed is None:
64 seed = self._seed
65 self.random.seed(seed)
66 self._seed = seed
67
68 def initialize_data_collector(
69 self, model_reporters=None, agent_reporters=None, tables=None
70 ) -> None:
71 if not hasattr(self, "schedule") or self.schedule is None:
72 raise RuntimeError(
73 "You must initialize the scheduler (self.schedule) before initializing the data collector."
74 )
75 if self.schedule.get_agent_count() == 0:
76 raise RuntimeError(
77 "You must add agents to the scheduler before initializing the data collector."
78 )
79 self.datacollector = DataCollector(
80 model_reporters=model_reporters,
81 agent_reporters=agent_reporters,
82 tables=tables,
83 )
84 # Collect data for the first time during initialization.
85 self.datacollector.collect(self)
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mesa/datacollection.py b/mesa/datacollection.py
--- a/mesa/datacollection.py
+++ b/mesa/datacollection.py
@@ -51,7 +51,13 @@
one and stores the results.
"""
- def __init__(self, model_reporters=None, agent_reporters=None, tables=None):
+ def __init__(
+ self,
+ model_reporters=None,
+ agent_reporters=None,
+ tables=None,
+ exclude_none_values=False,
+ ):
"""Instantiate a DataCollector with lists of model and agent reporters.
Both model_reporters and agent_reporters accept a dictionary mapping a
variable name to either an attribute name, or a method.
@@ -74,6 +80,8 @@
model_reporters: Dictionary of reporter names and attributes/funcs
agent_reporters: Dictionary of reporter names and attributes/funcs.
tables: Dictionary of table names to lists of column names.
+ exclude_none_values: Boolean of whether to drop records which values
+ are None, in the final result.
Notes:
If you want to pickle your model you must not use lambda functions.
@@ -97,6 +105,7 @@
self.model_vars = {}
self._agent_records = {}
self.tables = {}
+ self.exclude_none_values = exclude_none_values
if model_reporters is not None:
for name, reporter in model_reporters.items():
@@ -151,7 +160,23 @@
def _record_agents(self, model):
"""Record agents data in a mapping of functions and agents."""
rep_funcs = self.agent_reporters.values()
+ if self.exclude_none_values:
+ # Drop records which values are None.
+
+ def get_reports(agent):
+ _prefix = (agent.model.schedule.steps, agent.unique_id)
+ reports = (rep(agent) for rep in rep_funcs)
+ reports_without_none = tuple(r for r in reports if r is not None)
+ if len(reports_without_none) == 0:
+ return None
+ return _prefix + reports_without_none
+
+ agent_records = (get_reports(agent) for agent in model.schedule.agents)
+ agent_records_without_none = (r for r in agent_records if r is not None)
+ return agent_records_without_none
+
if all(hasattr(rep, "attribute_name") for rep in rep_funcs):
+ # This branch is for performance optimization purpose.
prefix = ["model.schedule.steps", "unique_id"]
attributes = [func.attribute_name for func in rep_funcs]
get_reports = attrgetter(*prefix + attributes)
diff --git a/mesa/model.py b/mesa/model.py
--- a/mesa/model.py
+++ b/mesa/model.py
@@ -66,7 +66,11 @@
self._seed = seed
def initialize_data_collector(
- self, model_reporters=None, agent_reporters=None, tables=None
+ self,
+ model_reporters=None,
+ agent_reporters=None,
+ tables=None,
+ exclude_none_values=False,
) -> None:
if not hasattr(self, "schedule") or self.schedule is None:
raise RuntimeError(
@@ -80,6 +84,7 @@
model_reporters=model_reporters,
agent_reporters=agent_reporters,
tables=tables,
+ exclude_none_values=exclude_none_values,
)
# Collect data for the first time during initialization.
self.datacollector.collect(self)
|
{"golden_diff": "diff --git a/mesa/datacollection.py b/mesa/datacollection.py\n--- a/mesa/datacollection.py\n+++ b/mesa/datacollection.py\n@@ -51,7 +51,13 @@\n one and stores the results.\n \"\"\"\n \n- def __init__(self, model_reporters=None, agent_reporters=None, tables=None):\n+ def __init__(\n+ self,\n+ model_reporters=None,\n+ agent_reporters=None,\n+ tables=None,\n+ exclude_none_values=False,\n+ ):\n \"\"\"Instantiate a DataCollector with lists of model and agent reporters.\n Both model_reporters and agent_reporters accept a dictionary mapping a\n variable name to either an attribute name, or a method.\n@@ -74,6 +80,8 @@\n model_reporters: Dictionary of reporter names and attributes/funcs\n agent_reporters: Dictionary of reporter names and attributes/funcs.\n tables: Dictionary of table names to lists of column names.\n+ exclude_none_values: Boolean of whether to drop records which values\n+ are None, in the final result.\n \n Notes:\n If you want to pickle your model you must not use lambda functions.\n@@ -97,6 +105,7 @@\n self.model_vars = {}\n self._agent_records = {}\n self.tables = {}\n+ self.exclude_none_values = exclude_none_values\n \n if model_reporters is not None:\n for name, reporter in model_reporters.items():\n@@ -151,7 +160,23 @@\n def _record_agents(self, model):\n \"\"\"Record agents data in a mapping of functions and agents.\"\"\"\n rep_funcs = self.agent_reporters.values()\n+ if self.exclude_none_values:\n+ # Drop records which values are None.\n+\n+ def get_reports(agent):\n+ _prefix = (agent.model.schedule.steps, agent.unique_id)\n+ reports = (rep(agent) for rep in rep_funcs)\n+ reports_without_none = tuple(r for r in reports if r is not None)\n+ if len(reports_without_none) == 0:\n+ return None\n+ return _prefix + reports_without_none\n+\n+ agent_records = (get_reports(agent) for agent in model.schedule.agents)\n+ agent_records_without_none = (r for r in agent_records if r is not None)\n+ return agent_records_without_none\n+\n if all(hasattr(rep, \"attribute_name\") for rep in rep_funcs):\n+ # This branch is for performance optimization purpose.\n prefix = [\"model.schedule.steps\", \"unique_id\"]\n attributes = [func.attribute_name for func in rep_funcs]\n get_reports = attrgetter(*prefix + attributes)\ndiff --git a/mesa/model.py b/mesa/model.py\n--- a/mesa/model.py\n+++ b/mesa/model.py\n@@ -66,7 +66,11 @@\n self._seed = seed\n \n def initialize_data_collector(\n- self, model_reporters=None, agent_reporters=None, tables=None\n+ self,\n+ model_reporters=None,\n+ agent_reporters=None,\n+ tables=None,\n+ exclude_none_values=False,\n ) -> None:\n if not hasattr(self, \"schedule\") or self.schedule is None:\n raise RuntimeError(\n@@ -80,6 +84,7 @@\n model_reporters=model_reporters,\n agent_reporters=agent_reporters,\n tables=tables,\n+ exclude_none_values=exclude_none_values,\n )\n # Collect data for the first time during initialization.\n self.datacollector.collect(self)\n", "issue": "Feature Request: Agent DataCollection Can't Handle Different Attributes in ActivationByType \n**Describe the Feature**\r\nReceive attribute error when running an agent reporter through datacollector when using RandomActivationByType \r\n\r\n**Desired behavior**\r\nI can collect against different attributes based on agent type\r\n\r\n**To Reproduce**\r\nCreate a model with different agent types and attributes and try to collect against that agent attribute\r\n\r\n**Additional context**\r\nFeature would need to update at line 170 of datacollection.py `agent_records = map(get_reports, 
model.schedule.agents)`\r\n\n", "before_files": [{"content": "\"\"\"\nMesa Data Collection Module\n===========================\n\nDataCollector is meant to provide a simple, standard way to collect data\ngenerated by a Mesa model. It collects three types of data: model-level data,\nagent-level data, and tables.\n\nA DataCollector is instantiated with two dictionaries of reporter names and\nassociated variable names or functions for each, one for model-level data and\none for agent-level data; a third dictionary provides table names and columns.\nVariable names are converted into functions which retrieve attributes of that\nname.\n\nWhen the collect() method is called, each model-level function is called, with\nthe model as the argument, and the results associated with the relevant\nvariable. Then the agent-level functions are called on each agent in the model\nscheduler.\n\nAdditionally, other objects can write directly to tables by passing in an\nappropriate dictionary object for a table row.\n\nThe DataCollector then stores the data it collects in dictionaries:\n * model_vars maps each reporter to a list of its values\n * tables maps each table to a dictionary, with each column as a key with a\n list as its value.\n * _agent_records maps each model step to a list of each agents id\n and its values.\n\nFinally, DataCollector can create a pandas DataFrame from each collection.\n\nThe default DataCollector here makes several assumptions:\n * The model has a schedule object called 'schedule'\n * The schedule has an agent list called agents\n * For collecting agent-level variables, agents must have a unique_id\n\"\"\"\nimport itertools\nimport types\nfrom operator import attrgetter\n\nimport pandas as pd\n\n\nclass DataCollector:\n \"\"\"Class for collecting data generated by a Mesa model.\n\n A DataCollector is instantiated with dictionaries of names of model- and\n agent-level variables to collect, associated with attribute names or\n functions which actually collect them. When the collect(...) method is\n called, it collects these attributes and executes these functions one by\n one and stores the results.\n \"\"\"\n\n def __init__(self, model_reporters=None, agent_reporters=None, tables=None):\n \"\"\"Instantiate a DataCollector with lists of model and agent reporters.\n Both model_reporters and agent_reporters accept a dictionary mapping a\n variable name to either an attribute name, or a method.\n For example, if there was only one model-level reporter for number of\n agents, it might look like:\n {\"agent_count\": lambda m: m.schedule.get_agent_count() }\n If there was only one agent-level reporter (e.g. the agent's energy),\n it might look like this:\n {\"energy\": \"energy\"}\n or like this:\n {\"energy\": lambda a: a.energy}\n\n The tables arg accepts a dictionary mapping names of tables to lists of\n columns. 
For example, if we want to allow agents to write their age\n when they are destroyed (to keep track of lifespans), it might look\n like:\n {\"Lifespan\": [\"unique_id\", \"age\"]}\n\n Args:\n model_reporters: Dictionary of reporter names and attributes/funcs\n agent_reporters: Dictionary of reporter names and attributes/funcs.\n tables: Dictionary of table names to lists of column names.\n\n Notes:\n If you want to pickle your model you must not use lambda functions.\n If your model includes a large number of agents, you should *only*\n use attribute names for the agent reporter, it will be much faster.\n\n Model reporters can take four types of arguments:\n lambda like above:\n {\"agent_count\": lambda m: m.schedule.get_agent_count() }\n method of a class/instance:\n {\"agent_count\": self.get_agent_count} # self here is a class instance\n {\"agent_count\": Model.get_agent_count} # Model here is a class\n class attributes of a model\n {\"model_attribute\": \"model_attribute\"}\n functions with parameters that have placed in a list\n {\"Model_Function\":[function, [param_1, param_2]]}\n \"\"\"\n self.model_reporters = {}\n self.agent_reporters = {}\n\n self.model_vars = {}\n self._agent_records = {}\n self.tables = {}\n\n if model_reporters is not None:\n for name, reporter in model_reporters.items():\n self._new_model_reporter(name, reporter)\n\n if agent_reporters is not None:\n for name, reporter in agent_reporters.items():\n self._new_agent_reporter(name, reporter)\n\n if tables is not None:\n for name, columns in tables.items():\n self._new_table(name, columns)\n\n def _new_model_reporter(self, name, reporter):\n \"\"\"Add a new model-level reporter to collect.\n\n Args:\n name: Name of the model-level variable to collect.\n reporter: Attribute string, or function object that returns the\n variable when given a model instance.\n \"\"\"\n self.model_reporters[name] = reporter\n self.model_vars[name] = []\n\n def _new_agent_reporter(self, name, reporter):\n \"\"\"Add a new agent-level reporter to collect.\n\n Args:\n name: Name of the agent-level variable to collect.\n reporter: Attribute string, or function object that returns the\n variable when given a model instance.\n \"\"\"\n if type(reporter) is str:\n attribute_name = reporter\n\n def reporter(agent):\n return getattr(agent, attribute_name, None)\n\n reporter.attribute_name = attribute_name\n self.agent_reporters[name] = reporter\n\n def _new_table(self, table_name, table_columns):\n \"\"\"Add a new table that objects can write to.\n\n Args:\n table_name: Name of the new table.\n table_columns: List of columns to add to the table.\n \"\"\"\n new_table = {column: [] for column in table_columns}\n self.tables[table_name] = new_table\n\n def _record_agents(self, model):\n \"\"\"Record agents data in a mapping of functions and agents.\"\"\"\n rep_funcs = self.agent_reporters.values()\n if all(hasattr(rep, \"attribute_name\") for rep in rep_funcs):\n prefix = [\"model.schedule.steps\", \"unique_id\"]\n attributes = [func.attribute_name for func in rep_funcs]\n get_reports = attrgetter(*prefix + attributes)\n else:\n\n def get_reports(agent):\n _prefix = (agent.model.schedule.steps, agent.unique_id)\n reports = tuple(rep(agent) for rep in rep_funcs)\n return _prefix + reports\n\n agent_records = map(get_reports, model.schedule.agents)\n return agent_records\n\n def collect(self, model):\n \"\"\"Collect all the data for the given model object.\"\"\"\n if self.model_reporters:\n for var, reporter in self.model_reporters.items():\n # 
Check if Lambda operator\n if isinstance(reporter, types.LambdaType):\n self.model_vars[var].append(reporter(model))\n # Check if model attribute\n elif isinstance(reporter, str):\n self.model_vars[var].append(getattr(model, reporter, None))\n # Check if function with arguments\n elif isinstance(reporter, list):\n self.model_vars[var].append(reporter[0](*reporter[1]))\n # TODO: Check if method of a class, as of now it is assumed\n # implicitly if the other checks fail.\n else:\n self.model_vars[var].append(reporter())\n\n if self.agent_reporters:\n agent_records = self._record_agents(model)\n self._agent_records[model.schedule.steps] = list(agent_records)\n\n def add_table_row(self, table_name, row, ignore_missing=False):\n \"\"\"Add a row dictionary to a specific table.\n\n Args:\n table_name: Name of the table to append a row to.\n row: A dictionary of the form {column_name: value...}\n ignore_missing: If True, fill any missing columns with Nones;\n if False, throw an error if any columns are missing\n \"\"\"\n if table_name not in self.tables:\n raise Exception(\"Table does not exist.\")\n\n for column in self.tables[table_name]:\n if column in row:\n self.tables[table_name][column].append(row[column])\n elif ignore_missing:\n self.tables[table_name][column].append(None)\n else:\n raise Exception(\"Could not insert row with missing column\")\n\n def get_model_vars_dataframe(self):\n \"\"\"Create a pandas DataFrame from the model variables.\n\n The DataFrame has one column for each model variable, and the index is\n (implicitly) the model tick.\n \"\"\"\n # Check if self.model_reporters dictionary is empty, if so raise warning\n if not self.model_reporters:\n raise UserWarning(\n \"No model reporters have been defined in the DataCollector, returning empty DataFrame.\"\n )\n\n return pd.DataFrame(self.model_vars)\n\n def get_agent_vars_dataframe(self):\n \"\"\"Create a pandas DataFrame from the agent variables.\n\n The DataFrame has one column for each variable, with two additional\n columns for tick and agent_id.\n \"\"\"\n # Check if self.agent_reporters dictionary is empty, if so raise warning\n if not self.agent_reporters:\n raise UserWarning(\n \"No agent reporters have been defined in the DataCollector, returning empty DataFrame.\"\n )\n\n all_records = itertools.chain.from_iterable(self._agent_records.values())\n rep_names = list(self.agent_reporters)\n\n df = pd.DataFrame.from_records(\n data=all_records,\n columns=[\"Step\", \"AgentID\", *rep_names],\n index=[\"Step\", \"AgentID\"],\n )\n return df\n\n def get_table_dataframe(self, table_name):\n \"\"\"Create a pandas DataFrame from a particular table.\n\n Args:\n table_name: The name of the table to convert.\n \"\"\"\n if table_name not in self.tables:\n raise Exception(\"No such table.\")\n return pd.DataFrame(self.tables[table_name])\n", "path": "mesa/datacollection.py"}, {"content": "\"\"\"\nThe model class for Mesa framework.\n\nCore Objects: Model\n\"\"\"\n# Mypy; for the `|` operator purpose\n# Remove this __future__ import once the oldest supported Python is 3.10\nfrom __future__ import annotations\n\nimport random\n\n# mypy\nfrom typing import Any\n\nfrom mesa.datacollection import DataCollector\n\n\nclass Model:\n \"\"\"Base class for models.\"\"\"\n\n def __new__(cls, *args: Any, **kwargs: Any) -> Any:\n \"\"\"Create a new model object and instantiate its RNG automatically.\"\"\"\n obj = object.__new__(cls)\n obj._seed = kwargs.get(\"seed\", None)\n obj.random = random.Random(obj._seed)\n return obj\n\n def 
__init__(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Create a new model. Overload this method with the actual code to\n start the model.\n\n Attributes:\n schedule: schedule object\n running: a bool indicating if the model should continue running\n \"\"\"\n\n self.running = True\n self.schedule = None\n self.current_id = 0\n\n def run_model(self) -> None:\n \"\"\"Run the model until the end condition is reached. Overload as\n needed.\n \"\"\"\n while self.running:\n self.step()\n\n def step(self) -> None:\n \"\"\"A single step. Fill in here.\"\"\"\n\n def next_id(self) -> int:\n \"\"\"Return the next unique ID for agents, increment current_id\"\"\"\n self.current_id += 1\n return self.current_id\n\n def reset_randomizer(self, seed: int | None = None) -> None:\n \"\"\"Reset the model random number generator.\n\n Args:\n seed: A new seed for the RNG; if None, reset using the current seed\n \"\"\"\n\n if seed is None:\n seed = self._seed\n self.random.seed(seed)\n self._seed = seed\n\n def initialize_data_collector(\n self, model_reporters=None, agent_reporters=None, tables=None\n ) -> None:\n if not hasattr(self, \"schedule\") or self.schedule is None:\n raise RuntimeError(\n \"You must initialize the scheduler (self.schedule) before initializing the data collector.\"\n )\n if self.schedule.get_agent_count() == 0:\n raise RuntimeError(\n \"You must add agents to the scheduler before initializing the data collector.\"\n )\n self.datacollector = DataCollector(\n model_reporters=model_reporters,\n agent_reporters=agent_reporters,\n tables=tables,\n )\n # Collect data for the first time during initialization.\n self.datacollector.collect(self)\n", "path": "mesa/model.py"}], "after_files": [{"content": "\"\"\"\nMesa Data Collection Module\n===========================\n\nDataCollector is meant to provide a simple, standard way to collect data\ngenerated by a Mesa model. It collects three types of data: model-level data,\nagent-level data, and tables.\n\nA DataCollector is instantiated with two dictionaries of reporter names and\nassociated variable names or functions for each, one for model-level data and\none for agent-level data; a third dictionary provides table names and columns.\nVariable names are converted into functions which retrieve attributes of that\nname.\n\nWhen the collect() method is called, each model-level function is called, with\nthe model as the argument, and the results associated with the relevant\nvariable. 
Then the agent-level functions are called on each agent in the model\nscheduler.\n\nAdditionally, other objects can write directly to tables by passing in an\nappropriate dictionary object for a table row.\n\nThe DataCollector then stores the data it collects in dictionaries:\n * model_vars maps each reporter to a list of its values\n * tables maps each table to a dictionary, with each column as a key with a\n list as its value.\n * _agent_records maps each model step to a list of each agents id\n and its values.\n\nFinally, DataCollector can create a pandas DataFrame from each collection.\n\nThe default DataCollector here makes several assumptions:\n * The model has a schedule object called 'schedule'\n * The schedule has an agent list called agents\n * For collecting agent-level variables, agents must have a unique_id\n\"\"\"\nimport itertools\nimport types\nfrom operator import attrgetter\n\nimport pandas as pd\n\n\nclass DataCollector:\n \"\"\"Class for collecting data generated by a Mesa model.\n\n A DataCollector is instantiated with dictionaries of names of model- and\n agent-level variables to collect, associated with attribute names or\n functions which actually collect them. When the collect(...) method is\n called, it collects these attributes and executes these functions one by\n one and stores the results.\n \"\"\"\n\n def __init__(\n self,\n model_reporters=None,\n agent_reporters=None,\n tables=None,\n exclude_none_values=False,\n ):\n \"\"\"Instantiate a DataCollector with lists of model and agent reporters.\n Both model_reporters and agent_reporters accept a dictionary mapping a\n variable name to either an attribute name, or a method.\n For example, if there was only one model-level reporter for number of\n agents, it might look like:\n {\"agent_count\": lambda m: m.schedule.get_agent_count() }\n If there was only one agent-level reporter (e.g. the agent's energy),\n it might look like this:\n {\"energy\": \"energy\"}\n or like this:\n {\"energy\": lambda a: a.energy}\n\n The tables arg accepts a dictionary mapping names of tables to lists of\n columns. 
For example, if we want to allow agents to write their age\n when they are destroyed (to keep track of lifespans), it might look\n like:\n {\"Lifespan\": [\"unique_id\", \"age\"]}\n\n Args:\n model_reporters: Dictionary of reporter names and attributes/funcs\n agent_reporters: Dictionary of reporter names and attributes/funcs.\n tables: Dictionary of table names to lists of column names.\n exclude_none_values: Boolean of whether to drop records which values\n are None, in the final result.\n\n Notes:\n If you want to pickle your model you must not use lambda functions.\n If your model includes a large number of agents, you should *only*\n use attribute names for the agent reporter, it will be much faster.\n\n Model reporters can take four types of arguments:\n lambda like above:\n {\"agent_count\": lambda m: m.schedule.get_agent_count() }\n method of a class/instance:\n {\"agent_count\": self.get_agent_count} # self here is a class instance\n {\"agent_count\": Model.get_agent_count} # Model here is a class\n class attributes of a model\n {\"model_attribute\": \"model_attribute\"}\n functions with parameters that have placed in a list\n {\"Model_Function\":[function, [param_1, param_2]]}\n \"\"\"\n self.model_reporters = {}\n self.agent_reporters = {}\n\n self.model_vars = {}\n self._agent_records = {}\n self.tables = {}\n self.exclude_none_values = exclude_none_values\n\n if model_reporters is not None:\n for name, reporter in model_reporters.items():\n self._new_model_reporter(name, reporter)\n\n if agent_reporters is not None:\n for name, reporter in agent_reporters.items():\n self._new_agent_reporter(name, reporter)\n\n if tables is not None:\n for name, columns in tables.items():\n self._new_table(name, columns)\n\n def _new_model_reporter(self, name, reporter):\n \"\"\"Add a new model-level reporter to collect.\n\n Args:\n name: Name of the model-level variable to collect.\n reporter: Attribute string, or function object that returns the\n variable when given a model instance.\n \"\"\"\n self.model_reporters[name] = reporter\n self.model_vars[name] = []\n\n def _new_agent_reporter(self, name, reporter):\n \"\"\"Add a new agent-level reporter to collect.\n\n Args:\n name: Name of the agent-level variable to collect.\n reporter: Attribute string, or function object that returns the\n variable when given a model instance.\n \"\"\"\n if type(reporter) is str:\n attribute_name = reporter\n\n def reporter(agent):\n return getattr(agent, attribute_name, None)\n\n reporter.attribute_name = attribute_name\n self.agent_reporters[name] = reporter\n\n def _new_table(self, table_name, table_columns):\n \"\"\"Add a new table that objects can write to.\n\n Args:\n table_name: Name of the new table.\n table_columns: List of columns to add to the table.\n \"\"\"\n new_table = {column: [] for column in table_columns}\n self.tables[table_name] = new_table\n\n def _record_agents(self, model):\n \"\"\"Record agents data in a mapping of functions and agents.\"\"\"\n rep_funcs = self.agent_reporters.values()\n if self.exclude_none_values:\n # Drop records which values are None.\n\n def get_reports(agent):\n _prefix = (agent.model.schedule.steps, agent.unique_id)\n reports = (rep(agent) for rep in rep_funcs)\n reports_without_none = tuple(r for r in reports if r is not None)\n if len(reports_without_none) == 0:\n return None\n return _prefix + reports_without_none\n\n agent_records = (get_reports(agent) for agent in model.schedule.agents)\n agent_records_without_none = (r for r in agent_records if r is not 
None)\n return agent_records_without_none\n\n if all(hasattr(rep, \"attribute_name\") for rep in rep_funcs):\n # This branch is for performance optimization purpose.\n prefix = [\"model.schedule.steps\", \"unique_id\"]\n attributes = [func.attribute_name for func in rep_funcs]\n get_reports = attrgetter(*prefix + attributes)\n else:\n\n def get_reports(agent):\n _prefix = (agent.model.schedule.steps, agent.unique_id)\n reports = tuple(rep(agent) for rep in rep_funcs)\n return _prefix + reports\n\n agent_records = map(get_reports, model.schedule.agents)\n return agent_records\n\n def collect(self, model):\n \"\"\"Collect all the data for the given model object.\"\"\"\n if self.model_reporters:\n for var, reporter in self.model_reporters.items():\n # Check if Lambda operator\n if isinstance(reporter, types.LambdaType):\n self.model_vars[var].append(reporter(model))\n # Check if model attribute\n elif isinstance(reporter, str):\n self.model_vars[var].append(getattr(model, reporter, None))\n # Check if function with arguments\n elif isinstance(reporter, list):\n self.model_vars[var].append(reporter[0](*reporter[1]))\n # TODO: Check if method of a class, as of now it is assumed\n # implicitly if the other checks fail.\n else:\n self.model_vars[var].append(reporter())\n\n if self.agent_reporters:\n agent_records = self._record_agents(model)\n self._agent_records[model.schedule.steps] = list(agent_records)\n\n def add_table_row(self, table_name, row, ignore_missing=False):\n \"\"\"Add a row dictionary to a specific table.\n\n Args:\n table_name: Name of the table to append a row to.\n row: A dictionary of the form {column_name: value...}\n ignore_missing: If True, fill any missing columns with Nones;\n if False, throw an error if any columns are missing\n \"\"\"\n if table_name not in self.tables:\n raise Exception(\"Table does not exist.\")\n\n for column in self.tables[table_name]:\n if column in row:\n self.tables[table_name][column].append(row[column])\n elif ignore_missing:\n self.tables[table_name][column].append(None)\n else:\n raise Exception(\"Could not insert row with missing column\")\n\n def get_model_vars_dataframe(self):\n \"\"\"Create a pandas DataFrame from the model variables.\n\n The DataFrame has one column for each model variable, and the index is\n (implicitly) the model tick.\n \"\"\"\n # Check if self.model_reporters dictionary is empty, if so raise warning\n if not self.model_reporters:\n raise UserWarning(\n \"No model reporters have been defined in the DataCollector, returning empty DataFrame.\"\n )\n\n return pd.DataFrame(self.model_vars)\n\n def get_agent_vars_dataframe(self):\n \"\"\"Create a pandas DataFrame from the agent variables.\n\n The DataFrame has one column for each variable, with two additional\n columns for tick and agent_id.\n \"\"\"\n # Check if self.agent_reporters dictionary is empty, if so raise warning\n if not self.agent_reporters:\n raise UserWarning(\n \"No agent reporters have been defined in the DataCollector, returning empty DataFrame.\"\n )\n\n all_records = itertools.chain.from_iterable(self._agent_records.values())\n rep_names = list(self.agent_reporters)\n\n df = pd.DataFrame.from_records(\n data=all_records,\n columns=[\"Step\", \"AgentID\", *rep_names],\n index=[\"Step\", \"AgentID\"],\n )\n return df\n\n def get_table_dataframe(self, table_name):\n \"\"\"Create a pandas DataFrame from a particular table.\n\n Args:\n table_name: The name of the table to convert.\n \"\"\"\n if table_name not in self.tables:\n raise Exception(\"No such 
table.\")\n return pd.DataFrame(self.tables[table_name])\n", "path": "mesa/datacollection.py"}, {"content": "\"\"\"\nThe model class for Mesa framework.\n\nCore Objects: Model\n\"\"\"\n# Mypy; for the `|` operator purpose\n# Remove this __future__ import once the oldest supported Python is 3.10\nfrom __future__ import annotations\n\nimport random\n\n# mypy\nfrom typing import Any\n\nfrom mesa.datacollection import DataCollector\n\n\nclass Model:\n \"\"\"Base class for models.\"\"\"\n\n def __new__(cls, *args: Any, **kwargs: Any) -> Any:\n \"\"\"Create a new model object and instantiate its RNG automatically.\"\"\"\n obj = object.__new__(cls)\n obj._seed = kwargs.get(\"seed\", None)\n obj.random = random.Random(obj._seed)\n return obj\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Create a new model. Overload this method with the actual code to\n start the model.\n\n Attributes:\n schedule: schedule object\n running: a bool indicating if the model should continue running\n \"\"\"\n\n self.running = True\n self.schedule = None\n self.current_id = 0\n\n def run_model(self) -> None:\n \"\"\"Run the model until the end condition is reached. Overload as\n needed.\n \"\"\"\n while self.running:\n self.step()\n\n def step(self) -> None:\n \"\"\"A single step. Fill in here.\"\"\"\n\n def next_id(self) -> int:\n \"\"\"Return the next unique ID for agents, increment current_id\"\"\"\n self.current_id += 1\n return self.current_id\n\n def reset_randomizer(self, seed: int | None = None) -> None:\n \"\"\"Reset the model random number generator.\n\n Args:\n seed: A new seed for the RNG; if None, reset using the current seed\n \"\"\"\n\n if seed is None:\n seed = self._seed\n self.random.seed(seed)\n self._seed = seed\n\n def initialize_data_collector(\n self,\n model_reporters=None,\n agent_reporters=None,\n tables=None,\n exclude_none_values=False,\n ) -> None:\n if not hasattr(self, \"schedule\") or self.schedule is None:\n raise RuntimeError(\n \"You must initialize the scheduler (self.schedule) before initializing the data collector.\"\n )\n if self.schedule.get_agent_count() == 0:\n raise RuntimeError(\n \"You must add agents to the scheduler before initializing the data collector.\"\n )\n self.datacollector = DataCollector(\n model_reporters=model_reporters,\n agent_reporters=agent_reporters,\n tables=tables,\n exclude_none_values=exclude_none_values,\n )\n # Collect data for the first time during initialization.\n self.datacollector.collect(self)\n", "path": "mesa/model.py"}]}
| 3,880 | 766 |
gh_patches_debug_21803 | rasdani/github-patches | git_diff | beeware__toga-1827 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
GTK animated widgets freeze until first resize
### Describe the bug
GTK widgets that contain an "animated" component - e.g., Switch (the slide left/right animation) or an indeterminate ProgressBar (pulse animation) - won't animate when the window is first drawn. However, if you resize the window, the animation will resume, including any historical state.
### Steps to reproduce
1. Run the toga_switch example.
Try to toggle the "initial state" button. The color of the switch will change, but the switch itself won't "slide".
Also worth noting: the exterior border of the "change label" button is very slightly clipped.
2. Run the progressbar example
The indeterminate progress bar may not start.
Toggle the Running Mode and Indeterminate Mode buttons. With sufficient toggling, you will eventually be able to cause the running indeterminate progress bar to stop animating.
### Expected behavior
Animated widgets should consistently render.
### Screenshots
_No response_
### Environment
- Operating System: Linux (any; tested on Fedora 36 and Ubuntu 22.04)
- Python version: 3.10 (but likely any)
- Software versions:
- Toga: 0.3.0+; tested at 235ff2e, but anything after the landing of #1794 will show the problem
### Logs
```
```
### Additional context
The problem appears to be related to the GTK Container fixes introduced in #1794.
In both examples listed above, I have found that the *first* time I run examples after making a code change, it *sometimes* works. However, on second and subsequent runs, the problem reliably occurs. There may be a timing issue at play - maybe the slowdown of PYC recompilation is sufficient to mask event order problems?
A suboptimal fix: if the optimisation on L166 that only does a refresh() `if resized or self.needs_redraw` is removed (so a lot more refreshes are performed), the problem goes away.
A partial fix: if `window` listens for the `window-state-event` signal and marks the container dirty when that event occurs, the switch demo works. The switches on the progress bar demo are also fixed. However, the animation of the indeterminate progress bar *isn't* fixed (at least, not consistently).
This suggests that the issue is a signal that constitutes a "dirty" window, but isn't being treated as a dirty signal.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gtk/src/toga_gtk/container.py`
Content:
```
1 from .libs import Gdk, Gtk
2
3
4 class TogaContainer(Gtk.Fixed):
5 """A GTK container widget implementing Toga's layout.
6
7 This is a GTK widget, with no Toga interface manifestation.
8 """
9
10 def __init__(self):
11 super().__init__()
12 self._content = None
13 self.min_width = 100
14 self.min_height = 100
15
16 # GDK/GTK always renders at 96dpi. When HiDPI mode is enabled, it is
17 # managed at the compositor level. See
18 # https://wiki.archlinux.org/index.php/HiDPI#GDK_3_(GTK_3) for details
19 self.dpi = 96
20 self.baseline_dpi = self.dpi
21
22 # The dirty widgets are the set of widgets that are known to need
23 # re-hinting before any redraw occurs.
24 self._dirty_widgets = set()
25
26 # A flag that can be used to explicitly flag that a redraw is required.
27 self.needs_redraw = True
28
29 def make_dirty(self, widget=None):
30 """Mark the container (or a specific widget in the container) as dirty.
31
32 :param widget: If provided, this widget will be rehinted before the next layout.
33 """
34 self.needs_redraw = True
35 if widget is not None:
36 self._dirty_widgets.add(widget)
37 self.queue_resize()
38
39 @property
40 def width(self):
41 """The display width of the container.
42
43 If the container doesn't have any content yet, the width is 0.
44 """
45 if self._content is None:
46 return 0
47 return self.get_allocated_width()
48
49 @property
50 def height(self):
51 """The display height of the container.
52
53 If the container doesn't have any content yet, the height is 0.
54 """
55 if self._content is None:
56 return 0
57 return self.get_allocated_height()
58
59 @property
60 def content(self):
61 """The Toga implementation widget that is the root content of this
62 container.
63
64 All children of the root content will also be added to the container as
65 a result of assigning content.
66
67 If the container already has content, the old content will be replaced.
68 The old root content and all it's children will be removed from the
69 container.
70 """
71 return self._content
72
73 @content.setter
74 def content(self, widget):
75 if self._content:
76 self._content.container = None
77
78 self._content = widget
79 if widget:
80 widget.container = self
81
82 def recompute(self):
83 """Rehint and re-layout the container's content, if necessary.
84
85 Any widgets known to be dirty will be rehinted. The minimum
86 possible layout size for the container will also be recomputed.
87 """
88 if self._content and self.needs_redraw:
89 # If any of the widgets have been marked as dirty,
90 # recompute their bounds, and re-evaluate the minimum
91 # allowed size fo the layout.
92 while self._dirty_widgets:
93 widget = self._dirty_widgets.pop()
94 widget.rehint()
95
96 # Compute the layout using a 0-size container
97 self._content.interface.style.layout(
98 self._content.interface, TogaContainer()
99 )
100
101 # print(" computed min layout", self._content.interface.layout)
102 self.min_width = self._content.interface.layout.width
103 self.min_height = self._content.interface.layout.height
104
105 def do_get_preferred_width(self):
106 """Return (recomputing if necessary) the preferred width for the
107 container.
108
109 The preferred size of the container is it's minimum size. This
110 preference will be overridden with the layout size when the layout is
111 applied.
112
113 If the container does not yet have content, the minimum width is set to
114 0.
115 """
116 # print("GET PREFERRED WIDTH", self._content)
117 if self._content is None:
118 return 0, 0
119
120 # Ensure we have an accurate min layout size
121 self.recompute()
122
123 # The container will conform to the size of the allocation it is given,
124 # so the min and preferred size are the same.
125 return self.min_width, self.min_width
126
127 def do_get_preferred_height(self):
128 """Return (recomputing if necessary) the preferred height for the
129 container.
130
131 The preferred size of the container is it's minimum size. This
132 preference will be overridden with the layout size when the
133 layout is applied.
134
135 If the container does not yet have content, the minimum height
136 is set to 0.
137 """
138 # print("GET PREFERRED HEIGHT", self._content)
139 if self._content is None:
140 return 0, 0
141
142 # Ensure we have an accurate min layout size
143 self.recompute()
144
145 # The container will conform to the size of the allocation it is given,
146 # so the min and preferred size are the same.
147 return self.min_height, self.min_height
148
149 def do_size_allocate(self, allocation):
150 """Perform the actual layout for the widget, and all it's children.
151
152 The container will assume whatever size it has been given by GTK -
153 usually the full space of the window that holds the container.
154 The layout will then be re-computed based on this new available size,
155 and that new geometry will be applied to all child widgets of the
156 container.
157 """
158 # print(self._content, f"Container layout {allocation.width}x{allocation.height} @ {allocation.x}x{allocation.y}")
159
160 # The container will occupy the full space it has been allocated.
161 resized = (allocation.width, allocation.height) != (self.width, self.height)
162 self.set_allocation(allocation)
163
164 # This function may be called in response to irrelevant events like button clicks,
165 # so only refresh if we really need to.
166 if self._content and (resized or self.needs_redraw):
167 # Re-evaluate the layout using the allocation size as the basis for geometry
168 # print("REFRESH LAYOUT", allocation.width, allocation.height)
169 self._content.interface.refresh()
170
171 # WARNING! This is the list of children of the *container*, not
172 # the Toga widget. Toga maintains a tree of children; all nodes
173 # in that tree are direct children of the container.
174 for widget in self.get_children():
175 if not widget.get_visible():
176 # print(" not visible {widget.interface}")
177 pass
178 else:
179 # Set the size of the child widget to the computed layout size.
180 # print(f" allocate child {widget.interface}: {widget.interface.layout}")
181 widget_allocation = Gdk.Rectangle()
182 widget_allocation.x = (
183 widget.interface.layout.absolute_content_left + allocation.x
184 )
185 widget_allocation.y = (
186 widget.interface.layout.absolute_content_top + allocation.y
187 )
188 widget_allocation.width = widget.interface.layout.content_width
189 widget_allocation.height = widget.interface.layout.content_height
190
191 widget.size_allocate(widget_allocation)
192
193 # The layout has been redrawn
194 self.needs_redraw = False
195
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gtk/src/toga_gtk/container.py b/gtk/src/toga_gtk/container.py
--- a/gtk/src/toga_gtk/container.py
+++ b/gtk/src/toga_gtk/container.py
@@ -161,12 +161,13 @@
resized = (allocation.width, allocation.height) != (self.width, self.height)
self.set_allocation(allocation)
- # This function may be called in response to irrelevant events like button clicks,
- # so only refresh if we really need to.
- if self._content and (resized or self.needs_redraw):
- # Re-evaluate the layout using the allocation size as the basis for geometry
- # print("REFRESH LAYOUT", allocation.width, allocation.height)
- self._content.interface.refresh()
+ if self._content:
+ # This function may be called in response to irrelevant events like button clicks,
+ # so only refresh if we really need to.
+ if resized or self.needs_redraw:
+ # Re-evaluate the layout using the allocation size as the basis for geometry
+ # print("REFRESH LAYOUT", allocation.width, allocation.height)
+ self._content.interface.refresh()
# WARNING! This is the list of children of the *container*, not
# the Toga widget. Toga maintains a tree of children; all nodes
|
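For readers comparing approaches: the merged patch above keeps the per-child `size_allocate` loop running whenever the container has content, which is what lets animated children (Switch, indeterminate ProgressBar) keep receiving allocations even when no layout refresh is pending. The issue also describes a window-level workaround; a minimal sketch of that idea follows. The `DemoWindow` class and its `native`/`container` attributes are illustrative assumptions, not the actual toga-gtk `Window` implementation.

```python
# Sketch of the "partial fix" from the issue: re-flag the container as dirty
# whenever the GTK window's state changes. This is not the merged patch; the
# DemoWindow wrapper and its attribute names are assumptions for illustration.
import gi

gi.require_version("Gtk", "3.0")
from gi.repository import Gtk

from toga_gtk.container import TogaContainer


class DemoWindow:
    def __init__(self, title="Demo"):
        self.native = Gtk.Window(title=title)
        self.container = TogaContainer()
        self.native.add(self.container)
        # Mark the layout dirty on window-state changes so the next
        # size_allocate pass re-lays-out (and re-animates) the children.
        self.native.connect("window-state-event", self.gtk_window_state_event)

    def gtk_window_state_event(self, widget, event):
        self.container.make_dirty()
        return False  # let other handlers see the event
```

As the issue notes, this workaround restores the Switch animation but does not reliably fix the indeterminate ProgressBar pulse, which is why the allocation-loop change in the patch above is the fix that landed.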
{"golden_diff": "diff --git a/gtk/src/toga_gtk/container.py b/gtk/src/toga_gtk/container.py\n--- a/gtk/src/toga_gtk/container.py\n+++ b/gtk/src/toga_gtk/container.py\n@@ -161,12 +161,13 @@\n resized = (allocation.width, allocation.height) != (self.width, self.height)\n self.set_allocation(allocation)\n \n- # This function may be called in response to irrelevant events like button clicks,\n- # so only refresh if we really need to.\n- if self._content and (resized or self.needs_redraw):\n- # Re-evaluate the layout using the allocation size as the basis for geometry\n- # print(\"REFRESH LAYOUT\", allocation.width, allocation.height)\n- self._content.interface.refresh()\n+ if self._content:\n+ # This function may be called in response to irrelevant events like button clicks,\n+ # so only refresh if we really need to.\n+ if resized or self.needs_redraw:\n+ # Re-evaluate the layout using the allocation size as the basis for geometry\n+ # print(\"REFRESH LAYOUT\", allocation.width, allocation.height)\n+ self._content.interface.refresh()\n \n # WARNING! This is the list of children of the *container*, not\n # the Toga widget. Toga maintains a tree of children; all nodes\n", "issue": "GTK animated widgets freeze until first resize\n### Describe the bug\n\nGTK Widgets that contain an \"animated\" component - e.g., Switch (the slide left/right animation) or an indeterminate ProgressBar (pulse animation) won't animate when the window is first drawn. However, if you resize the window, the animation will resume, including any historical state.\n\n### Steps to reproduce\n\n1. Run the toga_switch example.\r\n\r\nTry to toggle the \"initial state\" button. The color of the switch will change, but the switch itself won't \"slide\".\r\n\r\nAlso worth note: The exterior border of the \"change label\" button is very slightly clipped.\r\n\r\n2. Run the progressbar example\r\n\r\nThe indeterminate progress bar may not start. \r\n\r\nToggle the Running Mode and Indeterminate Mode buttons. With sufficient toggling, you will eventually be able to cause the running indeterminate progress bar to stop animating.\r\n\n\n### Expected behavior\n\nAnimated widgets should consistently render.\n\n### Screenshots\n\n_No response_\n\n### Environment\n\n- Operating System: Linux (any; tested on Fedora 36 and Ubuntu 22.04)\r\n- Python version: 3.10 (but likely any)\r\n- Software versions:\r\n - Toga: 0.3.0+; tested at 235ff2e, but anything after the landing of #1794 will show the problem\r\n\n\n### Logs\n\n```\r\n\r\n```\r\n\n\n### Additional context\n\nThe problem appears to be related to the GTK Container fixes introduced in #1794.\r\n\r\nIn both examples listed above, I have found that the *first* time I run examples after making a code change, it *sometimes* works. However, on second and subsequent runs, the problem reliably occurs. There may be a timing issue at play - maybe the slowdown of PYC recompilation is sufficient to mask event order problems?\r\n\r\nA suboptimal fix: if the optimisation on L166 that only does a refresh() `if resized or self.needs_redraw` is removed (so a lot more refreshes are performed), the problem goes away\r\n\r\nA partial fix: If `window` listens for the `window-state-event` signal, and marks the container dirty when that event occurs, the switch demo works. The switches on the progress bar demo are also fixed. 
However, the animation of the indeterminate progress bar *isn't* fixed (at least, not consistently)\r\n\r\nThis suggests that the issue is a signal that constitutes a \"dirty\" window, but isn't being treated as a dirty signal.\r\n\n", "before_files": [{"content": "from .libs import Gdk, Gtk\n\n\nclass TogaContainer(Gtk.Fixed):\n \"\"\"A GTK container widget implementing Toga's layout.\n\n This is a GTK widget, with no Toga interface manifestation.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self._content = None\n self.min_width = 100\n self.min_height = 100\n\n # GDK/GTK always renders at 96dpi. When HiDPI mode is enabled, it is\n # managed at the compositor level. See\n # https://wiki.archlinux.org/index.php/HiDPI#GDK_3_(GTK_3) for details\n self.dpi = 96\n self.baseline_dpi = self.dpi\n\n # The dirty widgets are the set of widgets that are known to need\n # re-hinting before any redraw occurs.\n self._dirty_widgets = set()\n\n # A flag that can be used to explicitly flag that a redraw is required.\n self.needs_redraw = True\n\n def make_dirty(self, widget=None):\n \"\"\"Mark the container (or a specific widget in the container) as dirty.\n\n :param widget: If provided, this widget will be rehinted before the next layout.\n \"\"\"\n self.needs_redraw = True\n if widget is not None:\n self._dirty_widgets.add(widget)\n self.queue_resize()\n\n @property\n def width(self):\n \"\"\"The display width of the container.\n\n If the container doesn't have any content yet, the width is 0.\n \"\"\"\n if self._content is None:\n return 0\n return self.get_allocated_width()\n\n @property\n def height(self):\n \"\"\"The display height of the container.\n\n If the container doesn't have any content yet, the height is 0.\n \"\"\"\n if self._content is None:\n return 0\n return self.get_allocated_height()\n\n @property\n def content(self):\n \"\"\"The Toga implementation widget that is the root content of this\n container.\n\n All children of the root content will also be added to the container as\n a result of assigning content.\n\n If the container already has content, the old content will be replaced.\n The old root content and all it's children will be removed from the\n container.\n \"\"\"\n return self._content\n\n @content.setter\n def content(self, widget):\n if self._content:\n self._content.container = None\n\n self._content = widget\n if widget:\n widget.container = self\n\n def recompute(self):\n \"\"\"Rehint and re-layout the container's content, if necessary.\n\n Any widgets known to be dirty will be rehinted. The minimum\n possible layout size for the container will also be recomputed.\n \"\"\"\n if self._content and self.needs_redraw:\n # If any of the widgets have been marked as dirty,\n # recompute their bounds, and re-evaluate the minimum\n # allowed size fo the layout.\n while self._dirty_widgets:\n widget = self._dirty_widgets.pop()\n widget.rehint()\n\n # Compute the layout using a 0-size container\n self._content.interface.style.layout(\n self._content.interface, TogaContainer()\n )\n\n # print(\" computed min layout\", self._content.interface.layout)\n self.min_width = self._content.interface.layout.width\n self.min_height = self._content.interface.layout.height\n\n def do_get_preferred_width(self):\n \"\"\"Return (recomputing if necessary) the preferred width for the\n container.\n\n The preferred size of the container is it's minimum size. 
This\n preference will be overridden with the layout size when the layout is\n applied.\n\n If the container does not yet have content, the minimum width is set to\n 0.\n \"\"\"\n # print(\"GET PREFERRED WIDTH\", self._content)\n if self._content is None:\n return 0, 0\n\n # Ensure we have an accurate min layout size\n self.recompute()\n\n # The container will conform to the size of the allocation it is given,\n # so the min and preferred size are the same.\n return self.min_width, self.min_width\n\n def do_get_preferred_height(self):\n \"\"\"Return (recomputing if necessary) the preferred height for the\n container.\n\n The preferred size of the container is it's minimum size. This\n preference will be overridden with the layout size when the\n layout is applied.\n\n If the container does not yet have content, the minimum height\n is set to 0.\n \"\"\"\n # print(\"GET PREFERRED HEIGHT\", self._content)\n if self._content is None:\n return 0, 0\n\n # Ensure we have an accurate min layout size\n self.recompute()\n\n # The container will conform to the size of the allocation it is given,\n # so the min and preferred size are the same.\n return self.min_height, self.min_height\n\n def do_size_allocate(self, allocation):\n \"\"\"Perform the actual layout for the widget, and all it's children.\n\n The container will assume whatever size it has been given by GTK -\n usually the full space of the window that holds the container.\n The layout will then be re-computed based on this new available size,\n and that new geometry will be applied to all child widgets of the\n container.\n \"\"\"\n # print(self._content, f\"Container layout {allocation.width}x{allocation.height} @ {allocation.x}x{allocation.y}\")\n\n # The container will occupy the full space it has been allocated.\n resized = (allocation.width, allocation.height) != (self.width, self.height)\n self.set_allocation(allocation)\n\n # This function may be called in response to irrelevant events like button clicks,\n # so only refresh if we really need to.\n if self._content and (resized or self.needs_redraw):\n # Re-evaluate the layout using the allocation size as the basis for geometry\n # print(\"REFRESH LAYOUT\", allocation.width, allocation.height)\n self._content.interface.refresh()\n\n # WARNING! This is the list of children of the *container*, not\n # the Toga widget. 
Toga maintains a tree of children; all nodes\n # in that tree are direct children of the container.\n for widget in self.get_children():\n if not widget.get_visible():\n # print(\" not visible {widget.interface}\")\n pass\n else:\n # Set the size of the child widget to the computed layout size.\n # print(f\" allocate child {widget.interface}: {widget.interface.layout}\")\n widget_allocation = Gdk.Rectangle()\n widget_allocation.x = (\n widget.interface.layout.absolute_content_left + allocation.x\n )\n widget_allocation.y = (\n widget.interface.layout.absolute_content_top + allocation.y\n )\n widget_allocation.width = widget.interface.layout.content_width\n widget_allocation.height = widget.interface.layout.content_height\n\n widget.size_allocate(widget_allocation)\n\n # The layout has been redrawn\n self.needs_redraw = False\n", "path": "gtk/src/toga_gtk/container.py"}], "after_files": [{"content": "from .libs import Gdk, Gtk\n\n\nclass TogaContainer(Gtk.Fixed):\n \"\"\"A GTK container widget implementing Toga's layout.\n\n This is a GTK widget, with no Toga interface manifestation.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self._content = None\n self.min_width = 100\n self.min_height = 100\n\n # GDK/GTK always renders at 96dpi. When HiDPI mode is enabled, it is\n # managed at the compositor level. See\n # https://wiki.archlinux.org/index.php/HiDPI#GDK_3_(GTK_3) for details\n self.dpi = 96\n self.baseline_dpi = self.dpi\n\n # The dirty widgets are the set of widgets that are known to need\n # re-hinting before any redraw occurs.\n self._dirty_widgets = set()\n\n # A flag that can be used to explicitly flag that a redraw is required.\n self.needs_redraw = True\n\n def make_dirty(self, widget=None):\n \"\"\"Mark the container (or a specific widget in the container) as dirty.\n\n :param widget: If provided, this widget will be rehinted before the next layout.\n \"\"\"\n self.needs_redraw = True\n if widget is not None:\n self._dirty_widgets.add(widget)\n self.queue_resize()\n\n @property\n def width(self):\n \"\"\"The display width of the container.\n\n If the container doesn't have any content yet, the width is 0.\n \"\"\"\n if self._content is None:\n return 0\n return self.get_allocated_width()\n\n @property\n def height(self):\n \"\"\"The display height of the container.\n\n If the container doesn't have any content yet, the height is 0.\n \"\"\"\n if self._content is None:\n return 0\n return self.get_allocated_height()\n\n @property\n def content(self):\n \"\"\"The Toga implementation widget that is the root content of this\n container.\n\n All children of the root content will also be added to the container as\n a result of assigning content.\n\n If the container already has content, the old content will be replaced.\n The old root content and all it's children will be removed from the\n container.\n \"\"\"\n return self._content\n\n @content.setter\n def content(self, widget):\n if self._content:\n self._content.container = None\n\n self._content = widget\n if widget:\n widget.container = self\n\n def recompute(self):\n \"\"\"Rehint and re-layout the container's content, if necessary.\n\n Any widgets known to be dirty will be rehinted. 
The minimum\n possible layout size for the container will also be recomputed.\n \"\"\"\n if self._content and self.needs_redraw:\n # If any of the widgets have been marked as dirty,\n # recompute their bounds, and re-evaluate the minimum\n # allowed size fo the layout.\n while self._dirty_widgets:\n widget = self._dirty_widgets.pop()\n widget.rehint()\n\n # Compute the layout using a 0-size container\n self._content.interface.style.layout(\n self._content.interface, TogaContainer()\n )\n\n # print(\" computed min layout\", self._content.interface.layout)\n self.min_width = self._content.interface.layout.width\n self.min_height = self._content.interface.layout.height\n\n def do_get_preferred_width(self):\n \"\"\"Return (recomputing if necessary) the preferred width for the\n container.\n\n The preferred size of the container is it's minimum size. This\n preference will be overridden with the layout size when the layout is\n applied.\n\n If the container does not yet have content, the minimum width is set to\n 0.\n \"\"\"\n # print(\"GET PREFERRED WIDTH\", self._content)\n if self._content is None:\n return 0, 0\n\n # Ensure we have an accurate min layout size\n self.recompute()\n\n # The container will conform to the size of the allocation it is given,\n # so the min and preferred size are the same.\n return self.min_width, self.min_width\n\n def do_get_preferred_height(self):\n \"\"\"Return (recomputing if necessary) the preferred height for the\n container.\n\n The preferred size of the container is it's minimum size. This\n preference will be overridden with the layout size when the\n layout is applied.\n\n If the container does not yet have content, the minimum height\n is set to 0.\n \"\"\"\n # print(\"GET PREFERRED HEIGHT\", self._content)\n if self._content is None:\n return 0, 0\n\n # Ensure we have an accurate min layout size\n self.recompute()\n\n # The container will conform to the size of the allocation it is given,\n # so the min and preferred size are the same.\n return self.min_height, self.min_height\n\n def do_size_allocate(self, allocation):\n \"\"\"Perform the actual layout for the widget, and all it's children.\n\n The container will assume whatever size it has been given by GTK -\n usually the full space of the window that holds the container.\n The layout will then be re-computed based on this new available size,\n and that new geometry will be applied to all child widgets of the\n container.\n \"\"\"\n # print(self._content, f\"Container layout {allocation.width}x{allocation.height} @ {allocation.x}x{allocation.y}\")\n\n # The container will occupy the full space it has been allocated.\n resized = (allocation.width, allocation.height) != (self.width, self.height)\n self.set_allocation(allocation)\n\n if self._content:\n # This function may be called in response to irrelevant events like button clicks,\n # so only refresh if we really need to.\n if resized or self.needs_redraw:\n # Re-evaluate the layout using the allocation size as the basis for geometry\n # print(\"REFRESH LAYOUT\", allocation.width, allocation.height)\n self._content.interface.refresh()\n\n # WARNING! This is the list of children of the *container*, not\n # the Toga widget. 
Toga maintains a tree of children; all nodes\n # in that tree are direct children of the container.\n for widget in self.get_children():\n if not widget.get_visible():\n # print(\" not visible {widget.interface}\")\n pass\n else:\n # Set the size of the child widget to the computed layout size.\n # print(f\" allocate child {widget.interface}: {widget.interface.layout}\")\n widget_allocation = Gdk.Rectangle()\n widget_allocation.x = (\n widget.interface.layout.absolute_content_left + allocation.x\n )\n widget_allocation.y = (\n widget.interface.layout.absolute_content_top + allocation.y\n )\n widget_allocation.width = widget.interface.layout.content_width\n widget_allocation.height = widget.interface.layout.content_height\n\n widget.size_allocate(widget_allocation)\n\n # The layout has been redrawn\n self.needs_redraw = False\n", "path": "gtk/src/toga_gtk/container.py"}]}
| 2,823 | 303 |
gh_patches_debug_31649 | rasdani/github-patches | git_diff | deepset-ai__haystack-5970 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
JoinDocuments should use highest score when multiple retrievers recall the same document
**Is your feature request related to a problem? Please describe.**
The JoinDocuments node currently uses the document from the last retriever if there are multiple retrievers and they recall the same document. The first retriever could have the highest score and be more useful. That's why I propose to use the document with the highest score when duplicate documents are recalled.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `haystack/nodes/other/join_docs.py`
Content:
```
1 from collections import defaultdict
2 import logging
3 from math import inf
4
5 from typing import Optional, List
6
7 from haystack.schema import Document
8 from haystack.nodes.other.join import JoinNode
9
10 logger = logging.getLogger(__name__)
11
12
13 class JoinDocuments(JoinNode):
14 """
15 A node to join documents outputted by multiple retriever nodes.
16
17 The node allows multiple join modes:
18 * concatenate: combine the documents from multiple nodes. Any duplicate documents are discarded.
19 The score is only determined by the last node that outputs the document.
20 * merge: merge scores of documents from multiple nodes. Optionally, each input score can be given a different
21 `weight` & a `top_k` limit can be set. This mode can also be used for "reranking" retrieved documents.
22 * reciprocal_rank_fusion: combines the documents based on their rank in multiple nodes.
23 """
24
25 outgoing_edges = 1
26
27 def __init__(
28 self,
29 join_mode: str = "concatenate",
30 weights: Optional[List[float]] = None,
31 top_k_join: Optional[int] = None,
32 sort_by_score: bool = True,
33 ):
34 """
35 :param join_mode: `concatenate` to combine documents from multiple retrievers `merge` to aggregate scores of
36 individual documents, `reciprocal_rank_fusion` to apply rank based scoring.
37 :param weights: A node-wise list(length of list must be equal to the number of input nodes) of weights for
38 adjusting document scores when using the `merge` join_mode. By default, equal weight is given
39 to each retriever score. This param is not compatible with the `concatenate` join_mode.
40 :param top_k_join: Limit documents to top_k based on the resulting scores of the join.
41 :param sort_by_score: Whether to sort the incoming documents by their score. Set this to True if all your
42 Documents are coming with `score` values. Set to False if any of the Documents come
43 from sources where the `score` is set to `None`, like `TfidfRetriever` on Elasticsearch.
44 """
45 assert join_mode in [
46 "concatenate",
47 "merge",
48 "reciprocal_rank_fusion",
49 ], f"JoinDocuments node does not support '{join_mode}' join_mode."
50
51 assert not (
52 weights is not None and join_mode == "concatenate"
53 ), "Weights are not compatible with 'concatenate' join_mode."
54
55 super().__init__()
56
57 self.join_mode = join_mode
58 self.weights = [float(i) / sum(weights) for i in weights] if weights else None
59 self.top_k_join = top_k_join
60 self.sort_by_score = sort_by_score
61
62 def run_accumulated(self, inputs: List[dict], top_k_join: Optional[int] = None): # type: ignore
63 results = [inp["documents"] for inp in inputs]
64 document_map = {doc.id: doc for result in results for doc in result}
65
66 if self.join_mode == "concatenate":
67 scores_map = self._concatenate_results(results)
68 elif self.join_mode == "merge":
69 scores_map = self._calculate_comb_sum(results)
70 elif self.join_mode == "reciprocal_rank_fusion":
71 scores_map = self._calculate_rrf(results)
72 else:
73 raise ValueError(f"Invalid join_mode: {self.join_mode}")
74
75 # only sort the docs if that was requested
76 if self.sort_by_score:
77 sorted_docs = sorted(scores_map.items(), key=lambda d: d[1] if d[1] is not None else -inf, reverse=True)
78 if any(s is None for s in scores_map.values()):
79 logger.info(
80 "The `JoinDocuments` node has received some documents with `score=None` - and was requested "
81 "to sort the documents by score, so the `score=None` documents got sorted as if their "
82 "score would be `-infinity`."
83 )
84 else:
85 sorted_docs = list(scores_map.items())
86
87 if not top_k_join:
88 top_k_join = self.top_k_join
89 if not top_k_join:
90 top_k_join = len(sorted_docs)
91
92 docs = []
93 for id, score in sorted_docs[:top_k_join]:
94 doc = document_map[id]
95 doc.score = score
96 docs.append(doc)
97
98 output = {"documents": docs, "labels": inputs[0].get("labels", None)}
99
100 return output, "output_1"
101
102 def run_batch_accumulated(self, inputs: List[dict], top_k_join: Optional[int] = None): # type: ignore
103 # Join single document lists
104 if isinstance(inputs[0]["documents"][0], Document):
105 return self.run(inputs=inputs, top_k_join=top_k_join)
106 # Join lists of document lists
107 else:
108 output_docs = []
109 incoming_edges = [inp["documents"] for inp in inputs]
110 for idx in range(len(incoming_edges[0])):
111 cur_docs_to_join = []
112 for edge in incoming_edges:
113 cur_docs_to_join.append({"documents": edge[idx]})
114 cur, _ = self.run(inputs=cur_docs_to_join, top_k_join=top_k_join)
115 output_docs.append(cur["documents"])
116
117 output = {"documents": output_docs, "labels": inputs[0].get("labels", None)}
118
119 return output, "output_1"
120
121 def _concatenate_results(self, results):
122 """
123 Concatenates multiple document result lists.
124 """
125 return {doc.id: doc.score for result in results for doc in result}
126
127 def _calculate_comb_sum(self, results):
128 """
129 Calculates a combination sum by multiplying each score by its weight.
130 """
131 scores_map = defaultdict(int)
132 weights = self.weights if self.weights else [1 / len(results)] * len(results)
133
134 for result, weight in zip(results, weights):
135 for doc in result:
136 scores_map[doc.id] += (doc.score if doc.score else 0) * weight
137
138 return scores_map
139
140 def _calculate_rrf(self, results):
141 """
142 Calculates the reciprocal rank fusion. The constant K is set to 61 (60 was suggested by the original paper,
143 plus 1 as python lists are 0-based and the paper used 1-based ranking).
144 """
145 K = 61
146
147 scores_map = defaultdict(int)
148 for result in results:
149 for rank, doc in enumerate(result):
150 scores_map[doc.id] += 1 / (K + rank)
151
152 return scores_map
153
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/haystack/nodes/other/join_docs.py b/haystack/nodes/other/join_docs.py
--- a/haystack/nodes/other/join_docs.py
+++ b/haystack/nodes/other/join_docs.py
@@ -1,11 +1,10 @@
-from collections import defaultdict
import logging
+from collections import defaultdict
from math import inf
+from typing import List, Optional
-from typing import Optional, List
-
-from haystack.schema import Document
from haystack.nodes.other.join import JoinNode
+from haystack.schema import Document
logger = logging.getLogger(__name__)
@@ -64,7 +63,7 @@
document_map = {doc.id: doc for result in results for doc in result}
if self.join_mode == "concatenate":
- scores_map = self._concatenate_results(results)
+ scores_map = self._concatenate_results(results, document_map)
elif self.join_mode == "merge":
scores_map = self._calculate_comb_sum(results)
elif self.join_mode == "reciprocal_rank_fusion":
@@ -118,11 +117,22 @@
return output, "output_1"
- def _concatenate_results(self, results):
+ def _concatenate_results(self, results, document_map):
"""
Concatenates multiple document result lists.
+ Return the documents with the higher score.
"""
- return {doc.id: doc.score for result in results for doc in result}
+ list_id = list(document_map.keys())
+ scores_map = {}
+ for idx in list_id:
+ tmp = []
+ for result in results:
+ for doc in result:
+ if doc.id == idx:
+ tmp.append(doc)
+ item_best_score = max(tmp, key=lambda x: x.score)
+ scores_map.update({idx: item_best_score.score})
+ return scores_map
def _calculate_comb_sum(self, results):
"""
|
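A note on the patched `_concatenate_results` above: it rebuilds a candidate list per document id with nested loops, which scans `results` once for every unique id. A single-pass sketch with the same "keep the highest score per document id" behaviour is shown below; it is an illustration rather than the merged code, and it treats a `None` score as negative infinity (the merged patch assumes that duplicate documents always carry numeric scores).

```python
from math import inf
from typing import Dict, List

from haystack.schema import Document


def concatenate_keep_best_score(results: List[List[Document]]) -> Dict[str, float]:
    """Map each document id to the highest score seen across all result lists.

    A missing (None) score is treated as -inf, a simplification relative to the
    merged patch, which calls max() directly on the duplicate documents.
    """
    scores_map: Dict[str, float] = {}
    for result in results:
        for doc in result:
            score = doc.score if doc.score is not None else -inf
            if doc.id not in scores_map or score > scores_map[doc.id]:
                scores_map[doc.id] = score
    return scores_map
```

Swapping something like this into `JoinDocuments` would keep the "highest score wins" behaviour for scored documents while avoiding the repeated scans of `results` for every document id.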
{"golden_diff": "diff --git a/haystack/nodes/other/join_docs.py b/haystack/nodes/other/join_docs.py\n--- a/haystack/nodes/other/join_docs.py\n+++ b/haystack/nodes/other/join_docs.py\n@@ -1,11 +1,10 @@\n-from collections import defaultdict\n import logging\n+from collections import defaultdict\n from math import inf\n+from typing import List, Optional\n \n-from typing import Optional, List\n-\n-from haystack.schema import Document\n from haystack.nodes.other.join import JoinNode\n+from haystack.schema import Document\n \n logger = logging.getLogger(__name__)\n \n@@ -64,7 +63,7 @@\n document_map = {doc.id: doc for result in results for doc in result}\n \n if self.join_mode == \"concatenate\":\n- scores_map = self._concatenate_results(results)\n+ scores_map = self._concatenate_results(results, document_map)\n elif self.join_mode == \"merge\":\n scores_map = self._calculate_comb_sum(results)\n elif self.join_mode == \"reciprocal_rank_fusion\":\n@@ -118,11 +117,22 @@\n \n return output, \"output_1\"\n \n- def _concatenate_results(self, results):\n+ def _concatenate_results(self, results, document_map):\n \"\"\"\n Concatenates multiple document result lists.\n+ Return the documents with the higher score.\n \"\"\"\n- return {doc.id: doc.score for result in results for doc in result}\n+ list_id = list(document_map.keys())\n+ scores_map = {}\n+ for idx in list_id:\n+ tmp = []\n+ for result in results:\n+ for doc in result:\n+ if doc.id == idx:\n+ tmp.append(doc)\n+ item_best_score = max(tmp, key=lambda x: x.score)\n+ scores_map.update({idx: item_best_score.score})\n+ return scores_map\n \n def _calculate_comb_sum(self, results):\n \"\"\"\n", "issue": "JoinDocuments should use highest score when multiple retrievers recall the same document\n**Is your feature request related to a problem? Please describe.**\r\nThe JoinDocuments node currently uses the document from the last retriever if their are multiple retrievers and they recall the same document. The first retriever could have the highest score and be more useful. That's why I propose to use the document with the highest score if there are duplicate documents recalled\r\n\n", "before_files": [{"content": "from collections import defaultdict\nimport logging\nfrom math import inf\n\nfrom typing import Optional, List\n\nfrom haystack.schema import Document\nfrom haystack.nodes.other.join import JoinNode\n\nlogger = logging.getLogger(__name__)\n\n\nclass JoinDocuments(JoinNode):\n \"\"\"\n A node to join documents outputted by multiple retriever nodes.\n\n The node allows multiple join modes:\n * concatenate: combine the documents from multiple nodes. Any duplicate documents are discarded.\n The score is only determined by the last node that outputs the document.\n * merge: merge scores of documents from multiple nodes. Optionally, each input score can be given a different\n `weight` & a `top_k` limit can be set. 
This mode can also be used for \"reranking\" retrieved documents.\n * reciprocal_rank_fusion: combines the documents based on their rank in multiple nodes.\n \"\"\"\n\n outgoing_edges = 1\n\n def __init__(\n self,\n join_mode: str = \"concatenate\",\n weights: Optional[List[float]] = None,\n top_k_join: Optional[int] = None,\n sort_by_score: bool = True,\n ):\n \"\"\"\n :param join_mode: `concatenate` to combine documents from multiple retrievers `merge` to aggregate scores of\n individual documents, `reciprocal_rank_fusion` to apply rank based scoring.\n :param weights: A node-wise list(length of list must be equal to the number of input nodes) of weights for\n adjusting document scores when using the `merge` join_mode. By default, equal weight is given\n to each retriever score. This param is not compatible with the `concatenate` join_mode.\n :param top_k_join: Limit documents to top_k based on the resulting scores of the join.\n :param sort_by_score: Whether to sort the incoming documents by their score. Set this to True if all your\n Documents are coming with `score` values. Set to False if any of the Documents come\n from sources where the `score` is set to `None`, like `TfidfRetriever` on Elasticsearch.\n \"\"\"\n assert join_mode in [\n \"concatenate\",\n \"merge\",\n \"reciprocal_rank_fusion\",\n ], f\"JoinDocuments node does not support '{join_mode}' join_mode.\"\n\n assert not (\n weights is not None and join_mode == \"concatenate\"\n ), \"Weights are not compatible with 'concatenate' join_mode.\"\n\n super().__init__()\n\n self.join_mode = join_mode\n self.weights = [float(i) / sum(weights) for i in weights] if weights else None\n self.top_k_join = top_k_join\n self.sort_by_score = sort_by_score\n\n def run_accumulated(self, inputs: List[dict], top_k_join: Optional[int] = None): # type: ignore\n results = [inp[\"documents\"] for inp in inputs]\n document_map = {doc.id: doc for result in results for doc in result}\n\n if self.join_mode == \"concatenate\":\n scores_map = self._concatenate_results(results)\n elif self.join_mode == \"merge\":\n scores_map = self._calculate_comb_sum(results)\n elif self.join_mode == \"reciprocal_rank_fusion\":\n scores_map = self._calculate_rrf(results)\n else:\n raise ValueError(f\"Invalid join_mode: {self.join_mode}\")\n\n # only sort the docs if that was requested\n if self.sort_by_score:\n sorted_docs = sorted(scores_map.items(), key=lambda d: d[1] if d[1] is not None else -inf, reverse=True)\n if any(s is None for s in scores_map.values()):\n logger.info(\n \"The `JoinDocuments` node has received some documents with `score=None` - and was requested \"\n \"to sort the documents by score, so the `score=None` documents got sorted as if their \"\n \"score would be `-infinity`.\"\n )\n else:\n sorted_docs = list(scores_map.items())\n\n if not top_k_join:\n top_k_join = self.top_k_join\n if not top_k_join:\n top_k_join = len(sorted_docs)\n\n docs = []\n for id, score in sorted_docs[:top_k_join]:\n doc = document_map[id]\n doc.score = score\n docs.append(doc)\n\n output = {\"documents\": docs, \"labels\": inputs[0].get(\"labels\", None)}\n\n return output, \"output_1\"\n\n def run_batch_accumulated(self, inputs: List[dict], top_k_join: Optional[int] = None): # type: ignore\n # Join single document lists\n if isinstance(inputs[0][\"documents\"][0], Document):\n return self.run(inputs=inputs, top_k_join=top_k_join)\n # Join lists of document lists\n else:\n output_docs = []\n incoming_edges = [inp[\"documents\"] for inp in inputs]\n for idx in 
range(len(incoming_edges[0])):\n cur_docs_to_join = []\n for edge in incoming_edges:\n cur_docs_to_join.append({\"documents\": edge[idx]})\n cur, _ = self.run(inputs=cur_docs_to_join, top_k_join=top_k_join)\n output_docs.append(cur[\"documents\"])\n\n output = {\"documents\": output_docs, \"labels\": inputs[0].get(\"labels\", None)}\n\n return output, \"output_1\"\n\n def _concatenate_results(self, results):\n \"\"\"\n Concatenates multiple document result lists.\n \"\"\"\n return {doc.id: doc.score for result in results for doc in result}\n\n def _calculate_comb_sum(self, results):\n \"\"\"\n Calculates a combination sum by multiplying each score by its weight.\n \"\"\"\n scores_map = defaultdict(int)\n weights = self.weights if self.weights else [1 / len(results)] * len(results)\n\n for result, weight in zip(results, weights):\n for doc in result:\n scores_map[doc.id] += (doc.score if doc.score else 0) * weight\n\n return scores_map\n\n def _calculate_rrf(self, results):\n \"\"\"\n Calculates the reciprocal rank fusion. The constant K is set to 61 (60 was suggested by the original paper,\n plus 1 as python lists are 0-based and the paper used 1-based ranking).\n \"\"\"\n K = 61\n\n scores_map = defaultdict(int)\n for result in results:\n for rank, doc in enumerate(result):\n scores_map[doc.id] += 1 / (K + rank)\n\n return scores_map\n", "path": "haystack/nodes/other/join_docs.py"}], "after_files": [{"content": "import logging\nfrom collections import defaultdict\nfrom math import inf\nfrom typing import List, Optional\n\nfrom haystack.nodes.other.join import JoinNode\nfrom haystack.schema import Document\n\nlogger = logging.getLogger(__name__)\n\n\nclass JoinDocuments(JoinNode):\n \"\"\"\n A node to join documents outputted by multiple retriever nodes.\n\n The node allows multiple join modes:\n * concatenate: combine the documents from multiple nodes. Any duplicate documents are discarded.\n The score is only determined by the last node that outputs the document.\n * merge: merge scores of documents from multiple nodes. Optionally, each input score can be given a different\n `weight` & a `top_k` limit can be set. This mode can also be used for \"reranking\" retrieved documents.\n * reciprocal_rank_fusion: combines the documents based on their rank in multiple nodes.\n \"\"\"\n\n outgoing_edges = 1\n\n def __init__(\n self,\n join_mode: str = \"concatenate\",\n weights: Optional[List[float]] = None,\n top_k_join: Optional[int] = None,\n sort_by_score: bool = True,\n ):\n \"\"\"\n :param join_mode: `concatenate` to combine documents from multiple retrievers `merge` to aggregate scores of\n individual documents, `reciprocal_rank_fusion` to apply rank based scoring.\n :param weights: A node-wise list(length of list must be equal to the number of input nodes) of weights for\n adjusting document scores when using the `merge` join_mode. By default, equal weight is given\n to each retriever score. This param is not compatible with the `concatenate` join_mode.\n :param top_k_join: Limit documents to top_k based on the resulting scores of the join.\n :param sort_by_score: Whether to sort the incoming documents by their score. Set this to True if all your\n Documents are coming with `score` values. 
Set to False if any of the Documents come\n from sources where the `score` is set to `None`, like `TfidfRetriever` on Elasticsearch.\n \"\"\"\n assert join_mode in [\n \"concatenate\",\n \"merge\",\n \"reciprocal_rank_fusion\",\n ], f\"JoinDocuments node does not support '{join_mode}' join_mode.\"\n\n assert not (\n weights is not None and join_mode == \"concatenate\"\n ), \"Weights are not compatible with 'concatenate' join_mode.\"\n\n super().__init__()\n\n self.join_mode = join_mode\n self.weights = [float(i) / sum(weights) for i in weights] if weights else None\n self.top_k_join = top_k_join\n self.sort_by_score = sort_by_score\n\n def run_accumulated(self, inputs: List[dict], top_k_join: Optional[int] = None): # type: ignore\n results = [inp[\"documents\"] for inp in inputs]\n document_map = {doc.id: doc for result in results for doc in result}\n\n if self.join_mode == \"concatenate\":\n scores_map = self._concatenate_results(results, document_map)\n elif self.join_mode == \"merge\":\n scores_map = self._calculate_comb_sum(results)\n elif self.join_mode == \"reciprocal_rank_fusion\":\n scores_map = self._calculate_rrf(results)\n else:\n raise ValueError(f\"Invalid join_mode: {self.join_mode}\")\n\n # only sort the docs if that was requested\n if self.sort_by_score:\n sorted_docs = sorted(scores_map.items(), key=lambda d: d[1] if d[1] is not None else -inf, reverse=True)\n if any(s is None for s in scores_map.values()):\n logger.info(\n \"The `JoinDocuments` node has received some documents with `score=None` - and was requested \"\n \"to sort the documents by score, so the `score=None` documents got sorted as if their \"\n \"score would be `-infinity`.\"\n )\n else:\n sorted_docs = list(scores_map.items())\n\n if not top_k_join:\n top_k_join = self.top_k_join\n if not top_k_join:\n top_k_join = len(sorted_docs)\n\n docs = []\n for id, score in sorted_docs[:top_k_join]:\n doc = document_map[id]\n doc.score = score\n docs.append(doc)\n\n output = {\"documents\": docs, \"labels\": inputs[0].get(\"labels\", None)}\n\n return output, \"output_1\"\n\n def run_batch_accumulated(self, inputs: List[dict], top_k_join: Optional[int] = None): # type: ignore\n # Join single document lists\n if isinstance(inputs[0][\"documents\"][0], Document):\n return self.run(inputs=inputs, top_k_join=top_k_join)\n # Join lists of document lists\n else:\n output_docs = []\n incoming_edges = [inp[\"documents\"] for inp in inputs]\n for idx in range(len(incoming_edges[0])):\n cur_docs_to_join = []\n for edge in incoming_edges:\n cur_docs_to_join.append({\"documents\": edge[idx]})\n cur, _ = self.run(inputs=cur_docs_to_join, top_k_join=top_k_join)\n output_docs.append(cur[\"documents\"])\n\n output = {\"documents\": output_docs, \"labels\": inputs[0].get(\"labels\", None)}\n\n return output, \"output_1\"\n\n def _concatenate_results(self, results, document_map):\n \"\"\"\n Concatenates multiple document result lists.\n Return the documents with the higher score.\n \"\"\"\n list_id = list(document_map.keys())\n scores_map = {}\n for idx in list_id:\n tmp = []\n for result in results:\n for doc in result:\n if doc.id == idx:\n tmp.append(doc)\n item_best_score = max(tmp, key=lambda x: x.score)\n scores_map.update({idx: item_best_score.score})\n return scores_map\n\n def _calculate_comb_sum(self, results):\n \"\"\"\n Calculates a combination sum by multiplying each score by its weight.\n \"\"\"\n scores_map = defaultdict(int)\n weights = self.weights if self.weights else [1 / len(results)] * len(results)\n\n for 
result, weight in zip(results, weights):\n for doc in result:\n scores_map[doc.id] += (doc.score if doc.score else 0) * weight\n\n return scores_map\n\n def _calculate_rrf(self, results):\n \"\"\"\n Calculates the reciprocal rank fusion. The constant K is set to 61 (60 was suggested by the original paper,\n plus 1 as python lists are 0-based and the paper used 1-based ranking).\n \"\"\"\n K = 61\n\n scores_map = defaultdict(int)\n for result in results:\n for rank, doc in enumerate(result):\n scores_map[doc.id] += 1 / (K + rank)\n\n return scores_map\n", "path": "haystack/nodes/other/join_docs.py"}]}
| 2,115 | 432 |
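The patch in the record above changes `_concatenate_results` so that a document recalled by several retrievers keeps its highest score rather than the score from whichever retriever happened to run last. A minimal standalone sketch of that max-score merge follows; the `RankedDoc` tuple and function name are illustrative stand-ins, not Haystack API.

```python
from collections import namedtuple

# Illustrative stand-in for a retrieved document: an id plus a relevance score.
RankedDoc = namedtuple("RankedDoc", ["id", "score"])


def merge_keep_max_score(result_lists):
    """Concatenate several ranked lists, keeping the best score per document id."""
    scores = {}
    for result in result_lists:
        for doc in result:
            # Duplicates keep the highest score seen across all retrievers,
            # mirroring the intent of the patched _concatenate_results.
            scores[doc.id] = max(scores.get(doc.id, float("-inf")), doc.score)
    return scores


# Document "a" is recalled by both retrievers; the 0.9 score wins.
sparse = [RankedDoc("a", 0.9), RankedDoc("b", 0.4)]
dense = [RankedDoc("a", 0.6), RankedDoc("c", 0.7)]
print(merge_keep_max_score([sparse, dense]))  # {'a': 0.9, 'b': 0.4, 'c': 0.7}
```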
gh_patches_debug_8938
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-1448
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unhandled yaml.scanner.ScannerError when trying autoupdate with a malformed pre-commit config
In migrate_config.py we catch `yaml.YAMLError` on [lines 31-36](https://github.com/pre-commit/pre-commit/blob/master/pre_commit/commands/migrate_config.py#L31-L36) (of which `yaml.scanner.ScannerError` is a subclass), but when the exception is raised on line 28, it is unhandled.
```console
$ pre-commit autoupdate
An unexpected error has occurred: ScannerError: mapping values are not allowed in this context
in "<unicode string>", line 2, column 6
Check the log at /home/ryan/.cache/pre-commit/pre-commit.log
```
### version information
```
pre-commit version: 2.3.0
sys.version:
3.8.2 (default, Apr 8 2020, 14:31:25)
[GCC 9.3.0]
sys.executable: /home/ryan/.local/pipx/venvs/pre-commit/bin/python
os.name: posix
sys.platform: linux
```
### error information
```
An unexpected error has occurred: ScannerError: mapping values are not allowed in this context
in "<unicode string>", line 2, column 6
```
```
Traceback (most recent call last):
File "/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/pre_commit/error_handler.py", line 56, in error_handler
yield
File "/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/pre_commit/main.py", line 354, in main
return autoupdate(
File "/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/pre_commit/commands/autoupdate.py", line 141, in autoupdate
migrate_config(config_file, quiet=True)
File "/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/pre_commit/commands/migrate_config.py", line 49, in migrate_config
contents = _migrate_map(contents)
File "/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/pre_commit/commands/migrate_config.py", line 28, in _migrate_map
if isinstance(yaml_load(contents), list):
File "/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/yaml/__init__.py", line 114, in load
return loader.get_single_data()
File "/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/yaml/constructor.py", line 49, in get_single_data
node = self.get_single_node()
File "ext/_yaml.pyx", line 707, in _yaml.CParser.get_single_node
File "ext/_yaml.pyx", line 726, in _yaml.CParser._compose_document
File "ext/_yaml.pyx", line 905, in _yaml.CParser._parse_next_event
yaml.scanner.ScannerError: mapping values are not allowed in this context
in "<unicode string>", line 2, column 6
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/commands/migrate_config.py`
Content:
```
1 import re
2
3 import yaml
4
5 from pre_commit.util import yaml_load
6
7
8 def _indent(s: str) -> str:
9 lines = s.splitlines(True)
10 return ''.join(' ' * 4 + line if line.strip() else line for line in lines)
11
12
13 def _is_header_line(line: str) -> bool:
14 return line.startswith(('#', '---')) or not line.strip()
15
16
17 def _migrate_map(contents: str) -> str:
18 # Find the first non-header line
19 lines = contents.splitlines(True)
20 i = 0
21 # Only loop on non empty configuration file
22 while i < len(lines) and _is_header_line(lines[i]):
23 i += 1
24
25 header = ''.join(lines[:i])
26 rest = ''.join(lines[i:])
27
28 if isinstance(yaml_load(contents), list):
29 # If they are using the "default" flow style of yaml, this operation
30 # will yield a valid configuration
31 try:
32 trial_contents = f'{header}repos:\n{rest}'
33 yaml_load(trial_contents)
34 contents = trial_contents
35 except yaml.YAMLError:
36 contents = f'{header}repos:\n{_indent(rest)}'
37
38 return contents
39
40
41 def _migrate_sha_to_rev(contents: str) -> str:
42 return re.sub(r'(\n\s+)sha:', r'\1rev:', contents)
43
44
45 def migrate_config(config_file: str, quiet: bool = False) -> int:
46 with open(config_file) as f:
47 orig_contents = contents = f.read()
48
49 contents = _migrate_map(contents)
50 contents = _migrate_sha_to_rev(contents)
51
52 if contents != orig_contents:
53 with open(config_file, 'w') as f:
54 f.write(contents)
55
56 print('Configuration has been migrated.')
57 elif not quiet:
58 print('Configuration is already migrated.')
59 return 0
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pre_commit/commands/migrate_config.py b/pre_commit/commands/migrate_config.py
--- a/pre_commit/commands/migrate_config.py
+++ b/pre_commit/commands/migrate_config.py
@@ -2,6 +2,7 @@
import yaml
+from pre_commit.clientlib import load_config
from pre_commit.util import yaml_load
@@ -43,6 +44,9 @@
def migrate_config(config_file: str, quiet: bool = False) -> int:
+ # ensure that the configuration is a valid pre-commit configuration
+ load_config(config_file)
+
with open(config_file) as f:
orig_contents = contents = f.read()
|
{"golden_diff": "diff --git a/pre_commit/commands/migrate_config.py b/pre_commit/commands/migrate_config.py\n--- a/pre_commit/commands/migrate_config.py\n+++ b/pre_commit/commands/migrate_config.py\n@@ -2,6 +2,7 @@\n \n import yaml\n \n+from pre_commit.clientlib import load_config\n from pre_commit.util import yaml_load\n \n \n@@ -43,6 +44,9 @@\n \n \n def migrate_config(config_file: str, quiet: bool = False) -> int:\n+ # ensure that the configuration is a valid pre-commit configuration\n+ load_config(config_file)\n+\n with open(config_file) as f:\n orig_contents = contents = f.read()\n", "issue": "Unhandled yaml.scanner.ScannerError when trying autoupdate with a malformed pre-commit config\nIn migrate_config.py we catch `yaml.YAMLError` on [lines 31-36](https://github.com/pre-commit/pre-commit/blob/master/pre_commit/commands/migrate_config.py#L31-L36) (of which `yaml.scanner.ScannerError` is a subclass), but when the exception is raised on line 28, it is unhandled.\r\n\r\n```console\r\n$ pre-commit autoupdate\r\nAn unexpected error has occurred: ScannerError: mapping values are not allowed in this context\r\n in \"<unicode string>\", line 2, column 6\r\nCheck the log at /home/ryan/.cache/pre-commit/pre-commit.log\r\n```\r\n\r\n### version information\r\n\r\n```\r\npre-commit version: 2.3.0\r\nsys.version:\r\n 3.8.2 (default, Apr 8 2020, 14:31:25) \r\n [GCC 9.3.0]\r\nsys.executable: /home/ryan/.local/pipx/venvs/pre-commit/bin/python\r\nos.name: posix\r\nsys.platform: linux\r\n```\r\n\r\n### error information\r\n\r\n```\r\nAn unexpected error has occurred: ScannerError: mapping values are not allowed in this context\r\n in \"<unicode string>\", line 2, column 6\r\n```\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/pre_commit/error_handler.py\", line 56, in error_handler\r\n yield\r\n File \"/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/pre_commit/main.py\", line 354, in main\r\n return autoupdate(\r\n File \"/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/pre_commit/commands/autoupdate.py\", line 141, in autoupdate\r\n migrate_config(config_file, quiet=True)\r\n File \"/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/pre_commit/commands/migrate_config.py\", line 49, in migrate_config\r\n contents = _migrate_map(contents)\r\n File \"/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/pre_commit/commands/migrate_config.py\", line 28, in _migrate_map\r\n if isinstance(yaml_load(contents), list):\r\n File \"/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/yaml/__init__.py\", line 114, in load\r\n return loader.get_single_data()\r\n File \"/home/ryan/.local/pipx/venvs/pre-commit/lib/python3.8/site-packages/yaml/constructor.py\", line 49, in get_single_data\r\n node = self.get_single_node()\r\n File \"ext/_yaml.pyx\", line 707, in _yaml.CParser.get_single_node\r\n File \"ext/_yaml.pyx\", line 726, in _yaml.CParser._compose_document\r\n File \"ext/_yaml.pyx\", line 905, in _yaml.CParser._parse_next_event\r\nyaml.scanner.ScannerError: mapping values are not allowed in this context\r\n in \"<unicode string>\", line 2, column 6\r\n```\r\n\n", "before_files": [{"content": "import re\n\nimport yaml\n\nfrom pre_commit.util import yaml_load\n\n\ndef _indent(s: str) -> str:\n lines = s.splitlines(True)\n return ''.join(' ' * 4 + line if line.strip() else line for line in lines)\n\n\ndef _is_header_line(line: str) -> bool:\n return 
line.startswith(('#', '---')) or not line.strip()\n\n\ndef _migrate_map(contents: str) -> str:\n # Find the first non-header line\n lines = contents.splitlines(True)\n i = 0\n # Only loop on non empty configuration file\n while i < len(lines) and _is_header_line(lines[i]):\n i += 1\n\n header = ''.join(lines[:i])\n rest = ''.join(lines[i:])\n\n if isinstance(yaml_load(contents), list):\n # If they are using the \"default\" flow style of yaml, this operation\n # will yield a valid configuration\n try:\n trial_contents = f'{header}repos:\\n{rest}'\n yaml_load(trial_contents)\n contents = trial_contents\n except yaml.YAMLError:\n contents = f'{header}repos:\\n{_indent(rest)}'\n\n return contents\n\n\ndef _migrate_sha_to_rev(contents: str) -> str:\n return re.sub(r'(\\n\\s+)sha:', r'\\1rev:', contents)\n\n\ndef migrate_config(config_file: str, quiet: bool = False) -> int:\n with open(config_file) as f:\n orig_contents = contents = f.read()\n\n contents = _migrate_map(contents)\n contents = _migrate_sha_to_rev(contents)\n\n if contents != orig_contents:\n with open(config_file, 'w') as f:\n f.write(contents)\n\n print('Configuration has been migrated.')\n elif not quiet:\n print('Configuration is already migrated.')\n return 0\n", "path": "pre_commit/commands/migrate_config.py"}], "after_files": [{"content": "import re\n\nimport yaml\n\nfrom pre_commit.clientlib import load_config\nfrom pre_commit.util import yaml_load\n\n\ndef _indent(s: str) -> str:\n lines = s.splitlines(True)\n return ''.join(' ' * 4 + line if line.strip() else line for line in lines)\n\n\ndef _is_header_line(line: str) -> bool:\n return line.startswith(('#', '---')) or not line.strip()\n\n\ndef _migrate_map(contents: str) -> str:\n # Find the first non-header line\n lines = contents.splitlines(True)\n i = 0\n # Only loop on non empty configuration file\n while i < len(lines) and _is_header_line(lines[i]):\n i += 1\n\n header = ''.join(lines[:i])\n rest = ''.join(lines[i:])\n\n if isinstance(yaml_load(contents), list):\n # If they are using the \"default\" flow style of yaml, this operation\n # will yield a valid configuration\n try:\n trial_contents = f'{header}repos:\\n{rest}'\n yaml_load(trial_contents)\n contents = trial_contents\n except yaml.YAMLError:\n contents = f'{header}repos:\\n{_indent(rest)}'\n\n return contents\n\n\ndef _migrate_sha_to_rev(contents: str) -> str:\n return re.sub(r'(\\n\\s+)sha:', r'\\1rev:', contents)\n\n\ndef migrate_config(config_file: str, quiet: bool = False) -> int:\n # ensure that the configuration is a valid pre-commit configuration\n load_config(config_file)\n\n with open(config_file) as f:\n orig_contents = contents = f.read()\n\n contents = _migrate_map(contents)\n contents = _migrate_sha_to_rev(contents)\n\n if contents != orig_contents:\n with open(config_file, 'w') as f:\n f.write(contents)\n\n print('Configuration has been migrated.')\n elif not quiet:\n print('Configuration is already migrated.')\n return 0\n", "path": "pre_commit/commands/migrate_config.py"}]}
| 1,524 | 147 |
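The fix in the record above validates the file (via `load_config`) before `_migrate_map` ever parses it, so malformed YAML surfaces as a controlled error instead of an unhandled `ScannerError`. The same guard pattern is sketched generically below, with an illustrative `ConfigError` class rather than pre-commit's own error types.

```python
import yaml


class ConfigError(RuntimeError):
    """Raised when a configuration file cannot be parsed."""


def load_yaml_or_fail(path):
    # Parse up front so callers see one well-defined error type instead of a
    # raw yaml.scanner.ScannerError escaping from a later helper.
    with open(path) as f:
        contents = f.read()
    try:
        return yaml.safe_load(contents)
    except yaml.YAMLError as exc:  # ScannerError is a subclass of YAMLError
        raise ConfigError(f"{path} is not valid YAML: {exc}") from exc
```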
gh_patches_debug_60346
|
rasdani/github-patches
|
git_diff
|
graspologic-org__graspologic-366
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
returning test statistic in LDT
some practitioners (read: Vince, cep) only care about the test statistic and not the p-value. obviously one can still extract it if they perform the full test. however, that wastes time and resources. one can set the number of iterations to 1 to minimize that, but we can still do less. i propose to allow the number of permutations to be set to 0 (hyppo allows that, so really it is just a change in argument check). i am happy to do this, but:
this brings up the following questions: what should be happening to the fit_predict in that case? should it return the test statistic instead? or the p-value of 1? or NaN? should we be raising warnings?
and on a larger scale: should we really have this API? should fit predict return p-value, or a tuple of a p-value and a test statistic, like many other tests in python? furthremore, should it really be a class? once again, most tests in python that i have seen (scipy, statsmodels) are functions, not classes.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import os
2 import sys
3 from setuptools import setup, find_packages
4 from sys import platform
5
6 PACKAGE_NAME = "graspy"
7 DESCRIPTION = "A set of python modules for graph statistics"
8 with open("README.md", "r") as f:
9 LONG_DESCRIPTION = f.read()
10 AUTHOR = ("Eric Bridgeford, Jaewon Chung, Benjamin Pedigo, Bijan Varjavand",)
11 AUTHOR_EMAIL = "[email protected]"
12 URL = "https://github.com/neurodata/graspy"
13 MINIMUM_PYTHON_VERSION = 3, 6 # Minimum of Python 3.5
14 REQUIRED_PACKAGES = [
15 "networkx>=2.1",
16 "numpy>=1.8.1",
17 "scikit-learn>=0.19.1",
18 "scipy>=1.1.0",
19 "seaborn>=0.9.0",
20 "matplotlib>=3.0.0",
21 "hyppo>=0.1.2",
22 ]
23
24
25 # Find GraSPy version.
26 PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
27 for line in open(os.path.join(PROJECT_PATH, "graspy", "__init__.py")):
28 if line.startswith("__version__ = "):
29 VERSION = line.strip().split()[2][1:-1]
30
31
32 def check_python_version():
33 """Exit when the Python version is too low."""
34 if sys.version_info < MINIMUM_PYTHON_VERSION:
35 sys.exit("Python {}.{}+ is required.".format(*MINIMUM_PYTHON_VERSION))
36
37
38 check_python_version()
39
40 setup(
41 name=PACKAGE_NAME,
42 version=VERSION,
43 description=DESCRIPTION,
44 long_description=LONG_DESCRIPTION,
45 long_description_content_type="text/markdown",
46 author=AUTHOR,
47 author_email=AUTHOR_EMAIL,
48 install_requires=REQUIRED_PACKAGES,
49 url=URL,
50 license="Apache License 2.0",
51 classifiers=[
52 "Development Status :: 3 - Alpha",
53 "Intended Audience :: Science/Research",
54 "Topic :: Scientific/Engineering :: Mathematics",
55 "License :: OSI Approved :: Apache Software License",
56 "Programming Language :: Python :: 3",
57 "Programming Language :: Python :: 3.6",
58 "Programming Language :: Python :: 3.7",
59 ],
60 packages=find_packages(),
61 include_package_data=True,
62 )
63
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,7 +18,7 @@
"scipy>=1.1.0",
"seaborn>=0.9.0",
"matplotlib>=3.0.0",
- "hyppo>=0.1.2",
+ "hyppo>=0.1.3",
]
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -18,7 +18,7 @@\n \"scipy>=1.1.0\",\n \"seaborn>=0.9.0\",\n \"matplotlib>=3.0.0\",\n- \"hyppo>=0.1.2\",\n+ \"hyppo>=0.1.3\",\n ]\n", "issue": "returning test statistic in LDT\nsome practitioners (read: Vince, cep) only care about the test statistic and not the p-value. obviously one can still extract it if they perform the full test. however, that wastes time and resources. one can set the number of iterations to 1 to minimize that, but we can still do less. i propose to allow the number of permutations to be set to 0 (hyppo allows that, so really it is just a change in argument check). i am happy to do this, but:\r\n\r\nthis brings up the following questions: what should be happening to the fit_predict in that case? should it return the test statistic instead? or the p-value of 1? or NaN? should we be raising warnings?\r\n\r\nand on a larger scale: should we really have this API? should fit predict return p-value, or a tuple of a p-value and a test statistic, like many other tests in python? furthremore, should it really be a class? once again, most tests in python that i have seen (scipy, statsmodels) are functions, not classes.\n", "before_files": [{"content": "import os\nimport sys\nfrom setuptools import setup, find_packages\nfrom sys import platform\n\nPACKAGE_NAME = \"graspy\"\nDESCRIPTION = \"A set of python modules for graph statistics\"\nwith open(\"README.md\", \"r\") as f:\n LONG_DESCRIPTION = f.read()\nAUTHOR = (\"Eric Bridgeford, Jaewon Chung, Benjamin Pedigo, Bijan Varjavand\",)\nAUTHOR_EMAIL = \"[email protected]\"\nURL = \"https://github.com/neurodata/graspy\"\nMINIMUM_PYTHON_VERSION = 3, 6 # Minimum of Python 3.5\nREQUIRED_PACKAGES = [\n \"networkx>=2.1\",\n \"numpy>=1.8.1\",\n \"scikit-learn>=0.19.1\",\n \"scipy>=1.1.0\",\n \"seaborn>=0.9.0\",\n \"matplotlib>=3.0.0\",\n \"hyppo>=0.1.2\",\n]\n\n\n# Find GraSPy version.\nPROJECT_PATH = os.path.dirname(os.path.abspath(__file__))\nfor line in open(os.path.join(PROJECT_PATH, \"graspy\", \"__init__.py\")):\n if line.startswith(\"__version__ = \"):\n VERSION = line.strip().split()[2][1:-1]\n\n\ndef check_python_version():\n \"\"\"Exit when the Python version is too low.\"\"\"\n if sys.version_info < MINIMUM_PYTHON_VERSION:\n sys.exit(\"Python {}.{}+ is required.\".format(*MINIMUM_PYTHON_VERSION))\n\n\ncheck_python_version()\n\nsetup(\n name=PACKAGE_NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n install_requires=REQUIRED_PACKAGES,\n url=URL,\n license=\"Apache License 2.0\",\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n packages=find_packages(),\n include_package_data=True,\n)\n", "path": "setup.py"}], "after_files": [{"content": "import os\nimport sys\nfrom setuptools import setup, find_packages\nfrom sys import platform\n\nPACKAGE_NAME = \"graspy\"\nDESCRIPTION = \"A set of python modules for graph statistics\"\nwith open(\"README.md\", \"r\") as f:\n LONG_DESCRIPTION = f.read()\nAUTHOR = (\"Eric Bridgeford, Jaewon Chung, Benjamin Pedigo, Bijan Varjavand\",)\nAUTHOR_EMAIL = \"[email protected]\"\nURL = 
\"https://github.com/neurodata/graspy\"\nMINIMUM_PYTHON_VERSION = 3, 6 # Minimum of Python 3.5\nREQUIRED_PACKAGES = [\n \"networkx>=2.1\",\n \"numpy>=1.8.1\",\n \"scikit-learn>=0.19.1\",\n \"scipy>=1.1.0\",\n \"seaborn>=0.9.0\",\n \"matplotlib>=3.0.0\",\n \"hyppo>=0.1.3\",\n]\n\n\n# Find GraSPy version.\nPROJECT_PATH = os.path.dirname(os.path.abspath(__file__))\nfor line in open(os.path.join(PROJECT_PATH, \"graspy\", \"__init__.py\")):\n if line.startswith(\"__version__ = \"):\n VERSION = line.strip().split()[2][1:-1]\n\n\ndef check_python_version():\n \"\"\"Exit when the Python version is too low.\"\"\"\n if sys.version_info < MINIMUM_PYTHON_VERSION:\n sys.exit(\"Python {}.{}+ is required.\".format(*MINIMUM_PYTHON_VERSION))\n\n\ncheck_python_version()\n\nsetup(\n name=PACKAGE_NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n install_requires=REQUIRED_PACKAGES,\n url=URL,\n license=\"Apache License 2.0\",\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n packages=find_packages(),\n include_package_data=True,\n)\n", "path": "setup.py"}]}
| 1,103 | 90 |
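The merged patch above only bumps the `hyppo` pin; the design question raised in the issue — what a test should return when permutations are skipped — is independent of any particular library. A hypothetical sketch of such an argument check (not graspologic's or hyppo's actual API):

```python
import numpy as np


def permutation_test(stat_fn, x, y, n_permutations=1000, seed=None):
    """Return (statistic, p_value); p_value is None when n_permutations == 0."""
    if n_permutations < 0:
        raise ValueError("n_permutations must be >= 0")
    stat = stat_fn(x, y)
    if n_permutations == 0:
        # Caller only wants the statistic; skip building the null distribution.
        return stat, None
    rng = np.random.default_rng(seed)
    pooled = np.concatenate([x, y])
    null = np.empty(n_permutations)
    for i in range(n_permutations):
        perm = rng.permutation(pooled)
        null[i] = stat_fn(perm[: len(x)], perm[len(x):])
    p_value = (1 + np.sum(null >= stat)) / (1 + n_permutations)
    return stat, p_value
```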
gh_patches_debug_17961
|
rasdani/github-patches
|
git_diff
|
pytorch__tnt-101
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AverageValueMeter returns incorrect results when `tensor` is passed
Based on [this thread](https://discuss.pytorch.org/t/confusing-result-about-meter-averagevaluemeter/21819) it seems as if the internal members of the class hold references to the `tensors`, thus yielding wrong results.
When the `tensor` value is passed by `.item()` the result is correct.
A simple fix would be to add this condition to `add`:
```python
def add(self, value, n=1):
if isinstance(value, torch.Tensor):
value = value.item()
self.val = value
```
I can submit a PR, if that makes sense to you.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchnet/meter/averagevaluemeter.py`
Content:
```
1 import math
2 from . import meter
3 import numpy as np
4
5
6 class AverageValueMeter(meter.Meter):
7 def __init__(self):
8 super(AverageValueMeter, self).__init__()
9 self.reset()
10 self.val = 0
11
12 def add(self, value, n=1):
13 self.val = value
14 self.sum += value
15 self.var += value * value
16 self.n += n
17
18 if self.n == 0:
19 self.mean, self.std = np.nan, np.nan
20 elif self.n == 1:
21 self.mean, self.std = self.sum, np.inf
22 self.mean_old = self.mean
23 self.m_s = 0.0
24 else:
25 self.mean = self.mean_old + (value - n * self.mean_old) / float(self.n)
26 self.m_s += (value - self.mean_old) * (value - self.mean)
27 self.mean_old = self.mean
28 self.std = math.sqrt(self.m_s / (self.n - 1.0))
29
30 def value(self):
31 return self.mean, self.std
32
33 def reset(self):
34 self.n = 0
35 self.sum = 0.0
36 self.var = 0.0
37 self.val = 0.0
38 self.mean = np.nan
39 self.mean_old = 0.0
40 self.m_s = 0.0
41 self.std = np.nan
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/torchnet/meter/averagevaluemeter.py b/torchnet/meter/averagevaluemeter.py
--- a/torchnet/meter/averagevaluemeter.py
+++ b/torchnet/meter/averagevaluemeter.py
@@ -18,14 +18,15 @@
if self.n == 0:
self.mean, self.std = np.nan, np.nan
elif self.n == 1:
- self.mean, self.std = self.sum, np.inf
+ self.mean = 0.0 + self.sum # This is to force a copy in torch/numpy
+ self.std = np.inf
self.mean_old = self.mean
self.m_s = 0.0
else:
self.mean = self.mean_old + (value - n * self.mean_old) / float(self.n)
self.m_s += (value - self.mean_old) * (value - self.mean)
self.mean_old = self.mean
- self.std = math.sqrt(self.m_s / (self.n - 1.0))
+ self.std = np.sqrt(self.m_s / (self.n - 1.0))
def value(self):
return self.mean, self.std
|
{"golden_diff": "diff --git a/torchnet/meter/averagevaluemeter.py b/torchnet/meter/averagevaluemeter.py\n--- a/torchnet/meter/averagevaluemeter.py\n+++ b/torchnet/meter/averagevaluemeter.py\n@@ -18,14 +18,15 @@\n if self.n == 0:\n self.mean, self.std = np.nan, np.nan\n elif self.n == 1:\n- self.mean, self.std = self.sum, np.inf\n+ self.mean = 0.0 + self.sum # This is to force a copy in torch/numpy\n+ self.std = np.inf\n self.mean_old = self.mean\n self.m_s = 0.0\n else:\n self.mean = self.mean_old + (value - n * self.mean_old) / float(self.n)\n self.m_s += (value - self.mean_old) * (value - self.mean)\n self.mean_old = self.mean\n- self.std = math.sqrt(self.m_s / (self.n - 1.0))\n+ self.std = np.sqrt(self.m_s / (self.n - 1.0))\n \n def value(self):\n return self.mean, self.std\n", "issue": "AverageValueMeter returns incorrect results when `tensor` is passed\nBased on [this thread](https://discuss.pytorch.org/t/confusing-result-about-meter-averagevaluemeter/21819) it seems as if the internal members of the class hold references to the `tensors`, thus yielding wrong results.\r\nWhen the `tensor` value is passed by `.item()` the result is correct.\r\nA simple fix would be to add this condition to `add`:\r\n```python\r\ndef add(self, value, n=1):\r\n if isinstance(value, torch.Tensor):\r\n value = value.item()\r\n self.val = value\r\n```\r\n\r\nI can submit a PR, if that makes sense to you.\n", "before_files": [{"content": "import math\nfrom . import meter\nimport numpy as np\n\n\nclass AverageValueMeter(meter.Meter):\n def __init__(self):\n super(AverageValueMeter, self).__init__()\n self.reset()\n self.val = 0\n\n def add(self, value, n=1):\n self.val = value\n self.sum += value\n self.var += value * value\n self.n += n\n\n if self.n == 0:\n self.mean, self.std = np.nan, np.nan\n elif self.n == 1:\n self.mean, self.std = self.sum, np.inf\n self.mean_old = self.mean\n self.m_s = 0.0\n else:\n self.mean = self.mean_old + (value - n * self.mean_old) / float(self.n)\n self.m_s += (value - self.mean_old) * (value - self.mean)\n self.mean_old = self.mean\n self.std = math.sqrt(self.m_s / (self.n - 1.0))\n\n def value(self):\n return self.mean, self.std\n\n def reset(self):\n self.n = 0\n self.sum = 0.0\n self.var = 0.0\n self.val = 0.0\n self.mean = np.nan\n self.mean_old = 0.0\n self.m_s = 0.0\n self.std = np.nan\n", "path": "torchnet/meter/averagevaluemeter.py"}], "after_files": [{"content": "import math\nfrom . import meter\nimport numpy as np\n\n\nclass AverageValueMeter(meter.Meter):\n def __init__(self):\n super(AverageValueMeter, self).__init__()\n self.reset()\n self.val = 0\n\n def add(self, value, n=1):\n self.val = value\n self.sum += value\n self.var += value * value\n self.n += n\n\n if self.n == 0:\n self.mean, self.std = np.nan, np.nan\n elif self.n == 1:\n self.mean = 0.0 + self.sum # This is to force a copy in torch/numpy\n self.std = np.inf\n self.mean_old = self.mean\n self.m_s = 0.0\n else:\n self.mean = self.mean_old + (value - n * self.mean_old) / float(self.n)\n self.m_s += (value - self.mean_old) * (value - self.mean)\n self.mean_old = self.mean\n self.std = np.sqrt(self.m_s / (self.n - 1.0))\n\n def value(self):\n return self.mean, self.std\n\n def reset(self):\n self.n = 0\n self.sum = 0.0\n self.var = 0.0\n self.val = 0.0\n self.mean = np.nan\n self.mean_old = 0.0\n self.m_s = 0.0\n self.std = np.nan\n", "path": "torchnet/meter/averagevaluemeter.py"}]}
| 796 | 274 |
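The root cause in the meter above is aliasing: once a 0-dim tensor enters the running sums, an in-place `+=` mutates the very object that `mean_old` still references, which is why the patch forces a copy with `0.0 + self.sum`. A minimal reproduction of the aliasing, assuming PyTorch is installed (variable names are illustrative):

```python
import torch

running_sum = 0.0
running_sum = running_sum + torch.tensor(2.0)  # running_sum is now tensor(2.)
mean_old = running_sum                         # an alias, not a copy
running_sum += torch.tensor(3.0)               # in-place add mutates the shared tensor
print(mean_old)                                # tensor(5.) -- the "old" mean silently changed

# Either force a copy (as the merged patch does) or unwrap scalars on the way in:
mean_old = 0.0 + running_sum                   # fresh object; later in-place ops no longer leak
value = torch.tensor(2.0)
value = value.item() if isinstance(value, torch.Tensor) else value  # plain float 2.0
```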
gh_patches_debug_6669
|
rasdani/github-patches
|
git_diff
|
falconry__falcon-1955
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Hook after params attribute is missing
Hi
```python
def do_before(req, resp, resource, params):
# here params is available
def do_after(req, resp, resource, params):
# here params is not available
```
How can I access the params?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `falcon/hooks.py`
Content:
```
1 # Copyright 2013 by Rackspace Hosting, Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Hook decorators."""
16
17 from functools import wraps
18 from inspect import getmembers
19 from inspect import iscoroutinefunction
20 import re
21
22 from falcon.constants import COMBINED_METHODS
23 from falcon.util.misc import get_argnames
24 from falcon.util.sync import _wrap_non_coroutine_unsafe
25
26
27 _DECORABLE_METHOD_NAME = re.compile(
28 r'^on_({})(_\w+)?$'.format('|'.join(method.lower() for method in COMBINED_METHODS))
29 )
30
31
32 def before(action, *args, is_async=False, **kwargs):
33 """Execute the given action function *before* the responder.
34
35 The `params` argument that is passed to the hook
36 contains only the fields from the URI template path; it does not
37 include query string values.
38
39 Hooks may inject extra params as needed. For example::
40
41 def do_something(req, resp, resource, params):
42 try:
43 params['id'] = int(params['id'])
44 except ValueError:
45 raise falcon.HTTPBadRequest(title='Invalid ID',
46 description='ID was not valid.')
47
48 params['answer'] = 42
49
50 Args:
51 action (callable): A function of the form
52 ``func(req, resp, resource, params)``, where `resource` is a
53 reference to the resource class instance associated with the
54 request and `params` is a dict of URI template field names,
55 if any, that will be passed into the resource responder as
56 kwargs.
57
58 *args: Any additional arguments will be passed to *action* in the
59 order given, immediately following the *req*, *resp*, *resource*,
60 and *params* arguments.
61
62 Keyword Args:
63 is_async (bool): Set to ``True`` for ASGI apps to provide a hint that
64 the decorated responder is a coroutine function (i.e., that it
65 is defined with ``async def``) or that it returns an awaitable
66 coroutine object.
67
68 Normally, when the function source is declared using ``async def``,
69 the resulting function object is flagged to indicate it returns a
70 coroutine when invoked, and this can be automatically detected.
71 However, it is possible to use a regular function to return an
72 awaitable coroutine object, in which case a hint is required to let
73 the framework know what to expect. Also, a hint is always required
74 when using a cythonized coroutine function, since Cython does not
75 flag them in a way that can be detected in advance, even when the
76 function is declared using ``async def``.
77
78 **kwargs: Any additional keyword arguments will be passed through to
79 *action*.
80 """
81
82 def _before(responder_or_resource):
83 if isinstance(responder_or_resource, type):
84 resource = responder_or_resource
85
86 for responder_name, responder in getmembers(resource, callable):
87 if _DECORABLE_METHOD_NAME.match(responder_name):
88 # This pattern is necessary to capture the current value of
89 # responder in the do_before_all closure; otherwise, they
90 # will capture the same responder variable that is shared
91 # between iterations of the for loop, above.
92 def let(responder=responder):
93 do_before_all = _wrap_with_before(
94 responder, action, args, kwargs, is_async
95 )
96
97 setattr(resource, responder_name, do_before_all)
98
99 let()
100
101 return resource
102
103 else:
104 responder = responder_or_resource
105 do_before_one = _wrap_with_before(responder, action, args, kwargs, is_async)
106
107 return do_before_one
108
109 return _before
110
111
112 def after(action, *args, is_async=False, **kwargs):
113 """Execute the given action function *after* the responder.
114
115 Args:
116 action (callable): A function of the form
117 ``func(req, resp, resource)``, where `resource` is a
118 reference to the resource class instance associated with the
119 request
120
121 *args: Any additional arguments will be passed to *action* in the
122 order given, immediately following the *req*, *resp*, *resource*,
123 and *params* arguments.
124
125 Keyword Args:
126 is_async (bool): Set to ``True`` for ASGI apps to provide a hint that
127 the decorated responder is a coroutine function (i.e., that it
128 is defined with ``async def``) or that it returns an awaitable
129 coroutine object.
130
131 Normally, when the function source is declared using ``async def``,
132 the resulting function object is flagged to indicate it returns a
133 coroutine when invoked, and this can be automatically detected.
134 However, it is possible to use a regular function to return an
135 awaitable coroutine object, in which case a hint is required to let
136 the framework know what to expect. Also, a hint is always required
137 when using a cythonized coroutine function, since Cython does not
138 flag them in a way that can be detected in advance, even when the
139 function is declared using ``async def``.
140
141 **kwargs: Any additional keyword arguments will be passed through to
142 *action*.
143 """
144
145 def _after(responder_or_resource):
146 if isinstance(responder_or_resource, type):
147 resource = responder_or_resource
148
149 for responder_name, responder in getmembers(resource, callable):
150 if _DECORABLE_METHOD_NAME.match(responder_name):
151
152 def let(responder=responder):
153 do_after_all = _wrap_with_after(
154 responder, action, args, kwargs, is_async
155 )
156
157 setattr(resource, responder_name, do_after_all)
158
159 let()
160
161 return resource
162
163 else:
164 responder = responder_or_resource
165 do_after_one = _wrap_with_after(responder, action, args, kwargs, is_async)
166
167 return do_after_one
168
169 return _after
170
171
172 # -----------------------------------------------------------------------------
173 # Helpers
174 # -----------------------------------------------------------------------------
175
176
177 def _wrap_with_after(responder, action, action_args, action_kwargs, is_async):
178 """Execute the given action function after a responder method.
179
180 Args:
181 responder: The responder method to wrap.
182 action: A function with a signature similar to a resource responder
183 method, taking the form ``func(req, resp, resource)``.
184 action_args: Additional positional agruments to pass to *action*.
185 action_kwargs: Additional keyword arguments to pass to *action*.
186 is_async: Set to ``True`` for cythonized responders that are
187 actually coroutine functions, since such responders can not
188 be auto-detected. A hint is also required for regular functions
189 that happen to return an awaitable coroutine object.
190 """
191
192 responder_argnames = get_argnames(responder)
193 extra_argnames = responder_argnames[2:] # Skip req, resp
194
195 if is_async or iscoroutinefunction(responder):
196 # NOTE(kgriffs): I manually verified that the implicit "else" branch
197 # is actually covered, but coverage isn't tracking it for
198 # some reason.
199 if not is_async: # pragma: nocover
200 action = _wrap_non_coroutine_unsafe(action)
201
202 @wraps(responder)
203 async def do_after(self, req, resp, *args, **kwargs):
204 if args:
205 _merge_responder_args(args, kwargs, extra_argnames)
206
207 await responder(self, req, resp, **kwargs)
208 await action(req, resp, self, *action_args, **action_kwargs)
209
210 else:
211
212 @wraps(responder)
213 def do_after(self, req, resp, *args, **kwargs):
214 if args:
215 _merge_responder_args(args, kwargs, extra_argnames)
216
217 responder(self, req, resp, **kwargs)
218 action(req, resp, self, *action_args, **action_kwargs)
219
220 return do_after
221
222
223 def _wrap_with_before(responder, action, action_args, action_kwargs, is_async):
224 """Execute the given action function before a responder method.
225
226 Args:
227 responder: The responder method to wrap.
228 action: A function with a similar signature to a resource responder
229 method, taking the form ``func(req, resp, resource, params)``.
230 action_args: Additional positional agruments to pass to *action*.
231 action_kwargs: Additional keyword arguments to pass to *action*.
232 is_async: Set to ``True`` for cythonized responders that are
233 actually coroutine functions, since such responders can not
234 be auto-detected. A hint is also required for regular functions
235 that happen to return an awaitable coroutine object.
236 """
237
238 responder_argnames = get_argnames(responder)
239 extra_argnames = responder_argnames[2:] # Skip req, resp
240
241 if is_async or iscoroutinefunction(responder):
242 # NOTE(kgriffs): I manually verified that the implicit "else" branch
243 # is actually covered, but coverage isn't tracking it for
244 # some reason.
245 if not is_async: # pragma: nocover
246 action = _wrap_non_coroutine_unsafe(action)
247
248 @wraps(responder)
249 async def do_before(self, req, resp, *args, **kwargs):
250 if args:
251 _merge_responder_args(args, kwargs, extra_argnames)
252
253 await action(req, resp, self, kwargs, *action_args, **action_kwargs)
254 await responder(self, req, resp, **kwargs)
255
256 else:
257
258 @wraps(responder)
259 def do_before(self, req, resp, *args, **kwargs):
260 if args:
261 _merge_responder_args(args, kwargs, extra_argnames)
262
263 action(req, resp, self, kwargs, *action_args, **action_kwargs)
264 responder(self, req, resp, **kwargs)
265
266 return do_before
267
268
269 def _merge_responder_args(args, kwargs, argnames):
270 """Merge responder args into kwargs.
271
272 The framework always passes extra args as keyword arguments.
273 However, when the app calls the responder directly, it might use
274 positional arguments instead, so we need to handle that case. This
275 might happen, for example, when overriding a resource and calling
276 a responder via super().
277
278 Args:
279 args (tuple): Extra args passed into the responder
280 kwargs (dict): Keyword args passed into the responder
281 argnames (list): Extra argnames from the responder's
282 signature, ordered as defined
283 """
284
285 # NOTE(kgriffs): Merge positional args into kwargs by matching
286 # them up to the responder's signature. To do that, we must
287 # find out the names of the positional arguments by matching
288 # them in the order of the arguments named in the responder's
289 # signature.
290 for i, argname in enumerate(argnames):
291 # NOTE(kgriffs): extra_argnames may contain keyword arguments,
292 # which wont be in the args list, and are already in the kwargs
293 # dict anyway, so detect and skip them.
294 if argname not in kwargs:
295 kwargs[argname] = args[i]
296
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/falcon/hooks.py b/falcon/hooks.py
--- a/falcon/hooks.py
+++ b/falcon/hooks.py
@@ -119,8 +119,8 @@
request
*args: Any additional arguments will be passed to *action* in the
- order given, immediately following the *req*, *resp*, *resource*,
- and *params* arguments.
+ order given, immediately following the *req*, *resp* and *resource*
+ arguments.
Keyword Args:
is_async (bool): Set to ``True`` for ASGI apps to provide a hint that
|
{"golden_diff": "diff --git a/falcon/hooks.py b/falcon/hooks.py\n--- a/falcon/hooks.py\n+++ b/falcon/hooks.py\n@@ -119,8 +119,8 @@\n request\n \n *args: Any additional arguments will be passed to *action* in the\n- order given, immediately following the *req*, *resp*, *resource*,\n- and *params* arguments.\n+ order given, immediately following the *req*, *resp* and *resource*\n+ arguments.\n \n Keyword Args:\n is_async (bool): Set to ``True`` for ASGI apps to provide a hint that\n", "issue": "Hook after params attribute is missing \nHi\r\n\r\n```python\r\ndef do_before(req, resp, resource, params):\r\n\t# here params is available\r\n\r\ndef do_after(req, resp, resource, params):\r\n\t# here params is not available\r\n\r\n```\r\n\r\nHow can I access the params?\r\n\n", "before_files": [{"content": "# Copyright 2013 by Rackspace Hosting, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Hook decorators.\"\"\"\n\nfrom functools import wraps\nfrom inspect import getmembers\nfrom inspect import iscoroutinefunction\nimport re\n\nfrom falcon.constants import COMBINED_METHODS\nfrom falcon.util.misc import get_argnames\nfrom falcon.util.sync import _wrap_non_coroutine_unsafe\n\n\n_DECORABLE_METHOD_NAME = re.compile(\n r'^on_({})(_\\w+)?$'.format('|'.join(method.lower() for method in COMBINED_METHODS))\n)\n\n\ndef before(action, *args, is_async=False, **kwargs):\n \"\"\"Execute the given action function *before* the responder.\n\n The `params` argument that is passed to the hook\n contains only the fields from the URI template path; it does not\n include query string values.\n\n Hooks may inject extra params as needed. For example::\n\n def do_something(req, resp, resource, params):\n try:\n params['id'] = int(params['id'])\n except ValueError:\n raise falcon.HTTPBadRequest(title='Invalid ID',\n description='ID was not valid.')\n\n params['answer'] = 42\n\n Args:\n action (callable): A function of the form\n ``func(req, resp, resource, params)``, where `resource` is a\n reference to the resource class instance associated with the\n request and `params` is a dict of URI template field names,\n if any, that will be passed into the resource responder as\n kwargs.\n\n *args: Any additional arguments will be passed to *action* in the\n order given, immediately following the *req*, *resp*, *resource*,\n and *params* arguments.\n\n Keyword Args:\n is_async (bool): Set to ``True`` for ASGI apps to provide a hint that\n the decorated responder is a coroutine function (i.e., that it\n is defined with ``async def``) or that it returns an awaitable\n coroutine object.\n\n Normally, when the function source is declared using ``async def``,\n the resulting function object is flagged to indicate it returns a\n coroutine when invoked, and this can be automatically detected.\n However, it is possible to use a regular function to return an\n awaitable coroutine object, in which case a hint is required to let\n the framework know what to expect. 
Also, a hint is always required\n when using a cythonized coroutine function, since Cython does not\n flag them in a way that can be detected in advance, even when the\n function is declared using ``async def``.\n\n **kwargs: Any additional keyword arguments will be passed through to\n *action*.\n \"\"\"\n\n def _before(responder_or_resource):\n if isinstance(responder_or_resource, type):\n resource = responder_or_resource\n\n for responder_name, responder in getmembers(resource, callable):\n if _DECORABLE_METHOD_NAME.match(responder_name):\n # This pattern is necessary to capture the current value of\n # responder in the do_before_all closure; otherwise, they\n # will capture the same responder variable that is shared\n # between iterations of the for loop, above.\n def let(responder=responder):\n do_before_all = _wrap_with_before(\n responder, action, args, kwargs, is_async\n )\n\n setattr(resource, responder_name, do_before_all)\n\n let()\n\n return resource\n\n else:\n responder = responder_or_resource\n do_before_one = _wrap_with_before(responder, action, args, kwargs, is_async)\n\n return do_before_one\n\n return _before\n\n\ndef after(action, *args, is_async=False, **kwargs):\n \"\"\"Execute the given action function *after* the responder.\n\n Args:\n action (callable): A function of the form\n ``func(req, resp, resource)``, where `resource` is a\n reference to the resource class instance associated with the\n request\n\n *args: Any additional arguments will be passed to *action* in the\n order given, immediately following the *req*, *resp*, *resource*,\n and *params* arguments.\n\n Keyword Args:\n is_async (bool): Set to ``True`` for ASGI apps to provide a hint that\n the decorated responder is a coroutine function (i.e., that it\n is defined with ``async def``) or that it returns an awaitable\n coroutine object.\n\n Normally, when the function source is declared using ``async def``,\n the resulting function object is flagged to indicate it returns a\n coroutine when invoked, and this can be automatically detected.\n However, it is possible to use a regular function to return an\n awaitable coroutine object, in which case a hint is required to let\n the framework know what to expect. 
Also, a hint is always required\n when using a cythonized coroutine function, since Cython does not\n flag them in a way that can be detected in advance, even when the\n function is declared using ``async def``.\n\n **kwargs: Any additional keyword arguments will be passed through to\n *action*.\n \"\"\"\n\n def _after(responder_or_resource):\n if isinstance(responder_or_resource, type):\n resource = responder_or_resource\n\n for responder_name, responder in getmembers(resource, callable):\n if _DECORABLE_METHOD_NAME.match(responder_name):\n\n def let(responder=responder):\n do_after_all = _wrap_with_after(\n responder, action, args, kwargs, is_async\n )\n\n setattr(resource, responder_name, do_after_all)\n\n let()\n\n return resource\n\n else:\n responder = responder_or_resource\n do_after_one = _wrap_with_after(responder, action, args, kwargs, is_async)\n\n return do_after_one\n\n return _after\n\n\n# -----------------------------------------------------------------------------\n# Helpers\n# -----------------------------------------------------------------------------\n\n\ndef _wrap_with_after(responder, action, action_args, action_kwargs, is_async):\n \"\"\"Execute the given action function after a responder method.\n\n Args:\n responder: The responder method to wrap.\n action: A function with a signature similar to a resource responder\n method, taking the form ``func(req, resp, resource)``.\n action_args: Additional positional agruments to pass to *action*.\n action_kwargs: Additional keyword arguments to pass to *action*.\n is_async: Set to ``True`` for cythonized responders that are\n actually coroutine functions, since such responders can not\n be auto-detected. A hint is also required for regular functions\n that happen to return an awaitable coroutine object.\n \"\"\"\n\n responder_argnames = get_argnames(responder)\n extra_argnames = responder_argnames[2:] # Skip req, resp\n\n if is_async or iscoroutinefunction(responder):\n # NOTE(kgriffs): I manually verified that the implicit \"else\" branch\n # is actually covered, but coverage isn't tracking it for\n # some reason.\n if not is_async: # pragma: nocover\n action = _wrap_non_coroutine_unsafe(action)\n\n @wraps(responder)\n async def do_after(self, req, resp, *args, **kwargs):\n if args:\n _merge_responder_args(args, kwargs, extra_argnames)\n\n await responder(self, req, resp, **kwargs)\n await action(req, resp, self, *action_args, **action_kwargs)\n\n else:\n\n @wraps(responder)\n def do_after(self, req, resp, *args, **kwargs):\n if args:\n _merge_responder_args(args, kwargs, extra_argnames)\n\n responder(self, req, resp, **kwargs)\n action(req, resp, self, *action_args, **action_kwargs)\n\n return do_after\n\n\ndef _wrap_with_before(responder, action, action_args, action_kwargs, is_async):\n \"\"\"Execute the given action function before a responder method.\n\n Args:\n responder: The responder method to wrap.\n action: A function with a similar signature to a resource responder\n method, taking the form ``func(req, resp, resource, params)``.\n action_args: Additional positional agruments to pass to *action*.\n action_kwargs: Additional keyword arguments to pass to *action*.\n is_async: Set to ``True`` for cythonized responders that are\n actually coroutine functions, since such responders can not\n be auto-detected. 
A hint is also required for regular functions\n that happen to return an awaitable coroutine object.\n \"\"\"\n\n responder_argnames = get_argnames(responder)\n extra_argnames = responder_argnames[2:] # Skip req, resp\n\n if is_async or iscoroutinefunction(responder):\n # NOTE(kgriffs): I manually verified that the implicit \"else\" branch\n # is actually covered, but coverage isn't tracking it for\n # some reason.\n if not is_async: # pragma: nocover\n action = _wrap_non_coroutine_unsafe(action)\n\n @wraps(responder)\n async def do_before(self, req, resp, *args, **kwargs):\n if args:\n _merge_responder_args(args, kwargs, extra_argnames)\n\n await action(req, resp, self, kwargs, *action_args, **action_kwargs)\n await responder(self, req, resp, **kwargs)\n\n else:\n\n @wraps(responder)\n def do_before(self, req, resp, *args, **kwargs):\n if args:\n _merge_responder_args(args, kwargs, extra_argnames)\n\n action(req, resp, self, kwargs, *action_args, **action_kwargs)\n responder(self, req, resp, **kwargs)\n\n return do_before\n\n\ndef _merge_responder_args(args, kwargs, argnames):\n \"\"\"Merge responder args into kwargs.\n\n The framework always passes extra args as keyword arguments.\n However, when the app calls the responder directly, it might use\n positional arguments instead, so we need to handle that case. This\n might happen, for example, when overriding a resource and calling\n a responder via super().\n\n Args:\n args (tuple): Extra args passed into the responder\n kwargs (dict): Keyword args passed into the responder\n argnames (list): Extra argnames from the responder's\n signature, ordered as defined\n \"\"\"\n\n # NOTE(kgriffs): Merge positional args into kwargs by matching\n # them up to the responder's signature. To do that, we must\n # find out the names of the positional arguments by matching\n # them in the order of the arguments named in the responder's\n # signature.\n for i, argname in enumerate(argnames):\n # NOTE(kgriffs): extra_argnames may contain keyword arguments,\n # which wont be in the args list, and are already in the kwargs\n # dict anyway, so detect and skip them.\n if argname not in kwargs:\n kwargs[argname] = args[i]\n", "path": "falcon/hooks.py"}], "after_files": [{"content": "# Copyright 2013 by Rackspace Hosting, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Hook decorators.\"\"\"\n\nfrom functools import wraps\nfrom inspect import getmembers\nfrom inspect import iscoroutinefunction\nimport re\n\nfrom falcon.constants import COMBINED_METHODS\nfrom falcon.util.misc import get_argnames\nfrom falcon.util.sync import _wrap_non_coroutine_unsafe\n\n\n_DECORABLE_METHOD_NAME = re.compile(\n r'^on_({})(_\\w+)?$'.format('|'.join(method.lower() for method in COMBINED_METHODS))\n)\n\n\ndef before(action, *args, is_async=False, **kwargs):\n \"\"\"Execute the given action function *before* the responder.\n\n The `params` argument that is passed to the hook\n contains only the fields from the URI template path; it does not\n include query string 
values.\n\n Hooks may inject extra params as needed. For example::\n\n def do_something(req, resp, resource, params):\n try:\n params['id'] = int(params['id'])\n except ValueError:\n raise falcon.HTTPBadRequest(title='Invalid ID',\n description='ID was not valid.')\n\n params['answer'] = 42\n\n Args:\n action (callable): A function of the form\n ``func(req, resp, resource, params)``, where `resource` is a\n reference to the resource class instance associated with the\n request and `params` is a dict of URI template field names,\n if any, that will be passed into the resource responder as\n kwargs.\n\n *args: Any additional arguments will be passed to *action* in the\n order given, immediately following the *req*, *resp*, *resource*,\n and *params* arguments.\n\n Keyword Args:\n is_async (bool): Set to ``True`` for ASGI apps to provide a hint that\n the decorated responder is a coroutine function (i.e., that it\n is defined with ``async def``) or that it returns an awaitable\n coroutine object.\n\n Normally, when the function source is declared using ``async def``,\n the resulting function object is flagged to indicate it returns a\n coroutine when invoked, and this can be automatically detected.\n However, it is possible to use a regular function to return an\n awaitable coroutine object, in which case a hint is required to let\n the framework know what to expect. Also, a hint is always required\n when using a cythonized coroutine function, since Cython does not\n flag them in a way that can be detected in advance, even when the\n function is declared using ``async def``.\n\n **kwargs: Any additional keyword arguments will be passed through to\n *action*.\n \"\"\"\n\n def _before(responder_or_resource):\n if isinstance(responder_or_resource, type):\n resource = responder_or_resource\n\n for responder_name, responder in getmembers(resource, callable):\n if _DECORABLE_METHOD_NAME.match(responder_name):\n # This pattern is necessary to capture the current value of\n # responder in the do_before_all closure; otherwise, they\n # will capture the same responder variable that is shared\n # between iterations of the for loop, above.\n def let(responder=responder):\n do_before_all = _wrap_with_before(\n responder, action, args, kwargs, is_async\n )\n\n setattr(resource, responder_name, do_before_all)\n\n let()\n\n return resource\n\n else:\n responder = responder_or_resource\n do_before_one = _wrap_with_before(responder, action, args, kwargs, is_async)\n\n return do_before_one\n\n return _before\n\n\ndef after(action, *args, is_async=False, **kwargs):\n \"\"\"Execute the given action function *after* the responder.\n\n Args:\n action (callable): A function of the form\n ``func(req, resp, resource)``, where `resource` is a\n reference to the resource class instance associated with the\n request\n\n *args: Any additional arguments will be passed to *action* in the\n order given, immediately following the *req*, *resp* and *resource*\n arguments.\n\n Keyword Args:\n is_async (bool): Set to ``True`` for ASGI apps to provide a hint that\n the decorated responder is a coroutine function (i.e., that it\n is defined with ``async def``) or that it returns an awaitable\n coroutine object.\n\n Normally, when the function source is declared using ``async def``,\n the resulting function object is flagged to indicate it returns a\n coroutine when invoked, and this can be automatically detected.\n However, it is possible to use a regular function to return an\n awaitable coroutine object, in which case a hint is 
required to let\n the framework know what to expect. Also, a hint is always required\n when using a cythonized coroutine function, since Cython does not\n flag them in a way that can be detected in advance, even when the\n function is declared using ``async def``.\n\n **kwargs: Any additional keyword arguments will be passed through to\n *action*.\n \"\"\"\n\n def _after(responder_or_resource):\n if isinstance(responder_or_resource, type):\n resource = responder_or_resource\n\n for responder_name, responder in getmembers(resource, callable):\n if _DECORABLE_METHOD_NAME.match(responder_name):\n\n def let(responder=responder):\n do_after_all = _wrap_with_after(\n responder, action, args, kwargs, is_async\n )\n\n setattr(resource, responder_name, do_after_all)\n\n let()\n\n return resource\n\n else:\n responder = responder_or_resource\n do_after_one = _wrap_with_after(responder, action, args, kwargs, is_async)\n\n return do_after_one\n\n return _after\n\n\n# -----------------------------------------------------------------------------\n# Helpers\n# -----------------------------------------------------------------------------\n\n\ndef _wrap_with_after(responder, action, action_args, action_kwargs, is_async):\n \"\"\"Execute the given action function after a responder method.\n\n Args:\n responder: The responder method to wrap.\n action: A function with a signature similar to a resource responder\n method, taking the form ``func(req, resp, resource)``.\n action_args: Additional positional agruments to pass to *action*.\n action_kwargs: Additional keyword arguments to pass to *action*.\n is_async: Set to ``True`` for cythonized responders that are\n actually coroutine functions, since such responders can not\n be auto-detected. A hint is also required for regular functions\n that happen to return an awaitable coroutine object.\n \"\"\"\n\n responder_argnames = get_argnames(responder)\n extra_argnames = responder_argnames[2:] # Skip req, resp\n\n if is_async or iscoroutinefunction(responder):\n # NOTE(kgriffs): I manually verified that the implicit \"else\" branch\n # is actually covered, but coverage isn't tracking it for\n # some reason.\n if not is_async: # pragma: nocover\n action = _wrap_non_coroutine_unsafe(action)\n\n @wraps(responder)\n async def do_after(self, req, resp, *args, **kwargs):\n if args:\n _merge_responder_args(args, kwargs, extra_argnames)\n\n await responder(self, req, resp, **kwargs)\n await action(req, resp, self, *action_args, **action_kwargs)\n\n else:\n\n @wraps(responder)\n def do_after(self, req, resp, *args, **kwargs):\n if args:\n _merge_responder_args(args, kwargs, extra_argnames)\n\n responder(self, req, resp, **kwargs)\n action(req, resp, self, *action_args, **action_kwargs)\n\n return do_after\n\n\ndef _wrap_with_before(responder, action, action_args, action_kwargs, is_async):\n \"\"\"Execute the given action function before a responder method.\n\n Args:\n responder: The responder method to wrap.\n action: A function with a similar signature to a resource responder\n method, taking the form ``func(req, resp, resource, params)``.\n action_args: Additional positional agruments to pass to *action*.\n action_kwargs: Additional keyword arguments to pass to *action*.\n is_async: Set to ``True`` for cythonized responders that are\n actually coroutine functions, since such responders can not\n be auto-detected. 
A hint is also required for regular functions\n that happen to return an awaitable coroutine object.\n \"\"\"\n\n responder_argnames = get_argnames(responder)\n extra_argnames = responder_argnames[2:] # Skip req, resp\n\n if is_async or iscoroutinefunction(responder):\n # NOTE(kgriffs): I manually verified that the implicit \"else\" branch\n # is actually covered, but coverage isn't tracking it for\n # some reason.\n if not is_async: # pragma: nocover\n action = _wrap_non_coroutine_unsafe(action)\n\n @wraps(responder)\n async def do_before(self, req, resp, *args, **kwargs):\n if args:\n _merge_responder_args(args, kwargs, extra_argnames)\n\n await action(req, resp, self, kwargs, *action_args, **action_kwargs)\n await responder(self, req, resp, **kwargs)\n\n else:\n\n @wraps(responder)\n def do_before(self, req, resp, *args, **kwargs):\n if args:\n _merge_responder_args(args, kwargs, extra_argnames)\n\n action(req, resp, self, kwargs, *action_args, **action_kwargs)\n responder(self, req, resp, **kwargs)\n\n return do_before\n\n\ndef _merge_responder_args(args, kwargs, argnames):\n \"\"\"Merge responder args into kwargs.\n\n The framework always passes extra args as keyword arguments.\n However, when the app calls the responder directly, it might use\n positional arguments instead, so we need to handle that case. This\n might happen, for example, when overriding a resource and calling\n a responder via super().\n\n Args:\n args (tuple): Extra args passed into the responder\n kwargs (dict): Keyword args passed into the responder\n argnames (list): Extra argnames from the responder's\n signature, ordered as defined\n \"\"\"\n\n # NOTE(kgriffs): Merge positional args into kwargs by matching\n # them up to the responder's signature. To do that, we must\n # find out the names of the positional arguments by matching\n # them in the order of the arguments named in the responder's\n # signature.\n for i, argname in enumerate(argnames):\n # NOTE(kgriffs): extra_argnames may contain keyword arguments,\n # which wont be in the args list, and are already in the kwargs\n # dict anyway, so detect and skip them.\n if argname not in kwargs:\n kwargs[argname] = args[i]\n", "path": "falcon/hooks.py"}]}
| 3,627 | 136 |
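Editorial note on the falcon record that ends here: the documented fix clarifies that an `after` hook action only receives `req`, `resp`, and `resource`, so route params must be captured in a `before` hook if an `after` hook needs them. The sketch below is illustrative only (not part of the record); it assumes a recent falcon release where `req.context` accepts attribute assignment, and the hook and resource names are invented.

```python
import falcon

def capture_params(req, resp, resource, params):
    # "before" hooks receive the route params; stash them for later use.
    # (assumption: attribute-style req.context, available in falcon 2.x/3.x)
    req.context.params = dict(params)

def log_after(req, resp, resource):
    # "after" hooks do NOT receive params; read whatever the before hook stashed.
    print(getattr(req.context, "params", {}))

class ThingResource:
    @falcon.before(capture_params)
    @falcon.after(log_after)
    def on_get(self, req, resp, thing_id):
        resp.media = {"id": thing_id}
```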
gh_patches_debug_5898
|
rasdani/github-patches
|
git_diff
|
pytorch__pytorch-4414
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
UnboundLocalError: local variable 'distutils' referenced before assignment
This error happens building from source in Linux. Introduced in https://github.com/pytorch/pytorch/pull/3993
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/setup_helpers/ninja_builder.py`
Content:
```
1 import re
2 import os
3 import sys
4 import setuptools
5 import distutils
6 from contextlib import contextmanager
7 import subprocess
8
9 BUILD_DIR = 'build'
10
11
12 # on the fly create a ninja file in build/ and then
13 # run it when run() is called.
14 class NinjaBuilder(object):
15 def __init__(self, name):
16 import ninja
17 if not os.path.exists(BUILD_DIR):
18 os.mkdir(BUILD_DIR)
19 self.ninja_program = os.path.join(ninja.BIN_DIR, 'ninja')
20 self.name = name
21 self.filename = os.path.join(BUILD_DIR, 'build.{}.ninja'.format(name))
22 self.writer = ninja.Writer(open(self.filename, 'w'))
23 self.writer.rule('do_cmd', '$cmd')
24 self.writer.rule('compile', '$cmd')
25 self.compdb_targets = []
26
27 def run(self):
28 import ninja
29 self.writer.close()
30 try:
31 subprocess.check_call([self.ninja_program, '-f', self.filename])
32 except subprocess.CalledProcessError as err:
33 # avoid printing the setup.py stack trace because it obscures the
34 # C++ errors.
35 sys.stderr.write(str(err) + '\n')
36 sys.exit(1)
37 compile_db_path = os.path.join(BUILD_DIR, '{}_compile_commands.json'.format(self.name))
38 with open(compile_db_path, 'w') as compile_db:
39 subprocess.check_call([self.ninja_program, '-f', self.filename,
40 '-t', 'compdb', 'compile'], stdout=compile_db)
41
42 # weird build logic in build develop causes some things to be run
43 # twice so make sure even after we run the command we still
44 # reset this to a valid state
45 # don't use the same name or you can't inspect the real ninja files
46 self.__init__(self.name + "_")
47
48
49 class ninja_build_ext(setuptools.command.build_ext.build_ext):
50 def _build_default(self, ext):
51 return setuptools.command.build_ext.build_ext.build_extension(self, ext)
52
53 def build_extension(self, ext):
54 builder = NinjaBuilder(ext.name)
55
56 @contextmanager
57 def patch(obj, attr_name, val):
58 orig_val = getattr(obj, attr_name)
59 setattr(obj, attr_name, val)
60 try:
61 yield
62 finally:
63 setattr(obj, attr_name, orig_val)
64
65 if self.compiler.compiler_type == 'msvc':
66 import distutils.msvccompiler
67 import distutils.msvc9compiler
68 if sys.version[0] == 2:
69 orig_compiler = distutils.msvc9compiler.MSVCCompiler
70 else:
71 orig_compiler = distutils._msvccompiler.MSVCCompiler
72 orig_compile = orig_compiler.compile
73 orig_link = orig_compiler.link
74 orig_spawn = orig_compiler.spawn
75 else:
76 orig_compiler = distutils.unixccompiler.UnixCCompiler
77 orig_compile = orig_compiler._compile
78 orig_link = orig_compiler.link
79
80 def win_compile(self, sources,
81 output_dir=None, macros=None, include_dirs=None, debug=0,
82 extra_preargs=None, extra_postargs=None, depends=None):
83
84 def spawn(cmd):
85 # Using regex to match src and obj
86
87 src_regex = re.compile('/T(p|c)(.*)')
88 src_list = [m.group(2) for m in (
89 src_regex.match(elem) for elem in cmd) if m]
90
91 obj_regex = re.compile('/Fo(.*)')
92 obj_list = [m.group(1) for m in (
93 obj_regex.match(elem) for elem in cmd) if m]
94
95 if len(src_list) >= 1 and len(obj_list) >= 1:
96 src = src_list[0]
97 obj = obj_list[0]
98 else:
99 # Cannot find src or obj, revert back to original style
100 return orig_spawn(cmd)
101
102 builder.writer.build(
103 [obj], 'compile', [src],
104 variables={
105 'cmd': cmd,
106 'deps': 'msvc'
107 })
108
109 with patch(self, 'spawn', spawn):
110 orig_compile(self, sources,
111 output_dir, macros, include_dirs, debug,
112 extra_preargs, extra_postargs, depends)
113
114 def unix_compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
115 depfile = os.path.splitext(obj)[0] + '.d'
116
117 def spawn(cmd):
118 builder.writer.build(
119 [obj], 'compile', [src],
120 variables={
121 'cmd': cmd,
122 'depfile': depfile,
123 'deps': 'gcc'
124 })
125
126 extra_postargs = extra_postargs + ['-MMD', '-MF', depfile]
127 with patch(self, 'spawn', spawn):
128 orig_compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts)
129
130 def link(self, target_desc, objects,
131 output_filename, output_dir=None, libraries=None,
132 library_dirs=None, runtime_library_dirs=None,
133 export_symbols=None, debug=0, extra_preargs=None,
134 extra_postargs=None, build_temp=None, target_lang=None):
135
136 builder.run()
137 orig_link(self, target_desc, objects,
138 output_filename, output_dir, libraries,
139 library_dirs, runtime_library_dirs,
140 export_symbols, debug, extra_preargs,
141 extra_postargs, build_temp, target_lang)
142
143 if self.compiler.compiler_type == 'msvc':
144 _compile_func = win_compile
145 _compile_func_name = 'compile'
146 else:
147 _compile_func = unix_compile
148 _compile_func_name = '_compile'
149
150 with patch(orig_compiler, _compile_func_name, _compile_func):
151 with patch(orig_compiler, 'link', link):
152 with patch(self, 'force', True):
153 self._build_default(ext)
154
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tools/setup_helpers/ninja_builder.py b/tools/setup_helpers/ninja_builder.py
--- a/tools/setup_helpers/ninja_builder.py
+++ b/tools/setup_helpers/ninja_builder.py
@@ -73,6 +73,7 @@
orig_link = orig_compiler.link
orig_spawn = orig_compiler.spawn
else:
+ import distutils.unixccompiler
orig_compiler = distutils.unixccompiler.UnixCCompiler
orig_compile = orig_compiler._compile
orig_link = orig_compiler.link
|
{"golden_diff": "diff --git a/tools/setup_helpers/ninja_builder.py b/tools/setup_helpers/ninja_builder.py\n--- a/tools/setup_helpers/ninja_builder.py\n+++ b/tools/setup_helpers/ninja_builder.py\n@@ -73,6 +73,7 @@\n orig_link = orig_compiler.link\n orig_spawn = orig_compiler.spawn\n else:\n+ import distutils.unixccompiler\n orig_compiler = distutils.unixccompiler.UnixCCompiler\n orig_compile = orig_compiler._compile\n orig_link = orig_compiler.link\n", "issue": "UnboundLocalError: local variable 'distutils' referenced before assignment\nThis error happens building from source in Linux. Introduced in https://github.com/pytorch/pytorch/pull/3993\n", "before_files": [{"content": "import re\nimport os\nimport sys\nimport setuptools\nimport distutils\nfrom contextlib import contextmanager\nimport subprocess\n\nBUILD_DIR = 'build'\n\n\n# on the fly create a ninja file in build/ and then\n# run it when run() is called.\nclass NinjaBuilder(object):\n def __init__(self, name):\n import ninja\n if not os.path.exists(BUILD_DIR):\n os.mkdir(BUILD_DIR)\n self.ninja_program = os.path.join(ninja.BIN_DIR, 'ninja')\n self.name = name\n self.filename = os.path.join(BUILD_DIR, 'build.{}.ninja'.format(name))\n self.writer = ninja.Writer(open(self.filename, 'w'))\n self.writer.rule('do_cmd', '$cmd')\n self.writer.rule('compile', '$cmd')\n self.compdb_targets = []\n\n def run(self):\n import ninja\n self.writer.close()\n try:\n subprocess.check_call([self.ninja_program, '-f', self.filename])\n except subprocess.CalledProcessError as err:\n # avoid printing the setup.py stack trace because it obscures the\n # C++ errors.\n sys.stderr.write(str(err) + '\\n')\n sys.exit(1)\n compile_db_path = os.path.join(BUILD_DIR, '{}_compile_commands.json'.format(self.name))\n with open(compile_db_path, 'w') as compile_db:\n subprocess.check_call([self.ninja_program, '-f', self.filename,\n '-t', 'compdb', 'compile'], stdout=compile_db)\n\n # weird build logic in build develop causes some things to be run\n # twice so make sure even after we run the command we still\n # reset this to a valid state\n # don't use the same name or you can't inspect the real ninja files\n self.__init__(self.name + \"_\")\n\n\nclass ninja_build_ext(setuptools.command.build_ext.build_ext):\n def _build_default(self, ext):\n return setuptools.command.build_ext.build_ext.build_extension(self, ext)\n\n def build_extension(self, ext):\n builder = NinjaBuilder(ext.name)\n\n @contextmanager\n def patch(obj, attr_name, val):\n orig_val = getattr(obj, attr_name)\n setattr(obj, attr_name, val)\n try:\n yield\n finally:\n setattr(obj, attr_name, orig_val)\n\n if self.compiler.compiler_type == 'msvc':\n import distutils.msvccompiler\n import distutils.msvc9compiler\n if sys.version[0] == 2:\n orig_compiler = distutils.msvc9compiler.MSVCCompiler\n else:\n orig_compiler = distutils._msvccompiler.MSVCCompiler\n orig_compile = orig_compiler.compile\n orig_link = orig_compiler.link\n orig_spawn = orig_compiler.spawn\n else:\n orig_compiler = distutils.unixccompiler.UnixCCompiler\n orig_compile = orig_compiler._compile\n orig_link = orig_compiler.link\n\n def win_compile(self, sources,\n output_dir=None, macros=None, include_dirs=None, debug=0,\n extra_preargs=None, extra_postargs=None, depends=None):\n\n def spawn(cmd):\n # Using regex to match src and obj\n\n src_regex = re.compile('/T(p|c)(.*)')\n src_list = [m.group(2) for m in (\n src_regex.match(elem) for elem in cmd) if m]\n\n obj_regex = re.compile('/Fo(.*)')\n obj_list = [m.group(1) for m in (\n 
obj_regex.match(elem) for elem in cmd) if m]\n\n if len(src_list) >= 1 and len(obj_list) >= 1:\n src = src_list[0]\n obj = obj_list[0]\n else:\n # Cannot find src or obj, revert back to original style\n return orig_spawn(cmd)\n\n builder.writer.build(\n [obj], 'compile', [src],\n variables={\n 'cmd': cmd,\n 'deps': 'msvc'\n })\n\n with patch(self, 'spawn', spawn):\n orig_compile(self, sources,\n output_dir, macros, include_dirs, debug,\n extra_preargs, extra_postargs, depends)\n\n def unix_compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):\n depfile = os.path.splitext(obj)[0] + '.d'\n\n def spawn(cmd):\n builder.writer.build(\n [obj], 'compile', [src],\n variables={\n 'cmd': cmd,\n 'depfile': depfile,\n 'deps': 'gcc'\n })\n\n extra_postargs = extra_postargs + ['-MMD', '-MF', depfile]\n with patch(self, 'spawn', spawn):\n orig_compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts)\n\n def link(self, target_desc, objects,\n output_filename, output_dir=None, libraries=None,\n library_dirs=None, runtime_library_dirs=None,\n export_symbols=None, debug=0, extra_preargs=None,\n extra_postargs=None, build_temp=None, target_lang=None):\n\n builder.run()\n orig_link(self, target_desc, objects,\n output_filename, output_dir, libraries,\n library_dirs, runtime_library_dirs,\n export_symbols, debug, extra_preargs,\n extra_postargs, build_temp, target_lang)\n\n if self.compiler.compiler_type == 'msvc':\n _compile_func = win_compile\n _compile_func_name = 'compile'\n else:\n _compile_func = unix_compile\n _compile_func_name = '_compile'\n\n with patch(orig_compiler, _compile_func_name, _compile_func):\n with patch(orig_compiler, 'link', link):\n with patch(self, 'force', True):\n self._build_default(ext)\n", "path": "tools/setup_helpers/ninja_builder.py"}], "after_files": [{"content": "import re\nimport os\nimport sys\nimport setuptools\nimport distutils\nfrom contextlib import contextmanager\nimport subprocess\n\nBUILD_DIR = 'build'\n\n\n# on the fly create a ninja file in build/ and then\n# run it when run() is called.\nclass NinjaBuilder(object):\n def __init__(self, name):\n import ninja\n if not os.path.exists(BUILD_DIR):\n os.mkdir(BUILD_DIR)\n self.ninja_program = os.path.join(ninja.BIN_DIR, 'ninja')\n self.name = name\n self.filename = os.path.join(BUILD_DIR, 'build.{}.ninja'.format(name))\n self.writer = ninja.Writer(open(self.filename, 'w'))\n self.writer.rule('do_cmd', '$cmd')\n self.writer.rule('compile', '$cmd')\n self.compdb_targets = []\n\n def run(self):\n import ninja\n self.writer.close()\n try:\n subprocess.check_call([self.ninja_program, '-f', self.filename])\n except subprocess.CalledProcessError as err:\n # avoid printing the setup.py stack trace because it obscures the\n # C++ errors.\n sys.stderr.write(str(err) + '\\n')\n sys.exit(1)\n compile_db_path = os.path.join(BUILD_DIR, '{}_compile_commands.json'.format(self.name))\n with open(compile_db_path, 'w') as compile_db:\n subprocess.check_call([self.ninja_program, '-f', self.filename,\n '-t', 'compdb', 'compile'], stdout=compile_db)\n\n # weird build logic in build develop causes some things to be run\n # twice so make sure even after we run the command we still\n # reset this to a valid state\n # don't use the same name or you can't inspect the real ninja files\n self.__init__(self.name + \"_\")\n\n\nclass ninja_build_ext(setuptools.command.build_ext.build_ext):\n def _build_default(self, ext):\n return setuptools.command.build_ext.build_ext.build_extension(self, ext)\n\n def build_extension(self, ext):\n 
builder = NinjaBuilder(ext.name)\n\n @contextmanager\n def patch(obj, attr_name, val):\n orig_val = getattr(obj, attr_name)\n setattr(obj, attr_name, val)\n try:\n yield\n finally:\n setattr(obj, attr_name, orig_val)\n\n if self.compiler.compiler_type == 'msvc':\n import distutils.msvccompiler\n import distutils.msvc9compiler\n if sys.version[0] == 2:\n orig_compiler = distutils.msvc9compiler.MSVCCompiler\n else:\n orig_compiler = distutils._msvccompiler.MSVCCompiler\n orig_compile = orig_compiler.compile\n orig_link = orig_compiler.link\n orig_spawn = orig_compiler.spawn\n else:\n import distutils.unixccompiler\n orig_compiler = distutils.unixccompiler.UnixCCompiler\n orig_compile = orig_compiler._compile\n orig_link = orig_compiler.link\n\n def win_compile(self, sources,\n output_dir=None, macros=None, include_dirs=None, debug=0,\n extra_preargs=None, extra_postargs=None, depends=None):\n\n def spawn(cmd):\n # Using regex to match src and obj\n\n src_regex = re.compile('/T(p|c)(.*)')\n src_list = [m.group(2) for m in (\n src_regex.match(elem) for elem in cmd) if m]\n\n obj_regex = re.compile('/Fo(.*)')\n obj_list = [m.group(1) for m in (\n obj_regex.match(elem) for elem in cmd) if m]\n\n if len(src_list) >= 1 and len(obj_list) >= 1:\n src = src_list[0]\n obj = obj_list[0]\n else:\n # Cannot find src or obj, revert back to original style\n return orig_spawn(cmd)\n\n builder.writer.build(\n [obj], 'compile', [src],\n variables={\n 'cmd': cmd,\n 'deps': 'msvc'\n })\n\n with patch(self, 'spawn', spawn):\n orig_compile(self, sources,\n output_dir, macros, include_dirs, debug,\n extra_preargs, extra_postargs, depends)\n\n def unix_compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):\n depfile = os.path.splitext(obj)[0] + '.d'\n\n def spawn(cmd):\n builder.writer.build(\n [obj], 'compile', [src],\n variables={\n 'cmd': cmd,\n 'depfile': depfile,\n 'deps': 'gcc'\n })\n\n extra_postargs = extra_postargs + ['-MMD', '-MF', depfile]\n with patch(self, 'spawn', spawn):\n orig_compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts)\n\n def link(self, target_desc, objects,\n output_filename, output_dir=None, libraries=None,\n library_dirs=None, runtime_library_dirs=None,\n export_symbols=None, debug=0, extra_preargs=None,\n extra_postargs=None, build_temp=None, target_lang=None):\n\n builder.run()\n orig_link(self, target_desc, objects,\n output_filename, output_dir, libraries,\n library_dirs, runtime_library_dirs,\n export_symbols, debug, extra_preargs,\n extra_postargs, build_temp, target_lang)\n\n if self.compiler.compiler_type == 'msvc':\n _compile_func = win_compile\n _compile_func_name = 'compile'\n else:\n _compile_func = unix_compile\n _compile_func_name = '_compile'\n\n with patch(orig_compiler, _compile_func_name, _compile_func):\n with patch(orig_compiler, 'link', link):\n with patch(self, 'force', True):\n self._build_default(ext)\n", "path": "tools/setup_helpers/ninja_builder.py"}]}
| 1,911 | 113 |
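Editorial note on the record above: the one-line `import distutils.unixccompiler` fix works because an `import` statement inside a function binds the imported top-level name as a *local* variable for the whole function body. The MSVC branch's `import distutils.msvccompiler` therefore shadows the module-level `distutils`, and the `else` branch hits `UnboundLocalError` unless it performs its own import. The sketch below is a hedged illustration of that scoping rule, not code from the repository; it assumes a Python version that still ships `distutils`, and `pick_compiler` is an invented name.

```python
import distutils  # module-level binding


def pick_compiler(use_msvc):
    if use_msvc:
        # This import makes `distutils` a local name for the *entire* function,
        # shadowing the module-level binding above.
        import distutils.msvccompiler
        return distutils.msvccompiler
    # Without the import below, this branch would raise:
    #   UnboundLocalError: local variable 'distutils' referenced before assignment
    import distutils.unixccompiler  # the equivalent of the one-line fix in the diff
    return distutils.unixccompiler


print(pick_compiler(use_msvc=False).__name__)  # -> "distutils.unixccompiler"
```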
gh_patches_debug_32725
|
rasdani/github-patches
|
git_diff
|
nautobot__nautobot-5393
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Job Buttons do not honor the Job task_queue
<!--
NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.
This form is only for reporting reproducible bugs. If you need assistance
with Nautobot installation, or if you have a general question, please start a
discussion instead: https://github.com/nautobot/nautobot/discussions
Please describe the environment in which you are running Nautobot. Be sure
that you are running an unmodified instance of the latest stable release
before submitting a bug report, and that any plugins have been disabled.
-->
### Environment
* Nautobot version (Docker tag too if applicable): 1.6.11
* Python version: 3.11
* Database platform, version: NA
* Middleware(s): NA
<!--
Describe in detail the exact steps that someone else can take to reproduce
this bug using the current stable release of Nautobot. Begin with the
creation of any necessary database objects and call out every operation
being performed explicitly. If reporting a bug in the REST API, be sure to
reconstruct the raw HTTP request(s) being made: Don't rely on a client
library such as pynautobot.
-->
### Steps to Reproduce
1. Create a JobButtonReciever that uses a queue other than default
2. Create a Job Button that points to the JobButtonReciever and connect it to a contenttype (like dcim.devices)
3. Click the Job Button on the contenttype from above.
<!-- What did you expect to happen? -->
### Expected Behavior
The Job runs under the queue that is configured for JobButtonReceiver.
<!-- What happened instead? -->
### Observed Behavior
The Job runs under the default queue (which in my case causes an error, but may not in other use cases).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nautobot/extras/templatetags/job_buttons.py`
Content:
```
1 from collections import OrderedDict
2
3 from django import template
4 from django.contrib.contenttypes.models import ContentType
5 from django.urls import reverse
6 from django.utils.html import format_html
7 from django.utils.safestring import mark_safe
8
9 from nautobot.extras.models import Job, JobButton
10 from nautobot.utilities.utils import render_jinja2
11
12
13 register = template.Library()
14
15 GROUP_DROPDOWN = """
16 <div class="btn-group">
17 <button type="button" class="btn btn-sm btn-{group_button_class} dropdown-toggle" data-toggle="dropdown">
18 {group_name} <span class="caret"></span>
19 </button>
20 <ul class="dropdown-menu pull-right">
21 {grouped_buttons}
22 </ul>
23 </div>
24 """
25
26 HIDDEN_INPUTS = """
27 <input type="hidden" name="csrfmiddlewaretoken" value="{csrf_token}">
28 <input type="hidden" name="object_pk" value="{object_pk}">
29 <input type="hidden" name="object_model_name" value="{object_model_name}">
30 <input type="hidden" name="_schedule_type" value="immediately">
31 <input type="hidden" name="_return_url" value="{redirect_path}">
32 <input type="hidden" name="_commit" value="on">
33 """
34
35 NO_CONFIRM_BUTTON = """
36 <button type="submit" form="form_id_{button_id}" class="btn btn-sm btn-{button_class}" {disabled}>{button_text}</button>
37 """
38
39 NO_CONFIRM_FORM = """
40 <form id="form_id_{button_id}" action="{button_url}" method="post" class="form">
41 {hidden_inputs}
42 </form>
43 """
44
45 CONFIRM_BUTTON = """
46 <button type="button" class="btn btn-sm btn-{button_class}" data-toggle="modal" data-target="#confirm_modal_id_{button_id}" {disabled}>
47 {button_text}
48 </button>
49 """
50
51 CONFIRM_MODAL = """
52 <div class="modal fade" id="confirm_modal_id_{button_id}" tabindex="-1" role="dialog" aria-labelledby="confirm_modal_label_{button_id}">
53 <div class="modal-dialog" role="document">
54 <div class="modal-content">
55 <div class="modal-header">
56 <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">×</span></button>
57 <h4 class="modal-title" id="confirm_modal_label_{button_id}">Confirmation</h4>
58 </div>
59 <form id="form_id_{button_id}" action="{button_url}" method="post" class="form">
60 <div class="modal-body">
61 {hidden_inputs}
62 Run Job <strong>'{job}'</strong> with object <strong>'{object}'</strong>?
63 </div>
64 <div class="modal-footer">
65 <button type="button" class="btn btn-default" data-dismiss="modal">Cancel</button>
66 <button type="submit" class="btn btn-primary">Confirm</button>
67 </div>
68 </form>
69 </div>
70 </div>
71 </div>
72 """
73
74 SAFE_EMPTY_STR = mark_safe("") # noqa: S308
75
76
77 def _render_job_button_for_obj(job_button, obj, context, content_type):
78 """
79 Helper method for job_buttons templatetag to reduce repetition of code.
80
81 Returns:
82 (str, str): (button_html, form_html)
83 """
84 # Pass select context data when rendering the JobButton text as Jinja2
85 button_context = {
86 "obj": obj,
87 "debug": context.get("debug", False), # django.template.context_processors.debug
88 "request": context["request"], # django.template.context_processors.request
89 "user": context["user"], # django.contrib.auth.context_processors.auth
90 "perms": context["perms"], # django.contrib.auth.context_processors.auth
91 }
92 try:
93 text_rendered = render_jinja2(job_button.text, button_context)
94 except Exception as exc:
95 return (
96 format_html(
97 '<a class="btn btn-sm btn-{}" disabled="disabled" title="{}"><i class="mdi mdi-alert"></i> {}</a>\n',
98 "default" if not job_button.group_name else "link",
99 exc,
100 job_button.name,
101 ),
102 SAFE_EMPTY_STR,
103 )
104
105 if not text_rendered:
106 return (SAFE_EMPTY_STR, SAFE_EMPTY_STR)
107
108 # Disable buttons if the user doesn't have permission to run the underlying Job.
109 has_run_perm = Job.objects.check_perms(context["user"], instance=job_button.job, action="run")
110 hidden_inputs = format_html(
111 HIDDEN_INPUTS,
112 csrf_token=context["csrf_token"],
113 object_pk=obj.pk,
114 object_model_name=f"{content_type.app_label}.{content_type.model}",
115 redirect_path=context["request"].path,
116 )
117 template_args = {
118 "button_id": job_button.pk,
119 "button_text": text_rendered,
120 "button_class": job_button.button_class if not job_button.group_name else "link",
121 "button_url": reverse("extras:job_run", kwargs={"slug": job_button.job.slug}),
122 "object": obj,
123 "job": job_button.job,
124 "hidden_inputs": hidden_inputs,
125 "disabled": "" if has_run_perm else "disabled",
126 }
127
128 if job_button.confirmation:
129 return (
130 format_html(CONFIRM_BUTTON, **template_args),
131 format_html(CONFIRM_MODAL, **template_args),
132 )
133 else:
134 return (
135 format_html(NO_CONFIRM_BUTTON, **template_args),
136 format_html(NO_CONFIRM_FORM, **template_args),
137 )
138
139
140 @register.simple_tag(takes_context=True)
141 def job_buttons(context, obj):
142 """
143 Render all applicable job buttons for the given object.
144 """
145 content_type = ContentType.objects.get_for_model(obj)
146 # We will enforce "run" permission later in deciding which buttons to show as disabled.
147 buttons = JobButton.objects.filter(content_types=content_type)
148 if not buttons:
149 return SAFE_EMPTY_STR
150
151 buttons_html = forms_html = SAFE_EMPTY_STR
152 group_names = OrderedDict()
153
154 for jb in buttons:
155 # Organize job buttons by group for later processing
156 if jb.group_name:
157 group_names.setdefault(jb.group_name, []).append(jb)
158
159 # Render and add non-grouped buttons
160 else:
161 button_html, form_html = _render_job_button_for_obj(jb, obj, context, content_type)
162 buttons_html += button_html
163 forms_html += form_html
164
165 # Add grouped buttons to template
166 for group_name, buttons in group_names.items():
167 group_button_class = buttons[0].button_class
168
169 buttons_rendered = SAFE_EMPTY_STR
170
171 for jb in buttons:
172 # Render grouped buttons as list items
173 button_html, form_html = _render_job_button_for_obj(jb, obj, context, content_type)
174 if button_html:
175 buttons_rendered += format_html("<li>{}</li>", button_html)
176 if form_html:
177 forms_html += form_html
178
179 if buttons_rendered:
180 buttons_html += format_html(
181 GROUP_DROPDOWN,
182 group_button_class=group_button_class,
183 group_name=group_name,
184 grouped_buttons=buttons_rendered,
185 )
186
187 # We want all of the buttons first and then any modals and forms so the buttons render properly
188 return buttons_html + forms_html
189
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nautobot/extras/templatetags/job_buttons.py b/nautobot/extras/templatetags/job_buttons.py
--- a/nautobot/extras/templatetags/job_buttons.py
+++ b/nautobot/extras/templatetags/job_buttons.py
@@ -1,6 +1,7 @@
from collections import OrderedDict
from django import template
+from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.urls import reverse
from django.utils.html import format_html
@@ -28,6 +29,7 @@
<input type="hidden" name="object_pk" value="{object_pk}">
<input type="hidden" name="object_model_name" value="{object_model_name}">
<input type="hidden" name="_schedule_type" value="immediately">
+<input type="hidden" name="_task_queue" value="{task_queue}">
<input type="hidden" name="_return_url" value="{redirect_path}">
<input type="hidden" name="_commit" value="on">
"""
@@ -107,12 +109,17 @@
# Disable buttons if the user doesn't have permission to run the underlying Job.
has_run_perm = Job.objects.check_perms(context["user"], instance=job_button.job, action="run")
+ try:
+ _task_queue = job_button.job.task_queues[0]
+ except IndexError:
+ _task_queue = settings.CELERY_TASK_DEFAULT_QUEUE
hidden_inputs = format_html(
HIDDEN_INPUTS,
csrf_token=context["csrf_token"],
object_pk=obj.pk,
object_model_name=f"{content_type.app_label}.{content_type.model}",
redirect_path=context["request"].path,
+ task_queue=_task_queue,
)
template_args = {
"button_id": job_button.pk,
|
{"golden_diff": "diff --git a/nautobot/extras/templatetags/job_buttons.py b/nautobot/extras/templatetags/job_buttons.py\n--- a/nautobot/extras/templatetags/job_buttons.py\n+++ b/nautobot/extras/templatetags/job_buttons.py\n@@ -1,6 +1,7 @@\n from collections import OrderedDict\n \n from django import template\n+from django.conf import settings\n from django.contrib.contenttypes.models import ContentType\n from django.urls import reverse\n from django.utils.html import format_html\n@@ -28,6 +29,7 @@\n <input type=\"hidden\" name=\"object_pk\" value=\"{object_pk}\">\n <input type=\"hidden\" name=\"object_model_name\" value=\"{object_model_name}\">\n <input type=\"hidden\" name=\"_schedule_type\" value=\"immediately\">\n+<input type=\"hidden\" name=\"_task_queue\" value=\"{task_queue}\">\n <input type=\"hidden\" name=\"_return_url\" value=\"{redirect_path}\">\n <input type=\"hidden\" name=\"_commit\" value=\"on\">\n \"\"\"\n@@ -107,12 +109,17 @@\n \n # Disable buttons if the user doesn't have permission to run the underlying Job.\n has_run_perm = Job.objects.check_perms(context[\"user\"], instance=job_button.job, action=\"run\")\n+ try:\n+ _task_queue = job_button.job.task_queues[0]\n+ except IndexError:\n+ _task_queue = settings.CELERY_TASK_DEFAULT_QUEUE\n hidden_inputs = format_html(\n HIDDEN_INPUTS,\n csrf_token=context[\"csrf_token\"],\n object_pk=obj.pk,\n object_model_name=f\"{content_type.app_label}.{content_type.model}\",\n redirect_path=context[\"request\"].path,\n+ task_queue=_task_queue,\n )\n template_args = {\n \"button_id\": job_button.pk,\n", "issue": "Job Buttons do not honor the Job task_queue\n<!--\r\n NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.\r\n\r\n This form is only for reporting reproducible bugs. If you need assistance\r\n with Nautobot installation, or if you have a general question, please start a\r\n discussion instead: https://github.com/nautobot/nautobot/discussions\r\n\r\n Please describe the environment in which you are running Nautobot. Be sure\r\n that you are running an unmodified instance of the latest stable release\r\n before submitting a bug report, and that any plugins have been disabled.\r\n-->\r\n### Environment\r\n* Nautobot version (Docker tag too if applicable): 1.6.11\r\n* Python version: 3.11\r\n* Database platform, version: NA\r\n* Middleware(s): NA\r\n\r\n<!--\r\n Describe in detail the exact steps that someone else can take to reproduce\r\n this bug using the current stable release of Nautobot. Begin with the\r\n creation of any necessary database objects and call out every operation\r\n being performed explicitly. If reporting a bug in the REST API, be sure to\r\n reconstruct the raw HTTP request(s) being made: Don't rely on a client\r\n library such as pynautobot.\r\n-->\r\n### Steps to Reproduce\r\n1. Create a JobButtonReciever that uses a queue other than default\r\n2. Create a Job Button that points to the JobButtonReciever and connect it to a contenttype (like dcim.devices)\r\n3. Click the Job Button on the contenttype from above.\r\n\r\n<!-- What did you expect to happen? -->\r\n### Expected Behavior\r\nThe Job runs under the queue that is configured for JobButtonReceiver.\r\n\r\n<!-- What happened instead? 
-->\r\n### Observed Behavior\r\nThe Job runs under the default queue (which in my case causes an error, but may not in other use cases).\n", "before_files": [{"content": "from collections import OrderedDict\n\nfrom django import template\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.urls import reverse\nfrom django.utils.html import format_html\nfrom django.utils.safestring import mark_safe\n\nfrom nautobot.extras.models import Job, JobButton\nfrom nautobot.utilities.utils import render_jinja2\n\n\nregister = template.Library()\n\nGROUP_DROPDOWN = \"\"\"\n<div class=\"btn-group\">\n <button type=\"button\" class=\"btn btn-sm btn-{group_button_class} dropdown-toggle\" data-toggle=\"dropdown\">\n {group_name} <span class=\"caret\"></span>\n </button>\n <ul class=\"dropdown-menu pull-right\">\n {grouped_buttons}\n </ul>\n</div>\n\"\"\"\n\nHIDDEN_INPUTS = \"\"\"\n<input type=\"hidden\" name=\"csrfmiddlewaretoken\" value=\"{csrf_token}\">\n<input type=\"hidden\" name=\"object_pk\" value=\"{object_pk}\">\n<input type=\"hidden\" name=\"object_model_name\" value=\"{object_model_name}\">\n<input type=\"hidden\" name=\"_schedule_type\" value=\"immediately\">\n<input type=\"hidden\" name=\"_return_url\" value=\"{redirect_path}\">\n<input type=\"hidden\" name=\"_commit\" value=\"on\">\n\"\"\"\n\nNO_CONFIRM_BUTTON = \"\"\"\n<button type=\"submit\" form=\"form_id_{button_id}\" class=\"btn btn-sm btn-{button_class}\" {disabled}>{button_text}</button>\n\"\"\"\n\nNO_CONFIRM_FORM = \"\"\"\n<form id=\"form_id_{button_id}\" action=\"{button_url}\" method=\"post\" class=\"form\">\n {hidden_inputs}\n</form>\n\"\"\"\n\nCONFIRM_BUTTON = \"\"\"\n<button type=\"button\" class=\"btn btn-sm btn-{button_class}\" data-toggle=\"modal\" data-target=\"#confirm_modal_id_{button_id}\" {disabled}>\n {button_text}\n</button>\n\"\"\"\n\nCONFIRM_MODAL = \"\"\"\n<div class=\"modal fade\" id=\"confirm_modal_id_{button_id}\" tabindex=\"-1\" role=\"dialog\" aria-labelledby=\"confirm_modal_label_{button_id}\">\n <div class=\"modal-dialog\" role=\"document\">\n <div class=\"modal-content\">\n <div class=\"modal-header\">\n <button type=\"button\" class=\"close\" data-dismiss=\"modal\" aria-label=\"Close\"><span aria-hidden=\"true\">×</span></button>\n <h4 class=\"modal-title\" id=\"confirm_modal_label_{button_id}\">Confirmation</h4>\n </div>\n <form id=\"form_id_{button_id}\" action=\"{button_url}\" method=\"post\" class=\"form\">\n <div class=\"modal-body\">\n {hidden_inputs}\n Run Job <strong>'{job}'</strong> with object <strong>'{object}'</strong>?\n </div>\n <div class=\"modal-footer\">\n <button type=\"button\" class=\"btn btn-default\" data-dismiss=\"modal\">Cancel</button>\n <button type=\"submit\" class=\"btn btn-primary\">Confirm</button>\n </div>\n </form>\n </div>\n </div>\n</div>\n\"\"\"\n\nSAFE_EMPTY_STR = mark_safe(\"\") # noqa: S308\n\n\ndef _render_job_button_for_obj(job_button, obj, context, content_type):\n \"\"\"\n Helper method for job_buttons templatetag to reduce repetition of code.\n\n Returns:\n (str, str): (button_html, form_html)\n \"\"\"\n # Pass select context data when rendering the JobButton text as Jinja2\n button_context = {\n \"obj\": obj,\n \"debug\": context.get(\"debug\", False), # django.template.context_processors.debug\n \"request\": context[\"request\"], # django.template.context_processors.request\n \"user\": context[\"user\"], # django.contrib.auth.context_processors.auth\n \"perms\": context[\"perms\"], # django.contrib.auth.context_processors.auth\n }\n try:\n 
text_rendered = render_jinja2(job_button.text, button_context)\n except Exception as exc:\n return (\n format_html(\n '<a class=\"btn btn-sm btn-{}\" disabled=\"disabled\" title=\"{}\"><i class=\"mdi mdi-alert\"></i> {}</a>\\n',\n \"default\" if not job_button.group_name else \"link\",\n exc,\n job_button.name,\n ),\n SAFE_EMPTY_STR,\n )\n\n if not text_rendered:\n return (SAFE_EMPTY_STR, SAFE_EMPTY_STR)\n\n # Disable buttons if the user doesn't have permission to run the underlying Job.\n has_run_perm = Job.objects.check_perms(context[\"user\"], instance=job_button.job, action=\"run\")\n hidden_inputs = format_html(\n HIDDEN_INPUTS,\n csrf_token=context[\"csrf_token\"],\n object_pk=obj.pk,\n object_model_name=f\"{content_type.app_label}.{content_type.model}\",\n redirect_path=context[\"request\"].path,\n )\n template_args = {\n \"button_id\": job_button.pk,\n \"button_text\": text_rendered,\n \"button_class\": job_button.button_class if not job_button.group_name else \"link\",\n \"button_url\": reverse(\"extras:job_run\", kwargs={\"slug\": job_button.job.slug}),\n \"object\": obj,\n \"job\": job_button.job,\n \"hidden_inputs\": hidden_inputs,\n \"disabled\": \"\" if has_run_perm else \"disabled\",\n }\n\n if job_button.confirmation:\n return (\n format_html(CONFIRM_BUTTON, **template_args),\n format_html(CONFIRM_MODAL, **template_args),\n )\n else:\n return (\n format_html(NO_CONFIRM_BUTTON, **template_args),\n format_html(NO_CONFIRM_FORM, **template_args),\n )\n\n\[email protected]_tag(takes_context=True)\ndef job_buttons(context, obj):\n \"\"\"\n Render all applicable job buttons for the given object.\n \"\"\"\n content_type = ContentType.objects.get_for_model(obj)\n # We will enforce \"run\" permission later in deciding which buttons to show as disabled.\n buttons = JobButton.objects.filter(content_types=content_type)\n if not buttons:\n return SAFE_EMPTY_STR\n\n buttons_html = forms_html = SAFE_EMPTY_STR\n group_names = OrderedDict()\n\n for jb in buttons:\n # Organize job buttons by group for later processing\n if jb.group_name:\n group_names.setdefault(jb.group_name, []).append(jb)\n\n # Render and add non-grouped buttons\n else:\n button_html, form_html = _render_job_button_for_obj(jb, obj, context, content_type)\n buttons_html += button_html\n forms_html += form_html\n\n # Add grouped buttons to template\n for group_name, buttons in group_names.items():\n group_button_class = buttons[0].button_class\n\n buttons_rendered = SAFE_EMPTY_STR\n\n for jb in buttons:\n # Render grouped buttons as list items\n button_html, form_html = _render_job_button_for_obj(jb, obj, context, content_type)\n if button_html:\n buttons_rendered += format_html(\"<li>{}</li>\", button_html)\n if form_html:\n forms_html += form_html\n\n if buttons_rendered:\n buttons_html += format_html(\n GROUP_DROPDOWN,\n group_button_class=group_button_class,\n group_name=group_name,\n grouped_buttons=buttons_rendered,\n )\n\n # We want all of the buttons first and then any modals and forms so the buttons render properly\n return buttons_html + forms_html\n", "path": "nautobot/extras/templatetags/job_buttons.py"}], "after_files": [{"content": "from collections import OrderedDict\n\nfrom django import template\nfrom django.conf import settings\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.urls import reverse\nfrom django.utils.html import format_html\nfrom django.utils.safestring import mark_safe\n\nfrom nautobot.extras.models import Job, JobButton\nfrom nautobot.utilities.utils import 
render_jinja2\n\n\nregister = template.Library()\n\nGROUP_DROPDOWN = \"\"\"\n<div class=\"btn-group\">\n <button type=\"button\" class=\"btn btn-sm btn-{group_button_class} dropdown-toggle\" data-toggle=\"dropdown\">\n {group_name} <span class=\"caret\"></span>\n </button>\n <ul class=\"dropdown-menu pull-right\">\n {grouped_buttons}\n </ul>\n</div>\n\"\"\"\n\nHIDDEN_INPUTS = \"\"\"\n<input type=\"hidden\" name=\"csrfmiddlewaretoken\" value=\"{csrf_token}\">\n<input type=\"hidden\" name=\"object_pk\" value=\"{object_pk}\">\n<input type=\"hidden\" name=\"object_model_name\" value=\"{object_model_name}\">\n<input type=\"hidden\" name=\"_schedule_type\" value=\"immediately\">\n<input type=\"hidden\" name=\"_task_queue\" value=\"{task_queue}\">\n<input type=\"hidden\" name=\"_return_url\" value=\"{redirect_path}\">\n<input type=\"hidden\" name=\"_commit\" value=\"on\">\n\"\"\"\n\nNO_CONFIRM_BUTTON = \"\"\"\n<button type=\"submit\" form=\"form_id_{button_id}\" class=\"btn btn-sm btn-{button_class}\" {disabled}>{button_text}</button>\n\"\"\"\n\nNO_CONFIRM_FORM = \"\"\"\n<form id=\"form_id_{button_id}\" action=\"{button_url}\" method=\"post\" class=\"form\">\n {hidden_inputs}\n</form>\n\"\"\"\n\nCONFIRM_BUTTON = \"\"\"\n<button type=\"button\" class=\"btn btn-sm btn-{button_class}\" data-toggle=\"modal\" data-target=\"#confirm_modal_id_{button_id}\" {disabled}>\n {button_text}\n</button>\n\"\"\"\n\nCONFIRM_MODAL = \"\"\"\n<div class=\"modal fade\" id=\"confirm_modal_id_{button_id}\" tabindex=\"-1\" role=\"dialog\" aria-labelledby=\"confirm_modal_label_{button_id}\">\n <div class=\"modal-dialog\" role=\"document\">\n <div class=\"modal-content\">\n <div class=\"modal-header\">\n <button type=\"button\" class=\"close\" data-dismiss=\"modal\" aria-label=\"Close\"><span aria-hidden=\"true\">×</span></button>\n <h4 class=\"modal-title\" id=\"confirm_modal_label_{button_id}\">Confirmation</h4>\n </div>\n <form id=\"form_id_{button_id}\" action=\"{button_url}\" method=\"post\" class=\"form\">\n <div class=\"modal-body\">\n {hidden_inputs}\n Run Job <strong>'{job}'</strong> with object <strong>'{object}'</strong>?\n </div>\n <div class=\"modal-footer\">\n <button type=\"button\" class=\"btn btn-default\" data-dismiss=\"modal\">Cancel</button>\n <button type=\"submit\" class=\"btn btn-primary\">Confirm</button>\n </div>\n </form>\n </div>\n </div>\n</div>\n\"\"\"\n\nSAFE_EMPTY_STR = mark_safe(\"\") # noqa: S308\n\n\ndef _render_job_button_for_obj(job_button, obj, context, content_type):\n \"\"\"\n Helper method for job_buttons templatetag to reduce repetition of code.\n\n Returns:\n (str, str): (button_html, form_html)\n \"\"\"\n # Pass select context data when rendering the JobButton text as Jinja2\n button_context = {\n \"obj\": obj,\n \"debug\": context.get(\"debug\", False), # django.template.context_processors.debug\n \"request\": context[\"request\"], # django.template.context_processors.request\n \"user\": context[\"user\"], # django.contrib.auth.context_processors.auth\n \"perms\": context[\"perms\"], # django.contrib.auth.context_processors.auth\n }\n try:\n text_rendered = render_jinja2(job_button.text, button_context)\n except Exception as exc:\n return (\n format_html(\n '<a class=\"btn btn-sm btn-{}\" disabled=\"disabled\" title=\"{}\"><i class=\"mdi mdi-alert\"></i> {}</a>\\n',\n \"default\" if not job_button.group_name else \"link\",\n exc,\n job_button.name,\n ),\n SAFE_EMPTY_STR,\n )\n\n if not text_rendered:\n return (SAFE_EMPTY_STR, SAFE_EMPTY_STR)\n\n # Disable buttons if the user 
doesn't have permission to run the underlying Job.\n has_run_perm = Job.objects.check_perms(context[\"user\"], instance=job_button.job, action=\"run\")\n try:\n _task_queue = job_button.job.task_queues[0]\n except IndexError:\n _task_queue = settings.CELERY_TASK_DEFAULT_QUEUE\n hidden_inputs = format_html(\n HIDDEN_INPUTS,\n csrf_token=context[\"csrf_token\"],\n object_pk=obj.pk,\n object_model_name=f\"{content_type.app_label}.{content_type.model}\",\n redirect_path=context[\"request\"].path,\n task_queue=_task_queue,\n )\n template_args = {\n \"button_id\": job_button.pk,\n \"button_text\": text_rendered,\n \"button_class\": job_button.button_class if not job_button.group_name else \"link\",\n \"button_url\": reverse(\"extras:job_run\", kwargs={\"slug\": job_button.job.slug}),\n \"object\": obj,\n \"job\": job_button.job,\n \"hidden_inputs\": hidden_inputs,\n \"disabled\": \"\" if has_run_perm else \"disabled\",\n }\n\n if job_button.confirmation:\n return (\n format_html(CONFIRM_BUTTON, **template_args),\n format_html(CONFIRM_MODAL, **template_args),\n )\n else:\n return (\n format_html(NO_CONFIRM_BUTTON, **template_args),\n format_html(NO_CONFIRM_FORM, **template_args),\n )\n\n\[email protected]_tag(takes_context=True)\ndef job_buttons(context, obj):\n \"\"\"\n Render all applicable job buttons for the given object.\n \"\"\"\n content_type = ContentType.objects.get_for_model(obj)\n # We will enforce \"run\" permission later in deciding which buttons to show as disabled.\n buttons = JobButton.objects.filter(content_types=content_type)\n if not buttons:\n return SAFE_EMPTY_STR\n\n buttons_html = forms_html = SAFE_EMPTY_STR\n group_names = OrderedDict()\n\n for jb in buttons:\n # Organize job buttons by group for later processing\n if jb.group_name:\n group_names.setdefault(jb.group_name, []).append(jb)\n\n # Render and add non-grouped buttons\n else:\n button_html, form_html = _render_job_button_for_obj(jb, obj, context, content_type)\n buttons_html += button_html\n forms_html += form_html\n\n # Add grouped buttons to template\n for group_name, buttons in group_names.items():\n group_button_class = buttons[0].button_class\n\n buttons_rendered = SAFE_EMPTY_STR\n\n for jb in buttons:\n # Render grouped buttons as list items\n button_html, form_html = _render_job_button_for_obj(jb, obj, context, content_type)\n if button_html:\n buttons_rendered += format_html(\"<li>{}</li>\", button_html)\n if form_html:\n forms_html += form_html\n\n if buttons_rendered:\n buttons_html += format_html(\n GROUP_DROPDOWN,\n group_button_class=group_button_class,\n group_name=group_name,\n grouped_buttons=buttons_rendered,\n )\n\n # We want all of the buttons first and then any modals and forms so the buttons render properly\n return buttons_html + forms_html\n", "path": "nautobot/extras/templatetags/job_buttons.py"}]}
| 2,703 | 393 |
gh_patches_debug_42880
|
rasdani/github-patches
|
git_diff
|
mindsdb__mindsdb-1185
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
All logging messages produced in ray actors have 'ERROR' level
At this moment, all log messages produced in ray actor came with 'ERROR' level. There are two issues because this:
1. db grow quick.
2. with sqllite as db is many errors related to db lock, especially of start several learn processes.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mindsdb/utilities/log.py`
Content:
```
1 import os
2 import sys
3 import logging
4 import traceback
5
6 from mindsdb.interfaces.storage.db import session, Log
7 from mindsdb.utilities.config import Config
8
9 telemtry_enabled = os.getenv('CHECK_FOR_UPDATES', '1').lower() not in ['0', 'false', 'False']
10 global_config = Config().get_all()
11
12 if telemtry_enabled:
13 import sentry_sdk
14 from sentry_sdk import capture_exception, capture_message, add_breadcrumb
15 sentry_sdk.init(
16 "https://[email protected]/5633566",
17 traces_sample_rate=0 #Set to `1` to experiment with performance metrics
18 )
19
20 class LoggerWrapper(object):
21 def __init__(self, writer_arr, default_writer_pos):
22 self._writer_arr = writer_arr
23 self.default_writer_pos = default_writer_pos
24
25 def write(self, message):
26 if 'DEBUG:' in message:
27 self._writer_arr[0](message)
28 elif 'INFO:' in message:
29 self._writer_arr[1](message)
30 elif 'WARNING:' in message:
31 self._writer_arr[2](message)
32 elif 'ERROR:' in message:
33 self._writer_arr[3](message)
34 else:
35 self._writer_arr[self.default_writer_pos](message)
36
37 def flush(self):
38 pass
39
40 class DbHandler(logging.Handler):
41 def __init__(self):
42 logging.Handler.__init__(self)
43 self.company_id = os.environ.get('MINDSDB_COMPANY_ID', None)
44
45 def emit(self, record):
46 log_type = record.levelname
47 source = f'file: {record.pathname} - line: {record.lineno}'
48 payload = record.msg
49
50 if telemtry_enabled:
51 pass
52 # @TODO: Enable once we are sure no sensitive info is being outputed in the logs
53 # if log_type in ['INFO']:
54 # add_breadcrumb(
55 # category='auth',
56 # message=str(payload),
57 # level='info',
58 # )
59 # Might be too much traffic if we send this for users with slow networks
60 #if log_type in ['DEBUG']:
61 # add_breadcrumb(
62 # category='auth',
63 # message=str(payload),
64 # level='debug',
65 # )
66
67 if log_type in ['ERROR', 'WARNING']:
68 trace = str(traceback.format_stack(limit=20))
69 trac_log = Log(log_type='traceback', source=source, payload=trace, company_id=self.company_id)
70 session.add(trac_log)
71 session.commit()
72
73 if telemtry_enabled:
74 add_breadcrumb(
75 category='stack_trace',
76 message=trace,
77 level='info',
78 )
79 if log_type in ['ERROR']:
80 capture_message(str(payload))
81 if log_type in ['WARNING']:
82 capture_message(str(payload))
83
84 log = Log(log_type=str(log_type), source=source, payload=str(payload), company_id=self.company_id)
85 session.add(log)
86 session.commit()
87
88 def fmt_log_record(log_record):
89 return {
90 'log_from': 'mindsdb',
91 'level': log_record.log_type,
92 'context': 'unkown',
93 'text': log_record.payload,
94 'created_at': str(log_record.created_at).split('.')[0]
95 }
96
97 def get_logs(min_timestamp, max_timestamp, context, level, log_from, limit):
98 logs = session.query(Log).filter(Log.company_id==os.environ.get('MINDSDB_COMPANY_ID', None), Log.created_at>min_timestamp)
99
100 if max_timestamp is not None:
101 logs = logs.filter(Log.created_at<max_timestamp)
102
103 if context is not None:
104 # e.g. datasource/predictor and assoicated id
105 pass
106
107 if level is not None:
108 logs = logs.filter(Log.log_type==level)
109
110 if log_from is not None:
111 # mindsdb/native/lightwood/all
112 pass
113
114 if limit is not None:
115 logs = logs.limit(limit)
116
117 logs = [fmt_log_record(x) for x in logs]
118 return logs
119
120 def initialize_log(config=global_config, logger_name='main', wrap_print=False):
121 ''' Create new logger
122 :param config: object, app config
123 :param logger_name: str, name of logger
124 :param wrap_print: bool, if true, then print() calls will be wrapped by log.debug() function.
125 '''
126 log = logging.getLogger(f'mindsdb.{logger_name}')
127 log.propagate = False
128 log.setLevel(min(
129 getattr(logging, config['log']['level']['console']),
130 getattr(logging, config['log']['level']['file'])
131 ))
132
133 formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
134
135 ch = logging.StreamHandler()
136 ch.setLevel(config['log']['level']['console']) # that level will be in console
137 log.addHandler(ch)
138 db_handler = DbHandler()
139 log.addHandler(db_handler)
140
141 if wrap_print:
142 sys.stdout = LoggerWrapper([log.debug, log.info, log.warning, log.error], 1)
143 sys.stderr = LoggerWrapper([log.debug, log.info, log.warning, log.error], 3)
144
145 return log
146
147
148 def get_log(logger_name=None):
149 if logger_name is None:
150 return logging.getLogger('mindsdb')
151 return logging.getLogger(f'mindsdb.{logger_name}')
152
153
154 log = initialize_log()
155
```
Path: `mindsdb/__init__.py`
Content:
```
1 import os
2 import sys
3 import json
4
5 from mindsdb.__about__ import __package_name__ as name, __version__ # noqa
6 from mindsdb.utilities.fs import get_or_create_dir_struct, create_dirs_recursive
7 from mindsdb.utilities.functions import args_parse, is_notebook
8 from mindsdb.__about__ import __version__ as mindsdb_version
9 from mindsdb.utilities.telemetry import telemetry_file_exists, disable_telemetry
10
11 is_ray_worker = False
12 if '-ray' in str(sys.argv):
13 is_ray_worker = True
14
15 if not is_ray_worker:
16 try:
17 if not is_notebook():
18 args = args_parse()
19 else:
20 args = None
21 except Exception:
22 # This fials in some notebooks ... check above for is_notebook is still needed because even if the exception is caught trying to read the arg still leads to failure in other notebooks... notebooks a
23 args = None
24
25 # ---- CHECK SYSTEM ----
26 if not (sys.version_info[0] >= 3 and sys.version_info[1] >= 6):
27 print("""
28 MindsDB server requires Python >= 3.6 to run
29
30 Once you have Python 3.6 installed you can tun mindsdb as follows:
31
32 1. create and activate venv:
33 python3.6 -m venv venv
34 source venv/bin/activate
35
36 2. install MindsDB:
37 pip3 install mindsdb
38
39 3. Run MindsDB
40 python3.6 -m mindsdb
41
42 More instructions in https://docs.mindsdb.com
43 """)
44 exit(1)
45
46 # --- VERSION MODE ----
47 if args is not None and args.version:
48 print(f'MindsDB {mindsdb_version}')
49 sys.exit(0)
50
51 # --- MODULE OR LIBRARY IMPORT MODE ----
52 if args is not None and args.config is not None:
53 config_path = args.config
54 with open(config_path, 'r') as fp:
55 user_config = json.load(fp)
56 else:
57 user_config = {}
58 config_path = 'absent'
59 os.environ['MINDSDB_CONFIG_PATH'] = config_path
60
61 if 'storage_dir' in user_config:
62 root_storage_dir = user_config['storage_dir']
63 os.environ['MINDSDB_STORAGE_DIR'] = root_storage_dir
64 elif os.environ.get('MINDSDB_STORAGE_DIR') is not None:
65 root_storage_dir = os.environ['MINDSDB_STORAGE_DIR']
66 else:
67 _, root_storage_dir = get_or_create_dir_struct()
68 os.environ['MINDSDB_STORAGE_DIR'] = root_storage_dir
69
70 if os.path.isdir(root_storage_dir) is False:
71 os.makedirs(root_storage_dir)
72
73
74
75 if 'storage_db' in user_config:
76 os.environ['MINDSDB_DB_CON'] = user_config['storage_db']
77 elif os.environ.get('MINDSDB_DB_CON','') == '':
78 os.environ['MINDSDB_DB_CON'] = 'sqlite:///' + os.path.join(os.environ['MINDSDB_STORAGE_DIR'],'mindsdb.sqlite3.db') + '?check_same_thread=False'
79
80 if 'company_id' in user_config:
81 os.environ['MINDSDB_COMPANY_ID'] = user_config['company_id']
82
83
84 from mindsdb.utilities.config import Config
85 mindsdb_config = Config()
86 create_dirs_recursive(mindsdb_config['paths'])
87
88 os.environ['DEFAULT_LOG_LEVEL'] = os.environ.get('DEFAULT_LOG_LEVEL', 'ERROR')
89 os.environ['LIGHTWOOD_LOG_LEVEL'] = os.environ.get('LIGHTWOOD_LOG_LEVEL', 'ERROR')
90 os.environ['MINDSDB_STORAGE_PATH'] = mindsdb_config['paths']['predictors']
91
92
93 if telemetry_file_exists(mindsdb_config['storage_dir']):
94 os.environ['CHECK_FOR_UPDATES'] = '0'
95 print('\n x telemetry disabled! \n')
96 elif os.getenv('CHECK_FOR_UPDATES', '1').lower() in ['0', 'false', 'False']:
97 disable_telemetry(mindsdb_config['storage_dir'])
98 print('\n x telemetry disabled \n')
99 else:
100 print('\n ✓ telemetry enabled \n')
101
102
103 from mindsdb.interfaces.custom.model_interface import ModelInterface
104
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mindsdb/__init__.py b/mindsdb/__init__.py
--- a/mindsdb/__init__.py
+++ b/mindsdb/__init__.py
@@ -70,17 +70,14 @@
if os.path.isdir(root_storage_dir) is False:
os.makedirs(root_storage_dir)
-
-
if 'storage_db' in user_config:
os.environ['MINDSDB_DB_CON'] = user_config['storage_db']
- elif os.environ.get('MINDSDB_DB_CON','') == '':
- os.environ['MINDSDB_DB_CON'] = 'sqlite:///' + os.path.join(os.environ['MINDSDB_STORAGE_DIR'],'mindsdb.sqlite3.db') + '?check_same_thread=False'
+ elif os.environ.get('MINDSDB_DB_CON', '') == '':
+ os.environ['MINDSDB_DB_CON'] = 'sqlite:///' + os.path.join(os.environ['MINDSDB_STORAGE_DIR'], 'mindsdb.sqlite3.db') + '?check_same_thread=False&timeout=30'
if 'company_id' in user_config:
os.environ['MINDSDB_COMPANY_ID'] = user_config['company_id']
-
from mindsdb.utilities.config import Config
mindsdb_config = Config()
create_dirs_recursive(mindsdb_config['paths'])
@@ -89,7 +86,6 @@
os.environ['LIGHTWOOD_LOG_LEVEL'] = os.environ.get('LIGHTWOOD_LOG_LEVEL', 'ERROR')
os.environ['MINDSDB_STORAGE_PATH'] = mindsdb_config['paths']['predictors']
-
if telemetry_file_exists(mindsdb_config['storage_dir']):
os.environ['CHECK_FOR_UPDATES'] = '0'
print('\n x telemetry disabled! \n')
diff --git a/mindsdb/utilities/log.py b/mindsdb/utilities/log.py
--- a/mindsdb/utilities/log.py
+++ b/mindsdb/utilities/log.py
@@ -17,12 +17,15 @@
traces_sample_rate=0 #Set to `1` to experiment with performance metrics
)
+
class LoggerWrapper(object):
def __init__(self, writer_arr, default_writer_pos):
self._writer_arr = writer_arr
self.default_writer_pos = default_writer_pos
def write(self, message):
+ if len(message.strip(' \n')) == 0:
+ return
if 'DEBUG:' in message:
self._writer_arr[0](message)
elif 'INFO:' in message:
@@ -37,12 +40,17 @@
def flush(self):
pass
+
class DbHandler(logging.Handler):
def __init__(self):
logging.Handler.__init__(self)
self.company_id = os.environ.get('MINDSDB_COMPANY_ID', None)
def emit(self, record):
+ if len(record.message.strip(' \n')) == 0 \
+ or (record.threadName == 'ray_print_logs' and 'mindsdb-logger' not in record.message):
+ return
+
log_type = record.levelname
source = f'file: {record.pathname} - line: {record.lineno}'
payload = record.msg
@@ -85,14 +93,16 @@
session.add(log)
session.commit()
+
def fmt_log_record(log_record):
- return {
- 'log_from': 'mindsdb',
- 'level': log_record.log_type,
- 'context': 'unkown',
- 'text': log_record.payload,
- 'created_at': str(log_record.created_at).split('.')[0]
- }
+ return {
+ 'log_from': 'mindsdb',
+ 'level': log_record.log_type,
+ 'context': 'unkown',
+ 'text': log_record.payload,
+ 'created_at': str(log_record.created_at).split('.')[0]
+ }
+
def get_logs(min_timestamp, max_timestamp, context, level, log_from, limit):
logs = session.query(Log).filter(Log.company_id==os.environ.get('MINDSDB_COMPANY_ID', None), Log.created_at>min_timestamp)
|
{"golden_diff": "diff --git a/mindsdb/__init__.py b/mindsdb/__init__.py\n--- a/mindsdb/__init__.py\n+++ b/mindsdb/__init__.py\n@@ -70,17 +70,14 @@\n if os.path.isdir(root_storage_dir) is False:\n os.makedirs(root_storage_dir)\n \n-\n-\n if 'storage_db' in user_config:\n os.environ['MINDSDB_DB_CON'] = user_config['storage_db']\n- elif os.environ.get('MINDSDB_DB_CON','') == '':\n- os.environ['MINDSDB_DB_CON'] = 'sqlite:///' + os.path.join(os.environ['MINDSDB_STORAGE_DIR'],'mindsdb.sqlite3.db') + '?check_same_thread=False'\n+ elif os.environ.get('MINDSDB_DB_CON', '') == '':\n+ os.environ['MINDSDB_DB_CON'] = 'sqlite:///' + os.path.join(os.environ['MINDSDB_STORAGE_DIR'], 'mindsdb.sqlite3.db') + '?check_same_thread=False&timeout=30'\n \n if 'company_id' in user_config:\n os.environ['MINDSDB_COMPANY_ID'] = user_config['company_id']\n \n-\n from mindsdb.utilities.config import Config\n mindsdb_config = Config()\n create_dirs_recursive(mindsdb_config['paths'])\n@@ -89,7 +86,6 @@\n os.environ['LIGHTWOOD_LOG_LEVEL'] = os.environ.get('LIGHTWOOD_LOG_LEVEL', 'ERROR')\n os.environ['MINDSDB_STORAGE_PATH'] = mindsdb_config['paths']['predictors']\n \n-\n if telemetry_file_exists(mindsdb_config['storage_dir']):\n os.environ['CHECK_FOR_UPDATES'] = '0'\n print('\\n x telemetry disabled! \\n')\ndiff --git a/mindsdb/utilities/log.py b/mindsdb/utilities/log.py\n--- a/mindsdb/utilities/log.py\n+++ b/mindsdb/utilities/log.py\n@@ -17,12 +17,15 @@\n traces_sample_rate=0 #Set to `1` to experiment with performance metrics\n )\n \n+\n class LoggerWrapper(object):\n def __init__(self, writer_arr, default_writer_pos):\n self._writer_arr = writer_arr\n self.default_writer_pos = default_writer_pos\n \n def write(self, message):\n+ if len(message.strip(' \\n')) == 0:\n+ return\n if 'DEBUG:' in message:\n self._writer_arr[0](message)\n elif 'INFO:' in message:\n@@ -37,12 +40,17 @@\n def flush(self):\n pass\n \n+\n class DbHandler(logging.Handler):\n def __init__(self):\n logging.Handler.__init__(self)\n self.company_id = os.environ.get('MINDSDB_COMPANY_ID', None)\n \n def emit(self, record):\n+ if len(record.message.strip(' \\n')) == 0 \\\n+ or (record.threadName == 'ray_print_logs' and 'mindsdb-logger' not in record.message):\n+ return\n+\n log_type = record.levelname\n source = f'file: {record.pathname} - line: {record.lineno}'\n payload = record.msg\n@@ -85,14 +93,16 @@\n session.add(log)\n session.commit()\n \n+\n def fmt_log_record(log_record):\n- return {\n- 'log_from': 'mindsdb',\n- 'level': log_record.log_type,\n- 'context': 'unkown',\n- 'text': log_record.payload,\n- 'created_at': str(log_record.created_at).split('.')[0]\n- }\n+ return {\n+ 'log_from': 'mindsdb',\n+ 'level': log_record.log_type,\n+ 'context': 'unkown',\n+ 'text': log_record.payload,\n+ 'created_at': str(log_record.created_at).split('.')[0]\n+ }\n+\n \n def get_logs(min_timestamp, max_timestamp, context, level, log_from, limit):\n logs = session.query(Log).filter(Log.company_id==os.environ.get('MINDSDB_COMPANY_ID', None), Log.created_at>min_timestamp)\n", "issue": "All logging messages produced in ray actors have 'ERROR' level\nAt this moment, all log messages produced in ray actor came with 'ERROR' level. There are two issues because this:\r\n1. db grow quick.\r\n2. 
with sqllite as db is many errors related to db lock, especially of start several learn processes.\n", "before_files": [{"content": "import os\nimport sys\nimport logging\nimport traceback\n\nfrom mindsdb.interfaces.storage.db import session, Log\nfrom mindsdb.utilities.config import Config\n\ntelemtry_enabled = os.getenv('CHECK_FOR_UPDATES', '1').lower() not in ['0', 'false', 'False']\nglobal_config = Config().get_all()\n\nif telemtry_enabled:\n import sentry_sdk\n from sentry_sdk import capture_exception, capture_message, add_breadcrumb\n sentry_sdk.init(\n \"https://[email protected]/5633566\",\n traces_sample_rate=0 #Set to `1` to experiment with performance metrics\n )\n\nclass LoggerWrapper(object):\n def __init__(self, writer_arr, default_writer_pos):\n self._writer_arr = writer_arr\n self.default_writer_pos = default_writer_pos\n\n def write(self, message):\n if 'DEBUG:' in message:\n self._writer_arr[0](message)\n elif 'INFO:' in message:\n self._writer_arr[1](message)\n elif 'WARNING:' in message:\n self._writer_arr[2](message)\n elif 'ERROR:' in message:\n self._writer_arr[3](message)\n else:\n self._writer_arr[self.default_writer_pos](message)\n\n def flush(self):\n pass\n\nclass DbHandler(logging.Handler):\n def __init__(self):\n logging.Handler.__init__(self)\n self.company_id = os.environ.get('MINDSDB_COMPANY_ID', None)\n\n def emit(self, record):\n log_type = record.levelname\n source = f'file: {record.pathname} - line: {record.lineno}'\n payload = record.msg\n\n if telemtry_enabled:\n pass\n # @TODO: Enable once we are sure no sensitive info is being outputed in the logs\n # if log_type in ['INFO']:\n # add_breadcrumb(\n # category='auth',\n # message=str(payload),\n # level='info',\n # )\n # Might be too much traffic if we send this for users with slow networks\n #if log_type in ['DEBUG']:\n # add_breadcrumb(\n # category='auth',\n # message=str(payload),\n # level='debug',\n # )\n\n if log_type in ['ERROR', 'WARNING']:\n trace = str(traceback.format_stack(limit=20))\n trac_log = Log(log_type='traceback', source=source, payload=trace, company_id=self.company_id)\n session.add(trac_log)\n session.commit()\n\n if telemtry_enabled:\n add_breadcrumb(\n category='stack_trace',\n message=trace,\n level='info',\n )\n if log_type in ['ERROR']:\n capture_message(str(payload))\n if log_type in ['WARNING']:\n capture_message(str(payload))\n\n log = Log(log_type=str(log_type), source=source, payload=str(payload), company_id=self.company_id)\n session.add(log)\n session.commit()\n\ndef fmt_log_record(log_record):\n return {\n 'log_from': 'mindsdb',\n 'level': log_record.log_type,\n 'context': 'unkown',\n 'text': log_record.payload,\n 'created_at': str(log_record.created_at).split('.')[0]\n }\n\ndef get_logs(min_timestamp, max_timestamp, context, level, log_from, limit):\n logs = session.query(Log).filter(Log.company_id==os.environ.get('MINDSDB_COMPANY_ID', None), Log.created_at>min_timestamp)\n\n if max_timestamp is not None:\n logs = logs.filter(Log.created_at<max_timestamp)\n\n if context is not None:\n # e.g. 
datasource/predictor and assoicated id\n pass\n\n if level is not None:\n logs = logs.filter(Log.log_type==level)\n\n if log_from is not None:\n # mindsdb/native/lightwood/all\n pass\n\n if limit is not None:\n logs = logs.limit(limit)\n\n logs = [fmt_log_record(x) for x in logs]\n return logs\n\ndef initialize_log(config=global_config, logger_name='main', wrap_print=False):\n ''' Create new logger\n :param config: object, app config\n :param logger_name: str, name of logger\n :param wrap_print: bool, if true, then print() calls will be wrapped by log.debug() function.\n '''\n log = logging.getLogger(f'mindsdb.{logger_name}')\n log.propagate = False\n log.setLevel(min(\n getattr(logging, config['log']['level']['console']),\n getattr(logging, config['log']['level']['file'])\n ))\n\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n\n ch = logging.StreamHandler()\n ch.setLevel(config['log']['level']['console']) # that level will be in console\n log.addHandler(ch)\n db_handler = DbHandler()\n log.addHandler(db_handler)\n\n if wrap_print:\n sys.stdout = LoggerWrapper([log.debug, log.info, log.warning, log.error], 1)\n sys.stderr = LoggerWrapper([log.debug, log.info, log.warning, log.error], 3)\n\n return log\n\n\ndef get_log(logger_name=None):\n if logger_name is None:\n return logging.getLogger('mindsdb')\n return logging.getLogger(f'mindsdb.{logger_name}')\n\n\nlog = initialize_log()\n", "path": "mindsdb/utilities/log.py"}, {"content": "import os\nimport sys\nimport json\n\nfrom mindsdb.__about__ import __package_name__ as name, __version__ # noqa\nfrom mindsdb.utilities.fs import get_or_create_dir_struct, create_dirs_recursive\nfrom mindsdb.utilities.functions import args_parse, is_notebook\nfrom mindsdb.__about__ import __version__ as mindsdb_version\nfrom mindsdb.utilities.telemetry import telemetry_file_exists, disable_telemetry\n\nis_ray_worker = False\nif '-ray' in str(sys.argv):\n is_ray_worker = True\n\nif not is_ray_worker:\n try:\n if not is_notebook():\n args = args_parse()\n else:\n args = None\n except Exception:\n # This fials in some notebooks ... check above for is_notebook is still needed because even if the exception is caught trying to read the arg still leads to failure in other notebooks... notebooks a\n args = None\n\n # ---- CHECK SYSTEM ----\n if not (sys.version_info[0] >= 3 and sys.version_info[1] >= 6):\n print(\"\"\"\n MindsDB server requires Python >= 3.6 to run\n\n Once you have Python 3.6 installed you can tun mindsdb as follows:\n\n 1. create and activate venv:\n python3.6 -m venv venv\n source venv/bin/activate\n\n 2. install MindsDB:\n pip3 install mindsdb\n\n 3. 
Run MindsDB\n python3.6 -m mindsdb\n\n More instructions in https://docs.mindsdb.com\n \"\"\")\n exit(1)\n\n # --- VERSION MODE ----\n if args is not None and args.version:\n print(f'MindsDB {mindsdb_version}')\n sys.exit(0)\n\n # --- MODULE OR LIBRARY IMPORT MODE ----\n if args is not None and args.config is not None:\n config_path = args.config\n with open(config_path, 'r') as fp:\n user_config = json.load(fp)\n else:\n user_config = {}\n config_path = 'absent'\n os.environ['MINDSDB_CONFIG_PATH'] = config_path\n\n if 'storage_dir' in user_config:\n root_storage_dir = user_config['storage_dir']\n os.environ['MINDSDB_STORAGE_DIR'] = root_storage_dir\n elif os.environ.get('MINDSDB_STORAGE_DIR') is not None:\n root_storage_dir = os.environ['MINDSDB_STORAGE_DIR']\n else:\n _, root_storage_dir = get_or_create_dir_struct()\n os.environ['MINDSDB_STORAGE_DIR'] = root_storage_dir\n\n if os.path.isdir(root_storage_dir) is False:\n os.makedirs(root_storage_dir)\n\n\n\n if 'storage_db' in user_config:\n os.environ['MINDSDB_DB_CON'] = user_config['storage_db']\n elif os.environ.get('MINDSDB_DB_CON','') == '':\n os.environ['MINDSDB_DB_CON'] = 'sqlite:///' + os.path.join(os.environ['MINDSDB_STORAGE_DIR'],'mindsdb.sqlite3.db') + '?check_same_thread=False'\n\n if 'company_id' in user_config:\n os.environ['MINDSDB_COMPANY_ID'] = user_config['company_id']\n\n\n from mindsdb.utilities.config import Config\n mindsdb_config = Config()\n create_dirs_recursive(mindsdb_config['paths'])\n\n os.environ['DEFAULT_LOG_LEVEL'] = os.environ.get('DEFAULT_LOG_LEVEL', 'ERROR')\n os.environ['LIGHTWOOD_LOG_LEVEL'] = os.environ.get('LIGHTWOOD_LOG_LEVEL', 'ERROR')\n os.environ['MINDSDB_STORAGE_PATH'] = mindsdb_config['paths']['predictors']\n\n\n if telemetry_file_exists(mindsdb_config['storage_dir']):\n os.environ['CHECK_FOR_UPDATES'] = '0'\n print('\\n x telemetry disabled! 
\\n')\n elif os.getenv('CHECK_FOR_UPDATES', '1').lower() in ['0', 'false', 'False']:\n disable_telemetry(mindsdb_config['storage_dir'])\n print('\\n x telemetry disabled \\n')\n else:\n print('\\n \u2713 telemetry enabled \\n')\n\n\n from mindsdb.interfaces.custom.model_interface import ModelInterface\n", "path": "mindsdb/__init__.py"}], "after_files": [{"content": "import os\nimport sys\nimport logging\nimport traceback\n\nfrom mindsdb.interfaces.storage.db import session, Log\nfrom mindsdb.utilities.config import Config\n\ntelemtry_enabled = os.getenv('CHECK_FOR_UPDATES', '1').lower() not in ['0', 'false', 'False']\nglobal_config = Config().get_all()\n\nif telemtry_enabled:\n import sentry_sdk\n from sentry_sdk import capture_exception, capture_message, add_breadcrumb\n sentry_sdk.init(\n \"https://[email protected]/5633566\",\n traces_sample_rate=0 #Set to `1` to experiment with performance metrics\n )\n\n\nclass LoggerWrapper(object):\n def __init__(self, writer_arr, default_writer_pos):\n self._writer_arr = writer_arr\n self.default_writer_pos = default_writer_pos\n\n def write(self, message):\n if len(message.strip(' \\n')) == 0:\n return\n if 'DEBUG:' in message:\n self._writer_arr[0](message)\n elif 'INFO:' in message:\n self._writer_arr[1](message)\n elif 'WARNING:' in message:\n self._writer_arr[2](message)\n elif 'ERROR:' in message:\n self._writer_arr[3](message)\n else:\n self._writer_arr[self.default_writer_pos](message)\n\n def flush(self):\n pass\n\n\nclass DbHandler(logging.Handler):\n def __init__(self):\n logging.Handler.__init__(self)\n self.company_id = os.environ.get('MINDSDB_COMPANY_ID', None)\n\n def emit(self, record):\n if len(record.message.strip(' \\n')) == 0 \\\n or (record.threadName == 'ray_print_logs' and 'mindsdb-logger' not in record.message):\n return\n\n log_type = record.levelname\n source = f'file: {record.pathname} - line: {record.lineno}'\n payload = record.msg\n\n if telemtry_enabled:\n pass\n # @TODO: Enable once we are sure no sensitive info is being outputed in the logs\n # if log_type in ['INFO']:\n # add_breadcrumb(\n # category='auth',\n # message=str(payload),\n # level='info',\n # )\n # Might be too much traffic if we send this for users with slow networks\n #if log_type in ['DEBUG']:\n # add_breadcrumb(\n # category='auth',\n # message=str(payload),\n # level='debug',\n # )\n\n if log_type in ['ERROR', 'WARNING']:\n trace = str(traceback.format_stack(limit=20))\n trac_log = Log(log_type='traceback', source=source, payload=trace, company_id=self.company_id)\n session.add(trac_log)\n session.commit()\n\n if telemtry_enabled:\n add_breadcrumb(\n category='stack_trace',\n message=trace,\n level='info',\n )\n if log_type in ['ERROR']:\n capture_message(str(payload))\n if log_type in ['WARNING']:\n capture_message(str(payload))\n\n log = Log(log_type=str(log_type), source=source, payload=str(payload), company_id=self.company_id)\n session.add(log)\n session.commit()\n\n\ndef fmt_log_record(log_record):\n return {\n 'log_from': 'mindsdb',\n 'level': log_record.log_type,\n 'context': 'unkown',\n 'text': log_record.payload,\n 'created_at': str(log_record.created_at).split('.')[0]\n }\n\n\ndef get_logs(min_timestamp, max_timestamp, context, level, log_from, limit):\n logs = session.query(Log).filter(Log.company_id==os.environ.get('MINDSDB_COMPANY_ID', None), Log.created_at>min_timestamp)\n\n if max_timestamp is not None:\n logs = logs.filter(Log.created_at<max_timestamp)\n\n if context is not None:\n # e.g. 
datasource/predictor and assoicated id\n pass\n\n if level is not None:\n logs = logs.filter(Log.log_type==level)\n\n if log_from is not None:\n # mindsdb/native/lightwood/all\n pass\n\n if limit is not None:\n logs = logs.limit(limit)\n\n logs = [fmt_log_record(x) for x in logs]\n return logs\n\ndef initialize_log(config=global_config, logger_name='main', wrap_print=False):\n ''' Create new logger\n :param config: object, app config\n :param logger_name: str, name of logger\n :param wrap_print: bool, if true, then print() calls will be wrapped by log.debug() function.\n '''\n log = logging.getLogger(f'mindsdb.{logger_name}')\n log.propagate = False\n log.setLevel(min(\n getattr(logging, config['log']['level']['console']),\n getattr(logging, config['log']['level']['file'])\n ))\n\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n\n ch = logging.StreamHandler()\n ch.setLevel(config['log']['level']['console']) # that level will be in console\n log.addHandler(ch)\n db_handler = DbHandler()\n log.addHandler(db_handler)\n\n if wrap_print:\n sys.stdout = LoggerWrapper([log.debug, log.info, log.warning, log.error], 1)\n sys.stderr = LoggerWrapper([log.debug, log.info, log.warning, log.error], 3)\n\n return log\n\n\ndef get_log(logger_name=None):\n if logger_name is None:\n return logging.getLogger('mindsdb')\n return logging.getLogger(f'mindsdb.{logger_name}')\n\n\nlog = initialize_log()\n", "path": "mindsdb/utilities/log.py"}, {"content": "import os\nimport sys\nimport json\n\nfrom mindsdb.__about__ import __package_name__ as name, __version__ # noqa\nfrom mindsdb.utilities.fs import get_or_create_dir_struct, create_dirs_recursive\nfrom mindsdb.utilities.functions import args_parse, is_notebook\nfrom mindsdb.__about__ import __version__ as mindsdb_version\nfrom mindsdb.utilities.telemetry import telemetry_file_exists, disable_telemetry\n\nis_ray_worker = False\nif '-ray' in str(sys.argv):\n is_ray_worker = True\n\nif not is_ray_worker:\n try:\n if not is_notebook():\n args = args_parse()\n else:\n args = None\n except Exception:\n # This fials in some notebooks ... check above for is_notebook is still needed because even if the exception is caught trying to read the arg still leads to failure in other notebooks... notebooks a\n args = None\n\n # ---- CHECK SYSTEM ----\n if not (sys.version_info[0] >= 3 and sys.version_info[1] >= 6):\n print(\"\"\"\n MindsDB server requires Python >= 3.6 to run\n\n Once you have Python 3.6 installed you can tun mindsdb as follows:\n\n 1. create and activate venv:\n python3.6 -m venv venv\n source venv/bin/activate\n\n 2. install MindsDB:\n pip3 install mindsdb\n\n 3. 
Run MindsDB\n python3.6 -m mindsdb\n\n More instructions in https://docs.mindsdb.com\n \"\"\")\n exit(1)\n\n # --- VERSION MODE ----\n if args is not None and args.version:\n print(f'MindsDB {mindsdb_version}')\n sys.exit(0)\n\n # --- MODULE OR LIBRARY IMPORT MODE ----\n if args is not None and args.config is not None:\n config_path = args.config\n with open(config_path, 'r') as fp:\n user_config = json.load(fp)\n else:\n user_config = {}\n config_path = 'absent'\n os.environ['MINDSDB_CONFIG_PATH'] = config_path\n\n if 'storage_dir' in user_config:\n root_storage_dir = user_config['storage_dir']\n os.environ['MINDSDB_STORAGE_DIR'] = root_storage_dir\n elif os.environ.get('MINDSDB_STORAGE_DIR') is not None:\n root_storage_dir = os.environ['MINDSDB_STORAGE_DIR']\n else:\n _, root_storage_dir = get_or_create_dir_struct()\n os.environ['MINDSDB_STORAGE_DIR'] = root_storage_dir\n\n if os.path.isdir(root_storage_dir) is False:\n os.makedirs(root_storage_dir)\n\n if 'storage_db' in user_config:\n os.environ['MINDSDB_DB_CON'] = user_config['storage_db']\n elif os.environ.get('MINDSDB_DB_CON', '') == '':\n os.environ['MINDSDB_DB_CON'] = 'sqlite:///' + os.path.join(os.environ['MINDSDB_STORAGE_DIR'], 'mindsdb.sqlite3.db') + '?check_same_thread=False&timeout=30'\n\n if 'company_id' in user_config:\n os.environ['MINDSDB_COMPANY_ID'] = user_config['company_id']\n\n from mindsdb.utilities.config import Config\n mindsdb_config = Config()\n create_dirs_recursive(mindsdb_config['paths'])\n\n os.environ['DEFAULT_LOG_LEVEL'] = os.environ.get('DEFAULT_LOG_LEVEL', 'ERROR')\n os.environ['LIGHTWOOD_LOG_LEVEL'] = os.environ.get('LIGHTWOOD_LOG_LEVEL', 'ERROR')\n os.environ['MINDSDB_STORAGE_PATH'] = mindsdb_config['paths']['predictors']\n\n if telemetry_file_exists(mindsdb_config['storage_dir']):\n os.environ['CHECK_FOR_UPDATES'] = '0'\n print('\\n x telemetry disabled! \\n')\n elif os.getenv('CHECK_FOR_UPDATES', '1').lower() in ['0', 'false', 'False']:\n disable_telemetry(mindsdb_config['storage_dir'])\n print('\\n x telemetry disabled \\n')\n else:\n print('\\n \u2713 telemetry enabled \\n')\n\n\n from mindsdb.interfaces.custom.model_interface import ModelInterface\n", "path": "mindsdb/__init__.py"}]}
| 3,036 | 908 |
gh_patches_debug_59413
|
rasdani/github-patches
|
git_diff
|
encode__starlette-88
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CORSMiddleware is sending an extra 'http.response.body'
It seems that even with all tests passing and cors being successfully applied, CORSMiddleware still raises a runtime error.
Code being tested:
```python
app = Starlette()
app.add_middleware(CORSMiddleware, allow_origins=["*"])
@app.route("/")
async def homepage(request):
return PlainTextResponse('Hello', status_code=200)
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=8000)
```
And the error being produced:
```
ERROR: Exception in ASGI application
Traceback (most recent call last):
File "/home/alexbotello/.local/share/virtualenvs/starlette-dshJy1CJ/lib/python3.7/site-packages/uvicorn/protocols/http/httptools_impl.py", line 384, in run_asgi
result = await asgi(self.receive, self.send)
File "/home/alexbotello/Code/starlette/starlette/exceptions.py", line 60, in app
raise exc from None
File "/home/alexbotello/Code/starlette/starlette/exceptions.py", line 52, in app
await instance(receive, sender)
File "/home/alexbotello/Code/starlette/starlette/middleware/cors.py", line 116, in simple_response
await inner(receive, send)
File "/home/alexbotello/Code/starlette/starlette/applications.py", line 26, in awaitable
await response(receive, send)
File "/home/alexbotello/Code/starlette/starlette/responses.py", line 100, in __call__
await send({"type": "http.response.body", "body": self.body})
File "/home/alexbotello/Code/starlette/starlette/middleware/cors.py", line 130, in send
await send(message)
File "/home/alexbotello/Code/starlette/starlette/exceptions.py", line 47, in sender
await send(message)
File "/home/alexbotello/.local/share/virtualenvs/starlette-dshJy1CJ/lib/python3.7/site-packages/uvicorn/protocols/http/httptools_impl.py", line 518, in send
raise RuntimeError(msg % message_type)
RuntimeError: Unexpected ASGI message 'http.response.body' sent, after response already completed.
```
It seems the issue is originating from `send`. Specifically:
```python
if message["type"] != "http.response.start":
await send(message)
```
Removing this fixes the issue and does not break any tests.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `starlette/middleware/cors.py`
Content:
```
1 from starlette.datastructures import Headers, MutableHeaders, URL
2 from starlette.responses import PlainTextResponse
3 from starlette.types import ASGIApp, ASGIInstance, Scope
4 import functools
5 import typing
6
7
8 ALL_METHODS = ("DELETE", "GET", "OPTIONS", "PATCH", "POST", "PUT")
9
10
11 class CORSMiddleware:
12 def __init__(
13 self,
14 app: ASGIApp,
15 allow_origins: typing.Sequence[str] = (),
16 allow_methods: typing.Sequence[str] = ("GET",),
17 allow_headers: typing.Sequence[str] = (),
18 allow_credentials: bool = False,
19 expose_headers: typing.Sequence[str] = (),
20 max_age: int = 600,
21 ):
22
23 if "*" in allow_methods:
24 allow_methods = ALL_METHODS
25
26 simple_headers = {}
27 if "*" in allow_origins:
28 simple_headers["Access-Control-Allow-Origin"] = "*"
29 if allow_credentials:
30 simple_headers["Access-Control-Allow-Credentials"] = "true"
31 if expose_headers:
32 simple_headers["Access-Control-Expose-Headers"] = ", ".join(expose_headers)
33
34 preflight_headers = {}
35 if "*" in allow_origins:
36 preflight_headers["Access-Control-Allow-Origin"] = "*"
37 else:
38 preflight_headers["Vary"] = "Origin"
39 preflight_headers.update(
40 {
41 "Access-Control-Allow-Methods": ", ".join(allow_methods),
42 "Access-Control-Max-Age": str(max_age),
43 }
44 )
45 if allow_headers and "*" not in allow_headers:
46 preflight_headers["Access-Control-Allow-Headers"] = ", ".join(allow_headers)
47 if allow_credentials:
48 preflight_headers["Access-Control-Allow-Credentials"] = "true"
49
50 self.app = app
51 self.allow_origins = allow_origins
52 self.allow_methods = allow_methods
53 self.allow_headers = allow_headers
54 self.allow_all_origins = "*" in allow_origins
55 self.allow_all_headers = "*" in allow_headers
56 self.simple_headers = simple_headers
57 self.preflight_headers = preflight_headers
58
59 def __call__(self, scope: Scope):
60 if scope["type"] == "http":
61 method = scope["method"]
62 headers = Headers(scope["headers"])
63 origin = headers.get("origin")
64
65 if origin is not None:
66 if method == "OPTIONS" and "access-control-request-method" in headers:
67 return self.preflight_response(request_headers=headers)
68 else:
69 return functools.partial(
70 self.simple_response, scope=scope, origin=origin
71 )
72
73 return self.app(scope)
74
75 def preflight_response(self, request_headers):
76 requested_origin = request_headers["origin"]
77 requested_method = request_headers["access-control-request-method"]
78 requested_headers = request_headers.get("access-control-request-headers")
79 requested_cookie = "cookie" in request_headers
80
81 headers = dict(self.preflight_headers)
82 failures = []
83
84 # If we only allow specific origins, then we have to mirror back
85 # the Origin header in the response.
86 if not self.allow_all_origins:
87 if requested_origin in self.allow_origins:
88 headers["Access-Control-Allow-Origin"] = requested_origin
89 else:
90 failures.append("origin")
91
92 if requested_method not in self.allow_methods:
93 failures.append("method")
94
95 # If we allow all headers, then we have to mirror back any requested
96 # headers in the response.
97 if self.allow_all_headers and requested_headers is not None:
98 headers["Access-Control-Allow-Headers"] = requested_headers
99 elif requested_headers is not None:
100 for header in requested_headers.split(","):
101 if header.strip() not in self.allow_headers:
102 failures.append("headers")
103
104 # We don't strictly need to use 400 responses here, since its up to
105 # the browser to enforce the CORS policy, but its more informative
106 # if we do.
107 if failures:
108 failure_text = "Disallowed CORS " + ", ".join(failures)
109 return PlainTextResponse(failure_text, status_code=400, headers=headers)
110
111 return PlainTextResponse("OK", status_code=200, headers=headers)
112
113 async def simple_response(self, receive, send, scope=None, origin=None):
114 inner = self.app(scope)
115 send = functools.partial(self.send, send=send, origin=origin)
116 await inner(receive, send)
117
118 async def send(self, message, send=None, origin=None):
119 if message["type"] != "http.response.start":
120 await send(message)
121
122 message.setdefault("headers", [])
123 headers = MutableHeaders(message["headers"])
124
125 # If we only allow specific origins, then we have to mirror back
126 # the Origin header in the response.
127 if not self.allow_all_origins and origin in self.allow_origins:
128 headers["Access-Control-Allow-Origin"] = origin
129 headers.update(self.simple_headers)
130 await send(message)
131
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/starlette/middleware/cors.py b/starlette/middleware/cors.py
--- a/starlette/middleware/cors.py
+++ b/starlette/middleware/cors.py
@@ -118,6 +118,7 @@
async def send(self, message, send=None, origin=None):
if message["type"] != "http.response.start":
await send(message)
+ return
message.setdefault("headers", [])
headers = MutableHeaders(message["headers"])
|
{"golden_diff": "diff --git a/starlette/middleware/cors.py b/starlette/middleware/cors.py\n--- a/starlette/middleware/cors.py\n+++ b/starlette/middleware/cors.py\n@@ -118,6 +118,7 @@\n async def send(self, message, send=None, origin=None):\n if message[\"type\"] != \"http.response.start\":\n await send(message)\n+ return\n \n message.setdefault(\"headers\", [])\n headers = MutableHeaders(message[\"headers\"])\n", "issue": "CORSMiddleware is sending an extra 'http.response.body'\nIt seems that even with all tests passing and cors being successfully applied, CORSMiddleware still raises a runtime error.\r\n\r\nCode being tested:\r\n```python\r\napp = Starlette()\r\n\r\napp.add_middleware(CORSMiddleware, allow_origins=[\"*\"])\r\n\r\[email protected](\"/\")\r\nasync def homepage(request):\r\n return PlainTextResponse('Hello', status_code=200)\r\n\r\nif __name__ == \"__main__\":\r\n uvicorn.run(app, host=\"0.0.0.0\", port=8000)\r\n```\r\n\r\nAnd the error being produced:\r\n```\r\nERROR: Exception in ASGI application\r\nTraceback (most recent call last):\r\n File \"/home/alexbotello/.local/share/virtualenvs/starlette-dshJy1CJ/lib/python3.7/site-packages/uvicorn/protocols/http/httptools_impl.py\", line 384, in run_asgi\r\n result = await asgi(self.receive, self.send)\r\n File \"/home/alexbotello/Code/starlette/starlette/exceptions.py\", line 60, in app\r\n raise exc from None\r\n File \"/home/alexbotello/Code/starlette/starlette/exceptions.py\", line 52, in app\r\n await instance(receive, sender)\r\n File \"/home/alexbotello/Code/starlette/starlette/middleware/cors.py\", line 116, in simple_response\r\n await inner(receive, send)\r\n File \"/home/alexbotello/Code/starlette/starlette/applications.py\", line 26, in awaitable\r\n await response(receive, send)\r\n File \"/home/alexbotello/Code/starlette/starlette/responses.py\", line 100, in __call__\r\n await send({\"type\": \"http.response.body\", \"body\": self.body})\r\n File \"/home/alexbotello/Code/starlette/starlette/middleware/cors.py\", line 130, in send\r\n await send(message)\r\n File \"/home/alexbotello/Code/starlette/starlette/exceptions.py\", line 47, in sender\r\n await send(message)\r\n File \"/home/alexbotello/.local/share/virtualenvs/starlette-dshJy1CJ/lib/python3.7/site-packages/uvicorn/protocols/http/httptools_impl.py\", line 518, in send\r\n raise RuntimeError(msg % message_type)\r\nRuntimeError: Unexpected ASGI message 'http.response.body' sent, after response already completed.\r\n```\r\nIt seems the issue is originating from `send`. 
Specifically:\r\n```python\r\nif message[\"type\"] != \"http.response.start\":\r\n await send(message)\r\n```\r\nRemoving this fixes the issue and does not break any tests.\n", "before_files": [{"content": "from starlette.datastructures import Headers, MutableHeaders, URL\nfrom starlette.responses import PlainTextResponse\nfrom starlette.types import ASGIApp, ASGIInstance, Scope\nimport functools\nimport typing\n\n\nALL_METHODS = (\"DELETE\", \"GET\", \"OPTIONS\", \"PATCH\", \"POST\", \"PUT\")\n\n\nclass CORSMiddleware:\n def __init__(\n self,\n app: ASGIApp,\n allow_origins: typing.Sequence[str] = (),\n allow_methods: typing.Sequence[str] = (\"GET\",),\n allow_headers: typing.Sequence[str] = (),\n allow_credentials: bool = False,\n expose_headers: typing.Sequence[str] = (),\n max_age: int = 600,\n ):\n\n if \"*\" in allow_methods:\n allow_methods = ALL_METHODS\n\n simple_headers = {}\n if \"*\" in allow_origins:\n simple_headers[\"Access-Control-Allow-Origin\"] = \"*\"\n if allow_credentials:\n simple_headers[\"Access-Control-Allow-Credentials\"] = \"true\"\n if expose_headers:\n simple_headers[\"Access-Control-Expose-Headers\"] = \", \".join(expose_headers)\n\n preflight_headers = {}\n if \"*\" in allow_origins:\n preflight_headers[\"Access-Control-Allow-Origin\"] = \"*\"\n else:\n preflight_headers[\"Vary\"] = \"Origin\"\n preflight_headers.update(\n {\n \"Access-Control-Allow-Methods\": \", \".join(allow_methods),\n \"Access-Control-Max-Age\": str(max_age),\n }\n )\n if allow_headers and \"*\" not in allow_headers:\n preflight_headers[\"Access-Control-Allow-Headers\"] = \", \".join(allow_headers)\n if allow_credentials:\n preflight_headers[\"Access-Control-Allow-Credentials\"] = \"true\"\n\n self.app = app\n self.allow_origins = allow_origins\n self.allow_methods = allow_methods\n self.allow_headers = allow_headers\n self.allow_all_origins = \"*\" in allow_origins\n self.allow_all_headers = \"*\" in allow_headers\n self.simple_headers = simple_headers\n self.preflight_headers = preflight_headers\n\n def __call__(self, scope: Scope):\n if scope[\"type\"] == \"http\":\n method = scope[\"method\"]\n headers = Headers(scope[\"headers\"])\n origin = headers.get(\"origin\")\n\n if origin is not None:\n if method == \"OPTIONS\" and \"access-control-request-method\" in headers:\n return self.preflight_response(request_headers=headers)\n else:\n return functools.partial(\n self.simple_response, scope=scope, origin=origin\n )\n\n return self.app(scope)\n\n def preflight_response(self, request_headers):\n requested_origin = request_headers[\"origin\"]\n requested_method = request_headers[\"access-control-request-method\"]\n requested_headers = request_headers.get(\"access-control-request-headers\")\n requested_cookie = \"cookie\" in request_headers\n\n headers = dict(self.preflight_headers)\n failures = []\n\n # If we only allow specific origins, then we have to mirror back\n # the Origin header in the response.\n if not self.allow_all_origins:\n if requested_origin in self.allow_origins:\n headers[\"Access-Control-Allow-Origin\"] = requested_origin\n else:\n failures.append(\"origin\")\n\n if requested_method not in self.allow_methods:\n failures.append(\"method\")\n\n # If we allow all headers, then we have to mirror back any requested\n # headers in the response.\n if self.allow_all_headers and requested_headers is not None:\n headers[\"Access-Control-Allow-Headers\"] = requested_headers\n elif requested_headers is not None:\n for header in requested_headers.split(\",\"):\n if header.strip() 
not in self.allow_headers:\n failures.append(\"headers\")\n\n # We don't strictly need to use 400 responses here, since its up to\n # the browser to enforce the CORS policy, but its more informative\n # if we do.\n if failures:\n failure_text = \"Disallowed CORS \" + \", \".join(failures)\n return PlainTextResponse(failure_text, status_code=400, headers=headers)\n\n return PlainTextResponse(\"OK\", status_code=200, headers=headers)\n\n async def simple_response(self, receive, send, scope=None, origin=None):\n inner = self.app(scope)\n send = functools.partial(self.send, send=send, origin=origin)\n await inner(receive, send)\n\n async def send(self, message, send=None, origin=None):\n if message[\"type\"] != \"http.response.start\":\n await send(message)\n\n message.setdefault(\"headers\", [])\n headers = MutableHeaders(message[\"headers\"])\n\n # If we only allow specific origins, then we have to mirror back\n # the Origin header in the response.\n if not self.allow_all_origins and origin in self.allow_origins:\n headers[\"Access-Control-Allow-Origin\"] = origin\n headers.update(self.simple_headers)\n await send(message)\n", "path": "starlette/middleware/cors.py"}], "after_files": [{"content": "from starlette.datastructures import Headers, MutableHeaders, URL\nfrom starlette.responses import PlainTextResponse\nfrom starlette.types import ASGIApp, ASGIInstance, Scope\nimport functools\nimport typing\n\n\nALL_METHODS = (\"DELETE\", \"GET\", \"OPTIONS\", \"PATCH\", \"POST\", \"PUT\")\n\n\nclass CORSMiddleware:\n def __init__(\n self,\n app: ASGIApp,\n allow_origins: typing.Sequence[str] = (),\n allow_methods: typing.Sequence[str] = (\"GET\",),\n allow_headers: typing.Sequence[str] = (),\n allow_credentials: bool = False,\n expose_headers: typing.Sequence[str] = (),\n max_age: int = 600,\n ):\n\n if \"*\" in allow_methods:\n allow_methods = ALL_METHODS\n\n simple_headers = {}\n if \"*\" in allow_origins:\n simple_headers[\"Access-Control-Allow-Origin\"] = \"*\"\n if allow_credentials:\n simple_headers[\"Access-Control-Allow-Credentials\"] = \"true\"\n if expose_headers:\n simple_headers[\"Access-Control-Expose-Headers\"] = \", \".join(expose_headers)\n\n preflight_headers = {}\n if \"*\" in allow_origins:\n preflight_headers[\"Access-Control-Allow-Origin\"] = \"*\"\n else:\n preflight_headers[\"Vary\"] = \"Origin\"\n preflight_headers.update(\n {\n \"Access-Control-Allow-Methods\": \", \".join(allow_methods),\n \"Access-Control-Max-Age\": str(max_age),\n }\n )\n if allow_headers and \"*\" not in allow_headers:\n preflight_headers[\"Access-Control-Allow-Headers\"] = \", \".join(allow_headers)\n if allow_credentials:\n preflight_headers[\"Access-Control-Allow-Credentials\"] = \"true\"\n\n self.app = app\n self.allow_origins = allow_origins\n self.allow_methods = allow_methods\n self.allow_headers = allow_headers\n self.allow_all_origins = \"*\" in allow_origins\n self.allow_all_headers = \"*\" in allow_headers\n self.simple_headers = simple_headers\n self.preflight_headers = preflight_headers\n\n def __call__(self, scope: Scope):\n if scope[\"type\"] == \"http\":\n method = scope[\"method\"]\n headers = Headers(scope[\"headers\"])\n origin = headers.get(\"origin\")\n\n if origin is not None:\n if method == \"OPTIONS\" and \"access-control-request-method\" in headers:\n return self.preflight_response(request_headers=headers)\n else:\n return functools.partial(\n self.simple_response, scope=scope, origin=origin\n )\n\n return self.app(scope)\n\n def preflight_response(self, request_headers):\n 
requested_origin = request_headers[\"origin\"]\n requested_method = request_headers[\"access-control-request-method\"]\n requested_headers = request_headers.get(\"access-control-request-headers\")\n requested_cookie = \"cookie\" in request_headers\n\n headers = dict(self.preflight_headers)\n failures = []\n\n # If we only allow specific origins, then we have to mirror back\n # the Origin header in the response.\n if not self.allow_all_origins:\n if requested_origin in self.allow_origins:\n headers[\"Access-Control-Allow-Origin\"] = requested_origin\n else:\n failures.append(\"origin\")\n\n if requested_method not in self.allow_methods:\n failures.append(\"method\")\n\n # If we allow all headers, then we have to mirror back any requested\n # headers in the response.\n if self.allow_all_headers and requested_headers is not None:\n headers[\"Access-Control-Allow-Headers\"] = requested_headers\n elif requested_headers is not None:\n for header in requested_headers.split(\",\"):\n if header.strip() not in self.allow_headers:\n failures.append(\"headers\")\n\n # We don't strictly need to use 400 responses here, since its up to\n # the browser to enforce the CORS policy, but its more informative\n # if we do.\n if failures:\n failure_text = \"Disallowed CORS \" + \", \".join(failures)\n return PlainTextResponse(failure_text, status_code=400, headers=headers)\n\n return PlainTextResponse(\"OK\", status_code=200, headers=headers)\n\n async def simple_response(self, receive, send, scope=None, origin=None):\n inner = self.app(scope)\n send = functools.partial(self.send, send=send, origin=origin)\n await inner(receive, send)\n\n async def send(self, message, send=None, origin=None):\n if message[\"type\"] != \"http.response.start\":\n await send(message)\n return\n\n message.setdefault(\"headers\", [])\n headers = MutableHeaders(message[\"headers\"])\n\n # If we only allow specific origins, then we have to mirror back\n # the Origin header in the response.\n if not self.allow_all_origins and origin in self.allow_origins:\n headers[\"Access-Control-Allow-Origin\"] = origin\n headers.update(self.simple_headers)\n await send(message)\n", "path": "starlette/middleware/cors.py"}]}
| 2,191 | 105 |
gh_patches_debug_11392
|
rasdani/github-patches
|
git_diff
|
dj-stripe__dj-stripe-1055
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cancelling a Subscription removes the model instance
I notice that at the moment, when you cancel a subscription, the model instance is deleted too, and looking at the code this seems to be by design. Is this the correct action to take? Surely having the history would be useful, and would there not be foreign keys to invoices etc.? What purpose does the subscription status of "cancelled" serve if the records are deleted? I also noticed that the Customer._sync_subscriptions() method pulls in all subscriptions regardless of their status, so the functionality at least seems to be inconsistent.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `djstripe/event_handlers.py`
Content:
```
1 """
2 Webhook event handlers for the various models
3
4 Stripe docs for Events: https://stripe.com/docs/api/events
5 Stripe docs for Webhooks: https://stripe.com/docs/webhooks
6
7 TODO: Implement webhook event handlers for all the models that need to
8 respond to webhook events.
9
10 NOTE:
11 Event data is not guaranteed to be in the correct API version format.
12 See #116. When writing a webhook handler, make sure to first
13 re-retrieve the object you wish to process.
14
15 """
16 import logging
17
18 from . import models, webhooks
19 from .enums import SourceType
20 from .utils import convert_tstamp
21
22 logger = logging.getLogger(__name__)
23
24
25 @webhooks.handler("customer")
26 def customer_webhook_handler(event):
27 """Handle updates to customer objects.
28
29 First determines the crud_type and then handles the event if a customer
30 exists locally.
31 As customers are tied to local users, djstripe will not create customers that
32 do not already exist locally.
33
34 Docs and an example customer webhook response:
35 https://stripe.com/docs/api#customer_object
36 """
37 if event.customer:
38 # As customers are tied to local users, djstripe will not create
39 # customers that do not already exist locally.
40 _handle_crud_like_event(
41 target_cls=models.Customer, event=event, crud_exact=True, crud_valid=True
42 )
43
44
45 @webhooks.handler("customer.discount")
46 def customer_discount_webhook_handler(event):
47 """Handle updates to customer discount objects.
48
49 Docs: https://stripe.com/docs/api#discounts
50
51 Because there is no concept of a "Discount" model in dj-stripe (due to the
52 lack of a stripe id on them), this is a little different to the other
53 handlers.
54 """
55
56 crud_type = CrudType.determine(event=event)
57 discount_data = event.data.get("object", {})
58 coupon_data = discount_data.get("coupon", {})
59 customer = event.customer
60
61 if crud_type.created or crud_type.updated:
62 coupon, _ = _handle_crud_like_event(
63 target_cls=models.Coupon,
64 event=event,
65 data=coupon_data,
66 id=coupon_data.get("id"),
67 )
68 coupon_start = discount_data.get("start")
69 coupon_end = discount_data.get("end")
70 else:
71 coupon = None
72 coupon_start = None
73 coupon_end = None
74
75 customer.coupon = coupon
76 customer.coupon_start = convert_tstamp(coupon_start)
77 customer.coupon_end = convert_tstamp(coupon_end)
78 customer.save()
79
80
81 @webhooks.handler("customer.source")
82 def customer_source_webhook_handler(event):
83 """Handle updates to customer payment-source objects.
84
85 Docs: https://stripe.com/docs/api#customer_object-sources.
86 """
87 customer_data = event.data.get("object", {})
88 source_type = customer_data.get("object", {})
89
90 # TODO: handle other types of sources
91 # (https://stripe.com/docs/api#customer_object-sources)
92 if source_type == SourceType.card:
93 if event.verb.endswith("deleted") and customer_data:
94 # On customer.source.deleted, we do not delete the object,
95 # we merely unlink it.
96 # customer = Customer.objects.get(id=customer_data["id"])
97 # NOTE: for now, customer.sources still points to Card
98 # Also, https://github.com/dj-stripe/dj-stripe/issues/576
99 models.Card.objects.filter(id=customer_data.get("id", "")).delete()
100 models.DjstripePaymentMethod.objects.filter(
101 id=customer_data.get("id", "")
102 ).delete()
103 else:
104 _handle_crud_like_event(target_cls=models.Card, event=event)
105
106
107 @webhooks.handler("customer.subscription")
108 def customer_subscription_webhook_handler(event):
109 """Handle updates to customer subscription objects.
110
111 Docs an example subscription webhook response:
112 https://stripe.com/docs/api#subscription_object
113 """
114 _handle_crud_like_event(target_cls=models.Subscription, event=event)
115
116
117 @webhooks.handler("payment_method")
118 def payment_method_handler(event):
119 """
120 Handle updates to payment_method objects
121 :param event:
122 :return:
123
124 Docs for:
125 - payment_method: https://stripe.com/docs/api/payment_methods
126 """
127 id_ = event.data.get("object", {}).get("id", None)
128
129 if (
130 event.parts == ["payment_method", "detached"]
131 and id_
132 and id_.startswith("card_")
133 ):
134 # Special case to handle a quirk in stripe's wrapping of legacy "card" objects
135 # with payment_methods - card objects are deleted on detach, so treat this as
136 # a delete event
137 _handle_crud_like_event(
138 target_cls=models.PaymentMethod,
139 event=event,
140 crud_type=CrudType(deleted=True),
141 )
142 else:
143 _handle_crud_like_event(target_cls=models.PaymentMethod, event=event)
144
145
146 @webhooks.handler(
147 "transfer",
148 "charge",
149 "coupon",
150 "invoice",
151 "invoiceitem",
152 "payment_intent",
153 "plan",
154 "product",
155 "setup_intent",
156 "source",
157 )
158 def other_object_webhook_handler(event):
159 """
160 Handle updates to transfer, charge, coupon, invoice, invoiceitem, payment_intent,
161 plan, product, setup_intent and source objects.
162
163 Docs for:
164 - charge: https://stripe.com/docs/api#charges
165 - coupon: https://stripe.com/docs/api#coupons
166 - invoice: https://stripe.com/docs/api#invoices
167 - invoiceitem: https://stripe.com/docs/api#invoiceitems
168 - plan: https://stripe.com/docs/api#plans
169 - product: https://stripe.com/docs/api#products
170 - source: https://stripe.com/docs/api#sources
171 - payment_intent: https://stripe.com/docs/api/payment_intents
172 """
173
174 if event.parts[:2] == ["charge", "dispute"]:
175 # Do not attempt to handle charge.dispute.* events.
176 # We do not have a Dispute model yet.
177 target_cls = models.Dispute
178 else:
179 target_cls = {
180 "charge": models.Charge,
181 "coupon": models.Coupon,
182 "invoice": models.Invoice,
183 "invoiceitem": models.InvoiceItem,
184 "payment_intent": models.PaymentIntent,
185 "plan": models.Plan,
186 "product": models.Product,
187 "transfer": models.Transfer,
188 "setup_intent": models.SetupIntent,
189 "source": models.Source,
190 }.get(event.category)
191
192 _handle_crud_like_event(target_cls=target_cls, event=event)
193
194
195 #
196 # Helpers
197 #
198
199
200 class CrudType(object):
201 """Helper object to determine CRUD-like event state."""
202
203 created = False
204 updated = False
205 deleted = False
206
207 def __init__(self, **kwargs):
208 """Set attributes."""
209 for k, v in kwargs.items():
210 setattr(self, k, v)
211
212 @property
213 def valid(self):
214 """Return True if this is a CRUD-like event."""
215 return self.created or self.updated or self.deleted
216
217 @classmethod
218 def determine(cls, event, verb=None, exact=False):
219 """
220 Determine if the event verb is a crud_type (without the 'R') event.
221
222 :param event:
223 :type event: models.Event
224 :param verb: The event verb to examine.
225 :type verb: str
226 :param exact: If True, match crud_type to event verb string exactly.
227 :type exact: bool
228 :returns: The CrudType state object.
229 :rtype: CrudType
230 """
231 verb = verb or event.verb
232
233 def check(crud_type_event):
234 if exact:
235 return verb == crud_type_event
236 else:
237 return verb.endswith(crud_type_event)
238
239 created = updated = deleted = False
240
241 if check("updated"):
242 updated = True
243 elif check("created"):
244 created = True
245 elif check("deleted"):
246 deleted = True
247
248 return cls(created=created, updated=updated, deleted=deleted)
249
250
251 def _handle_crud_like_event(
252 target_cls,
253 event,
254 data=None,
255 verb=None,
256 id=None,
257 customer=None,
258 crud_type=None,
259 crud_exact=False,
260 crud_valid=False,
261 ):
262 """
263 Helper to process crud_type-like events for objects.
264
265 Non-deletes (creates, updates and "anything else" events) are treated as
266 update_or_create events - The object will be retrieved locally, then it is
267 synchronised with the Stripe API for parity.
268
269 Deletes only occur for delete events and cause the object to be deleted
270 from the local database, if it existed. If it doesn't exist then it is
271 ignored (but the event processing still succeeds).
272
273 :param target_cls: The djstripe model being handled.
274 :type target_cls: Type[models.StripeModel]
275 :param event: The event object
276 :type event: models.Event
277 :param data: The event object data (defaults to ``event.data``).
278 :param verb: The event verb (defaults to ``event.verb``).
279 :type verb: str
280 :param id: The object Stripe ID (defaults to ``object.id``).
281 :type id: str
282 :param customer: The customer object (defaults to ``event.customer``).
283 :param crud_type: The CrudType object (determined by default).
284 :param crud_exact: If True, match verb against CRUD type exactly.
285 :param crud_valid: If True, CRUD type must match valid type.
286 :returns: The object (if any) and the event CrudType.
287 :rtype: Tuple[models.StripeModel, CrudType]
288 """
289 data = data or event.data
290 id = id or data.get("object", {}).get("id", None)
291
292 if not id:
293 # We require an object when applying CRUD-like events, so if there's
294 # no ID the event is ignored/dropped. This happens in events such as
295 # invoice.upcoming, which refer to a future (non-existant) invoice.
296 logger.debug(
297 "Ignoring %r Stripe event without object ID: %r", event.type, event
298 )
299 return
300
301 verb = verb or event.verb
302 customer = customer or event.customer
303 crud_type = crud_type or CrudType.determine(
304 event=event, verb=verb, exact=crud_exact
305 )
306 obj = None
307
308 if crud_valid and not crud_type.valid:
309 logger.debug(
310 "Ignoring %r Stripe event without valid CRUD type: %r", event.type, event
311 )
312 return
313
314 if crud_type.deleted:
315 qs = target_cls.objects.filter(id=id)
316 if target_cls is models.Customer and qs.exists():
317 qs.get().purge()
318 else:
319 obj = target_cls.objects.filter(id=id).delete()
320 else:
321 # Any other event type (creates, updates, etc.) - This can apply to
322 # verbs that aren't strictly CRUD but Stripe do intend an update. Such
323 # as invoice.payment_failed.
324 kwargs = {"id": id}
325 if hasattr(target_cls, "customer"):
326 kwargs["customer"] = customer
327 data = target_cls(**kwargs).api_retrieve()
328 obj = target_cls.sync_from_stripe_data(data)
329
330 return obj, crud_type
331
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/djstripe/event_handlers.py b/djstripe/event_handlers.py
--- a/djstripe/event_handlers.py
+++ b/djstripe/event_handlers.py
@@ -111,7 +111,16 @@
Docs an example subscription webhook response:
https://stripe.com/docs/api#subscription_object
"""
- _handle_crud_like_event(target_cls=models.Subscription, event=event)
+
+ # customer.subscription.deleted doesn't actually delete the subscription
+ # on the stripe side, it updates it to canceled status, so override
+ # crud_type to update to match.
+ crud_type = CrudType.determine(event=event)
+ if crud_type.deleted:
+ crud_type = CrudType(updated=True)
+ _handle_crud_like_event(
+ target_cls=models.Subscription, event=event, crud_type=crud_type
+ )
@webhooks.handler("payment_method")
|
{"golden_diff": "diff --git a/djstripe/event_handlers.py b/djstripe/event_handlers.py\n--- a/djstripe/event_handlers.py\n+++ b/djstripe/event_handlers.py\n@@ -111,7 +111,16 @@\n Docs an example subscription webhook response:\n https://stripe.com/docs/api#subscription_object\n \"\"\"\n- _handle_crud_like_event(target_cls=models.Subscription, event=event)\n+\n+ # customer.subscription.deleted doesn't actually delete the subscription\n+ # on the stripe side, it updates it to canceled status, so override\n+ # crud_type to update to match.\n+ crud_type = CrudType.determine(event=event)\n+ if crud_type.deleted:\n+ crud_type = CrudType(updated=True)\n+ _handle_crud_like_event(\n+ target_cls=models.Subscription, event=event, crud_type=crud_type\n+ )\n \n \n @webhooks.handler(\"payment_method\")\n", "issue": "Cancelling a Subscription removes the model instance\nI notice at the moment when you cancel a subscription the model instance is deleted too and looking at the code this seems to be by design. Is this the correct action to take - surely having the history would be useful and would there not be foreign keys to invoices etc? What purpose is the subscription status of \"cancelled\" if the records are deleted? I also noticed that the Customer._sync_subscriptions() method pulls in all subscriptions regardless of their status so the functionality at least seems to be inconsistent.\n", "before_files": [{"content": "\"\"\"\nWebhook event handlers for the various models\n\nStripe docs for Events: https://stripe.com/docs/api/events\nStripe docs for Webhooks: https://stripe.com/docs/webhooks\n\nTODO: Implement webhook event handlers for all the models that need to\n respond to webhook events.\n\nNOTE:\n Event data is not guaranteed to be in the correct API version format.\n See #116. When writing a webhook handler, make sure to first\n re-retrieve the object you wish to process.\n\n\"\"\"\nimport logging\n\nfrom . 
import models, webhooks\nfrom .enums import SourceType\nfrom .utils import convert_tstamp\n\nlogger = logging.getLogger(__name__)\n\n\[email protected](\"customer\")\ndef customer_webhook_handler(event):\n \"\"\"Handle updates to customer objects.\n\n First determines the crud_type and then handles the event if a customer\n exists locally.\n As customers are tied to local users, djstripe will not create customers that\n do not already exist locally.\n\n Docs and an example customer webhook response:\n https://stripe.com/docs/api#customer_object\n \"\"\"\n if event.customer:\n # As customers are tied to local users, djstripe will not create\n # customers that do not already exist locally.\n _handle_crud_like_event(\n target_cls=models.Customer, event=event, crud_exact=True, crud_valid=True\n )\n\n\[email protected](\"customer.discount\")\ndef customer_discount_webhook_handler(event):\n \"\"\"Handle updates to customer discount objects.\n\n Docs: https://stripe.com/docs/api#discounts\n\n Because there is no concept of a \"Discount\" model in dj-stripe (due to the\n lack of a stripe id on them), this is a little different to the other\n handlers.\n \"\"\"\n\n crud_type = CrudType.determine(event=event)\n discount_data = event.data.get(\"object\", {})\n coupon_data = discount_data.get(\"coupon\", {})\n customer = event.customer\n\n if crud_type.created or crud_type.updated:\n coupon, _ = _handle_crud_like_event(\n target_cls=models.Coupon,\n event=event,\n data=coupon_data,\n id=coupon_data.get(\"id\"),\n )\n coupon_start = discount_data.get(\"start\")\n coupon_end = discount_data.get(\"end\")\n else:\n coupon = None\n coupon_start = None\n coupon_end = None\n\n customer.coupon = coupon\n customer.coupon_start = convert_tstamp(coupon_start)\n customer.coupon_end = convert_tstamp(coupon_end)\n customer.save()\n\n\[email protected](\"customer.source\")\ndef customer_source_webhook_handler(event):\n \"\"\"Handle updates to customer payment-source objects.\n\n Docs: https://stripe.com/docs/api#customer_object-sources.\n \"\"\"\n customer_data = event.data.get(\"object\", {})\n source_type = customer_data.get(\"object\", {})\n\n # TODO: handle other types of sources\n # (https://stripe.com/docs/api#customer_object-sources)\n if source_type == SourceType.card:\n if event.verb.endswith(\"deleted\") and customer_data:\n # On customer.source.deleted, we do not delete the object,\n # we merely unlink it.\n # customer = Customer.objects.get(id=customer_data[\"id\"])\n # NOTE: for now, customer.sources still points to Card\n # Also, https://github.com/dj-stripe/dj-stripe/issues/576\n models.Card.objects.filter(id=customer_data.get(\"id\", \"\")).delete()\n models.DjstripePaymentMethod.objects.filter(\n id=customer_data.get(\"id\", \"\")\n ).delete()\n else:\n _handle_crud_like_event(target_cls=models.Card, event=event)\n\n\[email protected](\"customer.subscription\")\ndef customer_subscription_webhook_handler(event):\n \"\"\"Handle updates to customer subscription objects.\n\n Docs an example subscription webhook response:\n https://stripe.com/docs/api#subscription_object\n \"\"\"\n _handle_crud_like_event(target_cls=models.Subscription, event=event)\n\n\[email protected](\"payment_method\")\ndef payment_method_handler(event):\n \"\"\"\n Handle updates to payment_method objects\n :param event:\n :return:\n\n Docs for:\n - payment_method: https://stripe.com/docs/api/payment_methods\n \"\"\"\n id_ = event.data.get(\"object\", {}).get(\"id\", None)\n\n if (\n event.parts == [\"payment_method\", 
\"detached\"]\n and id_\n and id_.startswith(\"card_\")\n ):\n # Special case to handle a quirk in stripe's wrapping of legacy \"card\" objects\n # with payment_methods - card objects are deleted on detach, so treat this as\n # a delete event\n _handle_crud_like_event(\n target_cls=models.PaymentMethod,\n event=event,\n crud_type=CrudType(deleted=True),\n )\n else:\n _handle_crud_like_event(target_cls=models.PaymentMethod, event=event)\n\n\[email protected](\n \"transfer\",\n \"charge\",\n \"coupon\",\n \"invoice\",\n \"invoiceitem\",\n \"payment_intent\",\n \"plan\",\n \"product\",\n \"setup_intent\",\n \"source\",\n)\ndef other_object_webhook_handler(event):\n \"\"\"\n Handle updates to transfer, charge, coupon, invoice, invoiceitem, payment_intent,\n plan, product, setup_intent and source objects.\n\n Docs for:\n - charge: https://stripe.com/docs/api#charges\n - coupon: https://stripe.com/docs/api#coupons\n - invoice: https://stripe.com/docs/api#invoices\n - invoiceitem: https://stripe.com/docs/api#invoiceitems\n - plan: https://stripe.com/docs/api#plans\n - product: https://stripe.com/docs/api#products\n - source: https://stripe.com/docs/api#sources\n - payment_intent: https://stripe.com/docs/api/payment_intents\n \"\"\"\n\n if event.parts[:2] == [\"charge\", \"dispute\"]:\n # Do not attempt to handle charge.dispute.* events.\n # We do not have a Dispute model yet.\n target_cls = models.Dispute\n else:\n target_cls = {\n \"charge\": models.Charge,\n \"coupon\": models.Coupon,\n \"invoice\": models.Invoice,\n \"invoiceitem\": models.InvoiceItem,\n \"payment_intent\": models.PaymentIntent,\n \"plan\": models.Plan,\n \"product\": models.Product,\n \"transfer\": models.Transfer,\n \"setup_intent\": models.SetupIntent,\n \"source\": models.Source,\n }.get(event.category)\n\n _handle_crud_like_event(target_cls=target_cls, event=event)\n\n\n#\n# Helpers\n#\n\n\nclass CrudType(object):\n \"\"\"Helper object to determine CRUD-like event state.\"\"\"\n\n created = False\n updated = False\n deleted = False\n\n def __init__(self, **kwargs):\n \"\"\"Set attributes.\"\"\"\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n @property\n def valid(self):\n \"\"\"Return True if this is a CRUD-like event.\"\"\"\n return self.created or self.updated or self.deleted\n\n @classmethod\n def determine(cls, event, verb=None, exact=False):\n \"\"\"\n Determine if the event verb is a crud_type (without the 'R') event.\n\n :param event:\n :type event: models.Event\n :param verb: The event verb to examine.\n :type verb: str\n :param exact: If True, match crud_type to event verb string exactly.\n :type exact: bool\n :returns: The CrudType state object.\n :rtype: CrudType\n \"\"\"\n verb = verb or event.verb\n\n def check(crud_type_event):\n if exact:\n return verb == crud_type_event\n else:\n return verb.endswith(crud_type_event)\n\n created = updated = deleted = False\n\n if check(\"updated\"):\n updated = True\n elif check(\"created\"):\n created = True\n elif check(\"deleted\"):\n deleted = True\n\n return cls(created=created, updated=updated, deleted=deleted)\n\n\ndef _handle_crud_like_event(\n target_cls,\n event,\n data=None,\n verb=None,\n id=None,\n customer=None,\n crud_type=None,\n crud_exact=False,\n crud_valid=False,\n):\n \"\"\"\n Helper to process crud_type-like events for objects.\n\n Non-deletes (creates, updates and \"anything else\" events) are treated as\n update_or_create events - The object will be retrieved locally, then it is\n synchronised with the Stripe API for parity.\n\n Deletes only 
occur for delete events and cause the object to be deleted\n from the local database, if it existed. If it doesn't exist then it is\n ignored (but the event processing still succeeds).\n\n :param target_cls: The djstripe model being handled.\n :type target_cls: Type[models.StripeModel]\n :param event: The event object\n :type event: models.Event\n :param data: The event object data (defaults to ``event.data``).\n :param verb: The event verb (defaults to ``event.verb``).\n :type verb: str\n :param id: The object Stripe ID (defaults to ``object.id``).\n :type id: str\n :param customer: The customer object (defaults to ``event.customer``).\n :param crud_type: The CrudType object (determined by default).\n :param crud_exact: If True, match verb against CRUD type exactly.\n :param crud_valid: If True, CRUD type must match valid type.\n :returns: The object (if any) and the event CrudType.\n :rtype: Tuple[models.StripeModel, CrudType]\n \"\"\"\n data = data or event.data\n id = id or data.get(\"object\", {}).get(\"id\", None)\n\n if not id:\n # We require an object when applying CRUD-like events, so if there's\n # no ID the event is ignored/dropped. This happens in events such as\n # invoice.upcoming, which refer to a future (non-existant) invoice.\n logger.debug(\n \"Ignoring %r Stripe event without object ID: %r\", event.type, event\n )\n return\n\n verb = verb or event.verb\n customer = customer or event.customer\n crud_type = crud_type or CrudType.determine(\n event=event, verb=verb, exact=crud_exact\n )\n obj = None\n\n if crud_valid and not crud_type.valid:\n logger.debug(\n \"Ignoring %r Stripe event without valid CRUD type: %r\", event.type, event\n )\n return\n\n if crud_type.deleted:\n qs = target_cls.objects.filter(id=id)\n if target_cls is models.Customer and qs.exists():\n qs.get().purge()\n else:\n obj = target_cls.objects.filter(id=id).delete()\n else:\n # Any other event type (creates, updates, etc.) - This can apply to\n # verbs that aren't strictly CRUD but Stripe do intend an update. Such\n # as invoice.payment_failed.\n kwargs = {\"id\": id}\n if hasattr(target_cls, \"customer\"):\n kwargs[\"customer\"] = customer\n data = target_cls(**kwargs).api_retrieve()\n obj = target_cls.sync_from_stripe_data(data)\n\n return obj, crud_type\n", "path": "djstripe/event_handlers.py"}], "after_files": [{"content": "\"\"\"\nWebhook event handlers for the various models\n\nStripe docs for Events: https://stripe.com/docs/api/events\nStripe docs for Webhooks: https://stripe.com/docs/webhooks\n\nTODO: Implement webhook event handlers for all the models that need to\n respond to webhook events.\n\nNOTE:\n Event data is not guaranteed to be in the correct API version format.\n See #116. When writing a webhook handler, make sure to first\n re-retrieve the object you wish to process.\n\n\"\"\"\nimport logging\n\nfrom . 
import models, webhooks\nfrom .enums import SourceType\nfrom .utils import convert_tstamp\n\nlogger = logging.getLogger(__name__)\n\n\[email protected](\"customer\")\ndef customer_webhook_handler(event):\n \"\"\"Handle updates to customer objects.\n\n First determines the crud_type and then handles the event if a customer\n exists locally.\n As customers are tied to local users, djstripe will not create customers that\n do not already exist locally.\n\n Docs and an example customer webhook response:\n https://stripe.com/docs/api#customer_object\n \"\"\"\n if event.customer:\n # As customers are tied to local users, djstripe will not create\n # customers that do not already exist locally.\n _handle_crud_like_event(\n target_cls=models.Customer, event=event, crud_exact=True, crud_valid=True\n )\n\n\[email protected](\"customer.discount\")\ndef customer_discount_webhook_handler(event):\n \"\"\"Handle updates to customer discount objects.\n\n Docs: https://stripe.com/docs/api#discounts\n\n Because there is no concept of a \"Discount\" model in dj-stripe (due to the\n lack of a stripe id on them), this is a little different to the other\n handlers.\n \"\"\"\n\n crud_type = CrudType.determine(event=event)\n discount_data = event.data.get(\"object\", {})\n coupon_data = discount_data.get(\"coupon\", {})\n customer = event.customer\n\n if crud_type.created or crud_type.updated:\n coupon, _ = _handle_crud_like_event(\n target_cls=models.Coupon,\n event=event,\n data=coupon_data,\n id=coupon_data.get(\"id\"),\n )\n coupon_start = discount_data.get(\"start\")\n coupon_end = discount_data.get(\"end\")\n else:\n coupon = None\n coupon_start = None\n coupon_end = None\n\n customer.coupon = coupon\n customer.coupon_start = convert_tstamp(coupon_start)\n customer.coupon_end = convert_tstamp(coupon_end)\n customer.save()\n\n\[email protected](\"customer.source\")\ndef customer_source_webhook_handler(event):\n \"\"\"Handle updates to customer payment-source objects.\n\n Docs: https://stripe.com/docs/api#customer_object-sources.\n \"\"\"\n customer_data = event.data.get(\"object\", {})\n source_type = customer_data.get(\"object\", {})\n\n # TODO: handle other types of sources\n # (https://stripe.com/docs/api#customer_object-sources)\n if source_type == SourceType.card:\n if event.verb.endswith(\"deleted\") and customer_data:\n # On customer.source.deleted, we do not delete the object,\n # we merely unlink it.\n # customer = Customer.objects.get(id=customer_data[\"id\"])\n # NOTE: for now, customer.sources still points to Card\n # Also, https://github.com/dj-stripe/dj-stripe/issues/576\n models.Card.objects.filter(id=customer_data.get(\"id\", \"\")).delete()\n models.DjstripePaymentMethod.objects.filter(\n id=customer_data.get(\"id\", \"\")\n ).delete()\n else:\n _handle_crud_like_event(target_cls=models.Card, event=event)\n\n\[email protected](\"customer.subscription\")\ndef customer_subscription_webhook_handler(event):\n \"\"\"Handle updates to customer subscription objects.\n\n Docs an example subscription webhook response:\n https://stripe.com/docs/api#subscription_object\n \"\"\"\n\n # customer.subscription.deleted doesn't actually delete the subscription\n # on the stripe side, it updates it to canceled status, so override\n # crud_type to update to match.\n crud_type = CrudType.determine(event=event)\n if crud_type.deleted:\n crud_type = CrudType(updated=True)\n _handle_crud_like_event(\n target_cls=models.Subscription, event=event, crud_type=crud_type\n )\n\n\[email protected](\"payment_method\")\ndef 
payment_method_handler(event):\n \"\"\"\n Handle updates to payment_method objects\n :param event:\n :return:\n\n Docs for:\n - payment_method: https://stripe.com/docs/api/payment_methods\n \"\"\"\n id_ = event.data.get(\"object\", {}).get(\"id\", None)\n\n if (\n event.parts == [\"payment_method\", \"detached\"]\n and id_\n and id_.startswith(\"card_\")\n ):\n # Special case to handle a quirk in stripe's wrapping of legacy \"card\" objects\n # with payment_methods - card objects are deleted on detach, so treat this as\n # a delete event\n _handle_crud_like_event(\n target_cls=models.PaymentMethod,\n event=event,\n crud_type=CrudType(deleted=True),\n )\n else:\n _handle_crud_like_event(target_cls=models.PaymentMethod, event=event)\n\n\[email protected](\n \"transfer\",\n \"charge\",\n \"coupon\",\n \"invoice\",\n \"invoiceitem\",\n \"payment_intent\",\n \"plan\",\n \"product\",\n \"setup_intent\",\n \"source\",\n)\ndef other_object_webhook_handler(event):\n \"\"\"\n Handle updates to transfer, charge, coupon, invoice, invoiceitem, payment_intent,\n plan, product, setup_intent and source objects.\n\n Docs for:\n - charge: https://stripe.com/docs/api#charges\n - coupon: https://stripe.com/docs/api#coupons\n - invoice: https://stripe.com/docs/api#invoices\n - invoiceitem: https://stripe.com/docs/api#invoiceitems\n - plan: https://stripe.com/docs/api#plans\n - product: https://stripe.com/docs/api#products\n - source: https://stripe.com/docs/api#sources\n - payment_intent: https://stripe.com/docs/api/payment_intents\n \"\"\"\n\n if event.parts[:2] == [\"charge\", \"dispute\"]:\n # Do not attempt to handle charge.dispute.* events.\n # We do not have a Dispute model yet.\n target_cls = models.Dispute\n else:\n target_cls = {\n \"charge\": models.Charge,\n \"coupon\": models.Coupon,\n \"invoice\": models.Invoice,\n \"invoiceitem\": models.InvoiceItem,\n \"payment_intent\": models.PaymentIntent,\n \"plan\": models.Plan,\n \"product\": models.Product,\n \"transfer\": models.Transfer,\n \"setup_intent\": models.SetupIntent,\n \"source\": models.Source,\n }.get(event.category)\n\n _handle_crud_like_event(target_cls=target_cls, event=event)\n\n\n#\n# Helpers\n#\n\n\nclass CrudType(object):\n \"\"\"Helper object to determine CRUD-like event state.\"\"\"\n\n created = False\n updated = False\n deleted = False\n\n def __init__(self, **kwargs):\n \"\"\"Set attributes.\"\"\"\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n @property\n def valid(self):\n \"\"\"Return True if this is a CRUD-like event.\"\"\"\n return self.created or self.updated or self.deleted\n\n @classmethod\n def determine(cls, event, verb=None, exact=False):\n \"\"\"\n Determine if the event verb is a crud_type (without the 'R') event.\n\n :param event:\n :type event: models.Event\n :param verb: The event verb to examine.\n :type verb: str\n :param exact: If True, match crud_type to event verb string exactly.\n :type exact: bool\n :returns: The CrudType state object.\n :rtype: CrudType\n \"\"\"\n verb = verb or event.verb\n\n def check(crud_type_event):\n if exact:\n return verb == crud_type_event\n else:\n return verb.endswith(crud_type_event)\n\n created = updated = deleted = False\n\n if check(\"updated\"):\n updated = True\n elif check(\"created\"):\n created = True\n elif check(\"deleted\"):\n deleted = True\n\n return cls(created=created, updated=updated, deleted=deleted)\n\n\ndef _handle_crud_like_event(\n target_cls,\n event,\n data=None,\n verb=None,\n id=None,\n customer=None,\n crud_type=None,\n crud_exact=False,\n 
crud_valid=False,\n):\n \"\"\"\n Helper to process crud_type-like events for objects.\n\n Non-deletes (creates, updates and \"anything else\" events) are treated as\n update_or_create events - The object will be retrieved locally, then it is\n synchronised with the Stripe API for parity.\n\n Deletes only occur for delete events and cause the object to be deleted\n from the local database, if it existed. If it doesn't exist then it is\n ignored (but the event processing still succeeds).\n\n :param target_cls: The djstripe model being handled.\n :type target_cls: Type[models.StripeModel]\n :param event: The event object\n :type event: models.Event\n :param data: The event object data (defaults to ``event.data``).\n :param verb: The event verb (defaults to ``event.verb``).\n :type verb: str\n :param id: The object Stripe ID (defaults to ``object.id``).\n :type id: str\n :param customer: The customer object (defaults to ``event.customer``).\n :param crud_type: The CrudType object (determined by default).\n :param crud_exact: If True, match verb against CRUD type exactly.\n :param crud_valid: If True, CRUD type must match valid type.\n :returns: The object (if any) and the event CrudType.\n :rtype: Tuple[models.StripeModel, CrudType]\n \"\"\"\n data = data or event.data\n id = id or data.get(\"object\", {}).get(\"id\", None)\n\n if not id:\n # We require an object when applying CRUD-like events, so if there's\n # no ID the event is ignored/dropped. This happens in events such as\n # invoice.upcoming, which refer to a future (non-existant) invoice.\n logger.debug(\n \"Ignoring %r Stripe event without object ID: %r\", event.type, event\n )\n return\n\n verb = verb or event.verb\n customer = customer or event.customer\n crud_type = crud_type or CrudType.determine(\n event=event, verb=verb, exact=crud_exact\n )\n obj = None\n\n if crud_valid and not crud_type.valid:\n logger.debug(\n \"Ignoring %r Stripe event without valid CRUD type: %r\", event.type, event\n )\n return\n\n if crud_type.deleted:\n qs = target_cls.objects.filter(id=id)\n if target_cls is models.Customer and qs.exists():\n qs.get().purge()\n else:\n obj = target_cls.objects.filter(id=id).delete()\n else:\n # Any other event type (creates, updates, etc.) - This can apply to\n # verbs that aren't strictly CRUD but Stripe do intend an update. Such\n # as invoice.payment_failed.\n kwargs = {\"id\": id}\n if hasattr(target_cls, \"customer\"):\n kwargs[\"customer\"] = customer\n data = target_cls(**kwargs).api_retrieve()\n obj = target_cls.sync_from_stripe_data(data)\n\n return obj, crud_type\n", "path": "djstripe/event_handlers.py"}]}
| 3,715 | 198 |
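The golden diff in the record above hinges on one idea: a `customer.subscription.deleted` webhook should be treated as an update, because Stripe only moves the subscription to a canceled status rather than deleting it. The following self-contained sketch restates that idea with simplified stand-in classes; it is not dj-stripe's actual code, and the names only loosely mirror the record.

```python
# Minimal sketch of the CrudType override applied in the golden diff above.
class CrudType:
    def __init__(self, created=False, updated=False, deleted=False):
        self.created, self.updated, self.deleted = created, updated, deleted

    @classmethod
    def determine(cls, verb):
        # Classify a webhook verb by its suffix, as in the record's helper.
        return cls(
            created=verb.endswith("created"),
            updated=verb.endswith("updated"),
            deleted=verb.endswith("deleted"),
        )


def subscription_crud_type(verb):
    # customer.subscription.deleted does not delete the object on Stripe's side;
    # it moves the subscription to a canceled status, so map delete onto update
    # and keep the local row instead of purging it.
    crud_type = CrudType.determine(verb)
    if crud_type.deleted:
        crud_type = CrudType(updated=True)
    return crud_type


assert subscription_crud_type("customer.subscription.deleted").updated
assert not subscription_crud_type("customer.subscription.deleted").deleted
assert subscription_crud_type("customer.subscription.updated").updated
```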
gh_patches_debug_33067
|
rasdani/github-patches
|
git_diff
|
mathesar-foundation__mathesar-1572
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Refactor dependents tests to use library usecase
## Problem
Initially, both DB and API tests were created using dummy data and were quite difficult to read.
## Proposed solution
Refactor them to use a common use case and simplify the cases.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `db/dependents/dependents_utils.py`
Content:
```
1 from sqlalchemy import MetaData, Table, any_, column, exists, func, literal, select, text, true, union
2 from sqlalchemy.dialects.postgresql import array
3
4 # OIDs assigned during normal database operation are constrained to be 16384 or higher.
5 USER_DEFINED_OBJECTS_MIN_OID = 16384
6 # automatic and normal dependents
7 PG_DEPENDENT_TYPES = ['a', 'n']
8 PG_CLASS_CATALOGUE_NAME = '\'pg_class\''
9 START_LEVEL = 1
10 MAX_LEVEL = 10
11
12
13 def get_dependents_graph(referenced_object_id, engine):
14 dependency_pairs = _get_typed_dependency_pairs_stmt(engine)
15 dependency_pairs_cte = dependency_pairs.cte(recursive=True, name='dependency_pairs_cte')
16
17 # anchor member which includes all dependents of a requested object
18 anchor = select(
19 dependency_pairs_cte,
20 literal(START_LEVEL).label('level'),
21 array([dependency_pairs_cte.c.refobjid]).label('dependency_chain')) \
22 .where(dependency_pairs_cte.c.refobjid == referenced_object_id) \
23 .where(dependency_pairs_cte.c.objid != referenced_object_id)
24 anchor = anchor.cte('cte')
25
26 # recursive member which includes dependents for each object of the previous level
27 recursive = select(
28 dependency_pairs_cte,
29 (anchor.c.level + 1).label('level'),
30 anchor.c.dependency_chain + array([anchor.c.objid])) \
31 .where(anchor.c.level < MAX_LEVEL) \
32 .where(dependency_pairs_cte.c.objid != any_(anchor.c.dependency_chain)) \
33 .where(dependency_pairs_cte.c.objid != dependency_pairs_cte.c.refobjid)
34 recursive = recursive.join(anchor, dependency_pairs_cte.c.refobjid == anchor.c.objid)
35
36 recursive_stmt = anchor.union(recursive)
37 stmt = select(recursive_stmt)
38
39 with engine.connect() as conn:
40 result = conn.execute(stmt)
41
42 return _get_structured_result(result)
43
44
45 # finding table dependents based on foreign key constraints from the referenced tables
46 def _get_table_dependents(foreign_key_dependents, pg_constraint_table):
47 # TODO: update refobjsubid with actual values when working on columns
48 pg_identify_object = _get_pg_identify_object_lateral_stmt(
49 text(f'{PG_CLASS_CATALOGUE_NAME}::regclass::oid'), pg_constraint_table.c.conrelid, 0)
50
51 pg_identify_refobject = _get_pg_identify_object_lateral_stmt(
52 foreign_key_dependents.c.refclassid, foreign_key_dependents.c.refobjid, 0)
53
54 # conrelid in this case is the oid of the table which a constraint resides in
55 return select(
56 foreign_key_dependents.c.classid,
57 pg_constraint_table.c.conrelid.label('objid'),
58 foreign_key_dependents.c.objsubid,
59 foreign_key_dependents.c.refclassid,
60 foreign_key_dependents.c.refobjid,
61 foreign_key_dependents.c.refobjsubid,
62 foreign_key_dependents.c.deptype,
63 pg_identify_object.c.name.label('objname'),
64 pg_identify_object.c.type.label('objtype'),
65 pg_identify_refobject.c.name.label('refobjname'),
66 pg_identify_refobject.c.type.label('refobjtype')) \
67 .select_from(foreign_key_dependents) \
68 .join(pg_constraint_table, pg_constraint_table.c.oid == foreign_key_dependents.c.objid) \
69 .join(pg_identify_object, true()) \
70 .join(pg_identify_refobject, true()) \
71 .where(pg_constraint_table.c.confrelid != 0) \
72 .group_by(
73 foreign_key_dependents,
74 pg_constraint_table.c.conrelid,
75 pg_identify_object.c.name,
76 pg_identify_object.c.type,
77 pg_identify_refobject.c.name,
78 pg_identify_refobject.c.type)
79
80
81 def _get_foreign_key_constraint_dependents(pg_identify_object, dependency_pair):
82 return dependency_pair.where(pg_identify_object.c.type == 'table constraint')
83
84
85 # getting a full list of dependents and identifying them
86 def _get_dependency_pairs_stmt(pg_depend, pg_identify_object, pg_identify_refobject):
87 result = select(
88 pg_depend,
89 pg_identify_object.c.name.label('objname'),
90 pg_identify_object.c.type.label('objtype'),
91 pg_identify_refobject.c.name.label('refobjname'),
92 pg_identify_refobject.c.type.label('refobjtype')) \
93 .select_from(pg_depend) \
94 .join(pg_identify_object, true()) \
95 .join(pg_identify_refobject, true()) \
96 .where(pg_depend.c.deptype == any_(array(PG_DEPENDENT_TYPES))) \
97 .where(pg_depend.c.objid >= USER_DEFINED_OBJECTS_MIN_OID) \
98 .group_by(
99 pg_depend,
100 pg_identify_object.c.name,
101 pg_identify_object.c.type,
102 pg_identify_refobject.c.name,
103 pg_identify_refobject.c.type)
104
105 return result
106
107
108 def _get_pg_depend_table(engine, metadata):
109 return Table("pg_depend", metadata, autoload_with=engine)
110
111
112 def _get_pg_constraint_table(engine, metadata):
113 return Table("pg_constraint", metadata, autoload_with=engine)
114
115
116 def _get_pg_identify_object_lateral_stmt(classid, objid, objsubid):
117 return select(
118 column("name"),
119 column("type")) \
120 .select_from(func.pg_identify_object(
121 classid,
122 objid,
123 objsubid)) \
124 .lateral()
125
126
127 def _get_typed_dependency_pairs_stmt(engine):
128 metadata = MetaData()
129
130 pg_depend = _get_pg_depend_table(engine, metadata)
131 pg_identify_object = _get_pg_identify_object_lateral_stmt(
132 pg_depend.c.classid, pg_depend.c.objid, pg_depend.c.objsubid)
133 pg_identify_refobject = _get_pg_identify_object_lateral_stmt(
134 pg_depend.c.refclassid, pg_depend.c.refobjid, 0)
135 pg_constraint = _get_pg_constraint_table(engine, metadata)
136
137 # each statement filters the base statement extracting dependents of a specific type
138 # so it's easy to exclude particular types or add new
139 dependency_pair = _get_dependency_pairs_stmt(pg_depend, pg_identify_object, pg_identify_refobject)
140 foreign_key_constraint_dependents = _get_foreign_key_constraint_dependents(pg_identify_object, dependency_pair).cte('foreign_key_constraint_dependents')
141 table_dependents = _get_table_dependents(foreign_key_constraint_dependents, pg_constraint).cte('table_dependents')
142
143 return union(
144 select(foreign_key_constraint_dependents),
145 select(table_dependents))
146
147
148 def has_dependencies(referenced_object_id, engine):
149 metadata = MetaData()
150
151 pg_depend = _get_pg_depend_table(engine, metadata)
152
153 stmt = select(
154 exists(
155 select().select_from(pg_depend)
156 .where(pg_depend.c.refobjid == referenced_object_id)
157 .where(pg_depend.c.deptype == any_(array(PG_DEPENDENT_TYPES)))
158 .where(pg_depend.c.objid >= USER_DEFINED_OBJECTS_MIN_OID)
159 )
160 )
161
162 with engine.connect() as conn:
163 result = conn.execute(stmt).scalar()
164
165 return result
166
167
168 def _get_structured_result(dependency_graph_result):
169 result = []
170 for dependency_pair in dependency_graph_result:
171 d = {}
172 d['level'] = dependency_pair.level
173 d['obj'] = {'objid': dependency_pair.objid, 'type': dependency_pair.objtype}
174 d['parent_obj'] = {'objid': dependency_pair.refobjid, 'type': dependency_pair.refobjtype}
175 result.append(d)
176
177 return result
178
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/db/dependents/dependents_utils.py b/db/dependents/dependents_utils.py
--- a/db/dependents/dependents_utils.py
+++ b/db/dependents/dependents_utils.py
@@ -26,7 +26,7 @@
# recursive member which includes dependents for each object of the previous level
recursive = select(
dependency_pairs_cte,
- (anchor.c.level + 1).label('level'),
+ (anchor.c.level + 1),
anchor.c.dependency_chain + array([anchor.c.objid])) \
.where(anchor.c.level < MAX_LEVEL) \
.where(dependency_pairs_cte.c.objid != any_(anchor.c.dependency_chain)) \
@@ -82,7 +82,7 @@
return dependency_pair.where(pg_identify_object.c.type == 'table constraint')
-# getting a full list of dependents and identifying them
+# stmt for getting a full list of dependents and identifying them
def _get_dependency_pairs_stmt(pg_depend, pg_identify_object, pg_identify_refobject):
result = select(
pg_depend,
@@ -136,8 +136,8 @@
# each statement filters the base statement extracting dependents of a specific type
# so it's easy to exclude particular types or add new
- dependency_pair = _get_dependency_pairs_stmt(pg_depend, pg_identify_object, pg_identify_refobject)
- foreign_key_constraint_dependents = _get_foreign_key_constraint_dependents(pg_identify_object, dependency_pair).cte('foreign_key_constraint_dependents')
+ dependency_pairs = _get_dependency_pairs_stmt(pg_depend, pg_identify_object, pg_identify_refobject)
+ foreign_key_constraint_dependents = _get_foreign_key_constraint_dependents(pg_identify_object, dependency_pairs).cte('foreign_key_constraint_dependents')
table_dependents = _get_table_dependents(foreign_key_constraint_dependents, pg_constraint).cte('table_dependents')
return union(
|
{"golden_diff": "diff --git a/db/dependents/dependents_utils.py b/db/dependents/dependents_utils.py\n--- a/db/dependents/dependents_utils.py\n+++ b/db/dependents/dependents_utils.py\n@@ -26,7 +26,7 @@\n # recursive member which includes dependents for each object of the previous level\n recursive = select(\n dependency_pairs_cte,\n- (anchor.c.level + 1).label('level'),\n+ (anchor.c.level + 1),\n anchor.c.dependency_chain + array([anchor.c.objid])) \\\n .where(anchor.c.level < MAX_LEVEL) \\\n .where(dependency_pairs_cte.c.objid != any_(anchor.c.dependency_chain)) \\\n@@ -82,7 +82,7 @@\n return dependency_pair.where(pg_identify_object.c.type == 'table constraint')\n \n \n-# getting a full list of dependents and identifying them\n+# stmt for getting a full list of dependents and identifying them\n def _get_dependency_pairs_stmt(pg_depend, pg_identify_object, pg_identify_refobject):\n result = select(\n pg_depend,\n@@ -136,8 +136,8 @@\n \n # each statement filters the base statement extracting dependents of a specific type\n # so it's easy to exclude particular types or add new\n- dependency_pair = _get_dependency_pairs_stmt(pg_depend, pg_identify_object, pg_identify_refobject)\n- foreign_key_constraint_dependents = _get_foreign_key_constraint_dependents(pg_identify_object, dependency_pair).cte('foreign_key_constraint_dependents')\n+ dependency_pairs = _get_dependency_pairs_stmt(pg_depend, pg_identify_object, pg_identify_refobject)\n+ foreign_key_constraint_dependents = _get_foreign_key_constraint_dependents(pg_identify_object, dependency_pairs).cte('foreign_key_constraint_dependents')\n table_dependents = _get_table_dependents(foreign_key_constraint_dependents, pg_constraint).cte('table_dependents')\n \n return union(\n", "issue": "Refactor dependents tests to use library usecase\n## Problem\r\nInitially, both DB and API tests were created using dummy data and were quite difficult to read. 
\r\n\r\n## Proposed solution\r\nRefactor them to use a common use case and simplify the cases.\r\n\n", "before_files": [{"content": "from sqlalchemy import MetaData, Table, any_, column, exists, func, literal, select, text, true, union\nfrom sqlalchemy.dialects.postgresql import array\n\n# OIDs assigned during normal database operation are constrained to be 16384 or higher.\nUSER_DEFINED_OBJECTS_MIN_OID = 16384\n# automatic and normal dependents\nPG_DEPENDENT_TYPES = ['a', 'n']\nPG_CLASS_CATALOGUE_NAME = '\\'pg_class\\''\nSTART_LEVEL = 1\nMAX_LEVEL = 10\n\n\ndef get_dependents_graph(referenced_object_id, engine):\n dependency_pairs = _get_typed_dependency_pairs_stmt(engine)\n dependency_pairs_cte = dependency_pairs.cte(recursive=True, name='dependency_pairs_cte')\n\n # anchor member which includes all dependents of a requested object\n anchor = select(\n dependency_pairs_cte,\n literal(START_LEVEL).label('level'),\n array([dependency_pairs_cte.c.refobjid]).label('dependency_chain')) \\\n .where(dependency_pairs_cte.c.refobjid == referenced_object_id) \\\n .where(dependency_pairs_cte.c.objid != referenced_object_id)\n anchor = anchor.cte('cte')\n\n # recursive member which includes dependents for each object of the previous level\n recursive = select(\n dependency_pairs_cte,\n (anchor.c.level + 1).label('level'),\n anchor.c.dependency_chain + array([anchor.c.objid])) \\\n .where(anchor.c.level < MAX_LEVEL) \\\n .where(dependency_pairs_cte.c.objid != any_(anchor.c.dependency_chain)) \\\n .where(dependency_pairs_cte.c.objid != dependency_pairs_cte.c.refobjid)\n recursive = recursive.join(anchor, dependency_pairs_cte.c.refobjid == anchor.c.objid)\n\n recursive_stmt = anchor.union(recursive)\n stmt = select(recursive_stmt)\n\n with engine.connect() as conn:\n result = conn.execute(stmt)\n\n return _get_structured_result(result)\n\n\n# finding table dependents based on foreign key constraints from the referenced tables\ndef _get_table_dependents(foreign_key_dependents, pg_constraint_table):\n # TODO: update refobjsubid with actual values when working on columns\n pg_identify_object = _get_pg_identify_object_lateral_stmt(\n text(f'{PG_CLASS_CATALOGUE_NAME}::regclass::oid'), pg_constraint_table.c.conrelid, 0)\n\n pg_identify_refobject = _get_pg_identify_object_lateral_stmt(\n foreign_key_dependents.c.refclassid, foreign_key_dependents.c.refobjid, 0)\n\n # conrelid in this case is the oid of the table which a constraint resides in\n return select(\n foreign_key_dependents.c.classid,\n pg_constraint_table.c.conrelid.label('objid'),\n foreign_key_dependents.c.objsubid,\n foreign_key_dependents.c.refclassid,\n foreign_key_dependents.c.refobjid,\n foreign_key_dependents.c.refobjsubid,\n foreign_key_dependents.c.deptype,\n pg_identify_object.c.name.label('objname'),\n pg_identify_object.c.type.label('objtype'),\n pg_identify_refobject.c.name.label('refobjname'),\n pg_identify_refobject.c.type.label('refobjtype')) \\\n .select_from(foreign_key_dependents) \\\n .join(pg_constraint_table, pg_constraint_table.c.oid == foreign_key_dependents.c.objid) \\\n .join(pg_identify_object, true()) \\\n .join(pg_identify_refobject, true()) \\\n .where(pg_constraint_table.c.confrelid != 0) \\\n .group_by(\n foreign_key_dependents,\n pg_constraint_table.c.conrelid,\n pg_identify_object.c.name,\n pg_identify_object.c.type,\n pg_identify_refobject.c.name,\n pg_identify_refobject.c.type)\n\n\ndef _get_foreign_key_constraint_dependents(pg_identify_object, dependency_pair):\n return 
dependency_pair.where(pg_identify_object.c.type == 'table constraint')\n\n\n# getting a full list of dependents and identifying them\ndef _get_dependency_pairs_stmt(pg_depend, pg_identify_object, pg_identify_refobject):\n result = select(\n pg_depend,\n pg_identify_object.c.name.label('objname'),\n pg_identify_object.c.type.label('objtype'),\n pg_identify_refobject.c.name.label('refobjname'),\n pg_identify_refobject.c.type.label('refobjtype')) \\\n .select_from(pg_depend) \\\n .join(pg_identify_object, true()) \\\n .join(pg_identify_refobject, true()) \\\n .where(pg_depend.c.deptype == any_(array(PG_DEPENDENT_TYPES))) \\\n .where(pg_depend.c.objid >= USER_DEFINED_OBJECTS_MIN_OID) \\\n .group_by(\n pg_depend,\n pg_identify_object.c.name,\n pg_identify_object.c.type,\n pg_identify_refobject.c.name,\n pg_identify_refobject.c.type)\n\n return result\n\n\ndef _get_pg_depend_table(engine, metadata):\n return Table(\"pg_depend\", metadata, autoload_with=engine)\n\n\ndef _get_pg_constraint_table(engine, metadata):\n return Table(\"pg_constraint\", metadata, autoload_with=engine)\n\n\ndef _get_pg_identify_object_lateral_stmt(classid, objid, objsubid):\n return select(\n column(\"name\"),\n column(\"type\")) \\\n .select_from(func.pg_identify_object(\n classid,\n objid,\n objsubid)) \\\n .lateral()\n\n\ndef _get_typed_dependency_pairs_stmt(engine):\n metadata = MetaData()\n\n pg_depend = _get_pg_depend_table(engine, metadata)\n pg_identify_object = _get_pg_identify_object_lateral_stmt(\n pg_depend.c.classid, pg_depend.c.objid, pg_depend.c.objsubid)\n pg_identify_refobject = _get_pg_identify_object_lateral_stmt(\n pg_depend.c.refclassid, pg_depend.c.refobjid, 0)\n pg_constraint = _get_pg_constraint_table(engine, metadata)\n\n # each statement filters the base statement extracting dependents of a specific type\n # so it's easy to exclude particular types or add new\n dependency_pair = _get_dependency_pairs_stmt(pg_depend, pg_identify_object, pg_identify_refobject)\n foreign_key_constraint_dependents = _get_foreign_key_constraint_dependents(pg_identify_object, dependency_pair).cte('foreign_key_constraint_dependents')\n table_dependents = _get_table_dependents(foreign_key_constraint_dependents, pg_constraint).cte('table_dependents')\n\n return union(\n select(foreign_key_constraint_dependents),\n select(table_dependents))\n\n\ndef has_dependencies(referenced_object_id, engine):\n metadata = MetaData()\n\n pg_depend = _get_pg_depend_table(engine, metadata)\n\n stmt = select(\n exists(\n select().select_from(pg_depend)\n .where(pg_depend.c.refobjid == referenced_object_id)\n .where(pg_depend.c.deptype == any_(array(PG_DEPENDENT_TYPES)))\n .where(pg_depend.c.objid >= USER_DEFINED_OBJECTS_MIN_OID)\n )\n )\n\n with engine.connect() as conn:\n result = conn.execute(stmt).scalar()\n\n return result\n\n\ndef _get_structured_result(dependency_graph_result):\n result = []\n for dependency_pair in dependency_graph_result:\n d = {}\n d['level'] = dependency_pair.level\n d['obj'] = {'objid': dependency_pair.objid, 'type': dependency_pair.objtype}\n d['parent_obj'] = {'objid': dependency_pair.refobjid, 'type': dependency_pair.refobjtype}\n result.append(d)\n\n return result\n", "path": "db/dependents/dependents_utils.py"}], "after_files": [{"content": "from sqlalchemy import MetaData, Table, any_, column, exists, func, literal, select, text, true, union\nfrom sqlalchemy.dialects.postgresql import array\n\n# OIDs assigned during normal database operation are constrained to be 16384 or 
higher.\nUSER_DEFINED_OBJECTS_MIN_OID = 16384\n# automatic and normal dependents\nPG_DEPENDENT_TYPES = ['a', 'n']\nPG_CLASS_CATALOGUE_NAME = '\\'pg_class\\''\nSTART_LEVEL = 1\nMAX_LEVEL = 10\n\n\ndef get_dependents_graph(referenced_object_id, engine):\n dependency_pairs = _get_typed_dependency_pairs_stmt(engine)\n dependency_pairs_cte = dependency_pairs.cte(recursive=True, name='dependency_pairs_cte')\n\n # anchor member which includes all dependents of a requested object\n anchor = select(\n dependency_pairs_cte,\n literal(START_LEVEL).label('level'),\n array([dependency_pairs_cte.c.refobjid]).label('dependency_chain')) \\\n .where(dependency_pairs_cte.c.refobjid == referenced_object_id) \\\n .where(dependency_pairs_cte.c.objid != referenced_object_id)\n anchor = anchor.cte('cte')\n\n # recursive member which includes dependents for each object of the previous level\n recursive = select(\n dependency_pairs_cte,\n (anchor.c.level + 1),\n anchor.c.dependency_chain + array([anchor.c.objid])) \\\n .where(anchor.c.level < MAX_LEVEL) \\\n .where(dependency_pairs_cte.c.objid != any_(anchor.c.dependency_chain)) \\\n .where(dependency_pairs_cte.c.objid != dependency_pairs_cte.c.refobjid)\n recursive = recursive.join(anchor, dependency_pairs_cte.c.refobjid == anchor.c.objid)\n\n recursive_stmt = anchor.union(recursive)\n stmt = select(recursive_stmt)\n\n with engine.connect() as conn:\n result = conn.execute(stmt)\n\n return _get_structured_result(result)\n\n\n# finding table dependents based on foreign key constraints from the referenced tables\ndef _get_table_dependents(foreign_key_dependents, pg_constraint_table):\n # TODO: update refobjsubid with actual values when working on columns\n pg_identify_object = _get_pg_identify_object_lateral_stmt(\n text(f'{PG_CLASS_CATALOGUE_NAME}::regclass::oid'), pg_constraint_table.c.conrelid, 0)\n\n pg_identify_refobject = _get_pg_identify_object_lateral_stmt(\n foreign_key_dependents.c.refclassid, foreign_key_dependents.c.refobjid, 0)\n\n # conrelid in this case is the oid of the table which a constraint resides in\n return select(\n foreign_key_dependents.c.classid,\n pg_constraint_table.c.conrelid.label('objid'),\n foreign_key_dependents.c.objsubid,\n foreign_key_dependents.c.refclassid,\n foreign_key_dependents.c.refobjid,\n foreign_key_dependents.c.refobjsubid,\n foreign_key_dependents.c.deptype,\n pg_identify_object.c.name.label('objname'),\n pg_identify_object.c.type.label('objtype'),\n pg_identify_refobject.c.name.label('refobjname'),\n pg_identify_refobject.c.type.label('refobjtype')) \\\n .select_from(foreign_key_dependents) \\\n .join(pg_constraint_table, pg_constraint_table.c.oid == foreign_key_dependents.c.objid) \\\n .join(pg_identify_object, true()) \\\n .join(pg_identify_refobject, true()) \\\n .where(pg_constraint_table.c.confrelid != 0) \\\n .group_by(\n foreign_key_dependents,\n pg_constraint_table.c.conrelid,\n pg_identify_object.c.name,\n pg_identify_object.c.type,\n pg_identify_refobject.c.name,\n pg_identify_refobject.c.type)\n\n\ndef _get_foreign_key_constraint_dependents(pg_identify_object, dependency_pair):\n return dependency_pair.where(pg_identify_object.c.type == 'table constraint')\n\n\n# stmt for getting a full list of dependents and identifying them\ndef _get_dependency_pairs_stmt(pg_depend, pg_identify_object, pg_identify_refobject):\n result = select(\n pg_depend,\n pg_identify_object.c.name.label('objname'),\n pg_identify_object.c.type.label('objtype'),\n pg_identify_refobject.c.name.label('refobjname'),\n 
pg_identify_refobject.c.type.label('refobjtype')) \\\n .select_from(pg_depend) \\\n .join(pg_identify_object, true()) \\\n .join(pg_identify_refobject, true()) \\\n .where(pg_depend.c.deptype == any_(array(PG_DEPENDENT_TYPES))) \\\n .where(pg_depend.c.objid >= USER_DEFINED_OBJECTS_MIN_OID) \\\n .group_by(\n pg_depend,\n pg_identify_object.c.name,\n pg_identify_object.c.type,\n pg_identify_refobject.c.name,\n pg_identify_refobject.c.type)\n\n return result\n\n\ndef _get_pg_depend_table(engine, metadata):\n return Table(\"pg_depend\", metadata, autoload_with=engine)\n\n\ndef _get_pg_constraint_table(engine, metadata):\n return Table(\"pg_constraint\", metadata, autoload_with=engine)\n\n\ndef _get_pg_identify_object_lateral_stmt(classid, objid, objsubid):\n return select(\n column(\"name\"),\n column(\"type\")) \\\n .select_from(func.pg_identify_object(\n classid,\n objid,\n objsubid)) \\\n .lateral()\n\n\ndef _get_typed_dependency_pairs_stmt(engine):\n metadata = MetaData()\n\n pg_depend = _get_pg_depend_table(engine, metadata)\n pg_identify_object = _get_pg_identify_object_lateral_stmt(\n pg_depend.c.classid, pg_depend.c.objid, pg_depend.c.objsubid)\n pg_identify_refobject = _get_pg_identify_object_lateral_stmt(\n pg_depend.c.refclassid, pg_depend.c.refobjid, 0)\n pg_constraint = _get_pg_constraint_table(engine, metadata)\n\n # each statement filters the base statement extracting dependents of a specific type\n # so it's easy to exclude particular types or add new\n dependency_pairs = _get_dependency_pairs_stmt(pg_depend, pg_identify_object, pg_identify_refobject)\n foreign_key_constraint_dependents = _get_foreign_key_constraint_dependents(pg_identify_object, dependency_pairs).cte('foreign_key_constraint_dependents')\n table_dependents = _get_table_dependents(foreign_key_constraint_dependents, pg_constraint).cte('table_dependents')\n\n return union(\n select(foreign_key_constraint_dependents),\n select(table_dependents))\n\n\ndef has_dependencies(referenced_object_id, engine):\n metadata = MetaData()\n\n pg_depend = _get_pg_depend_table(engine, metadata)\n\n stmt = select(\n exists(\n select().select_from(pg_depend)\n .where(pg_depend.c.refobjid == referenced_object_id)\n .where(pg_depend.c.deptype == any_(array(PG_DEPENDENT_TYPES)))\n .where(pg_depend.c.objid >= USER_DEFINED_OBJECTS_MIN_OID)\n )\n )\n\n with engine.connect() as conn:\n result = conn.execute(stmt).scalar()\n\n return result\n\n\ndef _get_structured_result(dependency_graph_result):\n result = []\n for dependency_pair in dependency_graph_result:\n d = {}\n d['level'] = dependency_pair.level\n d['obj'] = {'objid': dependency_pair.objid, 'type': dependency_pair.objtype}\n d['parent_obj'] = {'objid': dependency_pair.refobjid, 'type': dependency_pair.refobjtype}\n result.append(d)\n\n return result\n", "path": "db/dependents/dependents_utils.py"}]}
| 2,428 | 433 |
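As a usage illustration for the dependents helpers shown in the record above, here is a minimal sketch built on the two public functions in `db/dependents/dependents_utils.py`. It is only a sketch under stated assumptions: the connection URL and table OID are placeholders, and it presumes a reachable PostgreSQL database containing user-defined objects (OIDs of 16384 or higher).

```python
# Usage sketch only: the connection string and OID below are illustrative placeholders.
from sqlalchemy import create_engine

from db.dependents.dependents_utils import get_dependents_graph, has_dependencies

engine = create_engine("postgresql+psycopg2://user:password@localhost/mathesar")
table_oid = 16402  # assumed OID of a user-defined table (>= USER_DEFINED_OBJECTS_MIN_OID)

if has_dependencies(table_oid, engine):
    # Each entry mirrors _get_structured_result: a level plus object/parent descriptors.
    for node in get_dependents_graph(table_oid, engine):
        print(node["level"], node["obj"]["type"], node["obj"]["objid"],
              "depends on", node["parent_obj"]["objid"])
else:
    print("pg_depend records no dependents for this OID.")
```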
gh_patches_debug_24187
|
rasdani/github-patches
|
git_diff
|
vega__altair-1539
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support built-in vega themes
See [vega-themes](https://github.com/vega/vega-themes). Themes should be supported via the current theme infrastructure, maybe something like this:
```python
alt.themes.enable('vega.themes.dark')
```
We'll have to think about how to best populate the list of available themes, and how to make this work cleanly with user-specified themes from within Altair.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `altair/vegalite/v3/theme.py`
Content:
```
1 """Tools for enabling and registering chart themes"""
2
3 from ...utils.theme import ThemeRegistry
4
5 # The entry point group that can be used by other packages to declare other
6 # renderers that will be auto-detected. Explicit registration is also
7 # allowed by the PluginRegistery API.
8 ENTRY_POINT_GROUP = 'altair.vegalite.v3.theme' # type: str
9 themes = ThemeRegistry(entry_point_group=ENTRY_POINT_GROUP)
10
11 themes.register('default', lambda: {"config": {"view": {"width": 400, "height": 300},
12 "mark": {"tooltip": None}}})
13 themes.register('opaque', lambda: {"config": {"background": "white",
14 "view": {"width": 400, "height": 300},
15 "mark": {"tooltip": None}}})
16 themes.register('none', lambda: {})
17 themes.enable('default')
18
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/altair/vegalite/v3/theme.py b/altair/vegalite/v3/theme.py
--- a/altair/vegalite/v3/theme.py
+++ b/altair/vegalite/v3/theme.py
@@ -2,6 +2,23 @@
from ...utils.theme import ThemeRegistry
+VEGA_THEMES = ['ggplot2', 'quartz', 'vox', 'fivethirtyeight', 'dark', 'latimes']
+
+
+class VegaTheme(object):
+ """Implementation of a builtin vega theme."""
+ def __init__(self, theme):
+ self.theme = theme
+
+ def __call__(self):
+ return {"usermeta": {"embedOptions": {"theme": self.theme}},
+ "config": {"view": {"width": 400, "height": 300},
+ "mark": {"tooltip": None}}}
+
+ def __repr__(self):
+ return "VegaTheme({!r})".format(self.theme)
+
+
# The entry point group that can be used by other packages to declare other
# renderers that will be auto-detected. Explicit registration is also
# allowed by the PluginRegistery API.
@@ -14,4 +31,8 @@
"view": {"width": 400, "height": 300},
"mark": {"tooltip": None}}})
themes.register('none', lambda: {})
+
+for theme in VEGA_THEMES:
+ themes.register(theme, VegaTheme(theme))
+
themes.enable('default')
|
{"golden_diff": "diff --git a/altair/vegalite/v3/theme.py b/altair/vegalite/v3/theme.py\n--- a/altair/vegalite/v3/theme.py\n+++ b/altair/vegalite/v3/theme.py\n@@ -2,6 +2,23 @@\n \n from ...utils.theme import ThemeRegistry\n \n+VEGA_THEMES = ['ggplot2', 'quartz', 'vox', 'fivethirtyeight', 'dark', 'latimes']\n+\n+\n+class VegaTheme(object):\n+ \"\"\"Implementation of a builtin vega theme.\"\"\"\n+ def __init__(self, theme):\n+ self.theme = theme\n+ \n+ def __call__(self):\n+ return {\"usermeta\": {\"embedOptions\": {\"theme\": self.theme}},\n+ \"config\": {\"view\": {\"width\": 400, \"height\": 300},\n+ \"mark\": {\"tooltip\": None}}}\n+\n+ def __repr__(self):\n+ return \"VegaTheme({!r})\".format(self.theme)\n+\n+\n # The entry point group that can be used by other packages to declare other\n # renderers that will be auto-detected. Explicit registration is also\n # allowed by the PluginRegistery API.\n@@ -14,4 +31,8 @@\n \"view\": {\"width\": 400, \"height\": 300},\n \"mark\": {\"tooltip\": None}}})\n themes.register('none', lambda: {})\n+ \n+for theme in VEGA_THEMES:\n+ themes.register(theme, VegaTheme(theme))\n+\n themes.enable('default')\n", "issue": "Support built-in vega themes\nSee [vega-themes](https://github.com/vega/vega-themes). Themes should be supported via the current theme infrastructure, maybe something like this:\r\n```python\r\nalt.themes.enable('vega.themes.dark')\r\n```\r\nWe'll have to think about how to best populate the list of available themes, and how to make this work cleanly with user-specified themes from within Altair.\n", "before_files": [{"content": "\"\"\"Tools for enabling and registering chart themes\"\"\"\n\nfrom ...utils.theme import ThemeRegistry\n\n# The entry point group that can be used by other packages to declare other\n# renderers that will be auto-detected. Explicit registration is also\n# allowed by the PluginRegistery API.\nENTRY_POINT_GROUP = 'altair.vegalite.v3.theme' # type: str\nthemes = ThemeRegistry(entry_point_group=ENTRY_POINT_GROUP)\n\nthemes.register('default', lambda: {\"config\": {\"view\": {\"width\": 400, \"height\": 300},\n \"mark\": {\"tooltip\": None}}})\nthemes.register('opaque', lambda: {\"config\": {\"background\": \"white\",\n \"view\": {\"width\": 400, \"height\": 300},\n \"mark\": {\"tooltip\": None}}})\nthemes.register('none', lambda: {})\nthemes.enable('default')\n", "path": "altair/vegalite/v3/theme.py"}], "after_files": [{"content": "\"\"\"Tools for enabling and registering chart themes\"\"\"\n\nfrom ...utils.theme import ThemeRegistry\n\nVEGA_THEMES = ['ggplot2', 'quartz', 'vox', 'fivethirtyeight', 'dark', 'latimes']\n\n\nclass VegaTheme(object):\n \"\"\"Implementation of a builtin vega theme.\"\"\"\n def __init__(self, theme):\n self.theme = theme\n \n def __call__(self):\n return {\"usermeta\": {\"embedOptions\": {\"theme\": self.theme}},\n \"config\": {\"view\": {\"width\": 400, \"height\": 300},\n \"mark\": {\"tooltip\": None}}}\n\n def __repr__(self):\n return \"VegaTheme({!r})\".format(self.theme)\n\n\n# The entry point group that can be used by other packages to declare other\n# renderers that will be auto-detected. 
Explicit registration is also\n# allowed by the PluginRegistery API.\nENTRY_POINT_GROUP = 'altair.vegalite.v3.theme' # type: str\nthemes = ThemeRegistry(entry_point_group=ENTRY_POINT_GROUP)\n\nthemes.register('default', lambda: {\"config\": {\"view\": {\"width\": 400, \"height\": 300},\n \"mark\": {\"tooltip\": None}}})\nthemes.register('opaque', lambda: {\"config\": {\"background\": \"white\",\n \"view\": {\"width\": 400, \"height\": 300},\n \"mark\": {\"tooltip\": None}}})\nthemes.register('none', lambda: {})\n \nfor theme in VEGA_THEMES:\n themes.register(theme, VegaTheme(theme))\n\nthemes.enable('default')\n", "path": "altair/vegalite/v3/theme.py"}]}
| 574 | 351 |
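To make the effect of the golden diff above concrete, the following sketch shows how the newly registered built-in themes would be exercised. It assumes the patched `altair` package is importable and that `alt.themes` exposes the registry's usual `enable` and `get` methods; the expected dictionary shape is taken directly from `VegaTheme.__call__` in the diff.

```python
# Sketch, not altair's own tests: exercises the VegaTheme objects added by the patch.
import altair as alt

alt.themes.enable("dark")        # "dark" is one of the VEGA_THEMES registered above
active_theme = alt.themes.get()  # the registry returns the active theme callable
spec = active_theme()            # VegaTheme.__call__ builds the config dict

# Expected shape, per VegaTheme.__call__ in the diff:
# {"usermeta": {"embedOptions": {"theme": "dark"}},
#  "config": {"view": {"width": 400, "height": 300}, "mark": {"tooltip": None}}}
print(spec["usermeta"]["embedOptions"]["theme"])  # -> "dark"
```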
gh_patches_debug_4783
|
rasdani/github-patches
|
git_diff
|
scikit-image__scikit-image-3280
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
label2rgb index error when using average and background color
When using averaged label2rgb with bg_label, indexing the `out` image fails with the following error:
```
Traceback (most recent call last):
File "file.py", line 222, in generate
overlay = color.label2rgb(domain, image=img, bg_label=0, bg_color=1.0, kind="avg")
File "[...]/python3.6/site-packages/skimage/color/colorlabel.py", line 116, in label2rgb
return _label2rgb_avg(label, image, bg_label, bg_color)
File "[...]/python3.6/site-packages/skimage/color/colorlabel.py", line 225, in _label2rgb_avg
out[bg] = bg_color
IndexError: boolean index did not match indexed array along dimension 0; dimension is 100 but corresponding boolean dimension is 2
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `skimage/color/colorlabel.py`
Content:
```
1 import itertools
2
3 import numpy as np
4
5 from .._shared.utils import warn
6 from ..util import img_as_float
7 from . import rgb_colors
8 from .colorconv import rgb2gray, gray2rgb
9
10
11 __all__ = ['color_dict', 'label2rgb', 'DEFAULT_COLORS']
12
13
14 DEFAULT_COLORS = ('red', 'blue', 'yellow', 'magenta', 'green',
15 'indigo', 'darkorange', 'cyan', 'pink', 'yellowgreen')
16
17
18 color_dict = {k: v for k, v in rgb_colors.__dict__.items()
19 if isinstance(v, tuple)}
20
21
22 def _rgb_vector(color):
23 """Return RGB color as (1, 3) array.
24
25 This RGB array gets multiplied by masked regions of an RGB image, which are
26 partially flattened by masking (i.e. dimensions 2D + RGB -> 1D + RGB).
27
28 Parameters
29 ----------
30 color : str or array
31 Color name in `color_dict` or RGB float values between [0, 1].
32 """
33 if isinstance(color, str):
34 color = color_dict[color]
35 # Slice to handle RGBA colors.
36 return np.array(color[:3])
37
38
39 def _match_label_with_color(label, colors, bg_label, bg_color):
40 """Return `unique_labels` and `color_cycle` for label array and color list.
41
42 Colors are cycled for normal labels, but the background color should only
43 be used for the background.
44 """
45 # Temporarily set background color; it will be removed later.
46 if bg_color is None:
47 bg_color = (0, 0, 0)
48 bg_color = _rgb_vector([bg_color])
49
50 # map labels to their ranks among all labels from small to large
51 unique_labels, mapped_labels = np.unique(label, return_inverse=True)
52
53 # get rank of bg_label
54 bg_label_rank_list = mapped_labels[label.flat == bg_label]
55
56 # The rank of each label is the index of the color it is matched to in
57 # color cycle. bg_label should always be mapped to the first color, so
58 # its rank must be 0. Other labels should be ranked from small to large
59 # from 1.
60 if len(bg_label_rank_list) > 0:
61 bg_label_rank = bg_label_rank_list[0]
62 mapped_labels[mapped_labels < bg_label_rank] += 1
63 mapped_labels[label.flat == bg_label] = 0
64 else:
65 mapped_labels += 1
66
67 # Modify labels and color cycle so background color is used only once.
68 color_cycle = itertools.cycle(colors)
69 color_cycle = itertools.chain(bg_color, color_cycle)
70
71 return mapped_labels, color_cycle
72
73
74 def label2rgb(label, image=None, colors=None, alpha=0.3,
75 bg_label=-1, bg_color=(0, 0, 0), image_alpha=1, kind='overlay'):
76 """Return an RGB image where color-coded labels are painted over the image.
77
78 Parameters
79 ----------
80 label : array, shape (M, N)
81 Integer array of labels with the same shape as `image`.
82 image : array, shape (M, N, 3), optional
83 Image used as underlay for labels. If the input is an RGB image, it's
84 converted to grayscale before coloring.
85 colors : list, optional
86 List of colors. If the number of labels exceeds the number of colors,
87 then the colors are cycled.
88 alpha : float [0, 1], optional
89 Opacity of colorized labels. Ignored if image is `None`.
90 bg_label : int, optional
91 Label that's treated as the background.
92 bg_color : str or array, optional
93 Background color. Must be a name in `color_dict` or RGB float values
94 between [0, 1].
95 image_alpha : float [0, 1], optional
96 Opacity of the image.
97 kind : string, one of {'overlay', 'avg'}
98 The kind of color image desired. 'overlay' cycles over defined colors
99 and overlays the colored labels over the original image. 'avg' replaces
100 each labeled segment with its average color, for a stained-class or
101 pastel painting appearance.
102
103 Returns
104 -------
105 result : array of float, shape (M, N, 3)
106 The result of blending a cycling colormap (`colors`) for each distinct
107 value in `label` with the image, at a certain alpha value.
108 """
109 if kind == 'overlay':
110 return _label2rgb_overlay(label, image, colors, alpha, bg_label,
111 bg_color, image_alpha)
112 else:
113 return _label2rgb_avg(label, image, bg_label, bg_color)
114
115
116 def _label2rgb_overlay(label, image=None, colors=None, alpha=0.3,
117 bg_label=-1, bg_color=None, image_alpha=1):
118 """Return an RGB image where color-coded labels are painted over the image.
119
120 Parameters
121 ----------
122 label : array, shape (M, N)
123 Integer array of labels with the same shape as `image`.
124 image : array, shape (M, N, 3), optional
125 Image used as underlay for labels. If the input is an RGB image, it's
126 converted to grayscale before coloring.
127 colors : list, optional
128 List of colors. If the number of labels exceeds the number of colors,
129 then the colors are cycled.
130 alpha : float [0, 1], optional
131 Opacity of colorized labels. Ignored if image is `None`.
132 bg_label : int, optional
133 Label that's treated as the background.
134 bg_color : str or array, optional
135 Background color. Must be a name in `color_dict` or RGB float values
136 between [0, 1].
137 image_alpha : float [0, 1], optional
138 Opacity of the image.
139
140 Returns
141 -------
142 result : array of float, shape (M, N, 3)
143 The result of blending a cycling colormap (`colors`) for each distinct
144 value in `label` with the image, at a certain alpha value.
145 """
146 if colors is None:
147 colors = DEFAULT_COLORS
148 colors = [_rgb_vector(c) for c in colors]
149
150 if image is None:
151 image = np.zeros(label.shape + (3,), dtype=np.float64)
152 # Opacity doesn't make sense if no image exists.
153 alpha = 1
154 else:
155 if not image.shape[:2] == label.shape:
156 raise ValueError("`image` and `label` must be the same shape")
157
158 if image.min() < 0:
159 warn("Negative intensities in `image` are not supported")
160
161 image = img_as_float(rgb2gray(image))
162 image = gray2rgb(image) * image_alpha + (1 - image_alpha)
163
164 # Ensure that all labels are non-negative so we can index into
165 # `label_to_color` correctly.
166 offset = min(label.min(), bg_label)
167 if offset != 0:
168 label = label - offset # Make sure you don't modify the input array.
169 bg_label -= offset
170
171 new_type = np.min_scalar_type(int(label.max()))
172 if new_type == np.bool:
173 new_type = np.uint8
174 label = label.astype(new_type)
175
176 mapped_labels_flat, color_cycle = _match_label_with_color(label, colors,
177 bg_label, bg_color)
178
179 if len(mapped_labels_flat) == 0:
180 return image
181
182 dense_labels = range(max(mapped_labels_flat) + 1)
183
184 label_to_color = np.array([c for i, c in zip(dense_labels, color_cycle)])
185
186 mapped_labels = label
187 mapped_labels.flat = mapped_labels_flat
188 result = label_to_color[mapped_labels] * alpha + image * (1 - alpha)
189
190 # Remove background label if its color was not specified.
191 remove_background = 0 in mapped_labels_flat and bg_color is None
192 if remove_background:
193 result[label == bg_label] = image[label == bg_label]
194
195 return result
196
197
198 def _label2rgb_avg(label_field, image, bg_label=0, bg_color=(0, 0, 0)):
199 """Visualise each segment in `label_field` with its mean color in `image`.
200
201 Parameters
202 ----------
203 label_field : array of int
204 A segmentation of an image.
205 image : array, shape ``label_field.shape + (3,)``
206 A color image of the same spatial shape as `label_field`.
207 bg_label : int, optional
208 A value in `label_field` to be treated as background.
209 bg_color : 3-tuple of int, optional
210 The color for the background label
211
212 Returns
213 -------
214 out : array, same shape and type as `image`
215 The output visualization.
216 """
217 out = np.zeros_like(image)
218 labels = np.unique(label_field)
219 bg = (labels == bg_label)
220 if bg.any():
221 labels = labels[labels != bg_label]
222 out[bg] = bg_color
223 for label in labels:
224 mask = (label_field == label).nonzero()
225 color = image[mask].mean(axis=0)
226 out[mask] = color
227 return out
228
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/skimage/color/colorlabel.py b/skimage/color/colorlabel.py
--- a/skimage/color/colorlabel.py
+++ b/skimage/color/colorlabel.py
@@ -219,7 +219,8 @@
bg = (labels == bg_label)
if bg.any():
labels = labels[labels != bg_label]
- out[bg] = bg_color
+ mask = (label_field == bg_label).nonzero()
+ out[mask] = bg_color
for label in labels:
mask = (label_field == label).nonzero()
color = image[mask].mean(axis=0)
|
{"golden_diff": "diff --git a/skimage/color/colorlabel.py b/skimage/color/colorlabel.py\n--- a/skimage/color/colorlabel.py\n+++ b/skimage/color/colorlabel.py\n@@ -219,7 +219,8 @@\n bg = (labels == bg_label)\n if bg.any():\n labels = labels[labels != bg_label]\n- out[bg] = bg_color\n+ mask = (label_field == bg_label).nonzero()\n+ out[mask] = bg_color\n for label in labels:\n mask = (label_field == label).nonzero()\n color = image[mask].mean(axis=0)\n", "issue": "label2rgb index error when using average and background color\nWhen using averaged label2rgb with bg_label the out image indexing fails with error:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"file.py\", line 222, in generate\r\n overlay = color.label2rgb(domain, image=img, bg_label=0, bg_color=1.0, kind=\"avg\")\r\n File \"[...]/python3.6/site-packages/skimage/color/colorlabel.py\", line 116, in label2rgb\r\n return _label2rgb_avg(label, image, bg_label, bg_color)\r\n File \"[...]/python3.6/site-packages/skimage/color/colorlabel.py\", line 225, in _label2rgb_avg\r\n out[bg] = bg_color\r\nIndexError: boolean index did not match indexed array along dimension 0; dimension is 100 but corresponding boolean dimension is 2\r\n```\n", "before_files": [{"content": "import itertools\n\nimport numpy as np\n\nfrom .._shared.utils import warn\nfrom ..util import img_as_float\nfrom . import rgb_colors\nfrom .colorconv import rgb2gray, gray2rgb\n\n\n__all__ = ['color_dict', 'label2rgb', 'DEFAULT_COLORS']\n\n\nDEFAULT_COLORS = ('red', 'blue', 'yellow', 'magenta', 'green',\n 'indigo', 'darkorange', 'cyan', 'pink', 'yellowgreen')\n\n\ncolor_dict = {k: v for k, v in rgb_colors.__dict__.items()\n if isinstance(v, tuple)}\n\n\ndef _rgb_vector(color):\n \"\"\"Return RGB color as (1, 3) array.\n\n This RGB array gets multiplied by masked regions of an RGB image, which are\n partially flattened by masking (i.e. dimensions 2D + RGB -> 1D + RGB).\n\n Parameters\n ----------\n color : str or array\n Color name in `color_dict` or RGB float values between [0, 1].\n \"\"\"\n if isinstance(color, str):\n color = color_dict[color]\n # Slice to handle RGBA colors.\n return np.array(color[:3])\n\n\ndef _match_label_with_color(label, colors, bg_label, bg_color):\n \"\"\"Return `unique_labels` and `color_cycle` for label array and color list.\n\n Colors are cycled for normal labels, but the background color should only\n be used for the background.\n \"\"\"\n # Temporarily set background color; it will be removed later.\n if bg_color is None:\n bg_color = (0, 0, 0)\n bg_color = _rgb_vector([bg_color])\n\n # map labels to their ranks among all labels from small to large\n unique_labels, mapped_labels = np.unique(label, return_inverse=True)\n\n # get rank of bg_label\n bg_label_rank_list = mapped_labels[label.flat == bg_label]\n\n # The rank of each label is the index of the color it is matched to in\n # color cycle. bg_label should always be mapped to the first color, so\n # its rank must be 0. 
Other labels should be ranked from small to large\n # from 1.\n if len(bg_label_rank_list) > 0:\n bg_label_rank = bg_label_rank_list[0]\n mapped_labels[mapped_labels < bg_label_rank] += 1\n mapped_labels[label.flat == bg_label] = 0\n else:\n mapped_labels += 1\n\n # Modify labels and color cycle so background color is used only once.\n color_cycle = itertools.cycle(colors)\n color_cycle = itertools.chain(bg_color, color_cycle)\n\n return mapped_labels, color_cycle\n\n\ndef label2rgb(label, image=None, colors=None, alpha=0.3,\n bg_label=-1, bg_color=(0, 0, 0), image_alpha=1, kind='overlay'):\n \"\"\"Return an RGB image where color-coded labels are painted over the image.\n\n Parameters\n ----------\n label : array, shape (M, N)\n Integer array of labels with the same shape as `image`.\n image : array, shape (M, N, 3), optional\n Image used as underlay for labels. If the input is an RGB image, it's\n converted to grayscale before coloring.\n colors : list, optional\n List of colors. If the number of labels exceeds the number of colors,\n then the colors are cycled.\n alpha : float [0, 1], optional\n Opacity of colorized labels. Ignored if image is `None`.\n bg_label : int, optional\n Label that's treated as the background.\n bg_color : str or array, optional\n Background color. Must be a name in `color_dict` or RGB float values\n between [0, 1].\n image_alpha : float [0, 1], optional\n Opacity of the image.\n kind : string, one of {'overlay', 'avg'}\n The kind of color image desired. 'overlay' cycles over defined colors\n and overlays the colored labels over the original image. 'avg' replaces\n each labeled segment with its average color, for a stained-class or\n pastel painting appearance.\n\n Returns\n -------\n result : array of float, shape (M, N, 3)\n The result of blending a cycling colormap (`colors`) for each distinct\n value in `label` with the image, at a certain alpha value.\n \"\"\"\n if kind == 'overlay':\n return _label2rgb_overlay(label, image, colors, alpha, bg_label,\n bg_color, image_alpha)\n else:\n return _label2rgb_avg(label, image, bg_label, bg_color)\n\n\ndef _label2rgb_overlay(label, image=None, colors=None, alpha=0.3,\n bg_label=-1, bg_color=None, image_alpha=1):\n \"\"\"Return an RGB image where color-coded labels are painted over the image.\n\n Parameters\n ----------\n label : array, shape (M, N)\n Integer array of labels with the same shape as `image`.\n image : array, shape (M, N, 3), optional\n Image used as underlay for labels. If the input is an RGB image, it's\n converted to grayscale before coloring.\n colors : list, optional\n List of colors. If the number of labels exceeds the number of colors,\n then the colors are cycled.\n alpha : float [0, 1], optional\n Opacity of colorized labels. Ignored if image is `None`.\n bg_label : int, optional\n Label that's treated as the background.\n bg_color : str or array, optional\n Background color. 
Must be a name in `color_dict` or RGB float values\n between [0, 1].\n image_alpha : float [0, 1], optional\n Opacity of the image.\n\n Returns\n -------\n result : array of float, shape (M, N, 3)\n The result of blending a cycling colormap (`colors`) for each distinct\n value in `label` with the image, at a certain alpha value.\n \"\"\"\n if colors is None:\n colors = DEFAULT_COLORS\n colors = [_rgb_vector(c) for c in colors]\n\n if image is None:\n image = np.zeros(label.shape + (3,), dtype=np.float64)\n # Opacity doesn't make sense if no image exists.\n alpha = 1\n else:\n if not image.shape[:2] == label.shape:\n raise ValueError(\"`image` and `label` must be the same shape\")\n\n if image.min() < 0:\n warn(\"Negative intensities in `image` are not supported\")\n\n image = img_as_float(rgb2gray(image))\n image = gray2rgb(image) * image_alpha + (1 - image_alpha)\n\n # Ensure that all labels are non-negative so we can index into\n # `label_to_color` correctly.\n offset = min(label.min(), bg_label)\n if offset != 0:\n label = label - offset # Make sure you don't modify the input array.\n bg_label -= offset\n\n new_type = np.min_scalar_type(int(label.max()))\n if new_type == np.bool:\n new_type = np.uint8\n label = label.astype(new_type)\n\n mapped_labels_flat, color_cycle = _match_label_with_color(label, colors,\n bg_label, bg_color)\n\n if len(mapped_labels_flat) == 0:\n return image\n\n dense_labels = range(max(mapped_labels_flat) + 1)\n\n label_to_color = np.array([c for i, c in zip(dense_labels, color_cycle)])\n\n mapped_labels = label\n mapped_labels.flat = mapped_labels_flat\n result = label_to_color[mapped_labels] * alpha + image * (1 - alpha)\n\n # Remove background label if its color was not specified.\n remove_background = 0 in mapped_labels_flat and bg_color is None\n if remove_background:\n result[label == bg_label] = image[label == bg_label]\n\n return result\n\n\ndef _label2rgb_avg(label_field, image, bg_label=0, bg_color=(0, 0, 0)):\n \"\"\"Visualise each segment in `label_field` with its mean color in `image`.\n\n Parameters\n ----------\n label_field : array of int\n A segmentation of an image.\n image : array, shape ``label_field.shape + (3,)``\n A color image of the same spatial shape as `label_field`.\n bg_label : int, optional\n A value in `label_field` to be treated as background.\n bg_color : 3-tuple of int, optional\n The color for the background label\n\n Returns\n -------\n out : array, same shape and type as `image`\n The output visualization.\n \"\"\"\n out = np.zeros_like(image)\n labels = np.unique(label_field)\n bg = (labels == bg_label)\n if bg.any():\n labels = labels[labels != bg_label]\n out[bg] = bg_color\n for label in labels:\n mask = (label_field == label).nonzero()\n color = image[mask].mean(axis=0)\n out[mask] = color\n return out\n", "path": "skimage/color/colorlabel.py"}], "after_files": [{"content": "import itertools\n\nimport numpy as np\n\nfrom .._shared.utils import warn\nfrom ..util import img_as_float\nfrom . import rgb_colors\nfrom .colorconv import rgb2gray, gray2rgb\n\n\n__all__ = ['color_dict', 'label2rgb', 'DEFAULT_COLORS']\n\n\nDEFAULT_COLORS = ('red', 'blue', 'yellow', 'magenta', 'green',\n 'indigo', 'darkorange', 'cyan', 'pink', 'yellowgreen')\n\n\ncolor_dict = {k: v for k, v in rgb_colors.__dict__.items()\n if isinstance(v, tuple)}\n\n\ndef _rgb_vector(color):\n \"\"\"Return RGB color as (1, 3) array.\n\n This RGB array gets multiplied by masked regions of an RGB image, which are\n partially flattened by masking (i.e. 
dimensions 2D + RGB -> 1D + RGB).\n\n Parameters\n ----------\n color : str or array\n Color name in `color_dict` or RGB float values between [0, 1].\n \"\"\"\n if isinstance(color, str):\n color = color_dict[color]\n # Slice to handle RGBA colors.\n return np.array(color[:3])\n\n\ndef _match_label_with_color(label, colors, bg_label, bg_color):\n \"\"\"Return `unique_labels` and `color_cycle` for label array and color list.\n\n Colors are cycled for normal labels, but the background color should only\n be used for the background.\n \"\"\"\n # Temporarily set background color; it will be removed later.\n if bg_color is None:\n bg_color = (0, 0, 0)\n bg_color = _rgb_vector([bg_color])\n\n # map labels to their ranks among all labels from small to large\n unique_labels, mapped_labels = np.unique(label, return_inverse=True)\n\n # get rank of bg_label\n bg_label_rank_list = mapped_labels[label.flat == bg_label]\n\n # The rank of each label is the index of the color it is matched to in\n # color cycle. bg_label should always be mapped to the first color, so\n # its rank must be 0. Other labels should be ranked from small to large\n # from 1.\n if len(bg_label_rank_list) > 0:\n bg_label_rank = bg_label_rank_list[0]\n mapped_labels[mapped_labels < bg_label_rank] += 1\n mapped_labels[label.flat == bg_label] = 0\n else:\n mapped_labels += 1\n\n # Modify labels and color cycle so background color is used only once.\n color_cycle = itertools.cycle(colors)\n color_cycle = itertools.chain(bg_color, color_cycle)\n\n return mapped_labels, color_cycle\n\n\ndef label2rgb(label, image=None, colors=None, alpha=0.3,\n bg_label=-1, bg_color=(0, 0, 0), image_alpha=1, kind='overlay'):\n \"\"\"Return an RGB image where color-coded labels are painted over the image.\n\n Parameters\n ----------\n label : array, shape (M, N)\n Integer array of labels with the same shape as `image`.\n image : array, shape (M, N, 3), optional\n Image used as underlay for labels. If the input is an RGB image, it's\n converted to grayscale before coloring.\n colors : list, optional\n List of colors. If the number of labels exceeds the number of colors,\n then the colors are cycled.\n alpha : float [0, 1], optional\n Opacity of colorized labels. Ignored if image is `None`.\n bg_label : int, optional\n Label that's treated as the background.\n bg_color : str or array, optional\n Background color. Must be a name in `color_dict` or RGB float values\n between [0, 1].\n image_alpha : float [0, 1], optional\n Opacity of the image.\n kind : string, one of {'overlay', 'avg'}\n The kind of color image desired. 'overlay' cycles over defined colors\n and overlays the colored labels over the original image. 
'avg' replaces\n each labeled segment with its average color, for a stained-class or\n pastel painting appearance.\n\n Returns\n -------\n result : array of float, shape (M, N, 3)\n The result of blending a cycling colormap (`colors`) for each distinct\n value in `label` with the image, at a certain alpha value.\n \"\"\"\n if kind == 'overlay':\n return _label2rgb_overlay(label, image, colors, alpha, bg_label,\n bg_color, image_alpha)\n else:\n return _label2rgb_avg(label, image, bg_label, bg_color)\n\n\ndef _label2rgb_overlay(label, image=None, colors=None, alpha=0.3,\n bg_label=-1, bg_color=None, image_alpha=1):\n \"\"\"Return an RGB image where color-coded labels are painted over the image.\n\n Parameters\n ----------\n label : array, shape (M, N)\n Integer array of labels with the same shape as `image`.\n image : array, shape (M, N, 3), optional\n Image used as underlay for labels. If the input is an RGB image, it's\n converted to grayscale before coloring.\n colors : list, optional\n List of colors. If the number of labels exceeds the number of colors,\n then the colors are cycled.\n alpha : float [0, 1], optional\n Opacity of colorized labels. Ignored if image is `None`.\n bg_label : int, optional\n Label that's treated as the background.\n bg_color : str or array, optional\n Background color. Must be a name in `color_dict` or RGB float values\n between [0, 1].\n image_alpha : float [0, 1], optional\n Opacity of the image.\n\n Returns\n -------\n result : array of float, shape (M, N, 3)\n The result of blending a cycling colormap (`colors`) for each distinct\n value in `label` with the image, at a certain alpha value.\n \"\"\"\n if colors is None:\n colors = DEFAULT_COLORS\n colors = [_rgb_vector(c) for c in colors]\n\n if image is None:\n image = np.zeros(label.shape + (3,), dtype=np.float64)\n # Opacity doesn't make sense if no image exists.\n alpha = 1\n else:\n if not image.shape[:2] == label.shape:\n raise ValueError(\"`image` and `label` must be the same shape\")\n\n if image.min() < 0:\n warn(\"Negative intensities in `image` are not supported\")\n\n image = img_as_float(rgb2gray(image))\n image = gray2rgb(image) * image_alpha + (1 - image_alpha)\n\n # Ensure that all labels are non-negative so we can index into\n # `label_to_color` correctly.\n offset = min(label.min(), bg_label)\n if offset != 0:\n label = label - offset # Make sure you don't modify the input array.\n bg_label -= offset\n\n new_type = np.min_scalar_type(int(label.max()))\n if new_type == np.bool:\n new_type = np.uint8\n label = label.astype(new_type)\n\n mapped_labels_flat, color_cycle = _match_label_with_color(label, colors,\n bg_label, bg_color)\n\n if len(mapped_labels_flat) == 0:\n return image\n\n dense_labels = range(max(mapped_labels_flat) + 1)\n\n label_to_color = np.array([c for i, c in zip(dense_labels, color_cycle)])\n\n mapped_labels = label\n mapped_labels.flat = mapped_labels_flat\n result = label_to_color[mapped_labels] * alpha + image * (1 - alpha)\n\n # Remove background label if its color was not specified.\n remove_background = 0 in mapped_labels_flat and bg_color is None\n if remove_background:\n result[label == bg_label] = image[label == bg_label]\n\n return result\n\n\ndef _label2rgb_avg(label_field, image, bg_label=0, bg_color=(0, 0, 0)):\n \"\"\"Visualise each segment in `label_field` with its mean color in `image`.\n\n Parameters\n ----------\n label_field : array of int\n A segmentation of an image.\n image : array, shape ``label_field.shape + (3,)``\n A color image of the same 
spatial shape as `label_field`.\n bg_label : int, optional\n A value in `label_field` to be treated as background.\n bg_color : 3-tuple of int, optional\n The color for the background label\n\n Returns\n -------\n out : array, same shape and type as `image`\n The output visualization.\n \"\"\"\n out = np.zeros_like(image)\n labels = np.unique(label_field)\n bg = (labels == bg_label)\n if bg.any():\n labels = labels[labels != bg_label]\n mask = (label_field == bg_label).nonzero()\n out[mask] = bg_color\n for label in labels:\n mask = (label_field == label).nonzero()\n color = image[mask].mean(axis=0)\n out[mask] = color\n return out\n", "path": "skimage/color/colorlabel.py"}]}
| 3,060 | 140 |
gh_patches_debug_22033
|
rasdani/github-patches
|
git_diff
|
searx__searx-1689
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Google Images & DeviantArt don't work anymore
From one day to the next, Google Images and DeviantArt stopped showing me anything, even with simple searches.


They are of course activated in the engines. It has been a few days, with a restart every day (local instance using Docker), without modifying the engines or any other setting. Using searx 0.15.0.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/engines/google_images.py`
Content:
```
1 """
2 Google (Images)
3
4 @website https://www.google.com
5 @provide-api yes (https://developers.google.com/custom-search/)
6
7 @using-api no
8 @results HTML chunks with JSON inside
9 @stable no
10 @parse url, title, img_src
11 """
12
13 from datetime import date, timedelta
14 from json import loads
15 from lxml import html
16 from searx.url_utils import urlencode
17
18 # engine dependent config
19 categories = ['images']
20 paging = True
21 safesearch = True
22 time_range_support = True
23 number_of_results = 100
24
25 search_url = 'https://www.google.com/search'\
26 '?{query}'\
27 '&tbm=isch'\
28 '&yv=2'\
29 '&{search_options}'
30 time_range_attr = "qdr:{range}"
31 time_range_custom_attr = "cdr:1,cd_min:{start},cd_max{end}"
32 time_range_dict = {'day': 'd',
33 'week': 'w',
34 'month': 'm'}
35
36
37 # do search-request
38 def request(query, params):
39 search_options = {
40 'ijn': params['pageno'] - 1,
41 'start': (params['pageno'] - 1) * number_of_results
42 }
43
44 if params['time_range'] in time_range_dict:
45 search_options['tbs'] = time_range_attr.format(range=time_range_dict[params['time_range']])
46 elif params['time_range'] == 'year':
47 now = date.today()
48 then = now - timedelta(days=365)
49 start = then.strftime('%m/%d/%Y')
50 end = now.strftime('%m/%d/%Y')
51 search_options['tbs'] = time_range_custom_attr.format(start=start, end=end)
52
53 if safesearch and params['safesearch']:
54 search_options['safe'] = 'on'
55
56 params['url'] = search_url.format(query=urlencode({'q': query}),
57 search_options=urlencode(search_options))
58
59 return params
60
61
62 # get response from search-request
63 def response(resp):
64 results = []
65
66 dom = html.fromstring(resp.text)
67
68 # parse results
69 for result in dom.xpath('//div[contains(@class, "rg_meta")]/text()'):
70
71 try:
72 metadata = loads(result)
73 img_format = "{0} {1}x{2}".format(metadata['ity'], str(metadata['ow']), str(metadata['oh']))
74 source = "{0} ({1})".format(metadata['st'], metadata['isu'])
75 results.append({'url': metadata['ru'],
76 'title': metadata['pt'],
77 'content': metadata['s'],
78 'source': source,
79 'img_format': img_format,
80 'thumbnail_src': metadata['tu'],
81 'img_src': metadata['ou'],
82 'template': 'images.html'})
83
84 except:
85 continue
86
87 return results
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/searx/engines/google_images.py b/searx/engines/google_images.py
--- a/searx/engines/google_images.py
+++ b/searx/engines/google_images.py
@@ -70,11 +70,21 @@
try:
metadata = loads(result)
- img_format = "{0} {1}x{2}".format(metadata['ity'], str(metadata['ow']), str(metadata['oh']))
- source = "{0} ({1})".format(metadata['st'], metadata['isu'])
+
+ img_format = metadata.get('ity', '')
+ img_width = metadata.get('ow', '')
+ img_height = metadata.get('oh', '')
+ if img_width and img_height:
+ img_format += " {0}x{1}".format(img_width, img_height)
+
+ source = metadata.get('st', '')
+ source_url = metadata.get('isu', '')
+ if source_url:
+ source += " ({0})".format(source_url)
+
results.append({'url': metadata['ru'],
'title': metadata['pt'],
- 'content': metadata['s'],
+ 'content': metadata.get('s', ''),
'source': source,
'img_format': img_format,
'thumbnail_src': metadata['tu'],
|
{"golden_diff": "diff --git a/searx/engines/google_images.py b/searx/engines/google_images.py\n--- a/searx/engines/google_images.py\n+++ b/searx/engines/google_images.py\n@@ -70,11 +70,21 @@\n \n try:\n metadata = loads(result)\n- img_format = \"{0} {1}x{2}\".format(metadata['ity'], str(metadata['ow']), str(metadata['oh']))\n- source = \"{0} ({1})\".format(metadata['st'], metadata['isu'])\n+\n+ img_format = metadata.get('ity', '')\n+ img_width = metadata.get('ow', '')\n+ img_height = metadata.get('oh', '')\n+ if img_width and img_height:\n+ img_format += \" {0}x{1}\".format(img_width, img_height)\n+\n+ source = metadata.get('st', '')\n+ source_url = metadata.get('isu', '')\n+ if source_url:\n+ source += \" ({0})\".format(source_url)\n+\n results.append({'url': metadata['ru'],\n 'title': metadata['pt'],\n- 'content': metadata['s'],\n+ 'content': metadata.get('s', ''),\n 'source': source,\n 'img_format': img_format,\n 'thumbnail_src': metadata['tu'],\n", "issue": "Google Images & DeviantArt don't work anymore\nFrom one day to another, Google Images and DeviantArt stopped to show me anything, even with simple searches.\r\n\r\n\r\nThey are of course activated in the engines. It has been a few days, with a restart every day (local instance using Docker), without modifying engines nor any other setting. Using searx 0.15.0\n", "before_files": [{"content": "\"\"\"\n Google (Images)\n\n @website https://www.google.com\n @provide-api yes (https://developers.google.com/custom-search/)\n\n @using-api no\n @results HTML chunks with JSON inside\n @stable no\n @parse url, title, img_src\n\"\"\"\n\nfrom datetime import date, timedelta\nfrom json import loads\nfrom lxml import html\nfrom searx.url_utils import urlencode\n\n# engine dependent config\ncategories = ['images']\npaging = True\nsafesearch = True\ntime_range_support = True\nnumber_of_results = 100\n\nsearch_url = 'https://www.google.com/search'\\\n '?{query}'\\\n '&tbm=isch'\\\n '&yv=2'\\\n '&{search_options}'\ntime_range_attr = \"qdr:{range}\"\ntime_range_custom_attr = \"cdr:1,cd_min:{start},cd_max{end}\"\ntime_range_dict = {'day': 'd',\n 'week': 'w',\n 'month': 'm'}\n\n\n# do search-request\ndef request(query, params):\n search_options = {\n 'ijn': params['pageno'] - 1,\n 'start': (params['pageno'] - 1) * number_of_results\n }\n\n if params['time_range'] in time_range_dict:\n search_options['tbs'] = time_range_attr.format(range=time_range_dict[params['time_range']])\n elif params['time_range'] == 'year':\n now = date.today()\n then = now - timedelta(days=365)\n start = then.strftime('%m/%d/%Y')\n end = now.strftime('%m/%d/%Y')\n search_options['tbs'] = time_range_custom_attr.format(start=start, end=end)\n\n if safesearch and params['safesearch']:\n search_options['safe'] = 'on'\n\n params['url'] = search_url.format(query=urlencode({'q': query}),\n search_options=urlencode(search_options))\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n dom = html.fromstring(resp.text)\n\n # parse results\n for result in dom.xpath('//div[contains(@class, \"rg_meta\")]/text()'):\n\n try:\n metadata = loads(result)\n img_format = \"{0} {1}x{2}\".format(metadata['ity'], str(metadata['ow']), str(metadata['oh']))\n source = \"{0} ({1})\".format(metadata['st'], metadata['isu'])\n results.append({'url': metadata['ru'],\n 'title': metadata['pt'],\n 'content': metadata['s'],\n 'source': source,\n 'img_format': img_format,\n 'thumbnail_src': metadata['tu'],\n 'img_src': metadata['ou'],\n 'template': 'images.html'})\n\n 
except:\n continue\n\n return results\n", "path": "searx/engines/google_images.py"}], "after_files": [{"content": "\"\"\"\n Google (Images)\n\n @website https://www.google.com\n @provide-api yes (https://developers.google.com/custom-search/)\n\n @using-api no\n @results HTML chunks with JSON inside\n @stable no\n @parse url, title, img_src\n\"\"\"\n\nfrom datetime import date, timedelta\nfrom json import loads\nfrom lxml import html\nfrom searx.url_utils import urlencode\n\n# engine dependent config\ncategories = ['images']\npaging = True\nsafesearch = True\ntime_range_support = True\nnumber_of_results = 100\n\nsearch_url = 'https://www.google.com/search'\\\n '?{query}'\\\n '&tbm=isch'\\\n '&yv=2'\\\n '&{search_options}'\ntime_range_attr = \"qdr:{range}\"\ntime_range_custom_attr = \"cdr:1,cd_min:{start},cd_max{end}\"\ntime_range_dict = {'day': 'd',\n 'week': 'w',\n 'month': 'm'}\n\n\n# do search-request\ndef request(query, params):\n search_options = {\n 'ijn': params['pageno'] - 1,\n 'start': (params['pageno'] - 1) * number_of_results\n }\n\n if params['time_range'] in time_range_dict:\n search_options['tbs'] = time_range_attr.format(range=time_range_dict[params['time_range']])\n elif params['time_range'] == 'year':\n now = date.today()\n then = now - timedelta(days=365)\n start = then.strftime('%m/%d/%Y')\n end = now.strftime('%m/%d/%Y')\n search_options['tbs'] = time_range_custom_attr.format(start=start, end=end)\n\n if safesearch and params['safesearch']:\n search_options['safe'] = 'on'\n\n params['url'] = search_url.format(query=urlencode({'q': query}),\n search_options=urlencode(search_options))\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n dom = html.fromstring(resp.text)\n\n # parse results\n for result in dom.xpath('//div[contains(@class, \"rg_meta\")]/text()'):\n\n try:\n metadata = loads(result)\n\n img_format = metadata.get('ity', '')\n img_width = metadata.get('ow', '')\n img_height = metadata.get('oh', '')\n if img_width and img_height:\n img_format += \" {0}x{1}\".format(img_width, img_height)\n\n source = metadata.get('st', '')\n source_url = metadata.get('isu', '')\n if source_url:\n source += \" ({0})\".format(source_url)\n\n results.append({'url': metadata['ru'],\n 'title': metadata['pt'],\n 'content': metadata.get('s', ''),\n 'source': source,\n 'img_format': img_format,\n 'thumbnail_src': metadata['tu'],\n 'img_src': metadata['ou'],\n 'template': 'images.html'})\n\n except:\n continue\n\n return results\n", "path": "searx/engines/google_images.py"}]}
| 1,268 | 288 |
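The essence of the fix in this record is replacing hard key lookups with `dict.get` fallbacks. The sketch below isolates that parsing step with an invented metadata blob; real `rg_meta` entries may include or omit any of the optional keys, and the sample values are not from a captured Google response.

```python
# Standalone sketch with an invented metadata blob, mirroring the patched parsing.
from json import loads

raw = '{"ru": "https://example.org/page", "pt": "Example title", "tu": "thumb.jpg", "ou": "full.jpg"}'
metadata = loads(raw)

img_format = metadata.get('ity', '')
img_width = metadata.get('ow', '')
img_height = metadata.get('oh', '')
if img_width and img_height:
    img_format += " {0}x{1}".format(img_width, img_height)

source = metadata.get('st', '')
source_url = metadata.get('isu', '')
if source_url:
    source += " ({0})".format(source_url)

result = {
    'url': metadata['ru'],              # keys the patch still treats as required
    'title': metadata['pt'],
    'content': metadata.get('s', ''),   # optional keys degrade to empty strings
    'source': source,
    'img_format': img_format,
    'thumbnail_src': metadata['tu'],
    'img_src': metadata['ou'],
    'template': 'images.html',
}
print(result['title'], '-', result['img_format'] or 'no size metadata')
```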
gh_patches_debug_39059
|
rasdani/github-patches
|
git_diff
|
pypa__pip-8910
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unable to clean http cache with pip
<!--
If you're reporting an issue for `--use-feature=2020-resolver`, use the "Dependency resolver failures / errors" template instead.
-->
**Environment**
* pip version: pip 20.2.3
* Python version: Python 2.7 (32bit)
* OS: Windows 10 (64bit)
<!-- Feel free to add more information about your environment here -->
**Description**
<!-- A clear and concise description of what the bug is. -->
We noticed pip will cache the http response for future installations. However, if the cached http response includes corrupted data (a network issue, for example), installing the same package will result in a pip cache error as follows:
```
(venv2) H:\>pip install pyside
DEPRECATION: Python 2.7 reached the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 is no longer maintained. pip 21.0 will drop support for Python 2.7 in January 2021. More details about Python 2 support in pip can be found at https://pip.pypa.io/en/latest/development/release-process/#python-2-support pip 21.0 will remove support for this functionality.
Collecting pyside
Using cached PySide-1.2.4-cp27-none-win32.whl (41.0 MB)
ERROR: THESE PACKAGES DO NOT MATCH THE HASHES FROM THE REQUIREMENTS FILE. If you have updated the package versions, please update the hashes. Otherwise, examine the package contents carefully; someone may have tampered with them.
pyside from https://files.pythonhosted.org/packages/39/78/2b5fcd3b4ff4fc891f1c3a8bee09616d59fa6d644b0dc6252d46d8fbb423/PySide-1.2.4-cp27-none-win32.whl#sha256=104c0f3aee597e92c43b60f31205cbbd57a4307fea0fae4bbaeeb58b61878908:
Expected sha256 104c0f3aee597e92c43b60f31205cbbd57a4307fea0fae4bbaeeb58b61878908
Got 9c9bdae0c8b07e8e834b3bd9f35ab93f3ddbae8365047b1596553eb70e996427
```
The symptom of this issue has already been reported in https://github.com/pypa/warehouse/issues/8330.
**Expected behavior**
<!-- A clear and concise description of what you expected to happen. -->
If the error is caused by data corruption in the http response cache, then pip should invalidate the cache and try to re-download as a best effort.
Currently `pip cache purge` will not clear the http response cache. The only solution in this case is to remove the http directory from the file system, forcing pip to rebuild the http cache.
**How to Reproduce**
<!-- Describe the steps to reproduce this bug. -->
1. Create a Python 2 virtual environment
2. Download the example corrupted file https://mega.nz/file/WsthyLTS#AWD7NmS-w9B62Q3Y8Lb4SvCalqCb1d83a5FniKPmFqY
3. Overwrite http folder in C:\Users\<account name>\AppData\Local\pip\cache\http
4. Uninstall pyside and install pyside by `pip install pyside`
**Output**
```
(venv2) H:\>pip install pyside
DEPRECATION: Python 2.7 reached the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 is no longer maintained. pip 21.0 will drop support for Python 2.7 in January 2021. More details about Python 2 support in pip can be found at https://pip.pypa.io/en/latest/development/release-process/#python-2-support pip 21.0 will remove support for this functionality.
Collecting pyside
Using cached PySide-1.2.4-cp27-none-win32.whl (41.0 MB)
ERROR: THESE PACKAGES DO NOT MATCH THE HASHES FROM THE REQUIREMENTS FILE. If you have updated the package versions, please update the hashes. Otherwise, examine the package contents carefully; someone may have tampered with them.
pyside from https://files.pythonhosted.org/packages/39/78/2b5fcd3b4ff4fc891f1c3a8bee09616d59fa6d644b0dc6252d46d8fbb423/PySide-1.2.4-cp27-none-win32.whl#sha256=104c0f3aee597e92c43b60f31205cbbd57a4307fea0fae4bbaeeb58b61878908:
Expected sha256 104c0f3aee597e92c43b60f31205cbbd57a4307fea0fae4bbaeeb58b61878908
Got 9c9bdae0c8b07e8e834b3bd9f35ab93f3ddbae8365047b1596553eb70e996427
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pip/_internal/commands/cache.py`
Content:
```
1 from __future__ import absolute_import
2
3 import logging
4 import os
5 import textwrap
6
7 import pip._internal.utils.filesystem as filesystem
8 from pip._internal.cli.base_command import Command
9 from pip._internal.cli.status_codes import ERROR, SUCCESS
10 from pip._internal.exceptions import CommandError, PipError
11 from pip._internal.utils.typing import MYPY_CHECK_RUNNING
12
13 if MYPY_CHECK_RUNNING:
14 from optparse import Values
15 from typing import Any, List
16
17
18 logger = logging.getLogger(__name__)
19
20
21 class CacheCommand(Command):
22 """
23 Inspect and manage pip's wheel cache.
24
25 Subcommands:
26
27 - dir: Show the cache directory.
28 - info: Show information about the cache.
29 - list: List filenames of packages stored in the cache.
30 - remove: Remove one or more package from the cache.
31 - purge: Remove all items from the cache.
32
33 ``<pattern>`` can be a glob expression or a package name.
34 """
35
36 ignore_require_venv = True
37 usage = """
38 %prog dir
39 %prog info
40 %prog list [<pattern>] [--format=[human, abspath]]
41 %prog remove <pattern>
42 %prog purge
43 """
44
45 def add_options(self):
46 # type: () -> None
47
48 self.cmd_opts.add_option(
49 '--format',
50 action='store',
51 dest='list_format',
52 default="human",
53 choices=('human', 'abspath'),
54 help="Select the output format among: human (default) or abspath"
55 )
56
57 self.parser.insert_option_group(0, self.cmd_opts)
58
59 def run(self, options, args):
60 # type: (Values, List[Any]) -> int
61 handlers = {
62 "dir": self.get_cache_dir,
63 "info": self.get_cache_info,
64 "list": self.list_cache_items,
65 "remove": self.remove_cache_items,
66 "purge": self.purge_cache,
67 }
68
69 if not options.cache_dir:
70 logger.error("pip cache commands can not "
71 "function since cache is disabled.")
72 return ERROR
73
74 # Determine action
75 if not args or args[0] not in handlers:
76 logger.error(
77 "Need an action (%s) to perform.",
78 ", ".join(sorted(handlers)),
79 )
80 return ERROR
81
82 action = args[0]
83
84 # Error handling happens here, not in the action-handlers.
85 try:
86 handlers[action](options, args[1:])
87 except PipError as e:
88 logger.error(e.args[0])
89 return ERROR
90
91 return SUCCESS
92
93 def get_cache_dir(self, options, args):
94 # type: (Values, List[Any]) -> None
95 if args:
96 raise CommandError('Too many arguments')
97
98 logger.info(options.cache_dir)
99
100 def get_cache_info(self, options, args):
101 # type: (Values, List[Any]) -> None
102 if args:
103 raise CommandError('Too many arguments')
104
105 num_packages = len(self._find_wheels(options, '*'))
106
107 cache_location = self._wheels_cache_dir(options)
108 cache_size = filesystem.format_directory_size(cache_location)
109
110 message = textwrap.dedent("""
111 Location: {location}
112 Size: {size}
113 Number of wheels: {package_count}
114 """).format(
115 location=cache_location,
116 package_count=num_packages,
117 size=cache_size,
118 ).strip()
119
120 logger.info(message)
121
122 def list_cache_items(self, options, args):
123 # type: (Values, List[Any]) -> None
124 if len(args) > 1:
125 raise CommandError('Too many arguments')
126
127 if args:
128 pattern = args[0]
129 else:
130 pattern = '*'
131
132 files = self._find_wheels(options, pattern)
133 if options.list_format == 'human':
134 self.format_for_human(files)
135 else:
136 self.format_for_abspath(files)
137
138 def format_for_human(self, files):
139 # type: (List[str]) -> None
140 if not files:
141 logger.info('Nothing cached.')
142 return
143
144 results = []
145 for filename in files:
146 wheel = os.path.basename(filename)
147 size = filesystem.format_file_size(filename)
148 results.append(' - {} ({})'.format(wheel, size))
149 logger.info('Cache contents:\n')
150 logger.info('\n'.join(sorted(results)))
151
152 def format_for_abspath(self, files):
153 # type: (List[str]) -> None
154 if not files:
155 return
156
157 results = []
158 for filename in files:
159 results.append(filename)
160
161 logger.info('\n'.join(sorted(results)))
162
163 def remove_cache_items(self, options, args):
164 # type: (Values, List[Any]) -> None
165 if len(args) > 1:
166 raise CommandError('Too many arguments')
167
168 if not args:
169 raise CommandError('Please provide a pattern')
170
171 files = self._find_wheels(options, args[0])
172 if not files:
173 raise CommandError('No matching packages')
174
175 for filename in files:
176 os.unlink(filename)
177 logger.debug('Removed %s', filename)
178 logger.info('Files removed: %s', len(files))
179
180 def purge_cache(self, options, args):
181 # type: (Values, List[Any]) -> None
182 if args:
183 raise CommandError('Too many arguments')
184
185 return self.remove_cache_items(options, ['*'])
186
187 def _wheels_cache_dir(self, options):
188 # type: (Values) -> str
189 return os.path.join(options.cache_dir, 'wheels')
190
191 def _find_wheels(self, options, pattern):
192 # type: (Values, str) -> List[str]
193 wheel_dir = self._wheels_cache_dir(options)
194
195 # The wheel filename format, as specified in PEP 427, is:
196 # {distribution}-{version}(-{build})?-{python}-{abi}-{platform}.whl
197 #
198 # Additionally, non-alphanumeric values in the distribution are
199 # normalized to underscores (_), meaning hyphens can never occur
200 # before `-{version}`.
201 #
202 # Given that information:
203 # - If the pattern we're given contains a hyphen (-), the user is
204 # providing at least the version. Thus, we can just append `*.whl`
205 # to match the rest of it.
206 # - If the pattern we're given doesn't contain a hyphen (-), the
207 # user is only providing the name. Thus, we append `-*.whl` to
208 # match the hyphen before the version, followed by anything else.
209 #
210 # PEP 427: https://www.python.org/dev/peps/pep-0427/
211 pattern = pattern + ("*.whl" if "-" in pattern else "-*.whl")
212
213 return filesystem.find_files(wheel_dir, pattern)
214
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/pip/_internal/commands/cache.py b/src/pip/_internal/commands/cache.py
--- a/src/pip/_internal/commands/cache.py
+++ b/src/pip/_internal/commands/cache.py
@@ -102,19 +102,30 @@
if args:
raise CommandError('Too many arguments')
+ num_http_files = len(self._find_http_files(options))
num_packages = len(self._find_wheels(options, '*'))
- cache_location = self._wheels_cache_dir(options)
- cache_size = filesystem.format_directory_size(cache_location)
+ http_cache_location = self._cache_dir(options, 'http')
+ wheels_cache_location = self._cache_dir(options, 'wheels')
+ http_cache_size = filesystem.format_directory_size(http_cache_location)
+ wheels_cache_size = filesystem.format_directory_size(
+ wheels_cache_location
+ )
message = textwrap.dedent("""
- Location: {location}
- Size: {size}
+ Package index page cache location: {http_cache_location}
+ Package index page cache size: {http_cache_size}
+ Number of HTTP files: {num_http_files}
+ Wheels location: {wheels_cache_location}
+ Wheels size: {wheels_cache_size}
Number of wheels: {package_count}
""").format(
- location=cache_location,
+ http_cache_location=http_cache_location,
+ http_cache_size=http_cache_size,
+ num_http_files=num_http_files,
+ wheels_cache_location=wheels_cache_location,
package_count=num_packages,
- size=cache_size,
+ wheels_cache_size=wheels_cache_size,
).strip()
logger.info(message)
@@ -169,6 +180,11 @@
raise CommandError('Please provide a pattern')
files = self._find_wheels(options, args[0])
+
+ # Only fetch http files if no specific pattern given
+ if args[0] == '*':
+ files += self._find_http_files(options)
+
if not files:
raise CommandError('No matching packages')
@@ -184,13 +200,18 @@
return self.remove_cache_items(options, ['*'])
- def _wheels_cache_dir(self, options):
- # type: (Values) -> str
- return os.path.join(options.cache_dir, 'wheels')
+ def _cache_dir(self, options, subdir):
+ # type: (Values, str) -> str
+ return os.path.join(options.cache_dir, subdir)
+
+ def _find_http_files(self, options):
+ # type: (Values) -> List[str]
+ http_dir = self._cache_dir(options, 'http')
+ return filesystem.find_files(http_dir, '*')
def _find_wheels(self, options, pattern):
# type: (Values, str) -> List[str]
- wheel_dir = self._wheels_cache_dir(options)
+ wheel_dir = self._cache_dir(options, 'wheels')
# The wheel filename format, as specified in PEP 427, is:
# {distribution}-{version}(-{build})?-{python}-{abi}-{platform}.whl
|
{"golden_diff": "diff --git a/src/pip/_internal/commands/cache.py b/src/pip/_internal/commands/cache.py\n--- a/src/pip/_internal/commands/cache.py\n+++ b/src/pip/_internal/commands/cache.py\n@@ -102,19 +102,30 @@\n if args:\n raise CommandError('Too many arguments')\n \n+ num_http_files = len(self._find_http_files(options))\n num_packages = len(self._find_wheels(options, '*'))\n \n- cache_location = self._wheels_cache_dir(options)\n- cache_size = filesystem.format_directory_size(cache_location)\n+ http_cache_location = self._cache_dir(options, 'http')\n+ wheels_cache_location = self._cache_dir(options, 'wheels')\n+ http_cache_size = filesystem.format_directory_size(http_cache_location)\n+ wheels_cache_size = filesystem.format_directory_size(\n+ wheels_cache_location\n+ )\n \n message = textwrap.dedent(\"\"\"\n- Location: {location}\n- Size: {size}\n+ Package index page cache location: {http_cache_location}\n+ Package index page cache size: {http_cache_size}\n+ Number of HTTP files: {num_http_files}\n+ Wheels location: {wheels_cache_location}\n+ Wheels size: {wheels_cache_size}\n Number of wheels: {package_count}\n \"\"\").format(\n- location=cache_location,\n+ http_cache_location=http_cache_location,\n+ http_cache_size=http_cache_size,\n+ num_http_files=num_http_files,\n+ wheels_cache_location=wheels_cache_location,\n package_count=num_packages,\n- size=cache_size,\n+ wheels_cache_size=wheels_cache_size,\n ).strip()\n \n logger.info(message)\n@@ -169,6 +180,11 @@\n raise CommandError('Please provide a pattern')\n \n files = self._find_wheels(options, args[0])\n+\n+ # Only fetch http files if no specific pattern given\n+ if args[0] == '*':\n+ files += self._find_http_files(options)\n+\n if not files:\n raise CommandError('No matching packages')\n \n@@ -184,13 +200,18 @@\n \n return self.remove_cache_items(options, ['*'])\n \n- def _wheels_cache_dir(self, options):\n- # type: (Values) -> str\n- return os.path.join(options.cache_dir, 'wheels')\n+ def _cache_dir(self, options, subdir):\n+ # type: (Values, str) -> str\n+ return os.path.join(options.cache_dir, subdir)\n+\n+ def _find_http_files(self, options):\n+ # type: (Values) -> List[str]\n+ http_dir = self._cache_dir(options, 'http')\n+ return filesystem.find_files(http_dir, '*')\n \n def _find_wheels(self, options, pattern):\n # type: (Values, str) -> List[str]\n- wheel_dir = self._wheels_cache_dir(options)\n+ wheel_dir = self._cache_dir(options, 'wheels')\n \n # The wheel filename format, as specified in PEP 427, is:\n # {distribution}-{version}(-{build})?-{python}-{abi}-{platform}.whl\n", "issue": "Unable to clean http cache with pip\n<!--\r\nIf you're reporting an issue for `--use-feature=2020-resolver`, use the \"Dependency resolver failures / errors\" template instead.\r\n-->\r\n\r\n**Environment**\r\n\r\n* pip version: pip 20.2.3\r\n* Python version: Python 2.7 (32bit)\r\n* OS: Windows 10 (64bit)\r\n\r\n<!-- Feel free to add more information about your environment here -->\r\n\r\n**Description**\r\n<!-- A clear and concise description of what the bug is. -->\r\nWe noticed pip will cache the http response for future installation. However if the cached http response includes corruped data (network issue for example), installing the same package will result in pip cache error as follow: \r\n\r\n```\r\n(venv2) H:\\>pip install pyside\r\nDEPRECATION: Python 2.7 reached the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 is no longer maintained. pip 21.0 will drop support for Python 2.7 in January 2021. 
More details about Python 2 support in pip can be found at https://pip.pypa.io/en/latest/development/release-process/#python-2-support pip 21.0 will remove support for this functionality.\r\nCollecting pyside\r\n Using cached PySide-1.2.4-cp27-none-win32.whl (41.0 MB)\r\nERROR: THESE PACKAGES DO NOT MATCH THE HASHES FROM THE REQUIREMENTS FILE. If you have updated the package versions, please update the hashes. Otherwise, examine the package contents carefully; someone may have tampered with them.\r\n pyside from https://files.pythonhosted.org/packages/39/78/2b5fcd3b4ff4fc891f1c3a8bee09616d59fa6d644b0dc6252d46d8fbb423/PySide-1.2.4-cp27-none-win32.whl#sha256=104c0f3aee597e92c43b60f31205cbbd57a4307fea0fae4bbaeeb58b61878908:\r\n Expected sha256 104c0f3aee597e92c43b60f31205cbbd57a4307fea0fae4bbaeeb58b61878908\r\n Got 9c9bdae0c8b07e8e834b3bd9f35ab93f3ddbae8365047b1596553eb70e996427\r\n```\r\n\r\n\r\nThe symption of this issue has already been reported in https://github.com/pypa/warehouse/issues/8330. \r\n\r\n**Expected behavior**\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nIf the error is caused by the data corruption in http response cache then pip should invalidate the cache and try to re-download for best attempt. \r\n\r\nCurrently `pip cache purge` will not clear http response cache. The only solution to this case is to remove http directory from the file system forcing pip to rebuild http cache. \r\n\r\n**How to Reproduce**\r\n<!-- Describe the steps to reproduce this bug. -->\r\n\r\n1. Create a Python 2 virtual environment\r\n2. Download the example corruped file https://mega.nz/file/WsthyLTS#AWD7NmS-w9B62Q3Y8Lb4SvCalqCb1d83a5FniKPmFqY\r\n3. Overwrite http folder in C:\\Users\\<account name>\\AppData\\Local\\pip\\cache\\http\r\n4. Uninstall pyside and install pyside by `pip install pyside`\r\n\r\n\r\n**Output**\r\n\r\n```\r\n(venv2) H:\\>pip install pyside\r\nDEPRECATION: Python 2.7 reached the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 is no longer maintained. pip 21.0 will drop support for Python 2.7 in January 2021. More details about Python 2 support in pip can be found at https://pip.pypa.io/en/latest/development/release-process/#python-2-support pip 21.0 will remove support for this functionality.\r\nCollecting pyside\r\n Using cached PySide-1.2.4-cp27-none-win32.whl (41.0 MB)\r\nERROR: THESE PACKAGES DO NOT MATCH THE HASHES FROM THE REQUIREMENTS FILE. If you have updated the package versions, please update the hashes. 
Otherwise, examine the package contents carefully; someone may have tampered with them.\r\n pyside from https://files.pythonhosted.org/packages/39/78/2b5fcd3b4ff4fc891f1c3a8bee09616d59fa6d644b0dc6252d46d8fbb423/PySide-1.2.4-cp27-none-win32.whl#sha256=104c0f3aee597e92c43b60f31205cbbd57a4307fea0fae4bbaeeb58b61878908:\r\n Expected sha256 104c0f3aee597e92c43b60f31205cbbd57a4307fea0fae4bbaeeb58b61878908\r\n Got 9c9bdae0c8b07e8e834b3bd9f35ab93f3ddbae8365047b1596553eb70e996427\r\n```\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport os\nimport textwrap\n\nimport pip._internal.utils.filesystem as filesystem\nfrom pip._internal.cli.base_command import Command\nfrom pip._internal.cli.status_codes import ERROR, SUCCESS\nfrom pip._internal.exceptions import CommandError, PipError\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\n\nif MYPY_CHECK_RUNNING:\n from optparse import Values\n from typing import Any, List\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CacheCommand(Command):\n \"\"\"\n Inspect and manage pip's wheel cache.\n\n Subcommands:\n\n - dir: Show the cache directory.\n - info: Show information about the cache.\n - list: List filenames of packages stored in the cache.\n - remove: Remove one or more package from the cache.\n - purge: Remove all items from the cache.\n\n ``<pattern>`` can be a glob expression or a package name.\n \"\"\"\n\n ignore_require_venv = True\n usage = \"\"\"\n %prog dir\n %prog info\n %prog list [<pattern>] [--format=[human, abspath]]\n %prog remove <pattern>\n %prog purge\n \"\"\"\n\n def add_options(self):\n # type: () -> None\n\n self.cmd_opts.add_option(\n '--format',\n action='store',\n dest='list_format',\n default=\"human\",\n choices=('human', 'abspath'),\n help=\"Select the output format among: human (default) or abspath\"\n )\n\n self.parser.insert_option_group(0, self.cmd_opts)\n\n def run(self, options, args):\n # type: (Values, List[Any]) -> int\n handlers = {\n \"dir\": self.get_cache_dir,\n \"info\": self.get_cache_info,\n \"list\": self.list_cache_items,\n \"remove\": self.remove_cache_items,\n \"purge\": self.purge_cache,\n }\n\n if not options.cache_dir:\n logger.error(\"pip cache commands can not \"\n \"function since cache is disabled.\")\n return ERROR\n\n # Determine action\n if not args or args[0] not in handlers:\n logger.error(\n \"Need an action (%s) to perform.\",\n \", \".join(sorted(handlers)),\n )\n return ERROR\n\n action = args[0]\n\n # Error handling happens here, not in the action-handlers.\n try:\n handlers[action](options, args[1:])\n except PipError as e:\n logger.error(e.args[0])\n return ERROR\n\n return SUCCESS\n\n def get_cache_dir(self, options, args):\n # type: (Values, List[Any]) -> None\n if args:\n raise CommandError('Too many arguments')\n\n logger.info(options.cache_dir)\n\n def get_cache_info(self, options, args):\n # type: (Values, List[Any]) -> None\n if args:\n raise CommandError('Too many arguments')\n\n num_packages = len(self._find_wheels(options, '*'))\n\n cache_location = self._wheels_cache_dir(options)\n cache_size = filesystem.format_directory_size(cache_location)\n\n message = textwrap.dedent(\"\"\"\n Location: {location}\n Size: {size}\n Number of wheels: {package_count}\n \"\"\").format(\n location=cache_location,\n package_count=num_packages,\n size=cache_size,\n ).strip()\n\n logger.info(message)\n\n def list_cache_items(self, options, args):\n # type: (Values, List[Any]) -> None\n if len(args) > 1:\n raise CommandError('Too many 
arguments')\n\n if args:\n pattern = args[0]\n else:\n pattern = '*'\n\n files = self._find_wheels(options, pattern)\n if options.list_format == 'human':\n self.format_for_human(files)\n else:\n self.format_for_abspath(files)\n\n def format_for_human(self, files):\n # type: (List[str]) -> None\n if not files:\n logger.info('Nothing cached.')\n return\n\n results = []\n for filename in files:\n wheel = os.path.basename(filename)\n size = filesystem.format_file_size(filename)\n results.append(' - {} ({})'.format(wheel, size))\n logger.info('Cache contents:\\n')\n logger.info('\\n'.join(sorted(results)))\n\n def format_for_abspath(self, files):\n # type: (List[str]) -> None\n if not files:\n return\n\n results = []\n for filename in files:\n results.append(filename)\n\n logger.info('\\n'.join(sorted(results)))\n\n def remove_cache_items(self, options, args):\n # type: (Values, List[Any]) -> None\n if len(args) > 1:\n raise CommandError('Too many arguments')\n\n if not args:\n raise CommandError('Please provide a pattern')\n\n files = self._find_wheels(options, args[0])\n if not files:\n raise CommandError('No matching packages')\n\n for filename in files:\n os.unlink(filename)\n logger.debug('Removed %s', filename)\n logger.info('Files removed: %s', len(files))\n\n def purge_cache(self, options, args):\n # type: (Values, List[Any]) -> None\n if args:\n raise CommandError('Too many arguments')\n\n return self.remove_cache_items(options, ['*'])\n\n def _wheels_cache_dir(self, options):\n # type: (Values) -> str\n return os.path.join(options.cache_dir, 'wheels')\n\n def _find_wheels(self, options, pattern):\n # type: (Values, str) -> List[str]\n wheel_dir = self._wheels_cache_dir(options)\n\n # The wheel filename format, as specified in PEP 427, is:\n # {distribution}-{version}(-{build})?-{python}-{abi}-{platform}.whl\n #\n # Additionally, non-alphanumeric values in the distribution are\n # normalized to underscores (_), meaning hyphens can never occur\n # before `-{version}`.\n #\n # Given that information:\n # - If the pattern we're given contains a hyphen (-), the user is\n # providing at least the version. Thus, we can just append `*.whl`\n # to match the rest of it.\n # - If the pattern we're given doesn't contain a hyphen (-), the\n # user is only providing the name. 
Thus, we append `-*.whl` to\n # match the hyphen before the version, followed by anything else.\n #\n # PEP 427: https://www.python.org/dev/peps/pep-0427/\n pattern = pattern + (\"*.whl\" if \"-\" in pattern else \"-*.whl\")\n\n return filesystem.find_files(wheel_dir, pattern)\n", "path": "src/pip/_internal/commands/cache.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport os\nimport textwrap\n\nimport pip._internal.utils.filesystem as filesystem\nfrom pip._internal.cli.base_command import Command\nfrom pip._internal.cli.status_codes import ERROR, SUCCESS\nfrom pip._internal.exceptions import CommandError, PipError\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\n\nif MYPY_CHECK_RUNNING:\n from optparse import Values\n from typing import Any, List\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CacheCommand(Command):\n \"\"\"\n Inspect and manage pip's wheel cache.\n\n Subcommands:\n\n - dir: Show the cache directory.\n - info: Show information about the cache.\n - list: List filenames of packages stored in the cache.\n - remove: Remove one or more package from the cache.\n - purge: Remove all items from the cache.\n\n ``<pattern>`` can be a glob expression or a package name.\n \"\"\"\n\n ignore_require_venv = True\n usage = \"\"\"\n %prog dir\n %prog info\n %prog list [<pattern>] [--format=[human, abspath]]\n %prog remove <pattern>\n %prog purge\n \"\"\"\n\n def add_options(self):\n # type: () -> None\n\n self.cmd_opts.add_option(\n '--format',\n action='store',\n dest='list_format',\n default=\"human\",\n choices=('human', 'abspath'),\n help=\"Select the output format among: human (default) or abspath\"\n )\n\n self.parser.insert_option_group(0, self.cmd_opts)\n\n def run(self, options, args):\n # type: (Values, List[Any]) -> int\n handlers = {\n \"dir\": self.get_cache_dir,\n \"info\": self.get_cache_info,\n \"list\": self.list_cache_items,\n \"remove\": self.remove_cache_items,\n \"purge\": self.purge_cache,\n }\n\n if not options.cache_dir:\n logger.error(\"pip cache commands can not \"\n \"function since cache is disabled.\")\n return ERROR\n\n # Determine action\n if not args or args[0] not in handlers:\n logger.error(\n \"Need an action (%s) to perform.\",\n \", \".join(sorted(handlers)),\n )\n return ERROR\n\n action = args[0]\n\n # Error handling happens here, not in the action-handlers.\n try:\n handlers[action](options, args[1:])\n except PipError as e:\n logger.error(e.args[0])\n return ERROR\n\n return SUCCESS\n\n def get_cache_dir(self, options, args):\n # type: (Values, List[Any]) -> None\n if args:\n raise CommandError('Too many arguments')\n\n logger.info(options.cache_dir)\n\n def get_cache_info(self, options, args):\n # type: (Values, List[Any]) -> None\n if args:\n raise CommandError('Too many arguments')\n\n num_http_files = len(self._find_http_files(options))\n num_packages = len(self._find_wheels(options, '*'))\n\n http_cache_location = self._cache_dir(options, 'http')\n wheels_cache_location = self._cache_dir(options, 'wheels')\n http_cache_size = filesystem.format_directory_size(http_cache_location)\n wheels_cache_size = filesystem.format_directory_size(\n wheels_cache_location\n )\n\n message = textwrap.dedent(\"\"\"\n Package index page cache location: {http_cache_location}\n Package index page cache size: {http_cache_size}\n Number of HTTP files: {num_http_files}\n Wheels location: {wheels_cache_location}\n Wheels size: {wheels_cache_size}\n Number of wheels: {package_count}\n \"\"\").format(\n 
http_cache_location=http_cache_location,\n http_cache_size=http_cache_size,\n num_http_files=num_http_files,\n wheels_cache_location=wheels_cache_location,\n package_count=num_packages,\n wheels_cache_size=wheels_cache_size,\n ).strip()\n\n logger.info(message)\n\n def list_cache_items(self, options, args):\n # type: (Values, List[Any]) -> None\n if len(args) > 1:\n raise CommandError('Too many arguments')\n\n if args:\n pattern = args[0]\n else:\n pattern = '*'\n\n files = self._find_wheels(options, pattern)\n if options.list_format == 'human':\n self.format_for_human(files)\n else:\n self.format_for_abspath(files)\n\n def format_for_human(self, files):\n # type: (List[str]) -> None\n if not files:\n logger.info('Nothing cached.')\n return\n\n results = []\n for filename in files:\n wheel = os.path.basename(filename)\n size = filesystem.format_file_size(filename)\n results.append(' - {} ({})'.format(wheel, size))\n logger.info('Cache contents:\\n')\n logger.info('\\n'.join(sorted(results)))\n\n def format_for_abspath(self, files):\n # type: (List[str]) -> None\n if not files:\n return\n\n results = []\n for filename in files:\n results.append(filename)\n\n logger.info('\\n'.join(sorted(results)))\n\n def remove_cache_items(self, options, args):\n # type: (Values, List[Any]) -> None\n if len(args) > 1:\n raise CommandError('Too many arguments')\n\n if not args:\n raise CommandError('Please provide a pattern')\n\n files = self._find_wheels(options, args[0])\n\n # Only fetch http files if no specific pattern given\n if args[0] == '*':\n files += self._find_http_files(options)\n\n if not files:\n raise CommandError('No matching packages')\n\n for filename in files:\n os.unlink(filename)\n logger.debug('Removed %s', filename)\n logger.info('Files removed: %s', len(files))\n\n def purge_cache(self, options, args):\n # type: (Values, List[Any]) -> None\n if args:\n raise CommandError('Too many arguments')\n\n return self.remove_cache_items(options, ['*'])\n\n def _cache_dir(self, options, subdir):\n # type: (Values, str) -> str\n return os.path.join(options.cache_dir, subdir)\n\n def _find_http_files(self, options):\n # type: (Values) -> List[str]\n http_dir = self._cache_dir(options, 'http')\n return filesystem.find_files(http_dir, '*')\n\n def _find_wheels(self, options, pattern):\n # type: (Values, str) -> List[str]\n wheel_dir = self._cache_dir(options, 'wheels')\n\n # The wheel filename format, as specified in PEP 427, is:\n # {distribution}-{version}(-{build})?-{python}-{abi}-{platform}.whl\n #\n # Additionally, non-alphanumeric values in the distribution are\n # normalized to underscores (_), meaning hyphens can never occur\n # before `-{version}`.\n #\n # Given that information:\n # - If the pattern we're given contains a hyphen (-), the user is\n # providing at least the version. Thus, we can just append `*.whl`\n # to match the rest of it.\n # - If the pattern we're given doesn't contain a hyphen (-), the\n # user is only providing the name. Thus, we append `-*.whl` to\n # match the hyphen before the version, followed by anything else.\n #\n # PEP 427: https://www.python.org/dev/peps/pep-0427/\n pattern = pattern + (\"*.whl\" if \"-\" in pattern else \"-*.whl\")\n\n return filesystem.find_files(wheel_dir, pattern)\n", "path": "src/pip/_internal/commands/cache.py"}]}
| 3,673 | 712 |
gh_patches_debug_20225
|
rasdani/github-patches
|
git_diff
|
pyro-ppl__pyro-2228
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Student T tests fail on GPU [bug]
```
self = MultivariateStudentT(df: 1.5, loc: torch.Size([2]), scale_tril: torch.Size([2, 2]))
sample_shape = torch.Size([])
def rsample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
X = torch.empty(shape, dtype=self.df.dtype, device=self.df.device).normal_()
Z = self._chi2.rsample(sample_shape)
Y = X * torch.rsqrt(Z / self.df).unsqueeze(-1)
> return self.loc + self.scale_tril.matmul(Y.unsqueeze(-1)).squeeze(-1)
E RuntimeError: Expected object of device type cuda but got device type cpu for argument #2 'mat2' in call to _th_mm
pyro/distributions/multivariate_studentt.py:74: RuntimeError
```
This issue was discussed in #2226 - running `make test` on the dev branch errors out for me if running on a machine with CUDA. I am guessing this hasn't shown up in the CI because it uses a CPU-only machine.
I think this bug is pretty simple - it happens because, as we can see in the above snippet, Y inherits its device from self.df, and in the fixture, self.df is set to a scalar value. This is not converted into a tensor by the tensors_default_to context manager, and so isn't sent to the GPU.
I fixed this in #2226 by changing the fixture, but @fritzo suggested that it might suggest a missing coercion rather than a change to the fixture, so that change in the PR was reverted and I am opening this issue instead.
--- END ISSUE ---
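For illustration, a minimal sketch of the kind of coercion hinted at above: promoting a scalar `df` to a tensor that shares `loc`'s dtype and device, so that tensors later built from `self.df.device` line up with `loc` and `scale_tril`. The helper name is ours and purely illustrative:
```python
import torch

def coerce_df_like(df, loc):
    # If df arrives as a plain Python number, build it on loc's device/dtype so
    # anything created from df.device (e.g. X in rsample) matches loc/scale_tril.
    if not isinstance(df, torch.Tensor):
        df = loc.new_tensor(df)
    return df

loc = torch.zeros(2)            # torch.zeros(2, device="cuda") on a GPU machine
df = coerce_df_like(1.5, loc)   # tensor(1.5000) on the same device as loc
```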
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyro/distributions/multivariate_studentt.py`
Content:
```
1 import math
2
3 import torch
4 from torch.distributions import constraints
5 from torch.distributions.utils import broadcast_all, lazy_property
6
7 from pyro.distributions.torch import Chi2
8 from pyro.distributions.torch_distribution import TorchDistribution
9 from pyro.distributions.util import broadcast_shape
10
11
12 class MultivariateStudentT(TorchDistribution):
13 """
14 Creates a multivariate Student's t-distribution parameterized by degree of
15 freedom :attr:`df`, mean :attr:`loc` and scale :attr:`scale_tril`.
16
17 :param ~torch.Tensor df: degrees of freedom
18 :param ~torch.Tensor loc: mean of the distribution
19 :param ~torch.Tensor scale_tril: scale of the distribution, which is
20 a lower triangular matrix with positive diagonal entries
21 """
22 arg_constraints = {'df': constraints.positive,
23 'loc': constraints.real_vector,
24 'scale_tril': constraints.lower_cholesky}
25 support = constraints.real_vector
26 has_rsample = True
27
28 def __init__(self, df, loc, scale_tril, validate_args=None):
29 dim = loc.size(-1)
30 assert scale_tril.shape[-2:] == (dim, dim)
31 df, = broadcast_all(df)
32 batch_shape = broadcast_shape(df.shape, loc.shape[:-1], scale_tril.shape[:-2])
33 event_shape = (dim,)
34 self.df = df.expand(batch_shape)
35 self.loc = loc
36 self.scale_tril = scale_tril
37 self._chi2 = Chi2(self.df)
38 super(MultivariateStudentT, self).__init__(batch_shape, event_shape, validate_args=validate_args)
39
40 @lazy_property
41 def covariance_matrix(self):
42 # NB: this is not covariance of this distribution;
43 # the actual covariance is df / (df - 2) * covariance_matrix
44 return torch.matmul(self.scale_tril, self.scale_tril.transpose(-1, -2))
45
46 @lazy_property
47 def precision_matrix(self):
48 identity = torch.eye(self.loc.size(-1), device=self.loc.device, dtype=self.loc.dtype)
49 scale_inv = identity.triangular_solve(self.scale_tril, upper=False).solution.transpose(-1, -2)
50 return torch.matmul(scale_inv.transpose(-1, -2), scale_inv)
51
52 def expand(self, batch_shape, _instance=None):
53 new = self._get_checked_instance(MultivariateStudentT, _instance)
54 batch_shape = torch.Size(batch_shape)
55 loc_shape = batch_shape + self.event_shape
56 scale_shape = loc_shape + self.event_shape
57 new.df = self.df.expand(batch_shape)
58 new.loc = self.loc.expand(loc_shape)
59 new.scale_tril = self.scale_tril.expand(scale_shape)
60 if 'covariance_matrix' in self.__dict__:
61 new.covariance_matrix = self.covariance_matrix.expand(scale_shape)
62 if 'precision_matrix' in self.__dict__:
63 new.precision_matrix = self.precision_matrix.expand(scale_shape)
64 new._chi2 = self._chi2.expand(batch_shape)
65 super(MultivariateStudentT, new).__init__(batch_shape, self.event_shape, validate_args=False)
66 new._validate_args = self._validate_args
67 return new
68
69 def rsample(self, sample_shape=torch.Size()):
70 shape = self._extended_shape(sample_shape)
71 X = torch.empty(shape, dtype=self.df.dtype, device=self.df.device).normal_()
72 Z = self._chi2.rsample(sample_shape)
73 Y = X * torch.rsqrt(Z / self.df).unsqueeze(-1)
74 return self.loc + self.scale_tril.matmul(Y.unsqueeze(-1)).squeeze(-1)
75
76 def log_prob(self, value):
77 if self._validate_args:
78 self._validate_sample(value)
79 n = self.loc.size(-1)
80 y = (value - self.loc).unsqueeze(-1).triangular_solve(self.scale_tril, upper=False).solution.squeeze(-1)
81 Z = (self.scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1) +
82 0.5 * n * self.df.log() +
83 0.5 * n * math.log(math.pi) +
84 torch.lgamma(0.5 * self.df) -
85 torch.lgamma(0.5 * (self.df + n)))
86 return -0.5 * (self.df + n) * torch.log1p(y.pow(2).sum(-1) / self.df) - Z
87
88 @property
89 def mean(self):
90 m = self.loc.clone()
91 m[self.df <= 1, :] = float('nan')
92 return m
93
94 @property
95 def variance(self):
96 m = self.scale_tril.pow(2).sum(-1) * (self.df / (self.df - 2)).unsqueeze(-1)
97 m[(self.df <= 2) & (self.df > 1), :] = float('inf')
98 m[self.df <= 1, :] = float('nan')
99 return m
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pyro/distributions/multivariate_studentt.py b/pyro/distributions/multivariate_studentt.py
--- a/pyro/distributions/multivariate_studentt.py
+++ b/pyro/distributions/multivariate_studentt.py
@@ -2,7 +2,7 @@
import torch
from torch.distributions import constraints
-from torch.distributions.utils import broadcast_all, lazy_property
+from torch.distributions.utils import lazy_property
from pyro.distributions.torch import Chi2
from pyro.distributions.torch_distribution import TorchDistribution
@@ -28,7 +28,8 @@
def __init__(self, df, loc, scale_tril, validate_args=None):
dim = loc.size(-1)
assert scale_tril.shape[-2:] == (dim, dim)
- df, = broadcast_all(df)
+ if not isinstance(df, torch.Tensor):
+ df = loc.new_tensor(df)
batch_shape = broadcast_shape(df.shape, loc.shape[:-1], scale_tril.shape[:-2])
event_shape = (dim,)
self.df = df.expand(batch_shape)
|
{"golden_diff": "diff --git a/pyro/distributions/multivariate_studentt.py b/pyro/distributions/multivariate_studentt.py\n--- a/pyro/distributions/multivariate_studentt.py\n+++ b/pyro/distributions/multivariate_studentt.py\n@@ -2,7 +2,7 @@\n \n import torch\n from torch.distributions import constraints\n-from torch.distributions.utils import broadcast_all, lazy_property\n+from torch.distributions.utils import lazy_property\n \n from pyro.distributions.torch import Chi2\n from pyro.distributions.torch_distribution import TorchDistribution\n@@ -28,7 +28,8 @@\n def __init__(self, df, loc, scale_tril, validate_args=None):\n dim = loc.size(-1)\n assert scale_tril.shape[-2:] == (dim, dim)\n- df, = broadcast_all(df)\n+ if not isinstance(df, torch.Tensor):\n+ df = loc.new_tensor(df)\n batch_shape = broadcast_shape(df.shape, loc.shape[:-1], scale_tril.shape[:-2])\n event_shape = (dim,)\n self.df = df.expand(batch_shape)\n", "issue": "Student T tests fail on GPU [bug]\n```self = MultivariateStudentT(df: 1.5, loc: torch.Size([2]), scale_tril: torch.Size([2, 2]))\r\nsample_shape = torch.Size([])\r\n\r\n def rsample(self, sample_shape=torch.Size()):\r\n shape = self._extended_shape(sample_shape)\r\n X = torch.empty(shape, dtype=self.df.dtype, device=self.df.device).normal_()\r\n Z = self._chi2.rsample(sample_shape)\r\n Y = X * torch.rsqrt(Z / self.df).unsqueeze(-1)\r\n> return self.loc + self.scale_tril.matmul(Y.unsqueeze(-1)).squeeze(-1)\r\nE RuntimeError: Expected object of device type cuda but got device type cpu for argument #2 'mat2' in call to _th_mm\r\n\r\npyro/distributions/multivariate_studentt.py:74: RuntimeError\r\n```\r\n\r\nThis issue was discussed in #2226 - running `make test` on the dev branch errors out for me if running on a machine with cuda. I am guessing this hasn't shown up in the CI because it uses a cpu only machine. \r\n\r\nI think this bug is pretty simple - it happens because, as we can see in the above snippet, y inherits its device from self.df, and in the fixture, self.df is set to a scalar value. This is not converted into a tensor by the tensors_default_to context manager, and so isn't sent to the gpu. 
\r\n\r\nI fixed this in #2226 by changing the fixture, but @fritzo suggested that it might suggest a missing coercion rather than a change to the fixture, so that change in the PR was reverted and I am opening this issue instead.\r\n\n", "before_files": [{"content": "import math\n\nimport torch\nfrom torch.distributions import constraints\nfrom torch.distributions.utils import broadcast_all, lazy_property\n\nfrom pyro.distributions.torch import Chi2\nfrom pyro.distributions.torch_distribution import TorchDistribution\nfrom pyro.distributions.util import broadcast_shape\n\n\nclass MultivariateStudentT(TorchDistribution):\n \"\"\"\n Creates a multivariate Student's t-distribution parameterized by degree of\n freedom :attr:`df`, mean :attr:`loc` and scale :attr:`scale_tril`.\n\n :param ~torch.Tensor df: degrees of freedom\n :param ~torch.Tensor loc: mean of the distribution\n :param ~torch.Tensor scale_tril: scale of the distribution, which is\n a lower triangular matrix with positive diagonal entries\n \"\"\"\n arg_constraints = {'df': constraints.positive,\n 'loc': constraints.real_vector,\n 'scale_tril': constraints.lower_cholesky}\n support = constraints.real_vector\n has_rsample = True\n\n def __init__(self, df, loc, scale_tril, validate_args=None):\n dim = loc.size(-1)\n assert scale_tril.shape[-2:] == (dim, dim)\n df, = broadcast_all(df)\n batch_shape = broadcast_shape(df.shape, loc.shape[:-1], scale_tril.shape[:-2])\n event_shape = (dim,)\n self.df = df.expand(batch_shape)\n self.loc = loc\n self.scale_tril = scale_tril\n self._chi2 = Chi2(self.df)\n super(MultivariateStudentT, self).__init__(batch_shape, event_shape, validate_args=validate_args)\n\n @lazy_property\n def covariance_matrix(self):\n # NB: this is not covariance of this distribution;\n # the actual covariance is df / (df - 2) * covariance_matrix\n return torch.matmul(self.scale_tril, self.scale_tril.transpose(-1, -2))\n\n @lazy_property\n def precision_matrix(self):\n identity = torch.eye(self.loc.size(-1), device=self.loc.device, dtype=self.loc.dtype)\n scale_inv = identity.triangular_solve(self.scale_tril, upper=False).solution.transpose(-1, -2)\n return torch.matmul(scale_inv.transpose(-1, -2), scale_inv)\n\n def expand(self, batch_shape, _instance=None):\n new = self._get_checked_instance(MultivariateStudentT, _instance)\n batch_shape = torch.Size(batch_shape)\n loc_shape = batch_shape + self.event_shape\n scale_shape = loc_shape + self.event_shape\n new.df = self.df.expand(batch_shape)\n new.loc = self.loc.expand(loc_shape)\n new.scale_tril = self.scale_tril.expand(scale_shape)\n if 'covariance_matrix' in self.__dict__:\n new.covariance_matrix = self.covariance_matrix.expand(scale_shape)\n if 'precision_matrix' in self.__dict__:\n new.precision_matrix = self.precision_matrix.expand(scale_shape)\n new._chi2 = self._chi2.expand(batch_shape)\n super(MultivariateStudentT, new).__init__(batch_shape, self.event_shape, validate_args=False)\n new._validate_args = self._validate_args\n return new\n\n def rsample(self, sample_shape=torch.Size()):\n shape = self._extended_shape(sample_shape)\n X = torch.empty(shape, dtype=self.df.dtype, device=self.df.device).normal_()\n Z = self._chi2.rsample(sample_shape)\n Y = X * torch.rsqrt(Z / self.df).unsqueeze(-1)\n return self.loc + self.scale_tril.matmul(Y.unsqueeze(-1)).squeeze(-1)\n\n def log_prob(self, value):\n if self._validate_args:\n self._validate_sample(value)\n n = self.loc.size(-1)\n y = (value - self.loc).unsqueeze(-1).triangular_solve(self.scale_tril, 
upper=False).solution.squeeze(-1)\n Z = (self.scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1) +\n 0.5 * n * self.df.log() +\n 0.5 * n * math.log(math.pi) +\n torch.lgamma(0.5 * self.df) -\n torch.lgamma(0.5 * (self.df + n)))\n return -0.5 * (self.df + n) * torch.log1p(y.pow(2).sum(-1) / self.df) - Z\n\n @property\n def mean(self):\n m = self.loc.clone()\n m[self.df <= 1, :] = float('nan')\n return m\n\n @property\n def variance(self):\n m = self.scale_tril.pow(2).sum(-1) * (self.df / (self.df - 2)).unsqueeze(-1)\n m[(self.df <= 2) & (self.df > 1), :] = float('inf')\n m[self.df <= 1, :] = float('nan')\n return m\n", "path": "pyro/distributions/multivariate_studentt.py"}], "after_files": [{"content": "import math\n\nimport torch\nfrom torch.distributions import constraints\nfrom torch.distributions.utils import lazy_property\n\nfrom pyro.distributions.torch import Chi2\nfrom pyro.distributions.torch_distribution import TorchDistribution\nfrom pyro.distributions.util import broadcast_shape\n\n\nclass MultivariateStudentT(TorchDistribution):\n \"\"\"\n Creates a multivariate Student's t-distribution parameterized by degree of\n freedom :attr:`df`, mean :attr:`loc` and scale :attr:`scale_tril`.\n\n :param ~torch.Tensor df: degrees of freedom\n :param ~torch.Tensor loc: mean of the distribution\n :param ~torch.Tensor scale_tril: scale of the distribution, which is\n a lower triangular matrix with positive diagonal entries\n \"\"\"\n arg_constraints = {'df': constraints.positive,\n 'loc': constraints.real_vector,\n 'scale_tril': constraints.lower_cholesky}\n support = constraints.real_vector\n has_rsample = True\n\n def __init__(self, df, loc, scale_tril, validate_args=None):\n dim = loc.size(-1)\n assert scale_tril.shape[-2:] == (dim, dim)\n if not isinstance(df, torch.Tensor):\n df = loc.new_tensor(df)\n batch_shape = broadcast_shape(df.shape, loc.shape[:-1], scale_tril.shape[:-2])\n event_shape = (dim,)\n self.df = df.expand(batch_shape)\n self.loc = loc\n self.scale_tril = scale_tril\n self._chi2 = Chi2(self.df)\n super(MultivariateStudentT, self).__init__(batch_shape, event_shape, validate_args=validate_args)\n\n @lazy_property\n def covariance_matrix(self):\n # NB: this is not covariance of this distribution;\n # the actual covariance is df / (df - 2) * covariance_matrix\n return torch.matmul(self.scale_tril, self.scale_tril.transpose(-1, -2))\n\n @lazy_property\n def precision_matrix(self):\n identity = torch.eye(self.loc.size(-1), device=self.loc.device, dtype=self.loc.dtype)\n scale_inv = identity.triangular_solve(self.scale_tril, upper=False).solution.transpose(-1, -2)\n return torch.matmul(scale_inv.transpose(-1, -2), scale_inv)\n\n def expand(self, batch_shape, _instance=None):\n new = self._get_checked_instance(MultivariateStudentT, _instance)\n batch_shape = torch.Size(batch_shape)\n loc_shape = batch_shape + self.event_shape\n scale_shape = loc_shape + self.event_shape\n new.df = self.df.expand(batch_shape)\n new.loc = self.loc.expand(loc_shape)\n new.scale_tril = self.scale_tril.expand(scale_shape)\n if 'covariance_matrix' in self.__dict__:\n new.covariance_matrix = self.covariance_matrix.expand(scale_shape)\n if 'precision_matrix' in self.__dict__:\n new.precision_matrix = self.precision_matrix.expand(scale_shape)\n new._chi2 = self._chi2.expand(batch_shape)\n super(MultivariateStudentT, new).__init__(batch_shape, self.event_shape, validate_args=False)\n new._validate_args = self._validate_args\n return new\n\n def rsample(self, sample_shape=torch.Size()):\n shape = 
self._extended_shape(sample_shape)\n X = torch.empty(shape, dtype=self.df.dtype, device=self.df.device).normal_()\n Z = self._chi2.rsample(sample_shape)\n Y = X * torch.rsqrt(Z / self.df).unsqueeze(-1)\n return self.loc + self.scale_tril.matmul(Y.unsqueeze(-1)).squeeze(-1)\n\n def log_prob(self, value):\n if self._validate_args:\n self._validate_sample(value)\n n = self.loc.size(-1)\n y = (value - self.loc).unsqueeze(-1).triangular_solve(self.scale_tril, upper=False).solution.squeeze(-1)\n Z = (self.scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1) +\n 0.5 * n * self.df.log() +\n 0.5 * n * math.log(math.pi) +\n torch.lgamma(0.5 * self.df) -\n torch.lgamma(0.5 * (self.df + n)))\n return -0.5 * (self.df + n) * torch.log1p(y.pow(2).sum(-1) / self.df) - Z\n\n @property\n def mean(self):\n m = self.loc.clone()\n m[self.df <= 1, :] = float('nan')\n return m\n\n @property\n def variance(self):\n m = self.scale_tril.pow(2).sum(-1) * (self.df / (self.df - 2)).unsqueeze(-1)\n m[(self.df <= 2) & (self.df > 1), :] = float('inf')\n m[self.df <= 1, :] = float('nan')\n return m\n", "path": "pyro/distributions/multivariate_studentt.py"}]}
| 1,888 | 237 |
gh_patches_debug_22396
|
rasdani/github-patches
|
git_diff
|
freqtrade__freqtrade-5847
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Kucoin] {"code":"429000","msg":"Too Many Requests"}. Able to ignore this error and prevent DDOS protection?
<!--
Have you searched for similar issues before posting it?
If you have discovered a bug in the bot, please [search our issue tracker](https://github.com/freqtrade/freqtrade/issues?q=is%3Aissue).
If it hasn't been reported, please create a new issue.
Please do not use bug reports to request new features.
-->
## Describe your environment
* Operating system: Canonical Ubuntu
* Python Version:
* CCXT version: 1.56.86
* Freqtrade Version:
Note: All issues other than enhancement requests will be closed without further comment if the above template is deleted or not filled out.
## Describe the problem:
{"code":"429000","msg":"Too Many Requests"} error triggering DDOS protection delay.
### Steps to reproduce:
Randomly affects pairs on Kucoin
### Observed Results:
DDOS protection triggering when it doesn't need to, which may cause unintended and financially ruinous behavior in the bot.
An admin on Telegram says we can retry the request instantly without fear of DDOS protection triggering.
### Relevant code exceptions or logs
freqtrade | 2021-10-11 04:55:04,274 - freqtrade.exchange.common - WARNING - _async_get_candle_history() returned exception: "kucoin GET https://openapi-v2.kucoin.com/api/v1/market/candles?symbol=DYDX-USDT&type=5min&startAt=1633778101&endAt=1633928101 429 Too Many Requests {"code":"429000","msg":"Too Many Requests"}"
freqtrade | 2021-10-11 04:55:04,275 - freqtrade.exchange.common - WARNING - retrying _async_get_candle_history() still for 4 times
freqtrade | 2021-10-11 04:55:04,275 - freqtrade.exchange.common - INFO - Applying DDosProtection backoff delay: 1
--- END ISSUE ---
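As a rough sketch of the workaround discussed above (the helper name is hypothetical, and it assumes the exchange name and error code show up in the exception text), a retrier could detect this specific response and skip only the backoff delay while still counting down retries:
```python
def is_kucoin_429000(ex: Exception) -> bool:
    # Kucoin's "429000 Too Many Requests" can reportedly be retried immediately,
    # so the DDosProtection backoff delay can be skipped for this error alone.
    msg = str(ex)
    return "kucoin" in msg and "429000" in msg

# Inside retrier_async's DDosProtection branch this would read roughly:
#   if not is_kucoin_429000(ex):
#       await asyncio.sleep(calculate_backoff(count + 1, API_RETRY_COUNT))
```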
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `freqtrade/exchange/common.py`
Content:
```
1 import asyncio
2 import logging
3 import time
4 from functools import wraps
5
6 from freqtrade.exceptions import DDosProtection, RetryableOrderError, TemporaryError
7
8
9 logger = logging.getLogger(__name__)
10
11
12 # Maximum default retry count.
13 # Functions are always called RETRY_COUNT + 1 times (for the original call)
14 API_RETRY_COUNT = 4
15 API_FETCH_ORDER_RETRY_COUNT = 5
16
17 BAD_EXCHANGES = {
18 "bitmex": "Various reasons.",
19 "phemex": "Does not provide history. ",
20 "poloniex": "Does not provide fetch_order endpoint to fetch both open and closed orders.",
21 }
22
23 MAP_EXCHANGE_CHILDCLASS = {
24 'binanceus': 'binance',
25 'binanceje': 'binance',
26 }
27
28
29 EXCHANGE_HAS_REQUIRED = [
30 # Required / private
31 'fetchOrder',
32 'cancelOrder',
33 'createOrder',
34 # 'createLimitOrder', 'createMarketOrder',
35 'fetchBalance',
36
37 # Public endpoints
38 'loadMarkets',
39 'fetchOHLCV',
40 ]
41
42 EXCHANGE_HAS_OPTIONAL = [
43 # Private
44 'fetchMyTrades', # Trades for order - fee detection
45 # Public
46 'fetchOrderBook', 'fetchL2OrderBook', 'fetchTicker', # OR for pricing
47 'fetchTickers', # For volumepairlist?
48 'fetchTrades', # Downloading trades data
49 ]
50
51
52 def remove_credentials(config) -> None:
53 """
54 Removes exchange keys from the configuration and specifies dry-run
55 Used for backtesting / hyperopt / edge and utils.
56 Modifies the input dict!
57 """
58 if config.get('dry_run', False):
59 config['exchange']['key'] = ''
60 config['exchange']['secret'] = ''
61 config['exchange']['password'] = ''
62 config['exchange']['uid'] = ''
63
64
65 def calculate_backoff(retrycount, max_retries):
66 """
67 Calculate backoff
68 """
69 return (max_retries - retrycount) ** 2 + 1
70
71
72 def retrier_async(f):
73 async def wrapper(*args, **kwargs):
74 count = kwargs.pop('count', API_RETRY_COUNT)
75 try:
76 return await f(*args, **kwargs)
77 except TemporaryError as ex:
78 logger.warning('%s() returned exception: "%s"', f.__name__, ex)
79 if count > 0:
80 logger.warning('retrying %s() still for %s times', f.__name__, count)
81 count -= 1
82 kwargs.update({'count': count})
83 if isinstance(ex, DDosProtection):
84 backoff_delay = calculate_backoff(count + 1, API_RETRY_COUNT)
85 logger.info(f"Applying DDosProtection backoff delay: {backoff_delay}")
86 await asyncio.sleep(backoff_delay)
87 return await wrapper(*args, **kwargs)
88 else:
89 logger.warning('Giving up retrying: %s()', f.__name__)
90 raise ex
91 return wrapper
92
93
94 def retrier(_func=None, retries=API_RETRY_COUNT):
95 def decorator(f):
96 @wraps(f)
97 def wrapper(*args, **kwargs):
98 count = kwargs.pop('count', retries)
99 try:
100 return f(*args, **kwargs)
101 except (TemporaryError, RetryableOrderError) as ex:
102 logger.warning('%s() returned exception: "%s"', f.__name__, ex)
103 if count > 0:
104 logger.warning('retrying %s() still for %s times', f.__name__, count)
105 count -= 1
106 kwargs.update({'count': count})
107 if isinstance(ex, (DDosProtection, RetryableOrderError)):
108 # increasing backoff
109 backoff_delay = calculate_backoff(count + 1, retries)
110 logger.info(f"Applying DDosProtection backoff delay: {backoff_delay}")
111 time.sleep(backoff_delay)
112 return wrapper(*args, **kwargs)
113 else:
114 logger.warning('Giving up retrying: %s()', f.__name__)
115 raise ex
116 return wrapper
117 # Support both @retrier and @retrier(retries=2) syntax
118 if _func is None:
119 return decorator
120 else:
121 return decorator(_func)
122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/freqtrade/exchange/common.py b/freqtrade/exchange/common.py
--- a/freqtrade/exchange/common.py
+++ b/freqtrade/exchange/common.py
@@ -81,9 +81,16 @@
count -= 1
kwargs.update({'count': count})
if isinstance(ex, DDosProtection):
- backoff_delay = calculate_backoff(count + 1, API_RETRY_COUNT)
- logger.info(f"Applying DDosProtection backoff delay: {backoff_delay}")
- await asyncio.sleep(backoff_delay)
+ if "kucoin" in str(ex) and "429000" in str(ex):
+ # Temporary fix for 429000 error on kucoin
+ # see https://github.com/freqtrade/freqtrade/issues/5700 for details.
+ logger.warning(
+ f"Kucoin 429 error, avoid triggering DDosProtection backoff delay. "
+ f"{count} tries left before giving up")
+ else:
+ backoff_delay = calculate_backoff(count + 1, API_RETRY_COUNT)
+ logger.info(f"Applying DDosProtection backoff delay: {backoff_delay}")
+ await asyncio.sleep(backoff_delay)
return await wrapper(*args, **kwargs)
else:
logger.warning('Giving up retrying: %s()', f.__name__)
|
{"golden_diff": "diff --git a/freqtrade/exchange/common.py b/freqtrade/exchange/common.py\n--- a/freqtrade/exchange/common.py\n+++ b/freqtrade/exchange/common.py\n@@ -81,9 +81,16 @@\n count -= 1\n kwargs.update({'count': count})\n if isinstance(ex, DDosProtection):\n- backoff_delay = calculate_backoff(count + 1, API_RETRY_COUNT)\n- logger.info(f\"Applying DDosProtection backoff delay: {backoff_delay}\")\n- await asyncio.sleep(backoff_delay)\n+ if \"kucoin\" in str(ex) and \"429000\" in str(ex):\n+ # Temporary fix for 429000 error on kucoin\n+ # see https://github.com/freqtrade/freqtrade/issues/5700 for details.\n+ logger.warning(\n+ f\"Kucoin 429 error, avoid triggering DDosProtection backoff delay. \"\n+ f\"{count} tries left before giving up\")\n+ else:\n+ backoff_delay = calculate_backoff(count + 1, API_RETRY_COUNT)\n+ logger.info(f\"Applying DDosProtection backoff delay: {backoff_delay}\")\n+ await asyncio.sleep(backoff_delay)\n return await wrapper(*args, **kwargs)\n else:\n logger.warning('Giving up retrying: %s()', f.__name__)\n", "issue": "[Kucoin] {\"code\":\"429000\",\"msg\":\"Too Many Requests\"}. Able to ignore this error and prevent DDOS protection?\n<!-- \r\nHave you searched for similar issues before posting it?\r\n\r\nIf you have discovered a bug in the bot, please [search our issue tracker](https://github.com/freqtrade/freqtrade/issues?q=is%3Aissue). \r\nIf it hasn't been reported, please create a new issue.\r\n\r\nPlease do not use bug reports to request new features.\r\n-->\r\n\r\n## Describe your environment\r\n\r\n * Operating system: Canonical Ubuntu\r\n * Python Version: \r\n * CCXT version: 1.56.86\r\n * Freqtrade Version: \r\n \r\nNote: All issues other than enhancement requests will be closed without further comment if the above template is deleted or not filled out.\r\n\r\n## Describe the problem:\r\n\r\n{\"code\":\"429000\",\"msg\":\"Too Many Requests\"} error triggering DDOS protection delay. \r\n\r\n### Steps to reproduce:\r\nRandomly effect pairs on Kucoin\r\n\r\n### Observed Results:\r\n\r\nDDOS protection triggering when it doesn't need to which may cause unintended and financially ruinous behavior in the bot. \r\n\r\nAdmin on telegram says we can retry the request instantly without fear of DDOS protection triggering. \r\n\r\n\r\n\r\n\r\n\r\n\r\n### Relevant code exceptions or logs\r\n\r\nfreqtrade | 2021-10-11 04:55:04,274 - freqtrade.exchange.common - WARNING - _async_get_candle_history() returned exception: \"kucoin GET https://openapi-v2.kucoin.com/api/v1/market/candles?symbol=DYDX-USDT&type=5min&startAt=1633778101&endAt=1633928101 429 Too Many Requests {\"code\":\"429000\",\"msg\":\"Too Many Requests\"}\"\r\nfreqtrade | 2021-10-11 04:55:04,275 - freqtrade.exchange.common - WARNING - retrying _async_get_candle_history() still for 4 times\r\nfreqtrade | 2021-10-11 04:55:04,275 - freqtrade.exchange.common - INFO - Applying DDosProtection backoff delay: 1\r\n\n", "before_files": [{"content": "import asyncio\nimport logging\nimport time\nfrom functools import wraps\n\nfrom freqtrade.exceptions import DDosProtection, RetryableOrderError, TemporaryError\n\n\nlogger = logging.getLogger(__name__)\n\n\n# Maximum default retry count.\n# Functions are always called RETRY_COUNT + 1 times (for the original call)\nAPI_RETRY_COUNT = 4\nAPI_FETCH_ORDER_RETRY_COUNT = 5\n\nBAD_EXCHANGES = {\n \"bitmex\": \"Various reasons.\",\n \"phemex\": \"Does not provide history. 
\",\n \"poloniex\": \"Does not provide fetch_order endpoint to fetch both open and closed orders.\",\n}\n\nMAP_EXCHANGE_CHILDCLASS = {\n 'binanceus': 'binance',\n 'binanceje': 'binance',\n}\n\n\nEXCHANGE_HAS_REQUIRED = [\n # Required / private\n 'fetchOrder',\n 'cancelOrder',\n 'createOrder',\n # 'createLimitOrder', 'createMarketOrder',\n 'fetchBalance',\n\n # Public endpoints\n 'loadMarkets',\n 'fetchOHLCV',\n]\n\nEXCHANGE_HAS_OPTIONAL = [\n # Private\n 'fetchMyTrades', # Trades for order - fee detection\n # Public\n 'fetchOrderBook', 'fetchL2OrderBook', 'fetchTicker', # OR for pricing\n 'fetchTickers', # For volumepairlist?\n 'fetchTrades', # Downloading trades data\n]\n\n\ndef remove_credentials(config) -> None:\n \"\"\"\n Removes exchange keys from the configuration and specifies dry-run\n Used for backtesting / hyperopt / edge and utils.\n Modifies the input dict!\n \"\"\"\n if config.get('dry_run', False):\n config['exchange']['key'] = ''\n config['exchange']['secret'] = ''\n config['exchange']['password'] = ''\n config['exchange']['uid'] = ''\n\n\ndef calculate_backoff(retrycount, max_retries):\n \"\"\"\n Calculate backoff\n \"\"\"\n return (max_retries - retrycount) ** 2 + 1\n\n\ndef retrier_async(f):\n async def wrapper(*args, **kwargs):\n count = kwargs.pop('count', API_RETRY_COUNT)\n try:\n return await f(*args, **kwargs)\n except TemporaryError as ex:\n logger.warning('%s() returned exception: \"%s\"', f.__name__, ex)\n if count > 0:\n logger.warning('retrying %s() still for %s times', f.__name__, count)\n count -= 1\n kwargs.update({'count': count})\n if isinstance(ex, DDosProtection):\n backoff_delay = calculate_backoff(count + 1, API_RETRY_COUNT)\n logger.info(f\"Applying DDosProtection backoff delay: {backoff_delay}\")\n await asyncio.sleep(backoff_delay)\n return await wrapper(*args, **kwargs)\n else:\n logger.warning('Giving up retrying: %s()', f.__name__)\n raise ex\n return wrapper\n\n\ndef retrier(_func=None, retries=API_RETRY_COUNT):\n def decorator(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n count = kwargs.pop('count', retries)\n try:\n return f(*args, **kwargs)\n except (TemporaryError, RetryableOrderError) as ex:\n logger.warning('%s() returned exception: \"%s\"', f.__name__, ex)\n if count > 0:\n logger.warning('retrying %s() still for %s times', f.__name__, count)\n count -= 1\n kwargs.update({'count': count})\n if isinstance(ex, (DDosProtection, RetryableOrderError)):\n # increasing backoff\n backoff_delay = calculate_backoff(count + 1, retries)\n logger.info(f\"Applying DDosProtection backoff delay: {backoff_delay}\")\n time.sleep(backoff_delay)\n return wrapper(*args, **kwargs)\n else:\n logger.warning('Giving up retrying: %s()', f.__name__)\n raise ex\n return wrapper\n # Support both @retrier and @retrier(retries=2) syntax\n if _func is None:\n return decorator\n else:\n return decorator(_func)\n", "path": "freqtrade/exchange/common.py"}], "after_files": [{"content": "import asyncio\nimport logging\nimport time\nfrom functools import wraps\n\nfrom freqtrade.exceptions import DDosProtection, RetryableOrderError, TemporaryError\n\n\nlogger = logging.getLogger(__name__)\n\n\n# Maximum default retry count.\n# Functions are always called RETRY_COUNT + 1 times (for the original call)\nAPI_RETRY_COUNT = 4\nAPI_FETCH_ORDER_RETRY_COUNT = 5\n\nBAD_EXCHANGES = {\n \"bitmex\": \"Various reasons.\",\n \"phemex\": \"Does not provide history. 
\",\n \"poloniex\": \"Does not provide fetch_order endpoint to fetch both open and closed orders.\",\n}\n\nMAP_EXCHANGE_CHILDCLASS = {\n 'binanceus': 'binance',\n 'binanceje': 'binance',\n}\n\n\nEXCHANGE_HAS_REQUIRED = [\n # Required / private\n 'fetchOrder',\n 'cancelOrder',\n 'createOrder',\n # 'createLimitOrder', 'createMarketOrder',\n 'fetchBalance',\n\n # Public endpoints\n 'loadMarkets',\n 'fetchOHLCV',\n]\n\nEXCHANGE_HAS_OPTIONAL = [\n # Private\n 'fetchMyTrades', # Trades for order - fee detection\n # Public\n 'fetchOrderBook', 'fetchL2OrderBook', 'fetchTicker', # OR for pricing\n 'fetchTickers', # For volumepairlist?\n 'fetchTrades', # Downloading trades data\n]\n\n\ndef remove_credentials(config) -> None:\n \"\"\"\n Removes exchange keys from the configuration and specifies dry-run\n Used for backtesting / hyperopt / edge and utils.\n Modifies the input dict!\n \"\"\"\n if config.get('dry_run', False):\n config['exchange']['key'] = ''\n config['exchange']['secret'] = ''\n config['exchange']['password'] = ''\n config['exchange']['uid'] = ''\n\n\ndef calculate_backoff(retrycount, max_retries):\n \"\"\"\n Calculate backoff\n \"\"\"\n return (max_retries - retrycount) ** 2 + 1\n\n\ndef retrier_async(f):\n async def wrapper(*args, **kwargs):\n count = kwargs.pop('count', API_RETRY_COUNT)\n try:\n return await f(*args, **kwargs)\n except TemporaryError as ex:\n logger.warning('%s() returned exception: \"%s\"', f.__name__, ex)\n if count > 0:\n logger.warning('retrying %s() still for %s times', f.__name__, count)\n count -= 1\n kwargs.update({'count': count})\n if isinstance(ex, DDosProtection):\n if \"kucoin\" in str(ex) and \"429000\" in str(ex):\n # Temporary fix for 429000 error on kucoin\n # see https://github.com/freqtrade/freqtrade/issues/5700 for details.\n logger.warning(\n f\"Kucoin 429 error, avoid triggering DDosProtection backoff delay. \"\n f\"{count} tries left before giving up\")\n else:\n backoff_delay = calculate_backoff(count + 1, API_RETRY_COUNT)\n logger.info(f\"Applying DDosProtection backoff delay: {backoff_delay}\")\n await asyncio.sleep(backoff_delay)\n return await wrapper(*args, **kwargs)\n else:\n logger.warning('Giving up retrying: %s()', f.__name__)\n raise ex\n return wrapper\n\n\ndef retrier(_func=None, retries=API_RETRY_COUNT):\n def decorator(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n count = kwargs.pop('count', retries)\n try:\n return f(*args, **kwargs)\n except (TemporaryError, RetryableOrderError) as ex:\n logger.warning('%s() returned exception: \"%s\"', f.__name__, ex)\n if count > 0:\n logger.warning('retrying %s() still for %s times', f.__name__, count)\n count -= 1\n kwargs.update({'count': count})\n if isinstance(ex, (DDosProtection, RetryableOrderError)):\n # increasing backoff\n backoff_delay = calculate_backoff(count + 1, retries)\n logger.info(f\"Applying DDosProtection backoff delay: {backoff_delay}\")\n time.sleep(backoff_delay)\n return wrapper(*args, **kwargs)\n else:\n logger.warning('Giving up retrying: %s()', f.__name__)\n raise ex\n return wrapper\n # Support both @retrier and @retrier(retries=2) syntax\n if _func is None:\n return decorator\n else:\n return decorator(_func)\n", "path": "freqtrade/exchange/common.py"}]}
| 1,994 | 308 |
gh_patches_debug_6905
|
rasdani/github-patches
|
git_diff
|
internetarchive__openlibrary-4932
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug in bookshelves.py add method when changing edition
When calling the add method in openlibrary/core/bookshelves.py (e.g. POSTing to bookshelves.json) with a specific edition_id, the result is not as expected if another edition of the same work already exists on the user's bookshelves.
### Steps to Reproduce
Submit a POST to bookshelves.json in a work context (e.g. https://openlibrary.org/works/OL460810W/bookshelves.json) with an add action and a specific edition_id: {'action':'add', 'redir':false, 'bookshelf_id':1, 'edition_id':'/books/OL7656518M', 'dont_remove':true}
If the user already has a different edition of the work on any bookshelf:
* If dont_remove is true, no change is made
* If dont_remove is false, the existing edition is removed from the bookshelf
Expected behavior:
The record for the work on the user's bookshelf should be updated to reflect the passed edition_id if dont_remove is true.
### Details
- **Logged in (Y/N)?** Y
- **Browser type/version?** N/A
- **Operating system?** N/A
- **Environment (prod/dev/local)?** prod
<!-- If not sure, put prod -->
### Proposal & Constraints
Changing the update call at line 189 from
`return oldb.update('bookshelves_books', where=where, bookshelf_id=bookshelf_id, vars=data)`
to
`return oldb.update('bookshelves_books', where=where, bookshelf_id=bookshelf_id, edition_id=edition_id, vars=data)`
should create the expected behavior without affecting other use cases.
### Stakeholders
@mheiman
@mekarpeles
--- END ISSUE ---
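For readability, the proposed call from the issue above, reformatted as a sketch; the wrapper function is illustrative, and it assumes web.py-style `db.update` semantics where extra keyword arguments become SET columns:
```python
def update_shelf_row(oldb, where, data, bookshelf_id, edition_id):
    # Also set edition_id so that re-adding a work with a specific edition
    # switches the existing row to that edition instead of leaving it untouched.
    return oldb.update(
        'bookshelves_books',
        where=where,
        bookshelf_id=bookshelf_id,
        edition_id=edition_id,
        vars=data,
    )
```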
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openlibrary/core/bookshelves.py`
Content:
```
1 from openlibrary.utils.dateutil import DATE_ONE_MONTH_AGO, DATE_ONE_WEEK_AGO
2
3 from . import db
4
5
6 class Bookshelves(object):
7
8 PRESET_BOOKSHELVES = {
9 'Want to Read': 1,
10 'Currently Reading': 2,
11 'Already Read': 3
12 }
13
14 PRESET_BOOKSHELVES_JSON = {
15 'want_to_read': 1,
16 'currently_reading': 2,
17 'already_read': 3,
18 }
19
20 @classmethod
21 def summary(cls):
22 return {
23 'total_books_logged': {
24 'total': Bookshelves.total_books_logged(),
25 'month': Bookshelves.total_books_logged(since=DATE_ONE_MONTH_AGO),
26 'week': Bookshelves.total_books_logged(since=DATE_ONE_WEEK_AGO)
27 },
28 'total_users_logged': {
29 'total': Bookshelves.total_unique_users(),
30 'month': Bookshelves.total_unique_users(since=DATE_ONE_MONTH_AGO),
31 'week': Bookshelves.total_unique_users(since=DATE_ONE_WEEK_AGO)
32 }
33 }
34
35 @classmethod
36 def total_books_logged(cls, shelf_ids=None, since=None):
37 """Returns (int) number of books logged across all Reading Log shelves (e.g. those
38 specified in PRESET_BOOKSHELVES). One may alternatively specify a
39 `list` of `shelf_ids` to isolate or span multiple
40 shelves. `since` may be used to limit the result to those
41 books logged since a specific date. Any python datetime.date
42 type should work.
43
44 Args:
45 shelf_ids (list) - one or more bookshelf_id values, see
46 also the default values specified in PRESET_BOOKSHELVES
47 since (datetime.date) - returns all logged books after date
48
49 """
50
51 oldb = db.get_db()
52 query = "SELECT count(*) from bookshelves_books"
53 if shelf_ids:
54 query += " WHERE bookshelf_id IN ($shelf_ids)"
55 if since:
56 query += " AND created >= $since"
57 elif since:
58 query += " WHERE created >= $since"
59 results = oldb.query(query, vars={'since': since, 'shelf_ids': shelf_ids})
60 return results[0] if results else None
61
62 @classmethod
63 def total_unique_users(cls, since=None):
64 """Returns the total number of unique users who have logged a
65 book. `since` may be provided to only return the number of users after
66 a certain datetime.date.
67 """
68 oldb = db.get_db()
69 query = "select count(DISTINCT username) from bookshelves_books"
70 if since:
71 query += " WHERE created >= $since"
72 results = oldb.query(query, vars={'since': since})
73 return results[0] if results else None
74
75 @classmethod
76 def most_logged_books(cls, shelf_id, limit=10, since=False):
77 """Returns a ranked list of work OLIDs (in the form of an integer --
78 i.e. OL123W would be 123) which have been most logged by
79 users. This query is limited to a specific shelf_id (e.g. 1
80 for "Want to Read").
81 """
82 oldb = db.get_db()
83 query = 'select work_id, count(*) as cnt from bookshelves_books WHERE bookshelf_id=$shelf_id '
84 if since:
85 query += " AND created >= $since"
86 query += ' group by work_id order by cnt desc limit $limit'
87 return list(oldb.query(query, vars={'shelf_id': shelf_id, 'limit': limit, 'since': since}))
88
89 @classmethod
90 def count_total_books_logged_by_user(cls, username, bookshelf_ids=None):
91 """Counts the (int) total number of books logged by this `username`,
92 with the option of limiting the count to specific bookshelves
93 by `bookshelf_id`
94 """
95 return sum(cls.count_total_books_logged_by_user_per_shelf(
96 username, bookshelf_ids=bookshelf_ids).values())
97
98 @classmethod
99 def count_total_books_logged_by_user_per_shelf(cls, username, bookshelf_ids=None):
100 """Returns a dict mapping the specified user's bookshelves_ids to the
101 number of number of books logged per each shelf, i.e. {bookshelf_id:
102 count}. By default, we limit bookshelf_ids to those in PRESET_BOOKSHELVES
103
104 TODO: add `since` to fetch books logged after a certain
105 date. Useful for following/subscribing-to users and being
106 notified of books they log. Also add to
107 count_total_books_logged_by_user
108 """
109 oldb = db.get_db()
110 data = {'username': username}
111 _bookshelf_ids = ','.join([str(x) for x in bookshelf_ids or cls.PRESET_BOOKSHELVES.values()])
112 query = ("SELECT bookshelf_id, count(*) from bookshelves_books WHERE "
113 "bookshelf_id=ANY('{" + _bookshelf_ids + "}'::int[]) "
114 "AND username=$username GROUP BY bookshelf_id")
115 result = oldb.query(query, vars=data)
116 return dict([(i['bookshelf_id'], i['count']) for i in result]) if result else {}
117
118 @classmethod
119 def get_users_logged_books(cls, username, bookshelf_id=None, limit=100, page=1):
120 """Returns a list of Reading Log database records for books which
121 the user has logged. Records are described in core/schema.py
122 and include:
123
124 username (str) - who logged this book
125 work_id (int) - the Open Library work ID as an int (e.g. OL123W becomes 123)
126 bookshelf_id (int) - the ID of the bookshelf, see: PRESET_BOOKSHELVES.
127 If bookshelf_id is None, return books from all bookshelves.
128 edition_id (int) [optional] - the specific edition logged, if applicable
129 created (datetime) - date the book was logged
130
131 """
132 oldb = db.get_db()
133 page = int(page) if page else 1
134 data = {
135 'username': username,
136 'limit': limit,
137 'offset': limit * (page - 1),
138 'bookshelf_id': bookshelf_id
139 }
140 query = ("SELECT * from bookshelves_books WHERE "
141 "bookshelf_id=$bookshelf_id AND username=$username "
142 "LIMIT $limit OFFSET $offset")
143 if bookshelf_id is None:
144 query = ("SELECT * from bookshelves_books WHERE "
145 "username=$username")
146 # XXX Removing limit, offset, etc from data looks like a bug
147 # unrelated / not fixing in this PR.
148 data = { 'username': username }
149 return list(oldb.query(query, vars=data))
150
151 @classmethod
152 def get_users_read_status_of_work(cls, username, work_id):
153 """A user can mark a book as (1) want to read, (2) currently reading,
154 or (3) already read. Each of these states is mutually
155 exclusive. Returns the user's read state of this work, if one
156 exists.
157 """
158 oldb = db.get_db()
159 data = {
160 'username': username,
161 'work_id': int(work_id)
162 }
163 bookshelf_ids = ','.join([str(x) for x in cls.PRESET_BOOKSHELVES.values()])
164 query = ("SELECT bookshelf_id from bookshelves_books WHERE "
165 "bookshelf_id=ANY('{" + bookshelf_ids + "}'::int[]) "
166 "AND username=$username AND work_id=$work_id")
167 result = list(oldb.query(query, vars=data))
168 return result[0].bookshelf_id if result else None
169
170 @classmethod
171 def add(cls, username, bookshelf_id, work_id, edition_id=None):
172 """Adds a book with `work_id` to user's bookshelf designated by
173 `bookshelf_id`"""
174 oldb = db.get_db()
175 work_id = int(work_id)
176 bookshelf_id = int(bookshelf_id)
177 data = {
178 'work_id': work_id,
179 'username': username,
180 }
181
182 users_status = cls.get_users_read_status_of_work(username, work_id)
183 if not users_status:
184 return oldb.insert('bookshelves_books', username=username,
185 bookshelf_id=bookshelf_id,
186 work_id=work_id, edition_id=edition_id)
187 else:
188 where = "work_id=$work_id AND username=$username"
189 return oldb.update('bookshelves_books', where=where,
190 bookshelf_id=bookshelf_id, vars=data)
191
192 @classmethod
193 def remove(cls, username, work_id, bookshelf_id=None):
194 oldb = db.get_db()
195 where = {
196 'username': username,
197 'work_id': int(work_id)
198 }
199 if bookshelf_id:
200 where['bookshelf_id'] = int(bookshelf_id)
201
202 try:
203 return oldb.delete('bookshelves_books',
204 where=('work_id=$work_id AND username=$username'), vars=where)
205 except: # we want to catch no entry exists
206 return None
207
208 @classmethod
209 def get_works_shelves(cls, work_id, lazy=False):
210 """Bookshelves this work is on"""
211 oldb = db.get_db()
212 query = "SELECT * from bookshelves_books where work_id=$work_id"
213 try:
214 result = oldb.query(query, vars={'work_id': int(work_id)})
215 return result if lazy else list(result)
216 except:
217 return None
218
219 @classmethod
220 def get_num_users_by_bookshelf_by_work_id(cls, work_id):
221 """Returns a dict mapping a work_id to the
222 number of number of users who have placed that work_id in each shelf, i.e. {bookshelf_id:
223 count}.
224 """
225 oldb = db.get_db()
226 query = ("SELECT bookshelf_id, count(DISTINCT username) as user_count from bookshelves_books where"
227 " work_id=$work_id"
228 " GROUP BY bookshelf_id")
229 result = oldb.query(query, vars={'work_id': int(work_id)})
230 return dict([(i['bookshelf_id'], i['user_count']) for i in result]) if result else {}
231
232 @classmethod
233 def user_with_most_books(cls):
234 """
235 Which super patrons have the most books logged?
236
237 SELECT username, count(*) AS counted from bookshelves_books WHERE bookshelf_id=ANY('{1,3,2}'::int[]) GROUP BY username ORDER BY counted DESC, username LIMIT 10
238 """
239 oldb = db.get_db()
240 _bookshelf_ids = ','.join([str(x) for x in cls.PRESET_BOOKSHELVES.values()])
241 query = ("SELECT username, count(*) AS counted "
242 "FROM bookshelves_books WHERE "
243 "bookshelf_id=ANY('{" + _bookshelf_ids + "}'::int[]) "
244 "GROUP BY username "
245 "ORDER BY counted DESC, username LIMIT 100")
246 result = oldb.query(query)
247 return list(result)
248
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/openlibrary/core/bookshelves.py b/openlibrary/core/bookshelves.py
--- a/openlibrary/core/bookshelves.py
+++ b/openlibrary/core/bookshelves.py
@@ -187,7 +187,7 @@
else:
where = "work_id=$work_id AND username=$username"
return oldb.update('bookshelves_books', where=where,
- bookshelf_id=bookshelf_id, vars=data)
+ bookshelf_id=bookshelf_id, edition_id=edition_id, vars=data)
@classmethod
def remove(cls, username, work_id, bookshelf_id=None):
|
{"golden_diff": "diff --git a/openlibrary/core/bookshelves.py b/openlibrary/core/bookshelves.py\n--- a/openlibrary/core/bookshelves.py\n+++ b/openlibrary/core/bookshelves.py\n@@ -187,7 +187,7 @@\n else:\n where = \"work_id=$work_id AND username=$username\"\n return oldb.update('bookshelves_books', where=where,\n- bookshelf_id=bookshelf_id, vars=data)\n+ bookshelf_id=bookshelf_id, edition_id=edition_id, vars=data)\n \n @classmethod\n def remove(cls, username, work_id, bookshelf_id=None):\n", "issue": "Bug in bookshelves.py add method when changing edition\nWhen calling the add method in openlibrary/core/bookshelves.py (e.g. POSTing to bookshelves.json) with a specific edition_id, the result is not as expected if another edition of the same work already exists on the user's bookshelves. \r\n\r\n### Steps to Reproduce\r\nSubmit a POST to bookshelves.json in a work context (e.g. https://openlibrary.org/works/OL460810W/bookshelves.json) with an add action and a specific edition_id: {'action':'add', 'redir':false, 'bookshelf_id':1, 'edition_id':'/books/OL7656518M', 'dont_remove':true}\r\n\r\nIf the user already has a different edition of the work on any bookshelf:\r\n\r\n* If dont_remove is true, no change is made\r\n* If dont_remove is false, the existing edition is removed from the bookshelf\r\n\r\nExpected behavior:\r\n\r\nThe record for the work on the user's bookshelf should be updated to reflect the passed edition_id if dont_remove is true.\r\n\r\n### Details\r\n\r\n- **Logged in (Y/N)?** Y\r\n- **Browser type/version?** N/A\r\n- **Operating system?** N/A\r\n- **Environment (prod/dev/local)?** prod\r\n<!-- If not sure, put prod -->\r\n\r\n### Proposal & Constraints\r\n\r\nChanging the update call at line 189 from\r\n\r\n`return oldb.update('bookshelves_books', where=where, bookshelf_id=bookshelf_id, vars=data)`\r\n\r\nto \r\n\r\n`return oldb.update('bookshelves_books', where=where, bookshelf_id=bookshelf_id, edition_id=edition_id, vars=data)`\r\n\r\nshould create the expected behavior without affecting other use cases.\r\n\r\n### Stakeholders\r\n@mheiman\r\n@mekarpeles\r\n\n", "before_files": [{"content": "from openlibrary.utils.dateutil import DATE_ONE_MONTH_AGO, DATE_ONE_WEEK_AGO\n\nfrom . import db\n\n\nclass Bookshelves(object):\n\n PRESET_BOOKSHELVES = {\n 'Want to Read': 1,\n 'Currently Reading': 2,\n 'Already Read': 3\n }\n\n PRESET_BOOKSHELVES_JSON = {\n 'want_to_read': 1,\n 'currently_reading': 2,\n 'already_read': 3,\n }\n\n @classmethod\n def summary(cls):\n return {\n 'total_books_logged': {\n 'total': Bookshelves.total_books_logged(),\n 'month': Bookshelves.total_books_logged(since=DATE_ONE_MONTH_AGO),\n 'week': Bookshelves.total_books_logged(since=DATE_ONE_WEEK_AGO)\n },\n 'total_users_logged': {\n 'total': Bookshelves.total_unique_users(),\n 'month': Bookshelves.total_unique_users(since=DATE_ONE_MONTH_AGO),\n 'week': Bookshelves.total_unique_users(since=DATE_ONE_WEEK_AGO)\n }\n }\n\n @classmethod\n def total_books_logged(cls, shelf_ids=None, since=None):\n \"\"\"Returns (int) number of books logged across all Reading Log shelves (e.g. those\n specified in PRESET_BOOKSHELVES). One may alternatively specify a\n `list` of `shelf_ids` to isolate or span multiple\n shelves. `since` may be used to limit the result to those\n books logged since a specific date. 
Any python datetime.date\n type should work.\n\n Args:\n shelf_ids (list) - one or more bookshelf_id values, see\n also the default values specified in PRESET_BOOKSHELVES\n since (datetime.date) - returns all logged books after date\n\n \"\"\"\n\n oldb = db.get_db()\n query = \"SELECT count(*) from bookshelves_books\"\n if shelf_ids:\n query += \" WHERE bookshelf_id IN ($shelf_ids)\"\n if since:\n query += \" AND created >= $since\"\n elif since:\n query += \" WHERE created >= $since\"\n results = oldb.query(query, vars={'since': since, 'shelf_ids': shelf_ids})\n return results[0] if results else None\n\n @classmethod\n def total_unique_users(cls, since=None):\n \"\"\"Returns the total number of unique users who have logged a\n book. `since` may be provided to only return the number of users after\n a certain datetime.date.\n \"\"\"\n oldb = db.get_db()\n query = \"select count(DISTINCT username) from bookshelves_books\"\n if since:\n query += \" WHERE created >= $since\"\n results = oldb.query(query, vars={'since': since})\n return results[0] if results else None\n\n @classmethod\n def most_logged_books(cls, shelf_id, limit=10, since=False):\n \"\"\"Returns a ranked list of work OLIDs (in the form of an integer --\n i.e. OL123W would be 123) which have been most logged by\n users. This query is limited to a specific shelf_id (e.g. 1\n for \"Want to Read\").\n \"\"\"\n oldb = db.get_db()\n query = 'select work_id, count(*) as cnt from bookshelves_books WHERE bookshelf_id=$shelf_id '\n if since:\n query += \" AND created >= $since\"\n query += ' group by work_id order by cnt desc limit $limit'\n return list(oldb.query(query, vars={'shelf_id': shelf_id, 'limit': limit, 'since': since}))\n\n @classmethod\n def count_total_books_logged_by_user(cls, username, bookshelf_ids=None):\n \"\"\"Counts the (int) total number of books logged by this `username`,\n with the option of limiting the count to specific bookshelves\n by `bookshelf_id`\n \"\"\"\n return sum(cls.count_total_books_logged_by_user_per_shelf(\n username, bookshelf_ids=bookshelf_ids).values())\n\n @classmethod\n def count_total_books_logged_by_user_per_shelf(cls, username, bookshelf_ids=None):\n \"\"\"Returns a dict mapping the specified user's bookshelves_ids to the\n number of number of books logged per each shelf, i.e. {bookshelf_id:\n count}. By default, we limit bookshelf_ids to those in PRESET_BOOKSHELVES\n\n TODO: add `since` to fetch books logged after a certain\n date. Useful for following/subscribing-to users and being\n notified of books they log. Also add to\n count_total_books_logged_by_user\n \"\"\"\n oldb = db.get_db()\n data = {'username': username}\n _bookshelf_ids = ','.join([str(x) for x in bookshelf_ids or cls.PRESET_BOOKSHELVES.values()])\n query = (\"SELECT bookshelf_id, count(*) from bookshelves_books WHERE \"\n \"bookshelf_id=ANY('{\" + _bookshelf_ids + \"}'::int[]) \"\n \"AND username=$username GROUP BY bookshelf_id\")\n result = oldb.query(query, vars=data)\n return dict([(i['bookshelf_id'], i['count']) for i in result]) if result else {}\n\n @classmethod\n def get_users_logged_books(cls, username, bookshelf_id=None, limit=100, page=1):\n \"\"\"Returns a list of Reading Log database records for books which\n the user has logged. Records are described in core/schema.py\n and include:\n\n username (str) - who logged this book\n work_id (int) - the Open Library work ID as an int (e.g. 
OL123W becomes 123)\n bookshelf_id (int) - the ID of the bookshelf, see: PRESET_BOOKSHELVES.\n If bookshelf_id is None, return books from all bookshelves.\n edition_id (int) [optional] - the specific edition logged, if applicable\n created (datetime) - date the book was logged\n\n \"\"\"\n oldb = db.get_db()\n page = int(page) if page else 1\n data = {\n 'username': username,\n 'limit': limit,\n 'offset': limit * (page - 1),\n 'bookshelf_id': bookshelf_id\n }\n query = (\"SELECT * from bookshelves_books WHERE \"\n \"bookshelf_id=$bookshelf_id AND username=$username \"\n \"LIMIT $limit OFFSET $offset\")\n if bookshelf_id is None:\n query = (\"SELECT * from bookshelves_books WHERE \"\n \"username=$username\")\n # XXX Removing limit, offset, etc from data looks like a bug\n # unrelated / not fixing in this PR.\n data = { 'username': username }\n return list(oldb.query(query, vars=data))\n\n @classmethod\n def get_users_read_status_of_work(cls, username, work_id):\n \"\"\"A user can mark a book as (1) want to read, (2) currently reading,\n or (3) already read. Each of these states is mutually\n exclusive. Returns the user's read state of this work, if one\n exists.\n \"\"\"\n oldb = db.get_db()\n data = {\n 'username': username,\n 'work_id': int(work_id)\n }\n bookshelf_ids = ','.join([str(x) for x in cls.PRESET_BOOKSHELVES.values()])\n query = (\"SELECT bookshelf_id from bookshelves_books WHERE \"\n \"bookshelf_id=ANY('{\" + bookshelf_ids + \"}'::int[]) \"\n \"AND username=$username AND work_id=$work_id\")\n result = list(oldb.query(query, vars=data))\n return result[0].bookshelf_id if result else None\n\n @classmethod\n def add(cls, username, bookshelf_id, work_id, edition_id=None):\n \"\"\"Adds a book with `work_id` to user's bookshelf designated by\n `bookshelf_id`\"\"\"\n oldb = db.get_db()\n work_id = int(work_id)\n bookshelf_id = int(bookshelf_id)\n data = {\n 'work_id': work_id,\n 'username': username,\n }\n\n users_status = cls.get_users_read_status_of_work(username, work_id)\n if not users_status:\n return oldb.insert('bookshelves_books', username=username,\n bookshelf_id=bookshelf_id,\n work_id=work_id, edition_id=edition_id)\n else:\n where = \"work_id=$work_id AND username=$username\"\n return oldb.update('bookshelves_books', where=where,\n bookshelf_id=bookshelf_id, vars=data)\n\n @classmethod\n def remove(cls, username, work_id, bookshelf_id=None):\n oldb = db.get_db()\n where = {\n 'username': username,\n 'work_id': int(work_id)\n }\n if bookshelf_id:\n where['bookshelf_id'] = int(bookshelf_id)\n\n try:\n return oldb.delete('bookshelves_books',\n where=('work_id=$work_id AND username=$username'), vars=where)\n except: # we want to catch no entry exists\n return None\n\n @classmethod\n def get_works_shelves(cls, work_id, lazy=False):\n \"\"\"Bookshelves this work is on\"\"\"\n oldb = db.get_db()\n query = \"SELECT * from bookshelves_books where work_id=$work_id\"\n try:\n result = oldb.query(query, vars={'work_id': int(work_id)})\n return result if lazy else list(result)\n except:\n return None\n\n @classmethod\n def get_num_users_by_bookshelf_by_work_id(cls, work_id):\n \"\"\"Returns a dict mapping a work_id to the\n number of number of users who have placed that work_id in each shelf, i.e. 
{bookshelf_id:\n count}.\n \"\"\"\n oldb = db.get_db()\n query = (\"SELECT bookshelf_id, count(DISTINCT username) as user_count from bookshelves_books where\"\n \" work_id=$work_id\"\n \" GROUP BY bookshelf_id\")\n result = oldb.query(query, vars={'work_id': int(work_id)})\n return dict([(i['bookshelf_id'], i['user_count']) for i in result]) if result else {}\n\n @classmethod\n def user_with_most_books(cls):\n \"\"\"\n Which super patrons have the most books logged?\n\n SELECT username, count(*) AS counted from bookshelves_books WHERE bookshelf_id=ANY('{1,3,2}'::int[]) GROUP BY username ORDER BY counted DESC, username LIMIT 10\n \"\"\"\n oldb = db.get_db()\n _bookshelf_ids = ','.join([str(x) for x in cls.PRESET_BOOKSHELVES.values()])\n query = (\"SELECT username, count(*) AS counted \"\n \"FROM bookshelves_books WHERE \"\n \"bookshelf_id=ANY('{\" + _bookshelf_ids + \"}'::int[]) \"\n \"GROUP BY username \"\n \"ORDER BY counted DESC, username LIMIT 100\")\n result = oldb.query(query)\n return list(result)\n", "path": "openlibrary/core/bookshelves.py"}], "after_files": [{"content": "from openlibrary.utils.dateutil import DATE_ONE_MONTH_AGO, DATE_ONE_WEEK_AGO\n\nfrom . import db\n\n\nclass Bookshelves(object):\n\n PRESET_BOOKSHELVES = {\n 'Want to Read': 1,\n 'Currently Reading': 2,\n 'Already Read': 3\n }\n\n PRESET_BOOKSHELVES_JSON = {\n 'want_to_read': 1,\n 'currently_reading': 2,\n 'already_read': 3,\n }\n\n @classmethod\n def summary(cls):\n return {\n 'total_books_logged': {\n 'total': Bookshelves.total_books_logged(),\n 'month': Bookshelves.total_books_logged(since=DATE_ONE_MONTH_AGO),\n 'week': Bookshelves.total_books_logged(since=DATE_ONE_WEEK_AGO)\n },\n 'total_users_logged': {\n 'total': Bookshelves.total_unique_users(),\n 'month': Bookshelves.total_unique_users(since=DATE_ONE_MONTH_AGO),\n 'week': Bookshelves.total_unique_users(since=DATE_ONE_WEEK_AGO)\n }\n }\n\n @classmethod\n def total_books_logged(cls, shelf_ids=None, since=None):\n \"\"\"Returns (int) number of books logged across all Reading Log shelves (e.g. those\n specified in PRESET_BOOKSHELVES). One may alternatively specify a\n `list` of `shelf_ids` to isolate or span multiple\n shelves. `since` may be used to limit the result to those\n books logged since a specific date. Any python datetime.date\n type should work.\n\n Args:\n shelf_ids (list) - one or more bookshelf_id values, see\n also the default values specified in PRESET_BOOKSHELVES\n since (datetime.date) - returns all logged books after date\n\n \"\"\"\n\n oldb = db.get_db()\n query = \"SELECT count(*) from bookshelves_books\"\n if shelf_ids:\n query += \" WHERE bookshelf_id IN ($shelf_ids)\"\n if since:\n query += \" AND created >= $since\"\n elif since:\n query += \" WHERE created >= $since\"\n results = oldb.query(query, vars={'since': since, 'shelf_ids': shelf_ids})\n return results[0] if results else None\n\n @classmethod\n def total_unique_users(cls, since=None):\n \"\"\"Returns the total number of unique users who have logged a\n book. `since` may be provided to only return the number of users after\n a certain datetime.date.\n \"\"\"\n oldb = db.get_db()\n query = \"select count(DISTINCT username) from bookshelves_books\"\n if since:\n query += \" WHERE created >= $since\"\n results = oldb.query(query, vars={'since': since})\n return results[0] if results else None\n\n @classmethod\n def most_logged_books(cls, shelf_id, limit=10, since=False):\n \"\"\"Returns a ranked list of work OLIDs (in the form of an integer --\n i.e. 
OL123W would be 123) which have been most logged by\n users. This query is limited to a specific shelf_id (e.g. 1\n for \"Want to Read\").\n \"\"\"\n oldb = db.get_db()\n query = 'select work_id, count(*) as cnt from bookshelves_books WHERE bookshelf_id=$shelf_id '\n if since:\n query += \" AND created >= $since\"\n query += ' group by work_id order by cnt desc limit $limit'\n return list(oldb.query(query, vars={'shelf_id': shelf_id, 'limit': limit, 'since': since}))\n\n @classmethod\n def count_total_books_logged_by_user(cls, username, bookshelf_ids=None):\n \"\"\"Counts the (int) total number of books logged by this `username`,\n with the option of limiting the count to specific bookshelves\n by `bookshelf_id`\n \"\"\"\n return sum(cls.count_total_books_logged_by_user_per_shelf(\n username, bookshelf_ids=bookshelf_ids).values())\n\n @classmethod\n def count_total_books_logged_by_user_per_shelf(cls, username, bookshelf_ids=None):\n \"\"\"Returns a dict mapping the specified user's bookshelves_ids to the\n number of number of books logged per each shelf, i.e. {bookshelf_id:\n count}. By default, we limit bookshelf_ids to those in PRESET_BOOKSHELVES\n\n TODO: add `since` to fetch books logged after a certain\n date. Useful for following/subscribing-to users and being\n notified of books they log. Also add to\n count_total_books_logged_by_user\n \"\"\"\n oldb = db.get_db()\n data = {'username': username}\n _bookshelf_ids = ','.join([str(x) for x in bookshelf_ids or cls.PRESET_BOOKSHELVES.values()])\n query = (\"SELECT bookshelf_id, count(*) from bookshelves_books WHERE \"\n \"bookshelf_id=ANY('{\" + _bookshelf_ids + \"}'::int[]) \"\n \"AND username=$username GROUP BY bookshelf_id\")\n result = oldb.query(query, vars=data)\n return dict([(i['bookshelf_id'], i['count']) for i in result]) if result else {}\n\n @classmethod\n def get_users_logged_books(cls, username, bookshelf_id=None, limit=100, page=1):\n \"\"\"Returns a list of Reading Log database records for books which\n the user has logged. Records are described in core/schema.py\n and include:\n\n username (str) - who logged this book\n work_id (int) - the Open Library work ID as an int (e.g. OL123W becomes 123)\n bookshelf_id (int) - the ID of the bookshelf, see: PRESET_BOOKSHELVES.\n If bookshelf_id is None, return books from all bookshelves.\n edition_id (int) [optional] - the specific edition logged, if applicable\n created (datetime) - date the book was logged\n\n \"\"\"\n oldb = db.get_db()\n page = int(page) if page else 1\n data = {\n 'username': username,\n 'limit': limit,\n 'offset': limit * (page - 1),\n 'bookshelf_id': bookshelf_id\n }\n query = (\"SELECT * from bookshelves_books WHERE \"\n \"bookshelf_id=$bookshelf_id AND username=$username \"\n \"LIMIT $limit OFFSET $offset\")\n if bookshelf_id is None:\n query = (\"SELECT * from bookshelves_books WHERE \"\n \"username=$username\")\n # XXX Removing limit, offset, etc from data looks like a bug\n # unrelated / not fixing in this PR.\n data = { 'username': username }\n return list(oldb.query(query, vars=data))\n\n @classmethod\n def get_users_read_status_of_work(cls, username, work_id):\n \"\"\"A user can mark a book as (1) want to read, (2) currently reading,\n or (3) already read. Each of these states is mutually\n exclusive. 
Returns the user's read state of this work, if one\n exists.\n \"\"\"\n oldb = db.get_db()\n data = {\n 'username': username,\n 'work_id': int(work_id)\n }\n bookshelf_ids = ','.join([str(x) for x in cls.PRESET_BOOKSHELVES.values()])\n query = (\"SELECT bookshelf_id from bookshelves_books WHERE \"\n \"bookshelf_id=ANY('{\" + bookshelf_ids + \"}'::int[]) \"\n \"AND username=$username AND work_id=$work_id\")\n result = list(oldb.query(query, vars=data))\n return result[0].bookshelf_id if result else None\n\n @classmethod\n def add(cls, username, bookshelf_id, work_id, edition_id=None):\n \"\"\"Adds a book with `work_id` to user's bookshelf designated by\n `bookshelf_id`\"\"\"\n oldb = db.get_db()\n work_id = int(work_id)\n bookshelf_id = int(bookshelf_id)\n data = {\n 'work_id': work_id,\n 'username': username,\n }\n\n users_status = cls.get_users_read_status_of_work(username, work_id)\n if not users_status:\n return oldb.insert('bookshelves_books', username=username,\n bookshelf_id=bookshelf_id,\n work_id=work_id, edition_id=edition_id)\n else:\n where = \"work_id=$work_id AND username=$username\"\n return oldb.update('bookshelves_books', where=where,\n bookshelf_id=bookshelf_id, edition_id=edition_id, vars=data)\n\n @classmethod\n def remove(cls, username, work_id, bookshelf_id=None):\n oldb = db.get_db()\n where = {\n 'username': username,\n 'work_id': int(work_id)\n }\n if bookshelf_id:\n where['bookshelf_id'] = int(bookshelf_id)\n\n try:\n return oldb.delete('bookshelves_books',\n where=('work_id=$work_id AND username=$username'), vars=where)\n except: # we want to catch no entry exists\n return None\n\n @classmethod\n def get_works_shelves(cls, work_id, lazy=False):\n \"\"\"Bookshelves this work is on\"\"\"\n oldb = db.get_db()\n query = \"SELECT * from bookshelves_books where work_id=$work_id\"\n try:\n result = oldb.query(query, vars={'work_id': int(work_id)})\n return result if lazy else list(result)\n except:\n return None\n\n @classmethod\n def get_num_users_by_bookshelf_by_work_id(cls, work_id):\n \"\"\"Returns a dict mapping a work_id to the\n number of number of users who have placed that work_id in each shelf, i.e. {bookshelf_id:\n count}.\n \"\"\"\n oldb = db.get_db()\n query = (\"SELECT bookshelf_id, count(DISTINCT username) as user_count from bookshelves_books where\"\n \" work_id=$work_id\"\n \" GROUP BY bookshelf_id\")\n result = oldb.query(query, vars={'work_id': int(work_id)})\n return dict([(i['bookshelf_id'], i['user_count']) for i in result]) if result else {}\n\n @classmethod\n def user_with_most_books(cls):\n \"\"\"\n Which super patrons have the most books logged?\n\n SELECT username, count(*) AS counted from bookshelves_books WHERE bookshelf_id=ANY('{1,3,2}'::int[]) GROUP BY username ORDER BY counted DESC, username LIMIT 10\n \"\"\"\n oldb = db.get_db()\n _bookshelf_ids = ','.join([str(x) for x in cls.PRESET_BOOKSHELVES.values()])\n query = (\"SELECT username, count(*) AS counted \"\n \"FROM bookshelves_books WHERE \"\n \"bookshelf_id=ANY('{\" + _bookshelf_ids + \"}'::int[]) \"\n \"GROUP BY username \"\n \"ORDER BY counted DESC, username LIMIT 100\")\n result = oldb.query(query)\n return list(result)\n", "path": "openlibrary/core/bookshelves.py"}]}
| 3,732 | 138 |
gh_patches_debug_11817
|
rasdani/github-patches
|
git_diff
|
pytorch__pytorch-3139
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Sparse SGD + momentum = cuda memory issue.
When using the classic SGD optimizer with momentum together with sparse embeddings, the memory keeps garbage collecting / allocating, leading to a slowdown and eventually an out-of-memory error. [Here is a minimal example to reproduce the issue](https://gist.github.com/cedias/946a380807b7e1bf92d738268b71415a)


The issue disappears when momentum is not used

or when embeddings are not sparse

I'm using the last pytorch version on conda: `'0.2.0_4'`
--- END ISSUE ---
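For context, the golden diff further down in this entry replaces the momentum buffer's initialisation: instead of `d_p.clone()` (which, for a sparse embedding gradient, yields a sparse buffer that keeps being rebuilt around whichever rows the current batch touched), it allocates a dense zero buffer once and accumulates the sparse gradient into it. A small illustrative sketch of the two buffer styles; the shapes and the 0.9 momentum value are arbitrary, and `torch.zeros` stands in for the older `p.data.new().resize_as_(...).zero_()` idiom used in the patch.

```python
import torch

grad = torch.randn(4, 3).to_sparse()   # stand-in for a sparse embedding gradient

# Old behaviour: the momentum buffer is a sparse clone of the first gradient,
# so subsequent steps juggle sparse tensors and reallocate repeatedly.
sparse_buf = grad.clone()

# Patched behaviour: a dense buffer allocated once and updated in place;
# dense.add_(sparse) accumulates the sparse gradient into the dense tensor.
dense_buf = torch.zeros(4, 3)
dense_buf.mul_(0.9).add_(grad)
```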
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torch/optim/sgd.py`
Content:
```
1 from .optimizer import Optimizer, required
2
3
4 class SGD(Optimizer):
5 r"""Implements stochastic gradient descent (optionally with momentum).
6
7 Nesterov momentum is based on the formula from
8 `On the importance of initialization and momentum in deep learning`__.
9
10 Args:
11 params (iterable): iterable of parameters to optimize or dicts defining
12 parameter groups
13 lr (float): learning rate
14 momentum (float, optional): momentum factor (default: 0)
15 weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
16 dampening (float, optional): dampening for momentum (default: 0)
17 nesterov (bool, optional): enables Nesterov momentum (default: False)
18
19 Example:
20 >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
21 >>> optimizer.zero_grad()
22 >>> loss_fn(model(input), target).backward()
23 >>> optimizer.step()
24
25 __ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf
26
27 .. note::
28 The implementation of SGD with Momentum/Nesterov subtly differs from
29 Sutskever et. al. and implementations in some other frameworks.
30
31 Considering the specific case of Momentum, the update can be written as
32
33 .. math::
34 v = \rho * v + g \\
35 p = p - lr * v
36
37 where p, g, v and :math:`\rho` denote the parameters, gradient,
38 velocity, and momentum respectively.
39
40 This is in contrast to Sutskever et. al. and
41 other frameworks which employ an update of the form
42
43 .. math::
44 v = \rho * v + lr * g \\
45 p = p - v
46
47 The Nesterov version is analogously modified.
48 """
49
50 def __init__(self, params, lr=required, momentum=0, dampening=0,
51 weight_decay=0, nesterov=False):
52 defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
53 weight_decay=weight_decay, nesterov=nesterov)
54 if nesterov and (momentum <= 0 or dampening != 0):
55 raise ValueError("Nesterov momentum requires a momentum and zero dampening")
56 super(SGD, self).__init__(params, defaults)
57
58 def __setstate__(self, state):
59 super(SGD, self).__setstate__(state)
60 for group in self.param_groups:
61 group.setdefault('nesterov', False)
62
63 def step(self, closure=None):
64 """Performs a single optimization step.
65
66 Arguments:
67 closure (callable, optional): A closure that reevaluates the model
68 and returns the loss.
69 """
70 loss = None
71 if closure is not None:
72 loss = closure()
73
74 for group in self.param_groups:
75 weight_decay = group['weight_decay']
76 momentum = group['momentum']
77 dampening = group['dampening']
78 nesterov = group['nesterov']
79
80 for p in group['params']:
81 if p.grad is None:
82 continue
83 d_p = p.grad.data
84 if weight_decay != 0:
85 d_p.add_(weight_decay, p.data)
86 if momentum != 0:
87 param_state = self.state[p]
88 if 'momentum_buffer' not in param_state:
89 buf = param_state['momentum_buffer'] = d_p.clone()
90 else:
91 buf = param_state['momentum_buffer']
92 buf.mul_(momentum).add_(1 - dampening, d_p)
93 if nesterov:
94 d_p = d_p.add(momentum, buf)
95 else:
96 d_p = buf
97
98 p.data.add_(-group['lr'], d_p)
99
100 return loss
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/torch/optim/sgd.py b/torch/optim/sgd.py
--- a/torch/optim/sgd.py
+++ b/torch/optim/sgd.py
@@ -86,7 +86,8 @@
if momentum != 0:
param_state = self.state[p]
if 'momentum_buffer' not in param_state:
- buf = param_state['momentum_buffer'] = d_p.clone()
+ buf = param_state['momentum_buffer'] = p.data.new().resize_as_(p.data).zero_()
+ buf.mul_(momentum).add_(d_p)
else:
buf = param_state['momentum_buffer']
buf.mul_(momentum).add_(1 - dampening, d_p)
|
{"golden_diff": "diff --git a/torch/optim/sgd.py b/torch/optim/sgd.py\n--- a/torch/optim/sgd.py\n+++ b/torch/optim/sgd.py\n@@ -86,7 +86,8 @@\n if momentum != 0:\n param_state = self.state[p]\n if 'momentum_buffer' not in param_state:\n- buf = param_state['momentum_buffer'] = d_p.clone()\n+ buf = param_state['momentum_buffer'] = p.data.new().resize_as_(p.data).zero_()\n+ buf.mul_(momentum).add_(d_p)\n else:\n buf = param_state['momentum_buffer']\n buf.mul_(momentum).add_(1 - dampening, d_p)\n", "issue": "Sparse SGD + momentum = cuda memory issue.\nWhen using classic SGD optimizer with momentum with sparse embeddings the memory keeps garbage collecting / allocating leading to slow down and out of memory error eventually. [Here is a minimal exemple to reproduce the issue](https://gist.github.com/cedias/946a380807b7e1bf92d738268b71415a)\r\n\r\n\r\n\r\n\r\n\r\nThe issue dissapears when momentum is not used\r\n\r\n\r\nor when embeddings are not sparse\r\n\r\n\r\n\r\nI'm using the last pytorch version on conda: `'0.2.0_4'`\r\n\n", "before_files": [{"content": "from .optimizer import Optimizer, required\n\n\nclass SGD(Optimizer):\n r\"\"\"Implements stochastic gradient descent (optionally with momentum).\n\n Nesterov momentum is based on the formula from\n `On the importance of initialization and momentum in deep learning`__.\n\n Args:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float): learning rate\n momentum (float, optional): momentum factor (default: 0)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n dampening (float, optional): dampening for momentum (default: 0)\n nesterov (bool, optional): enables Nesterov momentum (default: False)\n\n Example:\n >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)\n >>> optimizer.zero_grad()\n >>> loss_fn(model(input), target).backward()\n >>> optimizer.step()\n\n __ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf\n\n .. note::\n The implementation of SGD with Momentum/Nesterov subtly differs from\n Sutskever et. al. and implementations in some other frameworks.\n\n Considering the specific case of Momentum, the update can be written as\n\n .. math::\n v = \\rho * v + g \\\\\n p = p - lr * v\n\n where p, g, v and :math:`\\rho` denote the parameters, gradient,\n velocity, and momentum respectively.\n\n This is in contrast to Sutskever et. al. and\n other frameworks which employ an update of the form\n\n .. 
math::\n v = \\rho * v + lr * g \\\\\n p = p - v\n\n The Nesterov version is analogously modified.\n \"\"\"\n\n def __init__(self, params, lr=required, momentum=0, dampening=0,\n weight_decay=0, nesterov=False):\n defaults = dict(lr=lr, momentum=momentum, dampening=dampening,\n weight_decay=weight_decay, nesterov=nesterov)\n if nesterov and (momentum <= 0 or dampening != 0):\n raise ValueError(\"Nesterov momentum requires a momentum and zero dampening\")\n super(SGD, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(SGD, self).__setstate__(state)\n for group in self.param_groups:\n group.setdefault('nesterov', False)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n weight_decay = group['weight_decay']\n momentum = group['momentum']\n dampening = group['dampening']\n nesterov = group['nesterov']\n\n for p in group['params']:\n if p.grad is None:\n continue\n d_p = p.grad.data\n if weight_decay != 0:\n d_p.add_(weight_decay, p.data)\n if momentum != 0:\n param_state = self.state[p]\n if 'momentum_buffer' not in param_state:\n buf = param_state['momentum_buffer'] = d_p.clone()\n else:\n buf = param_state['momentum_buffer']\n buf.mul_(momentum).add_(1 - dampening, d_p)\n if nesterov:\n d_p = d_p.add(momentum, buf)\n else:\n d_p = buf\n\n p.data.add_(-group['lr'], d_p)\n\n return loss\n", "path": "torch/optim/sgd.py"}], "after_files": [{"content": "from .optimizer import Optimizer, required\n\n\nclass SGD(Optimizer):\n r\"\"\"Implements stochastic gradient descent (optionally with momentum).\n\n Nesterov momentum is based on the formula from\n `On the importance of initialization and momentum in deep learning`__.\n\n Args:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float): learning rate\n momentum (float, optional): momentum factor (default: 0)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n dampening (float, optional): dampening for momentum (default: 0)\n nesterov (bool, optional): enables Nesterov momentum (default: False)\n\n Example:\n >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)\n >>> optimizer.zero_grad()\n >>> loss_fn(model(input), target).backward()\n >>> optimizer.step()\n\n __ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf\n\n .. note::\n The implementation of SGD with Momentum/Nesterov subtly differs from\n Sutskever et. al. and implementations in some other frameworks.\n\n Considering the specific case of Momentum, the update can be written as\n\n .. math::\n v = \\rho * v + g \\\\\n p = p - lr * v\n\n where p, g, v and :math:`\\rho` denote the parameters, gradient,\n velocity, and momentum respectively.\n\n This is in contrast to Sutskever et. al. and\n other frameworks which employ an update of the form\n\n .. 
math::\n v = \\rho * v + lr * g \\\\\n p = p - v\n\n The Nesterov version is analogously modified.\n \"\"\"\n\n def __init__(self, params, lr=required, momentum=0, dampening=0,\n weight_decay=0, nesterov=False):\n defaults = dict(lr=lr, momentum=momentum, dampening=dampening,\n weight_decay=weight_decay, nesterov=nesterov)\n if nesterov and (momentum <= 0 or dampening != 0):\n raise ValueError(\"Nesterov momentum requires a momentum and zero dampening\")\n super(SGD, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(SGD, self).__setstate__(state)\n for group in self.param_groups:\n group.setdefault('nesterov', False)\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n weight_decay = group['weight_decay']\n momentum = group['momentum']\n dampening = group['dampening']\n nesterov = group['nesterov']\n\n for p in group['params']:\n if p.grad is None:\n continue\n d_p = p.grad.data\n if weight_decay != 0:\n d_p.add_(weight_decay, p.data)\n if momentum != 0:\n param_state = self.state[p]\n if 'momentum_buffer' not in param_state:\n buf = param_state['momentum_buffer'] = p.data.new().resize_as_(p.data).zero_()\n buf.mul_(momentum).add_(d_p)\n else:\n buf = param_state['momentum_buffer']\n buf.mul_(momentum).add_(1 - dampening, d_p)\n if nesterov:\n d_p = d_p.add(momentum, buf)\n else:\n d_p = buf\n\n p.data.add_(-group['lr'], d_p)\n\n return loss\n", "path": "torch/optim/sgd.py"}]}
| 1,669 | 166 |
gh_patches_debug_19947
|
rasdani/github-patches
|
git_diff
|
cowrie__cowrie-1463
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CEF spaces between each character
I followed the 7 steps at https://cowrie.readthedocs.io/en/latest/INSTALL.html#step-1-install-dependencies
I set up CEF in the config file:
```
[output_localsyslog]
enabled = true
facility = USER
format = cef
```
But the output I'm getting has a bunch of spaces; it seems to have a space between each character, as shown below:
```
Nov 26 04:42:45 cowrie cowrie: [cowrie] C E F : 0 | C o w r i e | C o w r i e | 1 . 0 | c o w r i e . s e s s i o n . c o n n e c t | c o w r i e . s e s s i o n . c o n n e c t | 5 | a p p = S S H v 2 d e s t i n a t i o n S e r v i c e n a m e = s s h d d e v i c e E x t e r n a l I d = c o w r i e m s g = N e w c o n n e c t i o n : 1 9 2 . 1 6 8 . 2 . 5 7 : 3 3 6 2 6 ( 1 9 2 . 1 6 8 . 2 . 6 4 : 2 2 2 2 ) [ s e s s i o n : 8 a 9 0 7 9 8 c 8 9 f d ] s r c = 1 9 2 . 1 6 8 . 2 . 5 7 p r o t o = t c p s p t = 3 3 6 2 6 d p t = 2 2 2 2 d s t = 1 9 2 . 1 6 8 . 2 . 6 4
Nov 26 04:42:46 cowrie cowrie: [cowrie] C E F : 0 | C o w r i e | C o w r i e | 1 . 0 | c o w r i e . c l i e n t . v e r s i o n | c o w r i e . c l i e n t . v e r s i o n | 5 | a p p = S S H v 2 d e s t i n a t i o n S e r v i c e n a m e = s s h d d e v i c e E x t e r n a l I d = c o w r i e m s g = R e m o t e S S H v e r s i o n : b ' S S H - 2 . 0 - O p e n S S H _ 8 . 2 p 1 U b u n t u - 4 u b u n t u 0 . 1 ' s r c = 1 9 2 . 1 6 8 . 2 . 5 7 p r o t o = t c p
Nov 26 04:42:46 cowrie cowrie: [cowrie] C E F : 0 | C o w r i e | C o w r i e | 1 . 0 | c o w r i e . c l i e n t . k e x | c o w r i e . c l i e n t . k e x | 5 | a p p = S S H v 2 d e s t i n a t i o n S e r v i c e n a m e = s s h d d e v i c e E x t e r n a l I d = c o w r i e m s g = S S H c l i e n t h a s s h f i n g e r p r i n t : a e 8 b d 7 d d 0 9 9 7 0 5 5 5 a a 4 c 6 e d 2 2 a d b b f 5 6 s r c = 1 9 2 . 1 6 8 . 2 . 5 7 p r o t o = t c p
```
--- END ISSUE ---
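The per-character spacing is the key clue: Twisted's syslog observer builds the final line by joining the parts of `eventDict['message']` with spaces, so handing it a bare string makes every character its own "part". The fix in the diff below simply wraps the CEF string in a list, and the effect can be reproduced in plain Python without Twisted (the CEF line here is shortened for illustration):

```python
cef_line = "CEF:0|Cowrie|Cowrie|1.0|cowrie.session.connect|cowrie.session.connect|5|..."

# Joining the string itself iterates over its characters: 'C E F : 0 | C o w r i e ...'
print(" ".join(cef_line))

# Wrapping it in a list, as the fix does, keeps the message intact.
print(" ".join([cef_line]))
```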
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cowrie/output/localsyslog.py`
Content:
```
1 # Copyright (c) 2015 Michel Oosterhof <[email protected]>
2 # All rights reserved.
3 #
4 # Redistribution and use in source and binary forms, with or without
5 # modification, are permitted provided that the following conditions
6 # are met:
7 #
8 # 1. Redistributions of source code must retain the above copyright
9 # notice, this list of conditions and the following disclaimer.
10 # 2. Redistributions in binary form must reproduce the above copyright
11 # notice, this list of conditions and the following disclaimer in the
12 # documentation and/or other materials provided with the distribution.
13 # 3. The names of the author(s) may not be used to endorse or promote
14 # products derived from this software without specific prior written
15 # permission.
16 #
17 # THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
18 # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 # IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
21 # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
22 # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
24 # AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
25 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 # SUCH DAMAGE.
28
29 from __future__ import absolute_import, division
30
31 import syslog
32
33 import twisted.python.syslog
34
35 import cowrie.core.cef
36 import cowrie.core.output
37 from cowrie.core.config import CowrieConfig
38
39
40 class Output(cowrie.core.output.Output):
41 """
42 localsyslog output
43 """
44
45 def start(self):
46 self.format = CowrieConfig().get('output_localsyslog', 'format')
47 facilityString = CowrieConfig().get('output_localsyslog', 'facility')
48 self.facility = vars(syslog)['LOG_' + facilityString]
49 self.syslog = twisted.python.syslog.SyslogObserver(prefix='cowrie', facility=self.facility)
50
51 def stop(self):
52 pass
53
54 def write(self, logentry):
55 if 'isError' not in logentry:
56 logentry['isError'] = False
57
58 if self.format == 'cef':
59 self.syslog.emit({
60 'message': cowrie.core.cef.formatCef(logentry),
61 'isError': False,
62 'system': 'cowrie'
63 })
64 else:
65 # message appears with additional spaces if message key is defined
66 logentry['message'] = [logentry['message']]
67 self.syslog.emit(logentry)
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cowrie/output/localsyslog.py b/src/cowrie/output/localsyslog.py
--- a/src/cowrie/output/localsyslog.py
+++ b/src/cowrie/output/localsyslog.py
@@ -45,6 +45,7 @@
def start(self):
self.format = CowrieConfig().get('output_localsyslog', 'format')
facilityString = CowrieConfig().get('output_localsyslog', 'facility')
+ levelString = CowrieConfig().get('output_localsyslog', 'level')
self.facility = vars(syslog)['LOG_' + facilityString]
self.syslog = twisted.python.syslog.SyslogObserver(prefix='cowrie', facility=self.facility)
@@ -57,7 +58,7 @@
if self.format == 'cef':
self.syslog.emit({
- 'message': cowrie.core.cef.formatCef(logentry),
+ 'message': [cowrie.core.cef.formatCef(logentry)],
'isError': False,
'system': 'cowrie'
})
|
{"golden_diff": "diff --git a/src/cowrie/output/localsyslog.py b/src/cowrie/output/localsyslog.py\n--- a/src/cowrie/output/localsyslog.py\n+++ b/src/cowrie/output/localsyslog.py\n@@ -45,6 +45,7 @@\n def start(self):\n self.format = CowrieConfig().get('output_localsyslog', 'format')\n facilityString = CowrieConfig().get('output_localsyslog', 'facility')\n+ levelString = CowrieConfig().get('output_localsyslog', 'level')\n self.facility = vars(syslog)['LOG_' + facilityString]\n self.syslog = twisted.python.syslog.SyslogObserver(prefix='cowrie', facility=self.facility)\n \n@@ -57,7 +58,7 @@\n \n if self.format == 'cef':\n self.syslog.emit({\n- 'message': cowrie.core.cef.formatCef(logentry),\n+ 'message': [cowrie.core.cef.formatCef(logentry)],\n 'isError': False,\n 'system': 'cowrie'\n })\n", "issue": "CEF spaces between each character\nI followed the 7 steps https://cowrie.readthedocs.io/en/latest/INSTALL.html#step-1-install-dependencies\r\n\r\nI set up cef in the config file \r\n`\r\n[output_localsyslog]\r\nenabled = true\r\nfacility = USER\r\nformat = cef`\r\n\r\nBut the output I'm getting has a bunch of spaces it seems to have a space between each character below \r\n\r\n`\r\nNov 26 04:42:45 cowrie cowrie: [cowrie] C E F : 0 | C o w r i e | C o w r i e | 1 . 0 | c o w r i e . s e s s i o n . c o n n e c t | c o w r i e . s e s s i o n . c o n n e c t | 5 | a p p = S S H v 2 d e s t i n a t i o n S e r v i c e n a m e = s s h d d e v i c e E x t e r n a l I d = c o w r i e m s g = N e w c o n n e c t i o n : 1 9 2 . 1 6 8 . 2 . 5 7 : 3 3 6 2 6 ( 1 9 2 . 1 6 8 . 2 . 6 4 : 2 2 2 2 ) [ s e s s i o n : 8 a 9 0 7 9 8 c 8 9 f d ] s r c = 1 9 2 . 1 6 8 . 2 . 5 7 p r o t o = t c p s p t = 3 3 6 2 6 d p t = 2 2 2 2 d s t = 1 9 2 . 1 6 8 . 2 . 6 4\r\n\r\nNov 26 04:42:46 cowrie cowrie: [cowrie] C E F : 0 | C o w r i e | C o w r i e | 1 . 0 | c o w r i e . c l i e n t . v e r s i o n | c o w r i e . c l i e n t . v e r s i o n | 5 | a p p = S S H v 2 d e s t i n a t i o n S e r v i c e n a m e = s s h d d e v i c e E x t e r n a l I d = c o w r i e m s g = R e m o t e S S H v e r s i o n : b ' S S H - 2 . 0 - O p e n S S H _ 8 . 2 p 1 U b u n t u - 4 u b u n t u 0 . 1 ' s r c = 1 9 2 . 1 6 8 . 2 . 5 7 p r o t o = t c p\r\n\r\nNov 26 04:42:46 cowrie cowrie: [cowrie] C E F : 0 | C o w r i e | C o w r i e | 1 . 0 | c o w r i e . c l i e n t . k e x | c o w r i e . c l i e n t . k e x | 5 | a p p = S S H v 2 d e s t i n a t i o n S e r v i c e n a m e = s s h d d e v i c e E x t e r n a l I d = c o w r i e m s g = S S H c l i e n t h a s s h f i n g e r p r i n t : a e 8 b d 7 d d 0 9 9 7 0 5 5 5 a a 4 c 6 e d 2 2 a d b b f 5 6 s r c = 1 9 2 . 1 6 8 . 2 . 5 7 p r o t o = t c p\r\n`\n", "before_files": [{"content": "# Copyright (c) 2015 Michel Oosterhof <[email protected]>\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# 3. 
The names of the author(s) may not be used to endorse or promote\n# products derived from this software without specific prior written\n# permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR\n# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED\n# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n# SUCH DAMAGE.\n\nfrom __future__ import absolute_import, division\n\nimport syslog\n\nimport twisted.python.syslog\n\nimport cowrie.core.cef\nimport cowrie.core.output\nfrom cowrie.core.config import CowrieConfig\n\n\nclass Output(cowrie.core.output.Output):\n \"\"\"\n localsyslog output\n \"\"\"\n\n def start(self):\n self.format = CowrieConfig().get('output_localsyslog', 'format')\n facilityString = CowrieConfig().get('output_localsyslog', 'facility')\n self.facility = vars(syslog)['LOG_' + facilityString]\n self.syslog = twisted.python.syslog.SyslogObserver(prefix='cowrie', facility=self.facility)\n\n def stop(self):\n pass\n\n def write(self, logentry):\n if 'isError' not in logentry:\n logentry['isError'] = False\n\n if self.format == 'cef':\n self.syslog.emit({\n 'message': cowrie.core.cef.formatCef(logentry),\n 'isError': False,\n 'system': 'cowrie'\n })\n else:\n # message appears with additional spaces if message key is defined\n logentry['message'] = [logentry['message']]\n self.syslog.emit(logentry)\n", "path": "src/cowrie/output/localsyslog.py"}], "after_files": [{"content": "# Copyright (c) 2015 Michel Oosterhof <[email protected]>\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# 3. 
The names of the author(s) may not be used to endorse or promote\n# products derived from this software without specific prior written\n# permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR\n# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED\n# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n# SUCH DAMAGE.\n\nfrom __future__ import absolute_import, division\n\nimport syslog\n\nimport twisted.python.syslog\n\nimport cowrie.core.cef\nimport cowrie.core.output\nfrom cowrie.core.config import CowrieConfig\n\n\nclass Output(cowrie.core.output.Output):\n \"\"\"\n localsyslog output\n \"\"\"\n\n def start(self):\n self.format = CowrieConfig().get('output_localsyslog', 'format')\n facilityString = CowrieConfig().get('output_localsyslog', 'facility')\n levelString = CowrieConfig().get('output_localsyslog', 'level')\n self.facility = vars(syslog)['LOG_' + facilityString]\n self.syslog = twisted.python.syslog.SyslogObserver(prefix='cowrie', facility=self.facility)\n\n def stop(self):\n pass\n\n def write(self, logentry):\n if 'isError' not in logentry:\n logentry['isError'] = False\n\n if self.format == 'cef':\n self.syslog.emit({\n 'message': [cowrie.core.cef.formatCef(logentry)],\n 'isError': False,\n 'system': 'cowrie'\n })\n else:\n # message appears with additional spaces if message key is defined\n logentry['message'] = [logentry['message']]\n self.syslog.emit(logentry)\n", "path": "src/cowrie/output/localsyslog.py"}]}
| 1,973 | 239 |
gh_patches_debug_942
|
rasdani/github-patches
|
git_diff
|
cookiecutter__cookiecutter-1578
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
prompt.read_user_dict() is broken due to click upgrade from 7.1.2 to 8.0.0
* Cookiecutter version: 1.7.3
* Template project url: -
* Python version: 3.9.5
* Operating System: macOS Catalina 10.15.7
### Description:
Apparently, there is a breaking change in `click==8.0.0` affecting dictionary values in cookiecutter.json
cookiecutter.json example:
```json
{
"project_name": "",
"project_policy": {"project_policy_example": "yes"}
}
```
```
% python -m cookiecutter ../Projects/project-configs
devplatform_project_name [infra-dev]:
project_name []: t
project_policy [default]:
Error: Unable to decode to JSON.
```
Looking closer at the cookiecutter.promt, I can see that in `read_user_dict()`, click passes `user_value='default'` to `process_json()`, instead of passing an actual default value from the cookiecutter.json as it was in `click 7.1.2`.
Link to the `process_json()` code: https://github.com/cookiecutter/cookiecutter/blob/master/cookiecutter/prompt.py#L81

As far as I can suppose, that issue could have been introduced by this PR https://github.com/pallets/click/pull/1517/
### Quick local fix
Install click first and specify version older than 8.0.0
```
pip install click==7.1.2
pip install cookiecutter
```
### Quick fix for cookiecutter library
in `setup.py` replace 'click>=7.0' with `'click>=7,<8.0.0'`
### What I've run:
```shell
% python3.9 -m venv test39
% source test39/bin/activate
% python -V
Python 3.9.5
% python -m pip install click==7.1.2
Collecting click==7.1.2
Using cached click-7.1.2-py2.py3-none-any.whl (82 kB)
Installing collected packages: click
Successfully installed click-7.1.2
(test39) ro.solyanik@macbook-ro Environments % python -m pip install cookiecutter
Collecting cookiecutter
Using cached cookiecutter-1.7.3-py2.py3-none-any.whl (34 kB)
Collecting six>=1.10
................................................
Installing collected packages: six, python-dateutil, MarkupSafe, urllib3, text-unidecode, Jinja2, idna, chardet, certifi, arrow, requests, python-slugify, poyo, jinja2-time, binaryornot, cookiecutter
Successfully installed Jinja2-3.0.1 MarkupSafe-2.0.1 arrow-1.1.0 binaryornot-0.4.4 certifi-2020.12.5 chardet-4.0.0 cookiecutter-1.7.3 idna-2.10 jinja2-time-0.2.0 poyo-0.5.0 python-dateutil-2.8.1 python-slugify-5.0.2 requests-2.25.1 six-1.16.0 text-unidecode-1.3 urllib3-1.26.4
% python -m cookiecutter ../Projects/project-configs
project_name []: t
project_policy [default]:
% ls t
Makefile README.md t tests
% rm -rf t
% python -m pip install click==8.0.0
Collecting click==8.0.0
Using cached click-8.0.0-py3-none-any.whl (96 kB)
Installing collected packages: click
Attempting uninstall: click
Found existing installation: click 7.1.2
Uninstalling click-7.1.2:
Successfully uninstalled click-7.1.2
Successfully installed click-8.0.0
% python -m cookiecutter ../Projects/project-configs
devplatform_project_name [infra-dev]:
project_name []: t
project_policy [default]:
Error: Unable to decode to JSON.
project_policy [default]:
Error: Unable to decode to JSON.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """cookiecutter distutils configuration."""
5
6 import os
7 import io
8 import sys
9
10 from setuptools import setup
11
12 version = "1.7.3"
13
14 if sys.argv[-1] == 'publish':
15 os.system('python setup.py sdist upload')
16 os.system('python setup.py bdist_wheel upload')
17 sys.exit()
18
19 if sys.argv[-1] == 'tag':
20 os.system("git tag -a %s -m 'version %s'" % (version, version))
21 os.system("git push --tags")
22 sys.exit()
23
24 with io.open('README.md', 'r', encoding='utf-8') as readme_file:
25 readme = readme_file.read()
26
27 requirements = [
28 'binaryornot>=0.4.4',
29 'Jinja2>=2.7,<4.0.0',
30 'click>=7.0',
31 'poyo>=0.5.0',
32 'jinja2-time>=0.2.0',
33 'python-slugify>=4.0.0',
34 'requests>=2.23.0',
35 'six>=1.10',
36 ]
37
38 if sys.argv[-1] == 'readme':
39 print(readme)
40 sys.exit()
41
42
43 setup(
44 name='cookiecutter',
45 version=version,
46 description=(
47 'A command-line utility that creates projects from project '
48 'templates, e.g. creating a Python package project from a '
49 'Python package project template.'
50 ),
51 long_description=readme,
52 long_description_content_type='text/markdown',
53 author='Audrey Roy',
54 author_email='[email protected]',
55 url='https://github.com/cookiecutter/cookiecutter',
56 packages=['cookiecutter'],
57 package_dir={'cookiecutter': 'cookiecutter'},
58 entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']},
59 include_package_data=True,
60 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
61 install_requires=requirements,
62 extras_require={':python_version<"3.3"': ['whichcraft>=0.4.0']},
63 license='BSD',
64 zip_safe=False,
65 classifiers=[
66 "Development Status :: 5 - Production/Stable",
67 "Environment :: Console",
68 "Intended Audience :: Developers",
69 "Natural Language :: English",
70 "License :: OSI Approved :: BSD License",
71 "Programming Language :: Python",
72 "Programming Language :: Python :: 2",
73 "Programming Language :: Python :: 2.7",
74 "Programming Language :: Python :: 3",
75 "Programming Language :: Python :: 3.5",
76 "Programming Language :: Python :: 3.6",
77 "Programming Language :: Python :: 3.7",
78 "Programming Language :: Python :: 3.8",
79 "Programming Language :: Python :: Implementation :: CPython",
80 "Programming Language :: Python :: Implementation :: PyPy",
81 "Topic :: Software Development",
82 ],
83 keywords=(
84 'cookiecutter, Python, projects, project templates, Jinja2, '
85 'skeleton, scaffolding, project directory, setup.py, package, '
86 'packaging'
87 ),
88 )
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -27,7 +27,7 @@
requirements = [
'binaryornot>=0.4.4',
'Jinja2>=2.7,<4.0.0',
- 'click>=7.0',
+ 'click>=7.0,<8.0.0',
'poyo>=0.5.0',
'jinja2-time>=0.2.0',
'python-slugify>=4.0.0',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -27,7 +27,7 @@\n requirements = [\n 'binaryornot>=0.4.4',\n 'Jinja2>=2.7,<4.0.0',\n- 'click>=7.0',\n+ 'click>=7.0,<8.0.0',\n 'poyo>=0.5.0',\n 'jinja2-time>=0.2.0',\n 'python-slugify>=4.0.0',\n", "issue": "prompt.read_user_dict() is broken due to click upgrade from 7.1.2 to 8.0.0\n* Cookiecutter version: 1.7.3\r\n* Template project url: -\r\n* Python version: 3.9.5\r\n* Operating System: macOS Catalina 10.15.7\r\n\r\n### Description:\r\n\r\nApparently, there is a breaking change in `click==8.0.0` affecting dictionary values in cookiecutter.json\r\ncookiecutter.json example:\r\n```json\r\n{\r\n \"project_name\": \"\",\r\n \"project_policy\": {\"project_policy_example\": \"yes\"}\r\n}\r\n```\r\n \r\n```\r\n% python -m cookiecutter ../Projects/project-configs\r\ndevplatform_project_name [infra-dev]: \r\nproject_name []: t\r\nproject_policy [default]: \r\nError: Unable to decode to JSON.\r\n```\r\n\r\nLooking closer at the cookiecutter.promt, I can see that in `read_user_dict()`, click passes `user_value='default'` to `process_json()`, instead of passing an actual default value from the cookiecutter.json as it was in `click 7.1.2`. \r\nLink to the `process_json()` code: https://github.com/cookiecutter/cookiecutter/blob/master/cookiecutter/prompt.py#L81\r\n\r\n\r\nAs far as I can suppose, that issue could have been introduced by this PR https://github.com/pallets/click/pull/1517/\r\n\r\n### Quick local fix\r\nInstall click first and specify version older than 8.0.0\r\n```\r\npip install click==7.1.2\r\npip install cookiecutter\r\n```\r\n\r\n### Quick fix for cookiecutter library\r\nin `setup.py` replace 'click>=7.0' with `'click>=7,<8.0.0'`\r\n\r\n### What I've run:\r\n\r\n```shell\r\n% python3.9 -m venv test39 \r\n \r\n% source test39/bin/activate\r\n\r\n% python -V\r\nPython 3.9.5\r\n\r\n\r\n% python -m pip install click==7.1.2\r\nCollecting click==7.1.2\r\n Using cached click-7.1.2-py2.py3-none-any.whl (82 kB)\r\nInstalling collected packages: click\r\nSuccessfully installed click-7.1.2\r\n(test39) ro.solyanik@macbook-ro Environments % python -m pip install cookiecutter\r\nCollecting cookiecutter\r\n Using cached cookiecutter-1.7.3-py2.py3-none-any.whl (34 kB)\r\nCollecting six>=1.10\r\n................................................\r\nInstalling collected packages: six, python-dateutil, MarkupSafe, urllib3, text-unidecode, Jinja2, idna, chardet, certifi, arrow, requests, python-slugify, poyo, jinja2-time, binaryornot, cookiecutter\r\nSuccessfully installed Jinja2-3.0.1 MarkupSafe-2.0.1 arrow-1.1.0 binaryornot-0.4.4 certifi-2020.12.5 chardet-4.0.0 cookiecutter-1.7.3 idna-2.10 jinja2-time-0.2.0 poyo-0.5.0 python-dateutil-2.8.1 python-slugify-5.0.2 requests-2.25.1 six-1.16.0 text-unidecode-1.3 urllib3-1.26.4\r\n\r\n% python -m cookiecutter ../Projects/project-configs\r\nproject_name []: t\r\nproject_policy [default]: \r\n\r\n% ls t \r\nMakefile README.md t tests\r\n\r\n% rm -rf t\r\n\r\n% python -m pip install click==8.0.0 \r\nCollecting click==8.0.0\r\n Using cached click-8.0.0-py3-none-any.whl (96 kB)\r\nInstalling collected packages: click\r\n Attempting uninstall: click\r\n Found existing installation: click 7.1.2\r\n Uninstalling click-7.1.2:\r\n Successfully uninstalled click-7.1.2\r\nSuccessfully installed click-8.0.0\r\n\r\n% python -m cookiecutter ../Projects/project-configs\r\ndevplatform_project_name [infra-dev]: \r\nproject_name []: t\r\nproject_policy [default]: \r\nError: Unable to decode to 
JSON.\r\nproject_policy [default]: \r\nError: Unable to decode to JSON.\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"cookiecutter distutils configuration.\"\"\"\n\nimport os\nimport io\nimport sys\n\nfrom setuptools import setup\n\nversion = \"1.7.3\"\n\nif sys.argv[-1] == 'publish':\n os.system('python setup.py sdist upload')\n os.system('python setup.py bdist_wheel upload')\n sys.exit()\n\nif sys.argv[-1] == 'tag':\n os.system(\"git tag -a %s -m 'version %s'\" % (version, version))\n os.system(\"git push --tags\")\n sys.exit()\n\nwith io.open('README.md', 'r', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\nrequirements = [\n 'binaryornot>=0.4.4',\n 'Jinja2>=2.7,<4.0.0',\n 'click>=7.0',\n 'poyo>=0.5.0',\n 'jinja2-time>=0.2.0',\n 'python-slugify>=4.0.0',\n 'requests>=2.23.0',\n 'six>=1.10',\n]\n\nif sys.argv[-1] == 'readme':\n print(readme)\n sys.exit()\n\n\nsetup(\n name='cookiecutter',\n version=version,\n description=(\n 'A command-line utility that creates projects from project '\n 'templates, e.g. creating a Python package project from a '\n 'Python package project template.'\n ),\n long_description=readme,\n long_description_content_type='text/markdown',\n author='Audrey Roy',\n author_email='[email protected]',\n url='https://github.com/cookiecutter/cookiecutter',\n packages=['cookiecutter'],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']},\n include_package_data=True,\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',\n install_requires=requirements,\n extras_require={':python_version<\"3.3\"': ['whichcraft>=0.4.0']},\n license='BSD',\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Software Development\",\n ],\n keywords=(\n 'cookiecutter, Python, projects, project templates, Jinja2, '\n 'skeleton, scaffolding, project directory, setup.py, package, '\n 'packaging'\n ),\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"cookiecutter distutils configuration.\"\"\"\n\nimport os\nimport io\nimport sys\n\nfrom setuptools import setup\n\nversion = \"1.7.3\"\n\nif sys.argv[-1] == 'publish':\n os.system('python setup.py sdist upload')\n os.system('python setup.py bdist_wheel upload')\n sys.exit()\n\nif sys.argv[-1] == 'tag':\n os.system(\"git tag -a %s -m 'version %s'\" % (version, version))\n os.system(\"git push --tags\")\n sys.exit()\n\nwith io.open('README.md', 'r', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\nrequirements = [\n 'binaryornot>=0.4.4',\n 'Jinja2>=2.7,<4.0.0',\n 'click>=7.0,<8.0.0',\n 'poyo>=0.5.0',\n 'jinja2-time>=0.2.0',\n 'python-slugify>=4.0.0',\n 'requests>=2.23.0',\n 'six>=1.10',\n]\n\nif sys.argv[-1] == 'readme':\n print(readme)\n 
sys.exit()\n\n\nsetup(\n name='cookiecutter',\n version=version,\n description=(\n 'A command-line utility that creates projects from project '\n 'templates, e.g. creating a Python package project from a '\n 'Python package project template.'\n ),\n long_description=readme,\n long_description_content_type='text/markdown',\n author='Audrey Roy',\n author_email='[email protected]',\n url='https://github.com/cookiecutter/cookiecutter',\n packages=['cookiecutter'],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={'console_scripts': ['cookiecutter = cookiecutter.__main__:main']},\n include_package_data=True,\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',\n install_requires=requirements,\n extras_require={':python_version<\"3.3\"': ['whichcraft>=0.4.0']},\n license='BSD',\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Natural Language :: English\",\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Software Development\",\n ],\n keywords=(\n 'cookiecutter, Python, projects, project templates, Jinja2, '\n 'skeleton, scaffolding, project directory, setup.py, package, '\n 'packaging'\n ),\n)\n", "path": "setup.py"}]}
| 2,184 | 124 |
gh_patches_debug_14833
|
rasdani/github-patches
|
git_diff
|
conda__conda-build-734
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CPU_COUNT reliability in MacOSX
Hello!
After a talk with @ccordoba12 in [conda-recipes/pr427](https://github.com/conda/conda-recipes/pull/427), it seems that CPU_COUNT is not reliable in MacOSX and he suggests to use "sysctl -n hw.ncpu".
Have you encountered this bug ?
I see that in python3.5, multiprocessing.cpu_count uses os.cpu_count and this builtin command uses also sysctl : posixmodule.c, line 11220:
``` c
#elif defined(__DragonFly__) || \
defined(__OpenBSD__) || \
defined(__FreeBSD__) || \
defined(__NetBSD__) || \
defined(__APPLE__)
int mib[2];
size_t len = sizeof(ncpu);
mib[0] = CTL_HW;
mib[1] = HW_NCPU;
if (sysctl(mib, 2, &ncpu, &len, NULL, 0) != 0)
ncpu = 0;
```
To fix it, I can create a patch that add an OSX case in in [conda-build/environ.py:109](https://github.com/conda/conda-build/blob/master/conda_build/environ.py#L109) with something like:
``` python
import subprocess
import sys
if sys.platform == 'darwin':
out, err = subprocess.Popen('sysctl -n hw.ncpu', shell=True, stdout=subprocess.PIPE).communicate()
d['CPU_COUNT'] = str(out).strip()
```
Do you agree on this approach ?
Thank you very much
CPU_COUNT reliability in MacOSX
Hello!
After a talk with @ccordoba12 in [conda-recipes/pr427](https://github.com/conda/conda-recipes/pull/427), it seems that CPU_COUNT is not reliable in MacOSX and he suggests to use "sysctl -n hw.ncpu".
Have you encountered this bug ?
I see that in python3.5, multiprocessing.cpu_count uses os.cpu_count and this builtin command uses also sysctl : posixmodule.c, line 11220:
``` c
#elif defined(__DragonFly__) || \
defined(__OpenBSD__) || \
defined(__FreeBSD__) || \
defined(__NetBSD__) || \
defined(__APPLE__)
int mib[2];
size_t len = sizeof(ncpu);
mib[0] = CTL_HW;
mib[1] = HW_NCPU;
if (sysctl(mib, 2, &ncpu, &len, NULL, 0) != 0)
ncpu = 0;
```
To fix it, I can create a patch that add an OSX case in in [conda-build/environ.py:109](https://github.com/conda/conda-build/blob/master/conda_build/environ.py#L109) with something like:
``` python
import subprocess
import sys
if sys.platform == 'darwin':
out, err = subprocess.Popen('sysctl -n hw.ncpu', shell=True, stdout=subprocess.PIPE).communicate()
d['CPU_COUNT'] = str(out).strip()
```
Do you agree on this approach ?
Thank you very much
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda_build/environ.py`
Content:
```
1 from __future__ import absolute_import, division, print_function
2
3 import os
4 import sys
5 from os.path import join
6 import subprocess
7 import multiprocessing
8
9 import conda.config as cc
10
11 from conda_build.config import config
12
13 from conda_build import source
14 from conda_build.scripts import prepend_bin_path
15
16
17 def get_perl_ver():
18 return str(config.CONDA_PERL)
19
20 def get_py_ver():
21 return '.'.join(str(config.CONDA_PY))
22
23 def get_npy_ver():
24 if config.CONDA_NPY:
25 # Convert int -> string, e.g.
26 # 17 -> '1.7'
27 # 110 -> '1.10'
28 conda_npy = str(config.CONDA_NPY)
29 return conda_npy[0] + '.' + conda_npy[1:]
30 return ''
31
32 def get_stdlib_dir():
33 return join(config.build_prefix, 'Lib' if sys.platform == 'win32' else
34 'lib/python%s' % get_py_ver())
35
36 def get_sp_dir():
37 return join(get_stdlib_dir(), 'site-packages')
38
39 def get_git_build_info(src_dir):
40 env = os.environ.copy()
41 d = {}
42 git_dir = join(src_dir, '.git')
43 if os.path.exists(git_dir):
44 env['GIT_DIR'] = git_dir
45 else:
46 return d
47
48 # grab information from describe
49 key_name = lambda a: "GIT_DESCRIBE_{}".format(a)
50 keys = [key_name("TAG"), key_name("NUMBER"), key_name("HASH")]
51 env = {str(key): str(value) for key, value in env.items()}
52 process = subprocess.Popen(["git", "describe", "--tags", "--long", "HEAD"],
53 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
54 env=env)
55 output = process.communicate()[0].strip()
56 output = output.decode('utf-8')
57 parts = output.rsplit('-', 2)
58 parts_length = len(parts)
59 if parts_length == 3:
60 d.update(dict(zip(keys, parts)))
61 # get the _full_ hash of the current HEAD
62 process = subprocess.Popen(["git", "rev-parse", "HEAD"],
63 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
64 env=env)
65 output = process.communicate()[0].strip()
66 output = output.decode('utf-8')
67 d['GIT_FULL_HASH'] = output
68 # set up the build string
69 if key_name('NUMBER') in d and key_name('HASH') in d:
70 d['GIT_BUILD_STR'] = '{}_{}'.format(d[key_name('NUMBER')],
71 d[key_name('HASH')])
72
73 return d
74
75 def get_dict(m=None, prefix=None):
76 if not prefix:
77 prefix = config.build_prefix
78
79 python = config.build_python
80 d = {'CONDA_BUILD': '1', 'PYTHONNOUSERSITE': '1'}
81 d['CONDA_DEFAULT_ENV'] = config.build_prefix
82 d['ARCH'] = str(cc.bits)
83 d['PREFIX'] = prefix
84 d['PYTHON'] = python
85 d['PY3K'] = str(config.PY3K)
86 d['STDLIB_DIR'] = get_stdlib_dir()
87 d['SP_DIR'] = get_sp_dir()
88 d['SYS_PREFIX'] = sys.prefix
89 d['SYS_PYTHON'] = sys.executable
90 d['PERL_VER'] = get_perl_ver()
91 d['PY_VER'] = get_py_ver()
92 if get_npy_ver():
93 d['NPY_VER'] = get_npy_ver()
94 d['SRC_DIR'] = source.get_dir()
95 if "LANG" in os.environ:
96 d['LANG'] = os.environ['LANG']
97 if "HTTPS_PROXY" in os.environ:
98 d['HTTPS_PROXY'] = os.environ['HTTPS_PROXY']
99 if "HTTP_PROXY" in os.environ:
100 d['HTTP_PROXY'] = os.environ['HTTP_PROXY']
101
102 if m:
103 for var_name in m.get_value('build/script_env', []):
104 value = os.getenv(var_name)
105 if value is None:
106 value = '<UNDEFINED>'
107 d[var_name] = value
108
109 try:
110 d['CPU_COUNT'] = str(multiprocessing.cpu_count())
111 except NotImplementedError:
112 d['CPU_COUNT'] = "1"
113
114 d.update(**get_git_build_info(d['SRC_DIR']))
115 d['PATH'] = dict(os.environ)['PATH']
116 d = prepend_bin_path(d, prefix)
117
118 if sys.platform == 'win32': # -------- Windows
119 d['SCRIPTS'] = join(prefix, 'Scripts')
120 d['LIBRARY_PREFIX'] = join(prefix, 'Library')
121 d['LIBRARY_BIN'] = join(d['LIBRARY_PREFIX'], 'bin')
122 d['LIBRARY_INC'] = join(d['LIBRARY_PREFIX'], 'include')
123 d['LIBRARY_LIB'] = join(d['LIBRARY_PREFIX'], 'lib')
124 # This probably should be done more generally
125 d['CYGWIN_PREFIX'] = prefix.replace('\\', '/').replace('C:', '/cygdrive/c')
126
127 d['R'] = join(prefix, 'Scripts', 'R.exe')
128 else: # -------- Unix
129 d['HOME'] = os.getenv('HOME', 'UNKNOWN')
130 d['PKG_CONFIG_PATH'] = join(prefix, 'lib', 'pkgconfig')
131 d['R'] = join(prefix, 'bin', 'R')
132
133 if sys.platform == 'darwin': # -------- OSX
134 d['OSX_ARCH'] = 'i386' if cc.bits == 32 else 'x86_64'
135 d['CFLAGS'] = '-arch %(OSX_ARCH)s' % d
136 d['CXXFLAGS'] = d['CFLAGS']
137 d['LDFLAGS'] = d['CFLAGS']
138 d['MACOSX_DEPLOYMENT_TARGET'] = '10.6'
139
140 elif sys.platform.startswith('linux'): # -------- Linux
141 d['LD_RUN_PATH'] = prefix + '/lib'
142
143 if m:
144 d['PKG_NAME'] = m.name()
145 d['PKG_VERSION'] = m.version()
146 d['PKG_BUILDNUM'] = str(m.build_number())
147 d['PKG_BUILD_STRING'] = str(m.build_id())
148 d['RECIPE_DIR'] = m.path
149
150 return d
151
152
153 if __name__ == '__main__':
154 e = get_dict()
155 for k in sorted(e):
156 assert isinstance(e[k], str), k
157 print('%s=%s' % (k, e[k]))
158
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/conda_build/environ.py b/conda_build/environ.py
--- a/conda_build/environ.py
+++ b/conda_build/environ.py
@@ -106,10 +106,16 @@
value = '<UNDEFINED>'
d[var_name] = value
- try:
- d['CPU_COUNT'] = str(multiprocessing.cpu_count())
- except NotImplementedError:
- d['CPU_COUNT'] = "1"
+ if sys.platform == "darwin":
+ # multiprocessing.cpu_count() is not reliable on OSX
+ # See issue #645 on github.com/conda/conda-build
+ out, err = subprocess.Popen('sysctl -n hw.logicalcpu', shell=True, stdout=subprocess.PIPE).communicate()
+ d['CPU_COUNT'] = out.decode('utf-8').strip()
+ else:
+ try:
+ d['CPU_COUNT'] = str(multiprocessing.cpu_count())
+ except NotImplementedError:
+ d['CPU_COUNT'] = "1"
d.update(**get_git_build_info(d['SRC_DIR']))
d['PATH'] = dict(os.environ)['PATH']
|
{"golden_diff": "diff --git a/conda_build/environ.py b/conda_build/environ.py\n--- a/conda_build/environ.py\n+++ b/conda_build/environ.py\n@@ -106,10 +106,16 @@\n value = '<UNDEFINED>'\n d[var_name] = value\n \n- try:\n- d['CPU_COUNT'] = str(multiprocessing.cpu_count())\n- except NotImplementedError:\n- d['CPU_COUNT'] = \"1\"\n+ if sys.platform == \"darwin\":\n+ # multiprocessing.cpu_count() is not reliable on OSX\n+ # See issue #645 on github.com/conda/conda-build\n+ out, err = subprocess.Popen('sysctl -n hw.logicalcpu', shell=True, stdout=subprocess.PIPE).communicate()\n+ d['CPU_COUNT'] = out.decode('utf-8').strip()\n+ else:\n+ try:\n+ d['CPU_COUNT'] = str(multiprocessing.cpu_count())\n+ except NotImplementedError:\n+ d['CPU_COUNT'] = \"1\"\n \n d.update(**get_git_build_info(d['SRC_DIR']))\n d['PATH'] = dict(os.environ)['PATH']\n", "issue": "CPU_COUNT reliability in MacOSX\nHello!\nAfter a talk with @ccordoba12 in [conda-recipes/pr427](https://github.com/conda/conda-recipes/pull/427), it seems that CPU_COUNT is not reliable in MacOSX and he suggests to use \"sysctl -n hw.ncpu\".\n\nHave you encountered this bug ?\nI see that in python3.5, multiprocessing.cpu_count uses os.cpu_count and this builtin command uses also sysctl : posixmodule.c, line 11220:\n\n``` c\n#elif defined(__DragonFly__) || \\\n defined(__OpenBSD__) || \\\n defined(__FreeBSD__) || \\\n defined(__NetBSD__) || \\\n defined(__APPLE__)\n int mib[2];\n size_t len = sizeof(ncpu);\n mib[0] = CTL_HW;\n mib[1] = HW_NCPU;\n if (sysctl(mib, 2, &ncpu, &len, NULL, 0) != 0)\n ncpu = 0;\n```\n\nTo fix it, I can create a patch that add an OSX case in in [conda-build/environ.py:109](https://github.com/conda/conda-build/blob/master/conda_build/environ.py#L109) with something like:\n\n``` python\nimport subprocess\nimport sys\n\nif sys.platform == 'darwin':\n out, err = subprocess.Popen('sysctl -n hw.ncpu', shell=True, stdout=subprocess.PIPE).communicate()\n d['CPU_COUNT'] = str(out).strip()\n```\n\nDo you agree on this approach ?\nThank you very much\n\nCPU_COUNT reliability in MacOSX\nHello!\nAfter a talk with @ccordoba12 in [conda-recipes/pr427](https://github.com/conda/conda-recipes/pull/427), it seems that CPU_COUNT is not reliable in MacOSX and he suggests to use \"sysctl -n hw.ncpu\".\n\nHave you encountered this bug ?\nI see that in python3.5, multiprocessing.cpu_count uses os.cpu_count and this builtin command uses also sysctl : posixmodule.c, line 11220:\n\n``` c\n#elif defined(__DragonFly__) || \\\n defined(__OpenBSD__) || \\\n defined(__FreeBSD__) || \\\n defined(__NetBSD__) || \\\n defined(__APPLE__)\n int mib[2];\n size_t len = sizeof(ncpu);\n mib[0] = CTL_HW;\n mib[1] = HW_NCPU;\n if (sysctl(mib, 2, &ncpu, &len, NULL, 0) != 0)\n ncpu = 0;\n```\n\nTo fix it, I can create a patch that add an OSX case in in [conda-build/environ.py:109](https://github.com/conda/conda-build/blob/master/conda_build/environ.py#L109) with something like:\n\n``` python\nimport subprocess\nimport sys\n\nif sys.platform == 'darwin':\n out, err = subprocess.Popen('sysctl -n hw.ncpu', shell=True, stdout=subprocess.PIPE).communicate()\n d['CPU_COUNT'] = str(out).strip()\n```\n\nDo you agree on this approach ?\nThank you very much\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\nfrom os.path import join\nimport subprocess\nimport multiprocessing\n\nimport conda.config as cc\n\nfrom conda_build.config import config\n\nfrom conda_build import source\nfrom conda_build.scripts 
import prepend_bin_path\n\n\ndef get_perl_ver():\n return str(config.CONDA_PERL)\n\ndef get_py_ver():\n return '.'.join(str(config.CONDA_PY))\n\ndef get_npy_ver():\n if config.CONDA_NPY:\n # Convert int -> string, e.g.\n # 17 -> '1.7'\n # 110 -> '1.10'\n conda_npy = str(config.CONDA_NPY)\n return conda_npy[0] + '.' + conda_npy[1:]\n return ''\n\ndef get_stdlib_dir():\n return join(config.build_prefix, 'Lib' if sys.platform == 'win32' else\n 'lib/python%s' % get_py_ver())\n\ndef get_sp_dir():\n return join(get_stdlib_dir(), 'site-packages')\n\ndef get_git_build_info(src_dir):\n env = os.environ.copy()\n d = {}\n git_dir = join(src_dir, '.git')\n if os.path.exists(git_dir):\n env['GIT_DIR'] = git_dir\n else:\n return d\n\n # grab information from describe\n key_name = lambda a: \"GIT_DESCRIBE_{}\".format(a)\n keys = [key_name(\"TAG\"), key_name(\"NUMBER\"), key_name(\"HASH\")]\n env = {str(key): str(value) for key, value in env.items()}\n process = subprocess.Popen([\"git\", \"describe\", \"--tags\", \"--long\", \"HEAD\"],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n env=env)\n output = process.communicate()[0].strip()\n output = output.decode('utf-8')\n parts = output.rsplit('-', 2)\n parts_length = len(parts)\n if parts_length == 3:\n d.update(dict(zip(keys, parts)))\n # get the _full_ hash of the current HEAD\n process = subprocess.Popen([\"git\", \"rev-parse\", \"HEAD\"],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n env=env)\n output = process.communicate()[0].strip()\n output = output.decode('utf-8')\n d['GIT_FULL_HASH'] = output\n # set up the build string\n if key_name('NUMBER') in d and key_name('HASH') in d:\n d['GIT_BUILD_STR'] = '{}_{}'.format(d[key_name('NUMBER')],\n d[key_name('HASH')])\n\n return d\n\ndef get_dict(m=None, prefix=None):\n if not prefix:\n prefix = config.build_prefix\n\n python = config.build_python\n d = {'CONDA_BUILD': '1', 'PYTHONNOUSERSITE': '1'}\n d['CONDA_DEFAULT_ENV'] = config.build_prefix\n d['ARCH'] = str(cc.bits)\n d['PREFIX'] = prefix\n d['PYTHON'] = python\n d['PY3K'] = str(config.PY3K)\n d['STDLIB_DIR'] = get_stdlib_dir()\n d['SP_DIR'] = get_sp_dir()\n d['SYS_PREFIX'] = sys.prefix\n d['SYS_PYTHON'] = sys.executable\n d['PERL_VER'] = get_perl_ver()\n d['PY_VER'] = get_py_ver()\n if get_npy_ver():\n d['NPY_VER'] = get_npy_ver()\n d['SRC_DIR'] = source.get_dir()\n if \"LANG\" in os.environ:\n d['LANG'] = os.environ['LANG']\n if \"HTTPS_PROXY\" in os.environ:\n d['HTTPS_PROXY'] = os.environ['HTTPS_PROXY']\n if \"HTTP_PROXY\" in os.environ:\n d['HTTP_PROXY'] = os.environ['HTTP_PROXY']\n\n if m:\n for var_name in m.get_value('build/script_env', []):\n value = os.getenv(var_name)\n if value is None:\n value = '<UNDEFINED>'\n d[var_name] = value\n\n try:\n d['CPU_COUNT'] = str(multiprocessing.cpu_count())\n except NotImplementedError:\n d['CPU_COUNT'] = \"1\"\n\n d.update(**get_git_build_info(d['SRC_DIR']))\n d['PATH'] = dict(os.environ)['PATH']\n d = prepend_bin_path(d, prefix)\n\n if sys.platform == 'win32': # -------- Windows\n d['SCRIPTS'] = join(prefix, 'Scripts')\n d['LIBRARY_PREFIX'] = join(prefix, 'Library')\n d['LIBRARY_BIN'] = join(d['LIBRARY_PREFIX'], 'bin')\n d['LIBRARY_INC'] = join(d['LIBRARY_PREFIX'], 'include')\n d['LIBRARY_LIB'] = join(d['LIBRARY_PREFIX'], 'lib')\n # This probably should be done more generally\n d['CYGWIN_PREFIX'] = prefix.replace('\\\\', '/').replace('C:', '/cygdrive/c')\n\n d['R'] = join(prefix, 'Scripts', 'R.exe')\n else: # -------- Unix\n d['HOME'] = os.getenv('HOME', 'UNKNOWN')\n d['PKG_CONFIG_PATH'] = 
join(prefix, 'lib', 'pkgconfig')\n d['R'] = join(prefix, 'bin', 'R')\n\n if sys.platform == 'darwin': # -------- OSX\n d['OSX_ARCH'] = 'i386' if cc.bits == 32 else 'x86_64'\n d['CFLAGS'] = '-arch %(OSX_ARCH)s' % d\n d['CXXFLAGS'] = d['CFLAGS']\n d['LDFLAGS'] = d['CFLAGS']\n d['MACOSX_DEPLOYMENT_TARGET'] = '10.6'\n\n elif sys.platform.startswith('linux'): # -------- Linux\n d['LD_RUN_PATH'] = prefix + '/lib'\n\n if m:\n d['PKG_NAME'] = m.name()\n d['PKG_VERSION'] = m.version()\n d['PKG_BUILDNUM'] = str(m.build_number())\n d['PKG_BUILD_STRING'] = str(m.build_id())\n d['RECIPE_DIR'] = m.path\n\n return d\n\n\nif __name__ == '__main__':\n e = get_dict()\n for k in sorted(e):\n assert isinstance(e[k], str), k\n print('%s=%s' % (k, e[k]))\n", "path": "conda_build/environ.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\nfrom os.path import join\nimport subprocess\nimport multiprocessing\n\nimport conda.config as cc\n\nfrom conda_build.config import config\n\nfrom conda_build import source\nfrom conda_build.scripts import prepend_bin_path\n\n\ndef get_perl_ver():\n return str(config.CONDA_PERL)\n\ndef get_py_ver():\n return '.'.join(str(config.CONDA_PY))\n\ndef get_npy_ver():\n if config.CONDA_NPY:\n # Convert int -> string, e.g.\n # 17 -> '1.7'\n # 110 -> '1.10'\n conda_npy = str(config.CONDA_NPY)\n return conda_npy[0] + '.' + conda_npy[1:]\n return ''\n\ndef get_stdlib_dir():\n return join(config.build_prefix, 'Lib' if sys.platform == 'win32' else\n 'lib/python%s' % get_py_ver())\n\ndef get_sp_dir():\n return join(get_stdlib_dir(), 'site-packages')\n\ndef get_git_build_info(src_dir):\n env = os.environ.copy()\n d = {}\n git_dir = join(src_dir, '.git')\n if os.path.exists(git_dir):\n env['GIT_DIR'] = git_dir\n else:\n return d\n\n # grab information from describe\n key_name = lambda a: \"GIT_DESCRIBE_{}\".format(a)\n keys = [key_name(\"TAG\"), key_name(\"NUMBER\"), key_name(\"HASH\")]\n env = {str(key): str(value) for key, value in env.items()}\n process = subprocess.Popen([\"git\", \"describe\", \"--tags\", \"--long\", \"HEAD\"],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n env=env)\n output = process.communicate()[0].strip()\n output = output.decode('utf-8')\n parts = output.rsplit('-', 2)\n parts_length = len(parts)\n if parts_length == 3:\n d.update(dict(zip(keys, parts)))\n # get the _full_ hash of the current HEAD\n process = subprocess.Popen([\"git\", \"rev-parse\", \"HEAD\"],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n env=env)\n output = process.communicate()[0].strip()\n output = output.decode('utf-8')\n d['GIT_FULL_HASH'] = output\n # set up the build string\n if key_name('NUMBER') in d and key_name('HASH') in d:\n d['GIT_BUILD_STR'] = '{}_{}'.format(d[key_name('NUMBER')],\n d[key_name('HASH')])\n\n return d\n\ndef get_dict(m=None, prefix=None):\n if not prefix:\n prefix = config.build_prefix\n\n python = config.build_python\n d = {'CONDA_BUILD': '1', 'PYTHONNOUSERSITE': '1'}\n d['CONDA_DEFAULT_ENV'] = config.build_prefix\n d['ARCH'] = str(cc.bits)\n d['PREFIX'] = prefix\n d['PYTHON'] = python\n d['PY3K'] = str(config.PY3K)\n d['STDLIB_DIR'] = get_stdlib_dir()\n d['SP_DIR'] = get_sp_dir()\n d['SYS_PREFIX'] = sys.prefix\n d['SYS_PYTHON'] = sys.executable\n d['PERL_VER'] = get_perl_ver()\n d['PY_VER'] = get_py_ver()\n if get_npy_ver():\n d['NPY_VER'] = get_npy_ver()\n d['SRC_DIR'] = source.get_dir()\n if \"LANG\" in os.environ:\n d['LANG'] = os.environ['LANG']\n if \"HTTPS_PROXY\" in 
os.environ:\n d['HTTPS_PROXY'] = os.environ['HTTPS_PROXY']\n if \"HTTP_PROXY\" in os.environ:\n d['HTTP_PROXY'] = os.environ['HTTP_PROXY']\n\n if m:\n for var_name in m.get_value('build/script_env', []):\n value = os.getenv(var_name)\n if value is None:\n value = '<UNDEFINED>'\n d[var_name] = value\n\n if sys.platform == \"darwin\":\n # multiprocessing.cpu_count() is not reliable on OSX\n # See issue #645 on github.com/conda/conda-build\n out, err = subprocess.Popen('sysctl -n hw.logicalcpu', shell=True, stdout=subprocess.PIPE).communicate()\n d['CPU_COUNT'] = out.decode('utf-8').strip()\n else:\n try:\n d['CPU_COUNT'] = str(multiprocessing.cpu_count())\n except NotImplementedError:\n d['CPU_COUNT'] = \"1\"\n\n d.update(**get_git_build_info(d['SRC_DIR']))\n d['PATH'] = dict(os.environ)['PATH']\n d = prepend_bin_path(d, prefix)\n\n if sys.platform == 'win32': # -------- Windows\n d['SCRIPTS'] = join(prefix, 'Scripts')\n d['LIBRARY_PREFIX'] = join(prefix, 'Library')\n d['LIBRARY_BIN'] = join(d['LIBRARY_PREFIX'], 'bin')\n d['LIBRARY_INC'] = join(d['LIBRARY_PREFIX'], 'include')\n d['LIBRARY_LIB'] = join(d['LIBRARY_PREFIX'], 'lib')\n # This probably should be done more generally\n d['CYGWIN_PREFIX'] = prefix.replace('\\\\', '/').replace('C:', '/cygdrive/c')\n\n d['R'] = join(prefix, 'Scripts', 'R.exe')\n else: # -------- Unix\n d['HOME'] = os.getenv('HOME', 'UNKNOWN')\n d['PKG_CONFIG_PATH'] = join(prefix, 'lib', 'pkgconfig')\n d['R'] = join(prefix, 'bin', 'R')\n\n if sys.platform == 'darwin': # -------- OSX\n d['OSX_ARCH'] = 'i386' if cc.bits == 32 else 'x86_64'\n d['CFLAGS'] = '-arch %(OSX_ARCH)s' % d\n d['CXXFLAGS'] = d['CFLAGS']\n d['LDFLAGS'] = d['CFLAGS']\n d['MACOSX_DEPLOYMENT_TARGET'] = '10.6'\n\n elif sys.platform.startswith('linux'): # -------- Linux\n d['LD_RUN_PATH'] = prefix + '/lib'\n\n if m:\n d['PKG_NAME'] = m.name()\n d['PKG_VERSION'] = m.version()\n d['PKG_BUILDNUM'] = str(m.build_number())\n d['PKG_BUILD_STRING'] = str(m.build_id())\n d['RECIPE_DIR'] = m.path\n\n return d\n\n\nif __name__ == '__main__':\n e = get_dict()\n for k in sorted(e):\n assert isinstance(e[k], str), k\n print('%s=%s' % (k, e[k]))\n", "path": "conda_build/environ.py"}]}
| 2,754 | 251 |
gh_patches_debug_30442
|
rasdani/github-patches
|
git_diff
|
privacyidea__privacyidea-3324
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve the logging of SSH tokens
If a user has many different SSH keys assigned on one machine for different ssh-users on this machine, this line gets logged for each SSH key, that is not used:
https://github.com/privacyidea/privacyidea/blob/745a829d89fb2824d253e27b510027316c856245/privacyidea/lib/applications/ssh.py#L84
We should change this to "debug".
On the other hand we should add a log here
https://github.com/privacyidea/privacyidea/blob/745a829d89fb2824d253e27b510027316c856245/privacyidea/lib/applications/ssh.py#L73
like
~~~~python
log.info(u"Using SSH key {0!s} for user {1!s}".format(tokclass.token.serial, options.get("user")))
~~~~
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `privacyidea/lib/applications/ssh.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # privacyIDEA
4 # Jul 18, 2014 Cornelius Kölbel
5 # License: AGPLv3
6 # contact: http://www.privacyidea.org
7 #
8 # This code is free software; you can redistribute it and/or
9 # modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
10 # License as published by the Free Software Foundation; either
11 # version 3 of the License, or any later version.
12 #
13 # This code is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU AFFERO GENERAL PUBLIC LICENSE for more details.
17 #
18 # You should have received a copy of the GNU Affero General Public
19 # License along with this program. If not, see <http://www.gnu.org/licenses/>.
20 #
21 """
22 This file is tested in tests/test_lib_machinetokens.py
23 """
24 from privacyidea.lib.applications import MachineApplicationBase
25 import logging
26 from privacyidea.lib.token import get_tokens
27 log = logging.getLogger(__name__)
28
29
30 class MachineApplication(MachineApplicationBase):
31 """
32 This is the application for SSH.
33
34 Possible options:
35 user
36
37 """
38 application_name = "ssh"
39 '''as the authentication item is no sensitive information,
40 we can set bulk_call to True. Thus the admin can call
41 all public keys to distribute them via salt.
42 FIXME: This is only true for SSH pub keys.
43 If we would support OTP with SSH, this might be sensitive information!
44 '''
45 allow_bulk_call = True
46
47 @staticmethod
48 def get_authentication_item(token_type,
49 serial,
50 challenge=None, options=None,
51 filter_param=None):
52 """
53 :param token_type: the type of the token. At the moment
54 we support the tokenype "sshkey"
55 :param serial: the serial number of the token.
56 :return auth_item: Return the SSH pub keys.
57 """
58 options = options or {}
59 ret = {}
60 filter_param = filter_param or {}
61 user_filter = filter_param.get("user")
62 if token_type.lower() == "sshkey":
63 toks = get_tokens(serial=serial, active=True)
64 if len(toks) == 1:
65 # We return this entry, either if no user_filter is requested
66 # or if the user_filter matches the user
67 if (user_filter and user_filter == options.get("user")) or \
68 not user_filter:
69 # tokenclass is a SSHkeyTokenClass
70 tokclass = toks[0]
71 # We just return the ssh public key, so that
72 # it can be included into authorized keys.
73 ret["sshkey"] = tokclass.get_sshkey()
74 # We return the username if the token is assigned to a
75 # user, so that this username could be used to save
76 # the ssh key accordingly
77 user_object = toks[0].user
78 if user_object:
79 uInfo = user_object.info
80 if "username" in uInfo:
81 ret["username"] = uInfo.get("username")
82 # ret["info"] = uInfo
83 else:
84 log.info("The requested user %s does not match the user "
85 "option (%s) of the SSH application." % (
86 user_filter, options.get("user")))
87 else:
88 log.info("Token %r, type %r is not supported by "
89 "SSH application module" % (serial, token_type))
90
91 return ret
92
93 @staticmethod
94 def get_options():
95 """
96 returns a dictionary with a list of required and optional options
97 """
98 return {'required': [],
99 'optional': ['user']}
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/privacyidea/lib/applications/ssh.py b/privacyidea/lib/applications/ssh.py
--- a/privacyidea/lib/applications/ssh.py
+++ b/privacyidea/lib/applications/ssh.py
@@ -70,6 +70,8 @@
tokclass = toks[0]
# We just return the ssh public key, so that
# it can be included into authorized keys.
+ log.info(u"Using SSH key {0!s} for SSH user {1!s}".format(tokclass.token.serial,
+ options.get("user")))
ret["sshkey"] = tokclass.get_sshkey()
# We return the username if the token is assigned to a
# user, so that this username could be used to save
@@ -79,14 +81,13 @@
uInfo = user_object.info
if "username" in uInfo:
ret["username"] = uInfo.get("username")
- # ret["info"] = uInfo
else:
- log.info("The requested user %s does not match the user "
- "option (%s) of the SSH application." % (
+ log.debug(u"The requested user {0!s} does not match the user "
+ "option ({0!s}) of the SSH application.".format(
user_filter, options.get("user")))
else:
- log.info("Token %r, type %r is not supported by "
- "SSH application module" % (serial, token_type))
+ log.info(u"Token {0!r}, type {0!r} is not supported by "
+ "SSH application module".format(serial, token_type))
return ret
|
{"golden_diff": "diff --git a/privacyidea/lib/applications/ssh.py b/privacyidea/lib/applications/ssh.py\n--- a/privacyidea/lib/applications/ssh.py\n+++ b/privacyidea/lib/applications/ssh.py\n@@ -70,6 +70,8 @@\n tokclass = toks[0]\n # We just return the ssh public key, so that\n # it can be included into authorized keys.\n+ log.info(u\"Using SSH key {0!s} for SSH user {1!s}\".format(tokclass.token.serial,\n+ options.get(\"user\")))\n ret[\"sshkey\"] = tokclass.get_sshkey()\n # We return the username if the token is assigned to a\n # user, so that this username could be used to save\n@@ -79,14 +81,13 @@\n uInfo = user_object.info\n if \"username\" in uInfo:\n ret[\"username\"] = uInfo.get(\"username\")\n- # ret[\"info\"] = uInfo\n else:\n- log.info(\"The requested user %s does not match the user \"\n- \"option (%s) of the SSH application.\" % (\n+ log.debug(u\"The requested user {0!s} does not match the user \"\n+ \"option ({0!s}) of the SSH application.\".format(\n user_filter, options.get(\"user\")))\n else:\n- log.info(\"Token %r, type %r is not supported by \"\n- \"SSH application module\" % (serial, token_type))\n+ log.info(u\"Token {0!r}, type {0!r} is not supported by \"\n+ \"SSH application module\".format(serial, token_type))\n \n return ret\n", "issue": "Improve the logging of SSH tokens\nIf a user has many different SSH keys assigned on one machine for different ssh-users on this machine, this line gets logged for each SSH key, that is not used:\r\n\r\nhttps://github.com/privacyidea/privacyidea/blob/745a829d89fb2824d253e27b510027316c856245/privacyidea/lib/applications/ssh.py#L84\r\n\r\nWe should change this to \"debug\".\r\n\r\nOn the other hand we should add a log here \r\nhttps://github.com/privacyidea/privacyidea/blob/745a829d89fb2824d253e27b510027316c856245/privacyidea/lib/applications/ssh.py#L73\r\nlike\r\n\r\n~~~~python\r\nlog.info(u\"Using SSH key {0!s} for user {1!s}\".format(tokclass.token.serial, options.get(\"user\")))\r\n~~~~\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# privacyIDEA\n# Jul 18, 2014 Cornelius K\u00f6lbel\n# License: AGPLv3\n# contact: http://www.privacyidea.org\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n\"\"\"\nThis file is tested in tests/test_lib_machinetokens.py\n\"\"\"\nfrom privacyidea.lib.applications import MachineApplicationBase\nimport logging\nfrom privacyidea.lib.token import get_tokens\nlog = logging.getLogger(__name__)\n\n\nclass MachineApplication(MachineApplicationBase):\n \"\"\"\n This is the application for SSH.\n\n Possible options:\n user\n\n \"\"\"\n application_name = \"ssh\"\n '''as the authentication item is no sensitive information,\n we can set bulk_call to True. 
Thus the admin can call\n all public keys to distribute them via salt.\n FIXME: This is only true for SSH pub keys.\n If we would support OTP with SSH, this might be sensitive information!\n '''\n allow_bulk_call = True\n\n @staticmethod\n def get_authentication_item(token_type,\n serial,\n challenge=None, options=None,\n filter_param=None):\n \"\"\"\n :param token_type: the type of the token. At the moment\n we support the tokenype \"sshkey\"\n :param serial: the serial number of the token.\n :return auth_item: Return the SSH pub keys.\n \"\"\"\n options = options or {}\n ret = {}\n filter_param = filter_param or {}\n user_filter = filter_param.get(\"user\")\n if token_type.lower() == \"sshkey\":\n toks = get_tokens(serial=serial, active=True)\n if len(toks) == 1:\n # We return this entry, either if no user_filter is requested\n # or if the user_filter matches the user\n if (user_filter and user_filter == options.get(\"user\")) or \\\n not user_filter:\n # tokenclass is a SSHkeyTokenClass\n tokclass = toks[0]\n # We just return the ssh public key, so that\n # it can be included into authorized keys.\n ret[\"sshkey\"] = tokclass.get_sshkey()\n # We return the username if the token is assigned to a\n # user, so that this username could be used to save\n # the ssh key accordingly\n user_object = toks[0].user\n if user_object:\n uInfo = user_object.info\n if \"username\" in uInfo:\n ret[\"username\"] = uInfo.get(\"username\")\n # ret[\"info\"] = uInfo\n else:\n log.info(\"The requested user %s does not match the user \"\n \"option (%s) of the SSH application.\" % (\n user_filter, options.get(\"user\")))\n else:\n log.info(\"Token %r, type %r is not supported by \"\n \"SSH application module\" % (serial, token_type))\n\n return ret\n\n @staticmethod\n def get_options():\n \"\"\"\n returns a dictionary with a list of required and optional options\n \"\"\"\n return {'required': [],\n 'optional': ['user']}\n", "path": "privacyidea/lib/applications/ssh.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# privacyIDEA\n# Jul 18, 2014 Cornelius K\u00f6lbel\n# License: AGPLv3\n# contact: http://www.privacyidea.org\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n\"\"\"\nThis file is tested in tests/test_lib_machinetokens.py\n\"\"\"\nfrom privacyidea.lib.applications import MachineApplicationBase\nimport logging\nfrom privacyidea.lib.token import get_tokens\nlog = logging.getLogger(__name__)\n\n\nclass MachineApplication(MachineApplicationBase):\n \"\"\"\n This is the application for SSH.\n\n Possible options:\n user\n\n \"\"\"\n application_name = \"ssh\"\n '''as the authentication item is no sensitive information,\n we can set bulk_call to True. 
Thus the admin can call\n all public keys to distribute them via salt.\n FIXME: This is only true for SSH pub keys.\n If we would support OTP with SSH, this might be sensitive information!\n '''\n allow_bulk_call = True\n\n @staticmethod\n def get_authentication_item(token_type,\n serial,\n challenge=None, options=None,\n filter_param=None):\n \"\"\"\n :param token_type: the type of the token. At the moment\n we support the tokenype \"sshkey\"\n :param serial: the serial number of the token.\n :return auth_item: Return the SSH pub keys.\n \"\"\"\n options = options or {}\n ret = {}\n filter_param = filter_param or {}\n user_filter = filter_param.get(\"user\")\n if token_type.lower() == \"sshkey\":\n toks = get_tokens(serial=serial, active=True)\n if len(toks) == 1:\n # We return this entry, either if no user_filter is requested\n # or if the user_filter matches the user\n if (user_filter and user_filter == options.get(\"user\")) or \\\n not user_filter:\n # tokenclass is a SSHkeyTokenClass\n tokclass = toks[0]\n # We just return the ssh public key, so that\n # it can be included into authorized keys.\n log.info(u\"Using SSH key {0!s} for SSH user {1!s}\".format(tokclass.token.serial,\n options.get(\"user\")))\n ret[\"sshkey\"] = tokclass.get_sshkey()\n # We return the username if the token is assigned to a\n # user, so that this username could be used to save\n # the ssh key accordingly\n user_object = toks[0].user\n if user_object:\n uInfo = user_object.info\n if \"username\" in uInfo:\n ret[\"username\"] = uInfo.get(\"username\")\n else:\n log.debug(u\"The requested user {0!s} does not match the user \"\n \"option ({0!s}) of the SSH application.\".format(\n user_filter, options.get(\"user\")))\n else:\n log.info(u\"Token {0!r}, type {0!r} is not supported by \"\n \"SSH application module\".format(serial, token_type))\n\n return ret\n\n @staticmethod\n def get_options():\n \"\"\"\n returns a dictionary with a list of required and optional options\n \"\"\"\n return {'required': [],\n 'optional': ['user']}\n", "path": "privacyidea/lib/applications/ssh.py"}]}
| 1,498 | 374 |
gh_patches_debug_20826
|
rasdani/github-patches
|
git_diff
|
dask__dask-1231
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add tests to package
In `setup.py`, make sure `tests` subdirectories are included in the package (otherwise, it is not possible for a user with an installed version of dask to verify its integrity).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 from os.path import exists
4 from setuptools import setup
5 import dask
6
7 extras_require = {
8 'array': ['numpy', 'toolz >= 0.7.2'],
9 'bag': ['cloudpickle >= 0.2.1', 'toolz >= 0.7.2', 'partd >= 0.3.3'],
10 'dataframe': ['numpy', 'pandas >= 0.18.0', 'toolz >= 0.7.2',
11 'partd >= 0.3.3', 'cloudpickle >= 0.2.1'],
12 'distributed': ['distributed >= 1.9'],
13 'imperative': ['toolz >= 0.7.2'],
14 }
15 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
16
17 setup(name='dask',
18 version=dask.__version__,
19 description='Minimal task scheduling abstraction',
20 url='http://github.com/dask/dask/',
21 maintainer='Matthew Rocklin',
22 maintainer_email='[email protected]',
23 license='BSD',
24 keywords='task-scheduling parallelism',
25 packages=['dask', 'dask.array', 'dask.bag', 'dask.store', 'dask.bytes',
26 'dask.dataframe', 'dask.dataframe.tseries', 'dask.diagnostics'],
27 long_description=(open('README.rst').read() if exists('README.rst')
28 else ''),
29 extras_require=extras_require,
30 zip_safe=False)
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -14,6 +14,12 @@
}
extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
+packages = ['dask', 'dask.array', 'dask.bag', 'dask.store', 'dask.bytes',
+ 'dask.dataframe', 'dask.dataframe.tseries', 'dask.diagnostics']
+
+tests = [p + '.tests' for p in packages]
+
+
setup(name='dask',
version=dask.__version__,
description='Minimal task scheduling abstraction',
@@ -22,8 +28,7 @@
maintainer_email='[email protected]',
license='BSD',
keywords='task-scheduling parallelism',
- packages=['dask', 'dask.array', 'dask.bag', 'dask.store', 'dask.bytes',
- 'dask.dataframe', 'dask.dataframe.tseries', 'dask.diagnostics'],
+ packages=packages + tests,
long_description=(open('README.rst').read() if exists('README.rst')
else ''),
extras_require=extras_require,
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -14,6 +14,12 @@\n }\n extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n \n+packages = ['dask', 'dask.array', 'dask.bag', 'dask.store', 'dask.bytes',\n+ 'dask.dataframe', 'dask.dataframe.tseries', 'dask.diagnostics']\n+\n+tests = [p + '.tests' for p in packages]\n+\n+\n setup(name='dask',\n version=dask.__version__,\n description='Minimal task scheduling abstraction',\n@@ -22,8 +28,7 @@\n maintainer_email='[email protected]',\n license='BSD',\n keywords='task-scheduling parallelism',\n- packages=['dask', 'dask.array', 'dask.bag', 'dask.store', 'dask.bytes',\n- 'dask.dataframe', 'dask.dataframe.tseries', 'dask.diagnostics'],\n+ packages=packages + tests,\n long_description=(open('README.rst').read() if exists('README.rst')\n else ''),\n extras_require=extras_require,\n", "issue": "Add tests to package\nIn `setup.py`, make sure `tests` subdirectories are included in the package (otherwise, it is not possible for a user with an installed version of dask to verify its integrity).\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom os.path import exists\nfrom setuptools import setup\nimport dask\n\nextras_require = {\n 'array': ['numpy', 'toolz >= 0.7.2'],\n 'bag': ['cloudpickle >= 0.2.1', 'toolz >= 0.7.2', 'partd >= 0.3.3'],\n 'dataframe': ['numpy', 'pandas >= 0.18.0', 'toolz >= 0.7.2',\n 'partd >= 0.3.3', 'cloudpickle >= 0.2.1'],\n 'distributed': ['distributed >= 1.9'],\n 'imperative': ['toolz >= 0.7.2'],\n}\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\nsetup(name='dask',\n version=dask.__version__,\n description='Minimal task scheduling abstraction',\n url='http://github.com/dask/dask/',\n maintainer='Matthew Rocklin',\n maintainer_email='[email protected]',\n license='BSD',\n keywords='task-scheduling parallelism',\n packages=['dask', 'dask.array', 'dask.bag', 'dask.store', 'dask.bytes',\n 'dask.dataframe', 'dask.dataframe.tseries', 'dask.diagnostics'],\n long_description=(open('README.rst').read() if exists('README.rst')\n else ''),\n extras_require=extras_require,\n zip_safe=False)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nfrom os.path import exists\nfrom setuptools import setup\nimport dask\n\nextras_require = {\n 'array': ['numpy', 'toolz >= 0.7.2'],\n 'bag': ['cloudpickle >= 0.2.1', 'toolz >= 0.7.2', 'partd >= 0.3.3'],\n 'dataframe': ['numpy', 'pandas >= 0.18.0', 'toolz >= 0.7.2',\n 'partd >= 0.3.3', 'cloudpickle >= 0.2.1'],\n 'distributed': ['distributed >= 1.9'],\n 'imperative': ['toolz >= 0.7.2'],\n}\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\npackages = ['dask', 'dask.array', 'dask.bag', 'dask.store', 'dask.bytes',\n 'dask.dataframe', 'dask.dataframe.tseries', 'dask.diagnostics']\n\ntests = [p + '.tests' for p in packages]\n\n\nsetup(name='dask',\n version=dask.__version__,\n description='Minimal task scheduling abstraction',\n url='http://github.com/dask/dask/',\n maintainer='Matthew Rocklin',\n maintainer_email='[email protected]',\n license='BSD',\n keywords='task-scheduling parallelism',\n packages=packages + tests,\n long_description=(open('README.rst').read() if exists('README.rst')\n else ''),\n extras_require=extras_require,\n zip_safe=False)\n", "path": "setup.py"}]}
| 685 | 263 |
gh_patches_debug_33617
|
rasdani/github-patches
|
git_diff
|
pypi__warehouse-7910
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Catch additional pymacaroons.Macaroon.deserialize exceptions
It appears that someone has [enumerated the various exceptions `pymacaroons.Macaroon.deserialize` might raise](https://github.com/ecordell/pymacaroons/issues/50). It'd be great if that were resolved, but we might want to further harden the work from #7424 to handle these other cases?
_Originally posted by @ewdurbin in https://github.com/pypa/warehouse/issues/7298#issuecomment-589957864_
---
**Good First Issue**: This issue is good for first time contributors. If you've already contributed to Warehouse, work on [another issue without this label](https://github.com/pypa/warehouse/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+-label%3A%22good+first+issue%22) instead. If there is not a corresponding pull request for this issue, it is up for grabs. For directions for getting set up, see our [Getting Started Guide](https://warehouse.pypa.io/development/getting-started/). If you are working on this issue and have questions, feel free to ask them here, [`#pypa-dev` on Freenode](https://webchat.freenode.net/?channels=%23pypa-dev), or the [pypa-dev mailing list](https://groups.google.com/forum/#!forum/pypa-dev).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/macaroons/services.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import binascii
14 import datetime
15 import json
16 import uuid
17
18 import pymacaroons
19
20 from pymacaroons.exceptions import MacaroonDeserializationException
21 from sqlalchemy.orm import joinedload
22 from sqlalchemy.orm.exc import NoResultFound
23 from zope.interface import implementer
24
25 from warehouse.accounts.models import User
26 from warehouse.macaroons.caveats import InvalidMacaroon, Verifier
27 from warehouse.macaroons.interfaces import IMacaroonService
28 from warehouse.macaroons.models import Macaroon
29
30
31 @implementer(IMacaroonService)
32 class DatabaseMacaroonService:
33 def __init__(self, db_session):
34 self.db = db_session
35
36 def _extract_raw_macaroon(self, prefixed_macaroon):
37 """
38 Returns the base64-encoded macaroon component of a PyPI macaroon,
39 dropping the prefix.
40
41 Returns None if the macaroon is None, has no prefix, or has the
42 wrong prefix.
43 """
44 if prefixed_macaroon is None:
45 return None
46
47 prefix, _, raw_macaroon = prefixed_macaroon.partition("-")
48 if prefix != "pypi" or not raw_macaroon:
49 return None
50
51 return raw_macaroon
52
53 def find_macaroon(self, macaroon_id):
54 """
55 Returns a macaroon model from the DB by its identifier.
56 Returns None if no macaroon has the given ID.
57 """
58 try:
59 dm = (
60 self.db.query(Macaroon)
61 .options(joinedload("user"))
62 .filter(Macaroon.id == uuid.UUID(macaroon_id))
63 .one()
64 )
65 except NoResultFound:
66 return None
67
68 return dm
69
70 def find_userid(self, raw_macaroon):
71 """
72 Returns the id of the user associated with the given raw (serialized)
73 macaroon.
74 """
75 raw_macaroon = self._extract_raw_macaroon(raw_macaroon)
76 if raw_macaroon is None:
77 return None
78
79 try:
80 m = pymacaroons.Macaroon.deserialize(raw_macaroon)
81 except binascii.Error:
82 return None
83 except MacaroonDeserializationException:
84 return None
85
86 dm = self.find_macaroon(m.identifier.decode())
87
88 if dm is None:
89 return None
90
91 return dm.user.id
92
93 def verify(self, raw_macaroon, context, principals, permission):
94 """
95 Returns True if the given raw (serialized) macaroon is
96 valid for the context, principals, and requested permission.
97
98 Raises InvalidMacaroon if the macaroon is not valid.
99 """
100 raw_macaroon = self._extract_raw_macaroon(raw_macaroon)
101 if raw_macaroon is None:
102 raise InvalidMacaroon("malformed or nonexistent macaroon")
103
104 try:
105 m = pymacaroons.Macaroon.deserialize(raw_macaroon)
106 except MacaroonDeserializationException:
107 raise InvalidMacaroon("malformed macaroon")
108
109 dm = self.find_macaroon(m.identifier.decode())
110
111 if dm is None:
112 raise InvalidMacaroon("deleted or nonexistent macaroon")
113
114 verifier = Verifier(m, context, principals, permission)
115 if verifier.verify(dm.key):
116 dm.last_used = datetime.datetime.now()
117 return True
118
119 raise InvalidMacaroon("invalid macaroon")
120
121 def create_macaroon(self, location, user_id, description, caveats):
122 """
123 Returns a tuple of a new raw (serialized) macaroon and its DB model.
124 The description provided is not embedded into the macaroon, only stored
125 in the DB model.
126 """
127 user = self.db.query(User).filter(User.id == user_id).one()
128
129 dm = Macaroon(user=user, description=description, caveats=caveats)
130 self.db.add(dm)
131 self.db.flush()
132
133 m = pymacaroons.Macaroon(
134 location=location,
135 identifier=str(dm.id),
136 key=dm.key,
137 version=pymacaroons.MACAROON_V2,
138 )
139 m.add_first_party_caveat(json.dumps(caveats))
140 serialized_macaroon = f"pypi-{m.serialize()}"
141 return serialized_macaroon, dm
142
143 def delete_macaroon(self, macaroon_id):
144 """
145 Deletes a macaroon from the DB by its identifier.
146 """
147 dm = self.find_macaroon(macaroon_id)
148 self.db.delete(dm)
149 self.db.flush()
150
151 def get_macaroon_by_description(self, user_id, description):
152 """
153 Returns a macaroon model from the DB with the given description,
154 if one exists for the given user.
155
156 Returns None if the user doesn't have a macaroon with this description.
157 """
158 try:
159 dm = (
160 self.db.query(Macaroon)
161 .options(joinedload("user"))
162 .filter(Macaroon.description == description)
163 .filter(Macaroon.user_id == user_id)
164 .one()
165 )
166 except NoResultFound:
167 return None
168
169 return dm
170
171
172 def database_macaroon_factory(context, request):
173 return DatabaseMacaroonService(request.db)
174
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/warehouse/macaroons/services.py b/warehouse/macaroons/services.py
--- a/warehouse/macaroons/services.py
+++ b/warehouse/macaroons/services.py
@@ -13,6 +13,7 @@
import binascii
import datetime
import json
+import struct
import uuid
import pymacaroons
@@ -67,20 +68,32 @@
return dm
+ def _deserialize_raw_macaroon(self, raw_macaroon):
+ raw_macaroon = self._extract_raw_macaroon(raw_macaroon)
+
+ if raw_macaroon is None:
+ raise InvalidMacaroon("malformed or nonexistent macaroon")
+
+ try:
+ return pymacaroons.Macaroon.deserialize(raw_macaroon)
+ except (
+ IndexError,
+ TypeError,
+ ValueError,
+ binascii.Error,
+ struct.error,
+ MacaroonDeserializationException,
+ ):
+ raise InvalidMacaroon("malformed macaroon")
+
def find_userid(self, raw_macaroon):
"""
Returns the id of the user associated with the given raw (serialized)
macaroon.
"""
- raw_macaroon = self._extract_raw_macaroon(raw_macaroon)
- if raw_macaroon is None:
- return None
-
try:
- m = pymacaroons.Macaroon.deserialize(raw_macaroon)
- except binascii.Error:
- return None
- except MacaroonDeserializationException:
+ m = self._deserialize_raw_macaroon(raw_macaroon)
+ except InvalidMacaroon:
return None
dm = self.find_macaroon(m.identifier.decode())
@@ -97,15 +110,7 @@
Raises InvalidMacaroon if the macaroon is not valid.
"""
- raw_macaroon = self._extract_raw_macaroon(raw_macaroon)
- if raw_macaroon is None:
- raise InvalidMacaroon("malformed or nonexistent macaroon")
-
- try:
- m = pymacaroons.Macaroon.deserialize(raw_macaroon)
- except MacaroonDeserializationException:
- raise InvalidMacaroon("malformed macaroon")
-
+ m = self._deserialize_raw_macaroon(raw_macaroon)
dm = self.find_macaroon(m.identifier.decode())
if dm is None:
|
{"golden_diff": "diff --git a/warehouse/macaroons/services.py b/warehouse/macaroons/services.py\n--- a/warehouse/macaroons/services.py\n+++ b/warehouse/macaroons/services.py\n@@ -13,6 +13,7 @@\n import binascii\n import datetime\n import json\n+import struct\n import uuid\n \n import pymacaroons\n@@ -67,20 +68,32 @@\n \n return dm\n \n+ def _deserialize_raw_macaroon(self, raw_macaroon):\n+ raw_macaroon = self._extract_raw_macaroon(raw_macaroon)\n+\n+ if raw_macaroon is None:\n+ raise InvalidMacaroon(\"malformed or nonexistent macaroon\")\n+\n+ try:\n+ return pymacaroons.Macaroon.deserialize(raw_macaroon)\n+ except (\n+ IndexError,\n+ TypeError,\n+ ValueError,\n+ binascii.Error,\n+ struct.error,\n+ MacaroonDeserializationException,\n+ ):\n+ raise InvalidMacaroon(\"malformed macaroon\")\n+\n def find_userid(self, raw_macaroon):\n \"\"\"\n Returns the id of the user associated with the given raw (serialized)\n macaroon.\n \"\"\"\n- raw_macaroon = self._extract_raw_macaroon(raw_macaroon)\n- if raw_macaroon is None:\n- return None\n-\n try:\n- m = pymacaroons.Macaroon.deserialize(raw_macaroon)\n- except binascii.Error:\n- return None\n- except MacaroonDeserializationException:\n+ m = self._deserialize_raw_macaroon(raw_macaroon)\n+ except InvalidMacaroon:\n return None\n \n dm = self.find_macaroon(m.identifier.decode())\n@@ -97,15 +110,7 @@\n \n Raises InvalidMacaroon if the macaroon is not valid.\n \"\"\"\n- raw_macaroon = self._extract_raw_macaroon(raw_macaroon)\n- if raw_macaroon is None:\n- raise InvalidMacaroon(\"malformed or nonexistent macaroon\")\n-\n- try:\n- m = pymacaroons.Macaroon.deserialize(raw_macaroon)\n- except MacaroonDeserializationException:\n- raise InvalidMacaroon(\"malformed macaroon\")\n-\n+ m = self._deserialize_raw_macaroon(raw_macaroon)\n dm = self.find_macaroon(m.identifier.decode())\n \n if dm is None:\n", "issue": "Catch additional pymacaroons.Macaroon.deserialize exceptions\nIt appears that someone has [enumerated the various exceptions `pymacaroons.Macaroon.deserialize` might raise](https://github.com/ecordell/pymacaroons/issues/50). It'd be great if that were resolved, but we might want to further harden the work from #7424 to handle these other cases?\r\n\r\n_Originally posted by @ewdurbin in https://github.com/pypa/warehouse/issues/7298#issuecomment-589957864_\r\n\r\n---\r\n\r\n**Good First Issue**: This issue is good for first time contributors. If you've already contributed to Warehouse, work on [another issue without this label](https://github.com/pypa/warehouse/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+-label%3A%22good+first+issue%22) instead. If there is not a corresponding pull request for this issue, it is up for grabs. For directions for getting set up, see our [Getting Started Guide](https://warehouse.pypa.io/development/getting-started/). 
If you are working on this issue and have questions, feel free to ask them here, [`#pypa-dev` on Freenode](https://webchat.freenode.net/?channels=%23pypa-dev), or the [pypa-dev mailing list](https://groups.google.com/forum/#!forum/pypa-dev).\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport binascii\nimport datetime\nimport json\nimport uuid\n\nimport pymacaroons\n\nfrom pymacaroons.exceptions import MacaroonDeserializationException\nfrom sqlalchemy.orm import joinedload\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom zope.interface import implementer\n\nfrom warehouse.accounts.models import User\nfrom warehouse.macaroons.caveats import InvalidMacaroon, Verifier\nfrom warehouse.macaroons.interfaces import IMacaroonService\nfrom warehouse.macaroons.models import Macaroon\n\n\n@implementer(IMacaroonService)\nclass DatabaseMacaroonService:\n def __init__(self, db_session):\n self.db = db_session\n\n def _extract_raw_macaroon(self, prefixed_macaroon):\n \"\"\"\n Returns the base64-encoded macaroon component of a PyPI macaroon,\n dropping the prefix.\n\n Returns None if the macaroon is None, has no prefix, or has the\n wrong prefix.\n \"\"\"\n if prefixed_macaroon is None:\n return None\n\n prefix, _, raw_macaroon = prefixed_macaroon.partition(\"-\")\n if prefix != \"pypi\" or not raw_macaroon:\n return None\n\n return raw_macaroon\n\n def find_macaroon(self, macaroon_id):\n \"\"\"\n Returns a macaroon model from the DB by its identifier.\n Returns None if no macaroon has the given ID.\n \"\"\"\n try:\n dm = (\n self.db.query(Macaroon)\n .options(joinedload(\"user\"))\n .filter(Macaroon.id == uuid.UUID(macaroon_id))\n .one()\n )\n except NoResultFound:\n return None\n\n return dm\n\n def find_userid(self, raw_macaroon):\n \"\"\"\n Returns the id of the user associated with the given raw (serialized)\n macaroon.\n \"\"\"\n raw_macaroon = self._extract_raw_macaroon(raw_macaroon)\n if raw_macaroon is None:\n return None\n\n try:\n m = pymacaroons.Macaroon.deserialize(raw_macaroon)\n except binascii.Error:\n return None\n except MacaroonDeserializationException:\n return None\n\n dm = self.find_macaroon(m.identifier.decode())\n\n if dm is None:\n return None\n\n return dm.user.id\n\n def verify(self, raw_macaroon, context, principals, permission):\n \"\"\"\n Returns True if the given raw (serialized) macaroon is\n valid for the context, principals, and requested permission.\n\n Raises InvalidMacaroon if the macaroon is not valid.\n \"\"\"\n raw_macaroon = self._extract_raw_macaroon(raw_macaroon)\n if raw_macaroon is None:\n raise InvalidMacaroon(\"malformed or nonexistent macaroon\")\n\n try:\n m = pymacaroons.Macaroon.deserialize(raw_macaroon)\n except MacaroonDeserializationException:\n raise InvalidMacaroon(\"malformed macaroon\")\n\n dm = self.find_macaroon(m.identifier.decode())\n\n if dm is None:\n raise InvalidMacaroon(\"deleted or nonexistent macaroon\")\n\n verifier = Verifier(m, context, principals, permission)\n if verifier.verify(dm.key):\n 
dm.last_used = datetime.datetime.now()\n return True\n\n raise InvalidMacaroon(\"invalid macaroon\")\n\n def create_macaroon(self, location, user_id, description, caveats):\n \"\"\"\n Returns a tuple of a new raw (serialized) macaroon and its DB model.\n The description provided is not embedded into the macaroon, only stored\n in the DB model.\n \"\"\"\n user = self.db.query(User).filter(User.id == user_id).one()\n\n dm = Macaroon(user=user, description=description, caveats=caveats)\n self.db.add(dm)\n self.db.flush()\n\n m = pymacaroons.Macaroon(\n location=location,\n identifier=str(dm.id),\n key=dm.key,\n version=pymacaroons.MACAROON_V2,\n )\n m.add_first_party_caveat(json.dumps(caveats))\n serialized_macaroon = f\"pypi-{m.serialize()}\"\n return serialized_macaroon, dm\n\n def delete_macaroon(self, macaroon_id):\n \"\"\"\n Deletes a macaroon from the DB by its identifier.\n \"\"\"\n dm = self.find_macaroon(macaroon_id)\n self.db.delete(dm)\n self.db.flush()\n\n def get_macaroon_by_description(self, user_id, description):\n \"\"\"\n Returns a macaroon model from the DB with the given description,\n if one exists for the given user.\n\n Returns None if the user doesn't have a macaroon with this description.\n \"\"\"\n try:\n dm = (\n self.db.query(Macaroon)\n .options(joinedload(\"user\"))\n .filter(Macaroon.description == description)\n .filter(Macaroon.user_id == user_id)\n .one()\n )\n except NoResultFound:\n return None\n\n return dm\n\n\ndef database_macaroon_factory(context, request):\n return DatabaseMacaroonService(request.db)\n", "path": "warehouse/macaroons/services.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport binascii\nimport datetime\nimport json\nimport struct\nimport uuid\n\nimport pymacaroons\n\nfrom pymacaroons.exceptions import MacaroonDeserializationException\nfrom sqlalchemy.orm import joinedload\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom zope.interface import implementer\n\nfrom warehouse.accounts.models import User\nfrom warehouse.macaroons.caveats import InvalidMacaroon, Verifier\nfrom warehouse.macaroons.interfaces import IMacaroonService\nfrom warehouse.macaroons.models import Macaroon\n\n\n@implementer(IMacaroonService)\nclass DatabaseMacaroonService:\n def __init__(self, db_session):\n self.db = db_session\n\n def _extract_raw_macaroon(self, prefixed_macaroon):\n \"\"\"\n Returns the base64-encoded macaroon component of a PyPI macaroon,\n dropping the prefix.\n\n Returns None if the macaroon is None, has no prefix, or has the\n wrong prefix.\n \"\"\"\n if prefixed_macaroon is None:\n return None\n\n prefix, _, raw_macaroon = prefixed_macaroon.partition(\"-\")\n if prefix != \"pypi\" or not raw_macaroon:\n return None\n\n return raw_macaroon\n\n def find_macaroon(self, macaroon_id):\n \"\"\"\n Returns a macaroon model from the DB by its identifier.\n Returns None if no macaroon has the given ID.\n \"\"\"\n try:\n dm = (\n self.db.query(Macaroon)\n .options(joinedload(\"user\"))\n .filter(Macaroon.id == 
uuid.UUID(macaroon_id))\n .one()\n )\n except NoResultFound:\n return None\n\n return dm\n\n def _deserialize_raw_macaroon(self, raw_macaroon):\n raw_macaroon = self._extract_raw_macaroon(raw_macaroon)\n\n if raw_macaroon is None:\n raise InvalidMacaroon(\"malformed or nonexistent macaroon\")\n\n try:\n return pymacaroons.Macaroon.deserialize(raw_macaroon)\n except (\n IndexError,\n TypeError,\n ValueError,\n binascii.Error,\n struct.error,\n MacaroonDeserializationException,\n ):\n raise InvalidMacaroon(\"malformed macaroon\")\n\n def find_userid(self, raw_macaroon):\n \"\"\"\n Returns the id of the user associated with the given raw (serialized)\n macaroon.\n \"\"\"\n try:\n m = self._deserialize_raw_macaroon(raw_macaroon)\n except InvalidMacaroon:\n return None\n\n dm = self.find_macaroon(m.identifier.decode())\n\n if dm is None:\n return None\n\n return dm.user.id\n\n def verify(self, raw_macaroon, context, principals, permission):\n \"\"\"\n Returns True if the given raw (serialized) macaroon is\n valid for the context, principals, and requested permission.\n\n Raises InvalidMacaroon if the macaroon is not valid.\n \"\"\"\n m = self._deserialize_raw_macaroon(raw_macaroon)\n dm = self.find_macaroon(m.identifier.decode())\n\n if dm is None:\n raise InvalidMacaroon(\"deleted or nonexistent macaroon\")\n\n verifier = Verifier(m, context, principals, permission)\n if verifier.verify(dm.key):\n dm.last_used = datetime.datetime.now()\n return True\n\n raise InvalidMacaroon(\"invalid macaroon\")\n\n def create_macaroon(self, location, user_id, description, caveats):\n \"\"\"\n Returns a tuple of a new raw (serialized) macaroon and its DB model.\n The description provided is not embedded into the macaroon, only stored\n in the DB model.\n \"\"\"\n user = self.db.query(User).filter(User.id == user_id).one()\n\n dm = Macaroon(user=user, description=description, caveats=caveats)\n self.db.add(dm)\n self.db.flush()\n\n m = pymacaroons.Macaroon(\n location=location,\n identifier=str(dm.id),\n key=dm.key,\n version=pymacaroons.MACAROON_V2,\n )\n m.add_first_party_caveat(json.dumps(caveats))\n serialized_macaroon = f\"pypi-{m.serialize()}\"\n return serialized_macaroon, dm\n\n def delete_macaroon(self, macaroon_id):\n \"\"\"\n Deletes a macaroon from the DB by its identifier.\n \"\"\"\n dm = self.find_macaroon(macaroon_id)\n self.db.delete(dm)\n self.db.flush()\n\n def get_macaroon_by_description(self, user_id, description):\n \"\"\"\n Returns a macaroon model from the DB with the given description,\n if one exists for the given user.\n\n Returns None if the user doesn't have a macaroon with this description.\n \"\"\"\n try:\n dm = (\n self.db.query(Macaroon)\n .options(joinedload(\"user\"))\n .filter(Macaroon.description == description)\n .filter(Macaroon.user_id == user_id)\n .one()\n )\n except NoResultFound:\n return None\n\n return dm\n\n\ndef database_macaroon_factory(context, request):\n return DatabaseMacaroonService(request.db)\n", "path": "warehouse/macaroons/services.py"}]}
| 2,277 | 545 |
gh_patches_debug_14884
|
rasdani/github-patches
|
git_diff
|
python-discord__bot-1205
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: ` chars are not escaped when parsing !source

When responding to faulty `!source` commands, the backticks aren't escaped and a formatting issue occurs.
This _might_ lead to being able to ping roles/users, should Discord ever decide to change the embed ping behavior.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bot/exts/info/source.py`
Content:
```
1 import inspect
2 from pathlib import Path
3 from typing import Optional, Tuple, Union
4
5 from discord import Embed
6 from discord.ext import commands
7
8 from bot.bot import Bot
9 from bot.constants import URLs
10
11 SourceType = Union[commands.HelpCommand, commands.Command, commands.Cog, str, commands.ExtensionNotLoaded]
12
13
14 class SourceConverter(commands.Converter):
15 """Convert an argument into a help command, tag, command, or cog."""
16
17 async def convert(self, ctx: commands.Context, argument: str) -> SourceType:
18 """Convert argument into source object."""
19 if argument.lower().startswith("help"):
20 return ctx.bot.help_command
21
22 cog = ctx.bot.get_cog(argument)
23 if cog:
24 return cog
25
26 cmd = ctx.bot.get_command(argument)
27 if cmd:
28 return cmd
29
30 tags_cog = ctx.bot.get_cog("Tags")
31 show_tag = True
32
33 if not tags_cog:
34 show_tag = False
35 elif argument.lower() in tags_cog._cache:
36 return argument.lower()
37
38 raise commands.BadArgument(
39 f"Unable to convert `{argument}` to valid command{', tag,' if show_tag else ''} or Cog."
40 )
41
42
43 class BotSource(commands.Cog):
44 """Displays information about the bot's source code."""
45
46 def __init__(self, bot: Bot):
47 self.bot = bot
48
49 @commands.command(name="source", aliases=("src",))
50 async def source_command(self, ctx: commands.Context, *, source_item: SourceConverter = None) -> None:
51 """Display information and a GitHub link to the source code of a command, tag, or cog."""
52 if not source_item:
53 embed = Embed(title="Bot's GitHub Repository")
54 embed.add_field(name="Repository", value=f"[Go to GitHub]({URLs.github_bot_repo})")
55 embed.set_thumbnail(url="https://avatars1.githubusercontent.com/u/9919")
56 await ctx.send(embed=embed)
57 return
58
59 embed = await self.build_embed(source_item)
60 await ctx.send(embed=embed)
61
62 def get_source_link(self, source_item: SourceType) -> Tuple[str, str, Optional[int]]:
63 """
64 Build GitHub link of source item, return this link, file location and first line number.
65
66 Raise BadArgument if `source_item` is a dynamically-created object (e.g. via internal eval).
67 """
68 if isinstance(source_item, commands.Command):
69 src = source_item.callback.__code__
70 filename = src.co_filename
71 elif isinstance(source_item, str):
72 tags_cog = self.bot.get_cog("Tags")
73 filename = tags_cog._cache[source_item]["location"]
74 else:
75 src = type(source_item)
76 try:
77 filename = inspect.getsourcefile(src)
78 except TypeError:
79 raise commands.BadArgument("Cannot get source for a dynamically-created object.")
80
81 if not isinstance(source_item, str):
82 try:
83 lines, first_line_no = inspect.getsourcelines(src)
84 except OSError:
85 raise commands.BadArgument("Cannot get source for a dynamically-created object.")
86
87 lines_extension = f"#L{first_line_no}-L{first_line_no+len(lines)-1}"
88 else:
89 first_line_no = None
90 lines_extension = ""
91
92 # Handle tag file location differently than others to avoid errors in some cases
93 if not first_line_no:
94 file_location = Path(filename).relative_to("/bot/")
95 else:
96 file_location = Path(filename).relative_to(Path.cwd()).as_posix()
97
98 url = f"{URLs.github_bot_repo}/blob/master/{file_location}{lines_extension}"
99
100 return url, file_location, first_line_no or None
101
102 async def build_embed(self, source_object: SourceType) -> Optional[Embed]:
103 """Build embed based on source object."""
104 url, location, first_line = self.get_source_link(source_object)
105
106 if isinstance(source_object, commands.HelpCommand):
107 title = "Help Command"
108 description = source_object.__doc__.splitlines()[1]
109 elif isinstance(source_object, commands.Command):
110 description = source_object.short_doc
111 title = f"Command: {source_object.qualified_name}"
112 elif isinstance(source_object, str):
113 title = f"Tag: {source_object}"
114 description = ""
115 else:
116 title = f"Cog: {source_object.qualified_name}"
117 description = source_object.description.splitlines()[0]
118
119 embed = Embed(title=title, description=description)
120 embed.add_field(name="Source Code", value=f"[Go to GitHub]({url})")
121 line_text = f":{first_line}" if first_line else ""
122 embed.set_footer(text=f"{location}{line_text}")
123
124 return embed
125
126
127 def setup(bot: Bot) -> None:
128 """Load the BotSource cog."""
129 bot.add_cog(BotSource(bot))
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bot/exts/info/source.py b/bot/exts/info/source.py
--- a/bot/exts/info/source.py
+++ b/bot/exts/info/source.py
@@ -2,7 +2,7 @@
from pathlib import Path
from typing import Optional, Tuple, Union
-from discord import Embed
+from discord import Embed, utils
from discord.ext import commands
from bot.bot import Bot
@@ -35,8 +35,10 @@
elif argument.lower() in tags_cog._cache:
return argument.lower()
+ escaped_arg = utils.escape_markdown(argument)
+
raise commands.BadArgument(
- f"Unable to convert `{argument}` to valid command{', tag,' if show_tag else ''} or Cog."
+ f"Unable to convert '{escaped_arg}' to valid command{', tag,' if show_tag else ''} or Cog."
)
|
{"golden_diff": "diff --git a/bot/exts/info/source.py b/bot/exts/info/source.py\n--- a/bot/exts/info/source.py\n+++ b/bot/exts/info/source.py\n@@ -2,7 +2,7 @@\n from pathlib import Path\n from typing import Optional, Tuple, Union\n \n-from discord import Embed\n+from discord import Embed, utils\n from discord.ext import commands\n \n from bot.bot import Bot\n@@ -35,8 +35,10 @@\n elif argument.lower() in tags_cog._cache:\n return argument.lower()\n \n+ escaped_arg = utils.escape_markdown(argument)\n+\n raise commands.BadArgument(\n- f\"Unable to convert `{argument}` to valid command{', tag,' if show_tag else ''} or Cog.\"\n+ f\"Unable to convert '{escaped_arg}' to valid command{', tag,' if show_tag else ''} or Cog.\"\n )\n", "issue": "Bug: ` chars are not escaped when parsing !source\n\r\n\r\nWhen responding to faulty `!source` commands, the backticks aren't escaped and a formatting issue occurs.\r\n\r\nThis _might_ lead to being able to ping roles/users, should Discord ever decide to change the embed ping behavior.\n", "before_files": [{"content": "import inspect\nfrom pathlib import Path\nfrom typing import Optional, Tuple, Union\n\nfrom discord import Embed\nfrom discord.ext import commands\n\nfrom bot.bot import Bot\nfrom bot.constants import URLs\n\nSourceType = Union[commands.HelpCommand, commands.Command, commands.Cog, str, commands.ExtensionNotLoaded]\n\n\nclass SourceConverter(commands.Converter):\n \"\"\"Convert an argument into a help command, tag, command, or cog.\"\"\"\n\n async def convert(self, ctx: commands.Context, argument: str) -> SourceType:\n \"\"\"Convert argument into source object.\"\"\"\n if argument.lower().startswith(\"help\"):\n return ctx.bot.help_command\n\n cog = ctx.bot.get_cog(argument)\n if cog:\n return cog\n\n cmd = ctx.bot.get_command(argument)\n if cmd:\n return cmd\n\n tags_cog = ctx.bot.get_cog(\"Tags\")\n show_tag = True\n\n if not tags_cog:\n show_tag = False\n elif argument.lower() in tags_cog._cache:\n return argument.lower()\n\n raise commands.BadArgument(\n f\"Unable to convert `{argument}` to valid command{', tag,' if show_tag else ''} or Cog.\"\n )\n\n\nclass BotSource(commands.Cog):\n \"\"\"Displays information about the bot's source code.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n @commands.command(name=\"source\", aliases=(\"src\",))\n async def source_command(self, ctx: commands.Context, *, source_item: SourceConverter = None) -> None:\n \"\"\"Display information and a GitHub link to the source code of a command, tag, or cog.\"\"\"\n if not source_item:\n embed = Embed(title=\"Bot's GitHub Repository\")\n embed.add_field(name=\"Repository\", value=f\"[Go to GitHub]({URLs.github_bot_repo})\")\n embed.set_thumbnail(url=\"https://avatars1.githubusercontent.com/u/9919\")\n await ctx.send(embed=embed)\n return\n\n embed = await self.build_embed(source_item)\n await ctx.send(embed=embed)\n\n def get_source_link(self, source_item: SourceType) -> Tuple[str, str, Optional[int]]:\n \"\"\"\n Build GitHub link of source item, return this link, file location and first line number.\n\n Raise BadArgument if `source_item` is a dynamically-created object (e.g. 
via internal eval).\n \"\"\"\n if isinstance(source_item, commands.Command):\n src = source_item.callback.__code__\n filename = src.co_filename\n elif isinstance(source_item, str):\n tags_cog = self.bot.get_cog(\"Tags\")\n filename = tags_cog._cache[source_item][\"location\"]\n else:\n src = type(source_item)\n try:\n filename = inspect.getsourcefile(src)\n except TypeError:\n raise commands.BadArgument(\"Cannot get source for a dynamically-created object.\")\n\n if not isinstance(source_item, str):\n try:\n lines, first_line_no = inspect.getsourcelines(src)\n except OSError:\n raise commands.BadArgument(\"Cannot get source for a dynamically-created object.\")\n\n lines_extension = f\"#L{first_line_no}-L{first_line_no+len(lines)-1}\"\n else:\n first_line_no = None\n lines_extension = \"\"\n\n # Handle tag file location differently than others to avoid errors in some cases\n if not first_line_no:\n file_location = Path(filename).relative_to(\"/bot/\")\n else:\n file_location = Path(filename).relative_to(Path.cwd()).as_posix()\n\n url = f\"{URLs.github_bot_repo}/blob/master/{file_location}{lines_extension}\"\n\n return url, file_location, first_line_no or None\n\n async def build_embed(self, source_object: SourceType) -> Optional[Embed]:\n \"\"\"Build embed based on source object.\"\"\"\n url, location, first_line = self.get_source_link(source_object)\n\n if isinstance(source_object, commands.HelpCommand):\n title = \"Help Command\"\n description = source_object.__doc__.splitlines()[1]\n elif isinstance(source_object, commands.Command):\n description = source_object.short_doc\n title = f\"Command: {source_object.qualified_name}\"\n elif isinstance(source_object, str):\n title = f\"Tag: {source_object}\"\n description = \"\"\n else:\n title = f\"Cog: {source_object.qualified_name}\"\n description = source_object.description.splitlines()[0]\n\n embed = Embed(title=title, description=description)\n embed.add_field(name=\"Source Code\", value=f\"[Go to GitHub]({url})\")\n line_text = f\":{first_line}\" if first_line else \"\"\n embed.set_footer(text=f\"{location}{line_text}\")\n\n return embed\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the BotSource cog.\"\"\"\n bot.add_cog(BotSource(bot))\n", "path": "bot/exts/info/source.py"}], "after_files": [{"content": "import inspect\nfrom pathlib import Path\nfrom typing import Optional, Tuple, Union\n\nfrom discord import Embed, utils\nfrom discord.ext import commands\n\nfrom bot.bot import Bot\nfrom bot.constants import URLs\n\nSourceType = Union[commands.HelpCommand, commands.Command, commands.Cog, str, commands.ExtensionNotLoaded]\n\n\nclass SourceConverter(commands.Converter):\n \"\"\"Convert an argument into a help command, tag, command, or cog.\"\"\"\n\n async def convert(self, ctx: commands.Context, argument: str) -> SourceType:\n \"\"\"Convert argument into source object.\"\"\"\n if argument.lower().startswith(\"help\"):\n return ctx.bot.help_command\n\n cog = ctx.bot.get_cog(argument)\n if cog:\n return cog\n\n cmd = ctx.bot.get_command(argument)\n if cmd:\n return cmd\n\n tags_cog = ctx.bot.get_cog(\"Tags\")\n show_tag = True\n\n if not tags_cog:\n show_tag = False\n elif argument.lower() in tags_cog._cache:\n return argument.lower()\n\n escaped_arg = utils.escape_markdown(argument)\n\n raise commands.BadArgument(\n f\"Unable to convert '{escaped_arg}' to valid command{', tag,' if show_tag else ''} or Cog.\"\n )\n\n\nclass BotSource(commands.Cog):\n \"\"\"Displays information about the bot's source code.\"\"\"\n\n def __init__(self, bot: 
Bot):\n self.bot = bot\n\n @commands.command(name=\"source\", aliases=(\"src\",))\n async def source_command(self, ctx: commands.Context, *, source_item: SourceConverter = None) -> None:\n \"\"\"Display information and a GitHub link to the source code of a command, tag, or cog.\"\"\"\n if not source_item:\n embed = Embed(title=\"Bot's GitHub Repository\")\n embed.add_field(name=\"Repository\", value=f\"[Go to GitHub]({URLs.github_bot_repo})\")\n embed.set_thumbnail(url=\"https://avatars1.githubusercontent.com/u/9919\")\n await ctx.send(embed=embed)\n return\n\n embed = await self.build_embed(source_item)\n await ctx.send(embed=embed)\n\n def get_source_link(self, source_item: SourceType) -> Tuple[str, str, Optional[int]]:\n \"\"\"\n Build GitHub link of source item, return this link, file location and first line number.\n\n Raise BadArgument if `source_item` is a dynamically-created object (e.g. via internal eval).\n \"\"\"\n if isinstance(source_item, commands.Command):\n src = source_item.callback.__code__\n filename = src.co_filename\n elif isinstance(source_item, str):\n tags_cog = self.bot.get_cog(\"Tags\")\n filename = tags_cog._cache[source_item][\"location\"]\n else:\n src = type(source_item)\n try:\n filename = inspect.getsourcefile(src)\n except TypeError:\n raise commands.BadArgument(\"Cannot get source for a dynamically-created object.\")\n\n if not isinstance(source_item, str):\n try:\n lines, first_line_no = inspect.getsourcelines(src)\n except OSError:\n raise commands.BadArgument(\"Cannot get source for a dynamically-created object.\")\n\n lines_extension = f\"#L{first_line_no}-L{first_line_no+len(lines)-1}\"\n else:\n first_line_no = None\n lines_extension = \"\"\n\n # Handle tag file location differently than others to avoid errors in some cases\n if not first_line_no:\n file_location = Path(filename).relative_to(\"/bot/\")\n else:\n file_location = Path(filename).relative_to(Path.cwd()).as_posix()\n\n url = f\"{URLs.github_bot_repo}/blob/master/{file_location}{lines_extension}\"\n\n return url, file_location, first_line_no or None\n\n async def build_embed(self, source_object: SourceType) -> Optional[Embed]:\n \"\"\"Build embed based on source object.\"\"\"\n url, location, first_line = self.get_source_link(source_object)\n\n if isinstance(source_object, commands.HelpCommand):\n title = \"Help Command\"\n description = source_object.__doc__.splitlines()[1]\n elif isinstance(source_object, commands.Command):\n description = source_object.short_doc\n title = f\"Command: {source_object.qualified_name}\"\n elif isinstance(source_object, str):\n title = f\"Tag: {source_object}\"\n description = \"\"\n else:\n title = f\"Cog: {source_object.qualified_name}\"\n description = source_object.description.splitlines()[0]\n\n embed = Embed(title=title, description=description)\n embed.add_field(name=\"Source Code\", value=f\"[Go to GitHub]({url})\")\n line_text = f\":{first_line}\" if first_line else \"\"\n embed.set_footer(text=f\"{location}{line_text}\")\n\n return embed\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the BotSource cog.\"\"\"\n bot.add_cog(BotSource(bot))\n", "path": "bot/exts/info/source.py"}]}
| 1,654 | 195 |
gh_patches_debug_41439
|
rasdani/github-patches
|
git_diff
|
coqui-ai__TTS-3336
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug] Punctuation restoration works incorrect
### Describe the bug
Punctuation restoration works incorrect.
### To Reproduce
```
from TTS.tts.utils.text.punctuation import Punctuation, _PUNC_IDX, PuncPosition
# original text "...i think i understand."
punctuator = Punctuation()
text = ['', 'i think i understand']
punctuation = [_PUNC_IDX(punc='...', position=PuncPosition.BEGIN), _PUNC_IDX(punc='.', position=PuncPosition.END)]
punctuator.restore(text, punctuation)
# result ["....", "i think i understand"]
```
### Expected behavior
result: `["...i think i understand."]`
### Logs
_No response_
### Environment
```shell
{
"CUDA": {
"GPU": [
"NVIDIA RTX A6000"
],
"available": true,
"version": "11.8"
},
"Packages": {
"PyTorch_debug": false,
"PyTorch_version": "2.0.0+cu118",
"TTS": "0.16.6",
"numpy": "1.22.0"
},
"System": {
"OS": "Linux",
"architecture": [
"64bit",
""
],
"processor": "x86_64",
"python": "3.10.12",
"version": "#170-Ubuntu SMP Fri Jun 16 13:43:31 UTC 2023"
}
}
```
### Additional context
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `TTS/tts/utils/text/punctuation.py`
Content:
```
1 import collections
2 import re
3 from enum import Enum
4
5 import six
6
7 _DEF_PUNCS = ';:,.!?¡¿—…"«»“”'
8
9 _PUNC_IDX = collections.namedtuple("_punc_index", ["punc", "position"])
10
11
12 class PuncPosition(Enum):
13 """Enum for the punctuations positions"""
14
15 BEGIN = 0
16 END = 1
17 MIDDLE = 2
18 ALONE = 3
19
20
21 class Punctuation:
22 """Handle punctuations in text.
23
24 Just strip punctuations from text or strip and restore them later.
25
26 Args:
27 puncs (str): The punctuations to be processed. Defaults to `_DEF_PUNCS`.
28
29 Example:
30 >>> punc = Punctuation()
31 >>> punc.strip("This is. example !")
32 'This is example'
33
34 >>> text_striped, punc_map = punc.strip_to_restore("This is. example !")
35 >>> ' '.join(text_striped)
36 'This is example'
37
38 >>> text_restored = punc.restore(text_striped, punc_map)
39 >>> text_restored[0]
40 'This is. example !'
41 """
42
43 def __init__(self, puncs: str = _DEF_PUNCS):
44 self.puncs = puncs
45
46 @staticmethod
47 def default_puncs():
48 """Return default set of punctuations."""
49 return _DEF_PUNCS
50
51 @property
52 def puncs(self):
53 return self._puncs
54
55 @puncs.setter
56 def puncs(self, value):
57 if not isinstance(value, six.string_types):
58 raise ValueError("[!] Punctuations must be of type str.")
59 self._puncs = "".join(list(dict.fromkeys(list(value)))) # remove duplicates without changing the oreder
60 self.puncs_regular_exp = re.compile(rf"(\s*[{re.escape(self._puncs)}]+\s*)+")
61
62 def strip(self, text):
63 """Remove all the punctuations by replacing with `space`.
64
65 Args:
66 text (str): The text to be processed.
67
68 Example::
69
70 "This is. example !" -> "This is example "
71 """
72 return re.sub(self.puncs_regular_exp, " ", text).rstrip().lstrip()
73
74 def strip_to_restore(self, text):
75 """Remove punctuations from text to restore them later.
76
77 Args:
78 text (str): The text to be processed.
79
80 Examples ::
81
82 "This is. example !" -> [["This is", "example"], [".", "!"]]
83
84 """
85 text, puncs = self._strip_to_restore(text)
86 return text, puncs
87
88 def _strip_to_restore(self, text):
89 """Auxiliary method for Punctuation.preserve()"""
90 matches = list(re.finditer(self.puncs_regular_exp, text))
91 if not matches:
92 return [text], []
93 # the text is only punctuations
94 if len(matches) == 1 and matches[0].group() == text:
95 return [], [_PUNC_IDX(text, PuncPosition.ALONE)]
96 # build a punctuation map to be used later to restore punctuations
97 puncs = []
98 for match in matches:
99 position = PuncPosition.MIDDLE
100 if match == matches[0] and text.startswith(match.group()):
101 position = PuncPosition.BEGIN
102 elif match == matches[-1] and text.endswith(match.group()):
103 position = PuncPosition.END
104 puncs.append(_PUNC_IDX(match.group(), position))
105 # convert str text to a List[str], each item is separated by a punctuation
106 splitted_text = []
107 for idx, punc in enumerate(puncs):
108 split = text.split(punc.punc)
109 prefix, suffix = split[0], punc.punc.join(split[1:])
110 splitted_text.append(prefix)
111 # if the text does not end with a punctuation, add it to the last item
112 if idx == len(puncs) - 1 and len(suffix) > 0:
113 splitted_text.append(suffix)
114 text = suffix
115 return splitted_text, puncs
116
117 @classmethod
118 def restore(cls, text, puncs):
119 """Restore punctuation in a text.
120
121 Args:
122 text (str): The text to be processed.
123 puncs (List[str]): The list of punctuations map to be used for restoring.
124
125 Examples ::
126
127 ['This is', 'example'], ['.', '!'] -> "This is. example!"
128
129 """
130 return cls._restore(text, puncs, 0)
131
132 @classmethod
133 def _restore(cls, text, puncs, num): # pylint: disable=too-many-return-statements
134 """Auxiliary method for Punctuation.restore()"""
135 if not puncs:
136 return text
137
138 # nothing have been phonemized, returns the puncs alone
139 if not text:
140 return ["".join(m.punc for m in puncs)]
141
142 current = puncs[0]
143
144 if current.position == PuncPosition.BEGIN:
145 return cls._restore([current.punc + text[0]] + text[1:], puncs[1:], num)
146
147 if current.position == PuncPosition.END:
148 return [text[0] + current.punc] + cls._restore(text[1:], puncs[1:], num + 1)
149
150 if current.position == PuncPosition.ALONE:
151 return [current.mark] + cls._restore(text, puncs[1:], num + 1)
152
153 # POSITION == MIDDLE
154 if len(text) == 1: # pragma: nocover
155 # a corner case where the final part of an intermediate
156 # mark (I) has not been phonemized
157 return cls._restore([text[0] + current.punc], puncs[1:], num)
158
159 return cls._restore([text[0] + current.punc + text[1]] + text[2:], puncs[1:], num)
160
161
162 # if __name__ == "__main__":
163 # punc = Punctuation()
164 # text = "This is. This is, example!"
165
166 # print(punc.strip(text))
167
168 # split_text, puncs = punc.strip_to_restore(text)
169 # print(split_text, " ---- ", puncs)
170
171 # restored_text = punc.restore(split_text, puncs)
172 # print(restored_text)
173
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/TTS/tts/utils/text/punctuation.py b/TTS/tts/utils/text/punctuation.py
--- a/TTS/tts/utils/text/punctuation.py
+++ b/TTS/tts/utils/text/punctuation.py
@@ -15,7 +15,6 @@
BEGIN = 0
END = 1
MIDDLE = 2
- ALONE = 3
class Punctuation:
@@ -92,7 +91,7 @@
return [text], []
# the text is only punctuations
if len(matches) == 1 and matches[0].group() == text:
- return [], [_PUNC_IDX(text, PuncPosition.ALONE)]
+ return [], [_PUNC_IDX(text, PuncPosition.BEGIN)]
# build a punctuation map to be used later to restore punctuations
puncs = []
for match in matches:
@@ -107,11 +106,14 @@
for idx, punc in enumerate(puncs):
split = text.split(punc.punc)
prefix, suffix = split[0], punc.punc.join(split[1:])
+ text = suffix
+ if prefix == "":
+ # We don't want to insert an empty string in case of initial punctuation
+ continue
splitted_text.append(prefix)
# if the text does not end with a punctuation, add it to the last item
if idx == len(puncs) - 1 and len(suffix) > 0:
splitted_text.append(suffix)
- text = suffix
return splitted_text, puncs
@classmethod
@@ -127,10 +129,10 @@
['This is', 'example'], ['.', '!'] -> "This is. example!"
"""
- return cls._restore(text, puncs, 0)
+ return cls._restore(text, puncs)
@classmethod
- def _restore(cls, text, puncs, num): # pylint: disable=too-many-return-statements
+ def _restore(cls, text, puncs): # pylint: disable=too-many-return-statements
"""Auxiliary method for Punctuation.restore()"""
if not puncs:
return text
@@ -142,21 +144,18 @@
current = puncs[0]
if current.position == PuncPosition.BEGIN:
- return cls._restore([current.punc + text[0]] + text[1:], puncs[1:], num)
+ return cls._restore([current.punc + text[0]] + text[1:], puncs[1:])
if current.position == PuncPosition.END:
- return [text[0] + current.punc] + cls._restore(text[1:], puncs[1:], num + 1)
-
- if current.position == PuncPosition.ALONE:
- return [current.mark] + cls._restore(text, puncs[1:], num + 1)
+ return [text[0] + current.punc] + cls._restore(text[1:], puncs[1:])
# POSITION == MIDDLE
if len(text) == 1: # pragma: nocover
# a corner case where the final part of an intermediate
# mark (I) has not been phonemized
- return cls._restore([text[0] + current.punc], puncs[1:], num)
+ return cls._restore([text[0] + current.punc], puncs[1:])
- return cls._restore([text[0] + current.punc + text[1]] + text[2:], puncs[1:], num)
+ return cls._restore([text[0] + current.punc + text[1]] + text[2:], puncs[1:])
# if __name__ == "__main__":
|
{"golden_diff": "diff --git a/TTS/tts/utils/text/punctuation.py b/TTS/tts/utils/text/punctuation.py\n--- a/TTS/tts/utils/text/punctuation.py\n+++ b/TTS/tts/utils/text/punctuation.py\n@@ -15,7 +15,6 @@\n BEGIN = 0\n END = 1\n MIDDLE = 2\n- ALONE = 3\n \n \n class Punctuation:\n@@ -92,7 +91,7 @@\n return [text], []\n # the text is only punctuations\n if len(matches) == 1 and matches[0].group() == text:\n- return [], [_PUNC_IDX(text, PuncPosition.ALONE)]\n+ return [], [_PUNC_IDX(text, PuncPosition.BEGIN)]\n # build a punctuation map to be used later to restore punctuations\n puncs = []\n for match in matches:\n@@ -107,11 +106,14 @@\n for idx, punc in enumerate(puncs):\n split = text.split(punc.punc)\n prefix, suffix = split[0], punc.punc.join(split[1:])\n+ text = suffix\n+ if prefix == \"\":\n+ # We don't want to insert an empty string in case of initial punctuation\n+ continue\n splitted_text.append(prefix)\n # if the text does not end with a punctuation, add it to the last item\n if idx == len(puncs) - 1 and len(suffix) > 0:\n splitted_text.append(suffix)\n- text = suffix\n return splitted_text, puncs\n \n @classmethod\n@@ -127,10 +129,10 @@\n ['This is', 'example'], ['.', '!'] -> \"This is. example!\"\n \n \"\"\"\n- return cls._restore(text, puncs, 0)\n+ return cls._restore(text, puncs)\n \n @classmethod\n- def _restore(cls, text, puncs, num): # pylint: disable=too-many-return-statements\n+ def _restore(cls, text, puncs): # pylint: disable=too-many-return-statements\n \"\"\"Auxiliary method for Punctuation.restore()\"\"\"\n if not puncs:\n return text\n@@ -142,21 +144,18 @@\n current = puncs[0]\n \n if current.position == PuncPosition.BEGIN:\n- return cls._restore([current.punc + text[0]] + text[1:], puncs[1:], num)\n+ return cls._restore([current.punc + text[0]] + text[1:], puncs[1:])\n \n if current.position == PuncPosition.END:\n- return [text[0] + current.punc] + cls._restore(text[1:], puncs[1:], num + 1)\n-\n- if current.position == PuncPosition.ALONE:\n- return [current.mark] + cls._restore(text, puncs[1:], num + 1)\n+ return [text[0] + current.punc] + cls._restore(text[1:], puncs[1:])\n \n # POSITION == MIDDLE\n if len(text) == 1: # pragma: nocover\n # a corner case where the final part of an intermediate\n # mark (I) has not been phonemized\n- return cls._restore([text[0] + current.punc], puncs[1:], num)\n+ return cls._restore([text[0] + current.punc], puncs[1:])\n \n- return cls._restore([text[0] + current.punc + text[1]] + text[2:], puncs[1:], num)\n+ return cls._restore([text[0] + current.punc + text[1]] + text[2:], puncs[1:])\n \n \n # if __name__ == \"__main__\":\n", "issue": "[Bug] Punctuation restoration works incorrect\n### Describe the bug\n\nPunctuation restoration works incorrect.\n\n### To Reproduce\n\n```\r\nfrom TTS.tts.utils.text.punctuation import Punctuation, _PUNC_IDX, PuncPosition\r\n\r\n# original text \"...i think i understand.\"\r\npunctuator = Punctuation()\r\ntext = ['', 'i think i understand']\r\npunctuation = [_PUNC_IDX(punc='...', position=PuncPosition.BEGIN), _PUNC_IDX(punc='.', position=PuncPosition.END)]\r\npunctuator.restore(text, punctuation)\r\n\r\n# result [\"....\", \"i think i understand\"]\r\n```\n\n### Expected behavior\n\nresult: `[\"...i think i understand.\"]`\n\n### Logs\n\n_No response_\n\n### Environment\n\n```shell\n{\r\n \"CUDA\": {\r\n \"GPU\": [\r\n \"NVIDIA RTX A6000\"\r\n ],\r\n \"available\": true,\r\n \"version\": \"11.8\"\r\n },\r\n \"Packages\": {\r\n \"PyTorch_debug\": false,\r\n \"PyTorch_version\": \"2.0.0+cu118\",\r\n 
\"TTS\": \"0.16.6\",\r\n \"numpy\": \"1.22.0\"\r\n },\r\n \"System\": {\r\n \"OS\": \"Linux\",\r\n \"architecture\": [\r\n \"64bit\",\r\n \"\"\r\n ],\r\n \"processor\": \"x86_64\",\r\n \"python\": \"3.10.12\",\r\n \"version\": \"#170-Ubuntu SMP Fri Jun 16 13:43:31 UTC 2023\"\r\n }\r\n}\n```\n\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "import collections\nimport re\nfrom enum import Enum\n\nimport six\n\n_DEF_PUNCS = ';:,.!?\u00a1\u00bf\u2014\u2026\"\u00ab\u00bb\u201c\u201d'\n\n_PUNC_IDX = collections.namedtuple(\"_punc_index\", [\"punc\", \"position\"])\n\n\nclass PuncPosition(Enum):\n \"\"\"Enum for the punctuations positions\"\"\"\n\n BEGIN = 0\n END = 1\n MIDDLE = 2\n ALONE = 3\n\n\nclass Punctuation:\n \"\"\"Handle punctuations in text.\n\n Just strip punctuations from text or strip and restore them later.\n\n Args:\n puncs (str): The punctuations to be processed. Defaults to `_DEF_PUNCS`.\n\n Example:\n >>> punc = Punctuation()\n >>> punc.strip(\"This is. example !\")\n 'This is example'\n\n >>> text_striped, punc_map = punc.strip_to_restore(\"This is. example !\")\n >>> ' '.join(text_striped)\n 'This is example'\n\n >>> text_restored = punc.restore(text_striped, punc_map)\n >>> text_restored[0]\n 'This is. example !'\n \"\"\"\n\n def __init__(self, puncs: str = _DEF_PUNCS):\n self.puncs = puncs\n\n @staticmethod\n def default_puncs():\n \"\"\"Return default set of punctuations.\"\"\"\n return _DEF_PUNCS\n\n @property\n def puncs(self):\n return self._puncs\n\n @puncs.setter\n def puncs(self, value):\n if not isinstance(value, six.string_types):\n raise ValueError(\"[!] Punctuations must be of type str.\")\n self._puncs = \"\".join(list(dict.fromkeys(list(value)))) # remove duplicates without changing the oreder\n self.puncs_regular_exp = re.compile(rf\"(\\s*[{re.escape(self._puncs)}]+\\s*)+\")\n\n def strip(self, text):\n \"\"\"Remove all the punctuations by replacing with `space`.\n\n Args:\n text (str): The text to be processed.\n\n Example::\n\n \"This is. example !\" -> \"This is example \"\n \"\"\"\n return re.sub(self.puncs_regular_exp, \" \", text).rstrip().lstrip()\n\n def strip_to_restore(self, text):\n \"\"\"Remove punctuations from text to restore them later.\n\n Args:\n text (str): The text to be processed.\n\n Examples ::\n\n \"This is. 
example !\" -> [[\"This is\", \"example\"], [\".\", \"!\"]]\n\n \"\"\"\n text, puncs = self._strip_to_restore(text)\n return text, puncs\n\n def _strip_to_restore(self, text):\n \"\"\"Auxiliary method for Punctuation.preserve()\"\"\"\n matches = list(re.finditer(self.puncs_regular_exp, text))\n if not matches:\n return [text], []\n # the text is only punctuations\n if len(matches) == 1 and matches[0].group() == text:\n return [], [_PUNC_IDX(text, PuncPosition.ALONE)]\n # build a punctuation map to be used later to restore punctuations\n puncs = []\n for match in matches:\n position = PuncPosition.MIDDLE\n if match == matches[0] and text.startswith(match.group()):\n position = PuncPosition.BEGIN\n elif match == matches[-1] and text.endswith(match.group()):\n position = PuncPosition.END\n puncs.append(_PUNC_IDX(match.group(), position))\n # convert str text to a List[str], each item is separated by a punctuation\n splitted_text = []\n for idx, punc in enumerate(puncs):\n split = text.split(punc.punc)\n prefix, suffix = split[0], punc.punc.join(split[1:])\n splitted_text.append(prefix)\n # if the text does not end with a punctuation, add it to the last item\n if idx == len(puncs) - 1 and len(suffix) > 0:\n splitted_text.append(suffix)\n text = suffix\n return splitted_text, puncs\n\n @classmethod\n def restore(cls, text, puncs):\n \"\"\"Restore punctuation in a text.\n\n Args:\n text (str): The text to be processed.\n puncs (List[str]): The list of punctuations map to be used for restoring.\n\n Examples ::\n\n ['This is', 'example'], ['.', '!'] -> \"This is. example!\"\n\n \"\"\"\n return cls._restore(text, puncs, 0)\n\n @classmethod\n def _restore(cls, text, puncs, num): # pylint: disable=too-many-return-statements\n \"\"\"Auxiliary method for Punctuation.restore()\"\"\"\n if not puncs:\n return text\n\n # nothing have been phonemized, returns the puncs alone\n if not text:\n return [\"\".join(m.punc for m in puncs)]\n\n current = puncs[0]\n\n if current.position == PuncPosition.BEGIN:\n return cls._restore([current.punc + text[0]] + text[1:], puncs[1:], num)\n\n if current.position == PuncPosition.END:\n return [text[0] + current.punc] + cls._restore(text[1:], puncs[1:], num + 1)\n\n if current.position == PuncPosition.ALONE:\n return [current.mark] + cls._restore(text, puncs[1:], num + 1)\n\n # POSITION == MIDDLE\n if len(text) == 1: # pragma: nocover\n # a corner case where the final part of an intermediate\n # mark (I) has not been phonemized\n return cls._restore([text[0] + current.punc], puncs[1:], num)\n\n return cls._restore([text[0] + current.punc + text[1]] + text[2:], puncs[1:], num)\n\n\n# if __name__ == \"__main__\":\n# punc = Punctuation()\n# text = \"This is. This is, example!\"\n\n# print(punc.strip(text))\n\n# split_text, puncs = punc.strip_to_restore(text)\n# print(split_text, \" ---- \", puncs)\n\n# restored_text = punc.restore(split_text, puncs)\n# print(restored_text)\n", "path": "TTS/tts/utils/text/punctuation.py"}], "after_files": [{"content": "import collections\nimport re\nfrom enum import Enum\n\nimport six\n\n_DEF_PUNCS = ';:,.!?\u00a1\u00bf\u2014\u2026\"\u00ab\u00bb\u201c\u201d'\n\n_PUNC_IDX = collections.namedtuple(\"_punc_index\", [\"punc\", \"position\"])\n\n\nclass PuncPosition(Enum):\n \"\"\"Enum for the punctuations positions\"\"\"\n\n BEGIN = 0\n END = 1\n MIDDLE = 2\n\n\nclass Punctuation:\n \"\"\"Handle punctuations in text.\n\n Just strip punctuations from text or strip and restore them later.\n\n Args:\n puncs (str): The punctuations to be processed. 
Defaults to `_DEF_PUNCS`.\n\n Example:\n >>> punc = Punctuation()\n >>> punc.strip(\"This is. example !\")\n 'This is example'\n\n >>> text_striped, punc_map = punc.strip_to_restore(\"This is. example !\")\n >>> ' '.join(text_striped)\n 'This is example'\n\n >>> text_restored = punc.restore(text_striped, punc_map)\n >>> text_restored[0]\n 'This is. example !'\n \"\"\"\n\n def __init__(self, puncs: str = _DEF_PUNCS):\n self.puncs = puncs\n\n @staticmethod\n def default_puncs():\n \"\"\"Return default set of punctuations.\"\"\"\n return _DEF_PUNCS\n\n @property\n def puncs(self):\n return self._puncs\n\n @puncs.setter\n def puncs(self, value):\n if not isinstance(value, six.string_types):\n raise ValueError(\"[!] Punctuations must be of type str.\")\n self._puncs = \"\".join(list(dict.fromkeys(list(value)))) # remove duplicates without changing the oreder\n self.puncs_regular_exp = re.compile(rf\"(\\s*[{re.escape(self._puncs)}]+\\s*)+\")\n\n def strip(self, text):\n \"\"\"Remove all the punctuations by replacing with `space`.\n\n Args:\n text (str): The text to be processed.\n\n Example::\n\n \"This is. example !\" -> \"This is example \"\n \"\"\"\n return re.sub(self.puncs_regular_exp, \" \", text).rstrip().lstrip()\n\n def strip_to_restore(self, text):\n \"\"\"Remove punctuations from text to restore them later.\n\n Args:\n text (str): The text to be processed.\n\n Examples ::\n\n \"This is. example !\" -> [[\"This is\", \"example\"], [\".\", \"!\"]]\n\n \"\"\"\n text, puncs = self._strip_to_restore(text)\n return text, puncs\n\n def _strip_to_restore(self, text):\n \"\"\"Auxiliary method for Punctuation.preserve()\"\"\"\n matches = list(re.finditer(self.puncs_regular_exp, text))\n if not matches:\n return [text], []\n # the text is only punctuations\n if len(matches) == 1 and matches[0].group() == text:\n return [], [_PUNC_IDX(text, PuncPosition.BEGIN)]\n # build a punctuation map to be used later to restore punctuations\n puncs = []\n for match in matches:\n position = PuncPosition.MIDDLE\n if match == matches[0] and text.startswith(match.group()):\n position = PuncPosition.BEGIN\n elif match == matches[-1] and text.endswith(match.group()):\n position = PuncPosition.END\n puncs.append(_PUNC_IDX(match.group(), position))\n # convert str text to a List[str], each item is separated by a punctuation\n splitted_text = []\n for idx, punc in enumerate(puncs):\n split = text.split(punc.punc)\n prefix, suffix = split[0], punc.punc.join(split[1:])\n text = suffix\n if prefix == \"\":\n # We don't want to insert an empty string in case of initial punctuation\n continue\n splitted_text.append(prefix)\n # if the text does not end with a punctuation, add it to the last item\n if idx == len(puncs) - 1 and len(suffix) > 0:\n splitted_text.append(suffix)\n return splitted_text, puncs\n\n @classmethod\n def restore(cls, text, puncs):\n \"\"\"Restore punctuation in a text.\n\n Args:\n text (str): The text to be processed.\n puncs (List[str]): The list of punctuations map to be used for restoring.\n\n Examples ::\n\n ['This is', 'example'], ['.', '!'] -> \"This is. 
example!\"\n\n \"\"\"\n return cls._restore(text, puncs)\n\n @classmethod\n def _restore(cls, text, puncs): # pylint: disable=too-many-return-statements\n \"\"\"Auxiliary method for Punctuation.restore()\"\"\"\n if not puncs:\n return text\n\n # nothing have been phonemized, returns the puncs alone\n if not text:\n return [\"\".join(m.punc for m in puncs)]\n\n current = puncs[0]\n\n if current.position == PuncPosition.BEGIN:\n return cls._restore([current.punc + text[0]] + text[1:], puncs[1:])\n\n if current.position == PuncPosition.END:\n return [text[0] + current.punc] + cls._restore(text[1:], puncs[1:])\n\n # POSITION == MIDDLE\n if len(text) == 1: # pragma: nocover\n # a corner case where the final part of an intermediate\n # mark (I) has not been phonemized\n return cls._restore([text[0] + current.punc], puncs[1:])\n\n return cls._restore([text[0] + current.punc + text[1]] + text[2:], puncs[1:])\n\n\n# if __name__ == \"__main__\":\n# punc = Punctuation()\n# text = \"This is. This is, example!\"\n\n# print(punc.strip(text))\n\n# split_text, puncs = punc.strip_to_restore(text)\n# print(split_text, \" ---- \", puncs)\n\n# restored_text = punc.restore(split_text, puncs)\n# print(restored_text)\n", "path": "TTS/tts/utils/text/punctuation.py"}]}
| 2,457 | 869 |
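A small usage sketch of the `Punctuation` round-trip implemented in the record above; it assumes the Coqui `TTS` package is importable, and the values in the comments come from tracing `strip_to_restore`/`restore` on a short sentence rather than from the record itself.

```python
from TTS.tts.utils.text.punctuation import Punctuation

punc = Punctuation()
parts, puncs = punc.strip_to_restore("This is, an example!")
print(parts)                              # ['This is', 'an example']
print(Punctuation.restore(parts, puncs))  # ['This is, an example!']
```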
gh_patches_debug_2552
|
rasdani/github-patches
|
git_diff
|
scikit-image__scikit-image-6839
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`segmentation.watershed` returns wrong data type
### Description:
The documentation of `segmentation.watershed` says that:
> ### Returns
> **out**: ndarray
> A labeled matrix of the same type and shape as markers
[[0.18.x]](https://scikit-image.org/docs/0.18.x/api/skimage.segmentation.html#skimage.segmentation.watershed) [[0.19.x]](https://scikit-image.org/docs/0.19.x/api/skimage.segmentation.html#skimage.segmentation.watershed)
I have tested this with 0.18.1:
```python
import skimage.segmentation
import numpy as np
print(skimage.segmentation.watershed(np.zeros((100, 100)), np.zeros((100, 100), np.uint16)).dtype)
```
Gives `int32` but `uint16` is expected.
### Way to reproduce:
```Python
import skimage.segmentation
import numpy as np
print(skimage.segmentation.watershed(np.zeros((100, 100)), np.zeros((100, 100), np.uint16)).dtype)
```
### Traceback or output:
```Shell
int32
```
### Version information:
```Shell
3.8.5 (default, Sep 4 2020, 07:30:14)
[GCC 7.3.0]
Linux-5.8.0-36-generic-x86_64-with-glibc2.10
scikit-image version: 0.18.1
numpy version: 1.20.3
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `skimage/segmentation/_watershed.py`
Content:
```
1 """watershed.py - watershed algorithm
2
3 This module implements a watershed algorithm that apportions pixels into
4 marked basins. The algorithm uses a priority queue to hold the pixels
5 with the metric for the priority queue being pixel value, then the time
6 of entry into the queue - this settles ties in favor of the closest marker.
7
8 Some ideas taken from
9 Soille, "Automated Basin Delineation from Digital Elevation Models Using
10 Mathematical Morphology", Signal Processing 20 (1990) 171-182.
11
12 The most important insight in the paper is that entry time onto the queue
13 solves two problems: a pixel should be assigned to the neighbor with the
14 largest gradient or, if there is no gradient, pixels on a plateau should
15 be split between markers on opposite sides.
16 """
17
18 import numpy as np
19 from scipy import ndimage as ndi
20
21 from . import _watershed_cy
22 from ..morphology.extrema import local_minima
23 from ..morphology._util import (_validate_connectivity,
24 _offsets_to_raveled_neighbors)
25 from ..util import crop, regular_seeds
26
27
28 def _validate_inputs(image, markers, mask, connectivity):
29 """Ensure that all inputs to watershed have matching shapes and types.
30
31 Parameters
32 ----------
33 image : array
34 The input image.
35 markers : int or array of int
36 The marker image.
37 mask : array, or None
38 A boolean mask, True where we want to compute the watershed.
39 connectivity : int in {1, ..., image.ndim}
40 The connectivity of the neighborhood of a pixel.
41
42 Returns
43 -------
44 image, markers, mask : arrays
45 The validated and formatted arrays. Image will have dtype float64,
46 markers int32, and mask int8. If ``None`` was given for the mask,
47 it is a volume of all 1s.
48
49 Raises
50 ------
51 ValueError
52 If the shapes of the given arrays don't match.
53 """
54 n_pixels = image.size
55 if mask is None:
56 # Use a complete `True` mask if none is provided
57 mask = np.ones(image.shape, bool)
58 else:
59 mask = np.asanyarray(mask, dtype=bool)
60 n_pixels = np.sum(mask)
61 if mask.shape != image.shape:
62 message = (f'`mask` (shape {mask.shape}) must have same shape '
63 f'as `image` (shape {image.shape})')
64 raise ValueError(message)
65 if markers is None:
66 markers_bool = local_minima(image, connectivity=connectivity) * mask
67 footprint = ndi.generate_binary_structure(markers_bool.ndim, connectivity)
68 markers = ndi.label(markers_bool, structure=footprint)[0]
69 elif not isinstance(markers, (np.ndarray, list, tuple)):
70 # not array-like, assume int
71 # given int, assume that number of markers *within mask*.
72 markers = regular_seeds(image.shape,
73 int(markers / (n_pixels / image.size)))
74 markers *= mask
75 else:
76 markers = np.asanyarray(markers) * mask
77 if markers.shape != image.shape:
78 message = (f'`markers` (shape {markers.shape}) must have same '
79 f'shape as `image` (shape {image.shape})')
80 raise ValueError(message)
81 return (image.astype(np.float64),
82 markers.astype(np.int32),
83 mask.astype(np.int8))
84
85
86 def watershed(image, markers=None, connectivity=1, offset=None, mask=None,
87 compactness=0, watershed_line=False):
88 """Find watershed basins in `image` flooded from given `markers`.
89
90 Parameters
91 ----------
92 image : ndarray (2-D, 3-D, ...)
93 Data array where the lowest value points are labeled first.
94 markers : int, or ndarray of int, same shape as `image`, optional
95 The desired number of markers, or an array marking the basins with the
96 values to be assigned in the label matrix. Zero means not a marker. If
97 ``None`` (no markers given), the local minima of the image are used as
98 markers.
99 connectivity : ndarray, optional
100 An array with the same number of dimensions as `image` whose
101 non-zero elements indicate neighbors for connection.
102 Following the scipy convention, default is a one-connected array of
103 the dimension of the image.
104 offset : array_like of shape image.ndim, optional
105 offset of the connectivity (one offset per dimension)
106 mask : ndarray of bools or 0s and 1s, optional
107 Array of same shape as `image`. Only points at which mask == True
108 will be labeled.
109 compactness : float, optional
110 Use compact watershed [3]_ with given compactness parameter.
111 Higher values result in more regularly-shaped watershed basins.
112 watershed_line : bool, optional
113 If watershed_line is True, a one-pixel wide line separates the regions
114 obtained by the watershed algorithm. The line has the label 0.
115 Note that the method used for adding this line expects that
116 marker regions are not adjacent; the watershed line may not catch
117 borders between adjacent marker regions.
118
119 Returns
120 -------
121 out : ndarray
122 A labeled matrix of the same type and shape as markers
123
124 See Also
125 --------
126 skimage.segmentation.random_walker : random walker segmentation
127 A segmentation algorithm based on anisotropic diffusion, usually
128 slower than the watershed but with good results on noisy data and
129 boundaries with holes.
130
131 Notes
132 -----
133 This function implements a watershed algorithm [1]_ [2]_ that apportions
134 pixels into marked basins. The algorithm uses a priority queue to hold
135 the pixels with the metric for the priority queue being pixel value, then
136 the time of entry into the queue - this settles ties in favor of the
137 closest marker.
138
139 Some ideas taken from
140 Soille, "Automated Basin Delineation from Digital Elevation Models Using
141 Mathematical Morphology", Signal Processing 20 (1990) 171-182
142
143 The most important insight in the paper is that entry time onto the queue
144 solves two problems: a pixel should be assigned to the neighbor with the
145 largest gradient or, if there is no gradient, pixels on a plateau should
146 be split between markers on opposite sides.
147
148 This implementation converts all arguments to specific, lowest common
149 denominator types, then passes these to a C algorithm.
150
151 Markers can be determined manually, or automatically using for example
152 the local minima of the gradient of the image, or the local maxima of the
153 distance function to the background for separating overlapping objects
154 (see example).
155
156 References
157 ----------
158 .. [1] https://en.wikipedia.org/wiki/Watershed_%28image_processing%29
159
160 .. [2] http://cmm.ensmp.fr/~beucher/wtshed.html
161
162 .. [3] Peer Neubert & Peter Protzel (2014). Compact Watershed and
163 Preemptive SLIC: On Improving Trade-offs of Superpixel Segmentation
164 Algorithms. ICPR 2014, pp 996-1001. :DOI:`10.1109/ICPR.2014.181`
165 https://www.tu-chemnitz.de/etit/proaut/publications/cws_pSLIC_ICPR.pdf
166
167 Examples
168 --------
169 The watershed algorithm is useful to separate overlapping objects.
170
171 We first generate an initial image with two overlapping circles:
172
173 >>> x, y = np.indices((80, 80))
174 >>> x1, y1, x2, y2 = 28, 28, 44, 52
175 >>> r1, r2 = 16, 20
176 >>> mask_circle1 = (x - x1)**2 + (y - y1)**2 < r1**2
177 >>> mask_circle2 = (x - x2)**2 + (y - y2)**2 < r2**2
178 >>> image = np.logical_or(mask_circle1, mask_circle2)
179
180 Next, we want to separate the two circles. We generate markers at the
181 maxima of the distance to the background:
182
183 >>> from scipy import ndimage as ndi
184 >>> distance = ndi.distance_transform_edt(image)
185 >>> from skimage.feature import peak_local_max
186 >>> max_coords = peak_local_max(distance, labels=image,
187 ... footprint=np.ones((3, 3)))
188 >>> local_maxima = np.zeros_like(image, dtype=bool)
189 >>> local_maxima[tuple(max_coords.T)] = True
190 >>> markers = ndi.label(local_maxima)[0]
191
192 Finally, we run the watershed on the image and markers:
193
194 >>> labels = watershed(-distance, markers, mask=image)
195
196 The algorithm works also for 3-D images, and can be used for example to
197 separate overlapping spheres.
198 """
199 image, markers, mask = _validate_inputs(image, markers, mask, connectivity)
200 connectivity, offset = _validate_connectivity(image.ndim, connectivity,
201 offset)
202
203 # pad the image, markers, and mask so that we can use the mask to
204 # keep from running off the edges
205 pad_width = [(p, p) for p in offset]
206 image = np.pad(image, pad_width, mode='constant')
207 mask = np.pad(mask, pad_width, mode='constant').ravel()
208 output = np.pad(markers, pad_width, mode='constant')
209
210 flat_neighborhood = _offsets_to_raveled_neighbors(
211 image.shape, connectivity, center=offset)
212 marker_locations = np.flatnonzero(output)
213 image_strides = np.array(image.strides, dtype=np.intp) // image.itemsize
214
215 _watershed_cy.watershed_raveled(image.ravel(),
216 marker_locations, flat_neighborhood,
217 mask, image_strides, compactness,
218 output.ravel(),
219 watershed_line)
220
221 output = crop(output, pad_width, copy=True)
222
223 return output
224
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/skimage/segmentation/_watershed.py b/skimage/segmentation/_watershed.py
--- a/skimage/segmentation/_watershed.py
+++ b/skimage/segmentation/_watershed.py
@@ -79,7 +79,7 @@
f'shape as `image` (shape {image.shape})')
raise ValueError(message)
return (image.astype(np.float64),
- markers.astype(np.int32),
+ markers,
mask.astype(np.int8))
|
{"golden_diff": "diff --git a/skimage/segmentation/_watershed.py b/skimage/segmentation/_watershed.py\n--- a/skimage/segmentation/_watershed.py\n+++ b/skimage/segmentation/_watershed.py\n@@ -79,7 +79,7 @@\n f'shape as `image` (shape {image.shape})')\n raise ValueError(message)\n return (image.astype(np.float64),\n- markers.astype(np.int32),\n+ markers,\n mask.astype(np.int8))\n", "issue": "`segmentation.watershed` returns wrong data type\n### Description:\n\nThe documentation of `segmentation.watershed` says that:\r\n\r\n> ### Returns\r\n> **out**: ndarray\r\n> A labeled matrix of the same type and shape as markers\r\n\r\n[[0.18.x]](https://scikit-image.org/docs/0.18.x/api/skimage.segmentation.html#skimage.segmentation.watershed) [[0.19.x]](https://scikit-image.org/docs/0.19.x/api/skimage.segmentation.html#skimage.segmentation.watershed)\r\n\r\nI have tested this with 0.18.1:\r\n\r\n```python\r\nimport skimage.segmentation\r\nimport numpy as np\r\nprint(skimage.segmentation.watershed(np.zeros((100, 100)), np.zeros((100, 100), np.uint16)).dtype)\r\n```\r\n\r\nGives `int32` but `uint16` is expected.\n\n### Way to reproduce:\n\n```Python\nimport skimage.segmentation\r\nimport numpy as np\r\nprint(skimage.segmentation.watershed(np.zeros((100, 100)), np.zeros((100, 100), np.uint16)).dtype)\n```\n\n\n### Traceback or output:\n\n```Shell\nint32\n```\n\n\n### Version information:\n\n```Shell\n3.8.5 (default, Sep 4 2020, 07:30:14) \r\n[GCC 7.3.0]\r\nLinux-5.8.0-36-generic-x86_64-with-glibc2.10\r\nscikit-image version: 0.18.1\r\nnumpy version: 1.20.3\n```\n\n", "before_files": [{"content": "\"\"\"watershed.py - watershed algorithm\n\nThis module implements a watershed algorithm that apportions pixels into\nmarked basins. The algorithm uses a priority queue to hold the pixels\nwith the metric for the priority queue being pixel value, then the time\nof entry into the queue - this settles ties in favor of the closest marker.\n\nSome ideas taken from\nSoille, \"Automated Basin Delineation from Digital Elevation Models Using\nMathematical Morphology\", Signal Processing 20 (1990) 171-182.\n\nThe most important insight in the paper is that entry time onto the queue\nsolves two problems: a pixel should be assigned to the neighbor with the\nlargest gradient or, if there is no gradient, pixels on a plateau should\nbe split between markers on opposite sides.\n\"\"\"\n\nimport numpy as np\nfrom scipy import ndimage as ndi\n\nfrom . import _watershed_cy\nfrom ..morphology.extrema import local_minima\nfrom ..morphology._util import (_validate_connectivity,\n _offsets_to_raveled_neighbors)\nfrom ..util import crop, regular_seeds\n\n\ndef _validate_inputs(image, markers, mask, connectivity):\n \"\"\"Ensure that all inputs to watershed have matching shapes and types.\n\n Parameters\n ----------\n image : array\n The input image.\n markers : int or array of int\n The marker image.\n mask : array, or None\n A boolean mask, True where we want to compute the watershed.\n connectivity : int in {1, ..., image.ndim}\n The connectivity of the neighborhood of a pixel.\n\n Returns\n -------\n image, markers, mask : arrays\n The validated and formatted arrays. Image will have dtype float64,\n markers int32, and mask int8. 
If ``None`` was given for the mask,\n it is a volume of all 1s.\n\n Raises\n ------\n ValueError\n If the shapes of the given arrays don't match.\n \"\"\"\n n_pixels = image.size\n if mask is None:\n # Use a complete `True` mask if none is provided\n mask = np.ones(image.shape, bool)\n else:\n mask = np.asanyarray(mask, dtype=bool)\n n_pixels = np.sum(mask)\n if mask.shape != image.shape:\n message = (f'`mask` (shape {mask.shape}) must have same shape '\n f'as `image` (shape {image.shape})')\n raise ValueError(message)\n if markers is None:\n markers_bool = local_minima(image, connectivity=connectivity) * mask\n footprint = ndi.generate_binary_structure(markers_bool.ndim, connectivity)\n markers = ndi.label(markers_bool, structure=footprint)[0]\n elif not isinstance(markers, (np.ndarray, list, tuple)):\n # not array-like, assume int\n # given int, assume that number of markers *within mask*.\n markers = regular_seeds(image.shape,\n int(markers / (n_pixels / image.size)))\n markers *= mask\n else:\n markers = np.asanyarray(markers) * mask\n if markers.shape != image.shape:\n message = (f'`markers` (shape {markers.shape}) must have same '\n f'shape as `image` (shape {image.shape})')\n raise ValueError(message)\n return (image.astype(np.float64),\n markers.astype(np.int32),\n mask.astype(np.int8))\n\n\ndef watershed(image, markers=None, connectivity=1, offset=None, mask=None,\n compactness=0, watershed_line=False):\n \"\"\"Find watershed basins in `image` flooded from given `markers`.\n\n Parameters\n ----------\n image : ndarray (2-D, 3-D, ...)\n Data array where the lowest value points are labeled first.\n markers : int, or ndarray of int, same shape as `image`, optional\n The desired number of markers, or an array marking the basins with the\n values to be assigned in the label matrix. Zero means not a marker. If\n ``None`` (no markers given), the local minima of the image are used as\n markers.\n connectivity : ndarray, optional\n An array with the same number of dimensions as `image` whose\n non-zero elements indicate neighbors for connection.\n Following the scipy convention, default is a one-connected array of\n the dimension of the image.\n offset : array_like of shape image.ndim, optional\n offset of the connectivity (one offset per dimension)\n mask : ndarray of bools or 0s and 1s, optional\n Array of same shape as `image`. Only points at which mask == True\n will be labeled.\n compactness : float, optional\n Use compact watershed [3]_ with given compactness parameter.\n Higher values result in more regularly-shaped watershed basins.\n watershed_line : bool, optional\n If watershed_line is True, a one-pixel wide line separates the regions\n obtained by the watershed algorithm. The line has the label 0.\n Note that the method used for adding this line expects that\n marker regions are not adjacent; the watershed line may not catch\n borders between adjacent marker regions.\n\n Returns\n -------\n out : ndarray\n A labeled matrix of the same type and shape as markers\n\n See Also\n --------\n skimage.segmentation.random_walker : random walker segmentation\n A segmentation algorithm based on anisotropic diffusion, usually\n slower than the watershed but with good results on noisy data and\n boundaries with holes.\n\n Notes\n -----\n This function implements a watershed algorithm [1]_ [2]_ that apportions\n pixels into marked basins. 
The algorithm uses a priority queue to hold\n the pixels with the metric for the priority queue being pixel value, then\n the time of entry into the queue - this settles ties in favor of the\n closest marker.\n\n Some ideas taken from\n Soille, \"Automated Basin Delineation from Digital Elevation Models Using\n Mathematical Morphology\", Signal Processing 20 (1990) 171-182\n\n The most important insight in the paper is that entry time onto the queue\n solves two problems: a pixel should be assigned to the neighbor with the\n largest gradient or, if there is no gradient, pixels on a plateau should\n be split between markers on opposite sides.\n\n This implementation converts all arguments to specific, lowest common\n denominator types, then passes these to a C algorithm.\n\n Markers can be determined manually, or automatically using for example\n the local minima of the gradient of the image, or the local maxima of the\n distance function to the background for separating overlapping objects\n (see example).\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Watershed_%28image_processing%29\n\n .. [2] http://cmm.ensmp.fr/~beucher/wtshed.html\n\n .. [3] Peer Neubert & Peter Protzel (2014). Compact Watershed and\n Preemptive SLIC: On Improving Trade-offs of Superpixel Segmentation\n Algorithms. ICPR 2014, pp 996-1001. :DOI:`10.1109/ICPR.2014.181`\n https://www.tu-chemnitz.de/etit/proaut/publications/cws_pSLIC_ICPR.pdf\n\n Examples\n --------\n The watershed algorithm is useful to separate overlapping objects.\n\n We first generate an initial image with two overlapping circles:\n\n >>> x, y = np.indices((80, 80))\n >>> x1, y1, x2, y2 = 28, 28, 44, 52\n >>> r1, r2 = 16, 20\n >>> mask_circle1 = (x - x1)**2 + (y - y1)**2 < r1**2\n >>> mask_circle2 = (x - x2)**2 + (y - y2)**2 < r2**2\n >>> image = np.logical_or(mask_circle1, mask_circle2)\n\n Next, we want to separate the two circles. We generate markers at the\n maxima of the distance to the background:\n\n >>> from scipy import ndimage as ndi\n >>> distance = ndi.distance_transform_edt(image)\n >>> from skimage.feature import peak_local_max\n >>> max_coords = peak_local_max(distance, labels=image,\n ... 
footprint=np.ones((3, 3)))\n >>> local_maxima = np.zeros_like(image, dtype=bool)\n >>> local_maxima[tuple(max_coords.T)] = True\n >>> markers = ndi.label(local_maxima)[0]\n\n Finally, we run the watershed on the image and markers:\n\n >>> labels = watershed(-distance, markers, mask=image)\n\n The algorithm works also for 3-D images, and can be used for example to\n separate overlapping spheres.\n \"\"\"\n image, markers, mask = _validate_inputs(image, markers, mask, connectivity)\n connectivity, offset = _validate_connectivity(image.ndim, connectivity,\n offset)\n\n # pad the image, markers, and mask so that we can use the mask to\n # keep from running off the edges\n pad_width = [(p, p) for p in offset]\n image = np.pad(image, pad_width, mode='constant')\n mask = np.pad(mask, pad_width, mode='constant').ravel()\n output = np.pad(markers, pad_width, mode='constant')\n\n flat_neighborhood = _offsets_to_raveled_neighbors(\n image.shape, connectivity, center=offset)\n marker_locations = np.flatnonzero(output)\n image_strides = np.array(image.strides, dtype=np.intp) // image.itemsize\n\n _watershed_cy.watershed_raveled(image.ravel(),\n marker_locations, flat_neighborhood,\n mask, image_strides, compactness,\n output.ravel(),\n watershed_line)\n\n output = crop(output, pad_width, copy=True)\n\n return output\n", "path": "skimage/segmentation/_watershed.py"}], "after_files": [{"content": "\"\"\"watershed.py - watershed algorithm\n\nThis module implements a watershed algorithm that apportions pixels into\nmarked basins. The algorithm uses a priority queue to hold the pixels\nwith the metric for the priority queue being pixel value, then the time\nof entry into the queue - this settles ties in favor of the closest marker.\n\nSome ideas taken from\nSoille, \"Automated Basin Delineation from Digital Elevation Models Using\nMathematical Morphology\", Signal Processing 20 (1990) 171-182.\n\nThe most important insight in the paper is that entry time onto the queue\nsolves two problems: a pixel should be assigned to the neighbor with the\nlargest gradient or, if there is no gradient, pixels on a plateau should\nbe split between markers on opposite sides.\n\"\"\"\n\nimport numpy as np\nfrom scipy import ndimage as ndi\n\nfrom . import _watershed_cy\nfrom ..morphology.extrema import local_minima\nfrom ..morphology._util import (_validate_connectivity,\n _offsets_to_raveled_neighbors)\nfrom ..util import crop, regular_seeds\n\n\ndef _validate_inputs(image, markers, mask, connectivity):\n \"\"\"Ensure that all inputs to watershed have matching shapes and types.\n\n Parameters\n ----------\n image : array\n The input image.\n markers : int or array of int\n The marker image.\n mask : array, or None\n A boolean mask, True where we want to compute the watershed.\n connectivity : int in {1, ..., image.ndim}\n The connectivity of the neighborhood of a pixel.\n\n Returns\n -------\n image, markers, mask : arrays\n The validated and formatted arrays. Image will have dtype float64,\n markers int32, and mask int8. 
If ``None`` was given for the mask,\n it is a volume of all 1s.\n\n Raises\n ------\n ValueError\n If the shapes of the given arrays don't match.\n \"\"\"\n n_pixels = image.size\n if mask is None:\n # Use a complete `True` mask if none is provided\n mask = np.ones(image.shape, bool)\n else:\n mask = np.asanyarray(mask, dtype=bool)\n n_pixels = np.sum(mask)\n if mask.shape != image.shape:\n message = (f'`mask` (shape {mask.shape}) must have same shape '\n f'as `image` (shape {image.shape})')\n raise ValueError(message)\n if markers is None:\n markers_bool = local_minima(image, connectivity=connectivity) * mask\n footprint = ndi.generate_binary_structure(markers_bool.ndim, connectivity)\n markers = ndi.label(markers_bool, structure=footprint)[0]\n elif not isinstance(markers, (np.ndarray, list, tuple)):\n # not array-like, assume int\n # given int, assume that number of markers *within mask*.\n markers = regular_seeds(image.shape,\n int(markers / (n_pixels / image.size)))\n markers *= mask\n else:\n markers = np.asanyarray(markers) * mask\n if markers.shape != image.shape:\n message = (f'`markers` (shape {markers.shape}) must have same '\n f'shape as `image` (shape {image.shape})')\n raise ValueError(message)\n return (image.astype(np.float64),\n markers,\n mask.astype(np.int8))\n\n\ndef watershed(image, markers=None, connectivity=1, offset=None, mask=None,\n compactness=0, watershed_line=False):\n \"\"\"Find watershed basins in `image` flooded from given `markers`.\n\n Parameters\n ----------\n image : ndarray (2-D, 3-D, ...)\n Data array where the lowest value points are labeled first.\n markers : int, or ndarray of int, same shape as `image`, optional\n The desired number of markers, or an array marking the basins with the\n values to be assigned in the label matrix. Zero means not a marker. If\n ``None`` (no markers given), the local minima of the image are used as\n markers.\n connectivity : ndarray, optional\n An array with the same number of dimensions as `image` whose\n non-zero elements indicate neighbors for connection.\n Following the scipy convention, default is a one-connected array of\n the dimension of the image.\n offset : array_like of shape image.ndim, optional\n offset of the connectivity (one offset per dimension)\n mask : ndarray of bools or 0s and 1s, optional\n Array of same shape as `image`. Only points at which mask == True\n will be labeled.\n compactness : float, optional\n Use compact watershed [3]_ with given compactness parameter.\n Higher values result in more regularly-shaped watershed basins.\n watershed_line : bool, optional\n If watershed_line is True, a one-pixel wide line separates the regions\n obtained by the watershed algorithm. The line has the label 0.\n Note that the method used for adding this line expects that\n marker regions are not adjacent; the watershed line may not catch\n borders between adjacent marker regions.\n\n Returns\n -------\n out : ndarray\n A labeled matrix of the same type and shape as markers\n\n See Also\n --------\n skimage.segmentation.random_walker : random walker segmentation\n A segmentation algorithm based on anisotropic diffusion, usually\n slower than the watershed but with good results on noisy data and\n boundaries with holes.\n\n Notes\n -----\n This function implements a watershed algorithm [1]_ [2]_ that apportions\n pixels into marked basins. 
The algorithm uses a priority queue to hold\n the pixels with the metric for the priority queue being pixel value, then\n the time of entry into the queue - this settles ties in favor of the\n closest marker.\n\n Some ideas taken from\n Soille, \"Automated Basin Delineation from Digital Elevation Models Using\n Mathematical Morphology\", Signal Processing 20 (1990) 171-182\n\n The most important insight in the paper is that entry time onto the queue\n solves two problems: a pixel should be assigned to the neighbor with the\n largest gradient or, if there is no gradient, pixels on a plateau should\n be split between markers on opposite sides.\n\n This implementation converts all arguments to specific, lowest common\n denominator types, then passes these to a C algorithm.\n\n Markers can be determined manually, or automatically using for example\n the local minima of the gradient of the image, or the local maxima of the\n distance function to the background for separating overlapping objects\n (see example).\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Watershed_%28image_processing%29\n\n .. [2] http://cmm.ensmp.fr/~beucher/wtshed.html\n\n .. [3] Peer Neubert & Peter Protzel (2014). Compact Watershed and\n Preemptive SLIC: On Improving Trade-offs of Superpixel Segmentation\n Algorithms. ICPR 2014, pp 996-1001. :DOI:`10.1109/ICPR.2014.181`\n https://www.tu-chemnitz.de/etit/proaut/publications/cws_pSLIC_ICPR.pdf\n\n Examples\n --------\n The watershed algorithm is useful to separate overlapping objects.\n\n We first generate an initial image with two overlapping circles:\n\n >>> x, y = np.indices((80, 80))\n >>> x1, y1, x2, y2 = 28, 28, 44, 52\n >>> r1, r2 = 16, 20\n >>> mask_circle1 = (x - x1)**2 + (y - y1)**2 < r1**2\n >>> mask_circle2 = (x - x2)**2 + (y - y2)**2 < r2**2\n >>> image = np.logical_or(mask_circle1, mask_circle2)\n\n Next, we want to separate the two circles. We generate markers at the\n maxima of the distance to the background:\n\n >>> from scipy import ndimage as ndi\n >>> distance = ndi.distance_transform_edt(image)\n >>> from skimage.feature import peak_local_max\n >>> max_coords = peak_local_max(distance, labels=image,\n ... 
footprint=np.ones((3, 3)))\n >>> local_maxima = np.zeros_like(image, dtype=bool)\n >>> local_maxima[tuple(max_coords.T)] = True\n >>> markers = ndi.label(local_maxima)[0]\n\n Finally, we run the watershed on the image and markers:\n\n >>> labels = watershed(-distance, markers, mask=image)\n\n The algorithm works also for 3-D images, and can be used for example to\n separate overlapping spheres.\n \"\"\"\n image, markers, mask = _validate_inputs(image, markers, mask, connectivity)\n connectivity, offset = _validate_connectivity(image.ndim, connectivity,\n offset)\n\n # pad the image, markers, and mask so that we can use the mask to\n # keep from running off the edges\n pad_width = [(p, p) for p in offset]\n image = np.pad(image, pad_width, mode='constant')\n mask = np.pad(mask, pad_width, mode='constant').ravel()\n output = np.pad(markers, pad_width, mode='constant')\n\n flat_neighborhood = _offsets_to_raveled_neighbors(\n image.shape, connectivity, center=offset)\n marker_locations = np.flatnonzero(output)\n image_strides = np.array(image.strides, dtype=np.intp) // image.itemsize\n\n _watershed_cy.watershed_raveled(image.ravel(),\n marker_locations, flat_neighborhood,\n mask, image_strides, compactness,\n output.ravel(),\n watershed_line)\n\n output = crop(output, pad_width, copy=True)\n\n return output\n", "path": "skimage/segmentation/_watershed.py"}]}
| 3,411 | 114 |
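As a quick check of the behaviour targeted by the golden diff above, the reproduction from the issue can be rerun after applying the patch; this is only an illustrative expectation for a patched build, since released scikit-image versions may still return `int32`.

```python
import numpy as np
from skimage.segmentation import watershed

markers = np.zeros((100, 100), dtype=np.uint16)
markers[25, 25], markers[75, 75] = 1, 2  # two non-zero seed labels

labels = watershed(np.zeros((100, 100)), markers)
print(labels.dtype)  # expected: uint16 once the astype(np.int32) cast is removed
```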
gh_patches_debug_1365
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-2255
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"checkov --add-check" failing due to missing templates directory in setup.py
When running `checkov --add-check`, you get an error due to the templates not being installed properly
```
gitpod /workspace/checkov $ checkov --add-check
_ _
___| |__ ___ ___| | _______ __
/ __| '_ \ / _ \/ __| |/ / _ \ \ / /
| (__| | | | __/ (__| < (_) \ V /
\___|_| |_|\___|\___|_|\_\___/ \_/
By bridgecrew.io | version: 2.0.744
What action would you like to take? (add) [add]:
Enter the title of your new check (without a .py) [MyNewTest]:
Select a category for this check (application_security, backup_and_recoveryconvention, encryption, general_security, iam, kubernetes, logging, networking, secrets) [iam]:
Describe what this check does [Ensure that X does Y...]:
What kind of check would you like to add? (terraform) [terraform]:
Select the cloud provider this will run on (azure, aws, gcp) [aws]:
Select a terraform object for this check (data, provider, resource) [resource]:
Enter the terraform object type [aws_iam_policy]:
Please ensure you are at the root of the Checkov repository before completing this prompt
Traceback (most recent call last):
File "/home/gitpod/.pyenv/versions/3.8.12/bin/checkov", line 9, in <module>
sys.exit(run())
File "/home/gitpod/.pyenv/versions/3.8.12/lib/python3.8/site-packages/checkov/main.py", line 77, in run
check.action()
File "/home/gitpod/.pyenv/versions/3.8.12/lib/python3.8/site-packages/checkov/common/util/prompt.py", line 184, in action
getattr(self, self.chosen_action)()
File "/home/gitpod/.pyenv/versions/3.8.12/lib/python3.8/site-packages/checkov/common/util/prompt.py", line 189, in add
self.populate_templates()
File "/home/gitpod/.pyenv/versions/3.8.12/lib/python3.8/site-packages/checkov/common/util/prompt.py", line 196, in populate_templates
tf_unit_test_template = self.template_env().get_template("unittest-terraform.jinja2")
File "/home/gitpod/.pyenv/versions/3.8.12/lib/python3.8/site-packages/checkov/common/util/prompt.py", line 167, in template_env
print("jinja2.list_templates: %s" % jinja2.list_templates())
AttributeError: module 'jinja2' has no attribute 'list_templates'
gitpod /workspace/checkov $
```
The problem occurs on Mac, regardless of whether checkov is installed using `pip3 install checkov` or `brew install checkov`. I think it will probably occur in other environments as well.
The fix inside checkov's gitpod environment seems to be fairly simple - just copy the template files from the repo into the installed package:
```
gitpod /workspace/checkov $ ls /home/gitpod/.pyenv/versions/3.8.12/lib/python3.8/site-packages/checkov/common/util/templates/
__init__.py __pycache__
gitpod /workspace/checkov $ cp checkov/common/util/templates/*.jinja2 /home/gitpod/.pyenv/versions/3.8.12/lib/python3.8/site-packages/checkov/common/util/templates/
gitpod /workspace/checkov $ checkov --add-check
_ _
___| |__ ___ ___| | _______ __
/ __| '_ \ / _ \/ __| |/ / _ \ \ / /
| (__| | | | __/ (__| < (_) \ V /
\___|_| |_|\___|\___|_|\_\___/ \_/
By bridgecrew.io | version: 2.0.744
What action would you like to take? (add) [add]:
Enter the title of your new check (without a .py) [MyNewTest]:
Select a category for this check (application_security, backup_and_recoveryconvention, encryption, general_security, iam, kubernetes, logging, networking, secrets) [iam]:
Describe what this check does [Ensure that X does Y...]:
What kind of check would you like to add? (terraform) [terraform]:
Select the cloud provider this will run on (azure, aws, gcp) [aws]:
Select a terraform object for this check (data, provider, resource) [resource]:
Enter the terraform object type [aws_iam_policy]:
Please ensure you are at the root of the Checkov repository before completing this prompt
Creating Check MyNewTest.py in /workspace/checkov/checkov/terraform/checks/resource/aws
Successfully created /workspace/checkov/checkov/terraform/checks/resource/aws/MyNewTest.py
Creating Unit Test Stubs for MyNewTest in /workspace/checkov/tests/terraform/checks/resource/aws
Successfully created /workspace/checkov/tests/terraform/checks/resource/aws/example_MyNewTest/MyNewTest.tf
Successfully created /workspace/checkov/tests/terraform/checks/resource/aws/test_MyNewTest.py
Next steps:
1) Edit your new check located in the checks/ directory listed above
2) Add both a PASS and FAIL unit test to the newly created unit test under the tests/ directory to show others how to fix failures
gitpod /workspace/checkov $
```
I _think_ the problem is simply due to the template files being inadvertently left out of checkov's `setup.py`, and that adding them into the `package_dir` section as below should probably fix things:
```
package_dir={
"checkov.terraform.checks.graph_checks": "checkov/terraform/checks/graph_checks",
"checkov.common.util.templates": "checkov.common.util.templates"
},
```
However there's a number of directories under `checkov.common` that maybe should also be added to checkov's `setup.py`, and I'm not familiar enough with all of checkov's use cases to know which ones are important
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 import logging
3 import os
4 from importlib import util
5 from os import path
6
7 import setuptools
8 from setuptools import setup
9
10 # read the contents of your README file
11 this_directory = path.abspath(path.dirname(__file__))
12 with open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
13 long_description = f.read()
14
15 logger = logging.getLogger(__name__)
16 spec = util.spec_from_file_location(
17 "checkov.version", os.path.join("checkov", "version.py")
18 )
19 # noinspection PyUnresolvedReferences
20 mod = util.module_from_spec(spec)
21 spec.loader.exec_module(mod) # type: ignore
22 version = mod.version # type: ignore
23
24 setup(
25 extras_require={
26 "dev": [
27 "pytest==5.3.1",
28 "coverage==5.5",
29 "coverage-badge",
30 "GitPython==3.1.7",
31 "bandit",
32 "jsonschema",
33 ]
34 },
35 install_requires=[
36 "bc-python-hcl2>=0.3.24",
37 "cloudsplaining>=0.4.1",
38 "deep_merge",
39 "tabulate",
40 "colorama",
41 "termcolor",
42 "junit-xml>=1.9",
43 "dpath>=1.5.0,<2",
44 "pyyaml>=5.4.1",
45 "boto3>=1.17",
46 "GitPython",
47 "jmespath",
48 "tqdm",
49 "update_checker",
50 "semantic_version",
51 "packaging",
52 "networkx",
53 "dockerfile-parse",
54 "docker",
55 "configargparse",
56 "argcomplete",
57 "detect-secrets",
58 "policyuniverse",
59 "typing-extensions",
60 "cachetools",
61 "cyclonedx-python-lib>=0.11.0,<1.0.0",
62 "click>=8.0.0",
63 "aiohttp",
64 "aiodns",
65 "aiomultiprocess",
66 "jsonpath_ng",
67 "jsonschema~=3.0"
68 ],
69 license="Apache License 2.0",
70 name="checkov",
71 version=version,
72 python_requires=">=3.7",
73 description="Infrastructure as code static analysis",
74 author="bridgecrew",
75 author_email="[email protected]",
76 url="https://github.com/bridgecrewio/checkov",
77 packages=setuptools.find_packages(exclude=["tests*", "integration_tests*"]),
78 include_package_data=True,
79 package_dir={
80 "checkov.terraform.checks.graph_checks": "checkov/terraform/checks/graph_checks"
81 },
82 package_data={
83 "checkov.terraform.checks.graph_checks": [
84 "aws/*.yaml",
85 "gcp/*.yaml",
86 "azure/*.yaml",
87 ]
88 },
89 scripts=["bin/checkov", "bin/checkov.cmd"],
90 long_description=long_description,
91 long_description_content_type="text/markdown",
92 classifiers=[
93 "Environment :: Console",
94 "Intended Audience :: Developers",
95 "Intended Audience :: System Administrators",
96 "Programming Language :: Python :: 3.7",
97 "Programming Language :: Python :: 3.8",
98 "Programming Language :: Python :: 3.9",
99 "Programming Language :: Python :: 3.10",
100 "Topic :: Security",
101 "Topic :: Software Development :: Build Tools",
102 ],
103 )
104
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -84,6 +84,9 @@
"aws/*.yaml",
"gcp/*.yaml",
"azure/*.yaml",
+ ],
+ "checkov.common.util.templates": [
+ "*.jinja2"
]
},
scripts=["bin/checkov", "bin/checkov.cmd"],
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -84,6 +84,9 @@\n \"aws/*.yaml\",\n \"gcp/*.yaml\",\n \"azure/*.yaml\",\n+ ],\n+ \"checkov.common.util.templates\": [\n+ \"*.jinja2\"\n ]\n },\n scripts=[\"bin/checkov\", \"bin/checkov.cmd\"],\n", "issue": "\"checkov --add-check\" failing due to missing templates directory in setup.py\nWhen running `checkov --add-check`, you get an error due to the templates not being installed properly\r\n\r\n```\r\ngitpod /workspace/checkov $ checkov --add-check\r\n\r\n _ _ \r\n ___| |__ ___ ___| | _______ __\r\n / __| '_ \\ / _ \\/ __| |/ / _ \\ \\ / /\r\n | (__| | | | __/ (__| < (_) \\ V / \r\n \\___|_| |_|\\___|\\___|_|\\_\\___/ \\_/ \r\n \r\nBy bridgecrew.io | version: 2.0.744 \r\nWhat action would you like to take? (add) [add]: \r\n\r\nEnter the title of your new check (without a .py) [MyNewTest]: \r\n\r\nSelect a category for this check (application_security, backup_and_recoveryconvention, encryption, general_security, iam, kubernetes, logging, networking, secrets) [iam]: \r\n\r\nDescribe what this check does [Ensure that X does Y...]: \r\n\r\nWhat kind of check would you like to add? (terraform) [terraform]: \r\n\r\nSelect the cloud provider this will run on (azure, aws, gcp) [aws]: \r\n\r\nSelect a terraform object for this check (data, provider, resource) [resource]: \r\n\r\nEnter the terraform object type [aws_iam_policy]: \r\n\r\nPlease ensure you are at the root of the Checkov repository before completing this prompt\r\nTraceback (most recent call last):\r\n File \"/home/gitpod/.pyenv/versions/3.8.12/bin/checkov\", line 9, in <module>\r\n sys.exit(run())\r\n File \"/home/gitpod/.pyenv/versions/3.8.12/lib/python3.8/site-packages/checkov/main.py\", line 77, in run\r\n check.action()\r\n File \"/home/gitpod/.pyenv/versions/3.8.12/lib/python3.8/site-packages/checkov/common/util/prompt.py\", line 184, in action\r\n getattr(self, self.chosen_action)()\r\n File \"/home/gitpod/.pyenv/versions/3.8.12/lib/python3.8/site-packages/checkov/common/util/prompt.py\", line 189, in add\r\n self.populate_templates()\r\n File \"/home/gitpod/.pyenv/versions/3.8.12/lib/python3.8/site-packages/checkov/common/util/prompt.py\", line 196, in populate_templates\r\n tf_unit_test_template = self.template_env().get_template(\"unittest-terraform.jinja2\")\r\n File \"/home/gitpod/.pyenv/versions/3.8.12/lib/python3.8/site-packages/checkov/common/util/prompt.py\", line 167, in template_env\r\n print(\"jinja2.list_templates: %s\" % jinja2.list_templates())\r\nAttributeError: module 'jinja2' has no attribute 'list_templates'\r\ngitpod /workspace/checkov $ \r\n```\r\nThe problem occurs on Mac, regardless of whether checkov is installed using `pip3 install checkov` or `brew install checkov`. 
I think it will probably occur in other environments as well.\r\n\r\nThe fix inside checkov's gitpod environment seems to be fairly simple - just copy the template files from the repo into the installed package:\r\n```\r\ngitpod /workspace/checkov $ ls /home/gitpod/.pyenv/versions/3.8.12/lib/python3.8/site-packages/checkov/common/util/templates/\r\n__init__.py __pycache__\r\ngitpod /workspace/checkov $ cp checkov/common/util/templates/*.jinja2 /home/gitpod/.pyenv/versions/3.8.12/lib/python3.8/site-packages/checkov/common/util/templates/\r\ngitpod /workspace/checkov $ checkov --add-check\r\n _ _ \r\n ___| |__ ___ ___| | _______ __\r\n / __| '_ \\ / _ \\/ __| |/ / _ \\ \\ / /\r\n | (__| | | | __/ (__| < (_) \\ V / \r\n \\___|_| |_|\\___|\\___|_|\\_\\___/ \\_/ \r\n \r\nBy bridgecrew.io | version: 2.0.744 \r\nWhat action would you like to take? (add) [add]: \r\n\r\nEnter the title of your new check (without a .py) [MyNewTest]: \r\n\r\nSelect a category for this check (application_security, backup_and_recoveryconvention, encryption, general_security, iam, kubernetes, logging, networking, secrets) [iam]: \r\n\r\nDescribe what this check does [Ensure that X does Y...]: \r\n\r\nWhat kind of check would you like to add? (terraform) [terraform]: \r\n\r\nSelect the cloud provider this will run on (azure, aws, gcp) [aws]: \r\n\r\nSelect a terraform object for this check (data, provider, resource) [resource]: \r\n\r\nEnter the terraform object type [aws_iam_policy]: \r\n\r\nPlease ensure you are at the root of the Checkov repository before completing this prompt\r\nCreating Check MyNewTest.py in /workspace/checkov/checkov/terraform/checks/resource/aws\r\n Successfully created /workspace/checkov/checkov/terraform/checks/resource/aws/MyNewTest.py\r\nCreating Unit Test Stubs for MyNewTest in /workspace/checkov/tests/terraform/checks/resource/aws\r\n Successfully created /workspace/checkov/tests/terraform/checks/resource/aws/example_MyNewTest/MyNewTest.tf\r\n Successfully created /workspace/checkov/tests/terraform/checks/resource/aws/test_MyNewTest.py\r\n\r\nNext steps:\r\n 1) Edit your new check located in the checks/ directory listed above\r\n 2) Add both a PASS and FAIL unit test to the newly created unit test under the tests/ directory to show others how to fix failures\r\n\r\ngitpod /workspace/checkov $ \r\n```\r\n\r\nI _think_ the problem is simply due to the template files being inadvertently left out of checkov's `setup.py`, and that adding them into the `package_dir` section as below should probably fix things:\r\n```\r\n package_dir={\r\n \"checkov.terraform.checks.graph_checks\": \"checkov/terraform/checks/graph_checks\",\r\n \"checkov.common.util.templates\": \"checkov.common.util.templates\"\r\n },\r\n```\r\n\r\nHowever there's a number of directories under `checkov.common` that maybe should also be added to checkov's `setup.py`, and I'm not familiar enough with all of checkov's use cases to know which ones are important\n", "before_files": [{"content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = 
util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = mod.version # type: ignore\n\nsetup(\n extras_require={\n \"dev\": [\n \"pytest==5.3.1\",\n \"coverage==5.5\",\n \"coverage-badge\",\n \"GitPython==3.1.7\",\n \"bandit\",\n \"jsonschema\",\n ]\n },\n install_requires=[\n \"bc-python-hcl2>=0.3.24\",\n \"cloudsplaining>=0.4.1\",\n \"deep_merge\",\n \"tabulate\",\n \"colorama\",\n \"termcolor\",\n \"junit-xml>=1.9\",\n \"dpath>=1.5.0,<2\",\n \"pyyaml>=5.4.1\",\n \"boto3>=1.17\",\n \"GitPython\",\n \"jmespath\",\n \"tqdm\",\n \"update_checker\",\n \"semantic_version\",\n \"packaging\",\n \"networkx\",\n \"dockerfile-parse\",\n \"docker\",\n \"configargparse\",\n \"argcomplete\",\n \"detect-secrets\",\n \"policyuniverse\",\n \"typing-extensions\",\n \"cachetools\",\n \"cyclonedx-python-lib>=0.11.0,<1.0.0\",\n \"click>=8.0.0\",\n \"aiohttp\",\n \"aiodns\",\n \"aiomultiprocess\",\n \"jsonpath_ng\",\n \"jsonschema~=3.0\"\n ],\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n python_requires=\">=3.7\",\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/bridgecrewio/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\", \"integration_tests*\"]),\n include_package_data=True,\n package_dir={\n \"checkov.terraform.checks.graph_checks\": \"checkov/terraform/checks/graph_checks\"\n },\n package_data={\n \"checkov.terraform.checks.graph_checks\": [\n \"aws/*.yaml\",\n \"gcp/*.yaml\",\n \"azure/*.yaml\",\n ]\n },\n scripts=[\"bin/checkov\", \"bin/checkov.cmd\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Security\",\n \"Topic :: Software Development :: Build Tools\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = mod.version # type: ignore\n\nsetup(\n extras_require={\n \"dev\": [\n \"pytest==5.3.1\",\n \"coverage==5.5\",\n \"coverage-badge\",\n \"GitPython==3.1.7\",\n \"bandit\",\n \"jsonschema\",\n ]\n },\n install_requires=[\n \"bc-python-hcl2>=0.3.24\",\n \"cloudsplaining>=0.4.1\",\n \"deep_merge\",\n \"tabulate\",\n \"colorama\",\n \"termcolor\",\n \"junit-xml>=1.9\",\n \"dpath>=1.5.0,<2\",\n \"pyyaml>=5.4.1\",\n \"boto3>=1.17\",\n \"GitPython\",\n \"jmespath\",\n \"tqdm\",\n \"update_checker\",\n \"semantic_version\",\n \"packaging\",\n \"networkx\",\n \"dockerfile-parse\",\n \"docker\",\n \"configargparse\",\n \"argcomplete\",\n \"detect-secrets\",\n \"policyuniverse\",\n \"typing-extensions\",\n \"cachetools\",\n 
\"cyclonedx-python-lib>=0.11.0,<1.0.0\",\n \"click>=8.0.0\",\n \"aiohttp\",\n \"aiodns\",\n \"aiomultiprocess\",\n \"jsonpath_ng\",\n \"jsonschema~=3.0\"\n ],\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n python_requires=\">=3.7\",\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/bridgecrewio/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\", \"integration_tests*\"]),\n include_package_data=True,\n package_dir={\n \"checkov.terraform.checks.graph_checks\": \"checkov/terraform/checks/graph_checks\"\n },\n package_data={\n \"checkov.terraform.checks.graph_checks\": [\n \"aws/*.yaml\",\n \"gcp/*.yaml\",\n \"azure/*.yaml\",\n ],\n \"checkov.common.util.templates\": [\n \"*.jinja2\"\n ]\n },\n scripts=[\"bin/checkov\", \"bin/checkov.cmd\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Security\",\n \"Topic :: Software Development :: Build Tools\",\n ],\n)\n", "path": "setup.py"}]}
| 2,603 | 88 |
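To confirm that the `package_data` entry added in the golden diff above actually ships the Jinja2 templates, the installed package directory can be listed; this assumes checkov was reinstalled from a build that includes the patched `setup.py`, and the filename in the comment is the one named in the issue's traceback.

```python
import os
import checkov.common.util.templates as templates

template_dir = os.path.dirname(templates.__file__)
print(sorted(f for f in os.listdir(template_dir) if f.endswith(".jinja2")))
# an empty list means the wheel was built without the templates;
# a fixed build should include e.g. "unittest-terraform.jinja2"
```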
gh_patches_debug_25534
|
rasdani/github-patches
|
git_diff
|
getredash__redash-3078
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
URL query runner: URL base path doesn't need to be a required field
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redash/query_runner/url.py`
Content:
```
1 from redash.query_runner import BaseHTTPQueryRunner, register
2
3
4 class Url(BaseHTTPQueryRunner):
5
6 @classmethod
7 def annotate_query(cls):
8 return False
9
10 def test_connection(self):
11 pass
12
13 def run_query(self, query, user):
14 base_url = self.configuration.get("url", None)
15
16 try:
17 query = query.strip()
18
19 if base_url is not None and base_url != "":
20 if query.find("://") > -1:
21 return None, "Accepting only relative URLs to '%s'" % base_url
22
23 if base_url is None:
24 base_url = ""
25
26 url = base_url + query
27
28 response, error = self.get_response(url)
29 if error is not None:
30 return None, error
31
32 json_data = response.content.strip()
33
34 if json_data:
35 return json_data, None
36 else:
37 return None, "Got empty response from '{}'.".format(url)
38 except KeyboardInterrupt:
39 return None, "Query cancelled by user."
40
41
42 register(Url)
43
```
Path: `redash/query_runner/__init__.py`
Content:
```
1 import logging
2 import requests
3
4 from redash import settings
5 from redash.utils import json_loads
6
7 logger = logging.getLogger(__name__)
8
9 __all__ = [
10 'BaseQueryRunner',
11 'BaseHTTPQueryRunner',
12 'InterruptException',
13 'BaseSQLQueryRunner',
14 'TYPE_DATETIME',
15 'TYPE_BOOLEAN',
16 'TYPE_INTEGER',
17 'TYPE_STRING',
18 'TYPE_DATE',
19 'TYPE_FLOAT',
20 'SUPPORTED_COLUMN_TYPES',
21 'register',
22 'get_query_runner',
23 'import_query_runners'
24 ]
25
26 # Valid types of columns returned in results:
27 TYPE_INTEGER = 'integer'
28 TYPE_FLOAT = 'float'
29 TYPE_BOOLEAN = 'boolean'
30 TYPE_STRING = 'string'
31 TYPE_DATETIME = 'datetime'
32 TYPE_DATE = 'date'
33
34 SUPPORTED_COLUMN_TYPES = set([
35 TYPE_INTEGER,
36 TYPE_FLOAT,
37 TYPE_BOOLEAN,
38 TYPE_STRING,
39 TYPE_DATETIME,
40 TYPE_DATE
41 ])
42
43
44 class InterruptException(Exception):
45 pass
46
47
48 class NotSupported(Exception):
49 pass
50
51
52 class BaseQueryRunner(object):
53 noop_query = None
54
55 def __init__(self, configuration):
56 self.syntax = 'sql'
57 self.configuration = configuration
58
59 @classmethod
60 def name(cls):
61 return cls.__name__
62
63 @classmethod
64 def type(cls):
65 return cls.__name__.lower()
66
67 @classmethod
68 def enabled(cls):
69 return True
70
71 @classmethod
72 def annotate_query(cls):
73 return True
74
75 @classmethod
76 def configuration_schema(cls):
77 return {}
78
79 def test_connection(self):
80 if self.noop_query is None:
81 raise NotImplementedError()
82 data, error = self.run_query(self.noop_query, None)
83
84 if error is not None:
85 raise Exception(error)
86
87 def run_query(self, query, user):
88 raise NotImplementedError()
89
90 def fetch_columns(self, columns):
91 column_names = []
92 duplicates_counter = 1
93 new_columns = []
94
95 for col in columns:
96 column_name = col[0]
97 if column_name in column_names:
98 column_name = "{}{}".format(column_name, duplicates_counter)
99 duplicates_counter += 1
100
101 column_names.append(column_name)
102 new_columns.append({'name': column_name,
103 'friendly_name': column_name,
104 'type': col[1]})
105
106 return new_columns
107
108 def get_schema(self, get_stats=False):
109 raise NotSupported()
110
111 def _run_query_internal(self, query):
112 results, error = self.run_query(query, None)
113
114 if error is not None:
115 raise Exception("Failed running query [%s]." % query)
116 return json_loads(results)['rows']
117
118 @classmethod
119 def to_dict(cls):
120 return {
121 'name': cls.name(),
122 'type': cls.type(),
123 'configuration_schema': cls.configuration_schema()
124 }
125
126
127 class BaseSQLQueryRunner(BaseQueryRunner):
128
129 def get_schema(self, get_stats=False):
130 schema_dict = {}
131 self._get_tables(schema_dict)
132 if settings.SCHEMA_RUN_TABLE_SIZE_CALCULATIONS and get_stats:
133 self._get_tables_stats(schema_dict)
134 return schema_dict.values()
135
136 def _get_tables(self, schema_dict):
137 return []
138
139 def _get_tables_stats(self, tables_dict):
140 for t in tables_dict.keys():
141 if type(tables_dict[t]) == dict:
142 res = self._run_query_internal('select count(*) as cnt from %s' % t)
143 tables_dict[t]['size'] = res[0]['cnt']
144
145
146 class BaseHTTPQueryRunner(BaseQueryRunner):
147 response_error = "Endpoint returned unexpected status code"
148 requires_authentication = False
149 url_title = 'URL base path'
150 username_title = 'HTTP Basic Auth Username'
151 password_title = 'HTTP Basic Auth Password'
152
153 @classmethod
154 def configuration_schema(cls):
155 schema = {
156 'type': 'object',
157 'properties': {
158 'url': {
159 'type': 'string',
160 'title': cls.url_title,
161 },
162 'username': {
163 'type': 'string',
164 'title': cls.username_title,
165 },
166 'password': {
167 'type': 'string',
168 'title': cls.password_title,
169 },
170 },
171 'required': ['url'],
172 'secret': ['password']
173 }
174 if cls.requires_authentication:
175 schema['required'] += ['username', 'password']
176 return schema
177
178 def get_auth(self):
179 username = self.configuration.get('username')
180 password = self.configuration.get('password')
181 if username and password:
182 return (username, password)
183 if self.requires_authentication:
184 raise ValueError("Username and Password required")
185 else:
186 return None
187
188 def get_response(self, url, auth=None, **kwargs):
189 # Get authentication values if not given
190 if auth is None:
191 auth = self.get_auth()
192
193 # Then call requests to get the response from the given endpoint
194 # URL optionally, with the additional requests parameters.
195 error = None
196 response = None
197 try:
198 response = requests.get(url, auth=auth, **kwargs)
199 # Raise a requests HTTP exception with the appropriate reason
200 # for 4xx and 5xx response status codes which is later caught
201 # and passed back.
202 response.raise_for_status()
203
204 # Any other responses (e.g. 2xx and 3xx):
205 if response.status_code != 200:
206 error = '{} ({}).'.format(
207 self.response_error,
208 response.status_code,
209 )
210
211 except requests.HTTPError as exc:
212 logger.exception(exc)
213 error = (
214 "Failed to execute query. "
215 "Return Code: {} Reason: {}".format(
216 response.status_code,
217 response.text
218 )
219 )
220 except requests.RequestException as exc:
221 # Catch all other requests exceptions and return the error.
222 logger.exception(exc)
223 error = str(exc)
224
225 # Return response and error.
226 return response, error
227
228
229 query_runners = {}
230
231
232 def register(query_runner_class):
233 global query_runners
234 if query_runner_class.enabled():
235 logger.debug("Registering %s (%s) query runner.", query_runner_class.name(), query_runner_class.type())
236 query_runners[query_runner_class.type()] = query_runner_class
237 else:
238 logger.debug("%s query runner enabled but not supported, not registering. Either disable or install missing "
239 "dependencies.", query_runner_class.name())
240
241
242 def get_query_runner(query_runner_type, configuration):
243 query_runner_class = query_runners.get(query_runner_type, None)
244 if query_runner_class is None:
245 return None
246
247 return query_runner_class(configuration)
248
249
250 def get_configuration_schema_for_query_runner_type(query_runner_type):
251 query_runner_class = query_runners.get(query_runner_type, None)
252 if query_runner_class is None:
253 return None
254
255 return query_runner_class.configuration_schema()
256
257
258 def import_query_runners(query_runner_imports):
259 for runner_import in query_runner_imports:
260 __import__(runner_import)
261
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/redash/query_runner/__init__.py b/redash/query_runner/__init__.py
--- a/redash/query_runner/__init__.py
+++ b/redash/query_runner/__init__.py
@@ -146,6 +146,7 @@
class BaseHTTPQueryRunner(BaseQueryRunner):
response_error = "Endpoint returned unexpected status code"
requires_authentication = False
+ requires_url = True
url_title = 'URL base path'
username_title = 'HTTP Basic Auth Username'
password_title = 'HTTP Basic Auth Password'
@@ -168,9 +169,15 @@
'title': cls.password_title,
},
},
- 'required': ['url'],
'secret': ['password']
}
+
+ if cls.requires_url or cls.requires_authentication:
+ schema['required'] = []
+
+ if cls.requires_url:
+ schema['required'] += ['url']
+
if cls.requires_authentication:
schema['required'] += ['username', 'password']
return schema
diff --git a/redash/query_runner/url.py b/redash/query_runner/url.py
--- a/redash/query_runner/url.py
+++ b/redash/query_runner/url.py
@@ -2,6 +2,7 @@
class Url(BaseHTTPQueryRunner):
+ requires_url = False
@classmethod
def annotate_query(cls):
|
{"golden_diff": "diff --git a/redash/query_runner/__init__.py b/redash/query_runner/__init__.py\n--- a/redash/query_runner/__init__.py\n+++ b/redash/query_runner/__init__.py\n@@ -146,6 +146,7 @@\n class BaseHTTPQueryRunner(BaseQueryRunner):\n response_error = \"Endpoint returned unexpected status code\"\n requires_authentication = False\n+ requires_url = True\n url_title = 'URL base path'\n username_title = 'HTTP Basic Auth Username'\n password_title = 'HTTP Basic Auth Password'\n@@ -168,9 +169,15 @@\n 'title': cls.password_title,\n },\n },\n- 'required': ['url'],\n 'secret': ['password']\n }\n+\n+ if cls.requires_url or cls.requires_authentication:\n+ schema['required'] = []\n+\n+ if cls.requires_url:\n+ schema['required'] += ['url']\n+\n if cls.requires_authentication:\n schema['required'] += ['username', 'password']\n return schema\ndiff --git a/redash/query_runner/url.py b/redash/query_runner/url.py\n--- a/redash/query_runner/url.py\n+++ b/redash/query_runner/url.py\n@@ -2,6 +2,7 @@\n \n \n class Url(BaseHTTPQueryRunner):\n+ requires_url = False\n \n @classmethod\n def annotate_query(cls):\n", "issue": "URL query runner: URL base path doesn't need to be a required field\n\n", "before_files": [{"content": "from redash.query_runner import BaseHTTPQueryRunner, register\n\n\nclass Url(BaseHTTPQueryRunner):\n\n @classmethod\n def annotate_query(cls):\n return False\n\n def test_connection(self):\n pass\n\n def run_query(self, query, user):\n base_url = self.configuration.get(\"url\", None)\n\n try:\n query = query.strip()\n\n if base_url is not None and base_url != \"\":\n if query.find(\"://\") > -1:\n return None, \"Accepting only relative URLs to '%s'\" % base_url\n\n if base_url is None:\n base_url = \"\"\n\n url = base_url + query\n\n response, error = self.get_response(url)\n if error is not None:\n return None, error\n\n json_data = response.content.strip()\n\n if json_data:\n return json_data, None\n else:\n return None, \"Got empty response from '{}'.\".format(url)\n except KeyboardInterrupt:\n return None, \"Query cancelled by user.\"\n\n\nregister(Url)\n", "path": "redash/query_runner/url.py"}, {"content": "import logging\nimport requests\n\nfrom redash import settings\nfrom redash.utils import json_loads\n\nlogger = logging.getLogger(__name__)\n\n__all__ = [\n 'BaseQueryRunner',\n 'BaseHTTPQueryRunner',\n 'InterruptException',\n 'BaseSQLQueryRunner',\n 'TYPE_DATETIME',\n 'TYPE_BOOLEAN',\n 'TYPE_INTEGER',\n 'TYPE_STRING',\n 'TYPE_DATE',\n 'TYPE_FLOAT',\n 'SUPPORTED_COLUMN_TYPES',\n 'register',\n 'get_query_runner',\n 'import_query_runners'\n]\n\n# Valid types of columns returned in results:\nTYPE_INTEGER = 'integer'\nTYPE_FLOAT = 'float'\nTYPE_BOOLEAN = 'boolean'\nTYPE_STRING = 'string'\nTYPE_DATETIME = 'datetime'\nTYPE_DATE = 'date'\n\nSUPPORTED_COLUMN_TYPES = set([\n TYPE_INTEGER,\n TYPE_FLOAT,\n TYPE_BOOLEAN,\n TYPE_STRING,\n TYPE_DATETIME,\n TYPE_DATE\n])\n\n\nclass InterruptException(Exception):\n pass\n\n\nclass NotSupported(Exception):\n pass\n\n\nclass BaseQueryRunner(object):\n noop_query = None\n\n def __init__(self, configuration):\n self.syntax = 'sql'\n self.configuration = configuration\n\n @classmethod\n def name(cls):\n return cls.__name__\n\n @classmethod\n def type(cls):\n return cls.__name__.lower()\n\n @classmethod\n def enabled(cls):\n return True\n\n @classmethod\n def annotate_query(cls):\n return True\n\n @classmethod\n def configuration_schema(cls):\n return {}\n\n def test_connection(self):\n if self.noop_query is None:\n raise NotImplementedError()\n 
data, error = self.run_query(self.noop_query, None)\n\n if error is not None:\n raise Exception(error)\n\n def run_query(self, query, user):\n raise NotImplementedError()\n\n def fetch_columns(self, columns):\n column_names = []\n duplicates_counter = 1\n new_columns = []\n\n for col in columns:\n column_name = col[0]\n if column_name in column_names:\n column_name = \"{}{}\".format(column_name, duplicates_counter)\n duplicates_counter += 1\n\n column_names.append(column_name)\n new_columns.append({'name': column_name,\n 'friendly_name': column_name,\n 'type': col[1]})\n\n return new_columns\n\n def get_schema(self, get_stats=False):\n raise NotSupported()\n\n def _run_query_internal(self, query):\n results, error = self.run_query(query, None)\n\n if error is not None:\n raise Exception(\"Failed running query [%s].\" % query)\n return json_loads(results)['rows']\n\n @classmethod\n def to_dict(cls):\n return {\n 'name': cls.name(),\n 'type': cls.type(),\n 'configuration_schema': cls.configuration_schema()\n }\n\n\nclass BaseSQLQueryRunner(BaseQueryRunner):\n\n def get_schema(self, get_stats=False):\n schema_dict = {}\n self._get_tables(schema_dict)\n if settings.SCHEMA_RUN_TABLE_SIZE_CALCULATIONS and get_stats:\n self._get_tables_stats(schema_dict)\n return schema_dict.values()\n\n def _get_tables(self, schema_dict):\n return []\n\n def _get_tables_stats(self, tables_dict):\n for t in tables_dict.keys():\n if type(tables_dict[t]) == dict:\n res = self._run_query_internal('select count(*) as cnt from %s' % t)\n tables_dict[t]['size'] = res[0]['cnt']\n\n\nclass BaseHTTPQueryRunner(BaseQueryRunner):\n response_error = \"Endpoint returned unexpected status code\"\n requires_authentication = False\n url_title = 'URL base path'\n username_title = 'HTTP Basic Auth Username'\n password_title = 'HTTP Basic Auth Password'\n\n @classmethod\n def configuration_schema(cls):\n schema = {\n 'type': 'object',\n 'properties': {\n 'url': {\n 'type': 'string',\n 'title': cls.url_title,\n },\n 'username': {\n 'type': 'string',\n 'title': cls.username_title,\n },\n 'password': {\n 'type': 'string',\n 'title': cls.password_title,\n },\n },\n 'required': ['url'],\n 'secret': ['password']\n }\n if cls.requires_authentication:\n schema['required'] += ['username', 'password']\n return schema\n\n def get_auth(self):\n username = self.configuration.get('username')\n password = self.configuration.get('password')\n if username and password:\n return (username, password)\n if self.requires_authentication:\n raise ValueError(\"Username and Password required\")\n else:\n return None\n\n def get_response(self, url, auth=None, **kwargs):\n # Get authentication values if not given\n if auth is None:\n auth = self.get_auth()\n\n # Then call requests to get the response from the given endpoint\n # URL optionally, with the additional requests parameters.\n error = None\n response = None\n try:\n response = requests.get(url, auth=auth, **kwargs)\n # Raise a requests HTTP exception with the appropriate reason\n # for 4xx and 5xx response status codes which is later caught\n # and passed back.\n response.raise_for_status()\n\n # Any other responses (e.g. 2xx and 3xx):\n if response.status_code != 200:\n error = '{} ({}).'.format(\n self.response_error,\n response.status_code,\n )\n\n except requests.HTTPError as exc:\n logger.exception(exc)\n error = (\n \"Failed to execute query. 
\"\n \"Return Code: {} Reason: {}\".format(\n response.status_code,\n response.text\n )\n )\n except requests.RequestException as exc:\n # Catch all other requests exceptions and return the error.\n logger.exception(exc)\n error = str(exc)\n\n # Return response and error.\n return response, error\n\n\nquery_runners = {}\n\n\ndef register(query_runner_class):\n global query_runners\n if query_runner_class.enabled():\n logger.debug(\"Registering %s (%s) query runner.\", query_runner_class.name(), query_runner_class.type())\n query_runners[query_runner_class.type()] = query_runner_class\n else:\n logger.debug(\"%s query runner enabled but not supported, not registering. Either disable or install missing \"\n \"dependencies.\", query_runner_class.name())\n\n\ndef get_query_runner(query_runner_type, configuration):\n query_runner_class = query_runners.get(query_runner_type, None)\n if query_runner_class is None:\n return None\n\n return query_runner_class(configuration)\n\n\ndef get_configuration_schema_for_query_runner_type(query_runner_type):\n query_runner_class = query_runners.get(query_runner_type, None)\n if query_runner_class is None:\n return None\n\n return query_runner_class.configuration_schema()\n\n\ndef import_query_runners(query_runner_imports):\n for runner_import in query_runner_imports:\n __import__(runner_import)\n", "path": "redash/query_runner/__init__.py"}], "after_files": [{"content": "from redash.query_runner import BaseHTTPQueryRunner, register\n\n\nclass Url(BaseHTTPQueryRunner):\n requires_url = False\n\n @classmethod\n def annotate_query(cls):\n return False\n\n def test_connection(self):\n pass\n\n def run_query(self, query, user):\n base_url = self.configuration.get(\"url\", None)\n\n try:\n query = query.strip()\n\n if base_url is not None and base_url != \"\":\n if query.find(\"://\") > -1:\n return None, \"Accepting only relative URLs to '%s'\" % base_url\n\n if base_url is None:\n base_url = \"\"\n\n url = base_url + query\n\n response, error = self.get_response(url)\n if error is not None:\n return None, error\n\n json_data = response.content.strip()\n\n if json_data:\n return json_data, None\n else:\n return None, \"Got empty response from '{}'.\".format(url)\n except KeyboardInterrupt:\n return None, \"Query cancelled by user.\"\n\n\nregister(Url)\n", "path": "redash/query_runner/url.py"}, {"content": "import logging\nimport requests\n\nfrom redash import settings\nfrom redash.utils import json_loads\n\nlogger = logging.getLogger(__name__)\n\n__all__ = [\n 'BaseQueryRunner',\n 'BaseHTTPQueryRunner',\n 'InterruptException',\n 'BaseSQLQueryRunner',\n 'TYPE_DATETIME',\n 'TYPE_BOOLEAN',\n 'TYPE_INTEGER',\n 'TYPE_STRING',\n 'TYPE_DATE',\n 'TYPE_FLOAT',\n 'SUPPORTED_COLUMN_TYPES',\n 'register',\n 'get_query_runner',\n 'import_query_runners'\n]\n\n# Valid types of columns returned in results:\nTYPE_INTEGER = 'integer'\nTYPE_FLOAT = 'float'\nTYPE_BOOLEAN = 'boolean'\nTYPE_STRING = 'string'\nTYPE_DATETIME = 'datetime'\nTYPE_DATE = 'date'\n\nSUPPORTED_COLUMN_TYPES = set([\n TYPE_INTEGER,\n TYPE_FLOAT,\n TYPE_BOOLEAN,\n TYPE_STRING,\n TYPE_DATETIME,\n TYPE_DATE\n])\n\n\nclass InterruptException(Exception):\n pass\n\n\nclass NotSupported(Exception):\n pass\n\n\nclass BaseQueryRunner(object):\n noop_query = None\n\n def __init__(self, configuration):\n self.syntax = 'sql'\n self.configuration = configuration\n\n @classmethod\n def name(cls):\n return cls.__name__\n\n @classmethod\n def type(cls):\n return cls.__name__.lower()\n\n @classmethod\n def enabled(cls):\n return 
True\n\n @classmethod\n def annotate_query(cls):\n return True\n\n @classmethod\n def configuration_schema(cls):\n return {}\n\n def test_connection(self):\n if self.noop_query is None:\n raise NotImplementedError()\n data, error = self.run_query(self.noop_query, None)\n\n if error is not None:\n raise Exception(error)\n\n def run_query(self, query, user):\n raise NotImplementedError()\n\n def fetch_columns(self, columns):\n column_names = []\n duplicates_counter = 1\n new_columns = []\n\n for col in columns:\n column_name = col[0]\n if column_name in column_names:\n column_name = \"{}{}\".format(column_name, duplicates_counter)\n duplicates_counter += 1\n\n column_names.append(column_name)\n new_columns.append({'name': column_name,\n 'friendly_name': column_name,\n 'type': col[1]})\n\n return new_columns\n\n def get_schema(self, get_stats=False):\n raise NotSupported()\n\n def _run_query_internal(self, query):\n results, error = self.run_query(query, None)\n\n if error is not None:\n raise Exception(\"Failed running query [%s].\" % query)\n return json_loads(results)['rows']\n\n @classmethod\n def to_dict(cls):\n return {\n 'name': cls.name(),\n 'type': cls.type(),\n 'configuration_schema': cls.configuration_schema()\n }\n\n\nclass BaseSQLQueryRunner(BaseQueryRunner):\n\n def get_schema(self, get_stats=False):\n schema_dict = {}\n self._get_tables(schema_dict)\n if settings.SCHEMA_RUN_TABLE_SIZE_CALCULATIONS and get_stats:\n self._get_tables_stats(schema_dict)\n return schema_dict.values()\n\n def _get_tables(self, schema_dict):\n return []\n\n def _get_tables_stats(self, tables_dict):\n for t in tables_dict.keys():\n if type(tables_dict[t]) == dict:\n res = self._run_query_internal('select count(*) as cnt from %s' % t)\n tables_dict[t]['size'] = res[0]['cnt']\n\n\nclass BaseHTTPQueryRunner(BaseQueryRunner):\n response_error = \"Endpoint returned unexpected status code\"\n requires_authentication = False\n requires_url = True\n url_title = 'URL base path'\n username_title = 'HTTP Basic Auth Username'\n password_title = 'HTTP Basic Auth Password'\n\n @classmethod\n def configuration_schema(cls):\n schema = {\n 'type': 'object',\n 'properties': {\n 'url': {\n 'type': 'string',\n 'title': cls.url_title,\n },\n 'username': {\n 'type': 'string',\n 'title': cls.username_title,\n },\n 'password': {\n 'type': 'string',\n 'title': cls.password_title,\n },\n },\n 'secret': ['password']\n }\n\n if cls.requires_url or cls.requires_authentication:\n schema['required'] = []\n\n if cls.requires_url:\n schema['required'] += ['url']\n\n if cls.requires_authentication:\n schema['required'] += ['username', 'password']\n return schema\n\n def get_auth(self):\n username = self.configuration.get('username')\n password = self.configuration.get('password')\n if username and password:\n return (username, password)\n if self.requires_authentication:\n raise ValueError(\"Username and Password required\")\n else:\n return None\n\n def get_response(self, url, auth=None, **kwargs):\n # Get authentication values if not given\n if auth is None:\n auth = self.get_auth()\n\n # Then call requests to get the response from the given endpoint\n # URL optionally, with the additional requests parameters.\n error = None\n response = None\n try:\n response = requests.get(url, auth=auth, **kwargs)\n # Raise a requests HTTP exception with the appropriate reason\n # for 4xx and 5xx response status codes which is later caught\n # and passed back.\n response.raise_for_status()\n\n # Any other responses (e.g. 
2xx and 3xx):\n if response.status_code != 200:\n error = '{} ({}).'.format(\n self.response_error,\n response.status_code,\n )\n\n except requests.HTTPError as exc:\n logger.exception(exc)\n error = (\n \"Failed to execute query. \"\n \"Return Code: {} Reason: {}\".format(\n response.status_code,\n response.text\n )\n )\n except requests.RequestException as exc:\n # Catch all other requests exceptions and return the error.\n logger.exception(exc)\n error = str(exc)\n\n # Return response and error.\n return response, error\n\n\nquery_runners = {}\n\n\ndef register(query_runner_class):\n global query_runners\n if query_runner_class.enabled():\n logger.debug(\"Registering %s (%s) query runner.\", query_runner_class.name(), query_runner_class.type())\n query_runners[query_runner_class.type()] = query_runner_class\n else:\n logger.debug(\"%s query runner enabled but not supported, not registering. Either disable or install missing \"\n \"dependencies.\", query_runner_class.name())\n\n\ndef get_query_runner(query_runner_type, configuration):\n query_runner_class = query_runners.get(query_runner_type, None)\n if query_runner_class is None:\n return None\n\n return query_runner_class(configuration)\n\n\ndef get_configuration_schema_for_query_runner_type(query_runner_type):\n query_runner_class = query_runners.get(query_runner_type, None)\n if query_runner_class is None:\n return None\n\n return query_runner_class.configuration_schema()\n\n\ndef import_query_runners(query_runner_imports):\n for runner_import in query_runner_imports:\n __import__(runner_import)\n", "path": "redash/query_runner/__init__.py"}]}
| 2,785 | 296 |
gh_patches_debug_15470
|
rasdani/github-patches
|
git_diff
|
tensorflow__addons-567
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Build nightly from tf-nightly
Currently we build against `tf-nightly-2.0-preview`. Now that TF2 is released we should switch to `tf-nightly` once we confirm that the switch has been made.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """TensorFlow Addons.
16
17 TensorFlow Addons is a repository of contributions that conform to well-
18 established API patterns, but implement new functionality not available
19 in core TensorFlow. TensorFlow natively supports a large number of
20 operators, layers, metrics, losses, and optimizers. However, in a fast
21 moving field like ML, there are many interesting new developments that
22 cannot be integrated into core TensorFlow (because their broad
23 applicability is not yet clear, or it is mostly used by a smaller subset
24 of the community).
25 """
26
27 from __future__ import absolute_import
28 from __future__ import division
29 from __future__ import print_function
30
31 import os
32 import platform
33 import sys
34
35 from datetime import datetime
36 from setuptools import find_packages
37 from setuptools import setup
38 from setuptools.dist import Distribution
39 from setuptools import Extension
40
41 DOCLINES = __doc__.split('\n')
42
43 TFA_NIGHTLY = 'tfa-nightly'
44 TFA_RELEASE = 'tensorflow-addons'
45
46 if '--nightly' in sys.argv:
47 project_name = TFA_NIGHTLY
48 nightly_idx = sys.argv.index('--nightly')
49 sys.argv.pop(nightly_idx)
50 else:
51 project_name = TFA_RELEASE
52
53 # Version
54 version = {}
55 base_dir = os.path.dirname(os.path.abspath(__file__))
56 with open(os.path.join(base_dir, "tensorflow_addons", "version.py")) as fp:
57 # yapf: disable
58 exec(fp.read(), version)
59 # yapf: enable
60
61 if project_name == TFA_NIGHTLY:
62 version['__version__'] += datetime.strftime(datetime.today(), "%Y%m%d")
63
64 # Dependencies
65 REQUIRED_PACKAGES = [
66 'six >= 1.10.0',
67 ]
68
69 if project_name == TFA_RELEASE:
70 # TODO: remove if-else condition when tf supports package consolidation.
71 if platform.system() == 'Linux':
72 REQUIRED_PACKAGES.append('tensorflow-gpu == 2.0.0-rc0')
73 else:
74 REQUIRED_PACKAGES.append('tensorflow == 2.0.0-rc0')
75 elif project_name == TFA_NIGHTLY:
76 # TODO: remove if-else condition when tf-nightly supports package consolidation.
77 if platform.system() == 'Linux':
78 REQUIRED_PACKAGES.append('tf-nightly-gpu-2.0-preview')
79 else:
80 REQUIRED_PACKAGES.append('tf-nightly-2.0-preview')
81
82
83 class BinaryDistribution(Distribution):
84 """This class is needed in order to create OS specific wheels."""
85
86 def has_ext_modules(self):
87 return True
88
89
90 setup(
91 name=project_name,
92 version=version['__version__'],
93 description=DOCLINES[0],
94 long_description='\n'.join(DOCLINES[2:]),
95 author='Google Inc.',
96 author_email='[email protected]',
97 packages=find_packages(),
98 ext_modules=[Extension('_foo', ['stub.cc'])],
99 install_requires=REQUIRED_PACKAGES,
100 include_package_data=True,
101 zip_safe=False,
102 distclass=BinaryDistribution,
103 classifiers=[
104 'Development Status :: 4 - Beta',
105 'Intended Audience :: Developers',
106 'Intended Audience :: Education',
107 'Intended Audience :: Science/Research',
108 'License :: OSI Approved :: Apache Software License',
109 'Programming Language :: Python :: 2.7',
110 'Programming Language :: Python :: 3.5',
111 'Programming Language :: Python :: 3.6',
112 'Programming Language :: Python :: 3.7',
113 'Topic :: Scientific/Engineering :: Mathematics',
114 'Topic :: Software Development :: Libraries :: Python Modules',
115 'Topic :: Software Development :: Libraries',
116 ],
117 license='Apache 2.0',
118 keywords='tensorflow addons machine learning',
119 )
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -69,15 +69,11 @@
if project_name == TFA_RELEASE:
# TODO: remove if-else condition when tf supports package consolidation.
if platform.system() == 'Linux':
- REQUIRED_PACKAGES.append('tensorflow-gpu == 2.0.0-rc0')
+ REQUIRED_PACKAGES.append('tensorflow-gpu == 2.0.0')
else:
- REQUIRED_PACKAGES.append('tensorflow == 2.0.0-rc0')
+ REQUIRED_PACKAGES.append('tensorflow == 2.0.0')
elif project_name == TFA_NIGHTLY:
- # TODO: remove if-else condition when tf-nightly supports package consolidation.
- if platform.system() == 'Linux':
- REQUIRED_PACKAGES.append('tf-nightly-gpu-2.0-preview')
- else:
- REQUIRED_PACKAGES.append('tf-nightly-2.0-preview')
+ REQUIRED_PACKAGES.append('tf-nightly')
class BinaryDistribution(Distribution):
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -69,15 +69,11 @@\n if project_name == TFA_RELEASE:\n # TODO: remove if-else condition when tf supports package consolidation.\n if platform.system() == 'Linux':\n- REQUIRED_PACKAGES.append('tensorflow-gpu == 2.0.0-rc0')\n+ REQUIRED_PACKAGES.append('tensorflow-gpu == 2.0.0')\n else:\n- REQUIRED_PACKAGES.append('tensorflow == 2.0.0-rc0')\n+ REQUIRED_PACKAGES.append('tensorflow == 2.0.0')\n elif project_name == TFA_NIGHTLY:\n- # TODO: remove if-else condition when tf-nightly supports package consolidation.\n- if platform.system() == 'Linux':\n- REQUIRED_PACKAGES.append('tf-nightly-gpu-2.0-preview')\n- else:\n- REQUIRED_PACKAGES.append('tf-nightly-2.0-preview')\n+ REQUIRED_PACKAGES.append('tf-nightly')\n \n \n class BinaryDistribution(Distribution):\n", "issue": "Build nightly from tf-nightly\nCurrently we build against `tf-nightly-2.0-preview`. Now that TF2 is released we should switch to `tf-nightly` once we confirm that the switch has been made.\n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"TensorFlow Addons.\n\nTensorFlow Addons is a repository of contributions that conform to well-\nestablished API patterns, but implement new functionality not available\nin core TensorFlow. TensorFlow natively supports a large number of\noperators, layers, metrics, losses, and optimizers. 
However, in a fast\nmoving field like ML, there are many interesting new developments that\ncannot be integrated into core TensorFlow (because their broad\napplicability is not yet clear, or it is mostly used by a smaller subset\nof the community).\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport platform\nimport sys\n\nfrom datetime import datetime\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.dist import Distribution\nfrom setuptools import Extension\n\nDOCLINES = __doc__.split('\\n')\n\nTFA_NIGHTLY = 'tfa-nightly'\nTFA_RELEASE = 'tensorflow-addons'\n\nif '--nightly' in sys.argv:\n project_name = TFA_NIGHTLY\n nightly_idx = sys.argv.index('--nightly')\n sys.argv.pop(nightly_idx)\nelse:\n project_name = TFA_RELEASE\n\n# Version\nversion = {}\nbase_dir = os.path.dirname(os.path.abspath(__file__))\nwith open(os.path.join(base_dir, \"tensorflow_addons\", \"version.py\")) as fp:\n # yapf: disable\n exec(fp.read(), version)\n # yapf: enable\n\nif project_name == TFA_NIGHTLY:\n version['__version__'] += datetime.strftime(datetime.today(), \"%Y%m%d\")\n\n# Dependencies\nREQUIRED_PACKAGES = [\n 'six >= 1.10.0',\n]\n\nif project_name == TFA_RELEASE:\n # TODO: remove if-else condition when tf supports package consolidation.\n if platform.system() == 'Linux':\n REQUIRED_PACKAGES.append('tensorflow-gpu == 2.0.0-rc0')\n else:\n REQUIRED_PACKAGES.append('tensorflow == 2.0.0-rc0')\nelif project_name == TFA_NIGHTLY:\n # TODO: remove if-else condition when tf-nightly supports package consolidation.\n if platform.system() == 'Linux':\n REQUIRED_PACKAGES.append('tf-nightly-gpu-2.0-preview')\n else:\n REQUIRED_PACKAGES.append('tf-nightly-2.0-preview')\n\n\nclass BinaryDistribution(Distribution):\n \"\"\"This class is needed in order to create OS specific wheels.\"\"\"\n\n def has_ext_modules(self):\n return True\n\n\nsetup(\n name=project_name,\n version=version['__version__'],\n description=DOCLINES[0],\n long_description='\\n'.join(DOCLINES[2:]),\n author='Google Inc.',\n author_email='[email protected]',\n packages=find_packages(),\n ext_modules=[Extension('_foo', ['stub.cc'])],\n install_requires=REQUIRED_PACKAGES,\n include_package_data=True,\n zip_safe=False,\n distclass=BinaryDistribution,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Libraries',\n ],\n license='Apache 2.0',\n keywords='tensorflow addons machine learning',\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"TensorFlow Addons.\n\nTensorFlow Addons is a repository of contributions that conform to well-\nestablished API patterns, but implement new functionality not available\nin core TensorFlow. TensorFlow natively supports a large number of\noperators, layers, metrics, losses, and optimizers. However, in a fast\nmoving field like ML, there are many interesting new developments that\ncannot be integrated into core TensorFlow (because their broad\napplicability is not yet clear, or it is mostly used by a smaller subset\nof the community).\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport platform\nimport sys\n\nfrom datetime import datetime\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.dist import Distribution\nfrom setuptools import Extension\n\nDOCLINES = __doc__.split('\\n')\n\nTFA_NIGHTLY = 'tfa-nightly'\nTFA_RELEASE = 'tensorflow-addons'\n\nif '--nightly' in sys.argv:\n project_name = TFA_NIGHTLY\n nightly_idx = sys.argv.index('--nightly')\n sys.argv.pop(nightly_idx)\nelse:\n project_name = TFA_RELEASE\n\n# Version\nversion = {}\nbase_dir = os.path.dirname(os.path.abspath(__file__))\nwith open(os.path.join(base_dir, \"tensorflow_addons\", \"version.py\")) as fp:\n # yapf: disable\n exec(fp.read(), version)\n # yapf: enable\n\nif project_name == TFA_NIGHTLY:\n version['__version__'] += datetime.strftime(datetime.today(), \"%Y%m%d\")\n\n# Dependencies\nREQUIRED_PACKAGES = [\n 'six >= 1.10.0',\n]\n\nif project_name == TFA_RELEASE:\n # TODO: remove if-else condition when tf supports package consolidation.\n if platform.system() == 'Linux':\n REQUIRED_PACKAGES.append('tensorflow-gpu == 2.0.0')\n else:\n REQUIRED_PACKAGES.append('tensorflow == 2.0.0')\nelif project_name == TFA_NIGHTLY:\n REQUIRED_PACKAGES.append('tf-nightly')\n\n\nclass BinaryDistribution(Distribution):\n \"\"\"This class is needed in order to create OS specific wheels.\"\"\"\n\n def has_ext_modules(self):\n return True\n\n\nsetup(\n name=project_name,\n version=version['__version__'],\n description=DOCLINES[0],\n long_description='\\n'.join(DOCLINES[2:]),\n author='Google Inc.',\n author_email='[email protected]',\n packages=find_packages(),\n ext_modules=[Extension('_foo', ['stub.cc'])],\n install_requires=REQUIRED_PACKAGES,\n include_package_data=True,\n zip_safe=False,\n distclass=BinaryDistribution,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 
'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Libraries',\n ],\n license='Apache 2.0',\n keywords='tensorflow addons machine learning',\n)\n", "path": "setup.py"}]}
| 1,483 | 237 |
gh_patches_debug_16380
|
rasdani/github-patches
|
git_diff
|
googleapis__python-bigquery-701
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Noxfile is not updated with Sphinx version pin
`noxfile.py` is not updated with Sphinx version pin, please help take a look!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `noxfile.py`
Content:
```
1 # Copyright 2016 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import absolute_import
16
17 import pathlib
18 import os
19 import shutil
20
21 import nox
22
23
24 PYTYPE_VERSION = "pytype==2021.4.9"
25 BLACK_VERSION = "black==19.10b0"
26 BLACK_PATHS = ("docs", "google", "samples", "tests", "noxfile.py", "setup.py")
27
28 DEFAULT_PYTHON_VERSION = "3.8"
29 SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"]
30 UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"]
31 CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
32
33 # 'docfx' is excluded since it only needs to run in 'docs-presubmit'
34 nox.options.sessions = [
35 "unit_noextras",
36 "unit",
37 "system",
38 "snippets",
39 "cover",
40 "lint",
41 "lint_setup_py",
42 "blacken",
43 "pytype",
44 "docs",
45 ]
46
47
48 def default(session, install_extras=True):
49 """Default unit test session.
50
51 This is intended to be run **without** an interpreter set, so
52 that the current ``python`` (on the ``PATH``) or the version of
53 Python corresponding to the ``nox`` binary the ``PATH`` can
54 run the tests.
55 """
56 constraints_path = str(
57 CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
58 )
59
60 # Install all test dependencies, then install local packages in-place.
61 session.install(
62 "mock",
63 "pytest",
64 "google-cloud-testutils",
65 "pytest-cov",
66 "freezegun",
67 "-c",
68 constraints_path,
69 )
70
71 install_target = ".[all]" if install_extras else "."
72 session.install("-e", install_target, "-c", constraints_path)
73
74 session.install("ipython", "-c", constraints_path)
75
76 # Run py.test against the unit tests.
77 session.run(
78 "py.test",
79 "--quiet",
80 "--cov=google/cloud/bigquery",
81 "--cov=tests/unit",
82 "--cov-append",
83 "--cov-config=.coveragerc",
84 "--cov-report=",
85 "--cov-fail-under=0",
86 os.path.join("tests", "unit"),
87 *session.posargs,
88 )
89
90
91 @nox.session(python=UNIT_TEST_PYTHON_VERSIONS)
92 def unit(session):
93 """Run the unit test suite."""
94 default(session)
95
96
97 @nox.session(python=UNIT_TEST_PYTHON_VERSIONS[-1])
98 def unit_noextras(session):
99 """Run the unit test suite."""
100 default(session, install_extras=False)
101
102
103 @nox.session(python=DEFAULT_PYTHON_VERSION)
104 def pytype(session):
105 """Run type checks."""
106 # An indirect dependecy attrs==21.1.0 breaks the check, and installing a less
107 # recent version avoids the error until a possibly better fix is found.
108 # https://github.com/googleapis/python-bigquery/issues/655
109 session.install("attrs==20.3.0")
110 session.install("-e", ".[all]")
111 session.install("ipython")
112 session.install(PYTYPE_VERSION)
113 session.run("pytype")
114
115
116 @nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
117 def system(session):
118 """Run the system test suite."""
119
120 constraints_path = str(
121 CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
122 )
123
124 # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true.
125 if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false":
126 session.skip("RUN_SYSTEM_TESTS is set to false, skipping")
127
128 # Sanity check: Only run system tests if the environment variable is set.
129 if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
130 session.skip("Credentials must be set via environment variable.")
131
132 # Use pre-release gRPC for system tests.
133 session.install("--pre", "grpcio", "-c", constraints_path)
134
135 # Install all test dependencies, then install local packages in place.
136 session.install(
137 "mock", "pytest", "psutil", "google-cloud-testutils", "-c", constraints_path
138 )
139 if os.environ.get("GOOGLE_API_USE_CLIENT_CERTIFICATE", "") == "true":
140 # mTLS test requires pyopenssl and latest google-cloud-storage
141 session.install("google-cloud-storage", "pyopenssl")
142 else:
143 session.install("google-cloud-storage", "-c", constraints_path)
144
145 session.install("-e", ".[all]", "-c", constraints_path)
146 session.install("ipython", "-c", constraints_path)
147
148 # Run py.test against the system tests.
149 session.run("py.test", "--quiet", os.path.join("tests", "system"), *session.posargs)
150
151
152 @nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
153 def snippets(session):
154 """Run the snippets test suite."""
155
156 # Check the value of `RUN_SNIPPETS_TESTS` env var. It defaults to true.
157 if os.environ.get("RUN_SNIPPETS_TESTS", "true") == "false":
158 session.skip("RUN_SNIPPETS_TESTS is set to false, skipping")
159
160 # Sanity check: Only run snippets tests if the environment variable is set.
161 if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
162 session.skip("Credentials must be set via environment variable.")
163
164 constraints_path = str(
165 CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
166 )
167
168 # Install all test dependencies, then install local packages in place.
169 session.install("mock", "pytest", "google-cloud-testutils", "-c", constraints_path)
170 session.install("google-cloud-storage", "-c", constraints_path)
171 session.install("grpcio", "-c", constraints_path)
172
173 session.install("-e", ".[all]", "-c", constraints_path)
174
175 # Run py.test against the snippets tests.
176 # Skip tests in samples/snippets, as those are run in a different session
177 # using the nox config from that directory.
178 session.run("py.test", os.path.join("docs", "snippets.py"), *session.posargs)
179 session.run(
180 "py.test",
181 "samples",
182 "--ignore=samples/snippets",
183 "--ignore=samples/geography",
184 *session.posargs,
185 )
186
187
188 @nox.session(python=DEFAULT_PYTHON_VERSION)
189 def cover(session):
190 """Run the final coverage report.
191
192 This outputs the coverage report aggregating coverage from the unit
193 test runs (not system test runs), and then erases coverage data.
194 """
195 session.install("coverage", "pytest-cov")
196 session.run("coverage", "report", "--show-missing", "--fail-under=100")
197 session.run("coverage", "erase")
198
199
200 @nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
201 def prerelease_deps(session):
202 """Run all tests with prerelease versions of dependencies installed.
203
204 https://github.com/googleapis/python-bigquery/issues/95
205 """
206 # PyArrow prerelease packages are published to an alternative PyPI host.
207 # https://arrow.apache.org/docs/python/install.html#installing-nightly-packages
208 session.install(
209 "--extra-index-url", "https://pypi.fury.io/arrow-nightlies/", "--pre", "pyarrow"
210 )
211 session.install("--pre", "grpcio", "pandas")
212 session.install(
213 "freezegun",
214 "google-cloud-storage",
215 "google-cloud-testutils",
216 "IPython",
217 "mock",
218 "psutil",
219 "pytest",
220 "pytest-cov",
221 )
222 session.install("-e", ".[all]")
223
224 # Print out prerelease package versions.
225 session.run("python", "-c", "import grpc; print(grpc.__version__)")
226 session.run("python", "-c", "import pandas; print(pandas.__version__)")
227 session.run("python", "-c", "import pyarrow; print(pyarrow.__version__)")
228
229 # Run all tests, except a few samples tests which require extra dependencies.
230 session.run("py.test", "tests/unit")
231 session.run("py.test", "tests/system")
232 session.run("py.test", "samples/tests")
233
234
235 @nox.session(python=DEFAULT_PYTHON_VERSION)
236 def lint(session):
237 """Run linters.
238
239 Returns a failure if the linters find linting errors or sufficiently
240 serious code quality issues.
241 """
242
243 session.install("flake8", BLACK_VERSION)
244 session.install("-e", ".")
245 session.run("flake8", os.path.join("google", "cloud", "bigquery"))
246 session.run("flake8", "tests")
247 session.run("flake8", os.path.join("docs", "samples"))
248 session.run("flake8", os.path.join("docs", "snippets.py"))
249 session.run("black", "--check", *BLACK_PATHS)
250
251
252 @nox.session(python=DEFAULT_PYTHON_VERSION)
253 def lint_setup_py(session):
254 """Verify that setup.py is valid (including RST check)."""
255
256 session.install("docutils", "Pygments")
257 session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
258
259
260 @nox.session(python=DEFAULT_PYTHON_VERSION)
261 def blacken(session):
262 """Run black.
263 Format code to uniform standard.
264 """
265
266 session.install(BLACK_VERSION)
267 session.run("black", *BLACK_PATHS)
268
269
270 @nox.session(python=DEFAULT_PYTHON_VERSION)
271 def docs(session):
272 """Build the docs."""
273
274 session.install("ipython", "recommonmark", "sphinx", "sphinx_rtd_theme")
275 session.install("google-cloud-storage")
276 session.install("-e", ".[all]")
277
278 shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
279 session.run(
280 "sphinx-build",
281 "-W", # warnings as errors
282 "-T", # show full traceback on exception
283 "-N", # no colors
284 "-b",
285 "html",
286 "-d",
287 os.path.join("docs", "_build", "doctrees", ""),
288 os.path.join("docs", ""),
289 os.path.join("docs", "_build", "html", ""),
290 )
291
292
293 @nox.session(python=DEFAULT_PYTHON_VERSION)
294 def docfx(session):
295 """Build the docfx yaml files for this library."""
296
297 session.install("-e", ".")
298 session.install("sphinx", "alabaster", "recommonmark", "gcp-sphinx-docfx-yaml")
299
300 shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
301 session.run(
302 "sphinx-build",
303 "-T", # show full traceback on exception
304 "-N", # no colors
305 "-D",
306 (
307 "extensions=sphinx.ext.autodoc,"
308 "sphinx.ext.autosummary,"
309 "docfx_yaml.extension,"
310 "sphinx.ext.intersphinx,"
311 "sphinx.ext.coverage,"
312 "sphinx.ext.napoleon,"
313 "sphinx.ext.todo,"
314 "sphinx.ext.viewcode,"
315 "recommonmark"
316 ),
317 "-b",
318 "html",
319 "-d",
320 os.path.join("docs", "_build", "doctrees", ""),
321 os.path.join("docs", ""),
322 os.path.join("docs", "_build", "html", ""),
323 )
324
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/noxfile.py b/noxfile.py
--- a/noxfile.py
+++ b/noxfile.py
@@ -271,7 +271,7 @@
def docs(session):
"""Build the docs."""
- session.install("ipython", "recommonmark", "sphinx", "sphinx_rtd_theme")
+ session.install("ipython", "recommonmark", "sphinx==4.0.1", "sphinx_rtd_theme")
session.install("google-cloud-storage")
session.install("-e", ".[all]")
@@ -295,7 +295,9 @@
"""Build the docfx yaml files for this library."""
session.install("-e", ".")
- session.install("sphinx", "alabaster", "recommonmark", "gcp-sphinx-docfx-yaml")
+ session.install(
+ "sphinx==4.0.1", "alabaster", "recommonmark", "gcp-sphinx-docfx-yaml"
+ )
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
|
{"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -271,7 +271,7 @@\n def docs(session):\n \"\"\"Build the docs.\"\"\"\n \n- session.install(\"ipython\", \"recommonmark\", \"sphinx\", \"sphinx_rtd_theme\")\n+ session.install(\"ipython\", \"recommonmark\", \"sphinx==4.0.1\", \"sphinx_rtd_theme\")\n session.install(\"google-cloud-storage\")\n session.install(\"-e\", \".[all]\")\n \n@@ -295,7 +295,9 @@\n \"\"\"Build the docfx yaml files for this library.\"\"\"\n \n session.install(\"-e\", \".\")\n- session.install(\"sphinx\", \"alabaster\", \"recommonmark\", \"gcp-sphinx-docfx-yaml\")\n+ session.install(\n+ \"sphinx==4.0.1\", \"alabaster\", \"recommonmark\", \"gcp-sphinx-docfx-yaml\"\n+ )\n \n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n", "issue": "Noxfile is not updated with Sphinx version pin\n\`noxfile.py\` is not updated with Sphinx version pin, please help take a look!\n",
"before_files": [{"content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport pathlib\nimport os\nimport shutil\n\nimport nox\n\n\nPYTYPE_VERSION = \"pytype==2021.4.9\"\nBLACK_VERSION = \"black==19.10b0\"\nBLACK_PATHS = (\"docs\", \"google\", \"samples\", \"tests\", \"noxfile.py\", \"setup.py\")\n\nDEFAULT_PYTHON_VERSION = \"3.8\"\nSYSTEM_TEST_PYTHON_VERSIONS = [\"3.8\"]\nUNIT_TEST_PYTHON_VERSIONS = [\"3.6\", \"3.7\", \"3.8\", \"3.9\"]\nCURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()\n\n# 'docfx' is excluded since it only needs to run in 'docs-presubmit'\nnox.options.sessions = [\n \"unit_noextras\",\n \"unit\",\n \"system\",\n \"snippets\",\n \"cover\",\n \"lint\",\n \"lint_setup_py\",\n \"blacken\",\n \"pytype\",\n \"docs\",\n]\n\n\ndef default(session, install_extras=True):\n \"\"\"Default unit test session.\n\n This is intended to be run **without** an interpreter set, so\n that the current \`\`python\`\` (on the \`\`PATH\`\`) or the version of\n Python corresponding to the \`\`nox\`\` binary the \`\`PATH\`\` can\n run the tests.\n \"\"\"\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Install all test dependencies, then install local packages in-place.\n session.install(\n \"mock\",\n \"pytest\",\n \"google-cloud-testutils\",\n \"pytest-cov\",\n \"freezegun\",\n \"-c\",\n constraints_path,\n )\n\n install_target = \".[all]\" if install_extras else \".\"\n session.install(\"-e\", install_target, \"-c\", constraints_path)\n\n session.install(\"ipython\", \"-c\", constraints_path)\n\n # Run py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=google/cloud/bigquery\",\n \"--cov=tests/unit\",\n \"--cov-append\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=0\",\n os.path.join(\"tests\", \"unit\"),\n *session.posargs,\n )\n\n\n@nox.session(python=UNIT_TEST_PYTHON_VERSIONS)\ndef unit(session):\n \"\"\"Run the unit test suite.\"\"\"\n default(session)\n\n\n@nox.session(python=UNIT_TEST_PYTHON_VERSIONS[-1])\ndef unit_noextras(session):\n \"\"\"Run the unit test suite.\"\"\"\n default(session, install_extras=False)\n\n\n@nox.session(python=DEFAULT_PYTHON_VERSION)\ndef pytype(session):\n \"\"\"Run type checks.\"\"\"\n # An indirect dependecy attrs==21.1.0 breaks the check, and installing a less\n # recent version avoids the error until a possibly better fix is found.\n # https://github.com/googleapis/python-bigquery/issues/655\n session.install(\"attrs==20.3.0\")\n session.install(\"-e\", \".[all]\")\n session.install(\"ipython\")\n session.install(PYTYPE_VERSION)\n session.run(\"pytype\")\n\n\n@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)\ndef system(session):\n \"\"\"Run the system test suite.\"\"\"\n\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Check the value of \`RUN_SYSTEM_TESTS\` env var. It defaults to true.\n if os.environ.get(\"RUN_SYSTEM_TESTS\", \"true\") == \"false\":\n session.skip(\"RUN_SYSTEM_TESTS is set to false, skipping\")\n\n # Sanity check: Only run system tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n # Use pre-release gRPC for system tests.\n session.install(\"--pre\", \"grpcio\", \"-c\", constraints_path)\n\n # Install all test dependencies, then install local packages in place.\n session.install(\n \"mock\", \"pytest\", \"psutil\", \"google-cloud-testutils\", \"-c\", constraints_path\n )\n if os.environ.get(\"GOOGLE_API_USE_CLIENT_CERTIFICATE\", \"\") == \"true\":\n # mTLS test requires pyopenssl and latest google-cloud-storage\n session.install(\"google-cloud-storage\", \"pyopenssl\")\n else:\n session.install(\"google-cloud-storage\", \"-c\", constraints_path)\n\n session.install(\"-e\", \".[all]\", \"-c\", constraints_path)\n session.install(\"ipython\", \"-c\", constraints_path)\n\n # Run py.test against the system tests.\n session.run(\"py.test\", \"--quiet\", os.path.join(\"tests\", \"system\"), *session.posargs)\n\n\n@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)\ndef snippets(session):\n \"\"\"Run the snippets test suite.\"\"\"\n\n # Check the value of \`RUN_SNIPPETS_TESTS\` env var. It defaults to true.\n if os.environ.get(\"RUN_SNIPPETS_TESTS\", \"true\") == \"false\":\n session.skip(\"RUN_SNIPPETS_TESTS is set to false, skipping\")\n\n # Sanity check: Only run snippets tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Install all test dependencies, then install local packages in place.\n session.install(\"mock\", \"pytest\", \"google-cloud-testutils\", \"-c\", constraints_path)\n session.install(\"google-cloud-storage\", \"-c\", constraints_path)\n session.install(\"grpcio\", \"-c\", constraints_path)\n\n session.install(\"-e\", \".[all]\", \"-c\", constraints_path)\n\n # Run py.test against the snippets tests.\n # Skip tests in samples/snippets, as those are run in a different session\n # using the nox config from that directory.\n session.run(\"py.test\", os.path.join(\"docs\", \"snippets.py\"), *session.posargs)\n session.run(\n \"py.test\",\n \"samples\",\n \"--ignore=samples/snippets\",\n \"--ignore=samples/geography\",\n *session.posargs,\n )\n\n\n@nox.session(python=DEFAULT_PYTHON_VERSION)\ndef cover(session):\n \"\"\"Run the final coverage report.\n\n This outputs the coverage report aggregating coverage from the unit\n test runs (not system test runs), and then erases coverage data.\n \"\"\"\n session.install(\"coverage\", \"pytest-cov\")\n session.run(\"coverage\", \"report\", \"--show-missing\", \"--fail-under=100\")\n session.run(\"coverage\", \"erase\")\n\n\n@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)\ndef prerelease_deps(session):\n \"\"\"Run all tests with prerelease versions of dependencies installed.\n\n https://github.com/googleapis/python-bigquery/issues/95\n \"\"\"\n # PyArrow prerelease packages are published to an alternative PyPI host.\n # https://arrow.apache.org/docs/python/install.html#installing-nightly-packages\n session.install(\n \"--extra-index-url\", \"https://pypi.fury.io/arrow-nightlies/\", \"--pre\", \"pyarrow\"\n )\n session.install(\"--pre\", \"grpcio\", \"pandas\")\n session.install(\n \"freezegun\",\n \"google-cloud-storage\",\n \"google-cloud-testutils\",\n \"IPython\",\n \"mock\",\n \"psutil\",\n \"pytest\",\n \"pytest-cov\",\n )\n session.install(\"-e\", \".[all]\")\n\n # Print out prerelease package versions.\n session.run(\"python\", \"-c\", \"import grpc; print(grpc.__version__)\")\n session.run(\"python\", \"-c\", \"import pandas; print(pandas.__version__)\")\n session.run(\"python\", \"-c\", \"import pyarrow; print(pyarrow.__version__)\")\n\n # Run all tests, except a few samples tests which require extra dependencies.\n session.run(\"py.test\", \"tests/unit\")\n session.run(\"py.test\", \"tests/system\")\n session.run(\"py.test\", \"samples/tests\")\n\n\n@nox.session(python=DEFAULT_PYTHON_VERSION)\ndef lint(session):\n \"\"\"Run linters.\n\n Returns a failure if the linters find linting errors or sufficiently\n serious code quality issues.\n \"\"\"\n\n session.install(\"flake8\", BLACK_VERSION)\n session.install(\"-e\", \".\")\n session.run(\"flake8\", os.path.join(\"google\", \"cloud\", \"bigquery\"))\n session.run(\"flake8\", \"tests\")\n session.run(\"flake8\", os.path.join(\"docs\", \"samples\"))\n session.run(\"flake8\", os.path.join(\"docs\", \"snippets.py\"))\n session.run(\"black\", \"--check\", *BLACK_PATHS)\n\n\n@nox.session(python=DEFAULT_PYTHON_VERSION)\ndef lint_setup_py(session):\n \"\"\"Verify that setup.py is valid (including RST check).\"\"\"\n\n session.install(\"docutils\", \"Pygments\")\n session.run(\"python\", \"setup.py\", \"check\", \"--restructuredtext\", \"--strict\")\n\n\n@nox.session(python=DEFAULT_PYTHON_VERSION)\ndef blacken(session):\n \"\"\"Run black.\n Format code to uniform standard.\n \"\"\"\n\n session.install(BLACK_VERSION)\n session.run(\"black\", *BLACK_PATHS)\n\n\n@nox.session(python=DEFAULT_PYTHON_VERSION)\ndef docs(session):\n \"\"\"Build the docs.\"\"\"\n\n session.install(\"ipython\", \"recommonmark\", \"sphinx\", \"sphinx_rtd_theme\")\n session.install(\"google-cloud-storage\")\n session.install(\"-e\", \".[all]\")\n\n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n \"sphinx-build\",\n \"-W\", # warnings as errors\n \"-T\", # show full traceback on exception\n \"-N\", # no colors\n \"-b\",\n \"html\",\n \"-d\",\n os.path.join(\"docs\", \"_build\", \"doctrees\", \"\"),\n os.path.join(\"docs\", \"\"),\n os.path.join(\"docs\", \"_build\", \"html\", \"\"),\n )\n\n\n@nox.session(python=DEFAULT_PYTHON_VERSION)\ndef docfx(session):\n \"\"\"Build the docfx yaml files for this library.\"\"\"\n\n session.install(\"-e\", \".\")\n session.install(\"sphinx\", \"alabaster\", \"recommonmark\", \"gcp-sphinx-docfx-yaml\")\n\n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n \"sphinx-build\",\n \"-T\", # show full traceback on exception\n \"-N\", # no colors\n \"-D\",\n (\n \"extensions=sphinx.ext.autodoc,\"\n \"sphinx.ext.autosummary,\"\n \"docfx_yaml.extension,\"\n \"sphinx.ext.intersphinx,\"\n \"sphinx.ext.coverage,\"\n \"sphinx.ext.napoleon,\"\n \"sphinx.ext.todo,\"\n \"sphinx.ext.viewcode,\"\n \"recommonmark\"\n ),\n \"-b\",\n \"html\",\n \"-d\",\n os.path.join(\"docs\", \"_build\", \"doctrees\", \"\"),\n os.path.join(\"docs\", \"\"),\n os.path.join(\"docs\", \"_build\", \"html\", \"\"),\n )\n", "path": "noxfile.py"}],
"after_files": [{"content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport pathlib\nimport os\nimport shutil\n\nimport nox\n\n\nPYTYPE_VERSION = \"pytype==2021.4.9\"\nBLACK_VERSION = \"black==19.10b0\"\nBLACK_PATHS = (\"docs\", \"google\", \"samples\", \"tests\", \"noxfile.py\", \"setup.py\")\n\nDEFAULT_PYTHON_VERSION = \"3.8\"\nSYSTEM_TEST_PYTHON_VERSIONS = [\"3.8\"]\nUNIT_TEST_PYTHON_VERSIONS = [\"3.6\", \"3.7\", \"3.8\", \"3.9\"]\nCURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()\n\n# 'docfx' is excluded since it only needs to run in 'docs-presubmit'\nnox.options.sessions = [\n \"unit_noextras\",\n \"unit\",\n \"system\",\n \"snippets\",\n \"cover\",\n \"lint\",\n \"lint_setup_py\",\n \"blacken\",\n \"pytype\",\n \"docs\",\n]\n\n\ndef default(session, install_extras=True):\n \"\"\"Default unit test session.\n\n This is intended to be run **without** an interpreter set, so\n that the current \`\`python\`\` (on the \`\`PATH\`\`) or the version of\n Python corresponding to the \`\`nox\`\` binary the \`\`PATH\`\` can\n run the tests.\n \"\"\"\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Install all test dependencies, then install local packages in-place.\n session.install(\n \"mock\",\n \"pytest\",\n \"google-cloud-testutils\",\n \"pytest-cov\",\n \"freezegun\",\n \"-c\",\n constraints_path,\n )\n\n install_target = \".[all]\" if install_extras else \".\"\n session.install(\"-e\", install_target, \"-c\", constraints_path)\n\n session.install(\"ipython\", \"-c\", constraints_path)\n\n # Run py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=google/cloud/bigquery\",\n \"--cov=tests/unit\",\n \"--cov-append\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=0\",\n os.path.join(\"tests\", \"unit\"),\n *session.posargs,\n )\n\n\n@nox.session(python=UNIT_TEST_PYTHON_VERSIONS)\ndef unit(session):\n \"\"\"Run the unit test suite.\"\"\"\n default(session)\n\n\n@nox.session(python=UNIT_TEST_PYTHON_VERSIONS[-1])\ndef unit_noextras(session):\n \"\"\"Run the unit test suite.\"\"\"\n default(session, install_extras=False)\n\n\n@nox.session(python=DEFAULT_PYTHON_VERSION)\ndef pytype(session):\n \"\"\"Run type checks.\"\"\"\n # An indirect dependecy attrs==21.1.0 breaks the check, and installing a less\n # recent version avoids the error until a possibly better fix is found.\n # https://github.com/googleapis/python-bigquery/issues/655\n session.install(\"attrs==20.3.0\")\n session.install(\"-e\", \".[all]\")\n session.install(\"ipython\")\n session.install(PYTYPE_VERSION)\n session.run(\"pytype\")\n\n\n@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)\ndef system(session):\n \"\"\"Run the system test suite.\"\"\"\n\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Check the value of \`RUN_SYSTEM_TESTS\` env var. It defaults to true.\n if os.environ.get(\"RUN_SYSTEM_TESTS\", \"true\") == \"false\":\n session.skip(\"RUN_SYSTEM_TESTS is set to false, skipping\")\n\n # Sanity check: Only run system tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n # Use pre-release gRPC for system tests.\n session.install(\"--pre\", \"grpcio\", \"-c\", constraints_path)\n\n # Install all test dependencies, then install local packages in place.\n session.install(\n \"mock\", \"pytest\", \"psutil\", \"google-cloud-testutils\", \"-c\", constraints_path\n )\n if os.environ.get(\"GOOGLE_API_USE_CLIENT_CERTIFICATE\", \"\") == \"true\":\n # mTLS test requires pyopenssl and latest google-cloud-storage\n session.install(\"google-cloud-storage\", \"pyopenssl\")\n else:\n session.install(\"google-cloud-storage\", \"-c\", constraints_path)\n\n session.install(\"-e\", \".[all]\", \"-c\", constraints_path)\n session.install(\"ipython\", \"-c\", constraints_path)\n\n # Run py.test against the system tests.\n session.run(\"py.test\", \"--quiet\", os.path.join(\"tests\", \"system\"), *session.posargs)\n\n\n@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)\ndef snippets(session):\n \"\"\"Run the snippets test suite.\"\"\"\n\n # Check the value of \`RUN_SNIPPETS_TESTS\` env var. It defaults to true.\n if os.environ.get(\"RUN_SNIPPETS_TESTS\", \"true\") == \"false\":\n session.skip(\"RUN_SNIPPETS_TESTS is set to false, skipping\")\n\n # Sanity check: Only run snippets tests if the environment variable is set.\n if not os.environ.get(\"GOOGLE_APPLICATION_CREDENTIALS\", \"\"):\n session.skip(\"Credentials must be set via environment variable.\")\n\n constraints_path = str(\n CURRENT_DIRECTORY / \"testing\" / f\"constraints-{session.python}.txt\"\n )\n\n # Install all test dependencies, then install local packages in place.\n session.install(\"mock\", \"pytest\", \"google-cloud-testutils\", \"-c\", constraints_path)\n session.install(\"google-cloud-storage\", \"-c\", constraints_path)\n session.install(\"grpcio\", \"-c\", constraints_path)\n\n session.install(\"-e\", \".[all]\", \"-c\", constraints_path)\n\n # Run py.test against the snippets tests.\n # Skip tests in samples/snippets, as those are run in a different session\n # using the nox config from that directory.\n session.run(\"py.test\", os.path.join(\"docs\", \"snippets.py\"), *session.posargs)\n session.run(\n \"py.test\",\n \"samples\",\n \"--ignore=samples/snippets\",\n \"--ignore=samples/geography\",\n *session.posargs,\n )\n\n\n@nox.session(python=DEFAULT_PYTHON_VERSION)\ndef cover(session):\n \"\"\"Run the final coverage report.\n\n This outputs the coverage report aggregating coverage from the unit\n test runs (not system test runs), and then erases coverage data.\n \"\"\"\n session.install(\"coverage\", \"pytest-cov\")\n session.run(\"coverage\", \"report\", \"--show-missing\", \"--fail-under=100\")\n session.run(\"coverage\", \"erase\")\n\n\n@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)\ndef prerelease_deps(session):\n \"\"\"Run all tests with prerelease versions of dependencies installed.\n\n https://github.com/googleapis/python-bigquery/issues/95\n \"\"\"\n # PyArrow prerelease packages are published to an alternative PyPI host.\n # https://arrow.apache.org/docs/python/install.html#installing-nightly-packages\n session.install(\n \"--extra-index-url\", \"https://pypi.fury.io/arrow-nightlies/\", \"--pre\", \"pyarrow\"\n )\n session.install(\"--pre\", \"grpcio\", \"pandas\")\n session.install(\n \"freezegun\",\n \"google-cloud-storage\",\n \"google-cloud-testutils\",\n \"IPython\",\n \"mock\",\n \"psutil\",\n \"pytest\",\n \"pytest-cov\",\n )\n session.install(\"-e\", \".[all]\")\n\n # Print out prerelease package versions.\n session.run(\"python\", \"-c\", \"import grpc; print(grpc.__version__)\")\n session.run(\"python\", \"-c\", \"import pandas; print(pandas.__version__)\")\n session.run(\"python\", \"-c\", \"import pyarrow; print(pyarrow.__version__)\")\n\n # Run all tests, except a few samples tests which require extra dependencies.\n session.run(\"py.test\", \"tests/unit\")\n session.run(\"py.test\", \"tests/system\")\n session.run(\"py.test\", \"samples/tests\")\n\n\n@nox.session(python=DEFAULT_PYTHON_VERSION)\ndef lint(session):\n \"\"\"Run linters.\n\n Returns a failure if the linters find linting errors or sufficiently\n serious code quality issues.\n \"\"\"\n\n session.install(\"flake8\", BLACK_VERSION)\n session.install(\"-e\", \".\")\n session.run(\"flake8\", os.path.join(\"google\", \"cloud\", \"bigquery\"))\n session.run(\"flake8\", \"tests\")\n session.run(\"flake8\", os.path.join(\"docs\", \"samples\"))\n session.run(\"flake8\", os.path.join(\"docs\", \"snippets.py\"))\n session.run(\"black\", \"--check\", *BLACK_PATHS)\n\n\n@nox.session(python=DEFAULT_PYTHON_VERSION)\ndef lint_setup_py(session):\n \"\"\"Verify that setup.py is valid (including RST check).\"\"\"\n\n session.install(\"docutils\", \"Pygments\")\n session.run(\"python\", \"setup.py\", \"check\", \"--restructuredtext\", \"--strict\")\n\n\n@nox.session(python=DEFAULT_PYTHON_VERSION)\ndef blacken(session):\n \"\"\"Run black.\n Format code to uniform standard.\n \"\"\"\n\n session.install(BLACK_VERSION)\n session.run(\"black\", *BLACK_PATHS)\n\n\n@nox.session(python=DEFAULT_PYTHON_VERSION)\ndef docs(session):\n \"\"\"Build the docs.\"\"\"\n\n session.install(\"ipython\", \"recommonmark\", \"sphinx==4.0.1\", \"sphinx_rtd_theme\")\n session.install(\"google-cloud-storage\")\n session.install(\"-e\", \".[all]\")\n\n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n \"sphinx-build\",\n \"-W\", # warnings as errors\n \"-T\", # show full traceback on exception\n \"-N\", # no colors\n \"-b\",\n \"html\",\n \"-d\",\n os.path.join(\"docs\", \"_build\", \"doctrees\", \"\"),\n os.path.join(\"docs\", \"\"),\n os.path.join(\"docs\", \"_build\", \"html\", \"\"),\n )\n\n\n@nox.session(python=DEFAULT_PYTHON_VERSION)\ndef docfx(session):\n \"\"\"Build the docfx yaml files for this library.\"\"\"\n\n session.install(\"-e\", \".\")\n session.install(\n \"sphinx==4.0.1\", \"alabaster\", \"recommonmark\", \"gcp-sphinx-docfx-yaml\"\n )\n\n shutil.rmtree(os.path.join(\"docs\", \"_build\"), ignore_errors=True)\n session.run(\n \"sphinx-build\",\n \"-T\", # show full traceback on exception\n \"-N\", # no colors\n \"-D\",\n (\n \"extensions=sphinx.ext.autodoc,\"\n \"sphinx.ext.autosummary,\"\n \"docfx_yaml.extension,\"\n \"sphinx.ext.intersphinx,\"\n \"sphinx.ext.coverage,\"\n \"sphinx.ext.napoleon,\"\n \"sphinx.ext.todo,\"\n \"sphinx.ext.viewcode,\"\n \"recommonmark\"\n ),\n \"-b\",\n \"html\",\n \"-d\",\n os.path.join(\"docs\", \"_build\", \"doctrees\", \"\"),\n os.path.join(\"docs\", \"\"),\n os.path.join(\"docs\", \"_build\", \"html\", \"\"),\n )\n", "path": "noxfile.py"}]}
| 3,766 | 250 |