Schema of the records below (column, dtype, and observed value range):

| Column            | Type   | Range               |
|-------------------|--------|---------------------|
| problem_id        | string | lengths 18-22       |
| source            | string | 1 distinct value    |
| task_type         | string | 1 distinct value    |
| in_source_id      | string | lengths 13-58       |
| prompt            | string | lengths 1.71k-9.01k |
| golden_diff       | string | lengths 151-4.94k   |
| verification_info | string | lengths 465-11.3k   |
| num_tokens_prompt | int64  | 557-2.05k           |
| num_tokens_diff   | int64  | 48-1.02k            |
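Each record below lists its fields in this column order. As orientation, here is a minimal loading sketch; the dataset id is taken from the `source` column and the split name `train` is an assumption, not something the dump states:

```python
# Sketch: read records like the ones below with the `datasets` library.
# Assumptions: dataset id from the `source` column; split named "train".
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")
row = ds[0]
print(row["problem_id"], row["in_source_id"])  # e.g. gh_patches_debug_29550 DDMAL__CantusDB-334
print(row["prompt"][:120])                     # issue statement plus partial code base
print(row["golden_diff"][:120])                # reference patch used for verification
```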
problem_id: gh_patches_debug_29550
source: rasdani/github-patches
task_type: git_diff
in_source_id: DDMAL__CantusDB-334
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Automatically run populate_next_chant_fields in staging/production We need to have some automation to run `python manage.py populate_next_chant_fields` from time to time on our servers; otherwise, the function in /next_chants.py will not work. </issue> <code> [start of django/cantusdb_project/main_app/signals.py] 1 import operator 2 from functools import reduce 3 4 from django.contrib.postgres.search import SearchVector 5 from django.db import models 6 from django.db.models import Value 7 from django.db.models.signals import post_save, post_delete 8 from django.dispatch import receiver 9 10 from main_app.models import Chant 11 from main_app.models import Sequence 12 13 14 @receiver(post_save, sender=Chant) 15 def update_chant_search_vector(instance, **kwargs): 16 """When saving an instance of Chant, update its search vector field.""" 17 index_components = instance.index_components() 18 pk = instance.pk 19 search_vectors = [] 20 21 for weight, data in index_components.items(): 22 search_vectors.append( 23 SearchVector( 24 Value(data, output_field=models.TextField()), weight=weight 25 ) 26 ) 27 instance.__class__.objects.filter(pk=pk).update( 28 search_vector=reduce(operator.add, search_vectors) 29 ) 30 31 @receiver(post_save, sender=Chant) 32 @receiver(post_save, sender=Sequence) 33 @receiver(post_delete, sender=Chant) 34 @receiver(post_delete, sender=Sequence) 35 def update_source_chant_count(instance, **kwargs): 36 """When saving or deleting a Chant or Sequence, update its Source's number_of_chants field""" 37 source = instance.source 38 source.number_of_chants = source.chant_set.count() + source.sequence_set.count() 39 source.save() 40 41 @receiver(post_save, sender=Chant) 42 @receiver(post_delete, sender=Chant) 43 def update_source_melody_count(instance, **kwargs): 44 """When saving or deleting a Chant, update its Source's number_of_melodies field""" 45 source = instance.source 46 source.number_of_melodies = source.chant_set.filter(volpiano__isnull=False).count() 47 source.save() 48 [end of django/cantusdb_project/main_app/signals.py] [start of django/cantusdb_project/main_app/management/commands/populate_next_chant_fields.py] 1 from main_app.models import Chant 2 from django.core.exceptions import ValidationError 3 from django.core.management.base import BaseCommand 4 5 class Command(BaseCommand): 6 def add_arguments(self, parser): 7 parser.add_argument( 8 "-o", 9 "--overwrite", 10 action="store_true", 11 help="Overwrites next_chant of chants that already have a next_chant set." 
12 ) 13 14 def handle(self, *args, **kwargs): 15 CHUNK_SIZE = 1_000 16 overwrite = kwargs["overwrite"] 17 chants = Chant.objects.all() 18 chants_count = chants.count() 19 start_index = 0 20 while start_index <= chants_count: 21 print("processing chunk with start_index of", start_index) 22 chunk = chants[start_index:start_index+CHUNK_SIZE] 23 for chant in chunk: 24 if chant.next_chant and not overwrite: # unless -o or -overwrite flag has been supplied, skip chants that already have a next_chant 25 continue 26 try: 27 chant.next_chant = chant.get_next_chant() 28 chant.save() 29 except ValidationError: # another chant's next_chant already points to this chant's next_chant 30 pass 31 del chunk # make sure we don't use too much RAM 32 start_index += CHUNK_SIZE 33 34 [end of django/cantusdb_project/main_app/management/commands/populate_next_chant_fields.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/django/cantusdb_project/main_app/management/commands/populate_next_chant_fields.py b/django/cantusdb_project/main_app/management/commands/populate_next_chant_fields.py --- a/django/cantusdb_project/main_app/management/commands/populate_next_chant_fields.py +++ b/django/cantusdb_project/main_app/management/commands/populate_next_chant_fields.py @@ -2,6 +2,17 @@ from django.core.exceptions import ValidationError from django.core.management.base import BaseCommand +# This script memoizes the result of Chant.get_next_chant(), which is expensive +# to calculate on the fly, saving it as the Chant's .next_chant property. +# This script populates the next_chant field for all chants in the database. +# Once it has been calculated once (for example, after importing data +# from OldCantus), it shouldn't need to be run again - whenever chants are +# created, updated or deleted, the field should be recalculated for all chants +# in the source by update_next_chant_fields() in main_app/signals.py. + +# to calculate all chants' next_chants from scratch: `python manage.py populate_next_chant_fields --overwrite` +# to calculate next_chants for all chants that don't already have a next_chant specified: `python manage.py populate_next_chant_fields` + class Command(BaseCommand): def add_arguments(self, parser): parser.add_argument( diff --git a/django/cantusdb_project/main_app/signals.py b/django/cantusdb_project/main_app/signals.py --- a/django/cantusdb_project/main_app/signals.py +++ b/django/cantusdb_project/main_app/signals.py @@ -45,3 +45,15 @@ source = instance.source source.number_of_melodies = source.chant_set.filter(volpiano__isnull=False).count() source.save() + +@receiver(post_save, sender=Chant) +@receiver(post_delete, sender=Chant) +def update_next_chant_fields(instance, **kwargs): + """When saving or deleting a Chant, make sure the next_chant of each chant in the source is up-to-date""" + source = instance.source + for chant in source.chant_set.all(): + next_chant = chant.get_next_chant() + # use .update() instead of .save() to prevent RecursionError + # (otherwise, saving would trigger @receiver(post_save, ...) again) + Chant.objects.filter(id=chant.id).update(next_chant=next_chant) +
{"golden_diff": "diff --git a/django/cantusdb_project/main_app/management/commands/populate_next_chant_fields.py b/django/cantusdb_project/main_app/management/commands/populate_next_chant_fields.py\n--- a/django/cantusdb_project/main_app/management/commands/populate_next_chant_fields.py\n+++ b/django/cantusdb_project/main_app/management/commands/populate_next_chant_fields.py\n@@ -2,6 +2,17 @@\n from django.core.exceptions import ValidationError\n from django.core.management.base import BaseCommand\n \n+# This script memoizes the result of Chant.get_next_chant(), which is expensive\n+# to calculate on the fly, saving it as the Chant's .next_chant property.\n+# This script populates the next_chant field for all chants in the database.\n+# Once it has been calculated once (for example, after importing data\n+# from OldCantus), it shouldn't need to be run again - whenever chants are\n+# created, updated or deleted, the field should be recalculated for all chants\n+# in the source by update_next_chant_fields() in main_app/signals.py.\n+\n+# to calculate all chants' next_chants from scratch: `python manage.py populate_next_chant_fields --overwrite`\n+# to calculate next_chants for all chants that don't already have a next_chant specified: `python manage.py populate_next_chant_fields`\n+\n class Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument(\ndiff --git a/django/cantusdb_project/main_app/signals.py b/django/cantusdb_project/main_app/signals.py\n--- a/django/cantusdb_project/main_app/signals.py\n+++ b/django/cantusdb_project/main_app/signals.py\n@@ -45,3 +45,15 @@\n source = instance.source\n source.number_of_melodies = source.chant_set.filter(volpiano__isnull=False).count()\n source.save()\n+\n+@receiver(post_save, sender=Chant)\n+@receiver(post_delete, sender=Chant)\n+def update_next_chant_fields(instance, **kwargs):\n+ \"\"\"When saving or deleting a Chant, make sure the next_chant of each chant in the source is up-to-date\"\"\"\n+ source = instance.source\n+ for chant in source.chant_set.all():\n+ next_chant = chant.get_next_chant()\n+ # use .update() instead of .save() to prevent RecursionError\n+ # (otherwise, saving would trigger @receiver(post_save, ...) 
again)\n+ Chant.objects.filter(id=chant.id).update(next_chant=next_chant)\n+\n", "issue": "Automatically run populate_next_chant_fields in staging/production\nWe need to have some automation to run `python manage.py populate_next_chant_fields` from time to time on our servers; otherwise, the function in /next_chants.py will not work.\n", "before_files": [{"content": "import operator\nfrom functools import reduce\n\nfrom django.contrib.postgres.search import SearchVector\nfrom django.db import models\nfrom django.db.models import Value\nfrom django.db.models.signals import post_save, post_delete\nfrom django.dispatch import receiver\n\nfrom main_app.models import Chant\nfrom main_app.models import Sequence\n\n\n@receiver(post_save, sender=Chant)\ndef update_chant_search_vector(instance, **kwargs):\n \"\"\"When saving an instance of Chant, update its search vector field.\"\"\"\n index_components = instance.index_components()\n pk = instance.pk\n search_vectors = []\n\n for weight, data in index_components.items():\n search_vectors.append(\n SearchVector(\n Value(data, output_field=models.TextField()), weight=weight\n )\n )\n instance.__class__.objects.filter(pk=pk).update(\n search_vector=reduce(operator.add, search_vectors)\n )\n\n@receiver(post_save, sender=Chant)\n@receiver(post_save, sender=Sequence)\n@receiver(post_delete, sender=Chant)\n@receiver(post_delete, sender=Sequence)\ndef update_source_chant_count(instance, **kwargs):\n \"\"\"When saving or deleting a Chant or Sequence, update its Source's number_of_chants field\"\"\"\n source = instance.source\n source.number_of_chants = source.chant_set.count() + source.sequence_set.count()\n source.save()\n\n@receiver(post_save, sender=Chant)\n@receiver(post_delete, sender=Chant)\ndef update_source_melody_count(instance, **kwargs):\n \"\"\"When saving or deleting a Chant, update its Source's number_of_melodies field\"\"\"\n source = instance.source\n source.number_of_melodies = source.chant_set.filter(volpiano__isnull=False).count()\n source.save()\n", "path": "django/cantusdb_project/main_app/signals.py"}, {"content": "from main_app.models import Chant\nfrom django.core.exceptions import ValidationError\nfrom django.core.management.base import BaseCommand\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument(\n \"-o\",\n \"--overwrite\", \n action=\"store_true\", \n help=\"Overwrites next_chant of chants that already have a next_chant set.\"\n )\n\n def handle(self, *args, **kwargs):\n CHUNK_SIZE = 1_000\n overwrite = kwargs[\"overwrite\"]\n chants = Chant.objects.all()\n chants_count = chants.count()\n start_index = 0\n while start_index <= chants_count:\n print(\"processing chunk with start_index of\", start_index)\n chunk = chants[start_index:start_index+CHUNK_SIZE]\n for chant in chunk:\n if chant.next_chant and not overwrite: # unless -o or -overwrite flag has been supplied, skip chants that already have a next_chant\n continue\n try:\n chant.next_chant = chant.get_next_chant()\n chant.save()\n except ValidationError: # another chant's next_chant already points to this chant's next_chant\n pass\n del chunk # make sure we don't use too much RAM\n start_index += CHUNK_SIZE\n\n", "path": "django/cantusdb_project/main_app/management/commands/populate_next_chant_fields.py"}]}
num_tokens_prompt: 1,438
num_tokens_diff: 575
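The golden_diff above recalculates `next_chant` through `Chant.objects.filter(id=chant.id).update(...)` rather than `chant.save()`: in Django, `save()` re-fires the `post_save` signal, while `QuerySet.update()` issues a bare SQL UPDATE and fires no signals, which is what prevents the receiver from re-entering itself. A runnable pure-Python mock of that distinction (stand-in classes, not the project's models):

```python
# Mock of Django's save()/update() difference that the patch relies on.
receivers = []                      # stand-in for the post_save signal registry

class Chant:                        # stand-in model, not the CantusDB class
    def save(self):
        for receiver in receivers:  # Django: save() dispatches post_save
            receiver(self)

    @staticmethod
    def update(**fields):           # Django: QuerySet.update() sends no signal
        pass

def update_next_chant_fields(instance):
    # instance.save() here would re-enter this function -> RecursionError
    Chant.update(next_chant="...")  # signal-free write terminates immediately

receivers.append(update_next_chant_fields)
Chant().save()                      # receiver runs exactly once
```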
problem_id: gh_patches_debug_51925
source: rasdani/github-patches
task_type: git_diff
in_source_id: cisagov__manage.get.gov-1551
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> User/Contact bug: Signals double saving, overwriting data for test cases ### Current Behavior A bug was introduced [with a PR](https://github.com/cisagov/manage.get.gov/pull/1491) that fixed our oidc login clearing out user information. To fix this, the Contact object was linked to the User object, such that they would remain in sync. However, this introduced a sneaky double-save bug wherein if signals get called more than once for the same contact object (as we found happens in our test cases), it overrides information when it should not do so. ![image](https://github.com/cisagov/manage.get.gov/assets/141044360/f9d35949-5138-4337-a189-9e2a843191f5) ![image](https://github.com/cisagov/manage.get.gov/assets/141044360/917e6db7-6cfd-48c8-a9b0-e0b8f01f2831) ### Expected Behavior When a `Contact` object is saved, the `save()` function checks to see if `self.user` is not None. When it is not, it will update the `self.user` object with whatever value is located at `self`. It appears that in certain situations, (such as our test cases), the `User` field is behaving as if it were a one-to-many relationship (one Contact object to many User objects). This should not be the case, and `Contact` should only update one `User` field. ### Steps to Reproduce 1. Create a new test case in `test_admin.py`, and create 3 fake User objects. Populate them with unique data. Then, log their values. Note that their data seems to get overwritten. ### Environment _No response_ ### Additional Context This bug seems to be originating in the `signals.py` file in the ` handle_profile` class. By passing in a flag that disables the save behavior (or just commenting it out), this issue seems to resolve. 
![image](https://github.com/cisagov/manage.get.gov/assets/141044360/e19a1508-72c7-405a-96db-3540305dcc4b) ### Issue Links 🔄 Relates to: [#1464 / #1468](https://github.com/cisagov/manage.get.gov/pull/1491) 🔄 Relates to: [this PR](https://github.com/cisagov/manage.get.gov/pull/1543) (pinpoints where the behavior is occurring but does not solve it) </issue> <code> [start of src/registrar/models/contact.py] 1 from django.db import models 2 3 from phonenumber_field.modelfields import PhoneNumberField # type: ignore 4 5 from .utility.time_stamped_model import TimeStampedModel 6 7 8 class Contact(TimeStampedModel): 9 10 """Contact information follows a similar pattern for each contact.""" 11 12 user = models.OneToOneField( 13 "registrar.User", 14 null=True, 15 blank=True, 16 on_delete=models.SET_NULL, 17 ) 18 19 first_name = models.TextField( 20 null=True, 21 blank=True, 22 help_text="First name", 23 verbose_name="first name / given name", 24 db_index=True, 25 ) 26 middle_name = models.TextField( 27 null=True, 28 blank=True, 29 help_text="Middle name (optional)", 30 ) 31 last_name = models.TextField( 32 null=True, 33 blank=True, 34 help_text="Last name", 35 verbose_name="last name / family name", 36 db_index=True, 37 ) 38 title = models.TextField( 39 null=True, 40 blank=True, 41 help_text="Title", 42 verbose_name="title or role in your organization", 43 ) 44 email = models.EmailField( 45 null=True, 46 blank=True, 47 help_text="Email", 48 db_index=True, 49 ) 50 phone = PhoneNumberField( 51 null=True, 52 blank=True, 53 help_text="Phone", 54 db_index=True, 55 ) 56 57 def get_formatted_name(self): 58 """Returns the contact's name in Western order.""" 59 names = [n for n in [self.first_name, self.middle_name, self.last_name] if n] 60 return " ".join(names) if names else "Unknown" 61 62 def save(self, *args, **kwargs): 63 # Call the parent class's save method to perform the actual save 64 super().save(*args, **kwargs) 65 66 # Update the related User object's first_name and last_name 67 if self.user: 68 self.user.first_name = self.first_name 69 self.user.last_name = self.last_name 70 self.user.save() 71 72 def __str__(self): 73 if self.first_name or self.last_name: 74 return self.get_formatted_name() 75 elif self.email: 76 return self.email 77 elif self.pk: 78 return str(self.pk) 79 else: 80 return "" 81 [end of src/registrar/models/contact.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/registrar/models/contact.py b/src/registrar/models/contact.py --- a/src/registrar/models/contact.py +++ b/src/registrar/models/contact.py @@ -64,7 +64,7 @@ super().save(*args, **kwargs) # Update the related User object's first_name and last_name - if self.user: + if self.user and (not self.user.first_name or not self.user.last_name): self.user.first_name = self.first_name self.user.last_name = self.last_name self.user.save()
{"golden_diff": "diff --git a/src/registrar/models/contact.py b/src/registrar/models/contact.py\n--- a/src/registrar/models/contact.py\n+++ b/src/registrar/models/contact.py\n@@ -64,7 +64,7 @@\n super().save(*args, **kwargs)\n \n # Update the related User object's first_name and last_name\n- if self.user:\n+ if self.user and (not self.user.first_name or not self.user.last_name):\n self.user.first_name = self.first_name\n self.user.last_name = self.last_name\n self.user.save()\n", "issue": "User/Contact bug: Signals double saving, overwriting data for test cases\n### Current Behavior\r\n\r\nA bug was introduced [with a PR](https://github.com/cisagov/manage.get.gov/pull/1491) that fixed our oidc login clearing out user information. To fix this, the Contact object was linked to the User object, such that they would remain in sync.\r\n\r\nHowever, this introduced a sneaky double-save bug wherein if signals get called more than once for the same contact object (as we found happens in our test cases), it overrides information when it should not do so.\r\n\r\n![image](https://github.com/cisagov/manage.get.gov/assets/141044360/f9d35949-5138-4337-a189-9e2a843191f5)\r\n![image](https://github.com/cisagov/manage.get.gov/assets/141044360/917e6db7-6cfd-48c8-a9b0-e0b8f01f2831)\r\n\r\n### Expected Behavior\r\n\r\nWhen a `Contact` object is saved, the `save()` function checks to see if `self.user` is not None. When it is not, it will update the `self.user` object with whatever value is located at `self`. It appears that in certain situations, (such as our test cases), the `User` field is behaving as if it were a one-to-many relationship (one Contact object to many User objects). This should not be the case, and `Contact` should only update one `User` field.\r\n\r\n### Steps to Reproduce\r\n\r\n1. Create a new test case in `test_admin.py`, and create 3 fake User objects. Populate them with unique data. Then, log their values. Note that their data seems to get overwritten.\r\n\r\n\r\n### Environment\r\n\r\n_No response_\r\n\r\n### Additional Context\r\n\r\nThis bug seems to be originating in the `signals.py` file in the ` handle_profile` class. By passing in a flag that disables the save behavior (or just commenting it out), this issue seems to resolve. 
\r\n![image](https://github.com/cisagov/manage.get.gov/assets/141044360/e19a1508-72c7-405a-96db-3540305dcc4b)\r\n\r\n\r\n### Issue Links\r\n\r\n\ud83d\udd04 Relates to: [#1464 / #1468](https://github.com/cisagov/manage.get.gov/pull/1491)\r\n\ud83d\udd04 Relates to: [this PR](https://github.com/cisagov/manage.get.gov/pull/1543) (pinpoints where the behavior is occurring but does not solve it)\n", "before_files": [{"content": "from django.db import models\n\nfrom phonenumber_field.modelfields import PhoneNumberField # type: ignore\n\nfrom .utility.time_stamped_model import TimeStampedModel\n\n\nclass Contact(TimeStampedModel):\n\n \"\"\"Contact information follows a similar pattern for each contact.\"\"\"\n\n user = models.OneToOneField(\n \"registrar.User\",\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n )\n\n first_name = models.TextField(\n null=True,\n blank=True,\n help_text=\"First name\",\n verbose_name=\"first name / given name\",\n db_index=True,\n )\n middle_name = models.TextField(\n null=True,\n blank=True,\n help_text=\"Middle name (optional)\",\n )\n last_name = models.TextField(\n null=True,\n blank=True,\n help_text=\"Last name\",\n verbose_name=\"last name / family name\",\n db_index=True,\n )\n title = models.TextField(\n null=True,\n blank=True,\n help_text=\"Title\",\n verbose_name=\"title or role in your organization\",\n )\n email = models.EmailField(\n null=True,\n blank=True,\n help_text=\"Email\",\n db_index=True,\n )\n phone = PhoneNumberField(\n null=True,\n blank=True,\n help_text=\"Phone\",\n db_index=True,\n )\n\n def get_formatted_name(self):\n \"\"\"Returns the contact's name in Western order.\"\"\"\n names = [n for n in [self.first_name, self.middle_name, self.last_name] if n]\n return \" \".join(names) if names else \"Unknown\"\n\n def save(self, *args, **kwargs):\n # Call the parent class's save method to perform the actual save\n super().save(*args, **kwargs)\n\n # Update the related User object's first_name and last_name\n if self.user:\n self.user.first_name = self.first_name\n self.user.last_name = self.last_name\n self.user.save()\n\n def __str__(self):\n if self.first_name or self.last_name:\n return self.get_formatted_name()\n elif self.email:\n return self.email\n elif self.pk:\n return str(self.pk)\n else:\n return \"\"\n", "path": "src/registrar/models/contact.py"}]}
num_tokens_prompt: 1,765
num_tokens_diff: 124
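The single changed line in this record's golden_diff turns the Contact-to-User name sync into a write-once operation: names are copied only while the linked User's fields are still empty. A runnable pure-Python sketch of the guarded behaviour (stand-in classes; the real ones are Django models):

```python
# Sketch of the patched Contact.save() guard, with in-memory stand-ins.
class User:
    def __init__(self):
        self.first_name = ""
        self.last_name = ""

class Contact:
    def __init__(self, user, first_name, last_name):
        self.user, self.first_name, self.last_name = user, first_name, last_name

    def save(self):
        # patched condition: sync only while the User's name is unset
        if self.user and (not self.user.first_name or not self.user.last_name):
            self.user.first_name = self.first_name
            self.user.last_name = self.last_name

user = User()
contact = Contact(user, "Ada", "Lovelace")
contact.save()                 # first save populates the empty User
contact.first_name = "Changed"
contact.save()                 # guard is now False -> User data survives
assert (user.first_name, user.last_name) == ("Ada", "Lovelace")
```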
problem_id: gh_patches_debug_12221
source: rasdani/github-patches
task_type: git_diff
in_source_id: OpenEnergyPlatform__oeplatform-690
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Database: Open data only. Where do we need / want to remind the users? I added this rule to the [DatabaseRules](http://wiki.openmod-initiative.org/wiki/DatabaseRules). Do we want to add a reminder to the upload interface? </issue> <code> [start of base/urls.py] 1 from django.conf.urls import url 2 3 from base import views 4 5 urlpatterns = [ 6 url(r"^robots.txt$", views.robot), 7 url(r"^$", views.Welcome.as_view(), name="index"), 8 url(r"^about/$", views.redir, {"target": "about"}, name="index"), 9 url(r"^faq/$", views.redir, {"target": "faq"}, name="index"), 10 url(r"^discussion/$", views.redir, {"target": "discussion"}, name="index"), 11 url(r"^contact/$", views.ContactView.as_view(), name="index"), 12 url(r"^legal/impressum/$", views.redir, {"target": "impressum"}, name="index"), 13 url(r"^legal/datasec/$", views.redir, {"target": "datasecurity"}, name="index"), 14 url(r"^legal/tou/$", views.redir, {"target": "terms_of_use"}, name="index"), 15 ] 16 [end of base/urls.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/base/urls.py b/base/urls.py --- a/base/urls.py +++ b/base/urls.py @@ -9,7 +9,6 @@ url(r"^faq/$", views.redir, {"target": "faq"}, name="index"), url(r"^discussion/$", views.redir, {"target": "discussion"}, name="index"), url(r"^contact/$", views.ContactView.as_view(), name="index"), - url(r"^legal/impressum/$", views.redir, {"target": "impressum"}, name="index"), - url(r"^legal/datasec/$", views.redir, {"target": "datasecurity"}, name="index"), + url(r"^legal/privacy_policy/$", views.redir, {"target": "privacy_policy"}, name="index"), url(r"^legal/tou/$", views.redir, {"target": "terms_of_use"}, name="index"), ]
{"golden_diff": "diff --git a/base/urls.py b/base/urls.py\n--- a/base/urls.py\n+++ b/base/urls.py\n@@ -9,7 +9,6 @@\n url(r\"^faq/$\", views.redir, {\"target\": \"faq\"}, name=\"index\"),\n url(r\"^discussion/$\", views.redir, {\"target\": \"discussion\"}, name=\"index\"),\n url(r\"^contact/$\", views.ContactView.as_view(), name=\"index\"),\n- url(r\"^legal/impressum/$\", views.redir, {\"target\": \"impressum\"}, name=\"index\"),\n- url(r\"^legal/datasec/$\", views.redir, {\"target\": \"datasecurity\"}, name=\"index\"),\n+ url(r\"^legal/privacy_policy/$\", views.redir, {\"target\": \"privacy_policy\"}, name=\"index\"),\n url(r\"^legal/tou/$\", views.redir, {\"target\": \"terms_of_use\"}, name=\"index\"),\n ]\n", "issue": "Database: Open data only.\nWhere do we need / want to remind the users?\nI added this rule to the [DatabaseRules](http://wiki.openmod-initiative.org/wiki/DatabaseRules).\nDo we want to add a reminder to the upload interface?\n\n", "before_files": [{"content": "from django.conf.urls import url\n\nfrom base import views\n\nurlpatterns = [\n url(r\"^robots.txt$\", views.robot),\n url(r\"^$\", views.Welcome.as_view(), name=\"index\"),\n url(r\"^about/$\", views.redir, {\"target\": \"about\"}, name=\"index\"),\n url(r\"^faq/$\", views.redir, {\"target\": \"faq\"}, name=\"index\"),\n url(r\"^discussion/$\", views.redir, {\"target\": \"discussion\"}, name=\"index\"),\n url(r\"^contact/$\", views.ContactView.as_view(), name=\"index\"),\n url(r\"^legal/impressum/$\", views.redir, {\"target\": \"impressum\"}, name=\"index\"),\n url(r\"^legal/datasec/$\", views.redir, {\"target\": \"datasecurity\"}, name=\"index\"),\n url(r\"^legal/tou/$\", views.redir, {\"target\": \"terms_of_use\"}, name=\"index\"),\n]\n", "path": "base/urls.py"}]}
num_tokens_prompt: 809
num_tokens_diff: 204
problem_id: gh_patches_debug_14744
source: rasdani/github-patches
task_type: git_diff
in_source_id: webkom__lego-1279
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> EmailAddress case sensitivity When creating a email address for a user (for gsuite sync) that includes capital letters, the user will be suspended. All input should be lowercased, since that is ehat google is doing. </issue> <code> [start of lego/apps/email/validators.py] 1 from django.core.exceptions import ValidationError 2 from django.core.validators import EmailValidator, RegexValidator 3 4 from lego.utils.validators import ReservedNameValidator 5 6 7 def validate_email_address(email_address): 8 9 if email_address.is_assigned(): 10 raise ValidationError('The address is already assigned') 11 12 13 def validate_email_address_content(email_address): 14 """Make sure we only create valid emails.""" 15 16 regex_validator = RegexValidator(regex=EmailValidator.user_regex) 17 reserved_valdator = ReservedNameValidator() 18 19 regex_validator(email_address.email) 20 reserved_valdator(email_address.email) 21 [end of lego/apps/email/validators.py] [start of lego/apps/email/fields.py] 1 from django.core.exceptions import ObjectDoesNotExist 2 from rest_framework import serializers 3 4 from lego.apps.email.validators import validate_email_address, validate_email_address_content 5 6 7 class EmailAddressField(serializers.PrimaryKeyRelatedField): 8 """ 9 Manage the email address as a string. 10 """ 11 12 def __init__(self, **kwargs): 13 validators = kwargs.get('validators') 14 kwargs['validators'] = validators if validators is not None else [validate_email_address] 15 kwargs['validators'].append(validate_email_address_content) 16 super().__init__(**kwargs) 17 18 def to_internal_value(self, data): 19 """ 20 Create email if not exists. 21 """ 22 try: 23 email_address, _ = self.get_queryset().get_or_create(pk=data) 24 return email_address 25 except ObjectDoesNotExist: 26 self.fail('does_not_exist', pk_value=data) 27 except (TypeError, ValueError): 28 self.fail('incorrect_type', data_type=type(data).__name__) 29 [end of lego/apps/email/fields.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lego/apps/email/fields.py b/lego/apps/email/fields.py --- a/lego/apps/email/fields.py +++ b/lego/apps/email/fields.py @@ -20,6 +20,7 @@ Create email if not exists. """ try: + data = data.lower() email_address, _ = self.get_queryset().get_or_create(pk=data) return email_address except ObjectDoesNotExist: diff --git a/lego/apps/email/validators.py b/lego/apps/email/validators.py --- a/lego/apps/email/validators.py +++ b/lego/apps/email/validators.py @@ -5,6 +5,8 @@ def validate_email_address(email_address): + if email_address.email != email_address.email.lower(): + raise ValidationError('Email is not lowercased') if email_address.is_assigned(): raise ValidationError('The address is already assigned')
{"golden_diff": "diff --git a/lego/apps/email/fields.py b/lego/apps/email/fields.py\n--- a/lego/apps/email/fields.py\n+++ b/lego/apps/email/fields.py\n@@ -20,6 +20,7 @@\n Create email if not exists.\n \"\"\"\n try:\n+ data = data.lower()\n email_address, _ = self.get_queryset().get_or_create(pk=data)\n return email_address\n except ObjectDoesNotExist:\ndiff --git a/lego/apps/email/validators.py b/lego/apps/email/validators.py\n--- a/lego/apps/email/validators.py\n+++ b/lego/apps/email/validators.py\n@@ -5,6 +5,8 @@\n \n \n def validate_email_address(email_address):\n+ if email_address.email != email_address.email.lower():\n+ raise ValidationError('Email is not lowercased')\n \n if email_address.is_assigned():\n raise ValidationError('The address is already assigned')\n", "issue": "EmailAddress case sensitivity\nWhen creating a email address for a user (for gsuite sync) that includes capital letters, the user will be suspended. All input should be lowercased, since that is ehat google is doing.\n", "before_files": [{"content": "from django.core.exceptions import ValidationError\nfrom django.core.validators import EmailValidator, RegexValidator\n\nfrom lego.utils.validators import ReservedNameValidator\n\n\ndef validate_email_address(email_address):\n\n if email_address.is_assigned():\n raise ValidationError('The address is already assigned')\n\n\ndef validate_email_address_content(email_address):\n \"\"\"Make sure we only create valid emails.\"\"\"\n\n regex_validator = RegexValidator(regex=EmailValidator.user_regex)\n reserved_valdator = ReservedNameValidator()\n\n regex_validator(email_address.email)\n reserved_valdator(email_address.email)\n", "path": "lego/apps/email/validators.py"}, {"content": "from django.core.exceptions import ObjectDoesNotExist\nfrom rest_framework import serializers\n\nfrom lego.apps.email.validators import validate_email_address, validate_email_address_content\n\n\nclass EmailAddressField(serializers.PrimaryKeyRelatedField):\n \"\"\"\n Manage the email address as a string.\n \"\"\"\n\n def __init__(self, **kwargs):\n validators = kwargs.get('validators')\n kwargs['validators'] = validators if validators is not None else [validate_email_address]\n kwargs['validators'].append(validate_email_address_content)\n super().__init__(**kwargs)\n\n def to_internal_value(self, data):\n \"\"\"\n Create email if not exists.\n \"\"\"\n try:\n email_address, _ = self.get_queryset().get_or_create(pk=data)\n return email_address\n except ObjectDoesNotExist:\n self.fail('does_not_exist', pk_value=data)\n except (TypeError, ValueError):\n self.fail('incorrect_type', data_type=type(data).__name__)\n", "path": "lego/apps/email/fields.py"}]}
num_tokens_prompt: 1,002
num_tokens_diff: 206
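The golden_diff fixes the case bug at two layers: normalise the address on input in `fields.py`, and reject non-lowercase addresses in `validators.py`. Both halves as a runnable sketch (the sample address is made up, and plain `ValueError` stands in for Django's `ValidationError`):

```python
# Sketch of the two-layer fix from the diff above.
email = "Jane.Doe@Example.Com"              # hypothetical input

# fields.py side: lowercase before get_or_create, matching what Google does
data = email.lower()
assert data == "jane.doe@example.com"

# validators.py side: defensive check for anything that slips through
def validate_email_address(address):
    if address != address.lower():
        raise ValueError("Email is not lowercased")  # ValidationError in the real code

validate_email_address(data)                # passes once normalised
```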
problem_id: gh_patches_debug_8994
source: rasdani/github-patches
task_type: git_diff
in_source_id: getnikola__nikola-760
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> USE_BUNDLES, attribute 'file_dep' must be {list, tuple} In Py3K map() and filter() return iterators. </issue> <code> [start of nikola/plugins/task/bundles.py] 1 # -*- coding: utf-8 -*- 2 3 # Copyright © 2012-2013 Roberto Alsina and others. 4 5 # Permission is hereby granted, free of charge, to any 6 # person obtaining a copy of this software and associated 7 # documentation files (the "Software"), to deal in the 8 # Software without restriction, including without limitation 9 # the rights to use, copy, modify, merge, publish, 10 # distribute, sublicense, and/or sell copies of the 11 # Software, and to permit persons to whom the Software is 12 # furnished to do so, subject to the following conditions: 13 # 14 # The above copyright notice and this permission notice 15 # shall be included in all copies or substantial portions of 16 # the Software. 17 # 18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY 19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE 20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR 21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS 22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 27 from __future__ import unicode_literals 28 29 import os 30 31 try: 32 import webassets 33 except ImportError: 34 webassets = None # NOQA 35 36 from nikola.plugin_categories import LateTask 37 from nikola import utils 38 39 40 class BuildBundles(LateTask): 41 """Bundle assets using WebAssets.""" 42 43 name = "build_bundles" 44 45 def set_site(self, site): 46 super(BuildBundles, self).set_site(site) 47 if webassets is None: 48 self.site.config['USE_BUNDLES'] = False 49 50 def gen_tasks(self): 51 """Bundle assets using WebAssets.""" 52 53 kw = { 54 'filters': self.site.config['FILTERS'], 55 'output_folder': self.site.config['OUTPUT_FOLDER'], 56 'cache_folder': self.site.config['CACHE_FOLDER'], 57 'theme_bundles': get_theme_bundles(self.site.THEMES), 58 'themes': self.site.THEMES, 59 'files_folders': self.site.config['FILES_FOLDERS'], 60 'code_color_scheme': self.site.config['CODE_COLOR_SCHEME'], 61 } 62 63 def build_bundle(output, inputs): 64 out_dir = os.path.join(kw['output_folder'], 65 os.path.dirname(output)) 66 inputs = [i for i in inputs if os.path.isfile( 67 os.path.join(out_dir, i))] 68 cache_dir = os.path.join(kw['cache_folder'], 'webassets') 69 utils.makedirs(cache_dir) 70 env = webassets.Environment(out_dir, os.path.dirname(output), 71 cache=cache_dir) 72 bundle = webassets.Bundle(*inputs, output=os.path.basename(output)) 73 env.register(output, bundle) 74 # This generates the file 75 env[output].urls() 76 77 flag = False 78 if (webassets is not None and self.site.config['USE_BUNDLES'] is not 79 False): 80 for name, files in kw['theme_bundles'].items(): 81 output_path = os.path.join(kw['output_folder'], name) 82 dname = os.path.dirname(name) 83 file_dep = [os.path.join(kw['output_folder'], dname, fname) 84 for fname in files] 85 file_dep = filter(os.path.isfile, file_dep) # removes missing files 86 task = { 87 'file_dep': file_dep, 88 'task_dep': ['copy_assets'], 89 'basename': str(self.name), 90 'name': str(output_path), 91 'actions': [(build_bundle, (name, files))], 92 'targets': [output_path], 93 'uptodate': 
[utils.config_changed(kw)], 94 'clean': True, 95 } 96 flag = True 97 yield utils.apply_filters(task, kw['filters']) 98 if flag is False: # No page rendered, yield a dummy task 99 yield { 100 'basename': self.name, 101 'uptodate': [True], 102 'name': 'None', 103 'actions': [], 104 } 105 106 107 def get_theme_bundles(themes): 108 """Given a theme chain, return the bundle definitions.""" 109 bundles = {} 110 for theme_name in themes: 111 bundles_path = os.path.join( 112 utils.get_theme_path(theme_name), 'bundles') 113 if os.path.isfile(bundles_path): 114 with open(bundles_path) as fd: 115 for line in fd: 116 name, files = line.split('=') 117 files = [f.strip() for f in files.split(',')] 118 bundles[name.strip()] = files 119 break 120 return bundles 121 [end of nikola/plugins/task/bundles.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nikola/plugins/task/bundles.py b/nikola/plugins/task/bundles.py --- a/nikola/plugins/task/bundles.py +++ b/nikola/plugins/task/bundles.py @@ -84,7 +84,7 @@ for fname in files] file_dep = filter(os.path.isfile, file_dep) # removes missing files task = { - 'file_dep': file_dep, + 'file_dep': list(file_dep), 'task_dep': ['copy_assets'], 'basename': str(self.name), 'name': str(output_path),
{"golden_diff": "diff --git a/nikola/plugins/task/bundles.py b/nikola/plugins/task/bundles.py\n--- a/nikola/plugins/task/bundles.py\n+++ b/nikola/plugins/task/bundles.py\n@@ -84,7 +84,7 @@\n for fname in files]\n file_dep = filter(os.path.isfile, file_dep) # removes missing files\n task = {\n- 'file_dep': file_dep,\n+ 'file_dep': list(file_dep),\n 'task_dep': ['copy_assets'],\n 'basename': str(self.name),\n 'name': str(output_path),\n", "issue": "USE_BUNDLES, attribute 'file_dep' must be {list, tuple}\nIn Py3K map() and filter() return iterators.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2013 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nfrom __future__ import unicode_literals\n\nimport os\n\ntry:\n import webassets\nexcept ImportError:\n webassets = None # NOQA\n\nfrom nikola.plugin_categories import LateTask\nfrom nikola import utils\n\n\nclass BuildBundles(LateTask):\n \"\"\"Bundle assets using WebAssets.\"\"\"\n\n name = \"build_bundles\"\n\n def set_site(self, site):\n super(BuildBundles, self).set_site(site)\n if webassets is None:\n self.site.config['USE_BUNDLES'] = False\n\n def gen_tasks(self):\n \"\"\"Bundle assets using WebAssets.\"\"\"\n\n kw = {\n 'filters': self.site.config['FILTERS'],\n 'output_folder': self.site.config['OUTPUT_FOLDER'],\n 'cache_folder': self.site.config['CACHE_FOLDER'],\n 'theme_bundles': get_theme_bundles(self.site.THEMES),\n 'themes': self.site.THEMES,\n 'files_folders': self.site.config['FILES_FOLDERS'],\n 'code_color_scheme': self.site.config['CODE_COLOR_SCHEME'],\n }\n\n def build_bundle(output, inputs):\n out_dir = os.path.join(kw['output_folder'],\n os.path.dirname(output))\n inputs = [i for i in inputs if os.path.isfile(\n os.path.join(out_dir, i))]\n cache_dir = os.path.join(kw['cache_folder'], 'webassets')\n utils.makedirs(cache_dir)\n env = webassets.Environment(out_dir, os.path.dirname(output),\n cache=cache_dir)\n bundle = webassets.Bundle(*inputs, output=os.path.basename(output))\n env.register(output, bundle)\n # This generates the file\n env[output].urls()\n\n flag = False\n if (webassets is not None and self.site.config['USE_BUNDLES'] is not\n False):\n for name, files in kw['theme_bundles'].items():\n output_path = os.path.join(kw['output_folder'], name)\n dname = os.path.dirname(name)\n file_dep = [os.path.join(kw['output_folder'], dname, fname)\n for fname in files]\n file_dep = filter(os.path.isfile, file_dep) # removes missing files\n 
task = {\n 'file_dep': file_dep,\n 'task_dep': ['copy_assets'],\n 'basename': str(self.name),\n 'name': str(output_path),\n 'actions': [(build_bundle, (name, files))],\n 'targets': [output_path],\n 'uptodate': [utils.config_changed(kw)],\n 'clean': True,\n }\n flag = True\n yield utils.apply_filters(task, kw['filters'])\n if flag is False: # No page rendered, yield a dummy task\n yield {\n 'basename': self.name,\n 'uptodate': [True],\n 'name': 'None',\n 'actions': [],\n }\n\n\ndef get_theme_bundles(themes):\n \"\"\"Given a theme chain, return the bundle definitions.\"\"\"\n bundles = {}\n for theme_name in themes:\n bundles_path = os.path.join(\n utils.get_theme_path(theme_name), 'bundles')\n if os.path.isfile(bundles_path):\n with open(bundles_path) as fd:\n for line in fd:\n name, files = line.split('=')\n files = [f.strip() for f in files.split(',')]\n bundles[name.strip()] = files\n break\n return bundles\n", "path": "nikola/plugins/task/bundles.py"}]}
num_tokens_prompt: 1,811
num_tokens_diff: 129
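The issue title quotes doit's complaint that `file_dep` must be a list or tuple, and the fix is a one-word wrap because on Python 3 `filter()` (like `map()`) returns a lazy iterator rather than a list. A runnable illustration:

```python
# On Python 3, filter() is lazy -- the golden_diff materialises it with list().
import os

file_dep = filter(os.path.isfile, ["bundle.css", "missing.css"])  # sample names
print(type(file_dep))       # <class 'filter'>, which doit's file_dep rejects
file_dep = list(file_dep)   # the added list() call; now a plain list
print(type(file_dep))       # <class 'list'>
```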
problem_id: gh_patches_debug_38719
source: rasdani/github-patches
task_type: git_diff
in_source_id: alltheplaces__alltheplaces-2460
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Spider afcurgentcare is broken During the global build at 2021-05-21-20-28-08, spider **afcurgentcare** failed with **0 features** and **0 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-21-20-28-08/logs/afcurgentcare.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-21-20-28-08/output/afcurgentcare.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-21-20-28-08/output/afcurgentcare.geojson)) </issue> <code> [start of locations/spiders/afcurgentcare.py] 1 import json 2 import re 3 import scrapy 4 from locations.items import GeojsonPointItem 5 from locations.hours import OpeningHours 6 7 class AfcUrgentCareSpider(scrapy.Spider): 8 name = "afcurgentcare" 9 item_attributes = { 'brand': "AFC Urgent Care" } 10 allowed_domains = ["afcurgentcare.com"] 11 download_delay = 0.2 12 start_urls = ( 13 'https://www.afcurgentcare.com/locations/', 14 ) 15 16 def parse(self, response): 17 for url in response.xpath('//li[@class="location"]/@data-href').extract(): 18 yield scrapy.Request( 19 response.urljoin(url), 20 callback=self.parse_store, 21 ) 22 23 def parse_store(self, response): 24 properties = { 25 'ref': response.url, 26 'lat': response.xpath('//div[@class="map-container"]/div/@data-latitude').extract_first(), 27 'lon': response.xpath('//div[@class="map-container"]/div/@data-longitude').extract_first(), 28 'phone': response.xpath('//a[@class="phone-link"]/span/text()').extract_first(), 29 'addr_full': response.xpath('//span[@itemprop="streetAddress"]/text()').extract_first().strip(), 30 'name': response.xpath('//meta[@itemprop="name legalName"]/@content').extract_first(), 31 'city': response.xpath('//span[@itemprop="addressLocality"]/text()').extract_first()[:-1], 32 'state': response.xpath('//span[@itemprop="addressRegion"]/text()').extract_first().strip(), 33 'postcode': response.xpath('//span[@itemprop="postalCode"]/text()').extract_first().strip(), 34 } 35 36 o = OpeningHours() 37 for h in response.css('#LocalMapAreaOpenHourBanner li.h-day'): 38 day = h.xpath('em/span/text()').extract_first().strip()[:2] 39 day_range = h.xpath('em/text()').extract_first().strip(':').strip() 40 open_time, close_time = day_range.split(' - ') 41 42 o.add_range(day, open_time, close_time, '%I:%M %p') 43 properties['opening_hours'] = o.as_opening_hours() 44 45 yield GeojsonPointItem(**properties) 46 [end of locations/spiders/afcurgentcare.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/locations/spiders/afcurgentcare.py b/locations/spiders/afcurgentcare.py --- a/locations/spiders/afcurgentcare.py +++ b/locations/spiders/afcurgentcare.py @@ -1,45 +1,48 @@ import json -import re +import urllib.parse + import scrapy + from locations.items import GeojsonPointItem -from locations.hours import OpeningHours +from locations.hours import OpeningHours, DAYS + class AfcUrgentCareSpider(scrapy.Spider): name = "afcurgentcare" - item_attributes = { 'brand': "AFC Urgent Care" } + item_attributes = {"brand": "AFC Urgent Care"} allowed_domains = ["afcurgentcare.com"] - download_delay = 0.2 start_urls = ( - 'https://www.afcurgentcare.com/locations/', + "https://www.afcurgentcare.com/modules/multilocation/?near_lat=39&near_lon=-98", ) def parse(self, response): - for url in response.xpath('//li[@class="location"]/@data-href').extract(): - yield scrapy.Request( - response.urljoin(url), - callback=self.parse_store, - ) + j = json.loads(response.body) + if j["meta"]["next"] is not None: + qs = "?" + urllib.parse.urlparse(j["meta"]["next"]).query + yield scrapy.Request(urllib.parse.urljoin(response.url, qs)) + for obj in j["objects"]: + yield from self.parse_store(obj) - def parse_store(self, response): + def parse_store(self, obj): properties = { - 'ref': response.url, - 'lat': response.xpath('//div[@class="map-container"]/div/@data-latitude').extract_first(), - 'lon': response.xpath('//div[@class="map-container"]/div/@data-longitude').extract_first(), - 'phone': response.xpath('//a[@class="phone-link"]/span/text()').extract_first(), - 'addr_full': response.xpath('//span[@itemprop="streetAddress"]/text()').extract_first().strip(), - 'name': response.xpath('//meta[@itemprop="name legalName"]/@content').extract_first(), - 'city': response.xpath('//span[@itemprop="addressLocality"]/text()').extract_first()[:-1], - 'state': response.xpath('//span[@itemprop="addressRegion"]/text()').extract_first().strip(), - 'postcode': response.xpath('//span[@itemprop="postalCode"]/text()').extract_first().strip(), + "ref": obj["id"], + "lat": obj["lat"], + "lon": obj["lon"], + "phone": obj["phonemap_e164"].get("phone"), + "addr_full": obj["street"], + "name": obj["location_name"], + "city": obj["city"], + "state": obj["state"], + "postcode": obj["postal_code"], + "website": obj["location_url"], } o = OpeningHours() - for h in response.css('#LocalMapAreaOpenHourBanner li.h-day'): - day = h.xpath('em/span/text()').extract_first().strip()[:2] - day_range = h.xpath('em/text()').extract_first().strip(':').strip() - open_time, close_time = day_range.split(' - ') - - o.add_range(day, open_time, close_time, '%I:%M %p') - properties['opening_hours'] = o.as_opening_hours() + for ([h, _], day) in zip(obj["hours_of_operation"], DAYS): + if not h: + continue + open_time, close_time = h + o.add_range(day, open_time, close_time, "%H:%M:%S") + properties["opening_hours"] = o.as_opening_hours() yield GeojsonPointItem(**properties)
{"golden_diff": "diff --git a/locations/spiders/afcurgentcare.py b/locations/spiders/afcurgentcare.py\n--- a/locations/spiders/afcurgentcare.py\n+++ b/locations/spiders/afcurgentcare.py\n@@ -1,45 +1,48 @@\n import json\n-import re\n+import urllib.parse\n+\n import scrapy\n+\n from locations.items import GeojsonPointItem\n-from locations.hours import OpeningHours\n+from locations.hours import OpeningHours, DAYS\n+\n \n class AfcUrgentCareSpider(scrapy.Spider):\n name = \"afcurgentcare\"\n- item_attributes = { 'brand': \"AFC Urgent Care\" }\n+ item_attributes = {\"brand\": \"AFC Urgent Care\"}\n allowed_domains = [\"afcurgentcare.com\"]\n- download_delay = 0.2\n start_urls = (\n- 'https://www.afcurgentcare.com/locations/',\n+ \"https://www.afcurgentcare.com/modules/multilocation/?near_lat=39&near_lon=-98\",\n )\n \n def parse(self, response):\n- for url in response.xpath('//li[@class=\"location\"]/@data-href').extract():\n- yield scrapy.Request(\n- response.urljoin(url),\n- callback=self.parse_store,\n- )\n+ j = json.loads(response.body)\n+ if j[\"meta\"][\"next\"] is not None:\n+ qs = \"?\" + urllib.parse.urlparse(j[\"meta\"][\"next\"]).query\n+ yield scrapy.Request(urllib.parse.urljoin(response.url, qs))\n+ for obj in j[\"objects\"]:\n+ yield from self.parse_store(obj)\n \n- def parse_store(self, response):\n+ def parse_store(self, obj):\n properties = {\n- 'ref': response.url,\n- 'lat': response.xpath('//div[@class=\"map-container\"]/div/@data-latitude').extract_first(),\n- 'lon': response.xpath('//div[@class=\"map-container\"]/div/@data-longitude').extract_first(),\n- 'phone': response.xpath('//a[@class=\"phone-link\"]/span/text()').extract_first(),\n- 'addr_full': response.xpath('//span[@itemprop=\"streetAddress\"]/text()').extract_first().strip(),\n- 'name': response.xpath('//meta[@itemprop=\"name legalName\"]/@content').extract_first(),\n- 'city': response.xpath('//span[@itemprop=\"addressLocality\"]/text()').extract_first()[:-1],\n- 'state': response.xpath('//span[@itemprop=\"addressRegion\"]/text()').extract_first().strip(),\n- 'postcode': response.xpath('//span[@itemprop=\"postalCode\"]/text()').extract_first().strip(),\n+ \"ref\": obj[\"id\"],\n+ \"lat\": obj[\"lat\"],\n+ \"lon\": obj[\"lon\"],\n+ \"phone\": obj[\"phonemap_e164\"].get(\"phone\"),\n+ \"addr_full\": obj[\"street\"],\n+ \"name\": obj[\"location_name\"],\n+ \"city\": obj[\"city\"],\n+ \"state\": obj[\"state\"],\n+ \"postcode\": obj[\"postal_code\"],\n+ \"website\": obj[\"location_url\"],\n }\n \n o = OpeningHours()\n- for h in response.css('#LocalMapAreaOpenHourBanner li.h-day'):\n- day = h.xpath('em/span/text()').extract_first().strip()[:2]\n- day_range = h.xpath('em/text()').extract_first().strip(':').strip()\n- open_time, close_time = day_range.split(' - ')\n-\n- o.add_range(day, open_time, close_time, '%I:%M %p')\n- properties['opening_hours'] = o.as_opening_hours()\n+ for ([h, _], day) in zip(obj[\"hours_of_operation\"], DAYS):\n+ if not h:\n+ continue\n+ open_time, close_time = h\n+ o.add_range(day, open_time, close_time, \"%H:%M:%S\")\n+ properties[\"opening_hours\"] = o.as_opening_hours()\n \n yield GeojsonPointItem(**properties)\n", "issue": "Spider afcurgentcare is broken\nDuring the global build at 2021-05-21-20-28-08, spider **afcurgentcare** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-21-20-28-08/logs/afcurgentcare.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-21-20-28-08/output/afcurgentcare.geojson) ([on a 
map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-21-20-28-08/output/afcurgentcare.geojson))\n", "before_files": [{"content": "import json\nimport re\nimport scrapy\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\nclass AfcUrgentCareSpider(scrapy.Spider):\n name = \"afcurgentcare\"\n item_attributes = { 'brand': \"AFC Urgent Care\" }\n allowed_domains = [\"afcurgentcare.com\"]\n download_delay = 0.2\n start_urls = (\n 'https://www.afcurgentcare.com/locations/',\n )\n\n def parse(self, response):\n for url in response.xpath('//li[@class=\"location\"]/@data-href').extract():\n yield scrapy.Request(\n response.urljoin(url),\n callback=self.parse_store,\n )\n\n def parse_store(self, response):\n properties = {\n 'ref': response.url,\n 'lat': response.xpath('//div[@class=\"map-container\"]/div/@data-latitude').extract_first(),\n 'lon': response.xpath('//div[@class=\"map-container\"]/div/@data-longitude').extract_first(),\n 'phone': response.xpath('//a[@class=\"phone-link\"]/span/text()').extract_first(),\n 'addr_full': response.xpath('//span[@itemprop=\"streetAddress\"]/text()').extract_first().strip(),\n 'name': response.xpath('//meta[@itemprop=\"name legalName\"]/@content').extract_first(),\n 'city': response.xpath('//span[@itemprop=\"addressLocality\"]/text()').extract_first()[:-1],\n 'state': response.xpath('//span[@itemprop=\"addressRegion\"]/text()').extract_first().strip(),\n 'postcode': response.xpath('//span[@itemprop=\"postalCode\"]/text()').extract_first().strip(),\n }\n\n o = OpeningHours()\n for h in response.css('#LocalMapAreaOpenHourBanner li.h-day'):\n day = h.xpath('em/span/text()').extract_first().strip()[:2]\n day_range = h.xpath('em/text()').extract_first().strip(':').strip()\n open_time, close_time = day_range.split(' - ')\n\n o.add_range(day, open_time, close_time, '%I:%M %p')\n properties['opening_hours'] = o.as_opening_hours()\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/afcurgentcare.py"}]}
1,295
885
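The golden diff in the record above replaces per-page HTML scraping with cursor-based pagination over a JSON API: each response advertises the next page in `meta["next"]`, and the spider re-attaches that URL's query string to the current endpoint. A minimal sketch of the same pattern follows; the endpoint, field names, and spider name here are hypothetical stand-ins (the real spider queries afcurgentcare.com's multilocation module and builds `GeojsonPointItem`s with opening hours).

```python
import json
import urllib.parse

import scrapy


class PagedApiSpider(scrapy.Spider):
    # Hypothetical spider showing only the cursor-following core of the fix.
    name = "paged_api_example"
    start_urls = ["https://example.com/api/locations/?near_lat=39&near_lon=-98"]

    def parse(self, response):
        payload = json.loads(response.body)

        # The API reports the next page as a full URL in meta["next"];
        # keep only its query string and re-join it to the current endpoint.
        next_url = payload["meta"].get("next")
        if next_url is not None:
            query = "?" + urllib.parse.urlparse(next_url).query
            yield scrapy.Request(urllib.parse.urljoin(response.url, query))

        # Each object already carries structured fields, so no XPath parsing
        # of store pages is needed anymore.
        for obj in payload["objects"]:
            yield {"ref": obj["id"], "lat": obj["lat"], "lon": obj["lon"]}
```

Because the data now arrives as JSON in bulk, the per-request `download_delay` and the per-store follow-up requests from the old spider can be dropped, which is exactly what the diff does.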
gh_patches_debug_16472
rasdani/github-patches
git_diff
getsentry__sentry-python-1812
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> SQLAlchemy integration fails to parse version `2.0.0rc1` ### How do you use Sentry? Sentry Saas (sentry.io) ### Version 1.12.1 ### Steps to Reproduce Call `sentry_sdk.init()` with SQLAlchemy integration and install SQLAlchemy==2.0.0rc1. ### Expected Result no error ### Actual Result ``` @staticmethod def setup_once(): # type: () -> None try: version = tuple(map(int, SQLALCHEMY_VERSION.split("b")[0].split("."))) except (TypeError, ValueError): > raise DidNotEnable( "Unparsable SQLAlchemy version: {}".format(SQLALCHEMY_VERSION) ) E sentry_sdk.integrations.DidNotEnable: Unparsable SQLAlchemy version: 2.0.0rc1 ``` </issue> <code> [start of sentry_sdk/integrations/sqlalchemy.py] 1 from __future__ import absolute_import 2 3 from sentry_sdk._types import MYPY 4 from sentry_sdk.hub import Hub 5 from sentry_sdk.integrations import Integration, DidNotEnable 6 from sentry_sdk.tracing_utils import record_sql_queries 7 8 try: 9 from sqlalchemy.engine import Engine # type: ignore 10 from sqlalchemy.event import listen # type: ignore 11 from sqlalchemy import __version__ as SQLALCHEMY_VERSION # type: ignore 12 except ImportError: 13 raise DidNotEnable("SQLAlchemy not installed.") 14 15 if MYPY: 16 from typing import Any 17 from typing import ContextManager 18 from typing import Optional 19 20 from sentry_sdk.tracing import Span 21 22 23 class SqlalchemyIntegration(Integration): 24 identifier = "sqlalchemy" 25 26 @staticmethod 27 def setup_once(): 28 # type: () -> None 29 30 try: 31 version = tuple(map(int, SQLALCHEMY_VERSION.split("b")[0].split("."))) 32 except (TypeError, ValueError): 33 raise DidNotEnable( 34 "Unparsable SQLAlchemy version: {}".format(SQLALCHEMY_VERSION) 35 ) 36 37 if version < (1, 2): 38 raise DidNotEnable("SQLAlchemy 1.2 or newer required.") 39 40 listen(Engine, "before_cursor_execute", _before_cursor_execute) 41 listen(Engine, "after_cursor_execute", _after_cursor_execute) 42 listen(Engine, "handle_error", _handle_error) 43 44 45 def _before_cursor_execute( 46 conn, cursor, statement, parameters, context, executemany, *args 47 ): 48 # type: (Any, Any, Any, Any, Any, bool, *Any) -> None 49 hub = Hub.current 50 if hub.get_integration(SqlalchemyIntegration) is None: 51 return 52 53 ctx_mgr = record_sql_queries( 54 hub, 55 cursor, 56 statement, 57 parameters, 58 paramstyle=context and context.dialect and context.dialect.paramstyle or None, 59 executemany=executemany, 60 ) 61 context._sentry_sql_span_manager = ctx_mgr 62 63 span = ctx_mgr.__enter__() 64 65 if span is not None: 66 context._sentry_sql_span = span 67 68 69 def _after_cursor_execute(conn, cursor, statement, parameters, context, *args): 70 # type: (Any, Any, Any, Any, Any, *Any) -> None 71 ctx_mgr = getattr( 72 context, "_sentry_sql_span_manager", None 73 ) # type: Optional[ContextManager[Any]] 74 75 if ctx_mgr is not None: 76 context._sentry_sql_span_manager = None 77 ctx_mgr.__exit__(None, None, None) 78 79 80 def _handle_error(context, *args): 81 # type: (Any, *Any) -> None 82 execution_context = context.execution_context 83 if execution_context is None: 84 return 85 86 span = getattr(execution_context, "_sentry_sql_span", None) # type: Optional[Span] 87 88 if span is not None: 89 span.set_status("internal_error") 90 91 # _after_cursor_execute does not get called for crashing SQL stmts. Judging 92 # from SQLAlchemy codebase it does seem like any error coming into this 93 # handler is going to be fatal. 
94 ctx_mgr = getattr( 95 execution_context, "_sentry_sql_span_manager", None 96 ) # type: Optional[ContextManager[Any]] 97 98 if ctx_mgr is not None: 99 execution_context._sentry_sql_span_manager = None 100 ctx_mgr.__exit__(None, None, None) 101 [end of sentry_sdk/integrations/sqlalchemy.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sentry_sdk/integrations/sqlalchemy.py b/sentry_sdk/integrations/sqlalchemy.py --- a/sentry_sdk/integrations/sqlalchemy.py +++ b/sentry_sdk/integrations/sqlalchemy.py @@ -1,5 +1,7 @@ from __future__ import absolute_import +import re + from sentry_sdk._types import MYPY from sentry_sdk.hub import Hub from sentry_sdk.integrations import Integration, DidNotEnable @@ -28,7 +30,9 @@ # type: () -> None try: - version = tuple(map(int, SQLALCHEMY_VERSION.split("b")[0].split("."))) + version = tuple( + map(int, re.split("b|rc", SQLALCHEMY_VERSION)[0].split(".")) + ) except (TypeError, ValueError): raise DidNotEnable( "Unparsable SQLAlchemy version: {}".format(SQLALCHEMY_VERSION)
{"golden_diff": "diff --git a/sentry_sdk/integrations/sqlalchemy.py b/sentry_sdk/integrations/sqlalchemy.py\n--- a/sentry_sdk/integrations/sqlalchemy.py\n+++ b/sentry_sdk/integrations/sqlalchemy.py\n@@ -1,5 +1,7 @@\n from __future__ import absolute_import\n \n+import re\n+\n from sentry_sdk._types import MYPY\n from sentry_sdk.hub import Hub\n from sentry_sdk.integrations import Integration, DidNotEnable\n@@ -28,7 +30,9 @@\n # type: () -> None\n \n try:\n- version = tuple(map(int, SQLALCHEMY_VERSION.split(\"b\")[0].split(\".\")))\n+ version = tuple(\n+ map(int, re.split(\"b|rc\", SQLALCHEMY_VERSION)[0].split(\".\"))\n+ )\n except (TypeError, ValueError):\n raise DidNotEnable(\n \"Unparsable SQLAlchemy version: {}\".format(SQLALCHEMY_VERSION)\n", "issue": "SQLAlchemy integration fails to parse version `2.0.0rc1`\n### How do you use Sentry?\n\nSentry Saas (sentry.io)\n\n### Version\n\n1.12.1\n\n### Steps to Reproduce\n\nCall `sentry_sdk.init()` with SQLAlchemy integration and install SQLAlchemy==2.0.0rc1.\n\n### Expected Result\n\nno error\n\n### Actual Result\n\n```\r\n @staticmethod\r\n def setup_once():\r\n # type: () -> None\r\n \r\n try:\r\n version = tuple(map(int, SQLALCHEMY_VERSION.split(\"b\")[0].split(\".\")))\r\n except (TypeError, ValueError):\r\n > raise DidNotEnable(\r\n \"Unparsable SQLAlchemy version: {}\".format(SQLALCHEMY_VERSION)\r\n )\r\n E sentry_sdk.integrations.DidNotEnable: Unparsable SQLAlchemy version: 2.0.0rc1\r\n```\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom sentry_sdk._types import MYPY\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.integrations import Integration, DidNotEnable\nfrom sentry_sdk.tracing_utils import record_sql_queries\n\ntry:\n from sqlalchemy.engine import Engine # type: ignore\n from sqlalchemy.event import listen # type: ignore\n from sqlalchemy import __version__ as SQLALCHEMY_VERSION # type: ignore\nexcept ImportError:\n raise DidNotEnable(\"SQLAlchemy not installed.\")\n\nif MYPY:\n from typing import Any\n from typing import ContextManager\n from typing import Optional\n\n from sentry_sdk.tracing import Span\n\n\nclass SqlalchemyIntegration(Integration):\n identifier = \"sqlalchemy\"\n\n @staticmethod\n def setup_once():\n # type: () -> None\n\n try:\n version = tuple(map(int, SQLALCHEMY_VERSION.split(\"b\")[0].split(\".\")))\n except (TypeError, ValueError):\n raise DidNotEnable(\n \"Unparsable SQLAlchemy version: {}\".format(SQLALCHEMY_VERSION)\n )\n\n if version < (1, 2):\n raise DidNotEnable(\"SQLAlchemy 1.2 or newer required.\")\n\n listen(Engine, \"before_cursor_execute\", _before_cursor_execute)\n listen(Engine, \"after_cursor_execute\", _after_cursor_execute)\n listen(Engine, \"handle_error\", _handle_error)\n\n\ndef _before_cursor_execute(\n conn, cursor, statement, parameters, context, executemany, *args\n):\n # type: (Any, Any, Any, Any, Any, bool, *Any) -> None\n hub = Hub.current\n if hub.get_integration(SqlalchemyIntegration) is None:\n return\n\n ctx_mgr = record_sql_queries(\n hub,\n cursor,\n statement,\n parameters,\n paramstyle=context and context.dialect and context.dialect.paramstyle or None,\n executemany=executemany,\n )\n context._sentry_sql_span_manager = ctx_mgr\n\n span = ctx_mgr.__enter__()\n\n if span is not None:\n context._sentry_sql_span = span\n\n\ndef _after_cursor_execute(conn, cursor, statement, parameters, context, *args):\n # type: (Any, Any, Any, Any, Any, *Any) -> None\n ctx_mgr = getattr(\n context, \"_sentry_sql_span_manager\", None\n ) # type: 
Optional[ContextManager[Any]]\n\n if ctx_mgr is not None:\n context._sentry_sql_span_manager = None\n ctx_mgr.__exit__(None, None, None)\n\n\ndef _handle_error(context, *args):\n # type: (Any, *Any) -> None\n execution_context = context.execution_context\n if execution_context is None:\n return\n\n span = getattr(execution_context, \"_sentry_sql_span\", None) # type: Optional[Span]\n\n if span is not None:\n span.set_status(\"internal_error\")\n\n # _after_cursor_execute does not get called for crashing SQL stmts. Judging\n # from SQLAlchemy codebase it does seem like any error coming into this\n # handler is going to be fatal.\n ctx_mgr = getattr(\n execution_context, \"_sentry_sql_span_manager\", None\n ) # type: Optional[ContextManager[Any]]\n\n if ctx_mgr is not None:\n execution_context._sentry_sql_span_manager = None\n ctx_mgr.__exit__(None, None, None)\n", "path": "sentry_sdk/integrations/sqlalchemy.py"}]}
1,685
206
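The fix in the record above is a one-liner: tolerate `rc` pre-release suffixes as well as `b` when turning SQLAlchemy's version string into a tuple of ints. A self-contained sketch of that parsing logic, with the failing version from the bug report as a test case:

```python
import re


def parse_version(version_string):
    # Mirrors the golden diff: strip a "b"/"rc" pre-release suffix before
    # splitting on dots; anything unparsable still raises for the caller.
    return tuple(map(int, re.split("b|rc", version_string)[0].split(".")))


assert parse_version("1.4.46") == (1, 4, 46)   # plain release
assert parse_version("1.4.0b3") == (1, 4, 0)   # beta, already handled
assert parse_version("2.0.0rc1") == (2, 0, 0)  # the case from the issue
```

For new code, `packaging.version.parse` handles arbitrary pre-release tags more robustly; keeping the regex approach here avoids adding a dependency to the SDK.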
gh_patches_debug_27444
rasdani/github-patches
git_diff
instadeepai__Mava-433
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [FEATURE] Abstract builder class for Jax-based systems ### Feature Abstract class for system building for Jax-based Mava systems. ### Proposal The builder should take care of building essentially elements of a MARL system that run in different processes. These include the data server, variable server, executor (and evaluator) and trainer. ### Testing Tests will only consider the proper inheritance of the abstract builder class. ### Definition of done All abstract methods are defined and have input and return types specified. ### Mandatory checklist before making a PR * [x] The success criteria laid down in “Definition of done” are met. * [x] Code is documented - docstrings for methods and classes, static types for arguments. * [x] Code is tested - unit, integration and/or functional tests are added. * [x] Documentation is updated - README, CONTRIBUTING, or other documentation. * [x] All functional tests are green. </issue> <code> [start of mava/core_jax.py] 1 # python3 2 # Copyright 2021 InstaDeep Ltd. All rights reserved. 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 16 17 """Core Mava interfaces for Jax systems.""" 18 19 import abc 20 from types import SimpleNamespace 21 from typing import Any, List 22 23 24 class BaseSystem(abc.ABC): 25 """Abstract system object.""" 26 27 @abc.abstractmethod 28 def design(self) -> SimpleNamespace: 29 """System design specifying the list of components to use. 30 31 Returns: 32 system callback components 33 """ 34 35 @abc.abstractmethod 36 def update(self, component: Any) -> None: 37 """Update a component that has already been added to the system. 38 39 Args: 40 component : system callback component 41 """ 42 43 @abc.abstractmethod 44 def add(self, component: Any) -> None: 45 """Add a new component to the system. 46 47 Args: 48 component : system callback component 49 """ 50 51 @abc.abstractmethod 52 def configure(self, **kwargs: Any) -> None: 53 """Configure system hyperparameters.""" 54 55 @abc.abstractmethod 56 def launch( 57 self, 58 num_executors: int, 59 nodes_on_gpu: List[str], 60 multi_process: bool = True, 61 name: str = "system", 62 ) -> None: 63 """Run the system. 64 65 Args: 66 num_executors : number of executor processes to run in parallel 67 nodes_on_gpu : which processes to run on gpu 68 multi_process : whether to run single or multi process, single process runs 69 are primarily for debugging 70 name : name of the system 71 """ 72 [end of mava/core_jax.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mava/core_jax.py b/mava/core_jax.py --- a/mava/core_jax.py +++ b/mava/core_jax.py @@ -69,3 +69,59 @@ are primarily for debugging name : name of the system """ + + +class SystemBuilder(abc.ABC): + """Abstract system builder.""" + + @abc.abstractmethod + def data_server(self) -> List[Any]: + """Data server to store and serve transition data from and to system. + + Returns: + System data server + """ + + @abc.abstractmethod + def parameter_server(self) -> Any: + """Parameter server to store and serve system network parameters. + + Returns: + System parameter server + """ + + @abc.abstractmethod + def executor( + self, executor_id: str, data_server_client: Any, parameter_server_client: Any + ) -> Any: + """Executor, a collection of agents in an environment to gather experience. + + Args: + executor_id : id to identify the executor process for logging purposes + data_server_client : data server client for pushing transition data + parameter_server_client : parameter server client for pulling parameters + Returns: + System executor + """ + + @abc.abstractmethod + def trainer( + self, trainer_id: str, data_server_client: Any, parameter_server_client: Any + ) -> Any: + """Trainer, a system process for updating agent specific network parameters. + + Args: + trainer_id : id to identify the trainer process for logging purposes + data_server_client : data server client for pulling transition data + parameter_server_client : parameter server client for pushing parameters + Returns: + System trainer + """ + + @abc.abstractmethod + def build(self) -> None: + """Construct program nodes.""" + + @abc.abstractmethod + def launch(self) -> None: + """Run the graph program."""
{"golden_diff": "diff --git a/mava/core_jax.py b/mava/core_jax.py\n--- a/mava/core_jax.py\n+++ b/mava/core_jax.py\n@@ -69,3 +69,59 @@\n are primarily for debugging\n name : name of the system\n \"\"\"\n+\n+\n+class SystemBuilder(abc.ABC):\n+ \"\"\"Abstract system builder.\"\"\"\n+\n+ @abc.abstractmethod\n+ def data_server(self) -> List[Any]:\n+ \"\"\"Data server to store and serve transition data from and to system.\n+\n+ Returns:\n+ System data server\n+ \"\"\"\n+\n+ @abc.abstractmethod\n+ def parameter_server(self) -> Any:\n+ \"\"\"Parameter server to store and serve system network parameters.\n+\n+ Returns:\n+ System parameter server\n+ \"\"\"\n+\n+ @abc.abstractmethod\n+ def executor(\n+ self, executor_id: str, data_server_client: Any, parameter_server_client: Any\n+ ) -> Any:\n+ \"\"\"Executor, a collection of agents in an environment to gather experience.\n+\n+ Args:\n+ executor_id : id to identify the executor process for logging purposes\n+ data_server_client : data server client for pushing transition data\n+ parameter_server_client : parameter server client for pulling parameters\n+ Returns:\n+ System executor\n+ \"\"\"\n+\n+ @abc.abstractmethod\n+ def trainer(\n+ self, trainer_id: str, data_server_client: Any, parameter_server_client: Any\n+ ) -> Any:\n+ \"\"\"Trainer, a system process for updating agent specific network parameters.\n+\n+ Args:\n+ trainer_id : id to identify the trainer process for logging purposes\n+ data_server_client : data server client for pulling transition data\n+ parameter_server_client : parameter server client for pushing parameters\n+ Returns:\n+ System trainer\n+ \"\"\"\n+\n+ @abc.abstractmethod\n+ def build(self) -> None:\n+ \"\"\"Construct program nodes.\"\"\"\n+\n+ @abc.abstractmethod\n+ def launch(self) -> None:\n+ \"\"\"Run the graph program.\"\"\"\n", "issue": "[FEATURE] Abstract builder class for Jax-based systems\n### Feature\r\nAbstract class for system building for Jax-based Mava systems.\r\n\r\n### Proposal\r\nThe builder should take care of building essentially elements of a MARL system that run in different processes. These include the data server, variable server, executor (and evaluator) and trainer. \r\n\r\n### Testing\r\nTests will only consider the proper inheritance of the abstract builder class.\r\n\r\n### Definition of done\r\nAll abstract methods are defined and have input and return types specified. \r\n\r\n### Mandatory checklist before making a PR\r\n* [x] The success criteria laid down in \u201cDefinition of done\u201d are met.\r\n* [x] Code is documented - docstrings for methods and classes, static types for arguments.\r\n* [x] Code is tested - unit, integration and/or functional tests are added.\r\n* [x] Documentation is updated - README, CONTRIBUTING, or other documentation.\r\n* [x] All functional tests are green.\r\n\n", "before_files": [{"content": "# python3\n# Copyright 2021 InstaDeep Ltd. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"Core Mava interfaces for Jax systems.\"\"\"\n\nimport abc\nfrom types import SimpleNamespace\nfrom typing import Any, List\n\n\nclass BaseSystem(abc.ABC):\n \"\"\"Abstract system object.\"\"\"\n\n @abc.abstractmethod\n def design(self) -> SimpleNamespace:\n \"\"\"System design specifying the list of components to use.\n\n Returns:\n system callback components\n \"\"\"\n\n @abc.abstractmethod\n def update(self, component: Any) -> None:\n \"\"\"Update a component that has already been added to the system.\n\n Args:\n component : system callback component\n \"\"\"\n\n @abc.abstractmethod\n def add(self, component: Any) -> None:\n \"\"\"Add a new component to the system.\n\n Args:\n component : system callback component\n \"\"\"\n\n @abc.abstractmethod\n def configure(self, **kwargs: Any) -> None:\n \"\"\"Configure system hyperparameters.\"\"\"\n\n @abc.abstractmethod\n def launch(\n self,\n num_executors: int,\n nodes_on_gpu: List[str],\n multi_process: bool = True,\n name: str = \"system\",\n ) -> None:\n \"\"\"Run the system.\n\n Args:\n num_executors : number of executor processes to run in parallel\n nodes_on_gpu : which processes to run on gpu\n multi_process : whether to run single or multi process, single process runs\n are primarily for debugging\n name : name of the system\n \"\"\"\n", "path": "mava/core_jax.py"}]}
1,327
457
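The golden diff above adds an abstract `SystemBuilder` with `abc.abstractmethod` stubs, and the issue's "Testing" section says the tests only check proper inheritance. A trimmed, runnable sketch of how that enforcement works (the method set is shortened here for illustration):

```python
import abc
from typing import Any, List


class SystemBuilder(abc.ABC):
    # Two of the five abstract methods from the diff, kept short.

    @abc.abstractmethod
    def data_server(self) -> List[Any]:
        """Data server to store and serve transition data."""

    @abc.abstractmethod
    def trainer(self, trainer_id: str, data_server_client: Any,
                parameter_server_client: Any) -> Any:
        """Trainer process for updating network parameters."""


class IncompleteBuilder(SystemBuilder):
    def data_server(self) -> List[Any]:
        return []
    # trainer() is deliberately missing.


try:
    IncompleteBuilder()
except TypeError as exc:
    print(exc)  # Can't instantiate abstract class IncompleteBuilder ...
```

This is the behavior the "definition of done" relies on: any concrete builder that forgets a process (data server, parameter server, executor, trainer) fails at instantiation time rather than at launch.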
gh_patches_debug_31759
rasdani/github-patches
git_diff
dotkom__onlineweb4-1513
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Users applying for membership changes with field of study gets expiredate in the past If you apply for membership today with a Bachelor started in 2011, you will get an expiry date of 2014-09-16. The given expiration date for a membership should be adjusted upward if the suggested duration of a field of study puts this date in the past. </issue> <code> [start of apps/approval/views.py] 1 # -*- encoding: utf-8 -*- 2 3 import datetime 4 5 from django.contrib import messages 6 from django.contrib.auth.decorators import login_required 7 from django.http import Http404 8 from django.shortcuts import get_object_or_404, redirect 9 from django.utils.translation import ugettext as _ 10 11 from apps.approval.forms import FieldOfStudyApplicationForm 12 from apps.approval.models import MembershipApproval 13 from apps.authentication.models import AllowedUsername, get_length_of_field_of_study 14 15 16 @login_required 17 def create_fos_application(request): 18 if request.method == 'POST': 19 if not request.user.ntnu_username: 20 messages.error(request, _("Du må knytte et NTNU-brukernavn til kontoen din.")) 21 return redirect('profiles_active', active_tab='membership') 22 23 form = FieldOfStudyApplicationForm(request.POST) 24 if form.is_valid(): 25 cleaned = form.cleaned_data 26 27 field_of_study = int(cleaned['field_of_study']) 28 29 if field_of_study == 0: 30 messages.warning(request, _("Denne studieretningen (Gjest) er ikke et gyldig alternativ.")) 31 return redirect('profiles_active', active_tab='membership') 32 33 started_day = 1 34 started_month = 0 35 started_year = int(cleaned['started_year']) 36 37 if cleaned['started_semester'] == "h": 38 started_month = 7 39 if cleaned['started_semester'] == "v": 40 started_month = 1 41 42 started_date = datetime.date(started_year, started_month, started_day) 43 44 # Does the user already have a field of study and started date? 
45 if request.user.started_date and request.user.field_of_study: 46 # If there is no change from the current settings, ignore the request 47 if request.user.started_date == started_date and request.user.field_of_study == field_of_study: 48 messages.error( 49 request, 50 _("Du er allerede registrert med denne studieretningen og denne startdatoen.") 51 ) 52 return redirect('profiles_active', active_tab='membership') 53 54 application = MembershipApproval( 55 applicant=request.user, 56 field_of_study=field_of_study, 57 started_date=started_date 58 ) 59 60 length_of_fos = get_length_of_field_of_study(field_of_study) 61 if length_of_fos > 0: 62 # Expiry dates should be 15th September, so that we have tiem to get new lists from NTNU 63 application.new_expiry_date = datetime.date( 64 started_year, 9, 16) + datetime.timedelta(days=365*length_of_fos) 65 application.save() 66 67 messages.success(request, _("Søknad om bytte av studieretning er sendt.")) 68 69 return redirect('profiles_active', active_tab='membership') 70 raise Http404 71 72 73 @login_required 74 def create_membership_application(request): 75 if request.method == 'POST': 76 if not request.user.has_expiring_membership: 77 messages.error(request, _("Din bruker har ikke et utløpende medlemskap.")) 78 return redirect('profiles_active', active_tab='membership') 79 80 if not request.user.ntnu_username: 81 messages.error(request, _("Du må knytte et NTNU-brukernavn til kontoen din.")) 82 return redirect('profiles_active', active_tab='membership') 83 84 # Extend length of membership by 1 year 85 membership = AllowedUsername.objects.get(username=request.user.ntnu_username) 86 new_expiration_date = datetime.date(membership.expiration_date.year + 1, 9, 16) 87 88 application = MembershipApproval( 89 applicant=request.user, 90 new_expiry_date=new_expiration_date, 91 ) 92 application.save() 93 94 messages.success(request, _("Søknad om ett års forlenget medlemskap er sendt.")) 95 96 return redirect('profiles_active', active_tab='membership') 97 raise Http404 98 99 100 @login_required 101 def cancel_application(request, application_id): 102 app = get_object_or_404(MembershipApproval, pk=application_id) 103 104 if app.applicant != request.user: 105 messages.error(request, _("Bare søkeren selv kan slette en søknad.")) 106 return redirect('profiles_active', active_tab='membership') 107 108 if app.processed: 109 messages.error(request, _("Denne søknaden er behandlet og kan ikke slettes.")) 110 return redirect('profiles_active', active_tab='membership') 111 112 app.delete() 113 114 return redirect('profiles_active', active_tab='membership') 115 [end of apps/approval/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/apps/approval/views.py b/apps/approval/views.py --- a/apps/approval/views.py +++ b/apps/approval/views.py @@ -6,6 +6,7 @@ from django.contrib.auth.decorators import login_required from django.http import Http404 from django.shortcuts import get_object_or_404, redirect +from django.utils import timezone from django.utils.translation import ugettext as _ from apps.approval.forms import FieldOfStudyApplicationForm @@ -59,9 +60,7 @@ length_of_fos = get_length_of_field_of_study(field_of_study) if length_of_fos > 0: - # Expiry dates should be 15th September, so that we have tiem to get new lists from NTNU - application.new_expiry_date = datetime.date( - started_year, 9, 16) + datetime.timedelta(days=365*length_of_fos) + application.new_expiry_date = get_expiry_date(started_year, length_of_fos) application.save() messages.success(request, _("Søknad om bytte av studieretning er sendt.")) @@ -70,6 +69,21 @@ raise Http404 +def get_expiry_date(started_year, length_of_fos): + today = timezone.now().date() + # Expiry dates should be 15th September, so that we have time to get new lists from NTNU + new_expiry_date = datetime.date( + started_year, 9, 16) + datetime.timedelta(days=365*length_of_fos) + # Expiry dates in the past sets the expiry date to next september + if new_expiry_date < today: + if today < datetime.date(today.year, 9, 15): + new_expiry_date = datetime.date(today.year, 9, 15) + else: + new_expiry_date = datetime.date( + today.year, 9, 16) + datetime.timedelta(days=365) + return new_expiry_date + + @login_required def create_membership_application(request): if request.method == 'POST':
{"golden_diff": "diff --git a/apps/approval/views.py b/apps/approval/views.py\n--- a/apps/approval/views.py\n+++ b/apps/approval/views.py\n@@ -6,6 +6,7 @@\n from django.contrib.auth.decorators import login_required\n from django.http import Http404\n from django.shortcuts import get_object_or_404, redirect\n+from django.utils import timezone\n from django.utils.translation import ugettext as _\n \n from apps.approval.forms import FieldOfStudyApplicationForm\n@@ -59,9 +60,7 @@\n \n length_of_fos = get_length_of_field_of_study(field_of_study)\n if length_of_fos > 0:\n- # Expiry dates should be 15th September, so that we have tiem to get new lists from NTNU\n- application.new_expiry_date = datetime.date(\n- started_year, 9, 16) + datetime.timedelta(days=365*length_of_fos)\n+ application.new_expiry_date = get_expiry_date(started_year, length_of_fos)\n application.save()\n \n messages.success(request, _(\"S\u00f8knad om bytte av studieretning er sendt.\"))\n@@ -70,6 +69,21 @@\n raise Http404\n \n \n+def get_expiry_date(started_year, length_of_fos):\n+ today = timezone.now().date()\n+ # Expiry dates should be 15th September, so that we have time to get new lists from NTNU\n+ new_expiry_date = datetime.date(\n+ started_year, 9, 16) + datetime.timedelta(days=365*length_of_fos)\n+ # Expiry dates in the past sets the expiry date to next september\n+ if new_expiry_date < today:\n+ if today < datetime.date(today.year, 9, 15):\n+ new_expiry_date = datetime.date(today.year, 9, 15)\n+ else:\n+ new_expiry_date = datetime.date(\n+ today.year, 9, 16) + datetime.timedelta(days=365)\n+ return new_expiry_date\n+\n+\n @login_required\n def create_membership_application(request):\n if request.method == 'POST':\n", "issue": "Users applying for membership changes with field of study gets expiredate in the past\nIf you apply for membership today with a Bachelor started in 2011, you will get an expiry date of 2014-09-16.\n\nThe given expiration date for a membership should be adjusted upward if the suggested duration of a field of study puts this date in the past.\n\n", "before_files": [{"content": "# -*- encoding: utf-8 -*-\n\nimport datetime\n\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.utils.translation import ugettext as _\n\nfrom apps.approval.forms import FieldOfStudyApplicationForm\nfrom apps.approval.models import MembershipApproval\nfrom apps.authentication.models import AllowedUsername, get_length_of_field_of_study\n\n\n@login_required\ndef create_fos_application(request):\n if request.method == 'POST':\n if not request.user.ntnu_username:\n messages.error(request, _(\"Du m\u00e5 knytte et NTNU-brukernavn til kontoen din.\"))\n return redirect('profiles_active', active_tab='membership')\n\n form = FieldOfStudyApplicationForm(request.POST)\n if form.is_valid():\n cleaned = form.cleaned_data\n\n field_of_study = int(cleaned['field_of_study'])\n\n if field_of_study == 0:\n messages.warning(request, _(\"Denne studieretningen (Gjest) er ikke et gyldig alternativ.\"))\n return redirect('profiles_active', active_tab='membership')\n\n started_day = 1\n started_month = 0\n started_year = int(cleaned['started_year'])\n\n if cleaned['started_semester'] == \"h\":\n started_month = 7\n if cleaned['started_semester'] == \"v\":\n started_month = 1\n\n started_date = datetime.date(started_year, started_month, started_day)\n\n # Does the user already have a field of 
study and started date?\n if request.user.started_date and request.user.field_of_study:\n # If there is no change from the current settings, ignore the request\n if request.user.started_date == started_date and request.user.field_of_study == field_of_study:\n messages.error(\n request,\n _(\"Du er allerede registrert med denne studieretningen og denne startdatoen.\")\n )\n return redirect('profiles_active', active_tab='membership')\n\n application = MembershipApproval(\n applicant=request.user,\n field_of_study=field_of_study,\n started_date=started_date\n )\n\n length_of_fos = get_length_of_field_of_study(field_of_study)\n if length_of_fos > 0:\n # Expiry dates should be 15th September, so that we have tiem to get new lists from NTNU\n application.new_expiry_date = datetime.date(\n started_year, 9, 16) + datetime.timedelta(days=365*length_of_fos)\n application.save()\n\n messages.success(request, _(\"S\u00f8knad om bytte av studieretning er sendt.\"))\n\n return redirect('profiles_active', active_tab='membership')\n raise Http404\n\n\n@login_required\ndef create_membership_application(request):\n if request.method == 'POST':\n if not request.user.has_expiring_membership:\n messages.error(request, _(\"Din bruker har ikke et utl\u00f8pende medlemskap.\"))\n return redirect('profiles_active', active_tab='membership')\n\n if not request.user.ntnu_username:\n messages.error(request, _(\"Du m\u00e5 knytte et NTNU-brukernavn til kontoen din.\"))\n return redirect('profiles_active', active_tab='membership')\n\n # Extend length of membership by 1 year\n membership = AllowedUsername.objects.get(username=request.user.ntnu_username)\n new_expiration_date = datetime.date(membership.expiration_date.year + 1, 9, 16)\n\n application = MembershipApproval(\n applicant=request.user,\n new_expiry_date=new_expiration_date,\n )\n application.save()\n\n messages.success(request, _(\"S\u00f8knad om ett \u00e5rs forlenget medlemskap er sendt.\"))\n\n return redirect('profiles_active', active_tab='membership')\n raise Http404\n\n\n@login_required\ndef cancel_application(request, application_id):\n app = get_object_or_404(MembershipApproval, pk=application_id)\n\n if app.applicant != request.user:\n messages.error(request, _(\"Bare s\u00f8keren selv kan slette en s\u00f8knad.\"))\n return redirect('profiles_active', active_tab='membership')\n\n if app.processed:\n messages.error(request, _(\"Denne s\u00f8knaden er behandlet og kan ikke slettes.\"))\n return redirect('profiles_active', active_tab='membership')\n\n app.delete()\n\n return redirect('profiles_active', active_tab='membership')\n", "path": "apps/approval/views.py"}]}
1,833
487
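The fix above factors the expiry computation into a `get_expiry_date` helper that clamps dates landing in the past forward to the coming September cutoff. The sketch below is a standalone version of that helper; it uses `datetime.date.today()` instead of Django's `timezone.now()` so it runs outside a Django project, and the injectable `today` parameter is added here only to make the example deterministic.

```python
import datetime


def get_expiry_date(started_year, length_of_fos, today=None):
    # Same clamping logic as the golden diff, minus the Django dependency.
    today = today or datetime.date.today()
    new_expiry_date = (datetime.date(started_year, 9, 16)
                       + datetime.timedelta(days=365 * length_of_fos))
    # A computed date in the past is bumped to the coming September window.
    if new_expiry_date < today:
        if today < datetime.date(today.year, 9, 15):
            new_expiry_date = datetime.date(today.year, 9, 15)
        else:
            new_expiry_date = (datetime.date(today.year, 9, 16)
                               + datetime.timedelta(days=365))
    return new_expiry_date


# A 3-year Bachelor started in 2011, evaluated in early 2016, no longer
# yields the stale 2014 date from the bug report:
print(get_expiry_date(2011, 3, today=datetime.date(2016, 3, 1)))  # 2016-09-15
```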
gh_patches_debug_28422
rasdani/github-patches
git_diff
hpcaitech__ColossalAI-1314
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG]: Cannot synchronize grads of shared parameters cross pipeline stages when using ZERO-3 ### 🐛 Describe the bug @FrankLeeeee @ver217 Hi, in line 36 of _pipeline_parallel_gradient_handler.py: https://github.com/hpcaitech/ColossalAI/blob/1aad903c1537eafb73fac1729b6df30b7006312f/colossalai/engine/gradient_handler/_pipeline_parallel_gradient_handler.py#L36 the condition "param.grad is not None" will not work properly with ZERO-3, because after ZERO-3 synchronized grads, all parameters's grads were set to "colo_attr",grads are None and buckets is empty here! This line also has the problem: https://github.com/hpcaitech/ColossalAI/blob/1aad903c1537eafb73fac1729b6df30b7006312f/colossalai/engine/gradient_handler/_pipeline_parallel_gradient_handler.py#L43 ### Environment colossalai latest version </issue> <code> [start of colossalai/engine/gradient_handler/_pipeline_parallel_gradient_handler.py] 1 #!/usr/bin/env python 2 3 from collections import defaultdict 4 5 import torch 6 import torch.distributed as dist 7 from colossalai.core import global_context as gpc 8 from colossalai.registry import GRADIENT_HANDLER 9 from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors 10 11 from ._base_gradient_handler import BaseGradientHandler 12 13 14 @GRADIENT_HANDLER.register_module 15 class PipelineSharedModuleGradientHandler(BaseGradientHandler): 16 """A helper class to handle all-reduce operations in sub parallel groups. 17 A all-reduce collective communication will be operated in 18 :func:`handle_gradient` among all sub pipeline parallel groups. 19 For better performance, it bucketizes the gradients of all parameters that are 20 the same type to improve the efficiency of communication. 21 22 Args: 23 model (Module): Model where the gradients accumulate. 24 optimizer (Optimizer): Optimizer for updating the parameters. 25 """ 26 27 def handle_gradient(self): 28 """A method running a all-reduce operation in sub pipeline parallel groups. 29 """ 30 if gpc.pipeline_parallel_size > 1: 31 # bucketize and all-reduce 32 buckets = defaultdict(lambda: defaultdict(list)) 33 # Pack the buckets. 34 for param in self._model.parameters(): 35 group = getattr(param, 'pipeline_shared_module_pg', None) 36 if param.requires_grad and param.grad is not None and group is not None: 37 tp = param.data.type() 38 buckets[group][tp].append(param) 39 40 # For each bucket, all-reduce and copy all-reduced grads. 41 for group, group_buckets in buckets.items(): 42 for tp, bucket in group_buckets.items(): 43 grads = [param.grad.data for param in bucket] 44 coalesced = _flatten_dense_tensors(grads).to(torch.cuda.current_device()) 45 dist.all_reduce(coalesced, op=dist.ReduceOp.SUM, group=group) 46 for buf, synced in zip(grads, _unflatten_dense_tensors(coalesced, grads)): 47 buf.copy_(synced) 48 [end of colossalai/engine/gradient_handler/_pipeline_parallel_gradient_handler.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/colossalai/engine/gradient_handler/_pipeline_parallel_gradient_handler.py b/colossalai/engine/gradient_handler/_pipeline_parallel_gradient_handler.py --- a/colossalai/engine/gradient_handler/_pipeline_parallel_gradient_handler.py +++ b/colossalai/engine/gradient_handler/_pipeline_parallel_gradient_handler.py @@ -33,14 +33,19 @@ # Pack the buckets. for param in self._model.parameters(): group = getattr(param, 'pipeline_shared_module_pg', None) - if param.requires_grad and param.grad is not None and group is not None: + if param.requires_grad and group is not None and ( + (hasattr(param, 'colo_attr') and not param.colo_attr.saved_grad.is_null()) + or param.grad is not None): tp = param.data.type() buckets[group][tp].append(param) # For each bucket, all-reduce and copy all-reduced grads. for group, group_buckets in buckets.items(): for tp, bucket in group_buckets.items(): - grads = [param.grad.data for param in bucket] + grads = [ + param.colo_attr.grad_payload if hasattr(param, 'colo_attr') else param.grad.data + for param in bucket + ] coalesced = _flatten_dense_tensors(grads).to(torch.cuda.current_device()) dist.all_reduce(coalesced, op=dist.ReduceOp.SUM, group=group) for buf, synced in zip(grads, _unflatten_dense_tensors(coalesced, grads)):
{"golden_diff": "diff --git a/colossalai/engine/gradient_handler/_pipeline_parallel_gradient_handler.py b/colossalai/engine/gradient_handler/_pipeline_parallel_gradient_handler.py\n--- a/colossalai/engine/gradient_handler/_pipeline_parallel_gradient_handler.py\n+++ b/colossalai/engine/gradient_handler/_pipeline_parallel_gradient_handler.py\n@@ -33,14 +33,19 @@\n # Pack the buckets.\n for param in self._model.parameters():\n group = getattr(param, 'pipeline_shared_module_pg', None)\n- if param.requires_grad and param.grad is not None and group is not None:\n+ if param.requires_grad and group is not None and (\n+ (hasattr(param, 'colo_attr') and not param.colo_attr.saved_grad.is_null())\n+ or param.grad is not None):\n tp = param.data.type()\n buckets[group][tp].append(param)\n \n # For each bucket, all-reduce and copy all-reduced grads.\n for group, group_buckets in buckets.items():\n for tp, bucket in group_buckets.items():\n- grads = [param.grad.data for param in bucket]\n+ grads = [\n+ param.colo_attr.grad_payload if hasattr(param, 'colo_attr') else param.grad.data\n+ for param in bucket\n+ ]\n coalesced = _flatten_dense_tensors(grads).to(torch.cuda.current_device())\n dist.all_reduce(coalesced, op=dist.ReduceOp.SUM, group=group)\n for buf, synced in zip(grads, _unflatten_dense_tensors(coalesced, grads)):\n", "issue": "[BUG]: Cannot synchronize grads of shared parameters cross pipeline stages when using ZERO-3\n### \ud83d\udc1b Describe the bug\r\n\r\n @FrankLeeeee @ver217 \r\nHi, in line 36 of _pipeline_parallel_gradient_handler.py:\r\nhttps://github.com/hpcaitech/ColossalAI/blob/1aad903c1537eafb73fac1729b6df30b7006312f/colossalai/engine/gradient_handler/_pipeline_parallel_gradient_handler.py#L36\r\n\r\nthe condition \"param.grad is not None\" will not work properly with ZERO-3, because after ZERO-3 synchronized grads, all parameters's grads were set to \"colo_attr\"\uff0cgrads are None and buckets is empty here! 
\r\n\r\nThis line also has the problem:\r\nhttps://github.com/hpcaitech/ColossalAI/blob/1aad903c1537eafb73fac1729b6df30b7006312f/colossalai/engine/gradient_handler/_pipeline_parallel_gradient_handler.py#L43\r\n\r\n### Environment\r\n\r\ncolossalai latest version\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom collections import defaultdict\n\nimport torch\nimport torch.distributed as dist\nfrom colossalai.core import global_context as gpc\nfrom colossalai.registry import GRADIENT_HANDLER\nfrom torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors\n\nfrom ._base_gradient_handler import BaseGradientHandler\n\n\n@GRADIENT_HANDLER.register_module\nclass PipelineSharedModuleGradientHandler(BaseGradientHandler):\n \"\"\"A helper class to handle all-reduce operations in sub parallel groups.\n A all-reduce collective communication will be operated in \n :func:`handle_gradient` among all sub pipeline parallel groups.\n For better performance, it bucketizes the gradients of all parameters that are \n the same type to improve the efficiency of communication.\n\n Args:\n model (Module): Model where the gradients accumulate.\n optimizer (Optimizer): Optimizer for updating the parameters.\n \"\"\"\n\n def handle_gradient(self):\n \"\"\"A method running a all-reduce operation in sub pipeline parallel groups.\n \"\"\"\n if gpc.pipeline_parallel_size > 1:\n # bucketize and all-reduce\n buckets = defaultdict(lambda: defaultdict(list))\n # Pack the buckets.\n for param in self._model.parameters():\n group = getattr(param, 'pipeline_shared_module_pg', None)\n if param.requires_grad and param.grad is not None and group is not None:\n tp = param.data.type()\n buckets[group][tp].append(param)\n\n # For each bucket, all-reduce and copy all-reduced grads.\n for group, group_buckets in buckets.items():\n for tp, bucket in group_buckets.items():\n grads = [param.grad.data for param in bucket]\n coalesced = _flatten_dense_tensors(grads).to(torch.cuda.current_device())\n dist.all_reduce(coalesced, op=dist.ReduceOp.SUM, group=group)\n for buf, synced in zip(grads, _unflatten_dense_tensors(coalesced, grads)):\n buf.copy_(synced)\n", "path": "colossalai/engine/gradient_handler/_pipeline_parallel_gradient_handler.py"}]}
1,308
336
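The gradient handler above bucketizes shared-parameter gradients by tensor type, flattens each bucket into one buffer, all-reduces it across the pipeline group, and copies the result back. The diff's actual change is where the gradient lives under ZeRO-3 (`param.colo_attr.grad_payload` instead of `param.grad`). The sketch below shows only the bucket-coalesce-scatter mechanics on plain CPU tensors; the `*= 2` line is a stand-in for `dist.all_reduce`, which needs an initialized process group, and note that `_flatten_dense_tensors` is a private torch utility.

```python
from collections import defaultdict

import torch
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors

# Stand-ins for gradients of pipeline-shared parameters; with ZeRO-3 these
# would be fetched via param.colo_attr.grad_payload rather than param.grad.
grads = [torch.ones(3), torch.ones(2, 2).double(), torch.full((4,), 2.0)]

# Bucket by element type so each all-reduce moves one homogeneous buffer.
buckets = defaultdict(list)
for g in grads:
    buckets[g.type()].append(g)

for tp, bucket in buckets.items():
    coalesced = _flatten_dense_tensors(bucket)
    coalesced *= 2  # placeholder for dist.all_reduce(coalesced, group=group)
    for buf, synced in zip(bucket, _unflatten_dense_tensors(coalesced, bucket)):
        buf.copy_(synced)

print(grads[0])  # tensor([2., 2., 2.])
```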
gh_patches_debug_39002
rasdani/github-patches
git_diff
hylang__hy-2565
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Stop using Read the Docs Having ads in the manual is extraordinarily tacky. We should probably just host the web versions of Hy and Hyrule's manuals on Arfer.net, where I also host [the new Hylang.org](http://hylang.org). For simplicity, we can serve only the stable release of the manual. We would then just rebuild it as part of the release process. </issue> <code> [start of docs/conf.py] 1 # This file is execfile()d with the current directory set to its containing dir. 2 3 import html 4 import os 5 import re 6 import sys 7 import time 8 9 sys.path.insert(0, os.path.abspath("..")) 10 11 extensions = [ 12 "sphinx.ext.napoleon", 13 "sphinx.ext.intersphinx", 14 "sphinx.ext.autodoc", 15 "sphinx.ext.viewcode", 16 "sphinxcontrib.hydomain", 17 ] 18 19 import warnings; import sphinx.deprecation as SD 20 for c in (SD.RemovedInSphinx60Warning, SD.RemovedInSphinx70Warning): 21 warnings.filterwarnings('ignore', category = c) 22 23 from get_version import __version__ as hy_version 24 25 # Read the Docs might dirty its checkout, so strip the dirty flag. 26 hy_version = re.sub(r"[+.]dirty\Z", "", hy_version) 27 28 templates_path = ["_templates"] 29 source_suffix = ".rst" 30 31 master_doc = "index" 32 33 # General information about the project. 34 project = "hy" 35 copyright = "%s the authors" % time.strftime("%Y") 36 37 # The version info for the project you're documenting, acts as replacement for 38 # |version| and |release|, also used in various other places throughout the 39 # built documents. 40 # 41 # The short X.Y version. 42 version = ".".join(hy_version.split(".")[:-1]) 43 # The full version, including alpha/beta/rc tags. 44 release = hy_version 45 hy_descriptive_version = html.escape(hy_version) 46 if "+" in hy_version: 47 hy_descriptive_version += " <strong style='color: red;'>(unstable)</strong>" 48 49 exclude_patterns = ["_build", "coreteam.rst"] 50 add_module_names = True 51 52 pygments_style = "sphinx" 53 54 import sphinx_rtd_theme 55 56 html_theme = "sphinx_rtd_theme" 57 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] 58 59 # Add any paths that contain custom static files (such as style sheets) here, 60 # relative to this directory. They are copied after the builtin static files, 61 # so a file named "default.css" will overwrite the builtin "default.css". 62 html_static_path = ["_static"] 63 64 html_use_smartypants = False 65 html_show_sphinx = False 66 67 html_context = dict( 68 hy_descriptive_version=hy_descriptive_version) 69 70 highlight_language = "hylang" 71 72 intersphinx_mapping = dict( 73 py=("https://docs.python.org/3/", None), 74 hyrule=("https://hyrule.readthedocs.io/en/master/", None), 75 ) 76 77 import hy 78 hy.I = type(hy.I) # A trick to enable `hy:autoclass:: hy.I` 79 80 81 # ** Sphinx App Setup 82 83 84 def setup(app): 85 app.add_css_file("overrides.css") 86 [end of docs/conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -1,20 +1,14 @@ -# This file is execfile()d with the current directory set to its containing dir. +import os, re, sys, time, html -import html -import os -import re -import sys -import time +sys.path.insert(0, os.path.abspath('..')) -sys.path.insert(0, os.path.abspath("..")) +import hy; hy.I = type(hy.I) # A trick to enable `hy:autoclass:: hy.I` extensions = [ - "sphinx.ext.napoleon", - "sphinx.ext.intersphinx", - "sphinx.ext.autodoc", - "sphinx.ext.viewcode", - "sphinxcontrib.hydomain", -] + 'sphinx.ext.napoleon', + 'sphinx.ext.intersphinx', + 'sphinx.ext.autodoc', + 'sphinxcontrib.hydomain'] import warnings; import sphinx.deprecation as SD for c in (SD.RemovedInSphinx60Warning, SD.RemovedInSphinx70Warning): @@ -22,64 +16,33 @@ from get_version import __version__ as hy_version -# Read the Docs might dirty its checkout, so strip the dirty flag. -hy_version = re.sub(r"[+.]dirty\Z", "", hy_version) - -templates_path = ["_templates"] -source_suffix = ".rst" - -master_doc = "index" - -# General information about the project. -project = "hy" -copyright = "%s the authors" % time.strftime("%Y") - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = ".".join(hy_version.split(".")[:-1]) -# The full version, including alpha/beta/rc tags. +project = 'Hy' +copyright = '%s the authors' % time.strftime('%Y') +html_title = f'Hy {hy_version} manual' +version = '.'.join(hy_version.split('.')[:-1]) + # The short dotted version identifier release = hy_version -hy_descriptive_version = html.escape(hy_version) -if "+" in hy_version: - hy_descriptive_version += " <strong style='color: red;'>(unstable)</strong>" - -exclude_patterns = ["_build", "coreteam.rst"] -add_module_names = True - -pygments_style = "sphinx" - -import sphinx_rtd_theme - -html_theme = "sphinx_rtd_theme" -html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] - + # The full version identifier, including alpha, beta, and RC tags + +source_suffix = '.rst' +master_doc = 'index' +exclude_patterns = ['_build', 'coreteam.rst'] + +html_theme = 'nature' +html_theme_options = dict( + nosidebar = True, + body_min_width = 0, + body_max_width = 'none') +html_css_files = ['custom.css'] +html_static_path = ['_static'] html_use_smartypants = False +html_copy_source = False html_show_sphinx = False -html_context = dict( - hy_descriptive_version=hy_descriptive_version) +add_module_names = True -highlight_language = "hylang" +highlight_language = 'hylang' intersphinx_mapping = dict( - py=("https://docs.python.org/3/", None), - hyrule=("https://hyrule.readthedocs.io/en/master/", None), -) - -import hy -hy.I = type(hy.I) # A trick to enable `hy:autoclass:: hy.I` - - -# ** Sphinx App Setup - - -def setup(app): - app.add_css_file("overrides.css") + py = ('https://docs.python.org/3/', None), + hyrule = ('https://hyrule.readthedocs.io/en/master/', None))
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -1,20 +1,14 @@\n-# This file is execfile()d with the current directory set to its containing dir.\n+import os, re, sys, time, html\n \n-import html\n-import os\n-import re\n-import sys\n-import time\n+sys.path.insert(0, os.path.abspath('..'))\n \n-sys.path.insert(0, os.path.abspath(\"..\"))\n+import hy; hy.I = type(hy.I) # A trick to enable `hy:autoclass:: hy.I`\n \n extensions = [\n- \"sphinx.ext.napoleon\",\n- \"sphinx.ext.intersphinx\",\n- \"sphinx.ext.autodoc\",\n- \"sphinx.ext.viewcode\",\n- \"sphinxcontrib.hydomain\",\n-]\n+ 'sphinx.ext.napoleon',\n+ 'sphinx.ext.intersphinx',\n+ 'sphinx.ext.autodoc',\n+ 'sphinxcontrib.hydomain']\n \n import warnings; import sphinx.deprecation as SD\n for c in (SD.RemovedInSphinx60Warning, SD.RemovedInSphinx70Warning):\n@@ -22,64 +16,33 @@\n \n from get_version import __version__ as hy_version\n \n-# Read the Docs might dirty its checkout, so strip the dirty flag.\n-hy_version = re.sub(r\"[+.]dirty\\Z\", \"\", hy_version)\n-\n-templates_path = [\"_templates\"]\n-source_suffix = \".rst\"\n-\n-master_doc = \"index\"\n-\n-# General information about the project.\n-project = \"hy\"\n-copyright = \"%s the authors\" % time.strftime(\"%Y\")\n-\n-# The version info for the project you're documenting, acts as replacement for\n-# |version| and |release|, also used in various other places throughout the\n-# built documents.\n-#\n-# The short X.Y version.\n-version = \".\".join(hy_version.split(\".\")[:-1])\n-# The full version, including alpha/beta/rc tags.\n+project = 'Hy'\n+copyright = '%s the authors' % time.strftime('%Y')\n+html_title = f'Hy {hy_version} manual'\n+version = '.'.join(hy_version.split('.')[:-1])\n+ # The short dotted version identifier\n release = hy_version\n-hy_descriptive_version = html.escape(hy_version)\n-if \"+\" in hy_version:\n- hy_descriptive_version += \" <strong style='color: red;'>(unstable)</strong>\"\n-\n-exclude_patterns = [\"_build\", \"coreteam.rst\"]\n-add_module_names = True\n-\n-pygments_style = \"sphinx\"\n-\n-import sphinx_rtd_theme\n-\n-html_theme = \"sphinx_rtd_theme\"\n-html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n-\n-# Add any paths that contain custom static files (such as style sheets) here,\n-# relative to this directory. 
They are copied after the builtin static files,\n-# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n-html_static_path = [\"_static\"]\n-\n+ # The full version identifier, including alpha, beta, and RC tags\n+\n+source_suffix = '.rst'\n+master_doc = 'index'\n+exclude_patterns = ['_build', 'coreteam.rst']\n+\n+html_theme = 'nature'\n+html_theme_options = dict(\n+ nosidebar = True,\n+ body_min_width = 0,\n+ body_max_width = 'none')\n+html_css_files = ['custom.css']\n+html_static_path = ['_static']\n html_use_smartypants = False\n+html_copy_source = False\n html_show_sphinx = False\n \n-html_context = dict(\n- hy_descriptive_version=hy_descriptive_version)\n+add_module_names = True\n \n-highlight_language = \"hylang\"\n+highlight_language = 'hylang'\n \n intersphinx_mapping = dict(\n- py=(\"https://docs.python.org/3/\", None),\n- hyrule=(\"https://hyrule.readthedocs.io/en/master/\", None),\n-)\n-\n-import hy\n-hy.I = type(hy.I) # A trick to enable `hy:autoclass:: hy.I`\n-\n-\n-# ** Sphinx App Setup\n-\n-\n-def setup(app):\n- app.add_css_file(\"overrides.css\")\n+ py = ('https://docs.python.org/3/', None),\n+ hyrule = ('https://hyrule.readthedocs.io/en/master/', None))\n", "issue": "Stop using Read the Docs\nHaving ads in the manual is extraordinarily tacky. We should probably just host the web versions of Hy and Hyrule's manuals on Arfer.net, where I also host [the new Hylang.org](http://hylang.org). For simplicity, we can serve only the stable release of the manual. We would then just rebuild it as part of the release process.\n", "before_files": [{"content": "# This file is execfile()d with the current directory set to its containing dir.\n\nimport html\nimport os\nimport re\nimport sys\nimport time\n\nsys.path.insert(0, os.path.abspath(\"..\"))\n\nextensions = [\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.viewcode\",\n \"sphinxcontrib.hydomain\",\n]\n\nimport warnings; import sphinx.deprecation as SD\nfor c in (SD.RemovedInSphinx60Warning, SD.RemovedInSphinx70Warning):\n warnings.filterwarnings('ignore', category = c)\n\nfrom get_version import __version__ as hy_version\n\n# Read the Docs might dirty its checkout, so strip the dirty flag.\nhy_version = re.sub(r\"[+.]dirty\\Z\", \"\", hy_version)\n\ntemplates_path = [\"_templates\"]\nsource_suffix = \".rst\"\n\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"hy\"\ncopyright = \"%s the authors\" % time.strftime(\"%Y\")\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = \".\".join(hy_version.split(\".\")[:-1])\n# The full version, including alpha/beta/rc tags.\nrelease = hy_version\nhy_descriptive_version = html.escape(hy_version)\nif \"+\" in hy_version:\n hy_descriptive_version += \" <strong style='color: red;'>(unstable)</strong>\"\n\nexclude_patterns = [\"_build\", \"coreteam.rst\"]\nadd_module_names = True\n\npygments_style = \"sphinx\"\n\nimport sphinx_rtd_theme\n\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\nhtml_use_smartypants = False\nhtml_show_sphinx = False\n\nhtml_context = dict(\n hy_descriptive_version=hy_descriptive_version)\n\nhighlight_language = \"hylang\"\n\nintersphinx_mapping = dict(\n py=(\"https://docs.python.org/3/\", None),\n hyrule=(\"https://hyrule.readthedocs.io/en/master/\", None),\n)\n\nimport hy\nhy.I = type(hy.I) # A trick to enable `hy:autoclass:: hy.I`\n\n\n# ** Sphinx App Setup\n\n\ndef setup(app):\n app.add_css_file(\"overrides.css\")\n", "path": "docs/conf.py"}]}
1,377
978
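For context on the conf.py rewrite in the record above: a minimal sketch of a Sphinx configuration using the builtin `nature` theme, as the golden diff does. All metadata values here are placeholders, not Hy's real settings.

```python
# Minimal Sphinx conf.py sketch using the builtin "nature" theme.
# Placeholder metadata; mirrors only the theme settings from the diff above.
import time

project = 'Hy'
copyright = '%s the authors' % time.strftime('%Y')
release = '1.0.0'                              # placeholder full version
version = '.'.join(release.split('.')[:-1])    # short X.Y identifier

source_suffix = '.rst'
master_doc = 'index'

html_theme = 'nature'          # ships with Sphinx, so no theme dependency or ads
html_theme_options = dict(
    nosidebar=True,
    body_min_width=0,
    body_max_width='none')
html_copy_source = False
html_show_sphinx = False
```

Dropping `sphinx_rtd_theme` for a builtin theme is what lets the project serve the built HTML anywhere without Read the Docs.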
gh_patches_debug_5936
rasdani/github-patches
git_diff
streamlit__streamlit-2248
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Request for tar file for lib in pypi.
_(Note, you don't have to fill out every section here. They're just here for guidance. That said, nicely detailed feature requests are more likely to get eng attention sooner)_

### Problem

As of now for streamlit, we have only a wheel file in PyPI. Can the streamlit team add a tar file as well?

https://pypi.org/project/streamlit/#files 

### Solution

Create an sdist (tar) of the package and make it available in PyPI:
https://realpython.com/pypi-publish-python-package/ 

### Additional context

Add any other context or screenshots about the feature request here. For example, did this FR come from https://discuss.streamlit.io or another site? Link the original source here!
</issue>
<code>
[start of lib/setup.py]
1 import os
2 import platform
3 import setuptools
4 import subprocess
5 import sys
6 
7 from pipenv.project import Project
8 from pipenv.utils import convert_deps_to_pip
9 from setuptools.command.install import install
10 
11 VERSION = "0.69.2" # PEP-440
12 
13 NAME = "streamlit"
14 
15 DESCRIPTION = "The fastest way to build data apps in Python"
16 
17 LONG_DESCRIPTION = (
18     "Streamlit's open-source app framework is the easiest way "
19     "for data scientists and machine learning engineers to "
20     "create beautiful, performant apps in only a few hours! "
21     "All in pure Python. All for free."
22 )
23 
24 pipfile = Project(chdir=False).parsed_pipfile
25 
26 packages = pipfile["packages"].copy()
27 requirements = convert_deps_to_pip(packages, r=False)
28 
29 # Check whether xcode tools are available before making watchdog a
30 # dependency (only if the current system is a Mac).
31 if platform.system() == "Darwin":
32     has_xcode = subprocess.call(["xcode-select", "--version"], shell=False) == 0
33     has_gcc = subprocess.call(["gcc", "--version"], shell=False) == 0
34 
35     if not (has_xcode and has_gcc):
36         try:
37             requirements.remove("watchdog")
38         except ValueError:
39             pass
40 
41 
42 class VerifyVersionCommand(install):
43     """Custom command to verify that the git tag matches our version"""
44 
45     description = "verify that the git tag matches our version"
46 
47     def run(self):
48         tag = os.getenv("CIRCLE_TAG")
49 
50         if tag != VERSION:
51             info = "Git tag: {0} does not match the version of this app: {1}".format(
52                 tag, VERSION
53             )
54             sys.exit(info)
55 
56 
57 setuptools.setup(
58     name=NAME,
59     version=VERSION,
60     description=DESCRIPTION,
61     long_description=LONG_DESCRIPTION,
62     url="https://streamlit.io",
63     author="Streamlit Inc",
64     author_email="[email protected]",
65     python_requires=">=3.6",
66     license="Apache 2",
67     packages=setuptools.find_packages(exclude=["tests", "tests.*"]),
68     # Requirements
69     install_requires=requirements,
70     zip_safe=False, # install source files not egg
71     include_package_data=True, # copy html and friends
72     entry_points={"console_scripts": ["streamlit = streamlit.cli:main"]},
73     # For Windows so that streamlit * commands work ie.
74     # - streamlit version
75     # - streamlit hello
76     scripts=["bin/streamlit.cmd"],
77     cmdclass={
78         "verify": VerifyVersionCommand,
79     },
80 )
[end of lib/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lib/setup.py b/lib/setup.py --- a/lib/setup.py +++ b/lib/setup.py @@ -4,10 +4,17 @@ import subprocess import sys -from pipenv.project import Project -from pipenv.utils import convert_deps_to_pip from setuptools.command.install import install +try: + from pipenv.project import Project + from pipenv.utils import convert_deps_to_pip +except: + exit_msg = ( + "pipenv is required to package Streamlit. Please install pipenv and try again" + ) + sys.exit(exit_msg) + VERSION = "0.69.2" # PEP-440 NAME = "streamlit"
{"golden_diff": "diff --git a/lib/setup.py b/lib/setup.py\n--- a/lib/setup.py\n+++ b/lib/setup.py\n@@ -4,10 +4,17 @@\n import subprocess\n import sys\n \n-from pipenv.project import Project\n-from pipenv.utils import convert_deps_to_pip\n from setuptools.command.install import install\n \n+try:\n+ from pipenv.project import Project\n+ from pipenv.utils import convert_deps_to_pip\n+except:\n+ exit_msg = (\n+ \"pipenv is required to package Streamlit. Please install pipenv and try again\"\n+ )\n+ sys.exit(exit_msg)\n+\n VERSION = \"0.69.2\" # PEP-440\n \n NAME = \"streamlit\"\n", "issue": "Request for tar file for lib in pypi.\n_(Note, you don't have to fill out every section here. They're just here for guidance. That said, nicely detailed feature requests are more likely to get eng attention sooner)_\r\n\r\n### Problem\r\n\r\nAs of now for streamlit , we have only wheels file in pypi. Can the streamlit team add tar file as well. \r\n\r\nhttps://pypi.org/project/streamlit/#files \r\n\r\n### Solution\r\n\r\nCreate a sdist ( tar ) of the package and make it avaliable in pypi\r\nhttps://realpython.com/pypi-publish-python-package/ \r\n\r\n### Additional context\r\n\r\nAdd any other context or screenshots about the feature request here. For example, did this FR come from https://discuss.streamlit.io or another site? Link the original source here!\r\n\n", "before_files": [{"content": "import os\nimport platform\nimport setuptools\nimport subprocess\nimport sys\n\nfrom pipenv.project import Project\nfrom pipenv.utils import convert_deps_to_pip\nfrom setuptools.command.install import install\n\nVERSION = \"0.69.2\" # PEP-440\n\nNAME = \"streamlit\"\n\nDESCRIPTION = \"The fastest way to build data apps in Python\"\n\nLONG_DESCRIPTION = (\n \"Streamlit's open-source app framework is the easiest way \"\n \"for data scientists and machine learning engineers to \"\n \"create beautiful, performant apps in only a few hours! \"\n \"All in pure Python. 
All for free.\"\n)\n\npipfile = Project(chdir=False).parsed_pipfile\n\npackages = pipfile[\"packages\"].copy()\nrequirements = convert_deps_to_pip(packages, r=False)\n\n# Check whether xcode tools are available before making watchdog a\n# dependency (only if the current system is a Mac).\nif platform.system() == \"Darwin\":\n has_xcode = subprocess.call([\"xcode-select\", \"--version\"], shell=False) == 0\n has_gcc = subprocess.call([\"gcc\", \"--version\"], shell=False) == 0\n\n if not (has_xcode and has_gcc):\n try:\n requirements.remove(\"watchdog\")\n except ValueError:\n pass\n\n\nclass VerifyVersionCommand(install):\n \"\"\"Custom command to verify that the git tag matches our version\"\"\"\n\n description = \"verify that the git tag matches our version\"\n\n def run(self):\n tag = os.getenv(\"CIRCLE_TAG\")\n\n if tag != VERSION:\n info = \"Git tag: {0} does not match the version of this app: {1}\".format(\n tag, VERSION\n )\n sys.exit(info)\n\n\nsetuptools.setup(\n name=NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n url=\"https://streamlit.io\",\n author=\"Streamlit Inc\",\n author_email=\"[email protected]\",\n python_requires=\">=3.6\",\n license=\"Apache 2\",\n packages=setuptools.find_packages(exclude=[\"tests\", \"tests.*\"]),\n # Requirements\n install_requires=requirements,\n zip_safe=False, # install source files not egg\n include_package_data=True, # copy html and friends\n entry_points={\"console_scripts\": [\"streamlit = streamlit.cli:main\"]},\n # For Windows so that streamlit * commands work ie.\n # - streamlit version\n # - streamlit hello\n scripts=[\"bin/streamlit.cmd\"],\n cmdclass={\n \"verify\": VerifyVersionCommand,\n },\n)\n", "path": "lib/setup.py"}]}
1,419
158
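A sketch of the two pieces this record combines: the guarded import from the golden diff, and the command that actually produces the sdist. The error message is illustrative; the diff itself uses a bare `except`.

```python
# Fail fast with a readable message when a packaging-time dependency is absent.
import sys

try:
    from pipenv.project import Project
    from pipenv.utils import convert_deps_to_pip
except ImportError:
    sys.exit("pipenv is required to package Streamlit. Please install pipenv and try again")
```

With setup.py importable on a clean machine, `python setup.py sdist` emits the `.tar.gz` that the issue asks to publish alongside the wheel.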
gh_patches_debug_41263
rasdani/github-patches
git_diff
hydroshare__hydroshare-5233
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Management command for published resources **Describe the feature you'd like and what it will do** We should have a management command to report on resources published within a timeframe. Consider also adding to our metrics system **Additional context** HS 2.9.2 </issue> <code> [start of hs_core/management/commands/list_published_by_year.py] 1 """Lists all the resources published in a given year. 2 """ 3 4 from django.core.management.base import BaseCommand 5 from django.contrib.auth.models import User 6 from hs_core.models import BaseResource 7 from hs_access_control.models import PrivilegeCodes 8 from hs_core import hydroshare 9 from django.db.models import F 10 from datetime import timedelta 11 from django.utils import timezone 12 from django.core.exceptions import ObjectDoesNotExist 13 14 15 class Command(BaseCommand): 16 help = "Print resource information" 17 18 def add_arguments(self, parser): 19 # Named (optional) arguments 20 parser.add_argument( 21 '--year', 22 dest='year', 23 help='limit to resources published in a given year' 24 ) 25 26 parser.add_argument('--days', type=int, dest='days', help='include resources updated in the last X days') 27 28 parser.add_argument( 29 '--type', 30 dest='type', 31 help='limit to resources of a particular type' 32 ) 33 34 parser.add_argument( 35 '--owned_by', 36 dest='owned_by', 37 help='limit to resources owned by specific user' 38 ) 39 40 def handle(self, *args, **options): 41 days = options['days'] 42 resources = BaseResource.objects.filter(raccess__published=True) 43 owner = options['owned_by'] 44 type = options['type'] 45 46 if owner is not None: 47 try: 48 owner = User.objects.get(username=owner) 49 resources.filter(r2urp__user=owner, 50 r2urp__privilege=PrivilegeCodes.OWNER) 51 except ObjectDoesNotExist: 52 print(f"User matching {owner} not found") 53 54 if type is not None: 55 if type in ["CompositeResource", "CollectionResource"]: 56 resources.filter(resource_type=type) 57 else: 58 print(f"Type {type} is not supported. 
Must be 'CompositeResource' or 'CollectionResource'") 59 60 resources = resources.order_by(F('updated').asc(nulls_first=True)) 61 62 for resource in resources: 63 pub_date = self.get_publication_date(resource) 64 if options['year']: 65 if pub_date.year != int(options['year']): 66 continue 67 if days: 68 cuttoff_time = timezone.now() - timedelta(days) 69 if not pub_date >= cuttoff_time: 70 continue 71 self.print_resource(resource, pub_date) 72 73 def get_publication_date(self, resource): 74 published_date = resource.metadata.dates.filter(type="published").first() 75 if not published_date: 76 print(f"Publication date not found for {resource.short_id}") 77 return published_date 78 79 def print_resource(self, res, pub_date): 80 site_url = hydroshare.utils.current_site_url() 81 res_url = site_url + res.absolute_url 82 funding_agencies = res.metadata.funding_agencies.all() 83 print("*" * 100) 84 print(f"{res_url}") 85 print(res.metadata.title.value) 86 print(f"Resource type: {res.resource_type}") 87 if pub_date: 88 print(f"Published on {pub_date}") 89 else: 90 print("Resource has no publication date") 91 92 if funding_agencies: 93 print("Funding agency/agencies:") 94 for f in funding_agencies: 95 print(f.agency_name) 96 else: 97 print("Resource has no funding agency") 98 99 if res.doi: 100 print(res.doi) 101 else: 102 print("Resource has no doi") 103 [end of hs_core/management/commands/list_published_by_year.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/hs_core/management/commands/list_published_by_year.py b/hs_core/management/commands/list_published_by_year.py --- a/hs_core/management/commands/list_published_by_year.py +++ b/hs_core/management/commands/list_published_by_year.py @@ -41,7 +41,7 @@ days = options['days'] resources = BaseResource.objects.filter(raccess__published=True) owner = options['owned_by'] - type = options['type'] + res_type = options['type'] if owner is not None: try: @@ -51,11 +51,11 @@ except ObjectDoesNotExist: print(f"User matching {owner} not found") - if type is not None: - if type in ["CompositeResource", "CollectionResource"]: - resources.filter(resource_type=type) + if res_type is not None: + if res_type in ["CompositeResource", "CollectionResource"]: + resources.filter(resource_type=res_type) else: - print(f"Type {type} is not supported. Must be 'CompositeResource' or 'CollectionResource'") + print(f"Type {res_type} is not supported. Must be 'CompositeResource' or 'CollectionResource'") resources = resources.order_by(F('updated').asc(nulls_first=True)) @@ -74,14 +74,19 @@ published_date = resource.metadata.dates.filter(type="published").first() if not published_date: print(f"Publication date not found for {resource.short_id}") - return published_date + return published_date.start_date def print_resource(self, res, pub_date): site_url = hydroshare.utils.current_site_url() res_url = site_url + res.absolute_url funding_agencies = res.metadata.funding_agencies.all() + print("\n") print("*" * 100) print(f"{res_url}") + if res.doi: + print(res.doi) + else: + print("Resource has no doi") print(res.metadata.title.value) print(f"Resource type: {res.resource_type}") if pub_date: @@ -90,13 +95,24 @@ print("Resource has no publication date") if funding_agencies: - print("Funding agency/agencies:") - for f in funding_agencies: - print(f.agency_name) + print(f"Found {len(funding_agencies)} funder(s):") + for count, f in enumerate(funding_agencies, 1): + print(f"--- Funder #{count} ---") + if f.agency_name: + print(f"Agency name: {f.agency_name}") + else: + print("No agency name") + if f.agency_url: + print(f"Agency url: {f.agency_url}") + else: + print("No agency url") + if f.award_title: + print(f"Award title: {f.award_title}") + else: + print("No award title") + if f.award_number: + print(f"Award number: {f.award_number}") + else: + print("No award number") else: - print("Resource has no funding agency") - - if res.doi: - print(res.doi) - else: - print("Resource has no doi") + print("Resource has no funding information")
{"golden_diff": "diff --git a/hs_core/management/commands/list_published_by_year.py b/hs_core/management/commands/list_published_by_year.py\n--- a/hs_core/management/commands/list_published_by_year.py\n+++ b/hs_core/management/commands/list_published_by_year.py\n@@ -41,7 +41,7 @@\n days = options['days']\n resources = BaseResource.objects.filter(raccess__published=True)\n owner = options['owned_by']\n- type = options['type']\n+ res_type = options['type']\n \n if owner is not None:\n try:\n@@ -51,11 +51,11 @@\n except ObjectDoesNotExist:\n print(f\"User matching {owner} not found\")\n \n- if type is not None:\n- if type in [\"CompositeResource\", \"CollectionResource\"]:\n- resources.filter(resource_type=type)\n+ if res_type is not None:\n+ if res_type in [\"CompositeResource\", \"CollectionResource\"]:\n+ resources.filter(resource_type=res_type)\n else:\n- print(f\"Type {type} is not supported. Must be 'CompositeResource' or 'CollectionResource'\")\n+ print(f\"Type {res_type} is not supported. Must be 'CompositeResource' or 'CollectionResource'\")\n \n resources = resources.order_by(F('updated').asc(nulls_first=True))\n \n@@ -74,14 +74,19 @@\n published_date = resource.metadata.dates.filter(type=\"published\").first()\n if not published_date:\n print(f\"Publication date not found for {resource.short_id}\")\n- return published_date\n+ return published_date.start_date\n \n def print_resource(self, res, pub_date):\n site_url = hydroshare.utils.current_site_url()\n res_url = site_url + res.absolute_url\n funding_agencies = res.metadata.funding_agencies.all()\n+ print(\"\\n\")\n print(\"*\" * 100)\n print(f\"{res_url}\")\n+ if res.doi:\n+ print(res.doi)\n+ else:\n+ print(\"Resource has no doi\")\n print(res.metadata.title.value)\n print(f\"Resource type: {res.resource_type}\")\n if pub_date:\n@@ -90,13 +95,24 @@\n print(\"Resource has no publication date\")\n \n if funding_agencies:\n- print(\"Funding agency/agencies:\")\n- for f in funding_agencies:\n- print(f.agency_name)\n+ print(f\"Found {len(funding_agencies)} funder(s):\")\n+ for count, f in enumerate(funding_agencies, 1):\n+ print(f\"--- Funder #{count} ---\")\n+ if f.agency_name:\n+ print(f\"Agency name: {f.agency_name}\")\n+ else:\n+ print(\"No agency name\")\n+ if f.agency_url:\n+ print(f\"Agency url: {f.agency_url}\")\n+ else:\n+ print(\"No agency url\")\n+ if f.award_title:\n+ print(f\"Award title: {f.award_title}\")\n+ else:\n+ print(\"No award title\")\n+ if f.award_number:\n+ print(f\"Award number: {f.award_number}\")\n+ else:\n+ print(\"No award number\")\n else:\n- print(\"Resource has no funding agency\")\n-\n- if res.doi:\n- print(res.doi)\n- else:\n- print(\"Resource has no doi\")\n+ print(\"Resource has no funding information\")\n", "issue": "Management command for published resources\n**Describe the feature you'd like and what it will do**\r\nWe should have a management command to report on resources published within a timeframe.\r\nConsider also adding to our metrics system\r\n\r\n**Additional context**\r\nHS 2.9.2\r\n\n", "before_files": [{"content": "\"\"\"Lists all the resources published in a given year.\n\"\"\"\n\nfrom django.core.management.base import BaseCommand\nfrom django.contrib.auth.models import User\nfrom hs_core.models import BaseResource\nfrom hs_access_control.models import PrivilegeCodes\nfrom hs_core import hydroshare\nfrom django.db.models import F\nfrom datetime import timedelta\nfrom django.utils import timezone\nfrom django.core.exceptions import ObjectDoesNotExist\n\n\nclass 
Command(BaseCommand):\n help = \"Print resource information\"\n\n def add_arguments(self, parser):\n # Named (optional) arguments\n parser.add_argument(\n '--year',\n dest='year',\n help='limit to resources published in a given year'\n )\n\n parser.add_argument('--days', type=int, dest='days', help='include resources updated in the last X days')\n\n parser.add_argument(\n '--type',\n dest='type',\n help='limit to resources of a particular type'\n )\n\n parser.add_argument(\n '--owned_by',\n dest='owned_by',\n help='limit to resources owned by specific user'\n )\n\n def handle(self, *args, **options):\n days = options['days']\n resources = BaseResource.objects.filter(raccess__published=True)\n owner = options['owned_by']\n type = options['type']\n\n if owner is not None:\n try:\n owner = User.objects.get(username=owner)\n resources.filter(r2urp__user=owner,\n r2urp__privilege=PrivilegeCodes.OWNER)\n except ObjectDoesNotExist:\n print(f\"User matching {owner} not found\")\n\n if type is not None:\n if type in [\"CompositeResource\", \"CollectionResource\"]:\n resources.filter(resource_type=type)\n else:\n print(f\"Type {type} is not supported. Must be 'CompositeResource' or 'CollectionResource'\")\n\n resources = resources.order_by(F('updated').asc(nulls_first=True))\n\n for resource in resources:\n pub_date = self.get_publication_date(resource)\n if options['year']:\n if pub_date.year != int(options['year']):\n continue\n if days:\n cuttoff_time = timezone.now() - timedelta(days)\n if not pub_date >= cuttoff_time:\n continue\n self.print_resource(resource, pub_date)\n\n def get_publication_date(self, resource):\n published_date = resource.metadata.dates.filter(type=\"published\").first()\n if not published_date:\n print(f\"Publication date not found for {resource.short_id}\")\n return published_date\n\n def print_resource(self, res, pub_date):\n site_url = hydroshare.utils.current_site_url()\n res_url = site_url + res.absolute_url\n funding_agencies = res.metadata.funding_agencies.all()\n print(\"*\" * 100)\n print(f\"{res_url}\")\n print(res.metadata.title.value)\n print(f\"Resource type: {res.resource_type}\")\n if pub_date:\n print(f\"Published on {pub_date}\")\n else:\n print(\"Resource has no publication date\")\n\n if funding_agencies:\n print(\"Funding agency/agencies:\")\n for f in funding_agencies:\n print(f.agency_name)\n else:\n print(\"Resource has no funding agency\")\n\n if res.doi:\n print(res.doi)\n else:\n print(\"Resource has no doi\")\n", "path": "hs_core/management/commands/list_published_by_year.py"}]}
1,520
768
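One detail worth spelling out from the diff above: rebinding the name `type` shadows the builtin for the rest of the function, which is why the patch renames the option to `res_type`. A self-contained illustration:

```python
# Shadowing the builtin `type` breaks any later use of it as a callable.
def handle(options):
    type = options.get("type", "CompositeResource")   # shadows builtins.type
    try:
        print(type(42))          # TypeError: 'str' object is not callable
    except TypeError as exc:
        print("shadowed builtin:", exc)

handle({})
```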
gh_patches_debug_29593
rasdani/github-patches
git_diff
ManageIQ__integration_tests-91
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add SCP support The ssh_client fixture supports running a rake command. Some of these commands presuppose a local file exists on the appliance, such as when importing an Automate custom namespace. Extending ssh_client would be ideal. Here's my os.system hack: ``` import os ... os.system("sshpass -p '%s' scp %s@%s:/root/" % \ (mozwebqa.credentials['ssh']['password'], mozwebqa.credentials['ssh']['username'], soap_client.evm_server_hostname)) ``` </issue> <code> [start of utils/ssh.py] 1 import paramiko 2 3 class SSHClient(paramiko.SSHClient): 4 """paramiko.SSHClient wrapper 5 6 Allows copying/overriding and use as a context manager 7 Constructor kwargs are handed directly to paramiko.SSHClient.connect() 8 """ 9 def __init__(self, **connect_kwargs): 10 super(SSHClient, self).__init__() 11 self.set_missing_host_key_policy(paramiko.AutoAddPolicy()) 12 13 # Set up some sane defaults 14 if 'timeout' not in connect_kwargs: 15 connect_kwargs['timeout'] = 10 16 if 'allow_agent' not in connect_kwargs: 17 connect_kwargs['allow_agent'] = False 18 self._connect_kwargs = connect_kwargs 19 20 def __call__(self, **connect_kwargs): 21 # Update a copy of this instance's connect kwargs with passed in kwargs, 22 # then return a new instance with the updated kwargs 23 new_connect_kwargs = dict(self._connect_kwargs) 24 new_connect_kwargs.update(connect_kwargs) 25 new_client = SSHClient(**new_connect_kwargs) 26 return new_client 27 28 def __enter__(self): 29 self.connect(**self._connect_kwargs) 30 return self 31 32 def __exit__(self, *args, **kwargs): 33 self.close() 34 35 def run_command(self, command): 36 return command_runner(self, command) 37 38 def run_rails_command(self, command): 39 return rails_runner(self, command) 40 41 def run_rake_command(self, command): 42 return rake_runner(self, command) 43 44 45 def command_runner(client, command): 46 template = '%s\n' 47 command = template % command 48 with client as ctx: 49 transport = ctx.get_transport() 50 session = transport.open_session() 51 session.set_combine_stderr(True) 52 session.exec_command(command) 53 exit_status = session.recv_exit_status() 54 output = session.recv(-1) 55 return exit_status, output 56 57 # Returning two things so tuple unpacking the return works even if the ssh client fails 58 return None, None 59 60 def rails_runner(client, command): 61 template = '/var/www/miq/vmdb/script/rails runner %s' 62 return command_runner(client, template % command) 63 64 def rake_runner(client, command): 65 template = '/var/www/miq/vmdb/script/rake -f /var/www/miq/vmdb/Rakefile %s' 66 return rails_runner(client, template % command) 67 68 [end of utils/ssh.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/utils/ssh.py b/utils/ssh.py --- a/utils/ssh.py +++ b/utils/ssh.py @@ -1,4 +1,5 @@ import paramiko +from scp import SCPClient class SSHClient(paramiko.SSHClient): """paramiko.SSHClient wrapper @@ -15,6 +16,8 @@ connect_kwargs['timeout'] = 10 if 'allow_agent' not in connect_kwargs: connect_kwargs['allow_agent'] = False + if 'look_for_keys' not in connect_kwargs: + connect_kwargs['look_for_keys'] = False self._connect_kwargs = connect_kwargs def __call__(self, **connect_kwargs): @@ -41,6 +44,11 @@ def run_rake_command(self, command): return rake_runner(self, command) + def put_file(self, local_file, remote_file = '.'): + return scp_putter(self, local_file, remote_file) + + def get_file(self, remote_file, local_path = ''): + return scp_getter(self, remote_file, local_path) def command_runner(client, command): template = '%s\n' @@ -65,3 +73,13 @@ template = '/var/www/miq/vmdb/script/rake -f /var/www/miq/vmdb/Rakefile %s' return rails_runner(client, template % command) +def scp_putter(client, local_file, remote_file): + with client as ctx: + transport = ctx.get_transport() + SCPClient(transport).put(local_file, remote_file) + +def scp_getter(client, remote_file, local_path): + with client as ctx: + transport = ctx.get_transport() + SCPClient(transport).get(remote_file, local_path) +
{"golden_diff": "diff --git a/utils/ssh.py b/utils/ssh.py\n--- a/utils/ssh.py\n+++ b/utils/ssh.py\n@@ -1,4 +1,5 @@\n import paramiko\n+from scp import SCPClient\n \n class SSHClient(paramiko.SSHClient):\n \"\"\"paramiko.SSHClient wrapper\n@@ -15,6 +16,8 @@\n connect_kwargs['timeout'] = 10\n if 'allow_agent' not in connect_kwargs:\n connect_kwargs['allow_agent'] = False\n+ if 'look_for_keys' not in connect_kwargs:\n+ connect_kwargs['look_for_keys'] = False\n self._connect_kwargs = connect_kwargs\n \n def __call__(self, **connect_kwargs):\n@@ -41,6 +44,11 @@\n def run_rake_command(self, command):\n return rake_runner(self, command)\n \n+ def put_file(self, local_file, remote_file = '.'):\n+ return scp_putter(self, local_file, remote_file)\n+\n+ def get_file(self, remote_file, local_path = ''):\n+ return scp_getter(self, remote_file, local_path)\n \n def command_runner(client, command):\n template = '%s\\n'\n@@ -65,3 +73,13 @@\n template = '/var/www/miq/vmdb/script/rake -f /var/www/miq/vmdb/Rakefile %s'\n return rails_runner(client, template % command)\n \n+def scp_putter(client, local_file, remote_file):\n+ with client as ctx:\n+ transport = ctx.get_transport()\n+ SCPClient(transport).put(local_file, remote_file)\n+\n+def scp_getter(client, remote_file, local_path):\n+ with client as ctx:\n+ transport = ctx.get_transport()\n+ SCPClient(transport).get(remote_file, local_path)\n+\n", "issue": "Add SCP support\nThe ssh_client fixture supports running a rake command. Some of these commands presuppose a local file exists on the appliance, such as when importing an Automate custom namespace. Extending ssh_client would be ideal.\n\nHere's my os.system hack:\n\n```\nimport os\n...\nos.system(\"sshpass -p '%s' scp %s@%s:/root/\" % \\\n (mozwebqa.credentials['ssh']['password'], \n mozwebqa.credentials['ssh']['username'], \n soap_client.evm_server_hostname))\n```\n\n", "before_files": [{"content": "import paramiko\n\nclass SSHClient(paramiko.SSHClient):\n \"\"\"paramiko.SSHClient wrapper\n\n Allows copying/overriding and use as a context manager\n Constructor kwargs are handed directly to paramiko.SSHClient.connect()\n \"\"\"\n def __init__(self, **connect_kwargs):\n super(SSHClient, self).__init__()\n self.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n # Set up some sane defaults\n if 'timeout' not in connect_kwargs:\n connect_kwargs['timeout'] = 10\n if 'allow_agent' not in connect_kwargs:\n connect_kwargs['allow_agent'] = False\n self._connect_kwargs = connect_kwargs\n\n def __call__(self, **connect_kwargs):\n # Update a copy of this instance's connect kwargs with passed in kwargs,\n # then return a new instance with the updated kwargs\n new_connect_kwargs = dict(self._connect_kwargs)\n new_connect_kwargs.update(connect_kwargs)\n new_client = SSHClient(**new_connect_kwargs)\n return new_client\n\n def __enter__(self):\n self.connect(**self._connect_kwargs)\n return self\n\n def __exit__(self, *args, **kwargs):\n self.close()\n\n def run_command(self, command):\n return command_runner(self, command)\n\n def run_rails_command(self, command):\n return rails_runner(self, command)\n\n def run_rake_command(self, command):\n return rake_runner(self, command)\n\n\ndef command_runner(client, command):\n template = '%s\\n'\n command = template % command\n with client as ctx:\n transport = ctx.get_transport()\n session = transport.open_session()\n session.set_combine_stderr(True)\n session.exec_command(command)\n exit_status = session.recv_exit_status()\n output = session.recv(-1)\n return 
exit_status, output\n\n # Returning two things so tuple unpacking the return works even if the ssh client fails\n return None, None\n\ndef rails_runner(client, command):\n template = '/var/www/miq/vmdb/script/rails runner %s'\n return command_runner(client, template % command)\n\ndef rake_runner(client, command):\n template = '/var/www/miq/vmdb/script/rake -f /var/www/miq/vmdb/Rakefile %s'\n return rails_runner(client, template % command)\n\n", "path": "utils/ssh.py"}]}
1,278
401
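The golden diff builds on the third-party `scp` package. A runnable sketch of the same put/get helpers outside the fixture, with placeholder host and credentials:

```python
# Upload/download over SCP on top of an existing paramiko connection.
# Requires `pip install paramiko scp`; host and credentials are placeholders.
import paramiko
from scp import SCPClient

client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect("appliance.example.com", username="root", password="secret")

with SCPClient(client.get_transport()) as scp:
    scp.put("custom_namespace.yaml", "/root/")       # local -> remote
    scp.get("/var/www/miq/vmdb/log/evm.log", ".")    # remote -> local

client.close()
```

This replaces the `sshpass`/`os.system` workaround quoted in the issue with the same transport the fixture already holds.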
gh_patches_debug_6010
rasdani/github-patches
git_diff
ethereum__web3.py-460
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add field name of formatter failure to apply_formatters_to_dict * Version: 3.x & 4.x * Python: 2.7/3.4/3.5 * OS: osx/linux/win ### What was wrong? When applying formatters to a dict, if one of the values fails, it's impossible to tell from the trace which one did. ### How can it be fixed? Catch ValueError exceptions in `apply_formatters_to_dict` and rethrow with the name of the field that failed. </issue> <code> [start of web3/utils/formatters.py] 1 from collections import ( 2 Iterable, 3 Mapping, 4 ) 5 import sys 6 7 from cytoolz.functoolz import ( 8 curry, 9 compose, 10 ) 11 12 from eth_utils import ( 13 is_string, 14 to_list, 15 to_dict, 16 ) 17 18 from web3.utils.decorators import ( 19 reject_recursive_repeats, 20 ) 21 22 23 def hex_to_integer(value): 24 return int(value, 16) 25 26 27 if sys.version_info.major == 2: 28 def integer_to_hex(value): 29 return hex(value).rstrip('L') 30 else: 31 integer_to_hex = hex 32 33 34 @curry 35 @to_list 36 def apply_formatter_at_index(formatter, at_index, value): 37 if at_index + 1 > len(value): 38 raise IndexError( 39 "Not enough values in iterable to apply formatter. Got: {0}. " 40 "Need: {1}".format(len(value), at_index) 41 ) 42 for index, item in enumerate(value): 43 if index == at_index: 44 yield formatter(item) 45 else: 46 yield item 47 48 49 def apply_formatters_to_args(*formatters): 50 return compose(*( 51 apply_formatter_at_index(formatter, index) 52 for index, formatter 53 in enumerate(formatters) 54 )) 55 56 57 @curry 58 def apply_formatter_if(condition, formatter, value): 59 if condition(value): 60 return formatter(value) 61 else: 62 return value 63 64 65 @curry 66 @to_dict 67 def apply_formatters_to_dict(formatters, value): 68 for key, item in value.items(): 69 if key in formatters: 70 yield key, formatters[key](item) 71 else: 72 yield key, item 73 74 75 @curry 76 @to_list 77 def apply_formatter_to_array(formatter, value): 78 for item in value: 79 yield formatter(item) 80 81 82 @curry 83 def apply_one_of_formatters(formatter_condition_pairs, value): 84 for formatter, condition in formatter_condition_pairs: 85 if condition(value): 86 return formatter(value) 87 else: 88 raise ValueError("The provided value did not satisfy any of the formatter conditions") 89 90 91 def map_collection(func, collection): 92 ''' 93 Apply func to each element of a collection, or value of a dictionary. 94 If the value is not a collection, return it unmodified 95 ''' 96 datatype = type(collection) 97 if isinstance(collection, Mapping): 98 return datatype((key, func(val)) for key, val in collection.items()) 99 if is_string(collection): 100 return collection 101 elif isinstance(collection, Iterable): 102 return datatype(map(func, collection)) 103 else: 104 return collection 105 106 107 @reject_recursive_repeats 108 def recursive_map(func, data): 109 ''' 110 Apply func to data, and any collection items inside data (using map_collection). 111 Define func so that it only applies to the type of value that you want it to apply to. 
112 ''' 113 def recurse(item): 114 return recursive_map(func, item) 115 items_mapped = map_collection(recurse, data) 116 return func(items_mapped) 117 118 119 def static_return(value): 120 def inner(*args, **kwargs): 121 return value 122 return inner 123 124 125 def static_result(value): 126 def inner(*args, **kwargs): 127 return {'result': value} 128 return inner 129 130 131 @curry 132 @to_dict 133 def apply_key_map(key_mappings, value): 134 for key, item in value.items(): 135 if key in key_mappings: 136 yield key_mappings[key], item 137 else: 138 yield key, item 139 [end of web3/utils/formatters.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/web3/utils/formatters.py b/web3/utils/formatters.py --- a/web3/utils/formatters.py +++ b/web3/utils/formatters.py @@ -67,7 +67,10 @@ def apply_formatters_to_dict(formatters, value): for key, item in value.items(): if key in formatters: - yield key, formatters[key](item) + try: + yield key, formatters[key](item) + except (TypeError, ValueError) as exc: + raise type(exc)("Could not format value %r as field %r" % (item, key)) from exc else: yield key, item
{"golden_diff": "diff --git a/web3/utils/formatters.py b/web3/utils/formatters.py\n--- a/web3/utils/formatters.py\n+++ b/web3/utils/formatters.py\n@@ -67,7 +67,10 @@\n def apply_formatters_to_dict(formatters, value):\n for key, item in value.items():\n if key in formatters:\n- yield key, formatters[key](item)\n+ try:\n+ yield key, formatters[key](item)\n+ except (TypeError, ValueError) as exc:\n+ raise type(exc)(\"Could not format value %r as field %r\" % (item, key)) from exc\n else:\n yield key, item\n", "issue": "Add field name of formatter failure to apply_formatters_to_dict\n* Version: 3.x & 4.x\r\n* Python: 2.7/3.4/3.5\r\n* OS: osx/linux/win\r\n\r\n\r\n### What was wrong?\r\n\r\nWhen applying formatters to a dict, if one of the values fails, it's impossible to tell from the trace which one did.\r\n\r\n### How can it be fixed?\r\n\r\nCatch ValueError exceptions in `apply_formatters_to_dict` and rethrow with the name of the field that failed.\n", "before_files": [{"content": "from collections import (\n Iterable,\n Mapping,\n)\nimport sys\n\nfrom cytoolz.functoolz import (\n curry,\n compose,\n)\n\nfrom eth_utils import (\n is_string,\n to_list,\n to_dict,\n)\n\nfrom web3.utils.decorators import (\n reject_recursive_repeats,\n)\n\n\ndef hex_to_integer(value):\n return int(value, 16)\n\n\nif sys.version_info.major == 2:\n def integer_to_hex(value):\n return hex(value).rstrip('L')\nelse:\n integer_to_hex = hex\n\n\n@curry\n@to_list\ndef apply_formatter_at_index(formatter, at_index, value):\n if at_index + 1 > len(value):\n raise IndexError(\n \"Not enough values in iterable to apply formatter. Got: {0}. \"\n \"Need: {1}\".format(len(value), at_index)\n )\n for index, item in enumerate(value):\n if index == at_index:\n yield formatter(item)\n else:\n yield item\n\n\ndef apply_formatters_to_args(*formatters):\n return compose(*(\n apply_formatter_at_index(formatter, index)\n for index, formatter\n in enumerate(formatters)\n ))\n\n\n@curry\ndef apply_formatter_if(condition, formatter, value):\n if condition(value):\n return formatter(value)\n else:\n return value\n\n\n@curry\n@to_dict\ndef apply_formatters_to_dict(formatters, value):\n for key, item in value.items():\n if key in formatters:\n yield key, formatters[key](item)\n else:\n yield key, item\n\n\n@curry\n@to_list\ndef apply_formatter_to_array(formatter, value):\n for item in value:\n yield formatter(item)\n\n\n@curry\ndef apply_one_of_formatters(formatter_condition_pairs, value):\n for formatter, condition in formatter_condition_pairs:\n if condition(value):\n return formatter(value)\n else:\n raise ValueError(\"The provided value did not satisfy any of the formatter conditions\")\n\n\ndef map_collection(func, collection):\n '''\n Apply func to each element of a collection, or value of a dictionary.\n If the value is not a collection, return it unmodified\n '''\n datatype = type(collection)\n if isinstance(collection, Mapping):\n return datatype((key, func(val)) for key, val in collection.items())\n if is_string(collection):\n return collection\n elif isinstance(collection, Iterable):\n return datatype(map(func, collection))\n else:\n return collection\n\n\n@reject_recursive_repeats\ndef recursive_map(func, data):\n '''\n Apply func to data, and any collection items inside data (using map_collection).\n Define func so that it only applies to the type of value that you want it to apply to.\n '''\n def recurse(item):\n return recursive_map(func, item)\n items_mapped = map_collection(recurse, data)\n return func(items_mapped)\n\n\ndef 
static_return(value):\n def inner(*args, **kwargs):\n return value\n return inner\n\n\ndef static_result(value):\n def inner(*args, **kwargs):\n return {'result': value}\n return inner\n\n\n@curry\n@to_dict\ndef apply_key_map(key_mappings, value):\n for key, item in value.items():\n if key in key_mappings:\n yield key_mappings[key], item\n else:\n yield key, item\n", "path": "web3/utils/formatters.py"}]}
1,689
149
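The heart of the diff above is re-raising with the failing key while preserving the exception type and chain. A plain-dict sketch (the real helper is curried via cytoolz):

```python
def apply_formatters_to_dict(formatters, value):
    result = {}
    for key, item in value.items():
        if key not in formatters:
            result[key] = item
            continue
        try:
            result[key] = formatters[key](item)
        except (TypeError, ValueError) as exc:
            # Same exception class, augmented message, original kept via `from`.
            raise type(exc)("Could not format value %r as field %r" % (item, key)) from exc
    return result

print(apply_formatters_to_dict({"nonce": lambda v: int(v, 16)}, {"nonce": "0x2a"}))
# {'nonce': 42}; a bad value such as "xyz" now names the "nonce" field in the trace
```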
gh_patches_debug_37604
rasdani/github-patches
git_diff
OpenEnergyPlatform__oeplatform-974
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Logos in base/static/logos should be more organized
Currently, all logos (partner+project logos) are stored together in the `static/logos` directory. Introduce two sub-directories called `partner` and `project`.
</issue>
<code>
[start of base/views.py]
1 import os
2 import re
3 
4 import markdown2
5 from django.core.mail import send_mail
6 from django.shortcuts import render
7 from django.views.generic import View
8 
9 try:
10     import oeplatform.securitysettings as sec
11 except:
12     import logging
13     logging.error("No securitysettings found. Triggerd in base/views.py")
14 
15 from base.forms import ContactForm
16 
17 # Create your views here.
18 
19 SITE_ROOT = os.path.dirname(os.path.realpath(__file__))
20 
21 
22 class Welcome(View):
23     def get(self, request):
24         os.path.dirname(os.path.realpath(__file__))
25         version_expr = r"^(?P<major>\d+)\.(?P<minor>\d+)+\.(?P<patch>\d+)$"
26         markdowner = markdown2.Markdown()
27         with open(os.path.join(SITE_ROOT, "..", "VERSION")) as version_file:
28             match = re.match(version_expr, version_file.read())
29             major, minor, patch = match.groups()
30             with open(
31                 os.path.join(
32                     SITE_ROOT,
33                     "..",
34                     "versions/changelogs/%s_%s_%s.md" % (major, minor, patch),
35                 )
36             ) as change_file:
37                 changes = markdowner.convert(
38                     "\n".join(line for line in change_file.readlines())
39                 )
40         return render(
41             request,
42             "base/index.html",
43             {"version": "%s.%s.%s" % (major, minor, patch), "changes": changes},
44         )
45 
46 
47 def get_logs(request):
48     version_expr = r"^(?P<major>\d+)_(?P<major>\d+)+_(?P<major>\d+)\.md$"
49     for file in os.listdir("../versions/changelogs"):
50         match = re.match(version_expr, file)
51         markdowner = markdown2.Markdown()
52         if match:
53             major, minor, patch = match.groups()
54             with open("versions/changelogs" + file) as f:
55                 logs[(major, minor, patch)] = markdowner.convert(
56                     "\n".join(line for line in f.readlines())
57                 )
58 
59 
60 def redir(request, target):
61     return render(request, "base/{target}.html".format(target=target), {})
62 
63 
64 class ContactView(View):
65     error_css_class = "error"
66     required_css_class = "required"
67 
68     def post(self, request):
69         form = ContactForm(data=request.POST)
70         if form.is_valid():
71             receps = sec.CONTACT_ADDRESSES.get(
72                 request.POST["contact_category"], "technical"
73             )
74             send_mail(
75                 request.POST.get("contact_topic"),
76                 f"{request.POST.get('contact_name')} ({request.POST.get('contact_email')}) wrote: \n"
77                 + request.POST.get("content"),
78                 sec.DEFAULT_FROM_EMAIL,
79                 receps,
80                 fail_silently=False,
81             )
82             return render(
83                 request, "base/contact.html", {"form": ContactForm(), "success": True}
84             )
85         else:
86             return render(
87                 request, "base/contact.html", {"form": form, "success": False}
88             )
89 
90     def get(self, request):
91         return render(
92             request, "base/contact.html", {"form": ContactForm(), "success": False}
93         )
94 
95 
96 def robot(request):
97     return render(request, "base/robots.txt", {}, content_type="text/plain")
98 
99 
100 def handler500(request):
101     response = render(request, "base/500.html", {})
102     response.status_code = 500
103     return response
104 
105 
106 def handler404(request, exception):
107     response = render(request, "base/404.html", {})
108     response.status_code = 404
109     return response
[end of base/views.py]
[start of base/urls.py]
1 from django.conf.urls import url, include
2 from django.urls import path
3 from base import views
4 
5 urlpatterns = [
6     url(r"^robots.txt$",
views.robot), 7 url(r"^$", views.Welcome.as_view(), name="index"), 8 url(r"^about/$", views.redir, {"target": "about"}, name="index"), 9 url(r"^faq/$", views.redir, {"target": "faq"}, name="index"), 10 url(r"^discussion/$", views.redir, {"target": "discussion"}, name="index"), 11 url(r"^contact/$", views.ContactView.as_view(), name="index"), 12 url(r"^legal/privacy_policy/$", views.redir, {"target": "privacy_policy"}, name="index"), 13 url(r"^legal/tou/$", views.redir, {"target": "terms_of_use"}, name="index"), 14 ] + [path('captcha/', include('captcha.urls'))] 15 [end of base/urls.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/base/urls.py b/base/urls.py --- a/base/urls.py +++ b/base/urls.py @@ -5,7 +5,8 @@ urlpatterns = [ url(r"^robots.txt$", views.robot), url(r"^$", views.Welcome.as_view(), name="index"), - url(r"^about/$", views.redir, {"target": "about"}, name="index"), + url(r"^about/$", views.AboutPage.as_view(), name="index"), + url(r"^about/project-detail/(?P<project_id>[\w\-]+)/$", views.AboutProjectDetail.as_view(), name="project_detail"), url(r"^faq/$", views.redir, {"target": "faq"}, name="index"), url(r"^discussion/$", views.redir, {"target": "discussion"}, name="index"), url(r"^contact/$", views.ContactView.as_view(), name="index"), diff --git a/base/views.py b/base/views.py --- a/base/views.py +++ b/base/views.py @@ -1,5 +1,8 @@ +import json import os import re +import pathlib +from django.apps import apps import markdown2 from django.core.mail import send_mail @@ -107,3 +110,55 @@ response = render(request, "base/404.html", {}) response.status_code = 404 return response + + +def get_json_content(path, json_id=None): + """ Parse all jsons from given path and return as + list or return a single parsed json by id -> + The json must have a field called id. + + Args: + path (string): path to directory like 'static/project_pages_content/' + json_id (string, optional): ID value that must match the value of json[id]. Defaults to None. + + Returns: + list[object]: List of all deserialized json files in path + or + object: single json python object + """ + + if path is not None: + all_jsons=[] + for _json in os.listdir(path=path): + with open(os.path.join(path, _json), "r", encoding='utf-8') as json_content: + content = json.load(json_content) + all_jsons.append(content) + + if json_id is None: + return all_jsons + else: + content_by_id = [i for i in all_jsons if json_id == i["id"] and "template" != i["id"]] + return content_by_id[0] + # TODO: catch the exception if path is none + else: + return {"error": "Path cant be None. Please provide the path to '/static/project_detail_pages_content/' . You can create a new Project by adding an JSON file like the '/static/project_detail_pages_content/PROJECT_TEMPLATE.json'."} + +class AboutPage(View): +# docstring + projects_content_static = "project_detail_pages_content" + projects_content_path = os.path.join(sec.STATIC_ROOT, projects_content_static) + + def get(self, request, projects_content_path=projects_content_path): + projects = get_json_content(path=projects_content_path) + + return render(request, "base/about.html", {"projects": projects}) + +class AboutProjectDetail(AboutPage): +# docstring + + def get(self, request, project_id): + project = get_json_content(path=self.projects_content_path, json_id=project_id) + + return render(request, "base/project-detail.html", {"project": project}) + + \ No newline at end of file
{"golden_diff": "diff --git a/base/urls.py b/base/urls.py\n--- a/base/urls.py\n+++ b/base/urls.py\n@@ -5,7 +5,8 @@\n urlpatterns = [\n url(r\"^robots.txt$\", views.robot),\n url(r\"^$\", views.Welcome.as_view(), name=\"index\"),\n- url(r\"^about/$\", views.redir, {\"target\": \"about\"}, name=\"index\"),\n+ url(r\"^about/$\", views.AboutPage.as_view(), name=\"index\"),\n+ url(r\"^about/project-detail/(?P<project_id>[\\w\\-]+)/$\", views.AboutProjectDetail.as_view(), name=\"project_detail\"),\n url(r\"^faq/$\", views.redir, {\"target\": \"faq\"}, name=\"index\"),\n url(r\"^discussion/$\", views.redir, {\"target\": \"discussion\"}, name=\"index\"),\n url(r\"^contact/$\", views.ContactView.as_view(), name=\"index\"),\ndiff --git a/base/views.py b/base/views.py\n--- a/base/views.py\n+++ b/base/views.py\n@@ -1,5 +1,8 @@\n+import json\n import os\n import re\n+import pathlib\n+from django.apps import apps\n \n import markdown2\n from django.core.mail import send_mail\n@@ -107,3 +110,55 @@\n response = render(request, \"base/404.html\", {})\n response.status_code = 404\n return response\n+\n+\n+def get_json_content(path, json_id=None):\n+ \"\"\" Parse all jsons from given path and return as \n+ list or return a single parsed json by id -> \n+ The json must have a field called id. \n+\n+ Args:\n+ path (string): path to directory like 'static/project_pages_content/'\n+ json_id (string, optional): ID value that must match the value of json[id]. Defaults to None.\n+\n+ Returns:\n+ list[object]: List of all deserialized json files in path \n+ or\n+ object: single json python object\n+ \"\"\"\n+ \n+ if path is not None:\n+ all_jsons=[]\n+ for _json in os.listdir(path=path):\n+ with open(os.path.join(path, _json), \"r\", encoding='utf-8') as json_content:\n+ content = json.load(json_content)\n+ all_jsons.append(content)\n+\n+ if json_id is None:\n+ return all_jsons\n+ else:\n+ content_by_id = [i for i in all_jsons if json_id == i[\"id\"] and \"template\" != i[\"id\"]]\n+ return content_by_id[0]\n+ # TODO: catch the exception if path is none \n+ else:\n+ return {\"error\": \"Path cant be None. Please provide the path to '/static/project_detail_pages_content/' . You can create a new Project by adding an JSON file like the '/static/project_detail_pages_content/PROJECT_TEMPLATE.json'.\"}\n+\n+class AboutPage(View):\n+# docstring\n+ projects_content_static = \"project_detail_pages_content\"\n+ projects_content_path = os.path.join(sec.STATIC_ROOT, projects_content_static)\n+\n+ def get(self, request, projects_content_path=projects_content_path):\n+ projects = get_json_content(path=projects_content_path)\n+\n+ return render(request, \"base/about.html\", {\"projects\": projects})\n+\n+class AboutProjectDetail(AboutPage):\n+# docstring\n+\n+ def get(self, request, project_id):\n+ project = get_json_content(path=self.projects_content_path, json_id=project_id)\n+\n+ return render(request, \"base/project-detail.html\", {\"project\": project})\n+ \n+ \n\\ No newline at end of file\n", "issue": "Logos in base/static/logos should be more organized\nCurrently, all logos (partner+project logos) are stored together in the `static/logos` directory. Introduce two sub-dirs. called `partern` and `project` .\n", "before_files": [{"content": "import os\nimport re\n\nimport markdown2\nfrom django.core.mail import send_mail\nfrom django.shortcuts import render\nfrom django.views.generic import View\n\ntry:\n import oeplatform.securitysettings as sec\nexcept:\n import logging\n logging.error(\"No securitysettings found. 
Triggerd in base/views.py\")\n\nfrom base.forms import ContactForm\n\n# Create your views here.\n\nSITE_ROOT = os.path.dirname(os.path.realpath(__file__))\n\n\nclass Welcome(View):\n def get(self, request):\n os.path.dirname(os.path.realpath(__file__))\n version_expr = r\"^(?P<major>\\d+)\\.(?P<minor>\\d+)+\\.(?P<patch>\\d+)$\"\n markdowner = markdown2.Markdown()\n with open(os.path.join(SITE_ROOT, \"..\", \"VERSION\")) as version_file:\n match = re.match(version_expr, version_file.read())\n major, minor, patch = match.groups()\n with open(\n os.path.join(\n SITE_ROOT,\n \"..\",\n \"versions/changelogs/%s_%s_%s.md\" % (major, minor, patch),\n )\n ) as change_file:\n changes = markdowner.convert(\n \"\\n\".join(line for line in change_file.readlines())\n )\n return render(\n request,\n \"base/index.html\",\n {\"version\": \"%s.%s.%s\" % (major, minor, patch), \"changes\": changes},\n )\n\n\ndef get_logs(request):\n version_expr = r\"^(?P<major>\\d+)_(?P<major>\\d+)+_(?P<major>\\d+)\\.md$\"\n for file in os.listdir(\"../versions/changelogs\"):\n match = re.match(version_expr, file)\n markdowner = markdown2.Markdown()\n if match:\n major, minor, patch = match.groups()\n with open(\"versions/changelogs\" + file) as f:\n logs[(major, minor, patch)] = markdowner.convert(\n \"\\n\".join(line for line in f.readlines())\n )\n\n\ndef redir(request, target):\n return render(request, \"base/{target}.html\".format(target=target), {})\n\n\nclass ContactView(View):\n error_css_class = \"error\"\n required_css_class = \"required\"\n\n def post(self, request):\n form = ContactForm(data=request.POST)\n if form.is_valid():\n receps = sec.CONTACT_ADDRESSES.get(\n request.POST[\"contact_category\"], \"technical\"\n )\n send_mail(\n request.POST.get(\"contact_topic\"),\n f\"{request.POST.get('contact_name')} ({request.POST.get('contact_email')}) wrote: \\n\"\n + request.POST.get(\"content\"),\n sec.DEFAULT_FROM_EMAIL,\n receps,\n fail_silently=False,\n )\n return render(\n request, \"base/contact.html\", {\"form\": ContactForm(), \"success\": True}\n )\n else:\n return render(\n request, \"base/contact.html\", {\"form\": form, \"success\": False}\n )\n\n def get(self, request):\n return render(\n request, \"base/contact.html\", {\"form\": ContactForm(), \"success\": False}\n )\n\n\ndef robot(request):\n return render(request, \"base/robots.txt\", {}, content_type=\"text/plain\")\n\n\ndef handler500(request):\n response = render(request, \"base/500.html\", {})\n response.status_code = 500\n return response\n\n\ndef handler404(request, exception):\n response = render(request, \"base/404.html\", {})\n response.status_code = 404\n return response\n", "path": "base/views.py"}, {"content": "from django.conf.urls import url, include\nfrom django.urls import path\nfrom base import views\n\nurlpatterns = [\n url(r\"^robots.txt$\", views.robot),\n url(r\"^$\", views.Welcome.as_view(), name=\"index\"),\n url(r\"^about/$\", views.redir, {\"target\": \"about\"}, name=\"index\"),\n url(r\"^faq/$\", views.redir, {\"target\": \"faq\"}, name=\"index\"),\n url(r\"^discussion/$\", views.redir, {\"target\": \"discussion\"}, name=\"index\"),\n url(r\"^contact/$\", views.ContactView.as_view(), name=\"index\"),\n url(r\"^legal/privacy_policy/$\", views.redir, {\"target\": \"privacy_policy\"}, name=\"index\"),\n url(r\"^legal/tou/$\", views.redir, {\"target\": \"terms_of_use\"}, name=\"index\"),\n] + [path('captcha/', include('captcha.urls'))]\n", "path": "base/urls.py"}]}
1,817
804
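A condensed, standalone sketch of the JSON-per-page loader that the golden diff above adds (the directory layout and the `id`/`template` conventions are taken from the diff; error handling is simplified):

```python
import json
import os

def load_pages(path):
    """Deserialize every JSON file in `path` into a list of dicts."""
    pages = []
    for name in os.listdir(path):
        if name.endswith(".json"):
            with open(os.path.join(path, name), encoding="utf-8") as fh:
                pages.append(json.load(fh))
    return pages

def page_by_id(pages, page_id):
    # Mirrors the diff's filter: match on "id", skip the template stub.
    return next(p for p in pages if p["id"] == page_id and p["id"] != "template")
```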
gh_patches_debug_19853
rasdani/github-patches
git_diff
urllib3__urllib3-1855
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Raw HTML in README.rst causing problems with uploading dists See: https://travis-ci.org/github/urllib3/urllib3/builds/675807537 For now I've manually uploaded the dists. We should strip this section from our `long_description` field and maybe run `twine check` within our CI to make sure we don't run into this issue on release time in the future. </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 3 from setuptools import setup 4 5 import os 6 import re 7 import codecs 8 9 base_path = os.path.dirname(__file__) 10 11 # Get the version (borrowed from SQLAlchemy) 12 with open(os.path.join(base_path, "src", "urllib3", "__init__.py")) as fp: 13 VERSION = ( 14 re.compile(r""".*__version__ = ["'](.*?)['"]""", re.S).match(fp.read()).group(1) 15 ) 16 17 18 with codecs.open("README.rst", encoding="utf-8") as fp: 19 readme = fp.read() 20 21 with codecs.open("CHANGES.rst", encoding="utf-8") as fp: 22 changes = fp.read() 23 24 version = VERSION 25 26 setup( 27 name="urllib3", 28 version=version, 29 description="HTTP library with thread-safe connection pooling, file post, and more.", 30 long_description=u"\n\n".join([readme, changes]), 31 classifiers=[ 32 "Environment :: Web Environment", 33 "Intended Audience :: Developers", 34 "License :: OSI Approved :: MIT License", 35 "Operating System :: OS Independent", 36 "Programming Language :: Python", 37 "Programming Language :: Python :: 2", 38 "Programming Language :: Python :: 2.7", 39 "Programming Language :: Python :: 3", 40 "Programming Language :: Python :: 3.5", 41 "Programming Language :: Python :: 3.6", 42 "Programming Language :: Python :: 3.7", 43 "Programming Language :: Python :: 3.8", 44 "Programming Language :: Python :: 3.9", 45 "Programming Language :: Python :: Implementation :: CPython", 46 "Programming Language :: Python :: Implementation :: PyPy", 47 "Topic :: Internet :: WWW/HTTP", 48 "Topic :: Software Development :: Libraries", 49 ], 50 keywords="urllib httplib threadsafe filepost http https ssl pooling", 51 author="Andrey Petrov", 52 author_email="[email protected]", 53 url="https://urllib3.readthedocs.io/", 54 project_urls={ 55 "Documentation": "https://urllib3.readthedocs.io/", 56 "Code": "https://github.com/urllib3/urllib3", 57 "Issue tracker": "https://github.com/urllib3/urllib3/issues", 58 }, 59 license="MIT", 60 packages=[ 61 "urllib3", 62 "urllib3.packages", 63 "urllib3.packages.ssl_match_hostname", 64 "urllib3.packages.backports", 65 "urllib3.contrib", 66 "urllib3.contrib._securetransport", 67 "urllib3.util", 68 ], 69 package_dir={"": "src"}, 70 requires=[], 71 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4", 72 extras_require={ 73 "brotli": ["brotlipy>=0.6.0"], 74 "secure": [ 75 "pyOpenSSL>=0.14", 76 "cryptography>=1.3.4", 77 "idna>=2.0.0", 78 "certifi", 79 "ipaddress; python_version=='2.7'", 80 ], 81 "socks": ["PySocks>=1.5.6,<2.0,!=1.5.7"], 82 }, 83 ) 84 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -16,7 +16,18 @@ with codecs.open("README.rst", encoding="utf-8") as fp: - readme = fp.read() + # remove reST raw directive from README + mode = None + lines = [] + for line in fp: + if line.startswith(".. raw"): + mode = "ignore_raw" + elif line == "\n": + mode = None + + if mode != "ignore_raw": + lines.append(line) + readme = "".join(lines) with codecs.open("CHANGES.rst", encoding="utf-8") as fp: changes = fp.read() @@ -28,6 +39,7 @@ version=version, description="HTTP library with thread-safe connection pooling, file post, and more.", long_description=u"\n\n".join([readme, changes]), + long_description_content_type="text/x-rst", classifiers=[ "Environment :: Web Environment", "Intended Audience :: Developers",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -16,7 +16,18 @@\n \n \n with codecs.open(\"README.rst\", encoding=\"utf-8\") as fp:\n- readme = fp.read()\n+ # remove reST raw directive from README\n+ mode = None\n+ lines = []\n+ for line in fp:\n+ if line.startswith(\".. raw\"):\n+ mode = \"ignore_raw\"\n+ elif line == \"\\n\":\n+ mode = None\n+\n+ if mode != \"ignore_raw\":\n+ lines.append(line)\n+ readme = \"\".join(lines)\n \n with codecs.open(\"CHANGES.rst\", encoding=\"utf-8\") as fp:\n changes = fp.read()\n@@ -28,6 +39,7 @@\n version=version,\n description=\"HTTP library with thread-safe connection pooling, file post, and more.\",\n long_description=u\"\\n\\n\".join([readme, changes]),\n+ long_description_content_type=\"text/x-rst\",\n classifiers=[\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n", "issue": "Raw HTML in README.rst causing problems with uploading dists\nSee: https://travis-ci.org/github/urllib3/urllib3/builds/675807537\r\n\r\nFor now I've manually uploaded the dists.\r\n\r\nWe should strip this section from our `long_description` field and maybe run `twine check` within our CI to make sure we don't run into this issue on release time in the future.\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup\n\nimport os\nimport re\nimport codecs\n\nbase_path = os.path.dirname(__file__)\n\n# Get the version (borrowed from SQLAlchemy)\nwith open(os.path.join(base_path, \"src\", \"urllib3\", \"__init__.py\")) as fp:\n VERSION = (\n re.compile(r\"\"\".*__version__ = [\"'](.*?)['\"]\"\"\", re.S).match(fp.read()).group(1)\n )\n\n\nwith codecs.open(\"README.rst\", encoding=\"utf-8\") as fp:\n readme = fp.read()\n\nwith codecs.open(\"CHANGES.rst\", encoding=\"utf-8\") as fp:\n changes = fp.read()\n\nversion = VERSION\n\nsetup(\n name=\"urllib3\",\n version=version,\n description=\"HTTP library with thread-safe connection pooling, file post, and more.\",\n long_description=u\"\\n\\n\".join([readme, changes]),\n classifiers=[\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Software Development :: Libraries\",\n ],\n keywords=\"urllib httplib threadsafe filepost http https ssl pooling\",\n author=\"Andrey Petrov\",\n author_email=\"[email protected]\",\n url=\"https://urllib3.readthedocs.io/\",\n project_urls={\n \"Documentation\": \"https://urllib3.readthedocs.io/\",\n \"Code\": \"https://github.com/urllib3/urllib3\",\n \"Issue tracker\": \"https://github.com/urllib3/urllib3/issues\",\n },\n license=\"MIT\",\n packages=[\n \"urllib3\",\n \"urllib3.packages\",\n \"urllib3.packages.ssl_match_hostname\",\n \"urllib3.packages.backports\",\n \"urllib3.contrib\",\n \"urllib3.contrib._securetransport\",\n \"urllib3.util\",\n ],\n package_dir={\"\": \"src\"},\n requires=[],\n python_requires=\">=2.7, !=3.0.*, 
!=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4\",\n extras_require={\n \"brotli\": [\"brotlipy>=0.6.0\"],\n \"secure\": [\n \"pyOpenSSL>=0.14\",\n \"cryptography>=1.3.4\",\n \"idna>=2.0.0\",\n \"certifi\",\n \"ipaddress; python_version=='2.7'\",\n ],\n \"socks\": [\"PySocks>=1.5.6,<2.0,!=1.5.7\"],\n },\n)\n", "path": "setup.py"}]}
1,504
246
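The setup.py patch above strips reST `.. raw::` directives from the README before it becomes `long_description`, and declares `long_description_content_type="text/x-rst"` so the markup is validated. A standalone sketch of the same line-mode filter; it keeps the patch's simple rule (a blank line ends the skipped block), which assumes the raw block's body is not separated from the directive by a blank line:

```
import codecs

def strip_raw_directives(path):
    """Return README text with ``.. raw::`` blocks removed, so the result
    is safe to hand to setuptools' long_description."""
    kept = []
    skipping = False
    with codecs.open(path, encoding="utf-8") as fp:
        for line in fp:
            if line.startswith(".. raw"):
                skipping = True    # drop the directive line itself
            elif line == "\n":
                skipping = False   # patch's rule: a blank line ends the block
            if not skipping:
                kept.append(line)
    return "".join(kept)

# Hypothetical usage mirroring setup.py:
# readme = strip_raw_directives("README.rst")
```

The issue's other suggestion, running `twine check dist/*` in CI, catches this class of rendering failure before release time.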
gh_patches_debug_2875
rasdani/github-patches
git_diff
TheAlgorithms__Python-7556
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [PYTEST WARNING] QasmSimulator will be deprecated ### Feature description The use of `q.Aer.get_backend("qasm_simulator")` raises the warning ``` /opt/hostedtoolcache/Python/3.10.7/x64/lib/python3.10/site-packages/qiskit_aer/backends/qasm_simulator.py:360: PendingDeprecationWarning: The `QasmSimulator` backend will be deprecated in the future. It has been superseded by the `AerSimulator` backend. warn('The `QasmSimulator` backend will be deprecated in the' ``` This code is found in the following files: - deutsch_jozsa @abhishekjiitr - half_adder @abhishekjiitr - not_gate @abhishekjiitr - single_quibit_measure @abhishekjiitr origin: #7211 </issue> <code> [start of quantum/superdense_coding.py] 1 """ 2 Build the superdense coding protocol. This quantum 3 circuit can send two classical bits using one quantum 4 bit. This circuit is designed using the Qiskit 5 framework. This experiment run in IBM Q simulator 6 with 1000 shots. 7 . 8 References: 9 https://qiskit.org/textbook/ch-algorithms/superdense-coding.html 10 https://en.wikipedia.org/wiki/Superdense_coding 11 """ 12 13 import math 14 15 import qiskit 16 from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute 17 18 19 def superdense_coding(bit_1: int = 1, bit_2: int = 1) -> qiskit.result.counts.Counts: 20 """ 21 The input refer to the classical message 22 that you wants to send. {'00','01','10','11'} 23 result for default values: {11: 1000} 24 ┌───┐ ┌───┐ 25 qr_0: ─────┤ X ├──────────┤ X ├───── 26 ┌───┐└─┬─┘┌───┐┌───┐└─┬─┘┌───┐ 27 qr_1: ┤ H ├──■──┤ X ├┤ Z ├──■──┤ H ├ 28 └───┘ └───┘└───┘ └───┘ 29 cr: 2/══════════════════════════════ 30 Args: 31 bit_1: bit 1 of classical information to send. 32 bit_2: bit 2 of classical information to send. 33 Returns: 34 qiskit.result.counts.Counts: counts of send state. 35 >>> superdense_coding(0,0) 36 {'00': 1000} 37 >>> superdense_coding(0,1) 38 {'01': 1000} 39 >>> superdense_coding(-1,0) 40 Traceback (most recent call last): 41 ... 42 ValueError: inputs must be positive. 43 >>> superdense_coding(1,'j') 44 Traceback (most recent call last): 45 ... 46 TypeError: inputs must be integers. 47 >>> superdense_coding(1,0.5) 48 Traceback (most recent call last): 49 ... 50 ValueError: inputs must be exact integers. 51 >>> superdense_coding(2,1) 52 Traceback (most recent call last): 53 ... 54 ValueError: inputs must be less or equal to 1. 
55 """ 56 if (type(bit_1) == str) or (type(bit_2) == str): 57 raise TypeError("inputs must be integers.") 58 if (bit_1 < 0) or (bit_2 < 0): 59 raise ValueError("inputs must be positive.") 60 if (math.floor(bit_1) != bit_1) or (math.floor(bit_2) != bit_2): 61 raise ValueError("inputs must be exact integers.") 62 if (bit_1 > 1) or (bit_2 > 1): 63 raise ValueError("inputs must be less or equal to 1.") 64 65 # build registers 66 qr = QuantumRegister(2, "qr") 67 cr = ClassicalRegister(2, "cr") 68 69 quantum_circuit = QuantumCircuit(qr, cr) 70 71 # entanglement the qubits 72 quantum_circuit.h(1) 73 quantum_circuit.cx(1, 0) 74 75 # send the information 76 c_information = str(bit_1) + str(bit_2) 77 78 if c_information == "11": 79 quantum_circuit.x(1) 80 quantum_circuit.z(1) 81 elif c_information == "10": 82 quantum_circuit.z(1) 83 elif c_information == "01": 84 quantum_circuit.x(1) 85 else: 86 quantum_circuit.i(1) 87 88 # unentangled the circuit 89 quantum_circuit.cx(1, 0) 90 quantum_circuit.h(1) 91 92 # measure the circuit 93 quantum_circuit.measure(qr, cr) 94 95 backend = Aer.get_backend("qasm_simulator") 96 job = execute(quantum_circuit, backend, shots=1000) 97 98 return job.result().get_counts(quantum_circuit) 99 100 101 if __name__ == "__main__": 102 print(f"Counts for classical state send: {superdense_coding(1,1)}") 103 [end of quantum/superdense_coding.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/quantum/superdense_coding.py b/quantum/superdense_coding.py --- a/quantum/superdense_coding.py +++ b/quantum/superdense_coding.py @@ -92,7 +92,7 @@ # measure the circuit quantum_circuit.measure(qr, cr) - backend = Aer.get_backend("qasm_simulator") + backend = Aer.get_backend("aer_simulator") job = execute(quantum_circuit, backend, shots=1000) return job.result().get_counts(quantum_circuit)
{"golden_diff": "diff --git a/quantum/superdense_coding.py b/quantum/superdense_coding.py\n--- a/quantum/superdense_coding.py\n+++ b/quantum/superdense_coding.py\n@@ -92,7 +92,7 @@\n # measure the circuit\n quantum_circuit.measure(qr, cr)\n \n- backend = Aer.get_backend(\"qasm_simulator\")\n+ backend = Aer.get_backend(\"aer_simulator\")\n job = execute(quantum_circuit, backend, shots=1000)\n \n return job.result().get_counts(quantum_circuit)\n", "issue": "[PYTEST WARNING] QasmSimulator will be deprecated\n### Feature description\n\nThe use of `q.Aer.get_backend(\"qasm_simulator\")` raises the warning\r\n```\r\n/opt/hostedtoolcache/Python/3.10.7/x64/lib/python3.10/site-packages/qiskit_aer/backends/qasm_simulator.py:360: PendingDeprecationWarning: The `QasmSimulator` backend will be deprecated in the future. It has been superseded by the `AerSimulator` backend.\r\n warn('The `QasmSimulator` backend will be deprecated in the'\r\n```\r\nThis code is found in the following files:\r\n - deutsch_jozsa @abhishekjiitr \r\n - half_adder @abhishekjiitr \r\n - not_gate @abhishekjiitr \r\n - single_quibit_measure @abhishekjiitr \r\n\r\norigin: #7211\n", "before_files": [{"content": "\"\"\"\nBuild the superdense coding protocol. This quantum\ncircuit can send two classical bits using one quantum\nbit. This circuit is designed using the Qiskit\nframework. This experiment run in IBM Q simulator\nwith 1000 shots.\n.\nReferences:\nhttps://qiskit.org/textbook/ch-algorithms/superdense-coding.html\nhttps://en.wikipedia.org/wiki/Superdense_coding\n\"\"\"\n\nimport math\n\nimport qiskit\nfrom qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute\n\n\ndef superdense_coding(bit_1: int = 1, bit_2: int = 1) -> qiskit.result.counts.Counts:\n \"\"\"\n The input refer to the classical message\n that you wants to send. 
{'00','01','10','11'}\n result for default values: {11: 1000}\n \u250c\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2510\n qr_0: \u2500\u2500\u2500\u2500\u2500\u2524 X \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524 X \u251c\u2500\u2500\u2500\u2500\u2500\n \u250c\u2500\u2500\u2500\u2510\u2514\u2500\u252c\u2500\u2518\u250c\u2500\u2500\u2500\u2510\u250c\u2500\u2500\u2500\u2510\u2514\u2500\u252c\u2500\u2518\u250c\u2500\u2500\u2500\u2510\n qr_1: \u2524 H \u251c\u2500\u2500\u25a0\u2500\u2500\u2524 X \u251c\u2524 Z \u251c\u2500\u2500\u25a0\u2500\u2500\u2524 H \u251c\n \u2514\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2518\u2514\u2500\u2500\u2500\u2518 \u2514\u2500\u2500\u2500\u2518\n cr: 2/\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\u2550\n Args:\n bit_1: bit 1 of classical information to send.\n bit_2: bit 2 of classical information to send.\n Returns:\n qiskit.result.counts.Counts: counts of send state.\n >>> superdense_coding(0,0)\n {'00': 1000}\n >>> superdense_coding(0,1)\n {'01': 1000}\n >>> superdense_coding(-1,0)\n Traceback (most recent call last):\n ...\n ValueError: inputs must be positive.\n >>> superdense_coding(1,'j')\n Traceback (most recent call last):\n ...\n TypeError: inputs must be integers.\n >>> superdense_coding(1,0.5)\n Traceback (most recent call last):\n ...\n ValueError: inputs must be exact integers.\n >>> superdense_coding(2,1)\n Traceback (most recent call last):\n ...\n ValueError: inputs must be less or equal to 1.\n \"\"\"\n if (type(bit_1) == str) or (type(bit_2) == str):\n raise TypeError(\"inputs must be integers.\")\n if (bit_1 < 0) or (bit_2 < 0):\n raise ValueError(\"inputs must be positive.\")\n if (math.floor(bit_1) != bit_1) or (math.floor(bit_2) != bit_2):\n raise ValueError(\"inputs must be exact integers.\")\n if (bit_1 > 1) or (bit_2 > 1):\n raise ValueError(\"inputs must be less or equal to 1.\")\n\n # build registers\n qr = QuantumRegister(2, \"qr\")\n cr = ClassicalRegister(2, \"cr\")\n\n quantum_circuit = QuantumCircuit(qr, cr)\n\n # entanglement the qubits\n quantum_circuit.h(1)\n quantum_circuit.cx(1, 0)\n\n # send the information\n c_information = str(bit_1) + str(bit_2)\n\n if c_information == \"11\":\n quantum_circuit.x(1)\n quantum_circuit.z(1)\n elif c_information == \"10\":\n quantum_circuit.z(1)\n elif c_information == \"01\":\n quantum_circuit.x(1)\n else:\n quantum_circuit.i(1)\n\n # unentangled the circuit\n quantum_circuit.cx(1, 0)\n quantum_circuit.h(1)\n\n # measure the circuit\n quantum_circuit.measure(qr, cr)\n\n backend = Aer.get_backend(\"qasm_simulator\")\n job = execute(quantum_circuit, backend, shots=1000)\n\n return job.result().get_counts(quantum_circuit)\n\n\nif __name__ == \"__main__\":\n print(f\"Counts for classical state send: {superdense_coding(1,1)}\")\n", "path": "quantum/superdense_coding.py"}]}
1,906
137
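The one-line fix in this record renames the backend from the pending-deprecated `qasm_simulator` to `aer_simulator`. A small sketch of a tolerant lookup around that rename; the fallback branch is my addition for older qiskit-aer releases, not part of the repository's patch, and the `from qiskit import Aer` import matches the pre-1.0 Qiskit style used in the record:

```
from qiskit import Aer


def get_simulator_backend():
    """Prefer AerSimulator; fall back to the legacy backend name on
    qiskit-aer versions that predate the rename."""
    try:
        return Aer.get_backend("aer_simulator")
    except Exception:  # qiskit raises a backend-not-found error here
        return Aer.get_backend("qasm_simulator")
```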
gh_patches_debug_6867
rasdani/github-patches
git_diff
python-poetry__poetry-1621
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `poetry shell` puts Terminal in broken state and does not function <!-- Hi there! Thank you for discovering and submitting an issue. Before you submit this; let's make sure of a few things. Please make sure the following boxes are ticked if they are correct. If not, please try and fulfill these first. --> <!-- Checked checkbox should look like this: [x] --> - [x] I am on the [latest](https://github.com/sdispater/poetry/releases/latest) Poetry version. - [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate. - [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option). - **OS version and name**: Mac OS Mojave (10.14.6) - **Poetry version**: 1.0.0b5 - **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: https://gist.github.com/orokusaki/0750bd0dfef13324353d302d74a48254 ## Further environment notes - Python 2.7.17 and Python 3.7.5 installed via Homebrew - Poetry installed via `curl -sSL https://raw.githubusercontent.com/sdispater/poetry/master/get-poetry.py | POETRY_PREVIEW=1 python` ## Issue Upon using `poetry shell -vvv` (also tried without `-vvv` flag) the shell appears to spawn, but when I attempt to type any command, no text appears in the Terminal, and when I hit <kbd>return</kbd> I get what you can see in the screenshot I attached (the screenshot reflects the state after I typed a few characters and then hit <kbd>return</kbd> twice). If I send `SIGINT` to the shell (<kbd>CTRL</kbd> + <kbd>C</kbd>), the Terminal drops to a new line with the same output and lack of responsiveness, and upon sending `SIGINT` many times I'm still left with the Terminal in an unusable state. If I attempt to close Terminal, I get "*Closing this tab will terminate the running processes: bash, Python.*", which indicates that some code in Poetry is still hung up. ### Screenshot <img src="https://user-images.githubusercontent.com/97720/69014062-6a16bf80-0954-11ea-9717-7ff259875eea.png"> </issue> <code> [start of poetry/utils/shell.py] 1 import os 2 import signal 3 import sys 4 5 import pexpect 6 7 from clikit.utils.terminal import Terminal 8 from shellingham import ShellDetectionFailure 9 from shellingham import detect_shell 10 11 from ._compat import WINDOWS 12 from .env import VirtualEnv 13 14 15 class Shell: 16 """ 17 Represents the current shell. 18 """ 19 20 _shell = None 21 22 def __init__(self, name, path): # type: (str, str) -> None 23 self._name = name 24 self._path = path 25 26 @property 27 def name(self): # type: () -> str 28 return self._name 29 30 @property 31 def path(self): # type: () -> str 32 return self._path 33 34 @classmethod 35 def get(cls): # type: () -> Shell 36 """ 37 Retrieve the current shell. 
38 """ 39 if cls._shell is not None: 40 return cls._shell 41 42 try: 43 name, path = detect_shell(os.getpid()) 44 except (RuntimeError, ShellDetectionFailure): 45 raise RuntimeError("Unable to detect the current shell.") 46 47 cls._shell = cls(name, path) 48 49 return cls._shell 50 51 def activate(self, env): # type: (VirtualEnv) -> None 52 if WINDOWS: 53 return env.execute(self.path) 54 55 terminal = Terminal() 56 with env.temp_environ(): 57 c = pexpect.spawn( 58 self._path, ["-i"], dimensions=(terminal.height, terminal.width) 59 ) 60 61 c.setecho(False) 62 activate_script = self._get_activate_script() 63 bin_dir = "Scripts" if WINDOWS else "bin" 64 activate_path = env.path / bin_dir / activate_script 65 c.sendline("{} {}".format(self._get_source_command(), activate_path)) 66 67 def resize(sig, data): 68 terminal = Terminal() 69 c.setwinsize(terminal.height, terminal.width) 70 71 signal.signal(signal.SIGWINCH, resize) 72 73 # Interact with the new shell. 74 c.interact(escape_character=None) 75 c.close() 76 77 sys.exit(c.exitstatus) 78 79 def _get_activate_script(self): 80 if "fish" == self._name: 81 suffix = ".fish" 82 elif "csh" == self._name: 83 suffix = ".csh" 84 else: 85 suffix = "" 86 87 return "activate" + suffix 88 89 def _get_source_command(self): 90 if "fish" == self._name: 91 return "source" 92 elif "csh" == self._name: 93 return "source" 94 95 return "." 96 97 def __repr__(self): # type: () -> str 98 return '{}("{}", "{}")'.format(self.__class__.__name__, self._name, self._path) 99 [end of poetry/utils/shell.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/poetry/utils/shell.py b/poetry/utils/shell.py --- a/poetry/utils/shell.py +++ b/poetry/utils/shell.py @@ -58,7 +58,9 @@ self._path, ["-i"], dimensions=(terminal.height, terminal.width) ) - c.setecho(False) + if not self._name == "bash": + c.setecho(False) + activate_script = self._get_activate_script() bin_dir = "Scripts" if WINDOWS else "bin" activate_path = env.path / bin_dir / activate_script
{"golden_diff": "diff --git a/poetry/utils/shell.py b/poetry/utils/shell.py\n--- a/poetry/utils/shell.py\n+++ b/poetry/utils/shell.py\n@@ -58,7 +58,9 @@\n self._path, [\"-i\"], dimensions=(terminal.height, terminal.width)\n )\n \n- c.setecho(False)\n+ if not self._name == \"bash\":\n+ c.setecho(False)\n+\n activate_script = self._get_activate_script()\n bin_dir = \"Scripts\" if WINDOWS else \"bin\"\n activate_path = env.path / bin_dir / activate_script\n", "issue": "`poetry shell` puts Terminal in broken state and does not function\n<!--\r\n Hi there! Thank you for discovering and submitting an issue.\r\n\r\n Before you submit this; let's make sure of a few things.\r\n Please make sure the following boxes are ticked if they are correct.\r\n If not, please try and fulfill these first.\r\n-->\r\n\r\n<!-- Checked checkbox should look like this: [x] -->\r\n- [x] I am on the [latest](https://github.com/sdispater/poetry/releases/latest) Poetry version.\r\n- [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).\r\n\r\n- **OS version and name**: Mac OS Mojave (10.14.6)\r\n- **Poetry version**: 1.0.0b5\r\n- **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: https://gist.github.com/orokusaki/0750bd0dfef13324353d302d74a48254\r\n\r\n## Further environment notes\r\n\r\n - Python 2.7.17 and Python 3.7.5 installed via Homebrew\r\n - Poetry installed via `curl -sSL https://raw.githubusercontent.com/sdispater/poetry/master/get-poetry.py | POETRY_PREVIEW=1 python`\r\n\r\n## Issue\r\n\r\nUpon using `poetry shell -vvv` (also tried without `-vvv` flag) the shell appears to spawn, but when I attempt to type any command, no text appears in the Terminal, and when I hit <kbd>return</kbd> I get what you can see in the screenshot I attached (the screenshot reflects the state after I typed a few characters and then hit <kbd>return</kbd> twice). If I send `SIGINT` to the shell (<kbd>CTRL</kbd> + <kbd>C</kbd>), the Terminal drops to a new line with the same output and lack of responsiveness, and upon sending `SIGINT` many times I'm still left with the Terminal in an unusable state. 
If I attempt to close Terminal, I get \"*Closing this tab will terminate the running processes: bash, Python.*\", which indicates that some code in Poetry is still hung up.\r\n\r\n### Screenshot\r\n\r\n<img src=\"https://user-images.githubusercontent.com/97720/69014062-6a16bf80-0954-11ea-9717-7ff259875eea.png\">\n", "before_files": [{"content": "import os\nimport signal\nimport sys\n\nimport pexpect\n\nfrom clikit.utils.terminal import Terminal\nfrom shellingham import ShellDetectionFailure\nfrom shellingham import detect_shell\n\nfrom ._compat import WINDOWS\nfrom .env import VirtualEnv\n\n\nclass Shell:\n \"\"\"\n Represents the current shell.\n \"\"\"\n\n _shell = None\n\n def __init__(self, name, path): # type: (str, str) -> None\n self._name = name\n self._path = path\n\n @property\n def name(self): # type: () -> str\n return self._name\n\n @property\n def path(self): # type: () -> str\n return self._path\n\n @classmethod\n def get(cls): # type: () -> Shell\n \"\"\"\n Retrieve the current shell.\n \"\"\"\n if cls._shell is not None:\n return cls._shell\n\n try:\n name, path = detect_shell(os.getpid())\n except (RuntimeError, ShellDetectionFailure):\n raise RuntimeError(\"Unable to detect the current shell.\")\n\n cls._shell = cls(name, path)\n\n return cls._shell\n\n def activate(self, env): # type: (VirtualEnv) -> None\n if WINDOWS:\n return env.execute(self.path)\n\n terminal = Terminal()\n with env.temp_environ():\n c = pexpect.spawn(\n self._path, [\"-i\"], dimensions=(terminal.height, terminal.width)\n )\n\n c.setecho(False)\n activate_script = self._get_activate_script()\n bin_dir = \"Scripts\" if WINDOWS else \"bin\"\n activate_path = env.path / bin_dir / activate_script\n c.sendline(\"{} {}\".format(self._get_source_command(), activate_path))\n\n def resize(sig, data):\n terminal = Terminal()\n c.setwinsize(terminal.height, terminal.width)\n\n signal.signal(signal.SIGWINCH, resize)\n\n # Interact with the new shell.\n c.interact(escape_character=None)\n c.close()\n\n sys.exit(c.exitstatus)\n\n def _get_activate_script(self):\n if \"fish\" == self._name:\n suffix = \".fish\"\n elif \"csh\" == self._name:\n suffix = \".csh\"\n else:\n suffix = \"\"\n\n return \"activate\" + suffix\n\n def _get_source_command(self):\n if \"fish\" == self._name:\n return \"source\"\n elif \"csh\" == self._name:\n return \"source\"\n\n return \".\"\n\n def __repr__(self): # type: () -> str\n return '{}(\"{}\", \"{}\")'.format(self.__class__.__name__, self._name, self._path)\n", "path": "poetry/utils/shell.py"}]}
1,915
135
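The poetry fix above keeps `setecho(False)` for every shell except bash, where disabling child echo left typed input invisible on macOS. A minimal standalone sketch of that shell-dependent rule, assuming a POSIX system with pexpect installed; the basename check is a simplified stand-in for poetry's shellingham-based detection:

```
import os

import pexpect


def spawn_interactive_shell(shell_path):
    """Spawn an interactive shell, disabling child echo except for bash,
    where pexpect's setecho(False) breaks input display on some platforms."""
    child = pexpect.spawn(shell_path, ["-i"])
    if os.path.basename(shell_path) != "bash":
        child.setecho(False)
    return child


# Hypothetical usage:
# child = spawn_interactive_shell("/bin/zsh")
# child.interact(escape_character=None)
```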
gh_patches_debug_30769
rasdani/github-patches
git_diff
napari__napari-873
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> new zarr release / numcodecs ## 🐛 Bug Looks like zarr's new release requires numcodecs > 0.6.4, but we pinned to exclude it see discussion #666. I think we need to resolve this ASAP and then make the 0.2.10 release (which also includes the #866 bug fix). Thoughts @tlambert03 @jni? Has the 0.6.4 numcodecs install problem been resolved? You can see our failing tests in #867. </issue> <code> [start of napari/utils/io.py] 1 import os 2 3 from glob import glob 4 from pathlib import Path 5 6 import numpy as np 7 from skimage import io 8 from skimage.io.collection import alphanumeric_key 9 10 from dask import delayed 11 from dask import array as da 12 import zarr 13 14 15 def magic_imread(filenames, *, use_dask=None, stack=True): 16 """Dispatch the appropriate reader given some files. 17 18 The files are assumed to all have the same shape. 19 20 Parameters 21 ------- 22 filenames : list 23 List of filenames or directories to be opened. 24 A list of `pathlib.Path` objects and a single filename or `Path` object 25 are also accepted. 26 use_dask : bool 27 Whether to use dask to create a lazy array, rather than NumPy. 28 Default of None will resolve to True if filenames contains more than 29 one image, False otherwise. 30 stack : bool 31 Whether to stack the images in multiple files into a single array. If 32 False, a list of arrays will be returned. 33 34 Returns 35 ------- 36 image : array-like 37 Array or list of images 38 """ 39 # cast Path to string 40 if isinstance(filenames, Path): 41 filenames = filenames.as_posix() 42 43 if len(filenames) == 0: 44 return None 45 if isinstance(filenames, str): 46 filenames = [filenames] # ensure list 47 48 # replace folders with their contents 49 filenames_expanded = [] 50 for filename in filenames: 51 ext = os.path.splitext(filename)[-1] 52 # zarr files are folders, but should be read as 1 file 53 if os.path.isdir(filename) and not ext == '.zarr': 54 dir_contents = sorted( 55 glob(os.path.join(filename, '*.*')), key=alphanumeric_key 56 ) 57 # remove subdirectories 58 dir_contents_files = filter( 59 lambda f: not os.path.isdir(f), dir_contents 60 ) 61 filenames_expanded.extend(dir_contents_files) 62 else: 63 filenames_expanded.append(filename) 64 65 if use_dask is None: 66 use_dask = len(filenames_expanded) > 1 67 68 # then, read in images 69 images = [] 70 shape = None 71 for filename in filenames_expanded: 72 ext = os.path.splitext(filename)[-1] 73 if ext == '.zarr': 74 image, zarr_shape = read_zarr_dataset(filename) 75 if shape is None: 76 shape = zarr_shape 77 else: 78 if shape is None: 79 image = io.imread(filename) 80 shape = image.shape 81 dtype = image.dtype 82 if use_dask: 83 image = da.from_delayed( 84 delayed(io.imread)(filename), shape=shape, dtype=dtype 85 ) 86 elif len(images) > 0: # not read by shape clause 87 image = io.imread(filename) 88 images.append(image) 89 if len(images) == 1: 90 image = images[0] 91 else: 92 if stack: 93 if use_dask: 94 image = da.stack(images) 95 else: 96 image = np.stack(images) 97 else: 98 image = images # return a list 99 return image 100 101 102 def read_zarr_dataset(filename): 103 """Read a zarr dataset, including an array or a group of arrays. 104 105 Parameters 106 -------- 107 filename : str 108 Path to file ending in '.zarr'. File can contain either an array 109 or a group of arrays in the case of pyramid data. 
110 Returns 111 ------- 112 image : array-like 113 Array or list of arrays 114 shape : tuple 115 Shape of array or first array in list 116 """ 117 zr = zarr.open(filename, mode='r') 118 if isinstance(zr, zarr.core.Array): 119 # load zarr array 120 image = da.from_zarr(filename) 121 shape = image.shape 122 else: 123 # else load zarr all arrays inside file, useful for pyramid data 124 image = [da.from_zarr(filename, component=c) for c, a in zr.arrays()] 125 shape = image[0].shape 126 return image, shape 127 [end of napari/utils/io.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/napari/utils/io.py b/napari/utils/io.py --- a/napari/utils/io.py +++ b/napari/utils/io.py @@ -9,7 +9,6 @@ from dask import delayed from dask import array as da -import zarr def magic_imread(filenames, *, use_dask=None, stack=True): @@ -99,13 +98,13 @@ return image -def read_zarr_dataset(filename): +def read_zarr_dataset(path): """Read a zarr dataset, including an array or a group of arrays. Parameters -------- - filename : str - Path to file ending in '.zarr'. File can contain either an array + path : str + Path to directory ending in '.zarr'. Path can contain either an array or a group of arrays in the case of pyramid data. Returns ------- @@ -114,13 +113,17 @@ shape : tuple Shape of array or first array in list """ - zr = zarr.open(filename, mode='r') - if isinstance(zr, zarr.core.Array): + if os.path.exists(os.path.join(path, '.zarray')): # load zarr array - image = da.from_zarr(filename) + image = da.from_zarr(path) shape = image.shape - else: + elif os.path.exists(os.path.join(path, '.zgroup')): # else load zarr all arrays inside file, useful for pyramid data - image = [da.from_zarr(filename, component=c) for c, a in zr.arrays()] + image = [] + for subpath in sorted(os.listdir(path)): + if not subpath.startswith('.'): + image.append(read_zarr_dataset(os.path.join(path, subpath))[0]) shape = image[0].shape + else: + raise ValueError(f"Not a zarr dataset or group: {path}") return image, shape
{"golden_diff": "diff --git a/napari/utils/io.py b/napari/utils/io.py\n--- a/napari/utils/io.py\n+++ b/napari/utils/io.py\n@@ -9,7 +9,6 @@\n \n from dask import delayed\n from dask import array as da\n-import zarr\n \n \n def magic_imread(filenames, *, use_dask=None, stack=True):\n@@ -99,13 +98,13 @@\n return image\n \n \n-def read_zarr_dataset(filename):\n+def read_zarr_dataset(path):\n \"\"\"Read a zarr dataset, including an array or a group of arrays.\n \n Parameters\n --------\n- filename : str\n- Path to file ending in '.zarr'. File can contain either an array\n+ path : str\n+ Path to directory ending in '.zarr'. Path can contain either an array\n or a group of arrays in the case of pyramid data.\n Returns\n -------\n@@ -114,13 +113,17 @@\n shape : tuple\n Shape of array or first array in list\n \"\"\"\n- zr = zarr.open(filename, mode='r')\n- if isinstance(zr, zarr.core.Array):\n+ if os.path.exists(os.path.join(path, '.zarray')):\n # load zarr array\n- image = da.from_zarr(filename)\n+ image = da.from_zarr(path)\n shape = image.shape\n- else:\n+ elif os.path.exists(os.path.join(path, '.zgroup')):\n # else load zarr all arrays inside file, useful for pyramid data\n- image = [da.from_zarr(filename, component=c) for c, a in zr.arrays()]\n+ image = []\n+ for subpath in sorted(os.listdir(path)):\n+ if not subpath.startswith('.'):\n+ image.append(read_zarr_dataset(os.path.join(path, subpath))[0])\n shape = image[0].shape\n+ else:\n+ raise ValueError(f\"Not a zarr dataset or group: {path}\")\n return image, shape\n", "issue": "new zarr release / numcodecs\n## \ud83d\udc1b Bug\r\n\r\nLooks like zarr's new release requires numcodecs > 0.6.4, but we pinned to exclude it see discussion #666. I think we need to resolve this ASAP and then make the 0.2.10 release (which also includes the #866 bug fix). Thoughts @tlambert03 @jni? Has the 0.6.4 numcodecs install problem been resolved? You can see our failing tests in #867. \n", "before_files": [{"content": "import os\n\nfrom glob import glob\nfrom pathlib import Path\n\nimport numpy as np\nfrom skimage import io\nfrom skimage.io.collection import alphanumeric_key\n\nfrom dask import delayed\nfrom dask import array as da\nimport zarr\n\n\ndef magic_imread(filenames, *, use_dask=None, stack=True):\n \"\"\"Dispatch the appropriate reader given some files.\n\n The files are assumed to all have the same shape.\n\n Parameters\n -------\n filenames : list\n List of filenames or directories to be opened.\n A list of `pathlib.Path` objects and a single filename or `Path` object\n are also accepted.\n use_dask : bool\n Whether to use dask to create a lazy array, rather than NumPy.\n Default of None will resolve to True if filenames contains more than\n one image, False otherwise.\n stack : bool\n Whether to stack the images in multiple files into a single array. 
If\n False, a list of arrays will be returned.\n\n Returns\n -------\n image : array-like\n Array or list of images\n \"\"\"\n # cast Path to string\n if isinstance(filenames, Path):\n filenames = filenames.as_posix()\n\n if len(filenames) == 0:\n return None\n if isinstance(filenames, str):\n filenames = [filenames] # ensure list\n\n # replace folders with their contents\n filenames_expanded = []\n for filename in filenames:\n ext = os.path.splitext(filename)[-1]\n # zarr files are folders, but should be read as 1 file\n if os.path.isdir(filename) and not ext == '.zarr':\n dir_contents = sorted(\n glob(os.path.join(filename, '*.*')), key=alphanumeric_key\n )\n # remove subdirectories\n dir_contents_files = filter(\n lambda f: not os.path.isdir(f), dir_contents\n )\n filenames_expanded.extend(dir_contents_files)\n else:\n filenames_expanded.append(filename)\n\n if use_dask is None:\n use_dask = len(filenames_expanded) > 1\n\n # then, read in images\n images = []\n shape = None\n for filename in filenames_expanded:\n ext = os.path.splitext(filename)[-1]\n if ext == '.zarr':\n image, zarr_shape = read_zarr_dataset(filename)\n if shape is None:\n shape = zarr_shape\n else:\n if shape is None:\n image = io.imread(filename)\n shape = image.shape\n dtype = image.dtype\n if use_dask:\n image = da.from_delayed(\n delayed(io.imread)(filename), shape=shape, dtype=dtype\n )\n elif len(images) > 0: # not read by shape clause\n image = io.imread(filename)\n images.append(image)\n if len(images) == 1:\n image = images[0]\n else:\n if stack:\n if use_dask:\n image = da.stack(images)\n else:\n image = np.stack(images)\n else:\n image = images # return a list\n return image\n\n\ndef read_zarr_dataset(filename):\n \"\"\"Read a zarr dataset, including an array or a group of arrays.\n\n Parameters\n --------\n filename : str\n Path to file ending in '.zarr'. File can contain either an array\n or a group of arrays in the case of pyramid data.\n Returns\n -------\n image : array-like\n Array or list of arrays\n shape : tuple\n Shape of array or first array in list\n \"\"\"\n zr = zarr.open(filename, mode='r')\n if isinstance(zr, zarr.core.Array):\n # load zarr array\n image = da.from_zarr(filename)\n shape = image.shape\n else:\n # else load zarr all arrays inside file, useful for pyramid data\n image = [da.from_zarr(filename, component=c) for c, a in zr.arrays()]\n shape = image[0].shape\n return image, shape\n", "path": "napari/utils/io.py"}]}
1,806
450
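The napari patch above drops the `zarr` import and instead decides between array and group from the `.zarray` / `.zgroup` marker files that a zarr v2 store keeps on disk, so only dask is needed at read time. A condensed sketch of that detection rule; it assumes a flat pyramid layout (a group whose children are arrays), as the patch does:

```
import os

from dask import array as da


def open_zarr(path):
    """Return a dask array, or a list of arrays for a pyramid group,
    by inspecting zarr v2 marker files instead of importing zarr."""
    if os.path.exists(os.path.join(path, ".zarray")):
        return da.from_zarr(path)                  # a single array
    if os.path.exists(os.path.join(path, ".zgroup")):
        arrays = []
        for name in sorted(os.listdir(path)):      # pyramid levels
            if not name.startswith("."):
                arrays.append(open_zarr(os.path.join(path, name)))
        return arrays
    raise ValueError(f"not a zarr array or group: {path}")
```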
gh_patches_debug_10983
rasdani/github-patches
git_diff
goauthentik__authentik-4957
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Launch URL in Application UI Settings can't be entered for some domains **Describe the bug** When I try to add a fixed link to an application, it will return an error with null text. I think this is happening only for any subdomain that has a dash character on the subdomain portion of the name: ej: https://tbb-assets.domain.com **Screenshots** This one gets saved without any problems: https://application.com ![image](https://user-images.githubusercontent.com/75147745/225165939-0cc8c789-65e8-4fad-bec5-c0434ceecb6d.png) But if i edit this domain to something else like: https://tbb-assets.easyfoodsin.com ![image](https://user-images.githubusercontent.com/75147745/225165898-f2946215-3846-4f23-be88-8b4d84c067e2.png) **Logs** Output of docker-compose logs or kubectl logs respectively. I can't find anything on the logs it seems that nothing is submitted is a validation error within the application edit screen. **Version and Deployment (please complete the following information):** - authentik version: 2023.3.0 - Deployment: docker-compose **Additional context** This error is not happening on version (2023.2.2) because I created a few applications recently that have many urls that have a dash on the subdomain. </issue> <code> [start of authentik/lib/models.py] 1 """Generic models""" 2 import re 3 4 from django.core.validators import URLValidator 5 from django.db import models 6 from django.utils.regex_helper import _lazy_re_compile 7 from model_utils.managers import InheritanceManager 8 from rest_framework.serializers import BaseSerializer 9 10 11 class SerializerModel(models.Model): 12 """Base Abstract Model which has a serializer""" 13 14 @property 15 def serializer(self) -> type[BaseSerializer]: 16 """Get serializer for this model""" 17 raise NotImplementedError 18 19 class Meta: 20 abstract = True 21 22 23 class CreatedUpdatedModel(models.Model): 24 """Base Abstract Model to save created and update""" 25 26 created = models.DateTimeField(auto_now_add=True) 27 last_updated = models.DateTimeField(auto_now=True) 28 29 class Meta: 30 abstract = True 31 32 33 class InheritanceAutoManager(InheritanceManager): 34 """Object manager which automatically selects the subclass""" 35 36 def get_queryset(self): 37 return super().get_queryset().select_subclasses() 38 39 40 class InheritanceForwardManyToOneDescriptor(models.fields.related.ForwardManyToOneDescriptor): 41 """Forward ManyToOne Descriptor that selects subclass. Requires InheritanceAutoManager.""" 42 43 def get_queryset(self, **hints): 44 return self.field.remote_field.model.objects.db_manager(hints=hints).select_subclasses() 45 46 47 class InheritanceForeignKey(models.ForeignKey): 48 """Custom ForeignKey that uses InheritanceForwardManyToOneDescriptor""" 49 50 forward_related_accessor_class = InheritanceForwardManyToOneDescriptor 51 52 53 class DomainlessURLValidator(URLValidator): 54 """Subclass of URLValidator which doesn't check the domain 55 (to allow hostnames without domain)""" 56 57 def __init__(self, *args, **kwargs) -> None: 58 super().__init__(*args, **kwargs) 59 self.host_re = "(" + self.hostname_re + self.domain_re + "|localhost)" 60 self.regex = _lazy_re_compile( 61 r"^(?:[a-z0-9.+-]*)://" # scheme is validated separately 62 r"(?:[^\s:@/]+(?::[^\s:@/]*)?@)?" # user:pass authentication 63 r"(?:" + self.ipv4_re + "|" + self.ipv6_re + "|" + self.host_re + ")" 64 r"(?::\d{2,5})?" # port 65 r"(?:[/?#][^\s]*)?" 
# resource path 66 r"\Z", 67 re.IGNORECASE, 68 ) 69 self.schemes = ["http", "https", "blank"] + list(self.schemes) 70 71 def __call__(self, value: str): 72 # Check if the scheme is valid. 73 scheme = value.split("://")[0].lower() 74 if scheme not in self.schemes: 75 value = "default" + value 76 super().__call__(value) 77 78 79 class DomainlessFormattedURLValidator(DomainlessURLValidator): 80 """URL validator which allows for python format strings""" 81 82 def __init__(self, *args, **kwargs) -> None: 83 super().__init__(*args, **kwargs) 84 self.host_re = r"([%\(\)a-zA-Z])+" + self.domain_re + self.domain_re 85 self.regex = _lazy_re_compile( 86 r"^(?:[a-z0-9.+-]*)://" # scheme is validated separately 87 r"(?:[^\s:@/]+(?::[^\s:@/]*)?@)?" # user:pass authentication 88 r"(?:" + self.ipv4_re + "|" + self.ipv6_re + "|" + self.host_re + ")" 89 r"(?::\d{2,5})?" # port 90 r"(?:[/?#][^\s]*)?" # resource path 91 r"\Z", 92 re.IGNORECASE, 93 ) 94 self.schemes = ["http", "https", "blank"] + list(self.schemes) 95 [end of authentik/lib/models.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/authentik/lib/models.py b/authentik/lib/models.py --- a/authentik/lib/models.py +++ b/authentik/lib/models.py @@ -81,7 +81,8 @@ def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) - self.host_re = r"([%\(\)a-zA-Z])+" + self.domain_re + self.domain_re + self.formatter_re = r"([%\(\)a-zA-Z])*" + self.host_re = "(" + self.formatter_re + self.hostname_re + self.domain_re + "|localhost)" self.regex = _lazy_re_compile( r"^(?:[a-z0-9.+-]*)://" # scheme is validated separately r"(?:[^\s:@/]+(?::[^\s:@/]*)?@)?" # user:pass authentication
{"golden_diff": "diff --git a/authentik/lib/models.py b/authentik/lib/models.py\n--- a/authentik/lib/models.py\n+++ b/authentik/lib/models.py\n@@ -81,7 +81,8 @@\n \n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n- self.host_re = r\"([%\\(\\)a-zA-Z])+\" + self.domain_re + self.domain_re\n+ self.formatter_re = r\"([%\\(\\)a-zA-Z])*\"\n+ self.host_re = \"(\" + self.formatter_re + self.hostname_re + self.domain_re + \"|localhost)\"\n self.regex = _lazy_re_compile(\n r\"^(?:[a-z0-9.+-]*)://\" # scheme is validated separately\n r\"(?:[^\\s:@/]+(?::[^\\s:@/]*)?@)?\" # user:pass authentication\n", "issue": "Launch URL in Application UI Settings can't be entered for some domains\n**Describe the bug**\r\nWhen I try to add a fixed link to an application, it will return an error with null text.\r\nI think this is happening only for any subdomain that has a dash character on the subdomain portion of the name:\r\nej: https://tbb-assets.domain.com\r\n\r\n**Screenshots**\r\nThis one gets saved without any problems:\r\nhttps://application.com\r\n![image](https://user-images.githubusercontent.com/75147745/225165939-0cc8c789-65e8-4fad-bec5-c0434ceecb6d.png)\r\n\r\nBut if i edit this domain to something else like:\r\nhttps://tbb-assets.easyfoodsin.com\r\n![image](https://user-images.githubusercontent.com/75147745/225165898-f2946215-3846-4f23-be88-8b4d84c067e2.png)\r\n\r\n**Logs**\r\nOutput of docker-compose logs or kubectl logs respectively.\r\nI can't find anything on the logs it seems that nothing is submitted is a validation error within the application edit screen.\r\n\r\n**Version and Deployment (please complete the following information):**\r\n - authentik version: 2023.3.0\r\n - Deployment: docker-compose\r\n\r\n**Additional context**\r\nThis error is not happening on version (2023.2.2) because I created a few applications recently that have many urls that have a dash on the subdomain.\n", "before_files": [{"content": "\"\"\"Generic models\"\"\"\nimport re\n\nfrom django.core.validators import URLValidator\nfrom django.db import models\nfrom django.utils.regex_helper import _lazy_re_compile\nfrom model_utils.managers import InheritanceManager\nfrom rest_framework.serializers import BaseSerializer\n\n\nclass SerializerModel(models.Model):\n \"\"\"Base Abstract Model which has a serializer\"\"\"\n\n @property\n def serializer(self) -> type[BaseSerializer]:\n \"\"\"Get serializer for this model\"\"\"\n raise NotImplementedError\n\n class Meta:\n abstract = True\n\n\nclass CreatedUpdatedModel(models.Model):\n \"\"\"Base Abstract Model to save created and update\"\"\"\n\n created = models.DateTimeField(auto_now_add=True)\n last_updated = models.DateTimeField(auto_now=True)\n\n class Meta:\n abstract = True\n\n\nclass InheritanceAutoManager(InheritanceManager):\n \"\"\"Object manager which automatically selects the subclass\"\"\"\n\n def get_queryset(self):\n return super().get_queryset().select_subclasses()\n\n\nclass InheritanceForwardManyToOneDescriptor(models.fields.related.ForwardManyToOneDescriptor):\n \"\"\"Forward ManyToOne Descriptor that selects subclass. 
Requires InheritanceAutoManager.\"\"\"\n\n def get_queryset(self, **hints):\n return self.field.remote_field.model.objects.db_manager(hints=hints).select_subclasses()\n\n\nclass InheritanceForeignKey(models.ForeignKey):\n \"\"\"Custom ForeignKey that uses InheritanceForwardManyToOneDescriptor\"\"\"\n\n forward_related_accessor_class = InheritanceForwardManyToOneDescriptor\n\n\nclass DomainlessURLValidator(URLValidator):\n \"\"\"Subclass of URLValidator which doesn't check the domain\n (to allow hostnames without domain)\"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self.host_re = \"(\" + self.hostname_re + self.domain_re + \"|localhost)\"\n self.regex = _lazy_re_compile(\n r\"^(?:[a-z0-9.+-]*)://\" # scheme is validated separately\n r\"(?:[^\\s:@/]+(?::[^\\s:@/]*)?@)?\" # user:pass authentication\n r\"(?:\" + self.ipv4_re + \"|\" + self.ipv6_re + \"|\" + self.host_re + \")\"\n r\"(?::\\d{2,5})?\" # port\n r\"(?:[/?#][^\\s]*)?\" # resource path\n r\"\\Z\",\n re.IGNORECASE,\n )\n self.schemes = [\"http\", \"https\", \"blank\"] + list(self.schemes)\n\n def __call__(self, value: str):\n # Check if the scheme is valid.\n scheme = value.split(\"://\")[0].lower()\n if scheme not in self.schemes:\n value = \"default\" + value\n super().__call__(value)\n\n\nclass DomainlessFormattedURLValidator(DomainlessURLValidator):\n \"\"\"URL validator which allows for python format strings\"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self.host_re = r\"([%\\(\\)a-zA-Z])+\" + self.domain_re + self.domain_re\n self.regex = _lazy_re_compile(\n r\"^(?:[a-z0-9.+-]*)://\" # scheme is validated separately\n r\"(?:[^\\s:@/]+(?::[^\\s:@/]*)?@)?\" # user:pass authentication\n r\"(?:\" + self.ipv4_re + \"|\" + self.ipv6_re + \"|\" + self.host_re + \")\"\n r\"(?::\\d{2,5})?\" # port\n r\"(?:[/?#][^\\s]*)?\" # resource path\n r\"\\Z\",\n re.IGNORECASE,\n )\n self.schemes = [\"http\", \"https\", \"blank\"] + list(self.schemes)\n", "path": "authentik/lib/models.py"}]}
1,876
204
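The authentik regression comes down to the host pattern `([%\(\)a-zA-Z])+` followed by two copies of `domain_re`: the leading character class admits neither digits nor hyphens, so a first label such as `tbb-assets` can never match, while the fix rebuilds the host as an optional format-string prefix plus Django's own `hostname_re` (which allows interior hyphens) and `domain_re`. A small self-contained check that illustrates the difference; the two regex fragments below are simplified stand-ins for Django's, not copies:

```
import re

# Simplified stand-ins for Django's URLValidator fragments (illustrative).
HOSTNAME_RE = r"[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?"  # one DNS label
DOMAIN_RE = r"(?:\.(?!-)[a-z0-9-]{1,63}(?<!-))*"       # dotted labels
FORMATTER_RE = r"(?:[%()a-zA-Z])*"                     # %-format characters

broken_host = re.compile(r"^(?:[%()a-zA-Z])+" + DOMAIN_RE + DOMAIN_RE + "$")
fixed_host = re.compile("^" + FORMATTER_RE + HOSTNAME_RE + DOMAIN_RE + "$")

for host in ("application.com", "tbb-assets.easyfoodsin.com"):
    print(host, bool(broken_host.match(host)), bool(fixed_host.match(host)))
# prints: application.com True True
# prints: tbb-assets.easyfoodsin.com False True
# The hyphenated label only passes once hostname_re is in the pattern.
```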
gh_patches_debug_6104
rasdani/github-patches
git_diff
pre-commit__pre-commit-949
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> cspell hook install fails due pre-commit assumptions regarding npm packages I am raising this bug here as cspell is still unusable as a pre-commit hook even after the author made additional changes and I am afraid that the root cause is no longer inside cspell package. Mainly cspell is a typescript project that is published on npm and you cannot run it without building it first. Apparently pre-commit does not know about this concenpt (or I am not aware about it).' More information can be found on https://github.com/Jason3S/cspell/issues/53#issuecomment-402562237 To enabled cspell hook it should be enough to add this: ``` - repo: https://github.com/Jason3S/cspell.git rev: v3.2.2 hooks: - id: cspell ``` Still, once you run pre-precommit you soon endup with something like: ``` cspell...................................................................Failed hookid: cspell internal/modules/cjs/loader.js:611 throw err; ^ Error: Cannot find module './dist/app' at Function.Module._resolveFilename (internal/modules/cjs/loader.js:609:15) at Function.Module._load (internal/modules/cjs/loader.js:535:25) at Module.require (internal/modules/cjs/loader.js:663:17) at require (internal/modules/cjs/helpers.js:20:18) at Object.<anonymous> (/Users/ssbarnea/.cache/pre-commit/repolvipoC/bin.js:5:1) at Module._compile (internal/modules/cjs/loader.js:734:30) at Object.Module._extensions..js (internal/modules/cjs/loader.js:745:10) at Module.load (internal/modules/cjs/loader.js:626:32) at tryModuleLoad (internal/modules/cjs/loader.js:566:12) at Function.Module._load (internal/modules/cjs/loader.js:558:3) internal/modules/cjs/loader.js:611 throw err; ^ ``` The maintainer of cspell mentioned that the project was not designed to run from source, and the expected behavior is to install the npm package. I have to say that I kinda agree with his view. How can we address this issue? 
</issue> <code> [start of pre_commit/languages/node.py] 1 from __future__ import unicode_literals 2 3 import contextlib 4 import os 5 import sys 6 7 import pre_commit.constants as C 8 from pre_commit.envcontext import envcontext 9 from pre_commit.envcontext import Var 10 from pre_commit.languages import helpers 11 from pre_commit.languages.python import bin_dir 12 from pre_commit.util import clean_path_on_failure 13 from pre_commit.util import cmd_output 14 15 16 ENVIRONMENT_DIR = 'node_env' 17 get_default_version = helpers.basic_get_default_version 18 healthy = helpers.basic_healthy 19 20 21 def _envdir(prefix, version): 22 directory = helpers.environment_dir(ENVIRONMENT_DIR, version) 23 return prefix.path(directory) 24 25 26 def get_env_patch(venv): 27 if sys.platform == 'cygwin': # pragma: no cover 28 _, win_venv, _ = cmd_output('cygpath', '-w', venv) 29 install_prefix = r'{}\bin'.format(win_venv.strip()) 30 elif sys.platform == 'win32': # pragma: no cover 31 install_prefix = bin_dir(venv) 32 else: # pragma: windows no cover 33 install_prefix = venv 34 return ( 35 ('NODE_VIRTUAL_ENV', venv), 36 ('NPM_CONFIG_PREFIX', install_prefix), 37 ('npm_config_prefix', install_prefix), 38 ('NODE_PATH', os.path.join(venv, 'lib', 'node_modules')), 39 ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))), 40 ) 41 42 43 @contextlib.contextmanager 44 def in_env(prefix, language_version): 45 with envcontext(get_env_patch(_envdir(prefix, language_version))): 46 yield 47 48 49 def install_environment(prefix, version, additional_dependencies): 50 additional_dependencies = tuple(additional_dependencies) 51 assert prefix.exists('package.json') 52 envdir = _envdir(prefix, version) 53 54 # https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx?f=255&MSPPError=-2147217396#maxpath 55 if sys.platform == 'win32': # pragma: no cover 56 envdir = '\\\\?\\' + os.path.normpath(envdir) 57 with clean_path_on_failure(envdir): 58 cmd = [ 59 sys.executable, '-mnodeenv', '--prebuilt', '--clean-src', envdir, 60 ] 61 if version != C.DEFAULT: 62 cmd.extend(['-n', version]) 63 cmd_output(*cmd) 64 65 with in_env(prefix, version): 66 helpers.run_setup_cmd( 67 prefix, 68 ('npm', 'install', '-g', '.') + additional_dependencies, 69 ) 70 71 72 def run_hook(hook, file_args): 73 with in_env(hook.prefix, hook.language_version): 74 return helpers.run_xargs(hook, helpers.to_cmd(hook), file_args) 75 [end of pre_commit/languages/node.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pre_commit/languages/node.py b/pre_commit/languages/node.py --- a/pre_commit/languages/node.py +++ b/pre_commit/languages/node.py @@ -62,10 +62,11 @@ cmd.extend(['-n', version]) cmd_output(*cmd) + dep = 'git+file:///{}'.format(prefix.prefix_dir) with in_env(prefix, version): helpers.run_setup_cmd( prefix, - ('npm', 'install', '-g', '.') + additional_dependencies, + ('npm', 'install', '-g', dep) + additional_dependencies, )
{"golden_diff": "diff --git a/pre_commit/languages/node.py b/pre_commit/languages/node.py\n--- a/pre_commit/languages/node.py\n+++ b/pre_commit/languages/node.py\n@@ -62,10 +62,11 @@\n cmd.extend(['-n', version])\n cmd_output(*cmd)\n \n+ dep = 'git+file:///{}'.format(prefix.prefix_dir)\n with in_env(prefix, version):\n helpers.run_setup_cmd(\n prefix,\n- ('npm', 'install', '-g', '.') + additional_dependencies,\n+ ('npm', 'install', '-g', dep) + additional_dependencies,\n )\n", "issue": "cspell hook install fails due pre-commit assumptions regarding npm packages\nI am raising this bug here as cspell is still unusable as a pre-commit hook even after the author made additional changes and I am afraid that the root cause is no longer inside cspell package.\r\n\r\nMainly cspell is a typescript project that is published on npm and you cannot run it without building it first. Apparently pre-commit does not know about this concenpt (or I am not aware about it).'\r\n\r\nMore information can be found on https://github.com/Jason3S/cspell/issues/53#issuecomment-402562237\r\n\r\nTo enabled cspell hook it should be enough to add this:\r\n```\r\n - repo: https://github.com/Jason3S/cspell.git\r\n rev: v3.2.2\r\n hooks:\r\n - id: cspell\r\n```\r\n\r\nStill, once you run pre-precommit you soon endup with something like:\r\n```\r\ncspell...................................................................Failed\r\nhookid: cspell\r\n\r\ninternal/modules/cjs/loader.js:611\r\n throw err;\r\n ^\r\n\r\nError: Cannot find module './dist/app'\r\n at Function.Module._resolveFilename (internal/modules/cjs/loader.js:609:15)\r\n at Function.Module._load (internal/modules/cjs/loader.js:535:25)\r\n at Module.require (internal/modules/cjs/loader.js:663:17)\r\n at require (internal/modules/cjs/helpers.js:20:18)\r\n at Object.<anonymous> (/Users/ssbarnea/.cache/pre-commit/repolvipoC/bin.js:5:1)\r\n at Module._compile (internal/modules/cjs/loader.js:734:30)\r\n at Object.Module._extensions..js (internal/modules/cjs/loader.js:745:10)\r\n at Module.load (internal/modules/cjs/loader.js:626:32)\r\n at tryModuleLoad (internal/modules/cjs/loader.js:566:12)\r\n at Function.Module._load (internal/modules/cjs/loader.js:558:3)\r\ninternal/modules/cjs/loader.js:611\r\n throw err;\r\n ^\r\n```\r\n\r\nThe maintainer of cspell mentioned that the project was not designed to run from source, and the expected behavior is to install the npm package. I have to say that I kinda agree with his view.\r\n\r\nHow can we address this issue? 
\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport os\nimport sys\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import Var\nfrom pre_commit.languages import helpers\nfrom pre_commit.languages.python import bin_dir\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\n\n\nENVIRONMENT_DIR = 'node_env'\nget_default_version = helpers.basic_get_default_version\nhealthy = helpers.basic_healthy\n\n\ndef _envdir(prefix, version):\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n return prefix.path(directory)\n\n\ndef get_env_patch(venv):\n if sys.platform == 'cygwin': # pragma: no cover\n _, win_venv, _ = cmd_output('cygpath', '-w', venv)\n install_prefix = r'{}\\bin'.format(win_venv.strip())\n elif sys.platform == 'win32': # pragma: no cover\n install_prefix = bin_dir(venv)\n else: # pragma: windows no cover\n install_prefix = venv\n return (\n ('NODE_VIRTUAL_ENV', venv),\n ('NPM_CONFIG_PREFIX', install_prefix),\n ('npm_config_prefix', install_prefix),\n ('NODE_PATH', os.path.join(venv, 'lib', 'node_modules')),\n ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),\n )\n\n\[email protected]\ndef in_env(prefix, language_version):\n with envcontext(get_env_patch(_envdir(prefix, language_version))):\n yield\n\n\ndef install_environment(prefix, version, additional_dependencies):\n additional_dependencies = tuple(additional_dependencies)\n assert prefix.exists('package.json')\n envdir = _envdir(prefix, version)\n\n # https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx?f=255&MSPPError=-2147217396#maxpath\n if sys.platform == 'win32': # pragma: no cover\n envdir = '\\\\\\\\?\\\\' + os.path.normpath(envdir)\n with clean_path_on_failure(envdir):\n cmd = [\n sys.executable, '-mnodeenv', '--prebuilt', '--clean-src', envdir,\n ]\n if version != C.DEFAULT:\n cmd.extend(['-n', version])\n cmd_output(*cmd)\n\n with in_env(prefix, version):\n helpers.run_setup_cmd(\n prefix,\n ('npm', 'install', '-g', '.') + additional_dependencies,\n )\n\n\ndef run_hook(hook, file_args):\n with in_env(hook.prefix, hook.language_version):\n return helpers.run_xargs(hook, helpers.to_cmd(hook), file_args)\n", "path": "pre_commit/languages/node.py"}]}
num_tokens_prompt: 1,812
num_tokens_diff: 134
problem_id: gh_patches_debug_16802
source: rasdani/github-patches
task_type: git_diff
in_source_id: chainer__chainer-658
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> TestConvolution2D.test_(forward, backward)_gpu_im2col does not check use_cudnn=False case `TestConvolution2D.test_forward_gpu_im2col` and `TestConvolution2D.test_backward_gpu_im2col` are expected to test `Convolution2DFunction.backward_gpu` works correctly when CuDNN is disabled. To achieve this, these test fixtures set `self.use_cudnn` attribute of the instance of `Convolution2D` to `False` . But what is actually passed to `convoluton_2d` function as `use_cudnn` option is the `use_cudnn` argument of `__init__` , not the attribute `self.use_cudnn` (See [here](https://github.com/pfnet/chainer/blob/af1f11d4e50b322286a041c416eddd4e0ee63d30/chainer/links/connection/convolution_2d.py#L75)). </issue> <code> [start of chainer/links/connection/convolution_2d.py] 1 import numpy 2 3 from chainer.functions.connection import convolution_2d 4 from chainer import link 5 6 7 class Convolution2D(link.Link): 8 9 """Two-dimensional convolutional layer. 10 11 This link wraps the :func:`~chainer.functions.convolution_2d` function and 12 holds the filter weight and bias vector as parameters. 13 14 Args: 15 in_channels (int): Number of channels of input arrays. 16 out_channels (int): Number of channels of output arrays. 17 ksize (int or (int, int)): Size of filters (a.k.a. kernels). 18 ``ksize=k`` and ``ksize=(k, k)`` are equivalent. 19 stride (int or (int, int)): Stride of filter applications. 20 ``stride=s`` and ``stride=(s, s)`` are equivalent. 21 pad (int or (int, int)): Spatial padding width for input arrays. 22 ``pad=p`` and ``pad=(p, p)`` are equivalent. 23 wscale (float): Scaling factor of the initial weight. 24 bias (float): Initial bias value. 25 nobias (bool): If True, then this link does not use the bias term. 26 use_cudnn (bool): If True, then this link uses CuDNN if available. 27 initialW (4-D array): Initial weight value. If ``None``, then this 28 function uses to initialize ``wscale``. 29 initial_bias (1-D array): Initial bias value. If ``None``, then this 30 function uses to initialize ``bias``. 31 32 .. seealso:: 33 See :func:`chainer.functions.convolution_2d` for the definition of 34 two-dimensional convolution. 35 36 Attributes: 37 W (~chainer.Variable): Weight parameter. 38 b (~chainer.Variable): Bias parameter. 39 40 """ 41 def __init__(self, in_channels, out_channels, ksize, stride=1, pad=0, 42 wscale=1, bias=0, nobias=False, use_cudnn=True, 43 initialW=None, initial_bias=None): 44 kh, kw = _pair(ksize) 45 self._conv_arg = (stride, pad, use_cudnn) 46 47 W_shape = (out_channels, in_channels, kh, kw) 48 super(Convolution2D, self).__init__(W=W_shape) 49 50 if initialW is not None: 51 self.W.data[...] = initialW 52 else: 53 std = wscale * numpy.sqrt(1. / (kh * kw * in_channels)) 54 self.W.data[...] = numpy.random.normal(0, std, W_shape) 55 56 if nobias: 57 self.b = None 58 else: 59 self.add_param('b', out_channels) 60 if initial_bias is None: 61 initial_bias = bias 62 self.b.data[...] = initial_bias 63 64 def __call__(self, x): 65 """Applies the convolution layer. 66 67 Args: 68 x (~chainer.Variable): Input image. 69 70 Returns: 71 ~chainer.Variable: Output of the convolution. 
72 73 """ 74 return convolution_2d.convolution_2d( 75 x, self.W, self.b, *self._conv_arg) 76 77 78 def _pair(x): 79 if hasattr(x, '__getitem__'): 80 return x 81 return (x, x) 82 [end of chainer/links/connection/convolution_2d.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/chainer/links/connection/convolution_2d.py b/chainer/links/connection/convolution_2d.py --- a/chainer/links/connection/convolution_2d.py +++ b/chainer/links/connection/convolution_2d.py @@ -42,7 +42,9 @@ wscale=1, bias=0, nobias=False, use_cudnn=True, initialW=None, initial_bias=None): kh, kw = _pair(ksize) - self._conv_arg = (stride, pad, use_cudnn) + self.stride = _pair(stride) + self.pad = _pair(pad) + self.use_cudnn = use_cudnn W_shape = (out_channels, in_channels, kh, kw) super(Convolution2D, self).__init__(W=W_shape) @@ -72,7 +74,7 @@ """ return convolution_2d.convolution_2d( - x, self.W, self.b, *self._conv_arg) + x, self.W, self.b, self.stride, self.pad, self.use_cudnn) def _pair(x):
{"golden_diff": "diff --git a/chainer/links/connection/convolution_2d.py b/chainer/links/connection/convolution_2d.py\n--- a/chainer/links/connection/convolution_2d.py\n+++ b/chainer/links/connection/convolution_2d.py\n@@ -42,7 +42,9 @@\n wscale=1, bias=0, nobias=False, use_cudnn=True,\n initialW=None, initial_bias=None):\n kh, kw = _pair(ksize)\n- self._conv_arg = (stride, pad, use_cudnn)\n+ self.stride = _pair(stride)\n+ self.pad = _pair(pad)\n+ self.use_cudnn = use_cudnn\n \n W_shape = (out_channels, in_channels, kh, kw)\n super(Convolution2D, self).__init__(W=W_shape)\n@@ -72,7 +74,7 @@\n \n \"\"\"\n return convolution_2d.convolution_2d(\n- x, self.W, self.b, *self._conv_arg)\n+ x, self.W, self.b, self.stride, self.pad, self.use_cudnn)\n \n \n def _pair(x):\n", "issue": "TestConvolution2D.test_(forward, backward)_gpu_im2col does not check use_cudnn=False case\n`TestConvolution2D.test_forward_gpu_im2col` and `TestConvolution2D.test_backward_gpu_im2col` are expected to test `Convolution2DFunction.backward_gpu` works correctly when CuDNN is disabled.\n\nTo achieve this, these test fixtures set `self.use_cudnn` attribute of the instance of `Convolution2D` to `False` . But what is actually passed to `convoluton_2d` function as `use_cudnn` option is the `use_cudnn` argument of `__init__` , not the attribute `self.use_cudnn` (See [here](https://github.com/pfnet/chainer/blob/af1f11d4e50b322286a041c416eddd4e0ee63d30/chainer/links/connection/convolution_2d.py#L75)).\n\n", "before_files": [{"content": "import numpy\n\nfrom chainer.functions.connection import convolution_2d\nfrom chainer import link\n\n\nclass Convolution2D(link.Link):\n\n \"\"\"Two-dimensional convolutional layer.\n\n This link wraps the :func:`~chainer.functions.convolution_2d` function and\n holds the filter weight and bias vector as parameters.\n\n Args:\n in_channels (int): Number of channels of input arrays.\n out_channels (int): Number of channels of output arrays.\n ksize (int or (int, int)): Size of filters (a.k.a. kernels).\n ``ksize=k`` and ``ksize=(k, k)`` are equivalent.\n stride (int or (int, int)): Stride of filter applications.\n ``stride=s`` and ``stride=(s, s)`` are equivalent.\n pad (int or (int, int)): Spatial padding width for input arrays.\n ``pad=p`` and ``pad=(p, p)`` are equivalent.\n wscale (float): Scaling factor of the initial weight.\n bias (float): Initial bias value.\n nobias (bool): If True, then this link does not use the bias term.\n use_cudnn (bool): If True, then this link uses CuDNN if available.\n initialW (4-D array): Initial weight value. If ``None``, then this\n function uses to initialize ``wscale``.\n initial_bias (1-D array): Initial bias value. If ``None``, then this\n function uses to initialize ``bias``.\n\n .. seealso::\n See :func:`chainer.functions.convolution_2d` for the definition of\n two-dimensional convolution.\n\n Attributes:\n W (~chainer.Variable): Weight parameter.\n b (~chainer.Variable): Bias parameter.\n\n \"\"\"\n def __init__(self, in_channels, out_channels, ksize, stride=1, pad=0,\n wscale=1, bias=0, nobias=False, use_cudnn=True,\n initialW=None, initial_bias=None):\n kh, kw = _pair(ksize)\n self._conv_arg = (stride, pad, use_cudnn)\n\n W_shape = (out_channels, in_channels, kh, kw)\n super(Convolution2D, self).__init__(W=W_shape)\n\n if initialW is not None:\n self.W.data[...] = initialW\n else:\n std = wscale * numpy.sqrt(1. / (kh * kw * in_channels))\n self.W.data[...] 
= numpy.random.normal(0, std, W_shape)\n\n if nobias:\n self.b = None\n else:\n self.add_param('b', out_channels)\n if initial_bias is None:\n initial_bias = bias\n self.b.data[...] = initial_bias\n\n def __call__(self, x):\n \"\"\"Applies the convolution layer.\n\n Args:\n x (~chainer.Variable): Input image.\n\n Returns:\n ~chainer.Variable: Output of the convolution.\n\n \"\"\"\n return convolution_2d.convolution_2d(\n x, self.W, self.b, *self._conv_arg)\n\n\ndef _pair(x):\n if hasattr(x, '__getitem__'):\n return x\n return (x, x)\n", "path": "chainer/links/connection/convolution_2d.py"}]}
1,636
262
gh_patches_debug_835
rasdani/github-patches
git_diff
scikit-hep__pyhf-336
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> bumpversion missing from setup.py[develop] # Description As titled, `bumpversion` is not in list of develop dependencies. # Expected Behavior Installing `pyhf` installs `bumpversion`. # Actual Behavior It does not install `bumpversion`. # Steps to Reproduce `pip install pyhf[develop]` # Checklist - [x] Run `git fetch` to get the most up to date version of `master` - [x] Searched through existing Issues to confirm this is not a duplicate issue - [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 3 from setuptools import setup, find_packages 4 5 extras_require = { 6 'tensorflow': [ 7 'tensorflow>=1.10.0', 8 'tensorflow-probability==0.3.0', 9 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass 10 'setuptools<=39.1.0', 11 ], 12 'torch': ['torch>=0.4.0'], 13 'mxnet': [ 14 'mxnet>=1.0.0', 15 'requests<2.19.0,>=2.18.4', 16 'numpy<1.15.0,>=1.8.2', 17 'requests<2.19.0,>=2.18.4', 18 ], 19 # 'dask': [ 20 # 'dask[array]' 21 # ], 22 'xmlimport': ['uproot'], 23 'minuit': ['iminuit'], 24 'develop': [ 25 'pyflakes', 26 'pytest>=3.5.1', 27 'pytest-cov>=2.5.1', 28 'pytest-benchmark[histogram]', 29 'pytest-console-scripts', 30 'python-coveralls', 31 'coverage>=4.0', # coveralls 32 'matplotlib', 33 'jupyter', 34 'nbdime', 35 'uproot>=3.0.0', 36 'papermill', 37 'graphviz', 38 'sphinx', 39 'sphinxcontrib-bibtex', 40 'sphinxcontrib-napoleon', 41 'sphinx_rtd_theme', 42 'nbsphinx', 43 'm2r', 44 'jsonpatch', 45 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now 46 'pre-commit', 47 'black;python_version>="3.6"', # Black is Python3 only 48 ], 49 } 50 extras_require['complete'] = sorted(set(sum(extras_require.values(), []))) 51 52 setup( 53 name='pyhf', 54 version='0.0.15', 55 description='(partial) pure python histfactory implementation', 56 url='https://github.com/diana-hep/pyhf', 57 author='Lukas Heinrich', 58 author_email='[email protected]', 59 license='Apache', 60 keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask', 61 classifiers=[ 62 "Programming Language :: Python :: 2", 63 "Programming Language :: Python :: 2.7", 64 "Programming Language :: Python :: 3", 65 "Programming Language :: Python :: 3.6", 66 ], 67 packages=find_packages(), 68 include_package_data=True, 69 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*", 70 install_requires=[ 71 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet 72 'click>=6.0', # for console scripts, 73 'tqdm', # for readxml 74 'six', # for modifiers 75 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6 76 'jsonpatch', 77 ], 78 extras_require=extras_require, 79 entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']}, 80 dependency_links=[], 81 ) 82 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -35,6 +35,7 @@ 'uproot>=3.0.0', 'papermill', 'graphviz', + 'bumpversion', 'sphinx', 'sphinxcontrib-bibtex', 'sphinxcontrib-napoleon',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -35,6 +35,7 @@\n 'uproot>=3.0.0',\n 'papermill',\n 'graphviz',\n+ 'bumpversion',\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinxcontrib-napoleon',\n", "issue": "bumpversion missing from setup.py[develop]\n# Description\r\n\r\nAs titled, `bumpversion` is not in list of develop dependencies.\r\n\r\n# Expected Behavior\r\n\r\nInstalling `pyhf` installs `bumpversion`.\r\n\r\n# Actual Behavior\r\n\r\nIt does not install `bumpversion`.\r\n\r\n# Steps to Reproduce\r\n\r\n`pip install pyhf[develop]`\r\n\r\n# Checklist\r\n\r\n- [x] Run `git fetch` to get the most up to date version of `master`\r\n- [x] Searched through existing Issues to confirm this is not a duplicate issue\r\n- [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\nextras_require = {\n 'tensorflow': [\n 'tensorflow>=1.10.0',\n 'tensorflow-probability==0.3.0',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptools<=39.1.0',\n ],\n 'torch': ['torch>=0.4.0'],\n 'mxnet': [\n 'mxnet>=1.0.0',\n 'requests<2.19.0,>=2.18.4',\n 'numpy<1.15.0,>=1.8.2',\n 'requests<2.19.0,>=2.18.4',\n ],\n # 'dask': [\n # 'dask[array]'\n # ],\n 'xmlimport': ['uproot'],\n 'minuit': ['iminuit'],\n 'develop': [\n 'pyflakes',\n 'pytest>=3.5.1',\n 'pytest-cov>=2.5.1',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'python-coveralls',\n 'coverage>=4.0', # coveralls\n 'matplotlib',\n 'jupyter',\n 'nbdime',\n 'uproot>=3.0.0',\n 'papermill',\n 'graphviz',\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'm2r',\n 'jsonpatch',\n 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now\n 'pre-commit',\n 'black;python_version>=\"3.6\"', # Black is Python3 only\n ],\n}\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\nsetup(\n name='pyhf',\n version='0.0.15',\n description='(partial) pure python histfactory implementation',\n url='https://github.com/diana-hep/pyhf',\n author='Lukas Heinrich',\n author_email='[email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n ],\n packages=find_packages(),\n include_package_data=True,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'six', # for modifiers\n 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n 'jsonpatch',\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},\n dependency_links=[],\n)\n", "path": "setup.py"}]}
1,611
84
gh_patches_debug_60953
rasdani/github-patches
git_diff
voicepaw__so-vits-svc-fork-336
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> "TypedStorage is deprecated" while Training **Describe the bug** Spammy "TypedStorage is deprecated" warning on every epoch. ``` [23:52:12] WARNING [23:52:12] C:\omited\venv\lib\site-packages\torch\_utils.py:776: UserWarning: warnings.py:109 TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() return self.fget.__get__(instance, owner)() ``` **To Reproduce** Simply train a voice. **Additional context** I updated to 3.6.1 today and start seeing the issue. Unfortunately I didn't know what was last good known version. I'm training a voice using CREPE F0 predictor and using PyTorch 2.0.0 in Windows 11 if that matters. </issue> <code> [start of src/so_vits_svc_fork/logger.py] 1 import os 2 import sys 3 from logging import ( 4 DEBUG, 5 INFO, 6 FileHandler, 7 StreamHandler, 8 basicConfig, 9 captureWarnings, 10 getLogger, 11 ) 12 from pathlib import Path 13 14 from rich.logging import RichHandler 15 16 LOGGER_INIT = False 17 18 19 def init_logger() -> None: 20 global LOGGER_INIT 21 if LOGGER_INIT: 22 return 23 24 IS_TEST = "test" in Path.cwd().stem 25 package_name = sys.modules[__name__].__package__ 26 basicConfig( 27 level=INFO, 28 format="%(asctime)s %(message)s", 29 datefmt="[%X]", 30 handlers=[ 31 StreamHandler() if is_notebook() else RichHandler(), 32 FileHandler(f"{package_name}.log"), 33 ], 34 ) 35 if IS_TEST: 36 getLogger(package_name).setLevel(DEBUG) 37 captureWarnings(True) 38 LOGGER_INIT = True 39 40 41 def is_notebook(): 42 try: 43 from IPython import get_ipython 44 45 if "IPKernelApp" not in get_ipython().config: # pragma: no cover 46 raise ImportError("console") 47 return False 48 if "VSCODE_PID" in os.environ: # pragma: no cover 49 raise ImportError("vscode") 50 return False 51 except Exception: 52 return False 53 else: # pragma: no cover 54 return True 55 [end of src/so_vits_svc_fork/logger.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/so_vits_svc_fork/logger.py b/src/so_vits_svc_fork/logger.py --- a/src/so_vits_svc_fork/logger.py +++ b/src/so_vits_svc_fork/logger.py @@ -1,5 +1,6 @@ import os import sys +import warnings from logging import ( DEBUG, INFO, @@ -35,6 +36,9 @@ if IS_TEST: getLogger(package_name).setLevel(DEBUG) captureWarnings(True) + warnings.filterwarnings( + "ignore", category=UserWarning, message="TypedStorage is deprecated" + ) LOGGER_INIT = True
{"golden_diff": "diff --git a/src/so_vits_svc_fork/logger.py b/src/so_vits_svc_fork/logger.py\n--- a/src/so_vits_svc_fork/logger.py\n+++ b/src/so_vits_svc_fork/logger.py\n@@ -1,5 +1,6 @@\n import os\n import sys\n+import warnings\n from logging import (\n DEBUG,\n INFO,\n@@ -35,6 +36,9 @@\n if IS_TEST:\n getLogger(package_name).setLevel(DEBUG)\n captureWarnings(True)\n+ warnings.filterwarnings(\n+ \"ignore\", category=UserWarning, message=\"TypedStorage is deprecated\"\n+ )\n LOGGER_INIT = True\n", "issue": "\"TypedStorage is deprecated\" while Training\n**Describe the bug**\r\nSpammy \"TypedStorage is deprecated\" warning on every epoch.\r\n\r\n```\r\n[23:52:12] WARNING [23:52:12] C:\\omited\\venv\\lib\\site-packages\\torch\\_utils.py:776: UserWarning: warnings.py:109\r\n TypedStorage is deprecated. It will be removed in the future and UntypedStorage will\r\n be the only storage class. This should only matter to you if you are using storages\r\n directly. To access UntypedStorage directly, use tensor.untyped_storage() instead\r\n of tensor.storage()\r\n return self.fget.__get__(instance, owner)()\r\n```\r\n\r\n**To Reproduce**\r\nSimply train a voice.\r\n\r\n**Additional context**\r\nI updated to 3.6.1 today and start seeing the issue. Unfortunately I didn't know what was last good known version.\r\n\r\nI'm training a voice using CREPE F0 predictor and using PyTorch 2.0.0 in Windows 11 if that matters.\r\n\n", "before_files": [{"content": "import os\nimport sys\nfrom logging import (\n DEBUG,\n INFO,\n FileHandler,\n StreamHandler,\n basicConfig,\n captureWarnings,\n getLogger,\n)\nfrom pathlib import Path\n\nfrom rich.logging import RichHandler\n\nLOGGER_INIT = False\n\n\ndef init_logger() -> None:\n global LOGGER_INIT\n if LOGGER_INIT:\n return\n\n IS_TEST = \"test\" in Path.cwd().stem\n package_name = sys.modules[__name__].__package__\n basicConfig(\n level=INFO,\n format=\"%(asctime)s %(message)s\",\n datefmt=\"[%X]\",\n handlers=[\n StreamHandler() if is_notebook() else RichHandler(),\n FileHandler(f\"{package_name}.log\"),\n ],\n )\n if IS_TEST:\n getLogger(package_name).setLevel(DEBUG)\n captureWarnings(True)\n LOGGER_INIT = True\n\n\ndef is_notebook():\n try:\n from IPython import get_ipython\n\n if \"IPKernelApp\" not in get_ipython().config: # pragma: no cover\n raise ImportError(\"console\")\n return False\n if \"VSCODE_PID\" in os.environ: # pragma: no cover\n raise ImportError(\"vscode\")\n return False\n except Exception:\n return False\n else: # pragma: no cover\n return True\n", "path": "src/so_vits_svc_fork/logger.py"}]}
1,174
145
gh_patches_debug_12843
rasdani/github-patches
git_diff
cobbler__cobbler-3598
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Backport] [scm-track] Fix commit command ### Original feature issue - PR: #3021 ### Target release - [x] release33 - [ ] release32 - [ ] release30 ### Reason Stabilizations of Cobbler 3.3.4 </issue> <code> [start of cobbler/modules/scm_track.py] 1 """ 2 (C) 2009, Red Hat Inc. 3 Michael DeHaan <michael.dehaan AT gmail> 4 5 This program is free software; you can redistribute it and/or modify 6 it under the terms of the GNU General Public License as published by 7 the Free Software Foundation; either version 2 of the License, or 8 (at your option) any later version. 9 10 This program is distributed in the hope that it will be useful, 11 but WITHOUT ANY WARRANTY; without even the implied warranty of 12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 GNU General Public License for more details. 14 15 You should have received a copy of the GNU General Public License 16 along with this program; if not, write to the Free Software 17 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 18 02110-1301 USA 19 """ 20 21 22 import os 23 24 import cobbler.utils as utils 25 26 from cobbler.cexceptions import CX 27 28 29 def register() -> str: 30 """ 31 This pure python trigger acts as if it were a legacy shell-trigger, but is much faster. The return of this method 32 indicates the trigger type 33 :return: Always: ``/var/lib/cobbler/triggers/change/*`` 34 """ 35 36 return "/var/lib/cobbler/triggers/change/*" 37 38 39 def run(api, args): 40 """ 41 Runs the trigger, meaning in this case track any changed which happen to a config or data file. 42 43 :param api: The api instance of the Cobbler server. Used to look up if scm_track_enabled is true. 44 :param args: The parameter is currently unused for this trigger. 45 :return: 0 on success, otherwise an exception is risen. 
46 """ 47 settings = api.settings() 48 49 if not settings.scm_track_enabled: 50 # feature disabled 51 return 0 52 53 mode = str(settings.scm_track_mode).lower() 54 author = str(settings.scm_track_author) 55 push_script = str(settings.scm_push_script) 56 57 if mode == "git": 58 old_dir = os.getcwd() 59 os.chdir("/var/lib/cobbler") 60 if os.getcwd() != "/var/lib/cobbler": 61 raise CX("danger will robinson") 62 63 if not os.path.exists("/var/lib/cobbler/.git"): 64 utils.subprocess_call(["git", "init"], shell=False) 65 66 # FIXME: If we know the remote user of an XMLRPC call use them as the author 67 utils.subprocess_call(["git", "add", "--all", "collections"], shell=False) 68 utils.subprocess_call(["git", "add", "--all", "templates"], shell=False) 69 utils.subprocess_call(["git", "add", "--all", "snippets"], shell=False) 70 utils.subprocess_call(["git", "commit", "-m", "API", "update", "--author", author], shell=False) 71 72 if push_script: 73 utils.subprocess_call([push_script], shell=False) 74 75 os.chdir(old_dir) 76 return 0 77 78 elif mode == "hg": 79 # use mercurial 80 old_dir = os.getcwd() 81 os.chdir("/var/lib/cobbler") 82 if os.getcwd() != "/var/lib/cobbler": 83 raise CX("danger will robinson") 84 85 if not os.path.exists("/var/lib/cobbler/.hg"): 86 utils.subprocess_call(["hg", "init"], shell=False) 87 88 # FIXME: If we know the remote user of an XMLRPC call use them as the user 89 utils.subprocess_call(["hg", "add collections"], shell=False) 90 utils.subprocess_call(["hg", "add templates"], shell=False) 91 utils.subprocess_call(["hg", "add snippets"], shell=False) 92 utils.subprocess_call(["hg", "commit", "-m", "API", "update", "--user", author], shell=False) 93 94 if push_script: 95 utils.subprocess_call([push_script], shell=False) 96 97 os.chdir(old_dir) 98 return 0 99 100 else: 101 raise CX("currently unsupported SCM type: %s" % mode) 102 [end of cobbler/modules/scm_track.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cobbler/modules/scm_track.py b/cobbler/modules/scm_track.py --- a/cobbler/modules/scm_track.py +++ b/cobbler/modules/scm_track.py @@ -67,7 +67,7 @@ utils.subprocess_call(["git", "add", "--all", "collections"], shell=False) utils.subprocess_call(["git", "add", "--all", "templates"], shell=False) utils.subprocess_call(["git", "add", "--all", "snippets"], shell=False) - utils.subprocess_call(["git", "commit", "-m", "API", "update", "--author", author], shell=False) + utils.subprocess_call(["git", "commit", "-m", "API update", "--author", author], shell=False) if push_script: utils.subprocess_call([push_script], shell=False)
{"golden_diff": "diff --git a/cobbler/modules/scm_track.py b/cobbler/modules/scm_track.py\n--- a/cobbler/modules/scm_track.py\n+++ b/cobbler/modules/scm_track.py\n@@ -67,7 +67,7 @@\n utils.subprocess_call([\"git\", \"add\", \"--all\", \"collections\"], shell=False)\n utils.subprocess_call([\"git\", \"add\", \"--all\", \"templates\"], shell=False)\n utils.subprocess_call([\"git\", \"add\", \"--all\", \"snippets\"], shell=False)\n- utils.subprocess_call([\"git\", \"commit\", \"-m\", \"API\", \"update\", \"--author\", author], shell=False)\n+ utils.subprocess_call([\"git\", \"commit\", \"-m\", \"API update\", \"--author\", author], shell=False)\n \n if push_script:\n utils.subprocess_call([push_script], shell=False)\n", "issue": "[Backport] [scm-track] Fix commit command\n### Original feature issue\r\n\r\n- PR: #3021\r\n\r\n### Target release\r\n\r\n- [x] release33\r\n- [ ] release32\r\n- [ ] release30\r\n\r\n### Reason\r\n\r\nStabilizations of Cobbler 3.3.4\r\n\n", "before_files": [{"content": "\"\"\"\n(C) 2009, Red Hat Inc.\nMichael DeHaan <michael.dehaan AT gmail>\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA\n02110-1301 USA\n\"\"\"\n\n\nimport os\n\nimport cobbler.utils as utils\n\nfrom cobbler.cexceptions import CX\n\n\ndef register() -> str:\n \"\"\"\n This pure python trigger acts as if it were a legacy shell-trigger, but is much faster. The return of this method\n indicates the trigger type\n :return: Always: ``/var/lib/cobbler/triggers/change/*``\n \"\"\"\n\n return \"/var/lib/cobbler/triggers/change/*\"\n\n\ndef run(api, args):\n \"\"\"\n Runs the trigger, meaning in this case track any changed which happen to a config or data file.\n\n :param api: The api instance of the Cobbler server. 
Used to look up if scm_track_enabled is true.\n :param args: The parameter is currently unused for this trigger.\n :return: 0 on success, otherwise an exception is risen.\n \"\"\"\n settings = api.settings()\n\n if not settings.scm_track_enabled:\n # feature disabled\n return 0\n\n mode = str(settings.scm_track_mode).lower()\n author = str(settings.scm_track_author)\n push_script = str(settings.scm_push_script)\n\n if mode == \"git\":\n old_dir = os.getcwd()\n os.chdir(\"/var/lib/cobbler\")\n if os.getcwd() != \"/var/lib/cobbler\":\n raise CX(\"danger will robinson\")\n\n if not os.path.exists(\"/var/lib/cobbler/.git\"):\n utils.subprocess_call([\"git\", \"init\"], shell=False)\n\n # FIXME: If we know the remote user of an XMLRPC call use them as the author\n utils.subprocess_call([\"git\", \"add\", \"--all\", \"collections\"], shell=False)\n utils.subprocess_call([\"git\", \"add\", \"--all\", \"templates\"], shell=False)\n utils.subprocess_call([\"git\", \"add\", \"--all\", \"snippets\"], shell=False)\n utils.subprocess_call([\"git\", \"commit\", \"-m\", \"API\", \"update\", \"--author\", author], shell=False)\n\n if push_script:\n utils.subprocess_call([push_script], shell=False)\n\n os.chdir(old_dir)\n return 0\n\n elif mode == \"hg\":\n # use mercurial\n old_dir = os.getcwd()\n os.chdir(\"/var/lib/cobbler\")\n if os.getcwd() != \"/var/lib/cobbler\":\n raise CX(\"danger will robinson\")\n\n if not os.path.exists(\"/var/lib/cobbler/.hg\"):\n utils.subprocess_call([\"hg\", \"init\"], shell=False)\n\n # FIXME: If we know the remote user of an XMLRPC call use them as the user\n utils.subprocess_call([\"hg\", \"add collections\"], shell=False)\n utils.subprocess_call([\"hg\", \"add templates\"], shell=False)\n utils.subprocess_call([\"hg\", \"add snippets\"], shell=False)\n utils.subprocess_call([\"hg\", \"commit\", \"-m\", \"API\", \"update\", \"--user\", author], shell=False)\n\n if push_script:\n utils.subprocess_call([push_script], shell=False)\n\n os.chdir(old_dir)\n return 0\n\n else:\n raise CX(\"currently unsupported SCM type: %s\" % mode)\n", "path": "cobbler/modules/scm_track.py"}]}
num_tokens_prompt: 1,690
num_tokens_diff: 192
problem_id: gh_patches_debug_8343
source: rasdani/github-patches
task_type: git_diff
in_source_id: scoutapp__scout_apm_python-530
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add RQ subclass of HerokuWorker The "Using RQ on Heroku" docs section ( https://python-rq.org/patterns/ ) shows using a subclass of `Worker` specialized for Heroku. Unfortunateely using that, rather than the Scout RQ Worker subclass means that scout isn't instrumented. We should also provide a `ScoutHerokuWorker` class. </issue> <code> [start of src/scout_apm/rq.py] 1 # coding=utf-8 2 from __future__ import absolute_import, division, print_function, unicode_literals 3 4 import datetime as dt 5 6 import wrapt 7 from rq import SimpleWorker as RqSimpleWorker 8 from rq import Worker as RqWorker 9 from rq.job import Job 10 11 import scout_apm.core 12 from scout_apm.core.tracked_request import TrackedRequest 13 14 install_attempted = False 15 installed = None 16 17 18 def ensure_scout_installed(): 19 global install_attempted, installed 20 21 if not install_attempted: 22 install_attempted = True 23 installed = scout_apm.core.install() 24 25 26 class WorkerMixin(object): 27 def __init__(self, *args, **kwargs): 28 global installed 29 ensure_scout_installed() 30 if installed: 31 ensure_job_instrumented() 32 super(WorkerMixin, self).__init__(*args, **kwargs) 33 34 35 class Worker(WorkerMixin, RqWorker): 36 pass 37 38 39 class SimpleWorker(WorkerMixin, RqSimpleWorker): 40 pass 41 42 43 job_instrumented = False 44 45 46 def ensure_job_instrumented(): 47 global job_instrumented 48 if job_instrumented: 49 return 50 job_instrumented = True 51 Job.perform = wrap_perform(Job.perform) 52 53 54 @wrapt.decorator 55 def wrap_perform(wrapped, instance, args, kwargs): 56 global installed 57 if not installed: 58 return wrapped(*args, **kwargs) 59 60 tracked_request = TrackedRequest.instance() 61 tracked_request.is_real_request = True 62 tracked_request.tag("task_id", instance.get_id()) 63 tracked_request.tag("queue", instance.origin) 64 queue_time = (dt.datetime.utcnow() - instance.enqueued_at).total_seconds() 65 tracked_request.tag("queue_time", queue_time) 66 tracked_request.start_span(operation="Job/{}".format(instance.func_name)) 67 try: 68 return wrapped(*args, **kwargs) 69 except Exception: 70 tracked_request.tag("error", "true") 71 raise 72 finally: 73 tracked_request.stop_span() 74 [end of src/scout_apm/rq.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/scout_apm/rq.py b/src/scout_apm/rq.py --- a/src/scout_apm/rq.py +++ b/src/scout_apm/rq.py @@ -7,6 +7,7 @@ from rq import SimpleWorker as RqSimpleWorker from rq import Worker as RqWorker from rq.job import Job +from rq.worker import HerokuWorker as RqHerokuWorker import scout_apm.core from scout_apm.core.tracked_request import TrackedRequest @@ -40,6 +41,10 @@ pass +class HerokuWorker(WorkerMixin, RqHerokuWorker): + pass + + job_instrumented = False
{"golden_diff": "diff --git a/src/scout_apm/rq.py b/src/scout_apm/rq.py\n--- a/src/scout_apm/rq.py\n+++ b/src/scout_apm/rq.py\n@@ -7,6 +7,7 @@\n from rq import SimpleWorker as RqSimpleWorker\n from rq import Worker as RqWorker\n from rq.job import Job\n+from rq.worker import HerokuWorker as RqHerokuWorker\n \n import scout_apm.core\n from scout_apm.core.tracked_request import TrackedRequest\n@@ -40,6 +41,10 @@\n pass\n \n \n+class HerokuWorker(WorkerMixin, RqHerokuWorker):\n+ pass\n+\n+\n job_instrumented = False\n", "issue": "Add RQ subclass of HerokuWorker\nThe \"Using RQ on Heroku\" docs section ( https://python-rq.org/patterns/ ) shows using a subclass of `Worker` specialized for Heroku. Unfortunateely using that, rather than the Scout RQ Worker subclass means that scout isn't instrumented. We should also provide a `ScoutHerokuWorker` class.\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport datetime as dt\n\nimport wrapt\nfrom rq import SimpleWorker as RqSimpleWorker\nfrom rq import Worker as RqWorker\nfrom rq.job import Job\n\nimport scout_apm.core\nfrom scout_apm.core.tracked_request import TrackedRequest\n\ninstall_attempted = False\ninstalled = None\n\n\ndef ensure_scout_installed():\n global install_attempted, installed\n\n if not install_attempted:\n install_attempted = True\n installed = scout_apm.core.install()\n\n\nclass WorkerMixin(object):\n def __init__(self, *args, **kwargs):\n global installed\n ensure_scout_installed()\n if installed:\n ensure_job_instrumented()\n super(WorkerMixin, self).__init__(*args, **kwargs)\n\n\nclass Worker(WorkerMixin, RqWorker):\n pass\n\n\nclass SimpleWorker(WorkerMixin, RqSimpleWorker):\n pass\n\n\njob_instrumented = False\n\n\ndef ensure_job_instrumented():\n global job_instrumented\n if job_instrumented:\n return\n job_instrumented = True\n Job.perform = wrap_perform(Job.perform)\n\n\[email protected]\ndef wrap_perform(wrapped, instance, args, kwargs):\n global installed\n if not installed:\n return wrapped(*args, **kwargs)\n\n tracked_request = TrackedRequest.instance()\n tracked_request.is_real_request = True\n tracked_request.tag(\"task_id\", instance.get_id())\n tracked_request.tag(\"queue\", instance.origin)\n queue_time = (dt.datetime.utcnow() - instance.enqueued_at).total_seconds()\n tracked_request.tag(\"queue_time\", queue_time)\n tracked_request.start_span(operation=\"Job/{}\".format(instance.func_name))\n try:\n return wrapped(*args, **kwargs)\n except Exception:\n tracked_request.tag(\"error\", \"true\")\n raise\n finally:\n tracked_request.stop_span()\n", "path": "src/scout_apm/rq.py"}]}
1,195
160
gh_patches_debug_8529
rasdani/github-patches
git_diff
conan-io__conan-center-index-16999
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [package] fakeit/*: Package id ignores options ### Description The `fakeit` option for integration is meant to select the correct header file for the matching integration, as there are different header files based on the integration chosen e.g. `gtest`, `boost`, `standalone`. These options can be seen in the recipe. Including the package step in the recipe which copies a different header based on the `integration` option The link for the source shows the separate header files in it under the `single_header` folder: https://github.com/eranpeer/FakeIt/releases/tag/2.3.2 The problem is that there is only one package and it contains the header for the `standalone` `integration` option only. At least part of the cause of the problem can be seen in the recipe file with the `package_id()` The package id for fakeit is ignore the option `integration` which changes which header file is used for the package (and package id) Currently the recipe specifies: ``` def package_id(self): self.info.header_only() ``` But the header_only is designed to ignore options, which is incorrect in this case, as we have a different header filee to package based on the integrated test library e.g. gtest or boost (or standalone). ``` def header_only(self): self.settings.clear() self.options.clear() self.requires.clear() ``` ### Package and Environment Details * Package Name/Version: **fakeit/\*** * Operating System+version: **All** * Compiler+version: **All** * Docker image: **All** * Conan version: **All** * Python version: **All** ### Conan profile [settings] os=Windows os_build=Windows arch=x86_64 arch_build=x86_64 compiler=Visual Studio compiler.version=16 build_type=Debug [options] [conf] [build_requires] [env] ### Steps to reproduce conan install . ### Logs <details><summary>Click to expand log</summary> ``` Build requirements fakeit/2.3.2 from 'conan-center' - Cache gtest/1.11.0 from 'conan-center' - Cache Build requirements packages fakeit/2.3.2:5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9 - Cache gtest/1.11.0:875c67f4d8a79bdd002908b75efce119eb82836d - Cache ``` </details> </issue> <code> [start of recipes/fakeit/all/conanfile.py] 1 from conan import ConanFile 2 from conan.errors import ConanInvalidConfiguration 3 from conan.tools.build import check_min_cppstd 4 from conan.tools.files import apply_conandata_patches, export_conandata_patches, get, copy 5 from conan.tools.layout import basic_layout 6 import os 7 8 9 required_conan_version = ">=1.52.0" 10 11 class FakeItConan(ConanFile): 12 name = "fakeit" 13 description = "C++ mocking made easy. A simple yet very expressive, headers only library for c++ mocking." 
14 topics = ("mock", "fake", "spy") 15 license = "MIT" 16 homepage = "https://github.com/eranpeer/FakeIt" 17 url = "https://github.com/conan-io/conan-center-index" 18 package_type = "header-library" 19 settings = "os", "arch", "compiler", "build_type" 20 options = { 21 "integration": ["boost", "catch", "cute", "gtest", "mettle", "nunit", "mstest", "qtest", "standalone", "tpunit"] 22 } 23 default_options = {"integration": "standalone"} 24 no_copy_source = True 25 26 @property 27 def _min_cppstd(self): 28 return 11 29 30 def export_sources(self): 31 export_conandata_patches(self) 32 33 def layout(self): 34 basic_layout(self, src_folder="src") 35 36 def requirements(self): 37 if self.options.integration == "boost": 38 self.requires("boost/1.79.0") 39 elif self.options.integration == "catch": 40 self.requires("catch2/2.13.9") 41 elif self.options.integration == "gtest": 42 self.requires("gtest/1.11.0") 43 elif self.options.integration == "qtest": 44 self.requires("qt/6.3.0") 45 elif self.options.integration == "standalone": 46 pass 47 else: 48 raise ConanInvalidConfiguration("%s is not (yet) available on cci" % self.options.integration) 49 50 def package_id(self): 51 self.info.clear() 52 53 def validate(self): 54 if self.settings.compiler.get_safe("cppstd"): 55 check_min_cppstd(self, self._min_cppstd) 56 57 def source(self): 58 get(self, **self.conan_data["sources"][self.version], strip_root=True) 59 60 def build(self): 61 apply_conandata_patches(self) 62 63 def package(self): 64 copy(self, pattern="LICENSE", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder) 65 copy( 66 self, 67 pattern="fakeit.hpp", 68 dst=os.path.join(self.package_folder, "include"), 69 src=os.path.join(self.source_folder, "single_header", str(self.options.integration)), 70 ) 71 [end of recipes/fakeit/all/conanfile.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/recipes/fakeit/all/conanfile.py b/recipes/fakeit/all/conanfile.py --- a/recipes/fakeit/all/conanfile.py +++ b/recipes/fakeit/all/conanfile.py @@ -48,7 +48,10 @@ raise ConanInvalidConfiguration("%s is not (yet) available on cci" % self.options.integration) def package_id(self): - self.info.clear() + # The "integration" option must be kept because it will impact which header is packaged, + # therefor self.info.clear() cannot be used. + self.info.settings.clear() + self.info.requires.clear() def validate(self): if self.settings.compiler.get_safe("cppstd"):
{"golden_diff": "diff --git a/recipes/fakeit/all/conanfile.py b/recipes/fakeit/all/conanfile.py\n--- a/recipes/fakeit/all/conanfile.py\n+++ b/recipes/fakeit/all/conanfile.py\n@@ -48,7 +48,10 @@\n raise ConanInvalidConfiguration(\"%s is not (yet) available on cci\" % self.options.integration)\n \n def package_id(self):\n- self.info.clear()\n+ # The \"integration\" option must be kept because it will impact which header is packaged,\n+ # therefor self.info.clear() cannot be used.\n+ self.info.settings.clear()\n+ self.info.requires.clear()\n \n def validate(self):\n if self.settings.compiler.get_safe(\"cppstd\"):\n", "issue": "[package] fakeit/*: Package id ignores options\n### Description\r\n\r\nThe `fakeit` option for integration is meant to select the correct header file for the matching integration, as there are different header files based on the integration chosen e.g. `gtest`, `boost`, `standalone`.\r\n\r\nThese options can be seen in the recipe.\r\nIncluding the package step in the recipe which copies a different header based on the `integration` option\r\n\r\nThe link for the source shows the separate header files in it under the `single_header` folder: https://github.com/eranpeer/FakeIt/releases/tag/2.3.2\r\n\r\nThe problem is that there is only one package and it contains the header for the `standalone` `integration` option only.\r\n\r\nAt least part of the cause of the problem can be seen in the recipe file with the `package_id()`\r\n\r\nThe package id for fakeit is ignore the option `integration` which changes which header file is used for the package (and package id)\r\nCurrently the recipe specifies:\r\n```\r\n def package_id(self):\r\n self.info.header_only()\r\n```\r\n\r\nBut the header_only is designed to ignore options, which is incorrect in this case, as we have a different header filee to package based on the integrated test library e.g. gtest or boost (or standalone).\r\n\r\n```\r\n def header_only(self):\r\n self.settings.clear()\r\n self.options.clear()\r\n self.requires.clear()\r\n```\r\n\r\n\r\n### Package and Environment Details\r\n\r\n* Package Name/Version: **fakeit/\\***\r\n* Operating System+version: **All**\r\n* Compiler+version: **All**\r\n* Docker image: **All**\r\n* Conan version: **All**\r\n* Python version: **All**\r\n\r\n\r\n### Conan profile\r\n\r\n[settings]\r\nos=Windows\r\nos_build=Windows\r\narch=x86_64\r\narch_build=x86_64\r\ncompiler=Visual Studio\r\ncompiler.version=16\r\nbuild_type=Debug\r\n[options]\r\n[conf]\r\n[build_requires]\r\n[env]\r\n\r\n### Steps to reproduce\r\n\r\nconan install .\r\n\r\n### Logs\r\n\r\n<details><summary>Click to expand log</summary>\r\n\r\n```\r\nBuild requirements\r\n fakeit/2.3.2 from 'conan-center' - Cache\r\n gtest/1.11.0 from 'conan-center' - Cache\r\nBuild requirements packages\r\n fakeit/2.3.2:5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9 - Cache\r\n gtest/1.11.0:875c67f4d8a79bdd002908b75efce119eb82836d - Cache\r\n```\r\n\r\n</details>\r\n\n", "before_files": [{"content": "from conan import ConanFile\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.build import check_min_cppstd\nfrom conan.tools.files import apply_conandata_patches, export_conandata_patches, get, copy\nfrom conan.tools.layout import basic_layout\nimport os\n\n\nrequired_conan_version = \">=1.52.0\"\n\nclass FakeItConan(ConanFile):\n name = \"fakeit\"\n description = \"C++ mocking made easy. 
A simple yet very expressive, headers only library for c++ mocking.\"\n topics = (\"mock\", \"fake\", \"spy\")\n license = \"MIT\"\n homepage = \"https://github.com/eranpeer/FakeIt\"\n url = \"https://github.com/conan-io/conan-center-index\"\n package_type = \"header-library\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"integration\": [\"boost\", \"catch\", \"cute\", \"gtest\", \"mettle\", \"nunit\", \"mstest\", \"qtest\", \"standalone\", \"tpunit\"]\n }\n default_options = {\"integration\": \"standalone\"}\n no_copy_source = True\n\n @property\n def _min_cppstd(self):\n return 11\n\n def export_sources(self):\n export_conandata_patches(self)\n\n def layout(self):\n basic_layout(self, src_folder=\"src\")\n\n def requirements(self):\n if self.options.integration == \"boost\":\n self.requires(\"boost/1.79.0\")\n elif self.options.integration == \"catch\":\n self.requires(\"catch2/2.13.9\")\n elif self.options.integration == \"gtest\":\n self.requires(\"gtest/1.11.0\")\n elif self.options.integration == \"qtest\":\n self.requires(\"qt/6.3.0\")\n elif self.options.integration == \"standalone\":\n pass\n else:\n raise ConanInvalidConfiguration(\"%s is not (yet) available on cci\" % self.options.integration)\n\n def package_id(self):\n self.info.clear()\n\n def validate(self):\n if self.settings.compiler.get_safe(\"cppstd\"):\n check_min_cppstd(self, self._min_cppstd)\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version], strip_root=True)\n\n def build(self):\n apply_conandata_patches(self)\n\n def package(self):\n copy(self, pattern=\"LICENSE\", dst=os.path.join(self.package_folder, \"licenses\"), src=self.source_folder)\n copy(\n self,\n pattern=\"fakeit.hpp\",\n dst=os.path.join(self.package_folder, \"include\"),\n src=os.path.join(self.source_folder, \"single_header\", str(self.options.integration)),\n )\n", "path": "recipes/fakeit/all/conanfile.py"}]}
num_tokens_prompt: 1,866
num_tokens_diff: 165
problem_id: gh_patches_debug_55064
source: rasdani/github-patches
task_type: git_diff
in_source_id: secdev__scapy-1402
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> man page spelling error intances should be instances. It would be nice if this wasn't gz compressed in the source, otherwise I'd have done a pull request. </issue> <code> [start of setup.py] 1 #! /usr/bin/env python 2 3 """ 4 Distutils setup file for Scapy. 5 """ 6 7 8 from distutils import archive_util 9 from distutils import sysconfig 10 from distutils.core import setup 11 from distutils.command.sdist import sdist 12 import os 13 14 15 EZIP_HEADER = """#! /bin/sh 16 PYTHONPATH=$0/%s exec python -m scapy 17 """ 18 19 20 def make_ezipfile(base_name, base_dir, verbose=0, dry_run=0, **kwargs): 21 fname = archive_util.make_zipfile(base_name, base_dir, verbose, dry_run) 22 ofname = fname + ".old" 23 os.rename(fname, ofname) 24 of = open(ofname) 25 f = open(fname, "w") 26 f.write(EZIP_HEADER % base_dir) 27 while True: 28 data = of.read(8192) 29 if not data: 30 break 31 f.write(data) 32 f.close() 33 os.system("zip -A '%s'" % fname) 34 of.close() 35 os.unlink(ofname) 36 os.chmod(fname, 0o755) 37 return fname 38 39 40 archive_util.ARCHIVE_FORMATS["ezip"] = ( 41 make_ezipfile, [], 'Executable ZIP file') 42 43 SCRIPTS = ['bin/scapy', 'bin/UTscapy'] 44 # On Windows we also need additional batch files to run the above scripts 45 if os.name == "nt": 46 SCRIPTS += ['bin/scapy.bat', 'bin/UTscapy.bat'] 47 48 setup( 49 name='scapy', 50 version=__import__('scapy').VERSION, 51 packages=[ 52 'scapy', 53 'scapy/arch', 54 'scapy/arch/bpf', 55 'scapy/arch/windows', 56 'scapy/contrib', 57 'scapy/layers', 58 'scapy/layers/tls', 59 'scapy/layers/tls/crypto', 60 'scapy/modules', 61 'scapy/modules/krack', 62 'scapy/asn1', 63 'scapy/tools', 64 ], 65 scripts=SCRIPTS, 66 data_files=[('share/man/man1', ["doc/scapy.1.gz"])], 67 package_data={ 68 'scapy': ['VERSION'], 69 }, 70 71 # Metadata 72 author='Philippe BIONDI', 73 author_email='phil(at)secdev.org', 74 maintainer='Pierre LALET, Guillaume VALADON', 75 description='Scapy: interactive packet manipulation tool', 76 license='GPLv2', 77 url='http://www.secdev.org/projects/scapy', 78 download_url='https://github.com/secdev/scapy/tarball/master', 79 keywords=["network"], 80 classifiers=[ 81 "Development Status :: 5 - Production/Stable", 82 "Environment :: Console", 83 "Intended Audience :: Developers", 84 "Intended Audience :: Information Technology", 85 "Intended Audience :: Science/Research", 86 "Intended Audience :: System Administrators", 87 "Intended Audience :: Telecommunications Industry", 88 "License :: OSI Approved :: GNU General Public License v2 (GPLv2)", 89 "Programming Language :: Python :: 2", 90 "Programming Language :: Python :: 2.7", 91 "Programming Language :: Python :: 3", 92 "Programming Language :: Python :: 3.4", 93 "Programming Language :: Python :: 3.5", 94 "Programming Language :: Python :: 3.6", 95 "Topic :: Security", 96 "Topic :: System :: Networking", 97 "Topic :: System :: Networking :: Monitoring", 98 ] 99 ) 100 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -63,7 +63,7 @@ 'scapy/tools', ], scripts=SCRIPTS, - data_files=[('share/man/man1', ["doc/scapy.1.gz"])], + data_files=[('share/man/man1', ["doc/scapy.1"])], package_data={ 'scapy': ['VERSION'], },
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -63,7 +63,7 @@\n 'scapy/tools',\n ],\n scripts=SCRIPTS,\n- data_files=[('share/man/man1', [\"doc/scapy.1.gz\"])],\n+ data_files=[('share/man/man1', [\"doc/scapy.1\"])],\n package_data={\n 'scapy': ['VERSION'],\n },\n", "issue": "man page spelling error\nintances should be instances.\r\n\r\nIt would be nice if this wasn't gz compressed in the source, otherwise I'd have done a pull request.\n", "before_files": [{"content": "#! /usr/bin/env python\n\n\"\"\"\nDistutils setup file for Scapy.\n\"\"\"\n\n\nfrom distutils import archive_util\nfrom distutils import sysconfig\nfrom distutils.core import setup\nfrom distutils.command.sdist import sdist\nimport os\n\n\nEZIP_HEADER = \"\"\"#! /bin/sh\nPYTHONPATH=$0/%s exec python -m scapy\n\"\"\"\n\n\ndef make_ezipfile(base_name, base_dir, verbose=0, dry_run=0, **kwargs):\n fname = archive_util.make_zipfile(base_name, base_dir, verbose, dry_run)\n ofname = fname + \".old\"\n os.rename(fname, ofname)\n of = open(ofname)\n f = open(fname, \"w\")\n f.write(EZIP_HEADER % base_dir)\n while True:\n data = of.read(8192)\n if not data:\n break\n f.write(data)\n f.close()\n os.system(\"zip -A '%s'\" % fname)\n of.close()\n os.unlink(ofname)\n os.chmod(fname, 0o755)\n return fname\n\n\narchive_util.ARCHIVE_FORMATS[\"ezip\"] = (\n make_ezipfile, [], 'Executable ZIP file')\n\nSCRIPTS = ['bin/scapy', 'bin/UTscapy']\n# On Windows we also need additional batch files to run the above scripts\nif os.name == \"nt\":\n SCRIPTS += ['bin/scapy.bat', 'bin/UTscapy.bat']\n\nsetup(\n name='scapy',\n version=__import__('scapy').VERSION,\n packages=[\n 'scapy',\n 'scapy/arch',\n 'scapy/arch/bpf',\n 'scapy/arch/windows',\n 'scapy/contrib',\n 'scapy/layers',\n 'scapy/layers/tls',\n 'scapy/layers/tls/crypto',\n 'scapy/modules',\n 'scapy/modules/krack',\n 'scapy/asn1',\n 'scapy/tools',\n ],\n scripts=SCRIPTS,\n data_files=[('share/man/man1', [\"doc/scapy.1.gz\"])],\n package_data={\n 'scapy': ['VERSION'],\n },\n\n # Metadata\n author='Philippe BIONDI',\n author_email='phil(at)secdev.org',\n maintainer='Pierre LALET, Guillaume VALADON',\n description='Scapy: interactive packet manipulation tool',\n license='GPLv2',\n url='http://www.secdev.org/projects/scapy',\n download_url='https://github.com/secdev/scapy/tarball/master',\n keywords=[\"network\"],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Information Technology\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: System Administrators\",\n \"Intended Audience :: Telecommunications Industry\",\n \"License :: OSI Approved :: GNU General Public License v2 (GPLv2)\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Topic :: Security\",\n \"Topic :: System :: Networking\",\n \"Topic :: System :: Networking :: Monitoring\",\n ]\n)\n", "path": "setup.py"}]}
1,504
100
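The golden diff above amounts to shipping the plain roff source and letting distro tooling compress it at package-build time. A minimal sketch of that `data_files` layout, assuming a hypothetical project with a single man page:

```python
# Sketch only: a setup.py fragment that installs an uncompressed man page.
from setuptools import setup

setup(
    name="example-tool",
    version="0.1",
    # Install the plain-text roff source; downstream packagers (e.g.
    # Debian's dh_compress) gzip it at build time, which keeps the man
    # page greppable and patchable in the repository -- exactly what the
    # issue reporter wanted.
    data_files=[("share/man/man1", ["doc/example-tool.1"])],
)
```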
gh_patches_debug_40795
rasdani/github-patches
git_diff
goauthentik__authentik-3556
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add `x5c` and `x5t`to the `jwks` response **Is your feature request related to a problem? Please describe.** I am trying to use Authentik as the identity provider for netbird via OAuth2/OIDC **Describe the solution you'd like** netbird expects the JWKS endpoint which is `/application/o/<provider name>/jwks/` to have a property for the `x5c`. The `x5c` (X.509 certificate chain) Header Parameter contains the X.509 public key certificate or certificate chain corresponding to the key used to digitally sign the JWS (JSON Web Signature). **Describe alternatives you've considered** n/a **Additional context** For the OAuth2 Provider, I specified a signing key which populated the `jwks` endpoint response with the following values: ``` { "keys": [ { "kty": "RSA", "alg": "RS256", "use": "sig", "kid": "*REDACTED*", "n": "*REDACTED*", "e": "AQAB" } ] } ``` Comparing it to the example here: https://example.eu.auth0.com/.well-known/jwks.json , it is missing the `x5t` and `x5c` properties. </issue> <code> [start of authentik/providers/oauth2/views/jwks.py] 1 """authentik OAuth2 JWKS Views""" 2 from base64 import urlsafe_b64encode 3 from typing import Optional 4 5 from cryptography.hazmat.primitives.asymmetric.ec import ( 6 EllipticCurvePrivateKey, 7 EllipticCurvePublicKey, 8 ) 9 from cryptography.hazmat.primitives.asymmetric.rsa import RSAPrivateKey, RSAPublicKey 10 from django.http import HttpRequest, HttpResponse, JsonResponse 11 from django.shortcuts import get_object_or_404 12 from django.views import View 13 14 from authentik.core.models import Application 15 from authentik.crypto.models import CertificateKeyPair 16 from authentik.providers.oauth2.models import JWTAlgorithms, OAuth2Provider 17 18 19 def b64_enc(number: int) -> str: 20 """Convert number to base64-encoded octet-value""" 21 length = ((number).bit_length() + 7) // 8 22 number_bytes = number.to_bytes(length, "big") 23 final = urlsafe_b64encode(number_bytes).rstrip(b"=") 24 return final.decode("ascii") 25 26 27 class JWKSView(View): 28 """Show RSA Key data for Provider""" 29 30 def get_jwk_for_key(self, key: CertificateKeyPair) -> Optional[dict]: 31 """Convert a certificate-key pair into JWK""" 32 private_key = key.private_key 33 if not private_key: 34 return None 35 if isinstance(private_key, RSAPrivateKey): 36 public_key: RSAPublicKey = private_key.public_key() 37 public_numbers = public_key.public_numbers() 38 return { 39 "kty": "RSA", 40 "alg": JWTAlgorithms.RS256, 41 "use": "sig", 42 "kid": key.kid, 43 "n": b64_enc(public_numbers.n), 44 "e": b64_enc(public_numbers.e), 45 } 46 if isinstance(private_key, EllipticCurvePrivateKey): 47 public_key: EllipticCurvePublicKey = private_key.public_key() 48 public_numbers = public_key.public_numbers() 49 return { 50 "kty": "EC", 51 "alg": JWTAlgorithms.ES256, 52 "use": "sig", 53 "kid": key.kid, 54 "n": b64_enc(public_numbers.n), 55 "e": b64_enc(public_numbers.e), 56 } 57 return None 58 59 def get(self, request: HttpRequest, application_slug: str) -> HttpResponse: 60 """Show JWK Key data for Provider""" 61 application = get_object_or_404(Application, slug=application_slug) 62 provider: OAuth2Provider = get_object_or_404(OAuth2Provider, pk=application.provider_id) 63 signing_key: CertificateKeyPair = provider.signing_key 64 65 response_data = {} 66 67 if signing_key: 68 jwk = self.get_jwk_for_key(signing_key) 69 if jwk: 70 response_data["keys"] = [jwk] 71 72 response = 
JsonResponse(response_data) 73 response["Access-Control-Allow-Origin"] = "*" 74 75 return response 76 [end of authentik/providers/oauth2/views/jwks.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/authentik/providers/oauth2/views/jwks.py b/authentik/providers/oauth2/views/jwks.py --- a/authentik/providers/oauth2/views/jwks.py +++ b/authentik/providers/oauth2/views/jwks.py @@ -1,12 +1,14 @@ """authentik OAuth2 JWKS Views""" -from base64 import urlsafe_b64encode +from base64 import b64encode, urlsafe_b64encode from typing import Optional +from cryptography.hazmat.primitives import hashes from cryptography.hazmat.primitives.asymmetric.ec import ( EllipticCurvePrivateKey, EllipticCurvePublicKey, ) from cryptography.hazmat.primitives.asymmetric.rsa import RSAPrivateKey, RSAPublicKey +from cryptography.hazmat.primitives.serialization import Encoding from django.http import HttpRequest, HttpResponse, JsonResponse from django.shortcuts import get_object_or_404 from django.views import View @@ -30,12 +32,13 @@ def get_jwk_for_key(self, key: CertificateKeyPair) -> Optional[dict]: """Convert a certificate-key pair into JWK""" private_key = key.private_key + key_data = None if not private_key: - return None + return key_data if isinstance(private_key, RSAPrivateKey): public_key: RSAPublicKey = private_key.public_key() public_numbers = public_key.public_numbers() - return { + key_data = { "kty": "RSA", "alg": JWTAlgorithms.RS256, "use": "sig", @@ -43,10 +46,10 @@ "n": b64_enc(public_numbers.n), "e": b64_enc(public_numbers.e), } - if isinstance(private_key, EllipticCurvePrivateKey): + elif isinstance(private_key, EllipticCurvePrivateKey): public_key: EllipticCurvePublicKey = private_key.public_key() public_numbers = public_key.public_numbers() - return { + key_data = { "kty": "EC", "alg": JWTAlgorithms.ES256, "use": "sig", @@ -54,7 +57,20 @@ "n": b64_enc(public_numbers.n), "e": b64_enc(public_numbers.e), } - return None + else: + return key_data + key_data["x5c"] = [b64encode(key.certificate.public_bytes(Encoding.DER)).decode("utf-8")] + key_data["x5t"] = ( + urlsafe_b64encode(key.certificate.fingerprint(hashes.SHA1())) # nosec + .decode("utf-8") + .rstrip("=") + ) + key_data["x5t#S256"] = ( + urlsafe_b64encode(key.certificate.fingerprint(hashes.SHA256())) + .decode("utf-8") + .rstrip("=") + ) + return key_data def get(self, request: HttpRequest, application_slug: str) -> HttpResponse: """Show JWK Key data for Provider"""
{"golden_diff": "diff --git a/authentik/providers/oauth2/views/jwks.py b/authentik/providers/oauth2/views/jwks.py\n--- a/authentik/providers/oauth2/views/jwks.py\n+++ b/authentik/providers/oauth2/views/jwks.py\n@@ -1,12 +1,14 @@\n \"\"\"authentik OAuth2 JWKS Views\"\"\"\n-from base64 import urlsafe_b64encode\n+from base64 import b64encode, urlsafe_b64encode\n from typing import Optional\n \n+from cryptography.hazmat.primitives import hashes\n from cryptography.hazmat.primitives.asymmetric.ec import (\n EllipticCurvePrivateKey,\n EllipticCurvePublicKey,\n )\n from cryptography.hazmat.primitives.asymmetric.rsa import RSAPrivateKey, RSAPublicKey\n+from cryptography.hazmat.primitives.serialization import Encoding\n from django.http import HttpRequest, HttpResponse, JsonResponse\n from django.shortcuts import get_object_or_404\n from django.views import View\n@@ -30,12 +32,13 @@\n def get_jwk_for_key(self, key: CertificateKeyPair) -> Optional[dict]:\n \"\"\"Convert a certificate-key pair into JWK\"\"\"\n private_key = key.private_key\n+ key_data = None\n if not private_key:\n- return None\n+ return key_data\n if isinstance(private_key, RSAPrivateKey):\n public_key: RSAPublicKey = private_key.public_key()\n public_numbers = public_key.public_numbers()\n- return {\n+ key_data = {\n \"kty\": \"RSA\",\n \"alg\": JWTAlgorithms.RS256,\n \"use\": \"sig\",\n@@ -43,10 +46,10 @@\n \"n\": b64_enc(public_numbers.n),\n \"e\": b64_enc(public_numbers.e),\n }\n- if isinstance(private_key, EllipticCurvePrivateKey):\n+ elif isinstance(private_key, EllipticCurvePrivateKey):\n public_key: EllipticCurvePublicKey = private_key.public_key()\n public_numbers = public_key.public_numbers()\n- return {\n+ key_data = {\n \"kty\": \"EC\",\n \"alg\": JWTAlgorithms.ES256,\n \"use\": \"sig\",\n@@ -54,7 +57,20 @@\n \"n\": b64_enc(public_numbers.n),\n \"e\": b64_enc(public_numbers.e),\n }\n- return None\n+ else:\n+ return key_data\n+ key_data[\"x5c\"] = [b64encode(key.certificate.public_bytes(Encoding.DER)).decode(\"utf-8\")]\n+ key_data[\"x5t\"] = (\n+ urlsafe_b64encode(key.certificate.fingerprint(hashes.SHA1())) # nosec\n+ .decode(\"utf-8\")\n+ .rstrip(\"=\")\n+ )\n+ key_data[\"x5t#S256\"] = (\n+ urlsafe_b64encode(key.certificate.fingerprint(hashes.SHA256()))\n+ .decode(\"utf-8\")\n+ .rstrip(\"=\")\n+ )\n+ return key_data\n \n def get(self, request: HttpRequest, application_slug: str) -> HttpResponse:\n \"\"\"Show JWK Key data for Provider\"\"\"\n", "issue": "Add `x5c` and `x5t`to the `jwks` response\n**Is your feature request related to a problem? Please describe.**\r\nI am trying to use Authentik as the identity provider for netbird via OAuth2/OIDC\r\n\r\n**Describe the solution you'd like**\r\nnetbird expects the JWKS endpoint which is `/application/o/<provider name>/jwks/` to have a property for the `x5c`. 
The `x5c` (X.509 certificate chain) Header Parameter contains the X.509 public key certificate or certificate chain corresponding to the key used to digitally sign the JWS (JSON Web Signature).\r\n\r\n**Describe alternatives you've considered**\r\nn/a\r\n\r\n**Additional context**\r\nFor the OAuth2 Provider, I specified a signing key which populated the `jwks` endpoint response with the following values:\r\n```\r\n{\r\n \"keys\": [\r\n {\r\n \"kty\": \"RSA\",\r\n \"alg\": \"RS256\",\r\n \"use\": \"sig\",\r\n \"kid\": \"*REDACTED*\",\r\n \"n\": \"*REDACTED*\",\r\n \"e\": \"AQAB\"\r\n }\r\n ]\r\n}\r\n```\r\n\r\nComparing it to the example here: https://example.eu.auth0.com/.well-known/jwks.json , it is missing the `x5t` and `x5c` properties.\n", "before_files": [{"content": "\"\"\"authentik OAuth2 JWKS Views\"\"\"\nfrom base64 import urlsafe_b64encode\nfrom typing import Optional\n\nfrom cryptography.hazmat.primitives.asymmetric.ec import (\n EllipticCurvePrivateKey,\n EllipticCurvePublicKey,\n)\nfrom cryptography.hazmat.primitives.asymmetric.rsa import RSAPrivateKey, RSAPublicKey\nfrom django.http import HttpRequest, HttpResponse, JsonResponse\nfrom django.shortcuts import get_object_or_404\nfrom django.views import View\n\nfrom authentik.core.models import Application\nfrom authentik.crypto.models import CertificateKeyPair\nfrom authentik.providers.oauth2.models import JWTAlgorithms, OAuth2Provider\n\n\ndef b64_enc(number: int) -> str:\n \"\"\"Convert number to base64-encoded octet-value\"\"\"\n length = ((number).bit_length() + 7) // 8\n number_bytes = number.to_bytes(length, \"big\")\n final = urlsafe_b64encode(number_bytes).rstrip(b\"=\")\n return final.decode(\"ascii\")\n\n\nclass JWKSView(View):\n \"\"\"Show RSA Key data for Provider\"\"\"\n\n def get_jwk_for_key(self, key: CertificateKeyPair) -> Optional[dict]:\n \"\"\"Convert a certificate-key pair into JWK\"\"\"\n private_key = key.private_key\n if not private_key:\n return None\n if isinstance(private_key, RSAPrivateKey):\n public_key: RSAPublicKey = private_key.public_key()\n public_numbers = public_key.public_numbers()\n return {\n \"kty\": \"RSA\",\n \"alg\": JWTAlgorithms.RS256,\n \"use\": \"sig\",\n \"kid\": key.kid,\n \"n\": b64_enc(public_numbers.n),\n \"e\": b64_enc(public_numbers.e),\n }\n if isinstance(private_key, EllipticCurvePrivateKey):\n public_key: EllipticCurvePublicKey = private_key.public_key()\n public_numbers = public_key.public_numbers()\n return {\n \"kty\": \"EC\",\n \"alg\": JWTAlgorithms.ES256,\n \"use\": \"sig\",\n \"kid\": key.kid,\n \"n\": b64_enc(public_numbers.n),\n \"e\": b64_enc(public_numbers.e),\n }\n return None\n\n def get(self, request: HttpRequest, application_slug: str) -> HttpResponse:\n \"\"\"Show JWK Key data for Provider\"\"\"\n application = get_object_or_404(Application, slug=application_slug)\n provider: OAuth2Provider = get_object_or_404(OAuth2Provider, pk=application.provider_id)\n signing_key: CertificateKeyPair = provider.signing_key\n\n response_data = {}\n\n if signing_key:\n jwk = self.get_jwk_for_key(signing_key)\n if jwk:\n response_data[\"keys\"] = [jwk]\n\n response = JsonResponse(response_data)\n response[\"Access-Control-Allow-Origin\"] = \"*\"\n\n return response\n", "path": "authentik/providers/oauth2/views/jwks.py"}]}
1,632
718
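A standalone sketch of the `x5c`/`x5t` derivation that the golden diff above adds, assuming `cert` is a `cryptography` `x509.Certificate` (the per-algorithm JWK fields are omitted here). Per RFC 7517, `x5c` uses standard base64 of the DER certificate while the thumbprints use unpadded URL-safe base64:

```python
# Sketch only: building the x5c/x5t JWK members per RFC 7517.
from base64 import b64encode, urlsafe_b64encode

from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.serialization import Encoding


def x5_members(cert) -> dict:
    der = cert.public_bytes(Encoding.DER)
    return {
        # x5c: standard (not URL-safe) base64 of the DER certificate chain.
        "x5c": [b64encode(der).decode("utf-8")],
        # x5t: unpadded URL-safe base64 of the SHA-1 thumbprint.
        "x5t": urlsafe_b64encode(cert.fingerprint(hashes.SHA1()))
        .decode("utf-8")
        .rstrip("="),
        # x5t#S256: the SHA-256 variant some relying parties prefer.
        "x5t#S256": urlsafe_b64encode(cert.fingerprint(hashes.SHA256()))
        .decode("utf-8")
        .rstrip("="),
    }
```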
gh_patches_debug_9868
rasdani/github-patches
git_diff
ckan__ckan-7906
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Replacing MD5 hashing algorithm with SHA512 In file: common_middleware.py, method: __call__, the used hashing algorithm is no longer considered secure because it is possible to have collisions. This can lead to brute force attempt to find two or more inputs that produce the same hash. iCR suggested that safer alternative hash algorithms, such as SHA-256, SHA-512, SHA-3 are used. In the file, MD5 is used to generate a key based on several parameters and inserted into the database as `user_key`. In that case, it's recommended to use a more secure, less collision prone hash function such as- SHA256 or SHA512. ### Sponsorship and Support: This work is done by the security researchers from OpenRefactory and is supported by the [Open Source Security Foundation (OpenSSF)](https://openssf.org/): [Project Alpha-Omega](https://alpha-omega.dev/). Alpha-Omega is a project partnering with open source software project maintainers to systematically find new, as-yet-undiscovered vulnerabilities in open source code - and get them fixed – to improve global software supply chain security. The bug is found by running the Intelligent Code Repair (iCR) tool by OpenRefactory and then manually triaging the results. </issue> <code> [start of ckanext/tracking/middleware.py] 1 import hashlib 2 3 from urllib.parse import unquote 4 5 from ckan.model.meta import engine 6 from ckan.common import request 7 from ckan.types import Response 8 9 10 def track_request(response: Response) -> Response: 11 path = request.environ.get('PATH_INFO') 12 method = request.environ.get('REQUEST_METHOD') 13 if path == '/_tracking' and method == 'POST': 14 # wsgi.input is a BytesIO object 15 payload = request.environ['wsgi.input'].read().decode() 16 parts = payload.split('&') 17 data = {} 18 for part in parts: 19 k, v = part.split('=') 20 data[k] = unquote(v) 21 22 # we want a unique anonomized key for each user so that we do 23 # not count multiple clicks from the same user. 24 key = ''.join([ 25 request.environ['HTTP_USER_AGENT'], 26 request.environ['REMOTE_ADDR'], 27 request.environ.get('HTTP_ACCEPT_LANGUAGE', ''), 28 request.environ.get('HTTP_ACCEPT_ENCODING', ''), 29 ]) 30 key = hashlib.md5(key.encode()).hexdigest() 31 # store key/data here 32 sql = '''INSERT INTO tracking_raw 33 (user_key, url, tracking_type) 34 VALUES (%s, %s, %s)''' 35 engine.execute( # type: ignore 36 sql, key, data.get('url'), data.get('type') 37 ) 38 return response 39 [end of ckanext/tracking/middleware.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ckanext/tracking/middleware.py b/ckanext/tracking/middleware.py --- a/ckanext/tracking/middleware.py +++ b/ckanext/tracking/middleware.py @@ -27,7 +27,9 @@ request.environ.get('HTTP_ACCEPT_LANGUAGE', ''), request.environ.get('HTTP_ACCEPT_ENCODING', ''), ]) - key = hashlib.md5(key.encode()).hexdigest() + # raises a type error on python<3.9 + h = hashlib.new('md5', usedforsecurity=False) # type: ignore + key = h.update(key.encode()).hexdigest() # store key/data here sql = '''INSERT INTO tracking_raw (user_key, url, tracking_type)
{"golden_diff": "diff --git a/ckanext/tracking/middleware.py b/ckanext/tracking/middleware.py\n--- a/ckanext/tracking/middleware.py\n+++ b/ckanext/tracking/middleware.py\n@@ -27,7 +27,9 @@\n request.environ.get('HTTP_ACCEPT_LANGUAGE', ''),\n request.environ.get('HTTP_ACCEPT_ENCODING', ''),\n ])\n- key = hashlib.md5(key.encode()).hexdigest()\n+ # raises a type error on python<3.9\n+ h = hashlib.new('md5', usedforsecurity=False) # type: ignore\n+ key = h.update(key.encode()).hexdigest()\n # store key/data here\n sql = '''INSERT INTO tracking_raw\n (user_key, url, tracking_type)\n", "issue": "Replacing MD5 hashing algorithm with SHA512\nIn file: common_middleware.py, method: __call__, the used hashing algorithm is no longer considered secure because it is possible to have collisions. This can lead to brute force attempt to find two or more inputs that produce the same hash. iCR suggested that safer alternative hash algorithms, such as SHA-256, SHA-512, SHA-3 are used. \n\nIn the file, MD5 is used to generate a key based on several parameters and inserted into the database as `user_key`. In that case, it's recommended to use a more secure, less collision prone hash function such as- SHA256 or SHA512.\n\n\n### Sponsorship and Support:\n\nThis work is done by the security researchers from OpenRefactory and is supported by the [Open Source Security Foundation (OpenSSF)](https://openssf.org/): [Project Alpha-Omega](https://alpha-omega.dev/). Alpha-Omega is a project partnering with open source software project maintainers to systematically find new, as-yet-undiscovered vulnerabilities in open source code - and get them fixed \u2013 to improve global software supply chain security.\n\nThe bug is found by running the Intelligent Code Repair (iCR) tool by OpenRefactory and then manually triaging the results.\n", "before_files": [{"content": "import hashlib\n\nfrom urllib.parse import unquote\n\nfrom ckan.model.meta import engine\nfrom ckan.common import request\nfrom ckan.types import Response\n\n\ndef track_request(response: Response) -> Response:\n path = request.environ.get('PATH_INFO')\n method = request.environ.get('REQUEST_METHOD')\n if path == '/_tracking' and method == 'POST':\n # wsgi.input is a BytesIO object\n payload = request.environ['wsgi.input'].read().decode()\n parts = payload.split('&')\n data = {}\n for part in parts:\n k, v = part.split('=')\n data[k] = unquote(v)\n\n # we want a unique anonomized key for each user so that we do\n # not count multiple clicks from the same user.\n key = ''.join([\n request.environ['HTTP_USER_AGENT'],\n request.environ['REMOTE_ADDR'],\n request.environ.get('HTTP_ACCEPT_LANGUAGE', ''),\n request.environ.get('HTTP_ACCEPT_ENCODING', ''),\n ])\n key = hashlib.md5(key.encode()).hexdigest()\n # store key/data here\n sql = '''INSERT INTO tracking_raw\n (user_key, url, tracking_type)\n VALUES (%s, %s, %s)'''\n engine.execute( # type: ignore\n sql, key, data.get('url'), data.get('type')\n )\n return response\n", "path": "ckanext/tracking/middleware.py"}]}
1,177
167
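Two stdlib-only ways to address the finding above, sketched side by side: the SHA-512 digest the issue recommends, and the `usedforsecurity=False` route the golden diff takes. Note that `hashlib`'s `update()` returns `None`, so it has to be a separate statement before `hexdigest()`:

```python
# Sketch only: replacing or annotating the anonymisation hash.
import hashlib

key = "user-agent|remote-addr|accept-language|accept-encoding"

# Option 1: the collision-resistant digest the issue recommends.
sha_key = hashlib.sha512(key.encode()).hexdigest()

# Option 2 (Python >= 3.9): keep MD5 for pure anonymisation but flag it as
# non-security so FIPS-mode builds allow it.
h = hashlib.new("md5", usedforsecurity=False)
h.update(key.encode())  # update() returns None; never chain .hexdigest()
md5_key = h.hexdigest()
```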
gh_patches_debug_24799
rasdani/github-patches
git_diff
digitalfabrik__integreat-cms-645
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Deliver fallback of missing imprint translations in API ### Motivation <!-- A clear and concise description of what the motivation for the new feature is, and what problem it is solving. --> The imprint is mandatory for all regions and languages. ### Proposed Solution <!-- A clear and concise description of the feature you would like to add, and how it solves the motivating problem. --> Always return a result in the [imprint API](https://github.com/Integreat/integreat-cms/blob/develop/src/api/v3/imprint.py). If the translation is missing, deliver the imprint in the region's default language. ### Alternatives <!-- A clear and concise description of any alternative solutions or features you've considered, and why you're proposed solution is better. --> ### Additional Context <!-- Add any other information or screenshots about the feature request here. --> </issue> <code> [start of src/api/v3/imprint.py] 1 """ 2 imprint API endpoint 3 """ 4 from django.http import JsonResponse 5 6 from backend.settings import BASE_URL 7 from cms.models import Region 8 9 from ..decorators import json_response 10 11 12 def transform_imprint(imprint_translation): 13 """ 14 Function to create a JSON from a single imprint_translation object. 15 16 :param imprint_translation: single page translation object 17 :type imprint_translation: ~cms.models.pages.page_translation.PageTranslation 18 19 :return: return data necessary for API 20 :rtype: dict 21 """ 22 if imprint_translation.page.icon: 23 thumbnail = BASE_URL + imprint_translation.page.icon.url 24 else: 25 thumbnail = None 26 return { 27 "id": imprint_translation.id, 28 "url": imprint_translation.permalink, 29 "title": imprint_translation.title, 30 "modified_gmt": imprint_translation.last_updated, 31 "excerpt": imprint_translation.text, 32 "content": imprint_translation.text, 33 "parent": None, 34 "available_languages": imprint_translation.available_languages, 35 "thumbnail": thumbnail, 36 "hash": None, 37 } 38 39 40 @json_response 41 # pylint: disable=unused-argument 42 def imprint(request, region_slug, language_code): 43 """ 44 Get imprint for language and return JSON object to client 45 46 :param request: Django request 47 :type request: ~django.http.HttpRequest 48 :param region_slug: slug of a region 49 :type region_slug: str 50 :param language_code: language code 51 :type language_code: str 52 53 :return: JSON object according to APIv3 imprint endpoint definition 54 :rtype: ~django.http.JsonResponse 55 """ 56 region = Region.get_current_region(request) 57 if hasattr(region, "imprint"): 58 imprint_translation = region.imprint.get_public_translation(language_code) 59 if imprint_translation: 60 return JsonResponse(transform_imprint(imprint_translation)) 61 # If imprint does not exist, return an empty response. Turn off Safe-Mode to allow serializing arrays 62 return JsonResponse([], safe=False) 63 [end of src/api/v3/imprint.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/api/v3/imprint.py b/src/api/v3/imprint.py --- a/src/api/v3/imprint.py +++ b/src/api/v3/imprint.py @@ -41,7 +41,9 @@ # pylint: disable=unused-argument def imprint(request, region_slug, language_code): """ - Get imprint for language and return JSON object to client + Get imprint for language and return JSON object to client. If no imprint translation + is available in the selected language, try to return the translation in the region + default language. :param request: Django request :type request: ~django.http.HttpRequest @@ -58,5 +60,11 @@ imprint_translation = region.imprint.get_public_translation(language_code) if imprint_translation: return JsonResponse(transform_imprint(imprint_translation)) + if region.default_language: + imprint_default_translation = region.imprint.get_public_translation( + region.default_language.code + ) + if imprint_default_translation: + return JsonResponse(transform_imprint(imprint_default_translation)) # If imprint does not exist, return an empty response. Turn off Safe-Mode to allow serializing arrays return JsonResponse([], safe=False)
{"golden_diff": "diff --git a/src/api/v3/imprint.py b/src/api/v3/imprint.py\n--- a/src/api/v3/imprint.py\n+++ b/src/api/v3/imprint.py\n@@ -41,7 +41,9 @@\n # pylint: disable=unused-argument\n def imprint(request, region_slug, language_code):\n \"\"\"\n- Get imprint for language and return JSON object to client\n+ Get imprint for language and return JSON object to client. If no imprint translation\n+ is available in the selected language, try to return the translation in the region\n+ default language.\n \n :param request: Django request\n :type request: ~django.http.HttpRequest\n@@ -58,5 +60,11 @@\n imprint_translation = region.imprint.get_public_translation(language_code)\n if imprint_translation:\n return JsonResponse(transform_imprint(imprint_translation))\n+ if region.default_language:\n+ imprint_default_translation = region.imprint.get_public_translation(\n+ region.default_language.code\n+ )\n+ if imprint_default_translation:\n+ return JsonResponse(transform_imprint(imprint_default_translation))\n # If imprint does not exist, return an empty response. Turn off Safe-Mode to allow serializing arrays\n return JsonResponse([], safe=False)\n", "issue": "Deliver fallback of missing imprint translations in API\n### Motivation\r\n<!-- A clear and concise description of what the motivation for the new feature is, and what problem it is solving. -->\r\nThe imprint is mandatory for all regions and languages.\r\n\r\n### Proposed Solution\r\n<!-- A clear and concise description of the feature you would like to add, and how it solves the motivating problem. -->\r\nAlways return a result in the [imprint API](https://github.com/Integreat/integreat-cms/blob/develop/src/api/v3/imprint.py). If the translation is missing, deliver the imprint in the region's default language.\r\n\r\n### Alternatives\r\n<!-- A clear and concise description of any alternative solutions or features you've considered, and why you're proposed solution is better. -->\r\n\r\n\r\n### Additional Context\r\n<!-- Add any other information or screenshots about the feature request here. 
-->\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nimprint API endpoint\n\"\"\"\nfrom django.http import JsonResponse\n\nfrom backend.settings import BASE_URL\nfrom cms.models import Region\n\nfrom ..decorators import json_response\n\n\ndef transform_imprint(imprint_translation):\n \"\"\"\n Function to create a JSON from a single imprint_translation object.\n\n :param imprint_translation: single page translation object\n :type imprint_translation: ~cms.models.pages.page_translation.PageTranslation\n\n :return: return data necessary for API\n :rtype: dict\n \"\"\"\n if imprint_translation.page.icon:\n thumbnail = BASE_URL + imprint_translation.page.icon.url\n else:\n thumbnail = None\n return {\n \"id\": imprint_translation.id,\n \"url\": imprint_translation.permalink,\n \"title\": imprint_translation.title,\n \"modified_gmt\": imprint_translation.last_updated,\n \"excerpt\": imprint_translation.text,\n \"content\": imprint_translation.text,\n \"parent\": None,\n \"available_languages\": imprint_translation.available_languages,\n \"thumbnail\": thumbnail,\n \"hash\": None,\n }\n\n\n@json_response\n# pylint: disable=unused-argument\ndef imprint(request, region_slug, language_code):\n \"\"\"\n Get imprint for language and return JSON object to client\n\n :param request: Django request\n :type request: ~django.http.HttpRequest\n :param region_slug: slug of a region\n :type region_slug: str\n :param language_code: language code\n :type language_code: str\n\n :return: JSON object according to APIv3 imprint endpoint definition\n :rtype: ~django.http.JsonResponse\n \"\"\"\n region = Region.get_current_region(request)\n if hasattr(region, \"imprint\"):\n imprint_translation = region.imprint.get_public_translation(language_code)\n if imprint_translation:\n return JsonResponse(transform_imprint(imprint_translation))\n # If imprint does not exist, return an empty response. Turn off Safe-Mode to allow serializing arrays\n return JsonResponse([], safe=False)\n", "path": "src/api/v3/imprint.py"}]}
1,252
269
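The fallback order the golden diff above implements, expressed as a small helper; `region` and the translation lookup API are assumed from the record's model code:

```python
# Sketch only: requested language first, then the region default language.
def resolve_imprint_translation(region, language_code):
    translation = region.imprint.get_public_translation(language_code)
    if translation:
        return translation
    if region.default_language:
        # Fall back to the region's default-language translation, if any.
        return region.imprint.get_public_translation(
            region.default_language.code
        )
    return None
```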
gh_patches_debug_11379
rasdani/github-patches
git_diff
networkx__networkx-1045
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Need JSON format description The page on JSON serialization lacks information about the actual structure of produced data. This make it hard to see if networkx is a suitable tool for a backend of already existing JavaScript front. http://networkx.lanl.gov/reference/readwrite.json_graph.html </issue> <code> [start of networkx/readwrite/json_graph/__init__.py] 1 """ 2 ********* 3 JSON data 4 ********* 5 Generate and parse JSON serializable data for NetworkX graphs. 6 """ 7 from networkx.readwrite.json_graph.node_link import * 8 from networkx.readwrite.json_graph.adjacency import * 9 from networkx.readwrite.json_graph.tree import * 10 from networkx.readwrite.json_graph.serialize import * 11 [end of networkx/readwrite/json_graph/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/networkx/readwrite/json_graph/__init__.py b/networkx/readwrite/json_graph/__init__.py --- a/networkx/readwrite/json_graph/__init__.py +++ b/networkx/readwrite/json_graph/__init__.py @@ -1,8 +1,16 @@ """ ********* -JSON data +JSON data ********* Generate and parse JSON serializable data for NetworkX graphs. + +These formats are suitable for use with the d3.js examples http://d3js.org/ + +The three formats that you can generate with NetworkX are: + + - node-link like in the d3.js example http://bl.ocks.org/mbostock/4062045 + - tree like in the d3.js example http://bl.ocks.org/mbostock/4063550 + - adjacency like in the d3.js example http://bost.ocks.org/mike/miserables/ """ from networkx.readwrite.json_graph.node_link import * from networkx.readwrite.json_graph.adjacency import *
{"golden_diff": "diff --git a/networkx/readwrite/json_graph/__init__.py b/networkx/readwrite/json_graph/__init__.py\n--- a/networkx/readwrite/json_graph/__init__.py\n+++ b/networkx/readwrite/json_graph/__init__.py\n@@ -1,8 +1,16 @@\n \"\"\"\n *********\n-JSON data \n+JSON data\n *********\n Generate and parse JSON serializable data for NetworkX graphs.\n+\n+These formats are suitable for use with the d3.js examples http://d3js.org/\n+\n+The three formats that you can generate with NetworkX are:\n+\n+ - node-link like in the d3.js example http://bl.ocks.org/mbostock/4062045\n+ - tree like in the d3.js example http://bl.ocks.org/mbostock/4063550\n+ - adjacency like in the d3.js example http://bost.ocks.org/mike/miserables/\n \"\"\"\n from networkx.readwrite.json_graph.node_link import *\n from networkx.readwrite.json_graph.adjacency import *\n", "issue": "Need JSON format description\nThe page on JSON serialization lacks information about the actual structure of produced data. This make it hard to see if networkx is a suitable tool for a backend of already existing JavaScript front.\n\nhttp://networkx.lanl.gov/reference/readwrite.json_graph.html\n\n", "before_files": [{"content": "\"\"\"\n*********\nJSON data \n*********\nGenerate and parse JSON serializable data for NetworkX graphs.\n\"\"\"\nfrom networkx.readwrite.json_graph.node_link import *\nfrom networkx.readwrite.json_graph.adjacency import *\nfrom networkx.readwrite.json_graph.tree import *\nfrom networkx.readwrite.json_graph.serialize import *\n", "path": "networkx/readwrite/json_graph/__init__.py"}]}
679
230
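A tiny sketch of producing the node-link variant named in the new docstring; the exact key layout varies a little across networkx versions, but the result is always a plain dict that serializes straight into the d3.js examples linked in the diff:

```python
# Sketch only: node-link JSON for a two-node graph.
import json

import networkx as nx
from networkx.readwrite import json_graph

G = nx.Graph()
G.add_edge("a", "b", weight=3)

data = json_graph.node_link_data(G)  # dict with "nodes" and "links" lists
print(json.dumps(data))              # ready to feed a d3 force layout
```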
gh_patches_debug_8211
rasdani/github-patches
git_diff
mathesar-foundation__mathesar-1975
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Show banner throughout application when "live demo mode" is turned on. We should show a banner at the top of the screen on all pages that explains that Mathesar is in live demo mode and that each session has its own copy of demo data and that data will be deleted regularly. Assigning this to @mathemancer to make sure it gets implemented at some point, @ghislaineguerin for the design, and @pavish for the frontend. </issue> <code> [start of config/context_processors.py] 1 from django.conf import settings 2 3 from mathesar.utils.frontend import get_manifest_data 4 5 6 def frontend_settings(request): 7 frontend_settings = { 8 'development_mode': settings.MATHESAR_MODE == 'DEVELOPMENT', 9 'manifest_data': get_manifest_data() 10 } 11 # Only include development URL if we're in development mode. 12 if frontend_settings['development_mode'] is True: 13 frontend_settings['client_dev_url'] = settings.MATHESAR_CLIENT_DEV_URL 14 return frontend_settings 15 [end of config/context_processors.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/config/context_processors.py b/config/context_processors.py --- a/config/context_processors.py +++ b/config/context_processors.py @@ -6,7 +6,8 @@ def frontend_settings(request): frontend_settings = { 'development_mode': settings.MATHESAR_MODE == 'DEVELOPMENT', - 'manifest_data': get_manifest_data() + 'manifest_data': get_manifest_data(), + 'live_demo_mode': getattr(settings, 'MATHESAR_LIVE_DEMO', False) } # Only include development URL if we're in development mode. if frontend_settings['development_mode'] is True:
{"golden_diff": "diff --git a/config/context_processors.py b/config/context_processors.py\n--- a/config/context_processors.py\n+++ b/config/context_processors.py\n@@ -6,7 +6,8 @@\n def frontend_settings(request):\n frontend_settings = {\n 'development_mode': settings.MATHESAR_MODE == 'DEVELOPMENT',\n- 'manifest_data': get_manifest_data()\n+ 'manifest_data': get_manifest_data(),\n+ 'live_demo_mode': getattr(settings, 'MATHESAR_LIVE_DEMO', False)\n }\n # Only include development URL if we're in development mode.\n if frontend_settings['development_mode'] is True:\n", "issue": "Show banner throughout application when \"live demo mode\" is turned on.\nWe should show a banner at the top of the screen on all pages that explains that Mathesar is in live demo mode and that each session has its own copy of demo data and that data will be deleted regularly.\r\n\r\nAssigning this to @mathemancer to make sure it gets implemented at some point, @ghislaineguerin for the design, and @pavish for the frontend.\n", "before_files": [{"content": "from django.conf import settings\n\nfrom mathesar.utils.frontend import get_manifest_data\n\n\ndef frontend_settings(request):\n frontend_settings = {\n 'development_mode': settings.MATHESAR_MODE == 'DEVELOPMENT',\n 'manifest_data': get_manifest_data()\n }\n # Only include development URL if we're in development mode.\n if frontend_settings['development_mode'] is True:\n frontend_settings['client_dev_url'] = settings.MATHESAR_CLIENT_DEV_URL\n return frontend_settings\n", "path": "config/context_processors.py"}]}
755
134
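The flag the golden diff adds, sketched as a self-contained context processor; registering it under `TEMPLATES[0]["OPTIONS"]["context_processors"]` is assumed, since the record does not show the settings module:

```python
# Sketch only: exposing the live-demo flag to every template.
from django.conf import settings


def frontend_settings(request):
    return {
        # getattr keeps non-demo deployments working when the setting
        # is absent entirely.
        "live_demo_mode": getattr(settings, "MATHESAR_LIVE_DEMO", False),
    }
# A base template can then gate the banner with {% if live_demo_mode %}.
```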
gh_patches_debug_12637
rasdani/github-patches
git_diff
readthedocs__readthedocs.org-10551
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> API: allow remote repo full name query The new dashboard is still using the API v2 remote repo API, which does not allow for expansion on project results and doesn't have all of the fields that I'd like to use in the results listing. The API v3 needs the v2 API implementation for searching by full_name, the current pattern for searching `full_name` by icontains on the v2 API works okay for now. I didn't want to alter the v2 API further, as we should really be moving towards the v3 API, but if it's just easier to add expansion there for some reason, that is also fine. Note: this also gives expansion on the nested projects in the result, so we can get fields like the avatar_url, etc. The current v2 search only returns the project slug and a link to the project dashboard. </issue> <code> [start of readthedocs/api/v3/filters.py] 1 import django_filters.rest_framework as filters 2 3 from readthedocs.builds.constants import BUILD_FINAL_STATES 4 from readthedocs.builds.models import Build, Version 5 from readthedocs.oauth.models import RemoteOrganization, RemoteRepository 6 from readthedocs.projects.models import Project 7 8 9 class ProjectFilter(filters.FilterSet): 10 11 # TODO this is copying the patterns from other filter sets, where the fields 12 # are all ``icontains`` lookups by default. We discussed reversing this 13 # pattern in the future though, see: 14 # https://github.com/readthedocs/readthedocs.org/issues/9862 15 name = filters.CharFilter(lookup_expr="icontains") 16 slug = filters.CharFilter(lookup_expr="icontains") 17 18 class Meta: 19 model = Project 20 fields = [ 21 "name", 22 "slug", 23 "language", 24 "programming_language", 25 ] 26 27 28 class VersionFilter(filters.FilterSet): 29 slug = filters.CharFilter(lookup_expr='icontains') 30 verbose_name = filters.CharFilter(lookup_expr='icontains') 31 32 class Meta: 33 model = Version 34 fields = [ 35 'verbose_name', 36 'privacy_level', 37 'active', 38 'built', 39 'uploaded', 40 'slug', 41 'type', 42 ] 43 44 45 class BuildFilter(filters.FilterSet): 46 running = filters.BooleanFilter(method='get_running') 47 48 class Meta: 49 model = Build 50 fields = [ 51 'commit', 52 'running', 53 ] 54 55 def get_running(self, queryset, name, value): 56 if value: 57 return queryset.exclude(state__in=BUILD_FINAL_STATES) 58 59 return queryset.filter(state__in=BUILD_FINAL_STATES) 60 61 62 class RemoteRepositoryFilter(filters.FilterSet): 63 name = filters.CharFilter(field_name='name', lookup_expr='icontains') 64 organization = filters.CharFilter(field_name='organization__slug') 65 66 class Meta: 67 model = RemoteRepository 68 fields = [ 69 'name', 70 'vcs_provider', 71 'organization', 72 ] 73 74 75 class RemoteOrganizationFilter(filters.FilterSet): 76 name = filters.CharFilter(field_name='name', lookup_expr='icontains') 77 78 class Meta: 79 model = RemoteOrganization 80 fields = [ 81 'name', 82 'vcs_provider', 83 ] 84 [end of readthedocs/api/v3/filters.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/readthedocs/api/v3/filters.py b/readthedocs/api/v3/filters.py --- a/readthedocs/api/v3/filters.py +++ b/readthedocs/api/v3/filters.py @@ -60,15 +60,17 @@ class RemoteRepositoryFilter(filters.FilterSet): - name = filters.CharFilter(field_name='name', lookup_expr='icontains') - organization = filters.CharFilter(field_name='organization__slug') + name = filters.CharFilter(field_name="name", lookup_expr="icontains") + full_name = filters.CharFilter(field_name="full_name", lookup_expr="icontains") + organization = filters.CharFilter(field_name="organization__slug") class Meta: model = RemoteRepository fields = [ - 'name', - 'vcs_provider', - 'organization', + "name", + "full_name", + "vcs_provider", + "organization", ]
{"golden_diff": "diff --git a/readthedocs/api/v3/filters.py b/readthedocs/api/v3/filters.py\n--- a/readthedocs/api/v3/filters.py\n+++ b/readthedocs/api/v3/filters.py\n@@ -60,15 +60,17 @@\n \n \n class RemoteRepositoryFilter(filters.FilterSet):\n- name = filters.CharFilter(field_name='name', lookup_expr='icontains')\n- organization = filters.CharFilter(field_name='organization__slug')\n+ name = filters.CharFilter(field_name=\"name\", lookup_expr=\"icontains\")\n+ full_name = filters.CharFilter(field_name=\"full_name\", lookup_expr=\"icontains\")\n+ organization = filters.CharFilter(field_name=\"organization__slug\")\n \n class Meta:\n model = RemoteRepository\n fields = [\n- 'name',\n- 'vcs_provider',\n- 'organization',\n+ \"name\",\n+ \"full_name\",\n+ \"vcs_provider\",\n+ \"organization\",\n ]\n", "issue": "API: allow remote repo full name query\nThe new dashboard is still using the API v2 remote repo API, which does not allow for expansion on project results and doesn't have all of the fields that I'd like to use in the results listing. The API v3 needs the v2 API implementation for searching by full_name, the current pattern for searching `full_name` by icontains on the v2 API works okay for now.\r\n\r\nI didn't want to alter the v2 API further, as we should really be moving towards the v3 API, but if it's just easier to add expansion there for some reason, that is also fine.\r\n\r\nNote: this also gives expansion on the nested projects in the result, so we can get fields like the avatar_url, etc. The current v2 search only returns the project slug and a link to the project dashboard.\n", "before_files": [{"content": "import django_filters.rest_framework as filters\n\nfrom readthedocs.builds.constants import BUILD_FINAL_STATES\nfrom readthedocs.builds.models import Build, Version\nfrom readthedocs.oauth.models import RemoteOrganization, RemoteRepository\nfrom readthedocs.projects.models import Project\n\n\nclass ProjectFilter(filters.FilterSet):\n\n # TODO this is copying the patterns from other filter sets, where the fields\n # are all ``icontains`` lookups by default. 
We discussed reversing this\n # pattern in the future though, see:\n # https://github.com/readthedocs/readthedocs.org/issues/9862\n name = filters.CharFilter(lookup_expr=\"icontains\")\n slug = filters.CharFilter(lookup_expr=\"icontains\")\n\n class Meta:\n model = Project\n fields = [\n \"name\",\n \"slug\",\n \"language\",\n \"programming_language\",\n ]\n\n\nclass VersionFilter(filters.FilterSet):\n slug = filters.CharFilter(lookup_expr='icontains')\n verbose_name = filters.CharFilter(lookup_expr='icontains')\n\n class Meta:\n model = Version\n fields = [\n 'verbose_name',\n 'privacy_level',\n 'active',\n 'built',\n 'uploaded',\n 'slug',\n 'type',\n ]\n\n\nclass BuildFilter(filters.FilterSet):\n running = filters.BooleanFilter(method='get_running')\n\n class Meta:\n model = Build\n fields = [\n 'commit',\n 'running',\n ]\n\n def get_running(self, queryset, name, value):\n if value:\n return queryset.exclude(state__in=BUILD_FINAL_STATES)\n\n return queryset.filter(state__in=BUILD_FINAL_STATES)\n\n\nclass RemoteRepositoryFilter(filters.FilterSet):\n name = filters.CharFilter(field_name='name', lookup_expr='icontains')\n organization = filters.CharFilter(field_name='organization__slug')\n\n class Meta:\n model = RemoteRepository\n fields = [\n 'name',\n 'vcs_provider',\n 'organization',\n ]\n\n\nclass RemoteOrganizationFilter(filters.FilterSet):\n name = filters.CharFilter(field_name='name', lookup_expr='icontains')\n\n class Meta:\n model = RemoteOrganization\n fields = [\n 'name',\n 'vcs_provider',\n ]\n", "path": "readthedocs/api/v3/filters.py"}]}
1,355
210
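A sketch of exercising the new filter the way a test might, using the models and filter class imported in the record itself; since the lookup is `icontains`, a partial owner/name fragment is enough to match:

```python
# Sketch only: filtering RemoteRepository rows by full_name.
from readthedocs.api.v3.filters import RemoteRepositoryFilter
from readthedocs.oauth.models import RemoteRepository

queryset = RemoteRepository.objects.all()
filtered = RemoteRepositoryFilter(
    {"full_name": "readthedocs/readthedocs"}, queryset=queryset
).qs
for repo in filtered:
    print(repo.full_name)
```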
gh_patches_debug_330
rasdani/github-patches
git_diff
Pylons__pyramid-3272
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bump Sphinx to >=1.7.2 Would anyone be opposed to bumping Sphinx to >=1.7.2, != 1.7.3 in `setup.py`? I really want our PDFs to have `emphasize-lines` support, at long last, and bring in support for Unicode characters in PDFs via xelatex. Refs: * #667 * #2572 * https://github.com/rtfd/readthedocs.org/issues/4015 </issue> <code> [start of setup.py] 1 ############################################################################## 2 # 3 # Copyright (c) 2008-2013 Agendaless Consulting and Contributors. 4 # All Rights Reserved. 5 # 6 # This software is subject to the provisions of the BSD-like license at 7 # http://www.repoze.org/LICENSE.txt. A copy of the license should accompany 8 # this distribution. THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL 9 # EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, 10 # THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND 11 # FITNESS FOR A PARTICULAR PURPOSE 12 # 13 ############################################################################## 14 15 import os 16 17 from setuptools import setup, find_packages 18 19 here = os.path.abspath(os.path.dirname(__file__)) 20 try: 21 with open(os.path.join(here, 'README.rst')) as f: 22 README = f.read() 23 with open(os.path.join(here, 'CHANGES.txt')) as f: 24 CHANGES = f.read() 25 except IOError: 26 README = CHANGES = '' 27 28 install_requires = [ 29 'setuptools', 30 'WebOb >= 1.7.0', # Response.has_body 31 'repoze.lru >= 0.4', # py3 compat 32 'zope.interface >= 3.8.0', # has zope.interface.registry 33 'zope.deprecation >= 3.5.0', # py3 compat 34 'venusian >= 1.0a3', # ``ignore`` 35 'translationstring >= 0.4', # py3 compat 36 'PasteDeploy >= 1.5.0', # py3 compat 37 'plaster', 38 'plaster_pastedeploy', 39 'hupper', 40 ] 41 42 tests_require = [ 43 'WebTest >= 1.3.1', # py3 compat 44 'zope.component >= 4.0', # py3 compat 45 ] 46 47 48 docs_extras = [ 49 'Sphinx >= 1.3.5, != 1.7.3', 50 'docutils', 51 'repoze.sphinx.autointerface', 52 'pylons_sphinx_latesturl', 53 'pylons-sphinx-themes', 54 'sphinxcontrib-autoprogram', 55 ] 56 57 testing_extras = tests_require + [ 58 'nose', 59 'coverage', 60 'virtualenv', # for scaffolding tests 61 ] 62 63 setup(name='pyramid', 64 version='1.9.2', 65 description='The Pyramid Web Framework, a Pylons project', 66 long_description=README + '\n\n' + CHANGES, 67 classifiers=[ 68 "Development Status :: 6 - Mature", 69 "Intended Audience :: Developers", 70 "Programming Language :: Python", 71 "Programming Language :: Python :: 2.7", 72 "Programming Language :: Python :: 3", 73 "Programming Language :: Python :: 3.4", 74 "Programming Language :: Python :: 3.5", 75 "Programming Language :: Python :: 3.6", 76 "Programming Language :: Python :: Implementation :: CPython", 77 "Programming Language :: Python :: Implementation :: PyPy", 78 "Framework :: Pyramid", 79 "Topic :: Internet :: WWW/HTTP", 80 "Topic :: Internet :: WWW/HTTP :: WSGI", 81 "License :: Repoze Public License", 82 ], 83 keywords='web wsgi pylons pyramid', 84 author="Chris McDonough, Agendaless Consulting", 85 author_email="[email protected]", 86 url="https://trypyramid.com", 87 license="BSD-derived (http://www.repoze.org/LICENSE.txt)", 88 packages=find_packages(), 89 include_package_data=True, 90 zip_safe=False, 91 python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*', 92 install_requires=install_requires, 93 extras_require={ 94 'testing': testing_extras, 95 'docs': 
docs_extras, 96 }, 97 tests_require=tests_require, 98 test_suite="pyramid.tests", 99 entry_points="""\ 100 [pyramid.scaffold] 101 starter=pyramid.scaffolds:StarterProjectTemplate 102 zodb=pyramid.scaffolds:ZODBProjectTemplate 103 alchemy=pyramid.scaffolds:AlchemyProjectTemplate 104 [pyramid.pshell_runner] 105 python=pyramid.scripts.pshell:python_shell_runner 106 [console_scripts] 107 pcreate = pyramid.scripts.pcreate:main 108 pserve = pyramid.scripts.pserve:main 109 pshell = pyramid.scripts.pshell:main 110 proutes = pyramid.scripts.proutes:main 111 pviews = pyramid.scripts.pviews:main 112 ptweens = pyramid.scripts.ptweens:main 113 prequest = pyramid.scripts.prequest:main 114 pdistreport = pyramid.scripts.pdistreport:main 115 [paste.server_runner] 116 wsgiref = pyramid.scripts.pserve:wsgiref_server_runner 117 cherrypy = pyramid.scripts.pserve:cherrypy_server_runner 118 """ 119 ) 120 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -46,7 +46,7 @@
 
 
 docs_extras = [
-    'Sphinx >= 1.3.5, != 1.7.3',
+    'Sphinx >= 1.7.4',
    'docutils',
    'repoze.sphinx.autointerface',
    'pylons_sphinx_latesturl',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -46,7 +46,7 @@\n \n \n docs_extras = [\n- 'Sphinx >= 1.3.5, != 1.7.3',\n+ 'Sphinx >= 1.7.4',\n 'docutils',\n 'repoze.sphinx.autointerface',\n 'pylons_sphinx_latesturl',\n", "issue": "Bump Sphinx to >=1.7.2\nWould anyone be opposed to bumping Sphinx to >=1.7.2, != 1.7.3 in `setup.py`? I really want our PDFs to have `emphasize-lines` support, at long last, and bring in support for Unicode characters in PDFs via xelatex.\r\n\r\nRefs:\r\n* #667\r\n* #2572\r\n* https://github.com/rtfd/readthedocs.org/issues/4015\r\n\n", "before_files": [{"content": "##############################################################################\n#\n# Copyright (c) 2008-2013 Agendaless Consulting and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the BSD-like license at\n# http://www.repoze.org/LICENSE.txt. A copy of the license should accompany\n# this distribution. THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL\n# EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND\n# FITNESS FOR A PARTICULAR PURPOSE\n#\n##############################################################################\n\nimport os\n\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n with open(os.path.join(here, 'README.rst')) as f:\n README = f.read()\n with open(os.path.join(here, 'CHANGES.txt')) as f:\n CHANGES = f.read()\nexcept IOError:\n README = CHANGES = ''\n\ninstall_requires = [\n 'setuptools',\n 'WebOb >= 1.7.0', # Response.has_body\n 'repoze.lru >= 0.4', # py3 compat\n 'zope.interface >= 3.8.0', # has zope.interface.registry\n 'zope.deprecation >= 3.5.0', # py3 compat\n 'venusian >= 1.0a3', # ``ignore``\n 'translationstring >= 0.4', # py3 compat\n 'PasteDeploy >= 1.5.0', # py3 compat\n 'plaster',\n 'plaster_pastedeploy',\n 'hupper',\n ]\n\ntests_require = [\n 'WebTest >= 1.3.1', # py3 compat\n 'zope.component >= 4.0', # py3 compat\n ]\n\n\ndocs_extras = [\n 'Sphinx >= 1.3.5, != 1.7.3',\n 'docutils',\n 'repoze.sphinx.autointerface',\n 'pylons_sphinx_latesturl',\n 'pylons-sphinx-themes',\n 'sphinxcontrib-autoprogram',\n ]\n\ntesting_extras = tests_require + [\n 'nose',\n 'coverage',\n 'virtualenv', # for scaffolding tests\n ]\n\nsetup(name='pyramid',\n version='1.9.2',\n description='The Pyramid Web Framework, a Pylons project',\n long_description=README + '\\n\\n' + CHANGES,\n classifiers=[\n \"Development Status :: 6 - Mature\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Framework :: Pyramid\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI\",\n \"License :: Repoze Public License\",\n ],\n keywords='web wsgi pylons pyramid',\n author=\"Chris McDonough, Agendaless Consulting\",\n author_email=\"[email protected]\",\n url=\"https://trypyramid.com\",\n license=\"BSD-derived (http://www.repoze.org/LICENSE.txt)\",\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n 
python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',\n install_requires=install_requires,\n extras_require={\n 'testing': testing_extras,\n 'docs': docs_extras,\n },\n tests_require=tests_require,\n test_suite=\"pyramid.tests\",\n entry_points=\"\"\"\\\n [pyramid.scaffold]\n starter=pyramid.scaffolds:StarterProjectTemplate\n zodb=pyramid.scaffolds:ZODBProjectTemplate\n alchemy=pyramid.scaffolds:AlchemyProjectTemplate\n [pyramid.pshell_runner]\n python=pyramid.scripts.pshell:python_shell_runner\n [console_scripts]\n pcreate = pyramid.scripts.pcreate:main\n pserve = pyramid.scripts.pserve:main\n pshell = pyramid.scripts.pshell:main\n proutes = pyramid.scripts.proutes:main\n pviews = pyramid.scripts.pviews:main\n ptweens = pyramid.scripts.ptweens:main\n prequest = pyramid.scripts.prequest:main\n pdistreport = pyramid.scripts.pdistreport:main\n [paste.server_runner]\n wsgiref = pyramid.scripts.pserve:wsgiref_server_runner\n cherrypy = pyramid.scripts.pserve:cherrypy_server_runner\n \"\"\"\n )\n", "path": "setup.py"}]}
num_tokens_prompt: 1,973
num_tokens_diff: 99
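The issue asks for at least 1.7.2 while the golden diff lands on `>= 1.7.4`: the old specifier `>= 1.3.5, != 1.7.3` still admits pre-1.7 releases without the requested PDF features, and moving the floor past 1.7.3 makes the explicit exclusion redundant. A small sketch, assuming the `packaging` library is available (it is not part of this record), makes the difference concrete:

from packaging.specifiers import SpecifierSet

old_spec = SpecifierSet(">=1.3.5,!=1.7.3")  # docs_extras before the patch
new_spec = SpecifierSet(">=1.7.4")          # docs_extras after the patch

for version in ("1.3.5", "1.7.2", "1.7.3", "1.7.4"):
    # contains() tests one concrete release against the whole specifier set
    print(version, old_spec.contains(version), new_spec.contains(version))

The old set still lets 1.3.5 and 1.7.2 through; the new set rejects everything below 1.7.4, and 1.7.3 is excluded by both, matching the intent of the original `!=` pin.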
problem_id: gh_patches_debug_23277
source: rasdani/github-patches
task_type: git_diff
in_source_id: fidals__shopelectro-1006
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Search shows products with no category It should not, of course ![image](https://user-images.githubusercontent.com/3526796/67692684-9a9cb680-f9b1-11e9-8038-99534e063e03.png) Search link: https://www.shopelectro.ru/search/?term=MK1215NC Link to the product: https://www.shopelectro.ru/catalog/products/7608/ </issue> <code> [start of shopelectro/views/search.py] 1 from django.conf import settings 2 3 from search import views as search_views, search as search_engine 4 5 from pages.models import Page 6 7 from shopelectro.models import Category, Product 8 9 10 class Search(search_views.SearchView): 11 def get_redirect_search_entity(self): 12 return next(s for s in self.search_entities if s.name == 'product') 13 14 # ignore CPDBear 15 search_entities = [ 16 search_engine.Search( 17 name='category', 18 qs=Category.objects.active(), 19 fields=['name'], # Ignore CPDBear 20 min_similarity=settings.TRIGRAM_MIN_SIMILARITY, 21 ), 22 search_engine.Search( 23 name='product', 24 qs=Product.objects.active(), 25 fields=['name'], 26 redirect_field='vendor_code', 27 min_similarity=settings.TRIGRAM_MIN_SIMILARITY, 28 ), 29 search_engine.Search( 30 name='page', # Ignore CPDBear 31 qs=Page.objects.filter(is_active=True).exclude(type=Page.MODEL_TYPE), 32 fields=['name'], 33 min_similarity=settings.TRIGRAM_MIN_SIMILARITY, 34 ) 35 ] 36 37 redirect_field = 'vendor_code' 38 39 40 class Autocomplete(search_views.AutocompleteView): 41 42 # ignore CPDBear 43 search_entities = [ 44 search_engine.Search( 45 name='category', 46 qs=Category.objects.filter(page__is_active=True), 47 fields=['name', 'id'], 48 template_fields=['name', 'url'], 49 min_similarity=settings.TRIGRAM_MIN_SIMILARITY, 50 ), 51 search_engine.Search( 52 name='product', 53 qs=Product.objects.active(), 54 fields=['name', 'id', 'vendor_code'], 55 template_fields=['name', 'price', 'url'], # Ignore CPDBear 56 min_similarity=settings.TRIGRAM_MIN_SIMILARITY, 57 ), 58 search_engine.Search( 59 name='pages', 60 qs=Page.objects.filter(is_active=True).exclude(type=Page.MODEL_TYPE), 61 fields=['name'], 62 template_fields=['name', 'url'], 63 min_similarity=settings.TRIGRAM_MIN_SIMILARITY, 64 ) 65 ] 66 67 see_all_label = settings.SEARCH_SEE_ALL_LABEL 68 69 70 class AdminAutocomplete(search_views.AdminAutocompleteView): 71 72 # ignore CPDBear 73 search_entities = [ 74 search_engine.Search( 75 name='category', 76 qs=Category.objects.filter(page__is_active=True), 77 fields=['name'], 78 min_similarity=settings.TRIGRAM_MIN_SIMILARITY, 79 ), 80 search_engine.Search( 81 name='product', 82 qs=Product.objects.active(), 83 fields=['name'], 84 min_similarity=settings.TRIGRAM_MIN_SIMILARITY, 85 ), 86 search_engine.Search( 87 name='pages', 88 qs=Page.objects.filter(is_active=True).exclude(type=Page.MODEL_TYPE), 89 fields=['name'], 90 min_similarity=settings.TRIGRAM_MIN_SIMILARITY, 91 ) 92 ] 93 [end of shopelectro/views/search.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/shopelectro/views/search.py b/shopelectro/views/search.py
--- a/shopelectro/views/search.py
+++ b/shopelectro/views/search.py
@@ -1,9 +1,7 @@
 from django.conf import settings
 
-from search import views as search_views, search as search_engine
-
 from pages.models import Page
-
+from search import views as search_views, search as search_engine
 from shopelectro.models import Category, Product
 
 
@@ -21,14 +19,14 @@
         ),
         search_engine.Search(
             name='product',
-            qs=Product.objects.active(),
+            qs=Product.objects.active().exclude(category__isnull=True),
             fields=['name'],
             redirect_field='vendor_code',
             min_similarity=settings.TRIGRAM_MIN_SIMILARITY,
         ),
         search_engine.Search(
             name='page', # Ignore CPDBear
-            qs=Page.objects.filter(is_active=True).exclude(type=Page.MODEL_TYPE),
+            qs=Page.objects.active().exclude(type=Page.MODEL_TYPE),
             fields=['name'],
             min_similarity=settings.TRIGRAM_MIN_SIMILARITY,
         )
{"golden_diff": "diff --git a/shopelectro/views/search.py b/shopelectro/views/search.py\n--- a/shopelectro/views/search.py\n+++ b/shopelectro/views/search.py\n@@ -1,9 +1,7 @@\n from django.conf import settings\n \n-from search import views as search_views, search as search_engine\n-\n from pages.models import Page\n-\n+from search import views as search_views, search as search_engine\n from shopelectro.models import Category, Product\n \n \n@@ -21,14 +19,14 @@\n ),\n search_engine.Search(\n name='product',\n- qs=Product.objects.active(),\n+ qs=Product.objects.active().exclude(category__isnull=True),\n fields=['name'],\n redirect_field='vendor_code',\n min_similarity=settings.TRIGRAM_MIN_SIMILARITY,\n ),\n search_engine.Search(\n name='page', # Ignore CPDBear\n- qs=Page.objects.filter(is_active=True).exclude(type=Page.MODEL_TYPE),\n+ qs=Page.objects.active().exclude(type=Page.MODEL_TYPE),\n fields=['name'],\n min_similarity=settings.TRIGRAM_MIN_SIMILARITY,\n )\n", "issue": "Search shows products with no category\nIt should not, of course\r\n\r\n![image](https://user-images.githubusercontent.com/3526796/67692684-9a9cb680-f9b1-11e9-8038-99534e063e03.png)\r\n\r\nSearch link: https://www.shopelectro.ru/search/?term=MK1215NC\r\nLink to the product: https://www.shopelectro.ru/catalog/products/7608/\r\n\n", "before_files": [{"content": "from django.conf import settings\n\nfrom search import views as search_views, search as search_engine\n\nfrom pages.models import Page\n\nfrom shopelectro.models import Category, Product\n\n\nclass Search(search_views.SearchView):\n def get_redirect_search_entity(self):\n return next(s for s in self.search_entities if s.name == 'product')\n\n # ignore CPDBear\n search_entities = [\n search_engine.Search(\n name='category',\n qs=Category.objects.active(),\n fields=['name'], # Ignore CPDBear\n min_similarity=settings.TRIGRAM_MIN_SIMILARITY,\n ),\n search_engine.Search(\n name='product',\n qs=Product.objects.active(),\n fields=['name'],\n redirect_field='vendor_code',\n min_similarity=settings.TRIGRAM_MIN_SIMILARITY,\n ),\n search_engine.Search(\n name='page', # Ignore CPDBear\n qs=Page.objects.filter(is_active=True).exclude(type=Page.MODEL_TYPE),\n fields=['name'],\n min_similarity=settings.TRIGRAM_MIN_SIMILARITY,\n )\n ]\n\n redirect_field = 'vendor_code'\n\n\nclass Autocomplete(search_views.AutocompleteView):\n\n # ignore CPDBear\n search_entities = [\n search_engine.Search(\n name='category',\n qs=Category.objects.filter(page__is_active=True),\n fields=['name', 'id'],\n template_fields=['name', 'url'],\n min_similarity=settings.TRIGRAM_MIN_SIMILARITY,\n ),\n search_engine.Search(\n name='product',\n qs=Product.objects.active(),\n fields=['name', 'id', 'vendor_code'],\n template_fields=['name', 'price', 'url'], # Ignore CPDBear\n min_similarity=settings.TRIGRAM_MIN_SIMILARITY,\n ),\n search_engine.Search(\n name='pages',\n qs=Page.objects.filter(is_active=True).exclude(type=Page.MODEL_TYPE),\n fields=['name'],\n template_fields=['name', 'url'],\n min_similarity=settings.TRIGRAM_MIN_SIMILARITY,\n )\n ]\n\n see_all_label = settings.SEARCH_SEE_ALL_LABEL\n\n\nclass AdminAutocomplete(search_views.AdminAutocompleteView):\n\n # ignore CPDBear\n search_entities = [\n search_engine.Search(\n name='category',\n qs=Category.objects.filter(page__is_active=True),\n fields=['name'],\n min_similarity=settings.TRIGRAM_MIN_SIMILARITY,\n ),\n search_engine.Search(\n name='product',\n qs=Product.objects.active(),\n fields=['name'],\n min_similarity=settings.TRIGRAM_MIN_SIMILARITY,\n 
),\n search_engine.Search(\n name='pages',\n qs=Page.objects.filter(is_active=True).exclude(type=Page.MODEL_TYPE),\n fields=['name'],\n min_similarity=settings.TRIGRAM_MIN_SIMILARITY,\n )\n ]\n", "path": "shopelectro/views/search.py"}]}
num_tokens_prompt: 1,446
num_tokens_diff: 250
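The essence of the shopelectro fix, that products without a category must not surface in search, can be demonstrated without a Django setup. The dict-based `products` list and the `search` helper below are invented stand-ins for the real `Product` queryset, shown only to illustrate what `.exclude(category__isnull=True)` accomplishes:

# Hypothetical in-memory stand-in for Product.objects.active()
products = [
    {"name": "MK1215NC", "category": None},          # the orphaned product from the issue
    {"name": "MK1215NC rev2", "category": "relays"}, # a properly categorized product
]

def search(term, items):
    # Mirrors .exclude(category__isnull=True): uncategorized rows never match
    return [p for p in items
            if term.lower() in p["name"].lower() and p["category"] is not None]

print(search("MK1215NC", products))  # only the categorized product is returned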
problem_id: gh_patches_debug_14820
source: rasdani/github-patches
task_type: git_diff
in_source_id: crytic__slither-786
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> AttributeError: 'StructureTopLevel' object has no attribute 'contract' On 0x0cf55d57d241161e0ec68e72cbb175dbfe84173a Here there should be a different case for top-level elements and non-top-level: https://github.com/crytic/slither/blob/c0c581b3ba830b6ce8dc3f4be82592a7a42e9752/slither/core/solidity_types/user_defined_type.py#L65-L66 AttributeError: 'StructureTopLevel' object has no attribute 'contract' On 0x0cf55d57d241161e0ec68e72cbb175dbfe84173a Here there should be a different case for top-level elements and non-top-level: https://github.com/crytic/slither/blob/c0c581b3ba830b6ce8dc3f4be82592a7a42e9752/slither/core/solidity_types/user_defined_type.py#L65-L66 </issue> <code> [start of slither/core/solidity_types/user_defined_type.py] 1 from typing import Union, TYPE_CHECKING, Tuple 2 import math 3 4 from slither.core.solidity_types.type import Type 5 from slither.exceptions import SlitherException 6 7 if TYPE_CHECKING: 8 from slither.core.declarations.structure import Structure 9 from slither.core.declarations.enum import Enum 10 from slither.core.declarations.contract import Contract 11 12 # pylint: disable=import-outside-toplevel 13 class UserDefinedType(Type): 14 def __init__(self, t): 15 from slither.core.declarations.structure import Structure 16 from slither.core.declarations.enum import Enum 17 from slither.core.declarations.contract import Contract 18 19 assert isinstance(t, (Contract, Enum, Structure)) 20 super().__init__() 21 self._type = t 22 23 @property 24 def type(self) -> Union["Contract", "Enum", "Structure"]: 25 return self._type 26 27 @property 28 def storage_size(self) -> Tuple[int, bool]: 29 from slither.core.declarations.structure import Structure 30 from slither.core.declarations.enum import Enum 31 from slither.core.declarations.contract import Contract 32 33 if isinstance(self._type, Contract): 34 return 20, False 35 if isinstance(self._type, Enum): 36 return int(math.ceil(math.log2(len(self._type.values)) / 8)), False 37 if isinstance(self._type, Structure): 38 # todo there's some duplicate logic here and slither_core, can we refactor this? 39 slot = 0 40 offset = 0 41 for elem in self._type.elems_ordered: 42 size, new_slot = elem.type.storage_size 43 if new_slot: 44 if offset > 0: 45 slot += 1 46 offset = 0 47 elif size + offset > 32: 48 slot += 1 49 offset = 0 50 51 if new_slot: 52 slot += math.ceil(size / 32) 53 else: 54 offset += size 55 if offset > 0: 56 slot += 1 57 return slot * 32, True 58 to_log = f"{self} does not have storage size" 59 raise SlitherException(to_log) 60 61 def __str__(self): 62 from slither.core.declarations.structure import Structure 63 from slither.core.declarations.enum import Enum 64 65 if isinstance(self.type, (Enum, Structure)): 66 return str(self.type.contract) + "." + str(self.type.name) 67 return str(self.type.name) 68 69 def __eq__(self, other): 70 if not isinstance(other, UserDefinedType): 71 return False 72 return self.type == other.type 73 74 def __hash__(self): 75 return hash(str(self)) 76 [end of slither/core/solidity_types/user_defined_type.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/slither/core/solidity_types/user_defined_type.py b/slither/core/solidity_types/user_defined_type.py
--- a/slither/core/solidity_types/user_defined_type.py
+++ b/slither/core/solidity_types/user_defined_type.py
@@ -59,12 +59,13 @@
         raise SlitherException(to_log)
 
     def __str__(self):
-        from slither.core.declarations.structure import Structure
-        from slither.core.declarations.enum import Enum
+        from slither.core.declarations.structure_contract import StructureContract
+        from slither.core.declarations.enum_contract import EnumContract
 
-        if isinstance(self.type, (Enum, Structure)):
-            return str(self.type.contract) + "." + str(self.type.name)
-        return str(self.type.name)
+        type_used = self.type
+        if isinstance(type_used, (EnumContract, StructureContract)):
+            return str(type_used.contract) + "." + str(type_used.name)
+        return str(type_used.name)
 
     def __eq__(self, other):
         if not isinstance(other, UserDefinedType):
{"golden_diff": "diff --git a/slither/core/solidity_types/user_defined_type.py b/slither/core/solidity_types/user_defined_type.py\n--- a/slither/core/solidity_types/user_defined_type.py\n+++ b/slither/core/solidity_types/user_defined_type.py\n@@ -59,12 +59,13 @@\n raise SlitherException(to_log)\n \n def __str__(self):\n- from slither.core.declarations.structure import Structure\n- from slither.core.declarations.enum import Enum\n+ from slither.core.declarations.structure_contract import StructureContract\n+ from slither.core.declarations.enum_contract import EnumContract\n \n- if isinstance(self.type, (Enum, Structure)):\n- return str(self.type.contract) + \".\" + str(self.type.name)\n- return str(self.type.name)\n+ type_used = self.type\n+ if isinstance(type_used, (EnumContract, StructureContract)):\n+ return str(type_used.contract) + \".\" + str(type_used.name)\n+ return str(type_used.name)\n \n def __eq__(self, other):\n if not isinstance(other, UserDefinedType):\n", "issue": "AttributeError: 'StructureTopLevel' object has no attribute 'contract'\nOn 0x0cf55d57d241161e0ec68e72cbb175dbfe84173a\r\n\r\nHere there should be a different case for top-level elements and non-top-level:\r\n\r\nhttps://github.com/crytic/slither/blob/c0c581b3ba830b6ce8dc3f4be82592a7a42e9752/slither/core/solidity_types/user_defined_type.py#L65-L66\nAttributeError: 'StructureTopLevel' object has no attribute 'contract'\nOn 0x0cf55d57d241161e0ec68e72cbb175dbfe84173a\r\n\r\nHere there should be a different case for top-level elements and non-top-level:\r\n\r\nhttps://github.com/crytic/slither/blob/c0c581b3ba830b6ce8dc3f4be82592a7a42e9752/slither/core/solidity_types/user_defined_type.py#L65-L66\n", "before_files": [{"content": "from typing import Union, TYPE_CHECKING, Tuple\nimport math\n\nfrom slither.core.solidity_types.type import Type\nfrom slither.exceptions import SlitherException\n\nif TYPE_CHECKING:\n from slither.core.declarations.structure import Structure\n from slither.core.declarations.enum import Enum\n from slither.core.declarations.contract import Contract\n\n# pylint: disable=import-outside-toplevel\nclass UserDefinedType(Type):\n def __init__(self, t):\n from slither.core.declarations.structure import Structure\n from slither.core.declarations.enum import Enum\n from slither.core.declarations.contract import Contract\n\n assert isinstance(t, (Contract, Enum, Structure))\n super().__init__()\n self._type = t\n\n @property\n def type(self) -> Union[\"Contract\", \"Enum\", \"Structure\"]:\n return self._type\n\n @property\n def storage_size(self) -> Tuple[int, bool]:\n from slither.core.declarations.structure import Structure\n from slither.core.declarations.enum import Enum\n from slither.core.declarations.contract import Contract\n\n if isinstance(self._type, Contract):\n return 20, False\n if isinstance(self._type, Enum):\n return int(math.ceil(math.log2(len(self._type.values)) / 8)), False\n if isinstance(self._type, Structure):\n # todo there's some duplicate logic here and slither_core, can we refactor this?\n slot = 0\n offset = 0\n for elem in self._type.elems_ordered:\n size, new_slot = elem.type.storage_size\n if new_slot:\n if offset > 0:\n slot += 1\n offset = 0\n elif size + offset > 32:\n slot += 1\n offset = 0\n\n if new_slot:\n slot += math.ceil(size / 32)\n else:\n offset += size\n if offset > 0:\n slot += 1\n return slot * 32, True\n to_log = f\"{self} does not have storage size\"\n raise SlitherException(to_log)\n\n def __str__(self):\n from slither.core.declarations.structure import 
Structure\n from slither.core.declarations.enum import Enum\n\n if isinstance(self.type, (Enum, Structure)):\n return str(self.type.contract) + \".\" + str(self.type.name)\n return str(self.type.name)\n\n def __eq__(self, other):\n if not isinstance(other, UserDefinedType):\n return False\n return self.type == other.type\n\n def __hash__(self):\n return hash(str(self))\n", "path": "slither/core/solidity_types/user_defined_type.py"}]}
num_tokens_prompt: 1,535
num_tokens_diff: 241
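The slither crash is a duck-typing gap: `__str__` assumed every `Enum`/`Structure` carries a `.contract`, but top-level declarations do not. A stripped-down sketch of the corrected dispatch follows; the class bodies here are invented minimal stand-ins, not slither's real declarations:

class Structure:                      # base, mirrors slither's shared parent
    def __init__(self, name):
        self.name = name

class StructureContract(Structure):   # declared inside a contract
    def __init__(self, name, contract):
        super().__init__(name)
        self.contract = contract

class StructureTopLevel(Structure):   # declared at file scope: no .contract
    pass

def render(t):
    # Patched logic: qualify with the contract only when one exists
    if isinstance(t, StructureContract):
        return str(t.contract) + "." + str(t.name)
    return str(t.name)

print(render(StructureContract("Checkpoint", "Vault")))  # Vault.Checkpoint
print(render(StructureTopLevel("Checkpoint")))           # Checkpoint, no AttributeError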
problem_id: gh_patches_debug_31896
source: rasdani/github-patches
task_type: git_diff
in_source_id: rootpy__rootpy-785
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> basestring Hi there I found the following issue: If I'm using the F1 object from rootpy.plotting and try to access a parameter using [parnr] (the __getitem__) methode, I get the following error: `NameError: name 'basestring' is not defined` I'm using python 3.6 which doesn't has the basestring data type anymore.. https://github.com/rootpy/rootpy/blob/457e074056a916fff848978ef68b7f5107856e47/rootpy/plotting/func.py#L63 </issue> <code> [start of rootpy/plotting/func.py] 1 from __future__ import absolute_import 2 3 from .. import QROOT 4 from ..decorators import snake_case_methods 5 from .base import Plottable 6 from ..base import NameOnlyObject 7 8 9 __all__ = [ 10 'F1', 11 'F2', 12 'F3', 13 ] 14 15 class BaseFunction(object): 16 class ParProxy(object): 17 def __init__(self, fcn, idx): 18 self.fcn_ = fcn 19 self.idx_ = idx 20 21 @property 22 def index(self): 23 return self.idx_ 24 25 @property 26 def name(self): 27 return self.fcn_.GetParName(self.idx_) 28 29 @name.setter 30 def name(self, val): 31 return self.fcn_.SetParName(self.idx_, val) 32 33 @property 34 def value(self): 35 return self.fcn_.GetParameter(self.idx_) 36 37 @value.setter 38 def value(self, val): 39 self.fcn_.SetParameter(self.idx_, val) 40 41 @property 42 def error(self): 43 return self.fcn_.GetParError(self.idx_) 44 45 @error.setter 46 def error(self, val): 47 return self.fcn_.SetParError(self.idx_, val) 48 49 @property 50 def limits(self): 51 m = QROOT.Double() 52 M = QROOT.Double() 53 self.fcn_.GetParLimits(self.idx_, m, M) 54 return float(m), float(M) 55 56 @limits.setter 57 def limits(self, val): 58 if not hastattr(val, '__len__') and len(val) != 2: 59 raise RuntimeError('Function limits must be a tuple size 2') 60 self.fcn_.SetParLimits(self.idx_, val[0], val[1]) 61 62 def __getitem__(self, value): 63 if isinstance(value, basestring): 64 idx = self.GetParNumber(value) 65 elif isinstance(value, int): 66 idx = value 67 else: 68 raise ValueError('Function index must be a integer or a string') 69 return BaseFunction.ParProxy(self, idx) 70 71 72 @snake_case_methods 73 class F1(Plottable, NameOnlyObject, BaseFunction, QROOT.TF1): 74 _ROOT = QROOT.TF1 75 76 def __init__(self, *args, **kwargs): 77 name = kwargs.pop('name', None) 78 super(F1, self).__init__(*args, name=name) 79 self._post_init(**kwargs) 80 81 82 @snake_case_methods 83 class F2(Plottable, NameOnlyObject, BaseFunction, QROOT.TF2): 84 _ROOT = QROOT.TF2 85 86 def __init__(self, *args, **kwargs): 87 name = kwargs.pop('name', None) 88 super(F2, self).__init__(*args, name=name) 89 self._post_init(**kwargs) 90 91 92 @snake_case_methods 93 class F3(Plottable, NameOnlyObject, BaseFunction, QROOT.TF3): 94 _ROOT = QROOT.TF3 95 96 def __init__(self, *args, **kwargs): 97 name = kwargs.pop('name', None) 98 super(F3, self).__init__(*args, name=name) 99 self._post_init(**kwargs) 100 [end of rootpy/plotting/func.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/rootpy/plotting/func.py b/rootpy/plotting/func.py
--- a/rootpy/plotting/func.py
+++ b/rootpy/plotting/func.py
@@ -4,7 +4,7 @@
 from ..decorators import snake_case_methods
 from .base import Plottable
 from ..base import NameOnlyObject
-
+import six
 
 __all__ = [
     'F1',
@@ -17,7 +17,7 @@
         def __init__(self, fcn, idx):
             self.fcn_ = fcn
             self.idx_ = idx
-            
+
         @property
         def index(self):
             return self.idx_
@@ -25,7 +25,7 @@
         @property
         def name(self):
             return self.fcn_.GetParName(self.idx_)
-            
+
         @name.setter
         def name(self, val):
             return self.fcn_.SetParName(self.idx_, val)
@@ -55,14 +55,14 @@
 
         @limits.setter
         def limits(self, val):
-            if not hastattr(val, '__len__') and len(val) != 2:
+            if not hasattr(val, '__len__') and len(val) != 2:
                 raise RuntimeError('Function limits must be a tuple size 2')
             self.fcn_.SetParLimits(self.idx_, val[0], val[1])
 
     def __getitem__(self, value):
-        if isinstance(value, basestring):
+        if isinstance(value, six.string_types):
             idx = self.GetParNumber(value)
-        elif isinstance(value, int):
+        elif isinstance(value, six.integer_types):
             idx = value
         else:
             raise ValueError('Function index must be a integer or a string')
{"golden_diff": "diff --git a/rootpy/plotting/func.py b/rootpy/plotting/func.py\n--- a/rootpy/plotting/func.py\n+++ b/rootpy/plotting/func.py\n@@ -4,7 +4,7 @@\n from ..decorators import snake_case_methods\n from .base import Plottable\n from ..base import NameOnlyObject\n-\n+import six\n \n __all__ = [\n 'F1',\n@@ -17,7 +17,7 @@\n def __init__(self, fcn, idx):\n self.fcn_ = fcn\n self.idx_ = idx\n- \n+\n @property\n def index(self):\n return self.idx_\n@@ -25,7 +25,7 @@\n @property\n def name(self):\n return self.fcn_.GetParName(self.idx_)\n- \n+\n @name.setter\n def name(self, val):\n return self.fcn_.SetParName(self.idx_, val)\n@@ -55,14 +55,14 @@\n \n @limits.setter\n def limits(self, val):\n- if not hastattr(val, '__len__') and len(val) != 2:\n+ if not hasattr(val, '__len__') and len(val) != 2:\n raise RuntimeError('Function limits must be a tuple size 2')\n self.fcn_.SetParLimits(self.idx_, val[0], val[1])\n \n def __getitem__(self, value):\n- if isinstance(value, basestring):\n+ if isinstance(value, six.string_types):\n idx = self.GetParNumber(value)\n- elif isinstance(value, int):\n+ elif isinstance(value, six.integer_types):\n idx = value\n else:\n raise ValueError('Function index must be a integer or a string')\n", "issue": "basestring\nHi there\r\nI found the following issue:\r\nIf I'm using the F1 object from rootpy.plotting and try to access a parameter using [parnr] (the __getitem__) methode, I get the following error:\r\n`NameError: name 'basestring' is not defined`\r\nI'm using python 3.6 which doesn't has the basestring data type anymore..\r\n\r\nhttps://github.com/rootpy/rootpy/blob/457e074056a916fff848978ef68b7f5107856e47/rootpy/plotting/func.py#L63\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom .. 
import QROOT\nfrom ..decorators import snake_case_methods\nfrom .base import Plottable\nfrom ..base import NameOnlyObject\n\n\n__all__ = [\n 'F1',\n 'F2',\n 'F3',\n]\n\nclass BaseFunction(object):\n class ParProxy(object):\n def __init__(self, fcn, idx):\n self.fcn_ = fcn\n self.idx_ = idx\n \n @property\n def index(self):\n return self.idx_\n\n @property\n def name(self):\n return self.fcn_.GetParName(self.idx_)\n \n @name.setter\n def name(self, val):\n return self.fcn_.SetParName(self.idx_, val)\n\n @property\n def value(self):\n return self.fcn_.GetParameter(self.idx_)\n\n @value.setter\n def value(self, val):\n self.fcn_.SetParameter(self.idx_, val)\n\n @property\n def error(self):\n return self.fcn_.GetParError(self.idx_)\n\n @error.setter\n def error(self, val):\n return self.fcn_.SetParError(self.idx_, val)\n\n @property\n def limits(self):\n m = QROOT.Double()\n M = QROOT.Double()\n self.fcn_.GetParLimits(self.idx_, m, M)\n return float(m), float(M)\n\n @limits.setter\n def limits(self, val):\n if not hastattr(val, '__len__') and len(val) != 2:\n raise RuntimeError('Function limits must be a tuple size 2')\n self.fcn_.SetParLimits(self.idx_, val[0], val[1])\n\n def __getitem__(self, value):\n if isinstance(value, basestring):\n idx = self.GetParNumber(value)\n elif isinstance(value, int):\n idx = value\n else:\n raise ValueError('Function index must be a integer or a string')\n return BaseFunction.ParProxy(self, idx)\n\n\n@snake_case_methods\nclass F1(Plottable, NameOnlyObject, BaseFunction, QROOT.TF1):\n _ROOT = QROOT.TF1\n\n def __init__(self, *args, **kwargs):\n name = kwargs.pop('name', None)\n super(F1, self).__init__(*args, name=name)\n self._post_init(**kwargs)\n\n\n@snake_case_methods\nclass F2(Plottable, NameOnlyObject, BaseFunction, QROOT.TF2):\n _ROOT = QROOT.TF2\n\n def __init__(self, *args, **kwargs):\n name = kwargs.pop('name', None)\n super(F2, self).__init__(*args, name=name)\n self._post_init(**kwargs)\n\n\n@snake_case_methods\nclass F3(Plottable, NameOnlyObject, BaseFunction, QROOT.TF3):\n _ROOT = QROOT.TF3\n\n def __init__(self, *args, **kwargs):\n name = kwargs.pop('name', None)\n super(F3, self).__init__(*args, name=name)\n self._post_init(**kwargs)\n", "path": "rootpy/plotting/func.py"}]}
num_tokens_prompt: 1,568
num_tokens_diff: 390
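The rootpy failure is pure Python 2/3 drift: `basestring` exists only on Python 2. The golden diff reaches for `six.string_types`/`six.integer_types`; the same dispatch can be written with a stdlib-only shim, sketched below (the shim and the `lookup` helper are illustrative, not rootpy code):

import sys

if sys.version_info[0] >= 3:
    string_types, integer_types = (str,), (int,)
else:  # Python 2 only
    string_types = (basestring,)  # noqa: F821 -- name exists only on Python 2
    integer_types = (int, long)   # noqa: F821

def lookup(value):
    # Same branching as ParProxy.__getitem__ in the record
    if isinstance(value, string_types):
        return "resolved by name: %s" % (value,)
    if isinstance(value, integer_types):
        return "resolved by index: %d" % value
    raise ValueError("Function index must be an integer or a string")

print(lookup("mean"), "|", lookup(2))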
problem_id: gh_patches_debug_19990
source: rasdani/github-patches
task_type: git_diff
in_source_id: Parsl__parsl-201
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Passing Files breaks over IPP The new File class contains a dictionary that maps DataFutures for each site to which it is being staged and contains a reference to the DataManager. Neither of these are pickle-able. So if we do something like this :+1: ``` data = File("foo.txt") fu = remote_app(inputs=[data]) fu.result() # <--- We'll get an error from here ``` Here's the relevant piece from the exception traceback : ``` File "/usr/local/lib/python3.5/dist-packages/ipyparallel/serialize/serialize.py", line 112, in serialize_object buffers.insert(0, pickle.dumps(cobj, PICKLE_PROTOCOL)) TypeError: can't pickle _thread.lock objects ``` I believe that the File object is the best place to hold the Future information about itself, and that would give us the opportunity to do smarter file staging in the future. So I propose that we fix this with a custom pickler for the File class. This is blocker for 0.5.0. </issue> <code> [start of parsl/data_provider/files.py] 1 """Define the File Type. 2 3 The primary purpose of the File object is to track the protocol to be used 4 to transfer the file as well as to give the appropriate filepath depending 5 on where(client-side, remote-side, intermediary-side) the File.filepath is 6 being called from 7 """ 8 9 import os 10 import logging 11 from urllib.parse import urlparse 12 from parsl.data_provider.data_manager import DataManager 13 14 15 logger = logging.getLogger(__name__) 16 17 18 class File(str): 19 """The Parsl File Class. 20 21 This is planned to be a very simple class that simply 22 captures various attributes of a file, and relies on client-side and worker-side 23 systems to enable to appropriate transfer of files. 24 """ 25 26 def __init__(self, url, dman=None, cache=False, caching_dir=".", staging='direct'): 27 """Construct a File object from a url string. 28 29 Args: 30 - url (string) : url string of the file e.g. 31 - 'input.txt' 32 - 'file:///scratch/proj101/input.txt' 33 - 'globus://go#ep1/~/data/input.txt' 34 - 'globus://ddb59aef-6d04-11e5-ba46-22000b92c6ec/home/johndoe/data/input.txt' 35 - dman (DataManager) : data manager 36 """ 37 self.url = url 38 parsed_url = urlparse(self.url) 39 self.scheme = parsed_url.scheme if parsed_url.scheme else 'file' 40 self.netloc = parsed_url.netloc 41 self.path = parsed_url.path 42 self.filename = os.path.basename(self.path) 43 self.dman = dman if dman else DataManager.get_data_manager() 44 self.data_future = {} 45 if self.scheme != 'file': 46 self.dman.add_file(self) 47 48 self.cache = cache 49 self.caching_dir = caching_dir 50 self.staging = staging 51 52 def __str__(self): 53 return self.filepath 54 55 def __repr__(self): 56 return self.__str__() 57 58 def __fspath__(self): 59 return self.filepath 60 61 @property 62 def filepath(self): 63 """Return the resolved filepath on the side where it is called from. 64 65 The appropriate filepath will be returned when called from within 66 an app running remotely as well as regular python on the client side. 
67 68 Args: 69 - self 70 Returns: 71 - filepath (string) 72 """ 73 if self.scheme == 'globus': 74 if hasattr(self, 'local_path'): 75 return self.local_path 76 77 if 'exec_site' not in globals() or self.staging == 'direct': 78 # Assume local and direct 79 return self.path 80 else: 81 # Return self.path for now 82 return self.path 83 84 def stage_in(self, site=None): 85 """Transport file from the site of origin to local site.""" 86 return self.dman.stage_in(self, site) 87 88 def stage_out(self): 89 """Transport file from local filesystem to origin site.""" 90 return self.dman.stage_out(self) 91 92 def set_data_future(self, df, site=None): 93 self.data_future[site] = df 94 95 def get_data_future(self, site): 96 return self.data_future.get(site) 97 98 99 if __name__ == '__main__': 100 101 x = File('./files.py') 102 [end of parsl/data_provider/files.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/parsl/data_provider/files.py b/parsl/data_provider/files.py
--- a/parsl/data_provider/files.py
+++ b/parsl/data_provider/files.py
@@ -95,6 +95,34 @@
     def get_data_future(self, site):
         return self.data_future.get(site)
 
+    def __getstate__(self):
+        """ Overriding the default pickling method.
+
+        The File object get's pickled and transmitted to remote sites during app
+        execution. This enables pickling while retaining the lockable resources
+        to the DFK/Client side.
+        """
+
+        state = self.__dict__.copy()
+
+        # We have already made a copy of the future objects, they are now no longer
+        # reliable as means to wait for the staging events
+        for site in state["data_future"]:
+            # This is assumed to be safe, since the data_future represents staging to a specific site
+            # and a site will only have one filepath.
+            state["data_future"][site] = state["data_future"][site].filepath
+
+        state["dman"] = None
+
+        return state
+
+    def __setstate__(self, state):
+        """ Overloading the default pickle method to reconstruct a File from serialized form
+
+        This might require knowledge of whethere a DataManager is already present in the context.
+        """
+        self.__dict__.update(state)
+
 
 
 if __name__ == '__main__':
{"golden_diff": "diff --git a/parsl/data_provider/files.py b/parsl/data_provider/files.py\n--- a/parsl/data_provider/files.py\n+++ b/parsl/data_provider/files.py\n@@ -95,6 +95,34 @@\n def get_data_future(self, site):\n return self.data_future.get(site)\n \n+ def __getstate__(self):\n+ \"\"\" Overriding the default pickling method.\n+\n+ The File object get's pickled and transmitted to remote sites during app\n+ execution. This enables pickling while retaining the lockable resources\n+ to the DFK/Client side.\n+ \"\"\"\n+\n+ state = self.__dict__.copy()\n+\n+ # We have already made a copy of the future objects, they are now no longer\n+ # reliable as means to wait for the staging events\n+ for site in state[\"data_future\"]:\n+ # This is assumed to be safe, since the data_future represents staging to a specific site\n+ # and a site will only have one filepath.\n+ state[\"data_future\"][site] = state[\"data_future\"][site].filepath\n+\n+ state[\"dman\"] = None\n+\n+ return state\n+\n+ def __setstate__(self, state):\n+ \"\"\" Overloading the default pickle method to reconstruct a File from serialized form\n+\n+ This might require knowledge of whethere a DataManager is already present in the context.\n+ \"\"\"\n+ self.__dict__.update(state)\n+\n \n if __name__ == '__main__':\n", "issue": "Passing Files breaks over IPP\nThe new File class contains a dictionary that maps DataFutures for each site to which it is being staged and contains a reference to the DataManager. Neither of these are pickle-able.\r\n\r\nSo if we do something like this :+1: \r\n```\r\ndata = File(\"foo.txt\")\r\nfu = remote_app(inputs=[data])\r\nfu.result() # <--- We'll get an error from here\r\n```\r\nHere's the relevant piece from the exception traceback :\r\n```\r\nFile \"/usr/local/lib/python3.5/dist-packages/ipyparallel/serialize/serialize.py\", line 112, in serialize_object\r\n buffers.insert(0, pickle.dumps(cobj, PICKLE_PROTOCOL))\r\nTypeError: can't pickle _thread.lock objects\r\n```\r\n\r\nI believe that the File object is the best place to hold the Future information about itself, and that would give us the opportunity to do smarter file staging in the future. 
So I propose that we fix this with a custom pickler for the File class.\r\n\r\nThis is blocker for 0.5.0.\n", "before_files": [{"content": "\"\"\"Define the File Type.\n\nThe primary purpose of the File object is to track the protocol to be used\nto transfer the file as well as to give the appropriate filepath depending\non where(client-side, remote-side, intermediary-side) the File.filepath is\nbeing called from\n\"\"\"\n\nimport os\nimport logging\nfrom urllib.parse import urlparse\nfrom parsl.data_provider.data_manager import DataManager\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass File(str):\n \"\"\"The Parsl File Class.\n\n This is planned to be a very simple class that simply\n captures various attributes of a file, and relies on client-side and worker-side\n systems to enable to appropriate transfer of files.\n \"\"\"\n\n def __init__(self, url, dman=None, cache=False, caching_dir=\".\", staging='direct'):\n \"\"\"Construct a File object from a url string.\n\n Args:\n - url (string) : url string of the file e.g.\n - 'input.txt'\n - 'file:///scratch/proj101/input.txt'\n - 'globus://go#ep1/~/data/input.txt'\n - 'globus://ddb59aef-6d04-11e5-ba46-22000b92c6ec/home/johndoe/data/input.txt'\n - dman (DataManager) : data manager\n \"\"\"\n self.url = url\n parsed_url = urlparse(self.url)\n self.scheme = parsed_url.scheme if parsed_url.scheme else 'file'\n self.netloc = parsed_url.netloc\n self.path = parsed_url.path\n self.filename = os.path.basename(self.path)\n self.dman = dman if dman else DataManager.get_data_manager()\n self.data_future = {}\n if self.scheme != 'file':\n self.dman.add_file(self)\n\n self.cache = cache\n self.caching_dir = caching_dir\n self.staging = staging\n\n def __str__(self):\n return self.filepath\n\n def __repr__(self):\n return self.__str__()\n\n def __fspath__(self):\n return self.filepath\n\n @property\n def filepath(self):\n \"\"\"Return the resolved filepath on the side where it is called from.\n\n The appropriate filepath will be returned when called from within\n an app running remotely as well as regular python on the client side.\n\n Args:\n - self\n Returns:\n - filepath (string)\n \"\"\"\n if self.scheme == 'globus':\n if hasattr(self, 'local_path'):\n return self.local_path\n\n if 'exec_site' not in globals() or self.staging == 'direct':\n # Assume local and direct\n return self.path\n else:\n # Return self.path for now\n return self.path\n\n def stage_in(self, site=None):\n \"\"\"Transport file from the site of origin to local site.\"\"\"\n return self.dman.stage_in(self, site)\n\n def stage_out(self):\n \"\"\"Transport file from local filesystem to origin site.\"\"\"\n return self.dman.stage_out(self)\n\n def set_data_future(self, df, site=None):\n self.data_future[site] = df\n\n def get_data_future(self, site):\n return self.data_future.get(site)\n\n\nif __name__ == '__main__':\n\n x = File('./files.py')\n", "path": "parsl/data_provider/files.py"}]}
num_tokens_prompt: 1,680
num_tokens_diff: 333
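The parsl record generalizes beyond IPP: pickling fails whenever instance state holds a lock, and both the `DataManager` and live futures do. The `__getstate__`/`__setstate__` pattern from the golden diff is demonstrated here on a deliberately tiny class (this `Staged` class is invented for the sketch, not parsl's `File`):

import pickle
import threading

class Staged:
    def __init__(self, path):
        self.path = path
        self.lock = threading.Lock()  # unpicklable, like the DataManager's internals

    def __getstate__(self):
        state = self.__dict__.copy()
        state["lock"] = None          # strip the unpicklable resource before transport
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        self.lock = threading.Lock()  # rebuild a fresh lock on the remote side

clone = pickle.loads(pickle.dumps(Staged("foo.txt")))
print(clone.path, type(clone.lock))   # the object now survives the round trip

Note the same trade-off the golden diff accepts: the reconstructed object loses the live resource and keeps only a serializable summary of it.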
problem_id: gh_patches_debug_64419
source: rasdani/github-patches
task_type: git_diff
in_source_id: pwndbg__pwndbg-584
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> r2: 'NoneType' object has no attribute 'cast' (<class 'AttributeError'>) ### Description This happens when i initiate r2 after loading a binary in pwndbg I have tested both in wsl and a 64bit ubuntu machine same behavior sorta ### Steps to reproduce 1. Load a binary 2. Run r2 Exception occured: r2: 'NoneType' object has no attribute 'cast' (<class 'AttributeError'>) Traceback (most recent call last): File "/root/reverse/pwndbg/pwndbg/commands/__init__.py", line 135, in __call__ return self.function(*args, **kwargs) File "/root/reverse/pwndbg/pwndbg/commands/__init__.py", line 215, in _OnlyWithFile return function(*a, **kw) File "/root/reverse/pwndbg/pwndbg/commands/radare2.py", line 28, in r2 addr = pwndbg.regs.pc File "/root/reverse/pwndbg/pwndbg/memoize.py", line 48, in __call__ value = self.func(*args, **kwargs) File "/root/reverse/pwndbg/pwndbg/regs.py", line 280, in __getattr__ value = value.cast(pwndbg.typeinfo.ptrdiff) AttributeError: 'NoneType' object has no attribute 'cast' ### My setup Gdb: 7.11.1 Python: 3.5.2 (default, Nov 12 2018, 13:43:14) [GCC 5.4.0 20160609] Pwndbg: 1.1.0 build: 054f209 Capstone: 4.0.1024 Unicorn: 1.0.1 </issue> <code> [start of pwndbg/commands/radare2.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 from __future__ import absolute_import 4 from __future__ import division 5 from __future__ import print_function 6 from __future__ import unicode_literals 7 8 import argparse 9 import subprocess 10 11 import pwndbg.commands 12 13 parser = argparse.ArgumentParser(description='Launches radare2', 14 epilog="Example: r2 -- -S -AA") 15 parser.add_argument('--no-seek', action='store_true', 16 help='Do not seek to current pc') 17 parser.add_argument('arguments', nargs='*', type=str, 18 help='Arguments to pass to radare') 19 20 21 @pwndbg.commands.ArgparsedCommand(parser) 22 @pwndbg.commands.OnlyWithFile 23 def r2(arguments, no_seek=False): 24 filename = pwndbg.file.get_file(pwndbg.proc.exe) 25 26 # Build up the command line to run 27 cmd = ['radare2', filename] 28 addr = pwndbg.regs.pc 29 if pwndbg.elf.get_elf_info(filename).is_pie: 30 addr -= pwndbg.elf.exe().address 31 if not no_seek and pwndbg.proc.alive: 32 cmd.extend(['-s', hex(addr)]) 33 cmd += arguments 34 35 try: 36 subprocess.call(cmd) 37 except Exception: 38 print("Could not run radare2. Please ensure it's installed and in $PATH.") 39 [end of pwndbg/commands/radare2.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pwndbg/commands/radare2.py b/pwndbg/commands/radare2.py
--- a/pwndbg/commands/radare2.py
+++ b/pwndbg/commands/radare2.py
@@ -25,11 +25,12 @@
 
     # Build up the command line to run
    cmd = ['radare2', filename]
-    addr = pwndbg.regs.pc
-    if pwndbg.elf.get_elf_info(filename).is_pie:
-        addr -= pwndbg.elf.exe().address
-    if not no_seek and pwndbg.proc.alive:
-        cmd.extend(['-s', hex(addr)])
+    if pwndbg.proc.alive:
+        addr = pwndbg.regs.pc
+        if pwndbg.elf.get_elf_info(filename).is_pie:
+            addr -= pwndbg.elf.exe().address
+        if not no_seek:
+            cmd.extend(['-s', hex(addr)])
     cmd += arguments
 
     try:
{"golden_diff": "diff --git a/pwndbg/commands/radare2.py b/pwndbg/commands/radare2.py\n--- a/pwndbg/commands/radare2.py\n+++ b/pwndbg/commands/radare2.py\n@@ -25,11 +25,12 @@\n \n # Build up the command line to run\n cmd = ['radare2', filename]\n- addr = pwndbg.regs.pc\n- if pwndbg.elf.get_elf_info(filename).is_pie:\n- addr -= pwndbg.elf.exe().address\n- if not no_seek and pwndbg.proc.alive:\n- cmd.extend(['-s', hex(addr)])\n+ if pwndbg.proc.alive:\n+ addr = pwndbg.regs.pc\n+ if pwndbg.elf.get_elf_info(filename).is_pie:\n+ addr -= pwndbg.elf.exe().address\n+ if not no_seek:\n+ cmd.extend(['-s', hex(addr)])\n cmd += arguments\n \n try:\n", "issue": "r2: 'NoneType' object has no attribute 'cast' (<class 'AttributeError'>)\n### Description\r\n\r\n\r\nThis happens when i initiate r2 after loading a binary in pwndbg \r\nI have tested both in wsl and a 64bit ubuntu machine same behavior sorta \r\n\r\n\r\n### Steps to reproduce\r\n\r\n\r\n1. Load a binary \r\n2. Run r2 \r\nException occured: r2: 'NoneType' object has no attribute 'cast' (<class 'AttributeError'>)\r\nTraceback (most recent call last):\r\n File \"/root/reverse/pwndbg/pwndbg/commands/__init__.py\", line 135, in __call__\r\n return self.function(*args, **kwargs)\r\n File \"/root/reverse/pwndbg/pwndbg/commands/__init__.py\", line 215, in _OnlyWithFile\r\n return function(*a, **kw)\r\n File \"/root/reverse/pwndbg/pwndbg/commands/radare2.py\", line 28, in r2\r\n addr = pwndbg.regs.pc\r\n File \"/root/reverse/pwndbg/pwndbg/memoize.py\", line 48, in __call__\r\n value = self.func(*args, **kwargs)\r\n File \"/root/reverse/pwndbg/pwndbg/regs.py\", line 280, in __getattr__\r\n value = value.cast(pwndbg.typeinfo.ptrdiff)\r\nAttributeError: 'NoneType' object has no attribute 'cast'\r\n\r\n\r\n\r\n\r\n\r\n### My setup\r\n\r\n\r\nGdb: 7.11.1\r\nPython: 3.5.2 (default, Nov 12 2018, 13:43:14) [GCC 5.4.0 20160609]\r\nPwndbg: 1.1.0 build: 054f209\r\nCapstone: 4.0.1024\r\nUnicorn: 1.0.1\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport argparse\nimport subprocess\n\nimport pwndbg.commands\n\nparser = argparse.ArgumentParser(description='Launches radare2',\n epilog=\"Example: r2 -- -S -AA\")\nparser.add_argument('--no-seek', action='store_true',\n help='Do not seek to current pc')\nparser.add_argument('arguments', nargs='*', type=str,\n help='Arguments to pass to radare')\n\n\[email protected](parser)\[email protected]\ndef r2(arguments, no_seek=False):\n filename = pwndbg.file.get_file(pwndbg.proc.exe)\n\n # Build up the command line to run\n cmd = ['radare2', filename]\n addr = pwndbg.regs.pc\n if pwndbg.elf.get_elf_info(filename).is_pie:\n addr -= pwndbg.elf.exe().address\n if not no_seek and pwndbg.proc.alive:\n cmd.extend(['-s', hex(addr)])\n cmd += arguments\n\n try:\n subprocess.call(cmd)\n except Exception:\n print(\"Could not run radare2. Please ensure it's installed and in $PATH.\")\n", "path": "pwndbg/commands/radare2.py"}]}
1,349
233
gh_patches_debug_648
rasdani/github-patches
git_diff
pex-tool__pex-2000
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Release 2.1.117 On the docket: + [x] Published pex on github no longer works with PyPy since 2.1.109 #1995 </issue> <code> [start of pex/version.py] 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = "2.1.116" 5 [end of pex/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
 # Licensed under the Apache License, Version 2.0 (see LICENSE).
 
-__version__ = "2.1.116"
+__version__ = "2.1.117"
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.116\"\n+__version__ = \"2.1.117\"\n", "issue": "Release 2.1.117\nOn the docket:\r\n+ [x] Published pex on github no longer works with PyPy since 2.1.109 #1995\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.116\"\n", "path": "pex/version.py"}]}
num_tokens_prompt: 629
num_tokens_diff: 99
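Version-bump records like this one admit a mechanical sanity check: the new `__version__` must compare greater than the old. A one-line sketch, assuming the `packaging` library is installed (the two version strings come from the record itself):

from packaging.version import Version

old, new = Version("2.1.116"), Version("2.1.117")
assert new > old, "a release must move the version forward"
print("bump OK:", old, "->", new)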
problem_id: gh_patches_debug_10025
source: rasdani/github-patches
task_type: git_diff
in_source_id: bridgecrewio__checkov-5170
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> CKV_DOCKER_11 false positive when `--platform` is used **Describe the issue** CKV_DOCKER_11 false positive when `--platform` is used (possibly other arguments as well) For reference: _"CKV_DOCKER_11: "Ensure From Alias are unique for multistage builds."_ In other words, make sure you add `as myAlias` at the end of your `FROM` line **Examples** This will PASS as expected: `FROM node:16 as build` Now, add `--platform` and it will FAIL: `FROM --platform=linux/amd64 node:16 as build` **Version (please complete the following information):** ``` > checkov -v 2.3.240 ``` **Additional context** Add any other context about the problem here. </issue> <code> [start of checkov/dockerfile/checks/AliasIsUnique.py] 1 from __future__ import annotations 2 3 from typing import TYPE_CHECKING 4 5 from checkov.common.models.enums import CheckCategories, CheckResult 6 from checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck 7 8 if TYPE_CHECKING: 9 from dockerfile_parse.parser import _Instruction 10 11 12 class AliasIsUnique(BaseDockerfileCheck): 13 def __init__(self) -> None: 14 """ 15 Ensure From Alias are unique for multistage builds. 16 """ 17 name = "Ensure From Alias are unique for multistage builds." 18 id = "CKV_DOCKER_11" 19 supported_instructions = ("FROM",) 20 categories = (CheckCategories.CONVENTION,) 21 super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions) 22 23 def scan_resource_conf(self, conf: list[_Instruction]) -> tuple[CheckResult, list[_Instruction] | None]: 24 alias = [] 25 for instruction in conf: 26 if " as " in instruction["value"]: 27 temp = instruction["value"].split() 28 alias += [temp[2]] 29 30 if len(alias) == len(set(alias)): 31 return CheckResult.PASSED, None 32 else: 33 return CheckResult.FAILED, [conf[0]] 34 35 36 check = AliasIsUnique() 37 [end of checkov/dockerfile/checks/AliasIsUnique.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/checkov/dockerfile/checks/AliasIsUnique.py b/checkov/dockerfile/checks/AliasIsUnique.py --- a/checkov/dockerfile/checks/AliasIsUnique.py +++ b/checkov/dockerfile/checks/AliasIsUnique.py @@ -24,13 +24,12 @@ alias = [] for instruction in conf: if " as " in instruction["value"]: - temp = instruction["value"].split() - alias += [temp[2]] + alias.append(instruction["value"].rsplit(maxsplit=1)[-1]) if len(alias) == len(set(alias)): return CheckResult.PASSED, None - else: - return CheckResult.FAILED, [conf[0]] + + return CheckResult.FAILED, [conf[0]] check = AliasIsUnique()
{"golden_diff": "diff --git a/checkov/dockerfile/checks/AliasIsUnique.py b/checkov/dockerfile/checks/AliasIsUnique.py\n--- a/checkov/dockerfile/checks/AliasIsUnique.py\n+++ b/checkov/dockerfile/checks/AliasIsUnique.py\n@@ -24,13 +24,12 @@\n alias = []\n for instruction in conf:\n if \" as \" in instruction[\"value\"]:\n- temp = instruction[\"value\"].split()\n- alias += [temp[2]]\n+ alias.append(instruction[\"value\"].rsplit(maxsplit=1)[-1])\n \n if len(alias) == len(set(alias)):\n return CheckResult.PASSED, None\n- else:\n- return CheckResult.FAILED, [conf[0]]\n+\n+ return CheckResult.FAILED, [conf[0]]\n \n \n check = AliasIsUnique()\n", "issue": "CKV_DOCKER_11 false positive when `--platform` is used\n**Describe the issue**\r\n\r\nCKV_DOCKER_11 false positive when `--platform` is used (possibly other arguments as well)\r\n\r\nFor reference: _\"CKV_DOCKER_11: \"Ensure From Alias are unique for multistage builds.\"_ In other words, make sure you add `as myAlias` at the end of your `FROM` line\r\n\r\n**Examples**\r\n\r\nThis will PASS as expected:\r\n`FROM node:16 as build`\r\n\r\nNow, add `--platform` and it will FAIL:\r\n`FROM --platform=linux/amd64 node:16 as build`\r\n\r\n**Version (please complete the following information):**\r\n```\r\n> checkov -v \r\n2.3.240\r\n```\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck\n\nif TYPE_CHECKING:\n from dockerfile_parse.parser import _Instruction\n\n\nclass AliasIsUnique(BaseDockerfileCheck):\n def __init__(self) -> None:\n \"\"\"\n Ensure From Alias are unique for multistage builds.\n \"\"\"\n name = \"Ensure From Alias are unique for multistage builds.\"\n id = \"CKV_DOCKER_11\"\n supported_instructions = (\"FROM\",)\n categories = (CheckCategories.CONVENTION,)\n super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)\n\n def scan_resource_conf(self, conf: list[_Instruction]) -> tuple[CheckResult, list[_Instruction] | None]:\n alias = []\n for instruction in conf:\n if \" as \" in instruction[\"value\"]:\n temp = instruction[\"value\"].split()\n alias += [temp[2]]\n\n if len(alias) == len(set(alias)):\n return CheckResult.PASSED, None\n else:\n return CheckResult.FAILED, [conf[0]]\n\n\ncheck = AliasIsUnique()\n", "path": "checkov/dockerfile/checks/AliasIsUnique.py"}]}
1,080
188
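A standalone look at why the record above misparses and why the golden diff's parse holds: with `--platform`, the alias is no longer the third whitespace token, but it is always the last one. The sample `FROM` values come from the issue; the two helper functions are illustrative scaffolding, not checkov code.

```
# The instruction "value" is everything after the FROM keyword.
def alias_by_index(value):
    # Original approach: assumes a fixed "<image> as <alias>" token layout.
    return value.split()[2]

def alias_by_rsplit(value):
    # Golden-diff approach: the alias is always the final token.
    return value.rsplit(maxsplit=1)[-1]

plain = "node:16 as build"
with_platform = "--platform=linux/amd64 node:16 as build"

assert alias_by_index(plain) == "build"
assert alias_by_rsplit(plain) == "build"
assert alias_by_rsplit(with_platform) == "build"  # still the alias
assert alias_by_index(with_platform) == "as"      # wrong token: any two such
                                                  # stages both yield "as" and
                                                  # trip the uniqueness check
```

Using `rsplit(maxsplit=1)` also tolerates any number of extra flags before the image, not just `--platform`.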
gh_patches_debug_17639
rasdani/github-patches
git_diff
wagtail__wagtail-715
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> search fields can't be overridden In the past, you were able to override a search field of a parent class by redefining it. This functionality appears to be broken in Wagtail 0.7 </issue> <code> [start of wagtail/wagtailsearch/index.py] 1 import warnings 2 3 from six import string_types 4 5 from django.db import models 6 7 8 class Indexed(object): 9 @classmethod 10 def indexed_get_parent(cls, require_model=True): 11 for base in cls.__bases__: 12 if issubclass(base, Indexed) and (issubclass(base, models.Model) or require_model is False): 13 return base 14 15 @classmethod 16 def indexed_get_content_type(cls): 17 # Work out content type 18 content_type = (cls._meta.app_label + '_' + cls.__name__).lower() 19 20 # Get parent content type 21 parent = cls.indexed_get_parent() 22 if parent: 23 parent_content_type = parent.indexed_get_content_type() 24 return parent_content_type + '_' + content_type 25 else: 26 return content_type 27 28 @classmethod 29 def indexed_get_toplevel_content_type(cls): 30 # Get parent content type 31 parent = cls.indexed_get_parent() 32 if parent: 33 return parent.indexed_get_content_type() 34 else: 35 # At toplevel, return this content type 36 return (cls._meta.app_label + '_' + cls.__name__).lower() 37 38 @classmethod 39 def get_search_fields(cls): 40 return cls.search_fields 41 42 @classmethod 43 def get_searchable_search_fields(cls): 44 return filter(lambda field: isinstance(field, SearchField), cls.get_search_fields()) 45 46 @classmethod 47 def get_filterable_search_fields(cls): 48 return filter(lambda field: isinstance(field, FilterField), cls.get_search_fields()) 49 50 @classmethod 51 def get_indexed_objects(cls): 52 return cls.objects.all() 53 54 search_fields = () 55 56 57 class BaseField(object): 58 suffix = '' 59 60 def __init__(self, field_name, **kwargs): 61 self.field_name = field_name 62 self.kwargs = kwargs 63 64 def get_field(self, cls): 65 return cls._meta.get_field_by_name(self.field_name)[0] 66 67 def get_attname(self, cls): 68 try: 69 field = self.get_field(cls) 70 return field.attname 71 except models.fields.FieldDoesNotExist: 72 return self.field_name 73 74 def get_index_name(self, cls): 75 return self.get_attname(cls) + self.suffix 76 77 def get_type(self, cls): 78 if 'type' in self.kwargs: 79 return self.kwargs['type'] 80 81 try: 82 field = self.get_field(cls) 83 return field.get_internal_type() 84 except models.fields.FieldDoesNotExist: 85 return 'CharField' 86 87 def get_value(self, obj): 88 try: 89 field = self.get_field(obj.__class__) 90 return field._get_val_from_obj(obj) 91 except models.fields.FieldDoesNotExist: 92 value = getattr(obj, self.field_name, None) 93 if hasattr(value, '__call__'): 94 value = value() 95 return value 96 97 def __repr__(self): 98 return '<%s: %s>' % (self.__class__.__name__, self.field_name) 99 100 101 class SearchField(BaseField): 102 def __init__(self, field_name, boost=None, partial_match=False, **kwargs): 103 super(SearchField, self).__init__(field_name, **kwargs) 104 self.boost = boost 105 self.partial_match = partial_match 106 107 108 class FilterField(BaseField): 109 suffix = '_filter' 110 111 [end of wagtail/wagtailsearch/index.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/wagtail/wagtailsearch/index.py b/wagtail/wagtailsearch/index.py --- a/wagtail/wagtailsearch/index.py +++ b/wagtail/wagtailsearch/index.py @@ -37,15 +37,26 @@ @classmethod def get_search_fields(cls): - return cls.search_fields + search_fields = {} + + for field in cls.search_fields: + search_fields[(type(field), field.field_name)] = field + + return list(search_fields.values()) @classmethod def get_searchable_search_fields(cls): - return filter(lambda field: isinstance(field, SearchField), cls.get_search_fields()) + return [ + field for field in cls.get_search_fields() + if isinstance(field, SearchField) + ] @classmethod def get_filterable_search_fields(cls): - return filter(lambda field: isinstance(field, FilterField), cls.get_search_fields()) + return [ + field for field in cls.get_search_fields() + if isinstance(field, FilterField) + ] @classmethod def get_indexed_objects(cls):
{"golden_diff": "diff --git a/wagtail/wagtailsearch/index.py b/wagtail/wagtailsearch/index.py\n--- a/wagtail/wagtailsearch/index.py\n+++ b/wagtail/wagtailsearch/index.py\n@@ -37,15 +37,26 @@\n \n @classmethod\n def get_search_fields(cls):\n- return cls.search_fields\n+ search_fields = {}\n+\n+ for field in cls.search_fields:\n+ search_fields[(type(field), field.field_name)] = field\n+\n+ return list(search_fields.values())\n \n @classmethod\n def get_searchable_search_fields(cls):\n- return filter(lambda field: isinstance(field, SearchField), cls.get_search_fields())\n+ return [\n+ field for field in cls.get_search_fields()\n+ if isinstance(field, SearchField)\n+ ]\n \n @classmethod\n def get_filterable_search_fields(cls):\n- return filter(lambda field: isinstance(field, FilterField), cls.get_search_fields())\n+ return [\n+ field for field in cls.get_search_fields()\n+ if isinstance(field, FilterField)\n+ ]\n \n @classmethod\n def get_indexed_objects(cls):\n", "issue": "search fields can't be overridden\nIn the past, you were able to override a search field of a parent class by redefining it. This functionality appears to be broken in Wagtail 0.7\n\n", "before_files": [{"content": "import warnings\n\nfrom six import string_types\n\nfrom django.db import models\n\n\nclass Indexed(object):\n @classmethod\n def indexed_get_parent(cls, require_model=True):\n for base in cls.__bases__:\n if issubclass(base, Indexed) and (issubclass(base, models.Model) or require_model is False):\n return base\n\n @classmethod\n def indexed_get_content_type(cls):\n # Work out content type\n content_type = (cls._meta.app_label + '_' + cls.__name__).lower()\n\n # Get parent content type\n parent = cls.indexed_get_parent()\n if parent:\n parent_content_type = parent.indexed_get_content_type()\n return parent_content_type + '_' + content_type\n else:\n return content_type\n\n @classmethod\n def indexed_get_toplevel_content_type(cls):\n # Get parent content type\n parent = cls.indexed_get_parent()\n if parent:\n return parent.indexed_get_content_type()\n else:\n # At toplevel, return this content type\n return (cls._meta.app_label + '_' + cls.__name__).lower()\n\n @classmethod\n def get_search_fields(cls):\n return cls.search_fields\n\n @classmethod\n def get_searchable_search_fields(cls):\n return filter(lambda field: isinstance(field, SearchField), cls.get_search_fields())\n\n @classmethod\n def get_filterable_search_fields(cls):\n return filter(lambda field: isinstance(field, FilterField), cls.get_search_fields())\n\n @classmethod\n def get_indexed_objects(cls):\n return cls.objects.all()\n\n search_fields = ()\n\n\nclass BaseField(object):\n suffix = ''\n\n def __init__(self, field_name, **kwargs):\n self.field_name = field_name\n self.kwargs = kwargs\n\n def get_field(self, cls):\n return cls._meta.get_field_by_name(self.field_name)[0]\n\n def get_attname(self, cls):\n try:\n field = self.get_field(cls)\n return field.attname\n except models.fields.FieldDoesNotExist:\n return self.field_name\n\n def get_index_name(self, cls):\n return self.get_attname(cls) + self.suffix\n\n def get_type(self, cls):\n if 'type' in self.kwargs:\n return self.kwargs['type']\n\n try:\n field = self.get_field(cls)\n return field.get_internal_type()\n except models.fields.FieldDoesNotExist:\n return 'CharField'\n\n def get_value(self, obj):\n try:\n field = self.get_field(obj.__class__)\n return field._get_val_from_obj(obj)\n except models.fields.FieldDoesNotExist:\n value = getattr(obj, self.field_name, None)\n if hasattr(value, 
'__call__'):\n value = value()\n return value\n\n def __repr__(self):\n return '<%s: %s>' % (self.__class__.__name__, self.field_name)\n\n\nclass SearchField(BaseField):\n def __init__(self, field_name, boost=None, partial_match=False, **kwargs):\n super(SearchField, self).__init__(field_name, **kwargs)\n self.boost = boost\n self.partial_match = partial_match\n\n\nclass FilterField(BaseField):\n suffix = '_filter'\n\n", "path": "wagtail/wagtailsearch/index.py"}]}
1,512
253
gh_patches_debug_29535
rasdani/github-patches
git_diff
conan-io__conan-center-index-7032
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [package] imgui/1.84.1: Shared library does not automatically import global data symbols ### Package and Environment Details (include every applicable attribute) * Package Name/Version: **imgui/1.84.1** * Operating System+version: **Windows 10 21H1 Build 19043.1165** * Compiler+version: **Visual Studio 16 (2019)** * Docker image: **N/A** * Conan version: **conan 1.39.0** * Python version: **Python 3.9.6** ### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use) ``` [settings] os=Windows os_build=Windows arch=x86_64 arch_build=x86_64 compiler=Visual Studio compiler.version=16 build_type=Release [options] [conf] [build_requires] [env] ``` ### Steps to reproduce (Include if Applicable) Try to reference any code that uses global data symbols since those need to use `__declspec(dllimport)` when using [`WINDOWS_EXPORT_ALL_SYMBOLS`](https://cmake.org/cmake/help/latest/prop_tgt/WINDOWS_EXPORT_ALL_SYMBOLS.html#windows-export-all-symbols). One example could be using [`ImGuiTextBuffer`](https://github.com/ocornut/imgui/blob/v1.84.1/imgui.h#L2078) (which has `IMGUI_API static char EmptyString[1];`). The following diff is for ImGui's [`test_package.cpp`](https://github.com/conan-io/conan-center-index/blob/master/recipes/imgui/all/test_package/test_package.cpp) and can reproduce this issue. ``` --- a/recipes/imgui/all/test_package/test_package.cpp +++ b/recipes/imgui/all/test_package/test_package.cpp @@ -5,6 +5,9 @@ int main(int, char**) { ImGuiContext* context =ImGui::CreateContext(); ImGuiIO& io = ImGui::GetIO(); + + ImGuiTextBuffer textBuffer; + textBuffer.append("Hello, ImGui"); // Build atlas unsigned char* tex_pixels = NULL; @@ -20,6 +23,7 @@ int main(int, char**) static float f = 0.0f; ImGui::Text("Hello, world!"); + ImGui::Text(textBuffer.begin()); ImGui::SliderFloat("float", &f, 0.0f, 1.0f); ImGui::Text("Application average %.3f ms/frame (%.1f FPS)", 1000.0f / io.Framerate, io.Framerate); ImGui::ShowDemoWindow(NULL); ``` Then, try to create the package with `conan create . imgui/1.84.1@ -o imgui:shared=True`. ### Logs (Include/Attach if Applicable) <details><summary>Click to expand log</summary> ``` test_package.obj : error LNK2019: unresolved external symbol "public: static char * ImGuiTextBuffer::EmptyString" (?Emp tyString@ImGuiTextBuffer@@2PADA) referenced in function main ``` </details> I think the simplest solution would be to add something like this ``` if self.options.shared and self.settings.os == "Windows": self.cpp_info.defines.append("IMGUI_API=__declspec(dllimport)") ``` I'd be happy to open a PR with this change. 
</issue> <code> [start of recipes/imgui/all/conanfile.py] 1 from conans import ConanFile, CMake, tools 2 import os 3 4 required_conan_version = ">=1.33.0" 5 6 7 class IMGUIConan(ConanFile): 8 name = "imgui" 9 url = "https://github.com/conan-io/conan-center-index" 10 homepage = "https://github.com/ocornut/imgui" 11 description = "Bloat-free Immediate Mode Graphical User interface for C++ with minimal dependencies" 12 topics = ("conan", "imgui", "gui", "graphical") 13 license = "MIT" 14 15 exports_sources = ["CMakeLists.txt"] 16 generators = "cmake" 17 18 settings = "os", "arch", "compiler", "build_type" 19 options = { 20 "shared": [True, False], 21 "fPIC": [True, False] 22 } 23 default_options = { 24 "shared": False, 25 "fPIC": True 26 } 27 28 _cmake = None 29 30 @property 31 def _source_subfolder(self): 32 return "source_subfolder" 33 34 def config_options(self): 35 if self.settings.os == "Windows": 36 del self.options.fPIC 37 38 def configure(self): 39 if self.options.shared: 40 del self.options.fPIC 41 42 def source(self): 43 tools.get(**self.conan_data["sources"][self.version], 44 destination=self._source_subfolder, strip_root=True) 45 46 def _configure_cmake(self): 47 if self._cmake: 48 return self._cmake 49 self._cmake = CMake(self) 50 self._cmake.configure() 51 return self._cmake 52 53 def build(self): 54 cmake = self._configure_cmake() 55 cmake.build() 56 57 def package(self): 58 self.copy(pattern="LICENSE.txt", dst="licenses", src=self._source_subfolder) 59 backends_folder = src=os.path.join( 60 self._source_subfolder, 61 "backends" if tools.Version(self.version) >= "1.80" else "examples" 62 ) 63 self.copy(pattern="imgui_impl_*", 64 dst=os.path.join("res", "bindings"), 65 src=backends_folder) 66 cmake = self._configure_cmake() 67 cmake.install() 68 69 def package_info(self): 70 self.cpp_info.libs = ["imgui"] 71 if self.settings.os == "Linux": 72 self.cpp_info.system_libs.append("m") 73 self.cpp_info.srcdirs = [os.path.join("res", "bindings")] 74 75 bin_path = os.path.join(self.package_folder, "bin") 76 self.output.info("Appending PATH env var with : {}".format(bin_path)) 77 self.env_info.PATH.append(bin_path) 78 [end of recipes/imgui/all/conanfile.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/recipes/imgui/all/conanfile.py b/recipes/imgui/all/conanfile.py --- a/recipes/imgui/all/conanfile.py +++ b/recipes/imgui/all/conanfile.py @@ -12,19 +12,18 @@ topics = ("conan", "imgui", "gui", "graphical") license = "MIT" - exports_sources = ["CMakeLists.txt"] - generators = "cmake" - settings = "os", "arch", "compiler", "build_type" options = { "shared": [True, False], - "fPIC": [True, False] + "fPIC": [True, False], } default_options = { "shared": False, - "fPIC": True + "fPIC": True, } + exports_sources = "CMakeLists.txt" + generators = "cmake" _cmake = None @property @@ -56,7 +55,7 @@ def package(self): self.copy(pattern="LICENSE.txt", dst="licenses", src=self._source_subfolder) - backends_folder = src=os.path.join( + backends_folder = os.path.join( self._source_subfolder, "backends" if tools.Version(self.version) >= "1.80" else "examples" ) @@ -68,6 +67,7 @@ def package_info(self): self.cpp_info.libs = ["imgui"] + self.cpp_info.defines.append("IMGUI_USER_CONFIG=\"imgui_user_config.h\"") if self.settings.os == "Linux": self.cpp_info.system_libs.append("m") self.cpp_info.srcdirs = [os.path.join("res", "bindings")]
{"golden_diff": "diff --git a/recipes/imgui/all/conanfile.py b/recipes/imgui/all/conanfile.py\n--- a/recipes/imgui/all/conanfile.py\n+++ b/recipes/imgui/all/conanfile.py\n@@ -12,19 +12,18 @@\n topics = (\"conan\", \"imgui\", \"gui\", \"graphical\")\n license = \"MIT\"\n \n- exports_sources = [\"CMakeLists.txt\"]\n- generators = \"cmake\"\n-\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n- \"fPIC\": [True, False]\n+ \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n- \"fPIC\": True\n+ \"fPIC\": True,\n }\n \n+ exports_sources = \"CMakeLists.txt\"\n+ generators = \"cmake\"\n _cmake = None\n \n @property\n@@ -56,7 +55,7 @@\n \n def package(self):\n self.copy(pattern=\"LICENSE.txt\", dst=\"licenses\", src=self._source_subfolder)\n- backends_folder = src=os.path.join(\n+ backends_folder = os.path.join(\n self._source_subfolder,\n \"backends\" if tools.Version(self.version) >= \"1.80\" else \"examples\"\n )\n@@ -68,6 +67,7 @@\n \n def package_info(self):\n self.cpp_info.libs = [\"imgui\"]\n+ self.cpp_info.defines.append(\"IMGUI_USER_CONFIG=\\\"imgui_user_config.h\\\"\")\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs.append(\"m\")\n self.cpp_info.srcdirs = [os.path.join(\"res\", \"bindings\")]\n", "issue": "[package] imgui/1.84.1: Shared library does not automatically import global data symbols\n### Package and Environment Details (include every applicable attribute)\r\n * Package Name/Version: **imgui/1.84.1**\r\n * Operating System+version: **Windows 10 21H1 Build 19043.1165**\r\n * Compiler+version: **Visual Studio 16 (2019)**\r\n * Docker image: **N/A**\r\n * Conan version: **conan 1.39.0**\r\n * Python version: **Python 3.9.6**\r\n\r\n\r\n### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)\r\n```\r\n[settings]\r\nos=Windows\r\nos_build=Windows\r\narch=x86_64\r\narch_build=x86_64\r\ncompiler=Visual Studio\r\ncompiler.version=16\r\nbuild_type=Release\r\n[options]\r\n[conf]\r\n[build_requires]\r\n[env]\r\n```\r\n\r\n\r\n### Steps to reproduce (Include if Applicable)\r\nTry to reference any code that uses global data symbols since those need to use `__declspec(dllimport)` when using [`WINDOWS_EXPORT_ALL_SYMBOLS`](https://cmake.org/cmake/help/latest/prop_tgt/WINDOWS_EXPORT_ALL_SYMBOLS.html#windows-export-all-symbols). 
One example could be using [`ImGuiTextBuffer`](https://github.com/ocornut/imgui/blob/v1.84.1/imgui.h#L2078) (which has `IMGUI_API static char EmptyString[1];`).\r\nThe following diff is for ImGui's [`test_package.cpp`](https://github.com/conan-io/conan-center-index/blob/master/recipes/imgui/all/test_package/test_package.cpp) and can reproduce this issue.\r\n\r\n```\r\n--- a/recipes/imgui/all/test_package/test_package.cpp\r\n+++ b/recipes/imgui/all/test_package/test_package.cpp\r\n@@ -5,6 +5,9 @@ int main(int, char**)\r\n {\r\n ImGuiContext* context =ImGui::CreateContext();\r\n ImGuiIO& io = ImGui::GetIO();\r\n+\t\r\n+ ImGuiTextBuffer textBuffer;\r\n+ textBuffer.append(\"Hello, ImGui\");\r\n \r\n // Build atlas\r\n unsigned char* tex_pixels = NULL;\r\n@@ -20,6 +23,7 @@ int main(int, char**)\r\n \r\n static float f = 0.0f;\r\n ImGui::Text(\"Hello, world!\");\r\n+ ImGui::Text(textBuffer.begin());\r\n ImGui::SliderFloat(\"float\", &f, 0.0f, 1.0f);\r\n ImGui::Text(\"Application average %.3f ms/frame (%.1f FPS)\", 1000.0f / io.Framerate, io.Framerate);\r\n ImGui::ShowDemoWindow(NULL);\r\n```\r\n\r\nThen, try to create the package with `conan create . imgui/1.84.1@ -o imgui:shared=True`.\r\n\r\n\r\n### Logs (Include/Attach if Applicable)\r\n<details><summary>Click to expand log</summary>\r\n\r\n```\r\ntest_package.obj : error LNK2019: unresolved external symbol \"public: static char * ImGuiTextBuffer::EmptyString\" (?Emp\r\ntyString@ImGuiTextBuffer@@2PADA) referenced in function main\r\n```\r\n\r\n</details>\r\n\r\nI think the simplest solution would be to add something like this\r\n\r\n```\r\nif self.options.shared and self.settings.os == \"Windows\":\r\n self.cpp_info.defines.append(\"IMGUI_API=__declspec(dllimport)\")\r\n```\r\n\r\nI'd be happy to open a PR with this change.\n", "before_files": [{"content": "from conans import ConanFile, CMake, tools\nimport os\n\nrequired_conan_version = \">=1.33.0\"\n\n\nclass IMGUIConan(ConanFile):\n name = \"imgui\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/ocornut/imgui\"\n description = \"Bloat-free Immediate Mode Graphical User interface for C++ with minimal dependencies\"\n topics = (\"conan\", \"imgui\", \"gui\", \"graphical\")\n license = \"MIT\"\n\n exports_sources = [\"CMakeLists.txt\"]\n generators = \"cmake\"\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False]\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True\n }\n\n _cmake = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version],\n destination=self._source_subfolder, strip_root=True)\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n self._cmake.configure()\n return self._cmake\n\n def build(self):\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(pattern=\"LICENSE.txt\", dst=\"licenses\", src=self._source_subfolder)\n backends_folder = src=os.path.join(\n self._source_subfolder,\n \"backends\" if tools.Version(self.version) >= \"1.80\" else \"examples\"\n )\n self.copy(pattern=\"imgui_impl_*\",\n dst=os.path.join(\"res\", \"bindings\"),\n src=backends_folder)\n cmake = self._configure_cmake()\n 
cmake.install()\n\n def package_info(self):\n self.cpp_info.libs = [\"imgui\"]\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs.append(\"m\")\n self.cpp_info.srcdirs = [os.path.join(\"res\", \"bindings\")]\n\n bin_path = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH env var with : {}\".format(bin_path))\n self.env_info.PATH.append(bin_path)\n", "path": "recipes/imgui/all/conanfile.py"}]}
2,047
397
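For the record above, the merged diff routes the fix through an `IMGUI_USER_CONFIG` header (generated by the recipe's CMakeLists, which is not shown here), while the reporter proposed adding the define directly in `package_info`. A distilled, runnable form of the reporter's idea — the function name is invented for illustration; in a real recipe the list would be appended to `self.cpp_info.defines`:

```
def imgui_consumer_defines(shared, os_name):
    # CMake's WINDOWS_EXPORT_ALL_SYMBOLS auto-exports every symbol, but a
    # consumer must still mark *data* symbols dllimport; plain functions link
    # without it, data such as ImGuiTextBuffer::EmptyString does not.
    defines = []
    if shared and os_name == "Windows":
        defines.append("IMGUI_API=__declspec(dllimport)")
    return defines

assert imgui_consumer_defines(True, "Windows") == ["IMGUI_API=__declspec(dllimport)"]
assert imgui_consumer_defines(False, "Windows") == []
assert imgui_consumer_defines(True, "Linux") == []
```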
gh_patches_debug_36505
rasdani/github-patches
git_diff
hpcaitech__ColossalAI-2690
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [tensor] fix some unittests [tensor] fix some unittests [tensor] fix some unittests </issue> <code> [start of colossalai/gemini/gemini_context.py] 1 from enum import EnumMeta 2 3 4 class GeminiMemoryManager(object): 5 6 def __init__(self, states_cls: EnumMeta): 7 super().__init__() 8 self.states_cls = states_cls 9 self._cnter = 0 # the counter of instances 10 11 self.total_mem = dict() 12 self.state_mem = dict() 13 self.state_mem['cpu'] = dict() 14 self.state_mem['cuda'] = dict() 15 16 self.reset() 17 18 @property 19 def total_number(self): 20 return self._cnter 21 22 def reset(self): 23 self._cnter = 0 # the counter of instances 24 25 self.total_mem['cpu'] = 0 # memory occupation of instances in cpu 26 self.total_mem['cuda'] = 0 # memory of occupation of instances in cuda 27 28 # memory conditions for all states 29 for state in self.states_cls: 30 self.state_mem['cpu'][state] = 0 31 self.state_mem['cuda'][state] = 0 32 33 def register_new_instance(self): 34 self._cnter += 1 35 36 def delete_instance(self): 37 self._cnter -= 1 38 39 def print_info(self): 40 print(f"Total number: {self.total_number}", 41 f"Total CPU memory occupation: {self.total_mem['cpu']}", 42 f"Total CUDA memory occupation: {self.total_mem['cuda']}\n", 43 sep='\n') 44 45 for state in self.states_cls: 46 print(f"{state}: CPU memory occupation: {self.state_mem['cpu'][state]}", 47 f"{state}: CUDA memory occupation: {self.state_mem['cuda'][state]}\n", 48 sep='\n') 49 [end of colossalai/gemini/gemini_context.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/colossalai/gemini/gemini_context.py b/colossalai/gemini/gemini_context.py --- a/colossalai/gemini/gemini_context.py +++ b/colossalai/gemini/gemini_context.py @@ -1,48 +1,48 @@ -from enum import EnumMeta - - -class GeminiMemoryManager(object): - - def __init__(self, states_cls: EnumMeta): - super().__init__() - self.states_cls = states_cls - self._cnter = 0 # the counter of instances - - self.total_mem = dict() - self.state_mem = dict() - self.state_mem['cpu'] = dict() - self.state_mem['cuda'] = dict() - - self.reset() - - @property - def total_number(self): - return self._cnter - - def reset(self): - self._cnter = 0 # the counter of instances - - self.total_mem['cpu'] = 0 # memory occupation of instances in cpu - self.total_mem['cuda'] = 0 # memory of occupation of instances in cuda - - # memory conditions for all states - for state in self.states_cls: - self.state_mem['cpu'][state] = 0 - self.state_mem['cuda'][state] = 0 - - def register_new_instance(self): - self._cnter += 1 - - def delete_instance(self): - self._cnter -= 1 - - def print_info(self): - print(f"Total number: {self.total_number}", - f"Total CPU memory occupation: {self.total_mem['cpu']}", - f"Total CUDA memory occupation: {self.total_mem['cuda']}\n", - sep='\n') - - for state in self.states_cls: - print(f"{state}: CPU memory occupation: {self.state_mem['cpu'][state]}", - f"{state}: CUDA memory occupation: {self.state_mem['cuda'][state]}\n", - sep='\n') +from enum import EnumMeta + + +class GeminiMemoryManager(object): + + def __init__(self, states_cls: EnumMeta): + super().__init__() + self.states_cls = states_cls + self._cnter = 0 # the counter of instances + + self.total_mem = dict() + self.state_mem = dict() + self.state_mem['cpu'] = dict() + self.state_mem['cuda'] = dict() + + self.reset() + + @property + def total_number(self): + return self._cnter + + def reset(self): + self._cnter = 0 # the counter of instances + + self.total_mem['cpu'] = 0 # memory occupation of instances in cpu + self.total_mem['cuda'] = 0 # memory of occupation of instances in cuda + + # memory conditions for all states + for state in self.states_cls: + self.state_mem['cpu'][state] = 0 + self.state_mem['cuda'][state] = 0 + + def register_new_instance(self): + self._cnter += 1 + + def delete_instance(self): + self._cnter -= 1 + + def print_info(self): + print(f"Total number: {self.total_number}", + f"Total CPU memory occupation: {self.total_mem['cpu']}", + f"Total CUDA memory occupation: {self.total_mem['cuda']}\n", + sep='\n') + + for state in self.states_cls: + print(f"{state}: CPU memory occupation: {self.state_mem['cpu'][state]}", + f"{state}: CUDA memory occupation: {self.state_mem['cuda'][state]}\n", + sep='\n')
{"golden_diff": "diff --git a/colossalai/gemini/gemini_context.py b/colossalai/gemini/gemini_context.py\n--- a/colossalai/gemini/gemini_context.py\n+++ b/colossalai/gemini/gemini_context.py\n@@ -1,48 +1,48 @@\n-from enum import EnumMeta\r\n-\r\n-\r\n-class GeminiMemoryManager(object):\r\n-\r\n- def __init__(self, states_cls: EnumMeta):\r\n- super().__init__()\r\n- self.states_cls = states_cls\r\n- self._cnter = 0 # the counter of instances\r\n-\r\n- self.total_mem = dict()\r\n- self.state_mem = dict()\r\n- self.state_mem['cpu'] = dict()\r\n- self.state_mem['cuda'] = dict()\r\n-\r\n- self.reset()\r\n-\r\n- @property\r\n- def total_number(self):\r\n- return self._cnter\r\n-\r\n- def reset(self):\r\n- self._cnter = 0 # the counter of instances\r\n-\r\n- self.total_mem['cpu'] = 0 # memory occupation of instances in cpu\r\n- self.total_mem['cuda'] = 0 # memory of occupation of instances in cuda\r\n-\r\n- # memory conditions for all states\r\n- for state in self.states_cls:\r\n- self.state_mem['cpu'][state] = 0\r\n- self.state_mem['cuda'][state] = 0\r\n-\r\n- def register_new_instance(self):\r\n- self._cnter += 1\r\n-\r\n- def delete_instance(self):\r\n- self._cnter -= 1\r\n-\r\n- def print_info(self):\r\n- print(f\"Total number: {self.total_number}\",\r\n- f\"Total CPU memory occupation: {self.total_mem['cpu']}\",\r\n- f\"Total CUDA memory occupation: {self.total_mem['cuda']}\\n\",\r\n- sep='\\n')\r\n-\r\n- for state in self.states_cls:\r\n- print(f\"{state}: CPU memory occupation: {self.state_mem['cpu'][state]}\",\r\n- f\"{state}: CUDA memory occupation: {self.state_mem['cuda'][state]}\\n\",\r\n- sep='\\n')\r\n+from enum import EnumMeta\n+\n+\n+class GeminiMemoryManager(object):\n+\n+ def __init__(self, states_cls: EnumMeta):\n+ super().__init__()\n+ self.states_cls = states_cls\n+ self._cnter = 0 # the counter of instances\n+\n+ self.total_mem = dict()\n+ self.state_mem = dict()\n+ self.state_mem['cpu'] = dict()\n+ self.state_mem['cuda'] = dict()\n+\n+ self.reset()\n+\n+ @property\n+ def total_number(self):\n+ return self._cnter\n+\n+ def reset(self):\n+ self._cnter = 0 # the counter of instances\n+\n+ self.total_mem['cpu'] = 0 # memory occupation of instances in cpu\n+ self.total_mem['cuda'] = 0 # memory of occupation of instances in cuda\n+\n+ # memory conditions for all states\n+ for state in self.states_cls:\n+ self.state_mem['cpu'][state] = 0\n+ self.state_mem['cuda'][state] = 0\n+\n+ def register_new_instance(self):\n+ self._cnter += 1\n+\n+ def delete_instance(self):\n+ self._cnter -= 1\n+\n+ def print_info(self):\n+ print(f\"Total number: {self.total_number}\",\n+ f\"Total CPU memory occupation: {self.total_mem['cpu']}\",\n+ f\"Total CUDA memory occupation: {self.total_mem['cuda']}\\n\",\n+ sep='\\n')\n+\n+ for state in self.states_cls:\n+ print(f\"{state}: CPU memory occupation: {self.state_mem['cpu'][state]}\",\n+ f\"{state}: CUDA memory occupation: {self.state_mem['cuda'][state]}\\n\",\n+ sep='\\n')\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from enum import EnumMeta\r\n\r\n\r\nclass GeminiMemoryManager(object):\r\n\r\n def __init__(self, states_cls: EnumMeta):\r\n super().__init__()\r\n self.states_cls = states_cls\r\n self._cnter = 0 # the counter of instances\r\n\r\n self.total_mem = dict()\r\n self.state_mem = dict()\r\n self.state_mem['cpu'] = dict()\r\n self.state_mem['cuda'] = dict()\r\n\r\n self.reset()\r\n\r\n @property\r\n def total_number(self):\r\n return self._cnter\r\n\r\n 
def reset(self):\r\n self._cnter = 0 # the counter of instances\r\n\r\n self.total_mem['cpu'] = 0 # memory occupation of instances in cpu\r\n self.total_mem['cuda'] = 0 # memory of occupation of instances in cuda\r\n\r\n # memory conditions for all states\r\n for state in self.states_cls:\r\n self.state_mem['cpu'][state] = 0\r\n self.state_mem['cuda'][state] = 0\r\n\r\n def register_new_instance(self):\r\n self._cnter += 1\r\n\r\n def delete_instance(self):\r\n self._cnter -= 1\r\n\r\n def print_info(self):\r\n print(f\"Total number: {self.total_number}\",\r\n f\"Total CPU memory occupation: {self.total_mem['cpu']}\",\r\n f\"Total CUDA memory occupation: {self.total_mem['cuda']}\\n\",\r\n sep='\\n')\r\n\r\n for state in self.states_cls:\r\n print(f\"{state}: CPU memory occupation: {self.state_mem['cpu'][state]}\",\r\n f\"{state}: CUDA memory occupation: {self.state_mem['cuda'][state]}\\n\",\r\n sep='\\n')\r\n", "path": "colossalai/gemini/gemini_context.py"}]}
1,025
871
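The diff above looks like a wholesale rewrite, but every removed line reappears verbatim: the `before_files` content carries `\r\n` endings (visible in the escaped JSON) and the change is a CRLF-to-LF line-ending normalization. The equivalent operation in isolation:

```
# Each "-" line in the diff ends in \r\n; each "+" line is the same text
# with a bare \n. Normalizing is a single replace.
crlf_source = "class GeminiMemoryManager(object):\r\n    pass\r\n"

assert "\r\n" in crlf_source
normalized = crlf_source.replace("\r\n", "\n")
assert "\r" not in normalized and normalized.count("\n") == 2
```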
gh_patches_debug_363
rasdani/github-patches
git_diff
mozilla__bugbug-3921
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [model:regressor] AttributeError: 'IsotonicRegressionCalibrator' object has no attribute 'n_features_in_' https://community-tc.services.mozilla.com/tasks/HncpjvKKRcSnxL_GJ8PV9A/runs/0/logs/public/logs/live.log ``` Traceback (most recent call last): File "/usr/local/bin/bugbug-train", line 8, in <module> sys.exit(main()) File "/usr/local/lib/python3.10/site-packages/scripts/trainer.py", line 141, in main retriever.go(args) File "/usr/local/lib/python3.10/site-packages/scripts/trainer.py", line 41, in go metrics = model_obj.train(limit=args.limit) File "/usr/local/lib/python3.10/site-packages/bugbug/model.py", line 418, in train logger.info("Number of features: %d", self.clf.steps[-1][1].n_features_in_) AttributeError: 'IsotonicRegressionCalibrator' object has no attribute 'n_features_in_' ``` </issue> <code> [start of bugbug/model_calibration.py] 1 # -*- coding: utf-8 -*- 2 # This Source Code Form is subject to the terms of the Mozilla Public 3 # License, v. 2.0. If a copy of the MPL was not distributed with this file, 4 # You can obtain one at http://mozilla.org/MPL/2.0/. 5 6 from sklearn.base import BaseEstimator, ClassifierMixin 7 from sklearn.calibration import CalibratedClassifierCV 8 from sklearn.model_selection import train_test_split 9 10 11 class IsotonicRegressionCalibrator(BaseEstimator, ClassifierMixin): 12 def __init__(self, base_clf): 13 self.base_clf = base_clf 14 self.calibrated_clf = CalibratedClassifierCV( 15 base_clf, cv="prefit", method="isotonic" 16 ) 17 18 def fit(self, X_train, y_train): 19 X_train, X_val, y_train, y_val = train_test_split( 20 X_train, y_train, test_size=0.2, random_state=42 21 ) 22 self.base_clf.fit(X_train, y_train) 23 self.calibrated_clf.fit(X_val, y_val) 24 25 def predict(self, X): 26 return self.calibrated_clf.predict(X) 27 28 def predict_proba(self, X): 29 return self.calibrated_clf.predict_proba(X) 30 [end of bugbug/model_calibration.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bugbug/model_calibration.py b/bugbug/model_calibration.py --- a/bugbug/model_calibration.py +++ b/bugbug/model_calibration.py @@ -27,3 +27,7 @@ def predict_proba(self, X): return self.calibrated_clf.predict_proba(X) + + @property + def n_features_in_(self): + return self.base_clf.n_features_in_
{"golden_diff": "diff --git a/bugbug/model_calibration.py b/bugbug/model_calibration.py\n--- a/bugbug/model_calibration.py\n+++ b/bugbug/model_calibration.py\n@@ -27,3 +27,7 @@\n \n def predict_proba(self, X):\n return self.calibrated_clf.predict_proba(X)\n+\n+ @property\n+ def n_features_in_(self):\n+ return self.base_clf.n_features_in_\n", "issue": "[model:regressor] AttributeError: 'IsotonicRegressionCalibrator' object has no attribute 'n_features_in_'\nhttps://community-tc.services.mozilla.com/tasks/HncpjvKKRcSnxL_GJ8PV9A/runs/0/logs/public/logs/live.log\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/bugbug-train\", line 8, in <module>\r\n sys.exit(main())\r\n File \"/usr/local/lib/python3.10/site-packages/scripts/trainer.py\", line 141, in main\r\n retriever.go(args)\r\n File \"/usr/local/lib/python3.10/site-packages/scripts/trainer.py\", line 41, in go\r\n metrics = model_obj.train(limit=args.limit)\r\n File \"/usr/local/lib/python3.10/site-packages/bugbug/model.py\", line 418, in train\r\n logger.info(\"Number of features: %d\", self.clf.steps[-1][1].n_features_in_)\r\nAttributeError: 'IsotonicRegressionCalibrator' object has no attribute 'n_features_in_'\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nfrom sklearn.base import BaseEstimator, ClassifierMixin\nfrom sklearn.calibration import CalibratedClassifierCV\nfrom sklearn.model_selection import train_test_split\n\n\nclass IsotonicRegressionCalibrator(BaseEstimator, ClassifierMixin):\n def __init__(self, base_clf):\n self.base_clf = base_clf\n self.calibrated_clf = CalibratedClassifierCV(\n base_clf, cv=\"prefit\", method=\"isotonic\"\n )\n\n def fit(self, X_train, y_train):\n X_train, X_val, y_train, y_val = train_test_split(\n X_train, y_train, test_size=0.2, random_state=42\n )\n self.base_clf.fit(X_train, y_train)\n self.calibrated_clf.fit(X_val, y_val)\n\n def predict(self, X):\n return self.calibrated_clf.predict(X)\n\n def predict_proba(self, X):\n return self.calibrated_clf.predict_proba(X)\n", "path": "bugbug/model_calibration.py"}]}
1,100
95
gh_patches_debug_30439
rasdani/github-patches
git_diff
sql-machine-learning__elasticdl-38
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> support blocking pull in PS so client don't need to retry in a loop </issue> <code> [start of tensorflow/ps/ps.py] 1 import tensorflow as tf 2 tf.enable_eager_execution() 3 import tensorflow.contrib.eager as tfe 4 import numpy as np 5 import queue 6 import threading 7 8 9 class ParameterServer(object): 10 def __init__(self, optimizer, vars): 11 self._opt = optimizer 12 self._vars = {} 13 for k, v in vars.items(): 14 if (not isinstance(v, np.ndarray) 15 or v.dtype not in (np.float32, np.float64)): 16 raise ValueError( 17 'Initial value for variable %s is not of float type ndarray' % 18 k) 19 self._vars[k] = tfe.Variable(v, name=k) 20 self._step = 0 21 self._grad_q = queue.Queue() 22 self._lock = threading.Lock() 23 self._runner = threading.Thread(target=self._run, name='ps-runner') 24 self._exiting = False 25 26 def pull(self, min_step=0, names=None): 27 with self._lock: 28 if min_step > self._step: 29 raise LookupError('Required step is not ready yet: %s' % min_step) 30 if names: 31 res = {k: self._vars[k].numpy() for k in names} 32 else: 33 res = {k: v.numpy() for k, v in self._vars.items()} 34 return self._step, res 35 36 def push(self, base_step, sub_step, grads): 37 with self._lock: 38 if base_step > self._step: 39 raise ValueError( 40 'Illegal base step %s, parameter server step is %s' % 41 (base_step, self._step)) 42 43 if sub_step < 0: 44 raise ValueError('Illegal sub step %s' % sub_step) 45 46 for k, g in grads.items(): 47 v = self._vars[k] 48 if g.dtype != v.dtype.as_numpy_dtype or g.shape != v.shape: 49 raise ValueError('Incompatible gradient for variable %s' % k) 50 # TODO(l.zou): use @dataclass when python 3.7 is available. 51 self._grad_q.put((base_step, sub_step, grads)) 52 53 def _compute(self, grads): 54 grads_vars = [(g, self._vars[k]) for k, g in grads.items()] 55 with self._lock: 56 self._opt.apply_gradients(grads_vars) 57 self._step += 1 58 59 def _run(self): 60 while not self._exiting: 61 # TODO(l.zou): How to properly accumulate and decay grads? 62 try: 63 base_step, sub_step, grads = self._grad_q.get(timeout=1.0) 64 self._compute(grads) 65 except queue.Empty: 66 pass 67 68 def start(self): 69 self._runner.start() 70 71 def join(self): 72 self._exiting = True 73 self._runner.join() 74 [end of tensorflow/ps/ps.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/tensorflow/ps/ps.py b/tensorflow/ps/ps.py --- a/tensorflow/ps/ps.py +++ b/tensorflow/ps/ps.py @@ -1,9 +1,9 @@ +import threading +import queue +import numpy as np +import tensorflow.contrib.eager as tfe import tensorflow as tf tf.enable_eager_execution() -import tensorflow.contrib.eager as tfe -import numpy as np -import queue -import threading class ParameterServer(object): @@ -22,11 +22,18 @@ self._lock = threading.Lock() self._runner = threading.Thread(target=self._run, name='ps-runner') self._exiting = False + self._min_step_cv = threading.Condition() - def pull(self, min_step=0, names=None): + def pull(self, names=None, min_step=0, blocking=True, timeout=None): + with self._min_step_cv: + self._min_step_cv.wait_for( + lambda: not blocking or min_step <= self._step, + timeout=timeout) with self._lock: if min_step > self._step: - raise LookupError('Required step is not ready yet: %s' % min_step) + raise LookupError( + 'Required step is not ready yet: %s' % + min_step) if names: res = {k: self._vars[k].numpy() for k in names} else: @@ -54,7 +61,9 @@ grads_vars = [(g, self._vars[k]) for k, g in grads.items()] with self._lock: self._opt.apply_gradients(grads_vars) + with self._min_step_cv: self._step += 1 + self._min_step_cv.notify_all() def _run(self): while not self._exiting:
{"golden_diff": "diff --git a/tensorflow/ps/ps.py b/tensorflow/ps/ps.py\n--- a/tensorflow/ps/ps.py\n+++ b/tensorflow/ps/ps.py\n@@ -1,9 +1,9 @@\n+import threading\n+import queue\n+import numpy as np\n+import tensorflow.contrib.eager as tfe\n import tensorflow as tf\n tf.enable_eager_execution()\n-import tensorflow.contrib.eager as tfe\n-import numpy as np\n-import queue\n-import threading\n \n \n class ParameterServer(object):\n@@ -22,11 +22,18 @@\n self._lock = threading.Lock()\n self._runner = threading.Thread(target=self._run, name='ps-runner')\n self._exiting = False\n+ self._min_step_cv = threading.Condition()\n \n- def pull(self, min_step=0, names=None):\n+ def pull(self, names=None, min_step=0, blocking=True, timeout=None):\n+ with self._min_step_cv:\n+ self._min_step_cv.wait_for(\n+ lambda: not blocking or min_step <= self._step,\n+ timeout=timeout)\n with self._lock:\n if min_step > self._step:\n- raise LookupError('Required step is not ready yet: %s' % min_step)\n+ raise LookupError(\n+ 'Required step is not ready yet: %s' %\n+ min_step)\n if names:\n res = {k: self._vars[k].numpy() for k in names}\n else:\n@@ -54,7 +61,9 @@\n grads_vars = [(g, self._vars[k]) for k, g in grads.items()]\n with self._lock:\n self._opt.apply_gradients(grads_vars)\n+ with self._min_step_cv:\n self._step += 1\n+ self._min_step_cv.notify_all()\n \n def _run(self):\n while not self._exiting:\n", "issue": "support blocking pull in PS so client don't need to retry in a loop\n\n", "before_files": [{"content": "import tensorflow as tf\ntf.enable_eager_execution()\nimport tensorflow.contrib.eager as tfe\nimport numpy as np\nimport queue\nimport threading\n\n\nclass ParameterServer(object):\n def __init__(self, optimizer, vars):\n self._opt = optimizer\n self._vars = {}\n for k, v in vars.items():\n if (not isinstance(v, np.ndarray)\n or v.dtype not in (np.float32, np.float64)):\n raise ValueError(\n 'Initial value for variable %s is not of float type ndarray' %\n k)\n self._vars[k] = tfe.Variable(v, name=k)\n self._step = 0\n self._grad_q = queue.Queue()\n self._lock = threading.Lock()\n self._runner = threading.Thread(target=self._run, name='ps-runner')\n self._exiting = False\n\n def pull(self, min_step=0, names=None):\n with self._lock:\n if min_step > self._step:\n raise LookupError('Required step is not ready yet: %s' % min_step)\n if names:\n res = {k: self._vars[k].numpy() for k in names}\n else:\n res = {k: v.numpy() for k, v in self._vars.items()}\n return self._step, res\n\n def push(self, base_step, sub_step, grads):\n with self._lock:\n if base_step > self._step:\n raise ValueError(\n 'Illegal base step %s, parameter server step is %s' %\n (base_step, self._step))\n\n if sub_step < 0:\n raise ValueError('Illegal sub step %s' % sub_step)\n\n for k, g in grads.items():\n v = self._vars[k]\n if g.dtype != v.dtype.as_numpy_dtype or g.shape != v.shape:\n raise ValueError('Incompatible gradient for variable %s' % k)\n # TODO(l.zou): use @dataclass when python 3.7 is available.\n self._grad_q.put((base_step, sub_step, grads))\n\n def _compute(self, grads):\n grads_vars = [(g, self._vars[k]) for k, g in grads.items()]\n with self._lock:\n self._opt.apply_gradients(grads_vars)\n self._step += 1\n\n def _run(self):\n while not self._exiting:\n # TODO(l.zou): How to properly accumulate and decay grads?\n try:\n base_step, sub_step, grads = self._grad_q.get(timeout=1.0)\n self._compute(grads)\n except queue.Empty:\n pass\n\n def start(self):\n self._runner.start()\n\n def join(self):\n self._exiting = True\n 
self._runner.join()\n", "path": "tensorflow/ps/ps.py"}]}
1,298
423
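The core of the diff above is the `threading.Condition` idiom: `pull` blocks in `wait_for` until the step counter catches up, and the update path increments the counter and calls `notify_all` under the same condition. That mechanism, extracted into a self-contained sketch:

```
import threading

class StepClock:
    def __init__(self):
        self._step = 0
        self._cv = threading.Condition()

    def advance(self):
        with self._cv:
            self._step += 1
            self._cv.notify_all()  # wake every blocked waiter

    def wait_for_step(self, min_step, timeout=None):
        with self._cv:
            # Block instead of forcing the caller into a retry loop.
            if not self._cv.wait_for(lambda: self._step >= min_step,
                                     timeout=timeout):
                raise LookupError("step %s not ready" % min_step)
            return self._step

clock = StepClock()
threading.Timer(0.05, clock.advance).start()
assert clock.wait_for_step(1, timeout=1.0) == 1  # returns once advance() fires
```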
gh_patches_debug_9871
rasdani/github-patches
git_diff
OCA__social-623
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [13.0] [BUG]"base_search_mail_content" module > Getting bug with "hr" (Employees) module module: base_search_mail_content version: 13.0 **Context :** OCB 13.0 Odoo Server up to date [(08/30/2020),] Virgin database , to reproduce issue faced on my test environnement. Also !! >> Get same bug on runbot : http://3437172-13-0-56e0a2.runbot2-2.odoo-community.org **Steps to reproduce** - Install together "base_search_mail_content" & "hr" (Employees) native odoo module, and try to access to : hr" (Employees) **Current behavior** (model=hr.employee&view_type=kanban, or tree) When i try to access to menu "Employees"There is this following message : > Something went wrong ! Only types ['many2one'] are supported for category (found type text) **Current resolution** i uninstall "base_search_mail_content" to retreive access to hr" (Employees) ----------------------------------------------------------------------------------------------------------------------- ![Capture-Access _employees-1](https://user-images.githubusercontent.com/59052920/91721693-74c7db80-eb99-11ea-8871-c5a5b0b21eac.JPG) ![Capture-Access _employees](https://user-images.githubusercontent.com/59052920/91721766-932dd700-eb99-11ea-8c2b-12a280df4217.JPG) </issue> <code> [start of base_search_mail_content/models/mail_thread.py] 1 # Copyright 2016-17 Eficent Business and IT Consulting Services S.L. 2 # (http://www.eficent.com) 3 # Copyright 2016 Serpent Consulting Services Pvt. Ltd. 4 # (<http://www.serpentcs.com>) 5 # License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html). 6 7 from lxml import etree 8 9 from odoo import _, api, fields, models 10 from odoo.osv import expression 11 12 13 class MailThread(models.AbstractModel): 14 _inherit = "mail.thread" 15 16 def _search_message_content(self, operator, value): 17 model_domain = [("model", "=", self._name)] 18 if operator not in expression.NEGATIVE_TERM_OPERATORS: 19 model_domain += ["|"] * 4 20 model_domain += [ 21 ("record_name", operator, value), 22 ("subject", operator, value), 23 ("body", operator, value), 24 ("email_from", operator, value), 25 ("reply_to", operator, value), 26 ] 27 recs = self.env["mail.message"].search(model_domain) 28 return [("id", "in", recs.mapped("res_id"))] 29 30 message_content = fields.Text( 31 string="Message Content", 32 help="Message content, to be used only in searches", 33 compute=lambda self: False, 34 search="_search_message_content", 35 ) 36 37 @api.model 38 def fields_view_get( 39 self, view_id=None, view_type="form", toolbar=False, submenu=False 40 ): 41 """ 42 Override to add message_content field in all the objects 43 that inherits mail.thread 44 """ 45 res = super(MailThread, self).fields_view_get( 46 view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu 47 ) 48 if view_type == "search" and self._fields.get("message_content"): 49 doc = etree.XML(res["arch"]) 50 res["fields"].update( 51 {"message_content": {"type": "char", "string": _("Message Content")}} 52 ) 53 54 for node in doc.xpath("//field[last()]"): 55 # Add message_content in search view 56 elem = etree.Element("field", {"name": "message_content"}) 57 node.addnext(elem) 58 res["arch"] = etree.tostring(doc) 59 return res 60 [end of base_search_mail_content/models/mail_thread.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/base_search_mail_content/models/mail_thread.py b/base_search_mail_content/models/mail_thread.py --- a/base_search_mail_content/models/mail_thread.py +++ b/base_search_mail_content/models/mail_thread.py @@ -50,8 +50,7 @@ res["fields"].update( {"message_content": {"type": "char", "string": _("Message Content")}} ) - - for node in doc.xpath("//field[last()]"): + for node in doc.xpath("/search/field[last()]"): # Add message_content in search view elem = etree.Element("field", {"name": "message_content"}) node.addnext(elem)
{"golden_diff": "diff --git a/base_search_mail_content/models/mail_thread.py b/base_search_mail_content/models/mail_thread.py\n--- a/base_search_mail_content/models/mail_thread.py\n+++ b/base_search_mail_content/models/mail_thread.py\n@@ -50,8 +50,7 @@\n res[\"fields\"].update(\n {\"message_content\": {\"type\": \"char\", \"string\": _(\"Message Content\")}}\n )\n-\n- for node in doc.xpath(\"//field[last()]\"):\n+ for node in doc.xpath(\"/search/field[last()]\"):\n # Add message_content in search view\n elem = etree.Element(\"field\", {\"name\": \"message_content\"})\n node.addnext(elem)\n", "issue": "[13.0] [BUG]\"base_search_mail_content\" module > Getting bug with \"hr\" (Employees) module\nmodule: base_search_mail_content\r\nversion: 13.0\r\n\r\n**Context :**\r\nOCB 13.0 Odoo Server up to date [(08/30/2020),]\r\nVirgin database , to reproduce issue faced on my test environnement.\r\nAlso !! >> Get same bug on runbot : http://3437172-13-0-56e0a2.runbot2-2.odoo-community.org\r\n\r\n**Steps to reproduce**\r\n- Install together \"base_search_mail_content\" & \"hr\" (Employees) native odoo module, and try to access to : hr\" (Employees)\r\n\r\n**Current behavior** (model=hr.employee&view_type=kanban, or tree)\r\nWhen i try to access to menu \"Employees\"There is this following message : \r\n> Something went wrong !\r\nOnly types ['many2one'] are supported for category (found type text)\r\n\r\n**Current resolution**\r\ni uninstall \"base_search_mail_content\" to retreive access to hr\" (Employees)\r\n\r\n-----------------------------------------------------------------------------------------------------------------------\r\n![Capture-Access _employees-1](https://user-images.githubusercontent.com/59052920/91721693-74c7db80-eb99-11ea-8871-c5a5b0b21eac.JPG)\r\n![Capture-Access _employees](https://user-images.githubusercontent.com/59052920/91721766-932dd700-eb99-11ea-8c2b-12a280df4217.JPG)\r\n\r\n\n", "before_files": [{"content": "# Copyright 2016-17 Eficent Business and IT Consulting Services S.L.\n# (http://www.eficent.com)\n# Copyright 2016 Serpent Consulting Services Pvt. 
Ltd.\n# (<http://www.serpentcs.com>)\n# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).\n\nfrom lxml import etree\n\nfrom odoo import _, api, fields, models\nfrom odoo.osv import expression\n\n\nclass MailThread(models.AbstractModel):\n _inherit = \"mail.thread\"\n\n def _search_message_content(self, operator, value):\n model_domain = [(\"model\", \"=\", self._name)]\n if operator not in expression.NEGATIVE_TERM_OPERATORS:\n model_domain += [\"|\"] * 4\n model_domain += [\n (\"record_name\", operator, value),\n (\"subject\", operator, value),\n (\"body\", operator, value),\n (\"email_from\", operator, value),\n (\"reply_to\", operator, value),\n ]\n recs = self.env[\"mail.message\"].search(model_domain)\n return [(\"id\", \"in\", recs.mapped(\"res_id\"))]\n\n message_content = fields.Text(\n string=\"Message Content\",\n help=\"Message content, to be used only in searches\",\n compute=lambda self: False,\n search=\"_search_message_content\",\n )\n\n @api.model\n def fields_view_get(\n self, view_id=None, view_type=\"form\", toolbar=False, submenu=False\n ):\n \"\"\"\n Override to add message_content field in all the objects\n that inherits mail.thread\n \"\"\"\n res = super(MailThread, self).fields_view_get(\n view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu\n )\n if view_type == \"search\" and self._fields.get(\"message_content\"):\n doc = etree.XML(res[\"arch\"])\n res[\"fields\"].update(\n {\"message_content\": {\"type\": \"char\", \"string\": _(\"Message Content\")}}\n )\n\n for node in doc.xpath(\"//field[last()]\"):\n # Add message_content in search view\n elem = etree.Element(\"field\", {\"name\": \"message_content\"})\n node.addnext(elem)\n res[\"arch\"] = etree.tostring(doc)\n return res\n", "path": "base_search_mail_content/models/mail_thread.py"}]}
num_tokens_prompt: 1,532
num_tokens_diff: 142
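The golden diff above narrows an XPath from `//field[last()]` to `/search/field[last()]`. A minimal lxml sketch, using a hypothetical search-view XML rather than a running Odoo instance, shows why the anchored path matters once nested elements also contain `<field>` nodes:

```python
from lxml import etree

# Hypothetical Odoo-style search view: <search> has direct <field> children
# and a nested <group> that carries its own <field>.
arch = """
<search>
    <field name="name"/>
    <group expand="0">
        <field name="stage_id"/>
    </group>
    <field name="user_id"/>
</search>
"""
doc = etree.XML(arch)

# "//field[last()]" picks the last <field> among the children of *every*
# parent that has <field> children, so the nested group matches too.
print([f.get("name") for f in doc.xpath("//field[last()]")])
# -> ['stage_id', 'user_id']

# "/search/field[last()]" anchors the match to direct children of <search>,
# the single node the hook actually needs to extend.
print([f.get("name") for f in doc.xpath("/search/field[last()]")])
# -> ['user_id']
```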
problem_id: gh_patches_debug_13356
source: rasdani/github-patches
task_type: git_diff
in_source_id: mathesar-foundation__mathesar-2791
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Modify page routing to allow for any database name ## Current behavior - Many of our pages have URLs that begin with the database name. - We also have routes that begin with things like `administration` and `auth`. - Those routing rules produce an ambiguous routing grammar making it impossible to use Mathesar with a database named "administration" (for example). ## Desired behavior We should change `/<db_name>/` to `/db/<db_name>` </issue> <code> [start of mathesar/urls.py] 1 from django.contrib.auth.views import LoginView 2 from django.urls import include, path, re_path 3 from rest_framework_nested import routers 4 5 from mathesar import views 6 from mathesar.api.db import viewsets as db_viewsets 7 from mathesar.api.ui import viewsets as ui_viewsets 8 from mathesar.users.password_reset import MathesarPasswordResetConfirmView 9 10 db_router = routers.DefaultRouter() 11 db_router.register(r'tables', db_viewsets.TableViewSet, basename='table') 12 db_router.register(r'queries', db_viewsets.QueryViewSet, basename='query') 13 db_router.register(r'links', db_viewsets.LinkViewSet, basename='links') 14 db_router.register(r'schemas', db_viewsets.SchemaViewSet, basename='schema') 15 db_router.register(r'databases', db_viewsets.DatabaseViewSet, basename='database') 16 db_router.register(r'data_files', db_viewsets.DataFileViewSet, basename='data-file') 17 18 db_table_router = routers.NestedSimpleRouter(db_router, r'tables', lookup='table') 19 db_table_router.register(r'records', db_viewsets.RecordViewSet, basename='table-record') 20 db_table_router.register(r'settings', db_viewsets.TableSettingsViewSet, basename='table-setting') 21 db_table_router.register(r'columns', db_viewsets.ColumnViewSet, basename='table-column') 22 db_table_router.register(r'constraints', db_viewsets.ConstraintViewSet, basename='table-constraint') 23 24 ui_router = routers.DefaultRouter() 25 ui_router.register(r'version', ui_viewsets.VersionViewSet, basename='version') 26 ui_router.register(r'databases', ui_viewsets.DatabaseViewSet, basename='database') 27 ui_router.register(r'users', ui_viewsets.UserViewSet, basename='user') 28 ui_router.register(r'database_roles', ui_viewsets.DatabaseRoleViewSet, basename='database_role') 29 ui_router.register(r'schema_roles', ui_viewsets.SchemaRoleViewSet, basename='schema_role') 30 31 ui_table_router = routers.NestedSimpleRouter(db_router, r'tables', lookup='table') 32 ui_table_router.register(r'records', ui_viewsets.RecordViewSet, basename='table-record') 33 34 urlpatterns = [ 35 path('api/db/v0/', include(db_router.urls)), 36 path('api/db/v0/', include(db_table_router.urls)), 37 path('api/ui/v0/', include(ui_router.urls)), 38 path('api/ui/v0/', include(ui_table_router.urls)), 39 path('api/ui/v0/reflect/', views.reflect_all, name='reflect_all'), 40 path('auth/password_reset_confirm', MathesarPasswordResetConfirmView.as_view(), name='password_reset_confirm'), 41 path('auth/login/', LoginView.as_view(redirect_authenticated_user=True), name='login'), 42 path('auth/', include('django.contrib.auth.urls')), 43 path('', views.home, name='home'), 44 path('profile/', views.profile, name='profile'), 45 path('administration/', views.admin_home, name='admin_home'), 46 path('administration/users/', views.admin_home, name='admin_users_home'), 47 path('administration/users/<user_id>/', views.admin_home, name='admin_users_edit'), 48 path('administration/update/', views.admin_home, 
name='admin_update'), 49 path('<db_name>/', views.schemas, name='schemas'), 50 re_path( 51 r'^(?P<db_name>\w+)/(?P<schema_id>\w+)/', 52 views.schema_home, 53 name='schema_home' 54 ), 55 ] 56 [end of mathesar/urls.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mathesar/urls.py b/mathesar/urls.py --- a/mathesar/urls.py +++ b/mathesar/urls.py @@ -46,9 +46,10 @@ path('administration/users/', views.admin_home, name='admin_users_home'), path('administration/users/<user_id>/', views.admin_home, name='admin_users_edit'), path('administration/update/', views.admin_home, name='admin_update'), - path('<db_name>/', views.schemas, name='schemas'), + path('db/', views.home, name='db_home'), + path('db/<db_name>/', views.schemas, name='schemas'), re_path( - r'^(?P<db_name>\w+)/(?P<schema_id>\w+)/', + r'^db/(?P<db_name>\w+)/(?P<schema_id>\w+)/', views.schema_home, name='schema_home' ),
{"golden_diff": "diff --git a/mathesar/urls.py b/mathesar/urls.py\n--- a/mathesar/urls.py\n+++ b/mathesar/urls.py\n@@ -46,9 +46,10 @@\n path('administration/users/', views.admin_home, name='admin_users_home'),\n path('administration/users/<user_id>/', views.admin_home, name='admin_users_edit'),\n path('administration/update/', views.admin_home, name='admin_update'),\n- path('<db_name>/', views.schemas, name='schemas'),\n+ path('db/', views.home, name='db_home'),\n+ path('db/<db_name>/', views.schemas, name='schemas'),\n re_path(\n- r'^(?P<db_name>\\w+)/(?P<schema_id>\\w+)/',\n+ r'^db/(?P<db_name>\\w+)/(?P<schema_id>\\w+)/',\n views.schema_home,\n name='schema_home'\n ),\n", "issue": "Modify page routing to allow for any database name\n## Current behavior\r\n\r\n- Many of our pages have URLs that begin with the database name.\r\n- We also have routes that begin with things like `administration` and `auth`.\r\n- Those routing rules produce an ambiguous routing grammar making it impossible to use Mathesar with a database named \"administration\" (for example).\r\n\r\n## Desired behavior\r\n\r\nWe should change `/<db_name>/` to `/db/<db_name>`\r\n\r\n\n", "before_files": [{"content": "from django.contrib.auth.views import LoginView\nfrom django.urls import include, path, re_path\nfrom rest_framework_nested import routers\n\nfrom mathesar import views\nfrom mathesar.api.db import viewsets as db_viewsets\nfrom mathesar.api.ui import viewsets as ui_viewsets\nfrom mathesar.users.password_reset import MathesarPasswordResetConfirmView\n\ndb_router = routers.DefaultRouter()\ndb_router.register(r'tables', db_viewsets.TableViewSet, basename='table')\ndb_router.register(r'queries', db_viewsets.QueryViewSet, basename='query')\ndb_router.register(r'links', db_viewsets.LinkViewSet, basename='links')\ndb_router.register(r'schemas', db_viewsets.SchemaViewSet, basename='schema')\ndb_router.register(r'databases', db_viewsets.DatabaseViewSet, basename='database')\ndb_router.register(r'data_files', db_viewsets.DataFileViewSet, basename='data-file')\n\ndb_table_router = routers.NestedSimpleRouter(db_router, r'tables', lookup='table')\ndb_table_router.register(r'records', db_viewsets.RecordViewSet, basename='table-record')\ndb_table_router.register(r'settings', db_viewsets.TableSettingsViewSet, basename='table-setting')\ndb_table_router.register(r'columns', db_viewsets.ColumnViewSet, basename='table-column')\ndb_table_router.register(r'constraints', db_viewsets.ConstraintViewSet, basename='table-constraint')\n\nui_router = routers.DefaultRouter()\nui_router.register(r'version', ui_viewsets.VersionViewSet, basename='version')\nui_router.register(r'databases', ui_viewsets.DatabaseViewSet, basename='database')\nui_router.register(r'users', ui_viewsets.UserViewSet, basename='user')\nui_router.register(r'database_roles', ui_viewsets.DatabaseRoleViewSet, basename='database_role')\nui_router.register(r'schema_roles', ui_viewsets.SchemaRoleViewSet, basename='schema_role')\n\nui_table_router = routers.NestedSimpleRouter(db_router, r'tables', lookup='table')\nui_table_router.register(r'records', ui_viewsets.RecordViewSet, basename='table-record')\n\nurlpatterns = [\n path('api/db/v0/', include(db_router.urls)),\n path('api/db/v0/', include(db_table_router.urls)),\n path('api/ui/v0/', include(ui_router.urls)),\n path('api/ui/v0/', include(ui_table_router.urls)),\n path('api/ui/v0/reflect/', views.reflect_all, name='reflect_all'),\n path('auth/password_reset_confirm', MathesarPasswordResetConfirmView.as_view(), 
name='password_reset_confirm'),\n path('auth/login/', LoginView.as_view(redirect_authenticated_user=True), name='login'),\n path('auth/', include('django.contrib.auth.urls')),\n path('', views.home, name='home'),\n path('profile/', views.profile, name='profile'),\n path('administration/', views.admin_home, name='admin_home'),\n path('administration/users/', views.admin_home, name='admin_users_home'),\n path('administration/users/<user_id>/', views.admin_home, name='admin_users_edit'),\n path('administration/update/', views.admin_home, name='admin_update'),\n path('<db_name>/', views.schemas, name='schemas'),\n re_path(\n r'^(?P<db_name>\\w+)/(?P<schema_id>\\w+)/',\n views.schema_home,\n name='schema_home'\n ),\n]\n", "path": "mathesar/urls.py"}]}
num_tokens_prompt: 1,437
num_tokens_diff: 205
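The routing fix reserves a `db/` prefix so a database named `administration` can no longer be confused with the admin pages. A reduced sketch of the before-and-after grammar with plain Django URL patterns, assuming Django is installed; the views are placeholders, not Mathesar's:

```python
from django.urls import path, re_path

def schemas(request, db_name): ...                   # placeholder view
def schema_home(request, db_name, schema_id): ...    # placeholder view

urlpatterns = [
    # Before: "<db_name>/" captured *any* first path segment, so
    # /administration/ was ambiguous between a database and the admin home.
    # path("<db_name>/", schemas, name="schemas"),

    # After: the explicit prefix removes the ambiguity.
    path("db/<db_name>/", schemas, name="schemas"),
    re_path(
        r"^db/(?P<db_name>\w+)/(?P<schema_id>\w+)/",
        schema_home,
        name="schema_home",
    ),
]
```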
problem_id: gh_patches_debug_7778
source: rasdani/github-patches
task_type: git_diff
in_source_id: nipy__nipype-2096
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> interfaces.camino.convert.FSL2Scheme does not show up in documentation Diagram on front page of docs contains a typo "Idiosynchratic" should be "Idiosyncratic" </issue> <code> [start of tools/build_interface_docs.py] 1 #!/usr/bin/env python 2 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- 3 # vi: set ft=python sts=4 ts=4 sw=4 et: 4 """Script to auto-generate interface docs. 5 """ 6 from __future__ import print_function, unicode_literals 7 # stdlib imports 8 import os 9 import sys 10 11 # ***************************************************************************** 12 if __name__ == '__main__': 13 nipypepath = os.path.abspath('..') 14 sys.path.insert(1, nipypepath) 15 # local imports 16 from interfacedocgen import InterfaceHelpWriter 17 package = 'nipype' 18 outdir = os.path.join('interfaces', 'generated') 19 docwriter = InterfaceHelpWriter(package) 20 # Packages that should not be included in generated API docs. 21 docwriter.package_skip_patterns += ['\.external$', 22 '\.fixes$', 23 '\.utils$', 24 '\.pipeline', 25 '\.testing', 26 '\.caching', 27 '\.scripts', 28 ] 29 # Modules that should not be included in generated API docs. 30 docwriter.module_skip_patterns += ['\.version$', 31 '\.interfaces\.base$', 32 '\.interfaces\.matlab$', 33 '\.interfaces\.rest$', 34 '\.interfaces\.pymvpa$', 35 '\.interfaces\.slicer\.generate_classes$', 36 '\.interfaces\.spm\.base$', 37 '\.interfaces\.traits', 38 '\.pipeline\.alloy$', 39 '\.pipeline\.s3_node_wrapper$', 40 '\.testing', 41 '\.scripts', 42 ] 43 docwriter.class_skip_patterns += ['AFNICommand', 44 'ANTS', 45 'FSL', 46 'FS', 47 'Info', 48 '^SPM', 49 'Tester', 50 'Spec$', 51 'Numpy' 52 # NipypeTester raises an 53 # exception when instantiated in 54 # InterfaceHelpWriter.generate_api_doc 55 'NipypeTester', 56 ] 57 docwriter.write_api_docs(outdir) 58 docwriter.write_index(outdir, 'gen', relative_to='interfaces') 59 print('%d files written' % len(docwriter.written_modules)) 60 [end of tools/build_interface_docs.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/tools/build_interface_docs.py b/tools/build_interface_docs.py --- a/tools/build_interface_docs.py +++ b/tools/build_interface_docs.py @@ -42,7 +42,7 @@ ] docwriter.class_skip_patterns += ['AFNICommand', 'ANTS', - 'FSL', + 'FSLCommand', 'FS', 'Info', '^SPM',
{"golden_diff": "diff --git a/tools/build_interface_docs.py b/tools/build_interface_docs.py\n--- a/tools/build_interface_docs.py\n+++ b/tools/build_interface_docs.py\n@@ -42,7 +42,7 @@\n ]\n docwriter.class_skip_patterns += ['AFNICommand',\n 'ANTS',\n- 'FSL',\n+ 'FSLCommand',\n 'FS',\n 'Info',\n '^SPM',\n", "issue": "interfaces.camino.convert.FSL2Scheme does not show up in documentation\n\nDiagram on front page of docs contains a typo\n\"Idiosynchratic\" should be \"Idiosyncratic\"\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"Script to auto-generate interface docs.\n\"\"\"\nfrom __future__ import print_function, unicode_literals\n# stdlib imports\nimport os\nimport sys\n\n# *****************************************************************************\nif __name__ == '__main__':\n nipypepath = os.path.abspath('..')\n sys.path.insert(1, nipypepath)\n # local imports\n from interfacedocgen import InterfaceHelpWriter\n package = 'nipype'\n outdir = os.path.join('interfaces', 'generated')\n docwriter = InterfaceHelpWriter(package)\n # Packages that should not be included in generated API docs.\n docwriter.package_skip_patterns += ['\\.external$',\n '\\.fixes$',\n '\\.utils$',\n '\\.pipeline',\n '\\.testing',\n '\\.caching',\n '\\.scripts',\n ]\n # Modules that should not be included in generated API docs.\n docwriter.module_skip_patterns += ['\\.version$',\n '\\.interfaces\\.base$',\n '\\.interfaces\\.matlab$',\n '\\.interfaces\\.rest$',\n '\\.interfaces\\.pymvpa$',\n '\\.interfaces\\.slicer\\.generate_classes$',\n '\\.interfaces\\.spm\\.base$',\n '\\.interfaces\\.traits',\n '\\.pipeline\\.alloy$',\n '\\.pipeline\\.s3_node_wrapper$',\n '\\.testing',\n '\\.scripts',\n ]\n docwriter.class_skip_patterns += ['AFNICommand',\n 'ANTS',\n 'FSL',\n 'FS',\n 'Info',\n '^SPM',\n 'Tester',\n 'Spec$',\n 'Numpy'\n # NipypeTester raises an\n # exception when instantiated in\n # InterfaceHelpWriter.generate_api_doc\n 'NipypeTester',\n ]\n docwriter.write_api_docs(outdir)\n docwriter.write_index(outdir, 'gen', relative_to='interfaces')\n print('%d files written' % len(docwriter.written_modules))\n", "path": "tools/build_interface_docs.py"}]}
num_tokens_prompt: 1,150
num_tokens_diff: 89
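The one-word change works because the doc generator treats `class_skip_patterns` entries as regular expressions, so the bare pattern `FSL` also matched `FSL2Scheme` and kept it out of the generated docs. A standalone check of that matching behaviour (`re.search` semantics are assumed here; the real `InterfaceHelpWriter` is not reproduced):

```python
import re

classes = ["FSLCommand", "FSL2Scheme", "SPMCommand"]

for pattern in ["FSL", "FSLCommand"]:
    skipped = [name for name in classes if re.search(pattern, name)]
    print(f"{pattern!r} skips {skipped}")

# 'FSL' skips ['FSLCommand', 'FSL2Scheme']   <- FSL2Scheme disappears
# 'FSLCommand' skips ['FSLCommand']          <- FSL2Scheme gets documented
```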
problem_id: gh_patches_debug_2400
source: rasdani/github-patches
task_type: git_diff
in_source_id: dask__distributed-2975
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> dask.distributed.progress no longer callable in 2.3.0? We've used the progress() function from dask.distributed a bunch in the past to display a progress bar in JupyterLab, but it seems to have stopped working after upgrading to Dask 2.3.0: ``` from dask.distributed import Client, progress import dask.dataframe as dd df = dd.demo.make_timeseries('2010', '2016', {'value': float, 'name': str, 'id': int}, freq='10s', partition_freq='7d', seed=1) df = df.persist() progress(df) ``` Executing this in a single cell in JupyterLab (with an existing Dask cluster already running) results in: ``` --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-1-16af814d7204> in <module> 7 8 df = df.persist() ----> 9 progress(df) TypeError: 'module' object is not callable ``` Let me know if I can provide any more info. Thanks! </issue> <code> [start of distributed/__init__.py] 1 from . import config 2 from dask.config import config 3 from .actor import Actor, ActorFuture 4 from .core import connect, rpc 5 from .deploy import LocalCluster, Adaptive, SpecCluster 6 from .diagnostics import progress 7 from .client import ( 8 Client, 9 Executor, 10 CompatibleExecutor, 11 wait, 12 as_completed, 13 default_client, 14 fire_and_forget, 15 Future, 16 futures_of, 17 get_task_stream, 18 ) 19 from .lock import Lock 20 from .nanny import Nanny 21 from .pubsub import Pub, Sub 22 from .queues import Queue 23 from .scheduler import Scheduler 24 from .threadpoolexecutor import rejoin 25 from .utils import sync 26 from .variable import Variable 27 from .worker import Worker, get_worker, get_client, secede, Reschedule 28 from .worker_client import local_client, worker_client 29 30 from tornado.gen import TimeoutError 31 32 from ._version import get_versions 33 34 versions = get_versions() 35 __version__ = versions["version"] 36 __git_revision__ = versions["full-revisionid"] 37 del get_versions, versions 38 [end of distributed/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/distributed/__init__.py b/distributed/__init__.py --- a/distributed/__init__.py +++ b/distributed/__init__.py @@ -3,7 +3,7 @@ from .actor import Actor, ActorFuture from .core import connect, rpc from .deploy import LocalCluster, Adaptive, SpecCluster -from .diagnostics import progress +from .diagnostics.progressbar import progress from .client import ( Client, Executor,
{"golden_diff": "diff --git a/distributed/__init__.py b/distributed/__init__.py\n--- a/distributed/__init__.py\n+++ b/distributed/__init__.py\n@@ -3,7 +3,7 @@\n from .actor import Actor, ActorFuture\n from .core import connect, rpc\n from .deploy import LocalCluster, Adaptive, SpecCluster\n-from .diagnostics import progress\n+from .diagnostics.progressbar import progress\n from .client import (\n Client,\n Executor,\n", "issue": "dask.distributed.progress no longer callable in 2.3.0?\nWe've used the progress() function from dask.distributed a bunch in the past to display a progress bar in JupyterLab, but it seems to have stopped working after upgrading to Dask 2.3.0:\r\n\r\n```\r\nfrom dask.distributed import Client, progress\r\nimport dask.dataframe as dd\r\n\r\ndf = dd.demo.make_timeseries('2010', '2016',\r\n {'value': float, 'name': str, 'id': int},\r\n freq='10s', partition_freq='7d', seed=1)\r\n\r\ndf = df.persist()\r\nprogress(df)\r\n```\r\n\r\nExecuting this in a single cell in JupyterLab (with an existing Dask cluster already running) results in:\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\n<ipython-input-1-16af814d7204> in <module>\r\n 7 \r\n 8 df = df.persist()\r\n----> 9 progress(df)\r\n\r\nTypeError: 'module' object is not callable\r\n```\r\n\r\nLet me know if I can provide any more info. Thanks!\n", "before_files": [{"content": "from . import config\nfrom dask.config import config\nfrom .actor import Actor, ActorFuture\nfrom .core import connect, rpc\nfrom .deploy import LocalCluster, Adaptive, SpecCluster\nfrom .diagnostics import progress\nfrom .client import (\n Client,\n Executor,\n CompatibleExecutor,\n wait,\n as_completed,\n default_client,\n fire_and_forget,\n Future,\n futures_of,\n get_task_stream,\n)\nfrom .lock import Lock\nfrom .nanny import Nanny\nfrom .pubsub import Pub, Sub\nfrom .queues import Queue\nfrom .scheduler import Scheduler\nfrom .threadpoolexecutor import rejoin\nfrom .utils import sync\nfrom .variable import Variable\nfrom .worker import Worker, get_worker, get_client, secede, Reschedule\nfrom .worker_client import local_client, worker_client\n\nfrom tornado.gen import TimeoutError\n\nfrom ._version import get_versions\n\nversions = get_versions()\n__version__ = versions[\"version\"]\n__git_revision__ = versions[\"full-revisionid\"]\ndel get_versions, versions\n", "path": "distributed/__init__.py"}]}
num_tokens_prompt: 1,079
num_tokens_diff: 104
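The traceback follows from `from .diagnostics import progress` binding the `diagnostics.progress` submodule rather than the `progress()` function defined in `diagnostics.progressbar`, which is why the golden diff pins the import to the function's real home. The error itself is generic and easy to reproduce without dask:

```python
import types

# Stand-in for what 2.3.0 ended up exporting: a module object, not a function.
progress = types.ModuleType("distributed.diagnostics.progress")

try:
    progress("not a collection of futures")
except TypeError as exc:
    print(exc)  # 'module' object is not callable
```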
problem_id: gh_patches_debug_18781
source: rasdani/github-patches
task_type: git_diff
in_source_id: ivy-llc__ivy-15979
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> selu </issue> <code> [start of ivy/functional/frontends/paddle/nn/functional/activation.py] 1 # local 2 from ivy.functional.frontends.paddle.tensor.math import tanh as paddle_tanh 3 from ivy.functional.frontends.paddle.tensor.math import ( 4 log_softmax as paddle_log_softmax, 5 ) 6 7 8 tanh = paddle_tanh 9 log_softmax = paddle_log_softmax 10 [end of ivy/functional/frontends/paddle/nn/functional/activation.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ivy/functional/frontends/paddle/nn/functional/activation.py b/ivy/functional/frontends/paddle/nn/functional/activation.py --- a/ivy/functional/frontends/paddle/nn/functional/activation.py +++ b/ivy/functional/frontends/paddle/nn/functional/activation.py @@ -1,9 +1,33 @@ # local +import ivy +from ivy.func_wrapper import with_supported_dtypes +from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back from ivy.functional.frontends.paddle.tensor.math import tanh as paddle_tanh from ivy.functional.frontends.paddle.tensor.math import ( log_softmax as paddle_log_softmax, ) +@with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle") +@to_ivy_arrays_and_back +def selu( + x, + /, + *, + alpha=1.6732632423543772848170429916717, + scale=1.0507009873554804934193349852946, + name=None, +): + if scale <= 1.0: + raise ValueError(f"The scale must be greater than 1.0. Received: {scale}.") + + if alpha < 0: + raise ValueError(f"The alpha must be no less than zero. Received: {alpha}.") + + ret = ivy.where(x > 0, x, alpha * ivy.expm1(x)) + arr = scale * ret + return ivy.astype(arr, x.dtype) + + tanh = paddle_tanh log_softmax = paddle_log_softmax
{"golden_diff": "diff --git a/ivy/functional/frontends/paddle/nn/functional/activation.py b/ivy/functional/frontends/paddle/nn/functional/activation.py\n--- a/ivy/functional/frontends/paddle/nn/functional/activation.py\n+++ b/ivy/functional/frontends/paddle/nn/functional/activation.py\n@@ -1,9 +1,33 @@\n # local\n+import ivy\n+from ivy.func_wrapper import with_supported_dtypes\n+from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\n from ivy.functional.frontends.paddle.tensor.math import tanh as paddle_tanh\n from ivy.functional.frontends.paddle.tensor.math import (\n log_softmax as paddle_log_softmax,\n )\n \n \n+@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n+@to_ivy_arrays_and_back\n+def selu(\n+ x,\n+ /,\n+ *,\n+ alpha=1.6732632423543772848170429916717,\n+ scale=1.0507009873554804934193349852946,\n+ name=None,\n+):\n+ if scale <= 1.0:\n+ raise ValueError(f\"The scale must be greater than 1.0. Received: {scale}.\")\n+\n+ if alpha < 0:\n+ raise ValueError(f\"The alpha must be no less than zero. Received: {alpha}.\")\n+\n+ ret = ivy.where(x > 0, x, alpha * ivy.expm1(x))\n+ arr = scale * ret\n+ return ivy.astype(arr, x.dtype)\n+\n+\n tanh = paddle_tanh\n log_softmax = paddle_log_softmax\n", "issue": "selu\n\n", "before_files": [{"content": "# local\nfrom ivy.functional.frontends.paddle.tensor.math import tanh as paddle_tanh\nfrom ivy.functional.frontends.paddle.tensor.math import (\n log_softmax as paddle_log_softmax,\n)\n\n\ntanh = paddle_tanh\nlog_softmax = paddle_log_softmax\n", "path": "ivy/functional/frontends/paddle/nn/functional/activation.py"}]}
num_tokens_prompt: 628
num_tokens_diff: 421
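The patch implements SELU, `scale * (x if x > 0 else alpha * (exp(x) - 1))`, with the canonical constants from Klambauer et al. (2017) as defaults. A reference sketch of the same math in NumPy rather than ivy, so it runs standalone:

```python
import numpy as np

def selu(x, alpha=1.6732632423543772, scale=1.0507009873554805):
    if scale <= 1.0:
        raise ValueError(f"The scale must be greater than 1.0. Received: {scale}.")
    if alpha < 0:
        raise ValueError(f"The alpha must be no less than zero. Received: {alpha}.")
    # expm1(x) computes exp(x) - 1 without losing precision near zero.
    return (scale * np.where(x > 0, x, alpha * np.expm1(x))).astype(x.dtype)

x = np.array([-2.0, -0.5, 0.0, 1.5], dtype=np.float32)
print(selu(x))  # approx. [-1.5202 -0.6918  0.      1.5761]
```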
problem_id: gh_patches_debug_41505
source: rasdani/github-patches
task_type: git_diff
in_source_id: great-expectations__great_expectations-3279
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Use cleaner solution for non-truncating division in python 2 Prefer `from __future__ import division` to `1.*x/y` </issue> <code> [start of great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_equal.py] 1 from dateutil.parser import parse 2 3 from great_expectations.execution_engine import ( 4 PandasExecutionEngine, 5 SqlAlchemyExecutionEngine, 6 ) 7 from great_expectations.expectations.metrics.import_manager import sa 8 from great_expectations.expectations.metrics.map_metric_provider import ( 9 ColumnPairMapMetricProvider, 10 column_pair_condition_partial, 11 ) 12 13 14 class ColumnPairValuesEqual(ColumnPairMapMetricProvider): 15 condition_metric_name = "column_pair_values.equal" 16 condition_domain_keys = ( 17 "batch_id", 18 "table", 19 "column_A", 20 "column_B", 21 "row_condition", 22 "condition_parser", 23 "ignore_row_if", 24 ) 25 condition_value_keys = () 26 27 # TODO: <Alex>ALEX -- temporarily only Pandas and SQL Alchemy implementations are provided (Spark to follow).</Alex> 28 @column_pair_condition_partial(engine=PandasExecutionEngine) 29 def _pandas(cls, column_A, column_B, **kwargs): 30 return column_A == column_B 31 32 @column_pair_condition_partial(engine=SqlAlchemyExecutionEngine) 33 def _sqlalchemy(cls, column_A, column_B, **kwargs): 34 return sa.case((column_A == column_B, True), else_=False) 35 [end of great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_equal.py] [start of great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_greater.py] 1 from dateutil.parser import parse 2 3 from great_expectations.execution_engine import ( 4 PandasExecutionEngine, 5 SqlAlchemyExecutionEngine, 6 ) 7 from great_expectations.expectations.metrics.import_manager import sa 8 from great_expectations.expectations.metrics.map_metric_provider import ( 9 ColumnPairMapMetricProvider, 10 column_pair_condition_partial, 11 ) 12 13 14 class ColumnPairValuesAGreaterThanB(ColumnPairMapMetricProvider): 15 condition_metric_name = "column_pair_values.a_greater_than_b" 16 condition_domain_keys = ( 17 "batch_id", 18 "table", 19 "column_A", 20 "column_B", 21 "row_condition", 22 "condition_parser", 23 "ignore_row_if", 24 ) 25 condition_value_keys = ( 26 "or_equal", 27 "parse_strings_as_datetimes", 28 "allow_cross_type_comparisons", 29 ) 30 31 # TODO: <Alex>ALEX -- temporarily only Pandas and SQL Alchemy implementations are provided (Spark to follow).</Alex> 32 # noinspection PyPep8Naming 33 @column_pair_condition_partial(engine=PandasExecutionEngine) 34 def _pandas(cls, column_A, column_B, **kwargs): 35 allow_cross_type_comparisons = kwargs.get("allow_cross_type_comparisons") 36 if allow_cross_type_comparisons: 37 raise NotImplementedError 38 39 parse_strings_as_datetimes = kwargs.get("parse_strings_as_datetimes") 40 if parse_strings_as_datetimes: 41 # noinspection PyPep8Naming 42 temp_column_A = column_A.map(parse) 43 # noinspection PyPep8Naming 44 temp_column_B = column_B.map(parse) 45 else: 46 temp_column_A = column_A 47 temp_column_B = column_B 48 49 or_equal = kwargs.get("or_equal") 50 if or_equal: 51 return temp_column_A >= temp_column_B 52 else: 53 return temp_column_A > temp_column_B 54 55 # noinspection PyPep8Naming 56 @column_pair_condition_partial(engine=SqlAlchemyExecutionEngine) 57 def _sqlalchemy(cls, column_A, column_B, **kwargs): 58 allow_cross_type_comparisons = 
kwargs.get("allow_cross_type_comparisons") 59 if allow_cross_type_comparisons: 60 raise NotImplementedError 61 62 parse_strings_as_datetimes = kwargs.get("parse_strings_as_datetimes") 63 if parse_strings_as_datetimes: 64 raise NotImplementedError 65 66 or_equal = kwargs.get("or_equal") 67 if or_equal: 68 return sa.case((column_A >= column_B, True), else_=False) 69 else: 70 return sa.case((column_A > column_B, True), else_=False) 71 [end of great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_greater.py] [start of great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py] 1 from great_expectations.execution_engine import PandasExecutionEngine 2 from great_expectations.expectations.metrics.map_metric_provider import ( 3 MulticolumnMapMetricProvider, 4 multicolumn_condition_partial, 5 ) 6 7 8 class SelectColumnValuesUniqueWithinRecord(MulticolumnMapMetricProvider): 9 condition_metric_name = "select_column_values.unique.within_record" 10 condition_domain_keys = ( 11 "batch_id", 12 "table", 13 "column_list", 14 "row_condition", 15 "condition_parser", 16 "ignore_row_if", 17 ) 18 19 # TODO: <Alex>ALEX -- temporarily only a Pandas implementation is provided (others to follow).</Alex> 20 @multicolumn_condition_partial(engine=PandasExecutionEngine) 21 def _pandas(cls, column_list, **kwargs): 22 num_columns = len(column_list.columns) 23 row_wise_cond = column_list.nunique(dropna=False, axis=1) >= num_columns 24 return row_wise_cond 25 [end of great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_equal.py b/great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_equal.py --- a/great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_equal.py +++ b/great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_equal.py @@ -1,5 +1,3 @@ -from dateutil.parser import parse - from great_expectations.execution_engine import ( PandasExecutionEngine, SqlAlchemyExecutionEngine, diff --git a/great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_greater.py b/great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_greater.py --- a/great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_greater.py +++ b/great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_greater.py @@ -38,9 +38,7 @@ parse_strings_as_datetimes = kwargs.get("parse_strings_as_datetimes") if parse_strings_as_datetimes: - # noinspection PyPep8Naming temp_column_A = column_A.map(parse) - # noinspection PyPep8Naming temp_column_B = column_B.map(parse) else: temp_column_A = column_A diff --git a/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py b/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py --- a/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py +++ b/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py @@ -1,9 +1,17 @@ -from great_expectations.execution_engine import PandasExecutionEngine +import logging + +from great_expectations.execution_engine import ( + PandasExecutionEngine, + SqlAlchemyExecutionEngine, +) +from great_expectations.expectations.metrics.import_manager import sa from great_expectations.expectations.metrics.map_metric_provider import ( MulticolumnMapMetricProvider, multicolumn_condition_partial, ) +logger = logging.getLogger(__name__) + class SelectColumnValuesUniqueWithinRecord(MulticolumnMapMetricProvider): condition_metric_name = "select_column_values.unique.within_record" @@ -16,9 +24,37 @@ "ignore_row_if", ) - # TODO: <Alex>ALEX -- temporarily only a Pandas implementation is provided (others to follow).</Alex> + # TODO: <Alex>ALEX -- temporarily only Pandas and SQL Alchemy implementations are provided (Spark to follow).</Alex> @multicolumn_condition_partial(engine=PandasExecutionEngine) def _pandas(cls, column_list, **kwargs): num_columns = len(column_list.columns) row_wise_cond = column_list.nunique(dropna=False, axis=1) >= num_columns return row_wise_cond + + @multicolumn_condition_partial(engine=SqlAlchemyExecutionEngine) + def _sqlalchemy(cls, column_list, **kwargs): + """ + The present approach relies on an inefficient query condition construction implementation, whose computational + cost is O(num_columns^2). However, until a more efficient implementation compatible with SQLAlchemy is + available, this is the only feasible mechanism under the current architecture, where map metric providers must + return a condition. Nevertheless, SQL query length limit is 1GB (sufficient for most practical scenarios). + """ + num_columns = len(column_list) + + # An arbitrary "num_columns" value used for issuing an explanatory message as a warning. 
+ if num_columns > 100: + logger.warning( + f"""Batch data with {num_columns} columns is detected. Computing the "{cls.condition_metric_name}" \ +metric for wide tables using SQLAlchemy leads to long WHERE clauses for the underlying database engine to process. +""" + ) + + condition = sa.or_() + for idx_src in range(num_columns - 1): + for idx_dest in range(idx_src + 1, num_columns): + condition = sa.or_( + condition, (column_list[idx_src] == column_list[idx_dest]) + ) + + condition = sa.not_(condition) + return sa.case((condition, True), else_=False)
{"golden_diff": "diff --git a/great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_equal.py b/great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_equal.py\n--- a/great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_equal.py\n+++ b/great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_equal.py\n@@ -1,5 +1,3 @@\n-from dateutil.parser import parse\n-\n from great_expectations.execution_engine import (\n PandasExecutionEngine,\n SqlAlchemyExecutionEngine,\ndiff --git a/great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_greater.py b/great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_greater.py\n--- a/great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_greater.py\n+++ b/great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_greater.py\n@@ -38,9 +38,7 @@\n \n parse_strings_as_datetimes = kwargs.get(\"parse_strings_as_datetimes\")\n if parse_strings_as_datetimes:\n- # noinspection PyPep8Naming\n temp_column_A = column_A.map(parse)\n- # noinspection PyPep8Naming\n temp_column_B = column_B.map(parse)\n else:\n temp_column_A = column_A\ndiff --git a/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py b/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py\n--- a/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py\n+++ b/great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py\n@@ -1,9 +1,17 @@\n-from great_expectations.execution_engine import PandasExecutionEngine\n+import logging\n+\n+from great_expectations.execution_engine import (\n+ PandasExecutionEngine,\n+ SqlAlchemyExecutionEngine,\n+)\n+from great_expectations.expectations.metrics.import_manager import sa\n from great_expectations.expectations.metrics.map_metric_provider import (\n MulticolumnMapMetricProvider,\n multicolumn_condition_partial,\n )\n \n+logger = logging.getLogger(__name__)\n+\n \n class SelectColumnValuesUniqueWithinRecord(MulticolumnMapMetricProvider):\n condition_metric_name = \"select_column_values.unique.within_record\"\n@@ -16,9 +24,37 @@\n \"ignore_row_if\",\n )\n \n- # TODO: <Alex>ALEX -- temporarily only a Pandas implementation is provided (others to follow).</Alex>\n+ # TODO: <Alex>ALEX -- temporarily only Pandas and SQL Alchemy implementations are provided (Spark to follow).</Alex>\n @multicolumn_condition_partial(engine=PandasExecutionEngine)\n def _pandas(cls, column_list, **kwargs):\n num_columns = len(column_list.columns)\n row_wise_cond = column_list.nunique(dropna=False, axis=1) >= num_columns\n return row_wise_cond\n+\n+ @multicolumn_condition_partial(engine=SqlAlchemyExecutionEngine)\n+ def _sqlalchemy(cls, column_list, **kwargs):\n+ \"\"\"\n+ The present approach relies on an inefficient query condition construction implementation, whose computational\n+ cost is O(num_columns^2). However, until a more efficient implementation compatible with SQLAlchemy is\n+ available, this is the only feasible mechanism under the current architecture, where map metric providers must\n+ return a condition. 
Nevertheless, SQL query length limit is 1GB (sufficient for most practical scenarios).\n+ \"\"\"\n+ num_columns = len(column_list)\n+\n+ # An arbitrary \"num_columns\" value used for issuing an explanatory message as a warning.\n+ if num_columns > 100:\n+ logger.warning(\n+ f\"\"\"Batch data with {num_columns} columns is detected. Computing the \"{cls.condition_metric_name}\" \\\n+metric for wide tables using SQLAlchemy leads to long WHERE clauses for the underlying database engine to process.\n+\"\"\"\n+ )\n+\n+ condition = sa.or_()\n+ for idx_src in range(num_columns - 1):\n+ for idx_dest in range(idx_src + 1, num_columns):\n+ condition = sa.or_(\n+ condition, (column_list[idx_src] == column_list[idx_dest])\n+ )\n+\n+ condition = sa.not_(condition)\n+ return sa.case((condition, True), else_=False)\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "from dateutil.parser import parse\n\nfrom great_expectations.execution_engine import (\n PandasExecutionEngine,\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.expectations.metrics.import_manager import sa\nfrom great_expectations.expectations.metrics.map_metric_provider import (\n ColumnPairMapMetricProvider,\n column_pair_condition_partial,\n)\n\n\nclass ColumnPairValuesEqual(ColumnPairMapMetricProvider):\n condition_metric_name = \"column_pair_values.equal\"\n condition_domain_keys = (\n \"batch_id\",\n \"table\",\n \"column_A\",\n \"column_B\",\n \"row_condition\",\n \"condition_parser\",\n \"ignore_row_if\",\n )\n condition_value_keys = ()\n\n # TODO: <Alex>ALEX -- temporarily only Pandas and SQL Alchemy implementations are provided (Spark to follow).</Alex>\n @column_pair_condition_partial(engine=PandasExecutionEngine)\n def _pandas(cls, column_A, column_B, **kwargs):\n return column_A == column_B\n\n @column_pair_condition_partial(engine=SqlAlchemyExecutionEngine)\n def _sqlalchemy(cls, column_A, column_B, **kwargs):\n return sa.case((column_A == column_B, True), else_=False)\n", "path": "great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_equal.py"}, {"content": "from dateutil.parser import parse\n\nfrom great_expectations.execution_engine import (\n PandasExecutionEngine,\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.expectations.metrics.import_manager import sa\nfrom great_expectations.expectations.metrics.map_metric_provider import (\n ColumnPairMapMetricProvider,\n column_pair_condition_partial,\n)\n\n\nclass ColumnPairValuesAGreaterThanB(ColumnPairMapMetricProvider):\n condition_metric_name = \"column_pair_values.a_greater_than_b\"\n condition_domain_keys = (\n \"batch_id\",\n \"table\",\n \"column_A\",\n \"column_B\",\n \"row_condition\",\n \"condition_parser\",\n \"ignore_row_if\",\n )\n condition_value_keys = (\n \"or_equal\",\n \"parse_strings_as_datetimes\",\n \"allow_cross_type_comparisons\",\n )\n\n # TODO: <Alex>ALEX -- temporarily only Pandas and SQL Alchemy implementations are provided (Spark to follow).</Alex>\n # noinspection PyPep8Naming\n @column_pair_condition_partial(engine=PandasExecutionEngine)\n def _pandas(cls, column_A, column_B, **kwargs):\n allow_cross_type_comparisons = kwargs.get(\"allow_cross_type_comparisons\")\n if allow_cross_type_comparisons:\n raise NotImplementedError\n\n parse_strings_as_datetimes = kwargs.get(\"parse_strings_as_datetimes\")\n if parse_strings_as_datetimes:\n # noinspection PyPep8Naming\n temp_column_A = 
column_A.map(parse)\n # noinspection PyPep8Naming\n temp_column_B = column_B.map(parse)\n else:\n temp_column_A = column_A\n temp_column_B = column_B\n\n or_equal = kwargs.get(\"or_equal\")\n if or_equal:\n return temp_column_A >= temp_column_B\n else:\n return temp_column_A > temp_column_B\n\n # noinspection PyPep8Naming\n @column_pair_condition_partial(engine=SqlAlchemyExecutionEngine)\n def _sqlalchemy(cls, column_A, column_B, **kwargs):\n allow_cross_type_comparisons = kwargs.get(\"allow_cross_type_comparisons\")\n if allow_cross_type_comparisons:\n raise NotImplementedError\n\n parse_strings_as_datetimes = kwargs.get(\"parse_strings_as_datetimes\")\n if parse_strings_as_datetimes:\n raise NotImplementedError\n\n or_equal = kwargs.get(\"or_equal\")\n if or_equal:\n return sa.case((column_A >= column_B, True), else_=False)\n else:\n return sa.case((column_A > column_B, True), else_=False)\n", "path": "great_expectations/expectations/metrics/column_pair_map_metrics/column_pair_values_greater.py"}, {"content": "from great_expectations.execution_engine import PandasExecutionEngine\nfrom great_expectations.expectations.metrics.map_metric_provider import (\n MulticolumnMapMetricProvider,\n multicolumn_condition_partial,\n)\n\n\nclass SelectColumnValuesUniqueWithinRecord(MulticolumnMapMetricProvider):\n condition_metric_name = \"select_column_values.unique.within_record\"\n condition_domain_keys = (\n \"batch_id\",\n \"table\",\n \"column_list\",\n \"row_condition\",\n \"condition_parser\",\n \"ignore_row_if\",\n )\n\n # TODO: <Alex>ALEX -- temporarily only a Pandas implementation is provided (others to follow).</Alex>\n @multicolumn_condition_partial(engine=PandasExecutionEngine)\n def _pandas(cls, column_list, **kwargs):\n num_columns = len(column_list.columns)\n row_wise_cond = column_list.nunique(dropna=False, axis=1) >= num_columns\n return row_wise_cond\n", "path": "great_expectations/expectations/metrics/multicolumn_map_metrics/select_column_values_unique_within_record.py"}]}
num_tokens_prompt: 1,907
num_tokens_diff: 975
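The new `_sqlalchemy` implementation builds an O(num_columns^2) condition: a row passes when no pair of selected columns is equal. A standalone sketch with SQLAlchemy Core and a hypothetical three-column table; it collects the comparisons in a list instead of seeding with the legacy no-argument `sa.or_()`:

```python
import sqlalchemy as sa

metadata = sa.MetaData()
t = sa.Table(
    "t", metadata,
    sa.Column("a", sa.Integer),
    sa.Column("b", sa.Integer),
    sa.Column("c", sa.Integer),
)
column_list = [t.c.a, t.c.b, t.c.c]

# Pairwise comparisons, mirroring the nested loops in the metric provider.
comparisons = [
    column_list[i] == column_list[j]
    for i in range(len(column_list) - 1)
    for j in range(i + 1, len(column_list))
]
row_is_unique = sa.not_(sa.or_(*comparisons))

print(sa.select(t).where(row_is_unique))
# SELECT t.a, t.b, t.c FROM t
# WHERE NOT (t.a = t.b OR t.a = t.c OR t.b = t.c)
```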
problem_id: gh_patches_debug_27085
source: rasdani/github-patches
task_type: git_diff
in_source_id: fossasia__open-event-server-2825
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Event does not show up on "manage events" page when it is a copy When the user copies an event and edits it, it does not show up on the event management page. ![screenshot from 2017-01-01 14-01-06](https://cloud.githubusercontent.com/assets/1583873/21581272/d8591f9e-d02a-11e6-8325-2c707636ee5d.png) </issue> <code> [start of app/helpers/wizard/clone.py] 1 from sqlalchemy.orm import make_transient 2 3 from app.helpers.data import save_to_db 4 from app.helpers.data_getter import DataGetter 5 from app.models import db 6 7 8 def clone_row(row, event_id=None): 9 db.session.expunge(row) 10 make_transient(row) 11 row.id = None 12 if event_id: 13 row.event_id = event_id 14 save_to_db(row) 15 db.session.flush() 16 return row 17 18 19 def create_event_copy(event_id): 20 old_event = DataGetter.get_event(event_id) 21 event = clone_row(old_event) 22 event.name = "Copy of " + event.name 23 event.state = "Draft" 24 save_to_db(event) 25 26 sponsors_old = DataGetter.get_sponsors(event_id).all() 27 tracks_old = DataGetter.get_tracks(event_id).all() 28 microlocations_old = DataGetter.get_microlocations(event_id).all() 29 call_for_paper_old = DataGetter.get_call_for_papers(event_id).first() 30 social_links = DataGetter.get_social_links_by_event_id(event_id).all() 31 custom_forms = DataGetter.get_custom_form_elements(event_id) 32 33 for social_link in social_links: 34 clone_row(social_link, event.id) 35 36 for sponsor in sponsors_old: 37 clone_row(sponsor, event.id) 38 39 for track in tracks_old: 40 clone_row(track, event.id) 41 42 for microlocation in microlocations_old: 43 clone_row(microlocation, event.id) 44 45 if call_for_paper_old: 46 clone_row(call_for_paper_old, event.id) 47 48 if custom_forms: 49 clone_row(custom_forms, event.id) 50 51 return event 52 [end of app/helpers/wizard/clone.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/app/helpers/wizard/clone.py b/app/helpers/wizard/clone.py --- a/app/helpers/wizard/clone.py +++ b/app/helpers/wizard/clone.py @@ -1,8 +1,13 @@ from sqlalchemy.orm import make_transient +from flask.ext import login from app.helpers.data import save_to_db from app.helpers.data_getter import DataGetter from app.models import db +from app.models.users_events_roles import UsersEventsRoles +from app.models.role import Role +from app.models.email_notifications import EmailNotification +from app.models.user import ORGANIZER def clone_row(row, event_id=None): @@ -23,6 +28,17 @@ event.state = "Draft" save_to_db(event) + role = Role.query.filter_by(name=ORGANIZER).first() + uer = UsersEventsRoles(login.current_user, event, role) + if save_to_db(uer, "Event saved"): + new_email_notification_setting = EmailNotification(next_event=1, + new_paper=1, + session_schedule=1, + session_accept_reject=1, + user_id=login.current_user.id, + event_id=event.id) + save_to_db(new_email_notification_setting, "EmailSetting Saved") + sponsors_old = DataGetter.get_sponsors(event_id).all() tracks_old = DataGetter.get_tracks(event_id).all() microlocations_old = DataGetter.get_microlocations(event_id).all()
{"golden_diff": "diff --git a/app/helpers/wizard/clone.py b/app/helpers/wizard/clone.py\n--- a/app/helpers/wizard/clone.py\n+++ b/app/helpers/wizard/clone.py\n@@ -1,8 +1,13 @@\n from sqlalchemy.orm import make_transient\n+from flask.ext import login\n \n from app.helpers.data import save_to_db\n from app.helpers.data_getter import DataGetter\n from app.models import db\n+from app.models.users_events_roles import UsersEventsRoles\n+from app.models.role import Role\n+from app.models.email_notifications import EmailNotification\n+from app.models.user import ORGANIZER\n \n \n def clone_row(row, event_id=None):\n@@ -23,6 +28,17 @@\n event.state = \"Draft\"\n save_to_db(event)\n \n+ role = Role.query.filter_by(name=ORGANIZER).first()\n+ uer = UsersEventsRoles(login.current_user, event, role)\n+ if save_to_db(uer, \"Event saved\"):\n+ new_email_notification_setting = EmailNotification(next_event=1,\n+ new_paper=1,\n+ session_schedule=1,\n+ session_accept_reject=1,\n+ user_id=login.current_user.id,\n+ event_id=event.id)\n+ save_to_db(new_email_notification_setting, \"EmailSetting Saved\")\n+\n sponsors_old = DataGetter.get_sponsors(event_id).all()\n tracks_old = DataGetter.get_tracks(event_id).all()\n microlocations_old = DataGetter.get_microlocations(event_id).all()\n", "issue": "Event does not show up on \"manage events\" page when it is a copy\nWhen the user copies an event and edits it, it does not show up on the event management page.\r\n\r\n![screenshot from 2017-01-01 14-01-06](https://cloud.githubusercontent.com/assets/1583873/21581272/d8591f9e-d02a-11e6-8325-2c707636ee5d.png)\r\n\n", "before_files": [{"content": "from sqlalchemy.orm import make_transient\n\nfrom app.helpers.data import save_to_db\nfrom app.helpers.data_getter import DataGetter\nfrom app.models import db\n\n\ndef clone_row(row, event_id=None):\n db.session.expunge(row)\n make_transient(row)\n row.id = None\n if event_id:\n row.event_id = event_id\n save_to_db(row)\n db.session.flush()\n return row\n\n\ndef create_event_copy(event_id):\n old_event = DataGetter.get_event(event_id)\n event = clone_row(old_event)\n event.name = \"Copy of \" + event.name\n event.state = \"Draft\"\n save_to_db(event)\n\n sponsors_old = DataGetter.get_sponsors(event_id).all()\n tracks_old = DataGetter.get_tracks(event_id).all()\n microlocations_old = DataGetter.get_microlocations(event_id).all()\n call_for_paper_old = DataGetter.get_call_for_papers(event_id).first()\n social_links = DataGetter.get_social_links_by_event_id(event_id).all()\n custom_forms = DataGetter.get_custom_form_elements(event_id)\n\n for social_link in social_links:\n clone_row(social_link, event.id)\n\n for sponsor in sponsors_old:\n clone_row(sponsor, event.id)\n\n for track in tracks_old:\n clone_row(track, event.id)\n\n for microlocation in microlocations_old:\n clone_row(microlocation, event.id)\n\n if call_for_paper_old:\n clone_row(call_for_paper_old, event.id)\n\n if custom_forms:\n clone_row(custom_forms, event.id)\n\n return event\n", "path": "app/helpers/wizard/clone.py"}]}
num_tokens_prompt: 1,104
num_tokens_diff: 325
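The underlying cause is that the "manage events" page lists events through `UsersEventsRoles`, so a cloned event with no role row for the current user never shows up, and the fix creates an ORGANIZER role (plus email-notification defaults) right after the clone is saved. A toy model of the visibility rule with stand-in classes, not the real Open Event models:

```python
class Event:
    def __init__(self, id, name):
        self.id, self.name = id, name

class UsersEventsRoles:
    def __init__(self, user_id, event_id, role):
        self.user_id, self.event_id, self.role = user_id, event_id, role

events = [Event(10, "PyCon"), Event(11, "Copy of PyCon")]  # 11 is the clone
uers = [UsersEventsRoles(user_id=1, event_id=10, role="organizer")]

def manage_events(user_id):
    visible = {u.event_id for u in uers if u.user_id == user_id}
    return [e.name for e in events if e.id in visible]

print(manage_events(1))  # ['PyCon']: the clone is invisible without a role

uers.append(UsersEventsRoles(user_id=1, event_id=11, role="organizer"))
print(manage_events(1))  # ['PyCon', 'Copy of PyCon']
```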
problem_id: gh_patches_debug_31040
source: rasdani/github-patches
task_type: git_diff
in_source_id: pyinstaller__pyinstaller-4372
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> distutils not included with latest virtualenv (16.4.0) This was already reported in #4031. The issue was closed without a fix so I'm creating this one. **With virtualenv 16.4.0, pyinstaller reports :** ``` 3583 INFO: distutils: retargeting to non-venv dir '/usr/lib64/python3.6/distutils/__init__.py' ``` and then during "Loading module hook" sequence, the `hook-distutils.py` is missing and distutils modules are not included into the final executable binary. When executing the binary the error is: ``` ModuleNotFoundError: No module named 'distutils' [10373] Failed to execute script <name here> ``` **With virtualenv 16.1.0, pyinstaller reports :** ``` 3157 INFO: Processing pre-find module path hook distutils 5053 INFO: Loading module hook "hook-distutils.py"... ``` and distutils modules are included into the final executable binary. </issue> <code> [start of PyInstaller/hooks/pre_find_module_path/hook-distutils.py] 1 #----------------------------------------------------------------------------- 2 # Copyright (c) 2005-2019, PyInstaller Development Team. 3 # 4 # Distributed under the terms of the GNU General Public License with exception 5 # for distributing bootloader. 6 # 7 # The full license is in the file COPYING.txt, distributed with this software. 8 #----------------------------------------------------------------------------- 9 10 """ 11 `distutils`-specific pre-find module path hook. 12 13 When run from within a venv (virtual environment), this hook changes the 14 `__path__` of the `distutils` package to that of the system-wide rather than 15 venv-specific `distutils` package. While the former is suitable for freezing, 16 the latter is intended for use _only_ from within venvs. 17 """ 18 19 20 import distutils 21 import os 22 23 from PyInstaller.utils.hooks import logger 24 25 26 def pre_find_module_path(api): 27 # Absolute path of the system-wide "distutils" package when run from within 28 # a venv or None otherwise. 29 distutils_dir = getattr(distutils, 'distutils_path', None) 30 if distutils_dir is not None: 31 # Find this package in its parent directory. 32 api.search_dirs = [os.path.dirname(distutils_dir)] 33 logger.info('distutils: retargeting to non-venv dir %r' % distutils_dir) 34 [end of PyInstaller/hooks/pre_find_module_path/hook-distutils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/PyInstaller/hooks/pre_find_module_path/hook-distutils.py b/PyInstaller/hooks/pre_find_module_path/hook-distutils.py --- a/PyInstaller/hooks/pre_find_module_path/hook-distutils.py +++ b/PyInstaller/hooks/pre_find_module_path/hook-distutils.py @@ -1,11 +1,11 @@ -#----------------------------------------------------------------------------- +# ----------------------------------------------------------------------------- # Copyright (c) 2005-2019, PyInstaller Development Team. # # Distributed under the terms of the GNU General Public License with exception # for distributing bootloader. # # The full license is in the file COPYING.txt, distributed with this software. -#----------------------------------------------------------------------------- +# ----------------------------------------------------------------------------- """ `distutils`-specific pre-find module path hook. @@ -26,8 +26,16 @@ def pre_find_module_path(api): # Absolute path of the system-wide "distutils" package when run from within # a venv or None otherwise. - distutils_dir = getattr(distutils, 'distutils_path', None) - if distutils_dir is not None: + + # opcode is not a virtualenv module, so we can use it to find the stdlib. + # Technique taken from virtualenv's "distutils" package detection at + # https://github.com/pypa/virtualenv/blob/16.3.0/virtualenv_embedded/distutils-init.py#L5 + import opcode + + system_module_path = os.path.normpath(os.path.dirname(opcode.__file__)) + loaded_module_path = os.path.normpath(os.path.dirname(distutils.__file__)) + if system_module_path != loaded_module_path: # Find this package in its parent directory. - api.search_dirs = [os.path.dirname(distutils_dir)] - logger.info('distutils: retargeting to non-venv dir %r' % distutils_dir) + api.search_dirs = [system_module_path] + logger.info('distutils: retargeting to non-venv dir %r', + system_module_path)
{"golden_diff": "diff --git a/PyInstaller/hooks/pre_find_module_path/hook-distutils.py b/PyInstaller/hooks/pre_find_module_path/hook-distutils.py\n--- a/PyInstaller/hooks/pre_find_module_path/hook-distutils.py\n+++ b/PyInstaller/hooks/pre_find_module_path/hook-distutils.py\n@@ -1,11 +1,11 @@\n-#-----------------------------------------------------------------------------\n+# -----------------------------------------------------------------------------\n # Copyright (c) 2005-2019, PyInstaller Development Team.\n #\n # Distributed under the terms of the GNU General Public License with exception\n # for distributing bootloader.\n #\n # The full license is in the file COPYING.txt, distributed with this software.\n-#-----------------------------------------------------------------------------\n+# -----------------------------------------------------------------------------\n \n \"\"\"\n `distutils`-specific pre-find module path hook.\n@@ -26,8 +26,16 @@\n def pre_find_module_path(api):\n # Absolute path of the system-wide \"distutils\" package when run from within\n # a venv or None otherwise.\n- distutils_dir = getattr(distutils, 'distutils_path', None)\n- if distutils_dir is not None:\n+\n+ # opcode is not a virtualenv module, so we can use it to find the stdlib.\n+ # Technique taken from virtualenv's \"distutils\" package detection at\n+ # https://github.com/pypa/virtualenv/blob/16.3.0/virtualenv_embedded/distutils-init.py#L5\n+ import opcode\n+\n+ system_module_path = os.path.normpath(os.path.dirname(opcode.__file__))\n+ loaded_module_path = os.path.normpath(os.path.dirname(distutils.__file__))\n+ if system_module_path != loaded_module_path:\n # Find this package in its parent directory.\n- api.search_dirs = [os.path.dirname(distutils_dir)]\n- logger.info('distutils: retargeting to non-venv dir %r' % distutils_dir)\n+ api.search_dirs = [system_module_path]\n+ logger.info('distutils: retargeting to non-venv dir %r',\n+ system_module_path)\n", "issue": "distutils not included with latest virtualenv (16.4.0)\nThis was already reported in #4031. 
The issue was closed without a fix so I'm creating this one.\r\n\r\n**With virtualenv 16.4.0, pyinstaller reports :**\r\n\r\n```\r\n3583 INFO: distutils: retargeting to non-venv dir '/usr/lib64/python3.6/distutils/__init__.py'\r\n```\r\nand then during \"Loading module hook\" sequence, the `hook-distutils.py` is missing and distutils modules are not included into the final executable binary.\r\n\r\nWhen executing the binary the error is:\r\n\r\n```\r\nModuleNotFoundError: No module named 'distutils'\r\n[10373] Failed to execute script <name here>\r\n```\r\n\r\n**With virtualenv 16.1.0, pyinstaller reports :**\r\n\r\n```\r\n3157 INFO: Processing pre-find module path hook distutils\r\n5053 INFO: Loading module hook \"hook-distutils.py\"...\r\n```\r\n\r\nand distutils modules are included into the final executable binary.\r\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2019, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n\"\"\"\n`distutils`-specific pre-find module path hook.\n\nWhen run from within a venv (virtual environment), this hook changes the\n`__path__` of the `distutils` package to that of the system-wide rather than\nvenv-specific `distutils` package. While the former is suitable for freezing,\nthe latter is intended for use _only_ from within venvs.\n\"\"\"\n\n\nimport distutils\nimport os\n\nfrom PyInstaller.utils.hooks import logger\n\n\ndef pre_find_module_path(api):\n # Absolute path of the system-wide \"distutils\" package when run from within\n # a venv or None otherwise.\n distutils_dir = getattr(distutils, 'distutils_path', None)\n if distutils_dir is not None:\n # Find this package in its parent directory.\n api.search_dirs = [os.path.dirname(distutils_dir)]\n logger.info('distutils: retargeting to non-venv dir %r' % distutils_dir)\n", "path": "PyInstaller/hooks/pre_find_module_path/hook-distutils.py"}]}
1,119
445
gh_patches_debug_14392
rasdani/github-patches
git_diff
pre-commit__pre-commit-216
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> pre-commit potentially uses the wrong `virtualenv` when building environments It should use `sys.executable, '-m', 'virtualenv'` instead of `'virtualenv'` </issue> <code> [start of pre_commit/languages/python.py] 1 from __future__ import unicode_literals 2 3 import contextlib 4 import distutils.spawn 5 import os 6 7 import virtualenv 8 9 from pre_commit.languages import helpers 10 from pre_commit.util import clean_path_on_failure 11 12 13 ENVIRONMENT_DIR = 'py_env' 14 15 16 class PythonEnv(helpers.Environment): 17 @property 18 def env_prefix(self): 19 return ". '{{prefix}}{0}activate' &&".format( 20 virtualenv.path_locations( 21 ENVIRONMENT_DIR, 22 )[-1].rstrip(os.sep) + os.sep, 23 'activate', 24 ) 25 26 27 @contextlib.contextmanager 28 def in_env(repo_cmd_runner): 29 yield PythonEnv(repo_cmd_runner) 30 31 32 def norm_version(version): 33 if os.name == 'nt': # pragma: no cover (windows) 34 if not distutils.spawn.find_executable(version): 35 # The default place for python on windows is: 36 # C:\PythonXX\python.exe 37 version = r'C:\{0}\python.exe'.format(version.replace('.', '')) 38 return version 39 40 41 def install_environment(repo_cmd_runner, version='default'): 42 assert repo_cmd_runner.exists('setup.py') 43 44 # Install a virtualenv 45 with clean_path_on_failure(repo_cmd_runner.path(ENVIRONMENT_DIR)): 46 venv_cmd = ['virtualenv', '{{prefix}}{0}'.format(ENVIRONMENT_DIR)] 47 if version != 'default': 48 venv_cmd.extend(['-p', norm_version(version)]) 49 repo_cmd_runner.run(venv_cmd) 50 with in_env(repo_cmd_runner) as env: 51 env.run("cd '{prefix}' && pip install .") 52 53 54 def run_hook(repo_cmd_runner, hook, file_args): 55 with in_env(repo_cmd_runner) as env: 56 return helpers.run_hook(env, hook, file_args) 57 [end of pre_commit/languages/python.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pre_commit/languages/python.py b/pre_commit/languages/python.py --- a/pre_commit/languages/python.py +++ b/pre_commit/languages/python.py @@ -3,6 +3,7 @@ import contextlib import distutils.spawn import os +import sys import virtualenv @@ -43,7 +44,10 @@ # Install a virtualenv with clean_path_on_failure(repo_cmd_runner.path(ENVIRONMENT_DIR)): - venv_cmd = ['virtualenv', '{{prefix}}{0}'.format(ENVIRONMENT_DIR)] + venv_cmd = [ + sys.executable, '-m', 'virtualenv', + '{{prefix}}{0}'.format(ENVIRONMENT_DIR) + ] if version != 'default': venv_cmd.extend(['-p', norm_version(version)]) repo_cmd_runner.run(venv_cmd)
{"golden_diff": "diff --git a/pre_commit/languages/python.py b/pre_commit/languages/python.py\n--- a/pre_commit/languages/python.py\n+++ b/pre_commit/languages/python.py\n@@ -3,6 +3,7 @@\n import contextlib\n import distutils.spawn\n import os\n+import sys\n \n import virtualenv\n \n@@ -43,7 +44,10 @@\n \n # Install a virtualenv\n with clean_path_on_failure(repo_cmd_runner.path(ENVIRONMENT_DIR)):\n- venv_cmd = ['virtualenv', '{{prefix}}{0}'.format(ENVIRONMENT_DIR)]\n+ venv_cmd = [\n+ sys.executable, '-m', 'virtualenv',\n+ '{{prefix}}{0}'.format(ENVIRONMENT_DIR)\n+ ]\n if version != 'default':\n venv_cmd.extend(['-p', norm_version(version)])\n repo_cmd_runner.run(venv_cmd)\n", "issue": "pre-commit potentially uses the wrong `virtualenv` when building environments\nIt should use `sys.executable, '-m', 'virtualenv'` instead of `'virtualenv'`\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport distutils.spawn\nimport os\n\nimport virtualenv\n\nfrom pre_commit.languages import helpers\nfrom pre_commit.util import clean_path_on_failure\n\n\nENVIRONMENT_DIR = 'py_env'\n\n\nclass PythonEnv(helpers.Environment):\n @property\n def env_prefix(self):\n return \". '{{prefix}}{0}activate' &&\".format(\n virtualenv.path_locations(\n ENVIRONMENT_DIR,\n )[-1].rstrip(os.sep) + os.sep,\n 'activate',\n )\n\n\[email protected]\ndef in_env(repo_cmd_runner):\n yield PythonEnv(repo_cmd_runner)\n\n\ndef norm_version(version):\n if os.name == 'nt': # pragma: no cover (windows)\n if not distutils.spawn.find_executable(version):\n # The default place for python on windows is:\n # C:\\PythonXX\\python.exe\n version = r'C:\\{0}\\python.exe'.format(version.replace('.', ''))\n return version\n\n\ndef install_environment(repo_cmd_runner, version='default'):\n assert repo_cmd_runner.exists('setup.py')\n\n # Install a virtualenv\n with clean_path_on_failure(repo_cmd_runner.path(ENVIRONMENT_DIR)):\n venv_cmd = ['virtualenv', '{{prefix}}{0}'.format(ENVIRONMENT_DIR)]\n if version != 'default':\n venv_cmd.extend(['-p', norm_version(version)])\n repo_cmd_runner.run(venv_cmd)\n with in_env(repo_cmd_runner) as env:\n env.run(\"cd '{prefix}' && pip install .\")\n\n\ndef run_hook(repo_cmd_runner, hook, file_args):\n with in_env(repo_cmd_runner) as env:\n return helpers.run_hook(env, hook, file_args)\n", "path": "pre_commit/languages/python.py"}]}
1,058
192
gh_patches_debug_30909
rasdani/github-patches
git_diff
ephios-dev__ephios-1012
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Improve explanation for page slugs Users did not understand what the page slug means. We should provide a help text, either directly or in the docs. </issue> <code> [start of ephios/plugins/pages/models.py] 1 from django.db import models 2 from django.utils.translation import gettext_lazy as _ 3 4 5 class Page(models.Model): 6 title = models.CharField(verbose_name=_("Title"), max_length=250) 7 content = models.TextField(_("Content"), blank=True) 8 slug = models.SlugField(_("Slug"), max_length=250, unique=True) 9 show_in_footer = models.BooleanField(_("Show in footer"), default=False) 10 publicly_visible = models.BooleanField(_("Publicly visible"), default=False) 11 12 def __str__(self): 13 return str(self.title) 14 15 class Meta: 16 verbose_name = "Page" 17 verbose_name_plural = "Pages" 18 [end of ephios/plugins/pages/models.py] [start of ephios/plugins/pages/views.py] 1 from django.contrib import messages 2 from django.contrib.auth.views import redirect_to_login 3 from django.urls import reverse 4 from django.utils.translation import gettext as _ 5 from django.views.generic import CreateView, DeleteView, DetailView, ListView, UpdateView 6 7 from ephios.extra.mixins import CustomPermissionRequiredMixin 8 from ephios.plugins.pages.models import Page 9 10 11 class PageListView(CustomPermissionRequiredMixin, ListView): 12 model = Page 13 permission_required = "pages.add_page" 14 15 16 class PageView(DetailView): 17 model = Page 18 19 def setup(self, request, *args, **kwargs): 20 super().setup(request, *args, **kwargs) 21 self.object = self.get_object() 22 23 def dispatch(self, request, *args, **kwargs): 24 if not request.user.is_authenticated and not self.object.publicly_visible: 25 return redirect_to_login(self.request.get_full_path()) 26 return super().dispatch(request, *args, **kwargs) 27 28 29 class PageCreateView(CustomPermissionRequiredMixin, CreateView): 30 model = Page 31 permission_required = "pages.add_page" 32 fields = ["title", "content", "slug", "show_in_footer", "publicly_visible"] 33 34 def get_success_url(self): 35 messages.success(self.request, _("Page saved successfully.")) 36 return reverse("pages:settings_page_list") 37 38 39 class PageUpdateView(CustomPermissionRequiredMixin, UpdateView): 40 model = Page 41 permission_required = "pages.change_page" 42 fields = ["title", "content", "slug", "show_in_footer", "publicly_visible"] 43 44 def get_success_url(self): 45 messages.success(self.request, _("Page saved successfully.")) 46 return reverse("pages:settings_page_list") 47 48 49 class PageDeleteView(CustomPermissionRequiredMixin, DeleteView): 50 model = Page 51 permission_required = "pages.delete_page" 52 53 def get_success_url(self): 54 messages.info(self.request, _("Page deleted successfully.")) 55 return reverse("pages:settings_page_list") 56 [end of ephios/plugins/pages/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ephios/plugins/pages/models.py b/ephios/plugins/pages/models.py --- a/ephios/plugins/pages/models.py +++ b/ephios/plugins/pages/models.py @@ -5,7 +5,12 @@ class Page(models.Model): title = models.CharField(verbose_name=_("Title"), max_length=250) content = models.TextField(_("Content"), blank=True) - slug = models.SlugField(_("Slug"), max_length=250, unique=True) + slug = models.SlugField( + _("URL slug"), + help_text=_("The slug is used to generate the page's URL."), + max_length=250, + unique=True, + ) show_in_footer = models.BooleanField(_("Show in footer"), default=False) publicly_visible = models.BooleanField(_("Publicly visible"), default=False) diff --git a/ephios/plugins/pages/views.py b/ephios/plugins/pages/views.py --- a/ephios/plugins/pages/views.py +++ b/ephios/plugins/pages/views.py @@ -29,7 +29,7 @@ class PageCreateView(CustomPermissionRequiredMixin, CreateView): model = Page permission_required = "pages.add_page" - fields = ["title", "content", "slug", "show_in_footer", "publicly_visible"] + fields = ["title", "slug", "content", "show_in_footer", "publicly_visible"] def get_success_url(self): messages.success(self.request, _("Page saved successfully.")) @@ -39,7 +39,7 @@ class PageUpdateView(CustomPermissionRequiredMixin, UpdateView): model = Page permission_required = "pages.change_page" - fields = ["title", "content", "slug", "show_in_footer", "publicly_visible"] + fields = ["title", "slug", "content", "show_in_footer", "publicly_visible"] def get_success_url(self): messages.success(self.request, _("Page saved successfully."))
{"golden_diff": "diff --git a/ephios/plugins/pages/models.py b/ephios/plugins/pages/models.py\n--- a/ephios/plugins/pages/models.py\n+++ b/ephios/plugins/pages/models.py\n@@ -5,7 +5,12 @@\n class Page(models.Model):\n title = models.CharField(verbose_name=_(\"Title\"), max_length=250)\n content = models.TextField(_(\"Content\"), blank=True)\n- slug = models.SlugField(_(\"Slug\"), max_length=250, unique=True)\n+ slug = models.SlugField(\n+ _(\"URL slug\"),\n+ help_text=_(\"The slug is used to generate the page's URL.\"),\n+ max_length=250,\n+ unique=True,\n+ )\n show_in_footer = models.BooleanField(_(\"Show in footer\"), default=False)\n publicly_visible = models.BooleanField(_(\"Publicly visible\"), default=False)\n \ndiff --git a/ephios/plugins/pages/views.py b/ephios/plugins/pages/views.py\n--- a/ephios/plugins/pages/views.py\n+++ b/ephios/plugins/pages/views.py\n@@ -29,7 +29,7 @@\n class PageCreateView(CustomPermissionRequiredMixin, CreateView):\n model = Page\n permission_required = \"pages.add_page\"\n- fields = [\"title\", \"content\", \"slug\", \"show_in_footer\", \"publicly_visible\"]\n+ fields = [\"title\", \"slug\", \"content\", \"show_in_footer\", \"publicly_visible\"]\n \n def get_success_url(self):\n messages.success(self.request, _(\"Page saved successfully.\"))\n@@ -39,7 +39,7 @@\n class PageUpdateView(CustomPermissionRequiredMixin, UpdateView):\n model = Page\n permission_required = \"pages.change_page\"\n- fields = [\"title\", \"content\", \"slug\", \"show_in_footer\", \"publicly_visible\"]\n+ fields = [\"title\", \"slug\", \"content\", \"show_in_footer\", \"publicly_visible\"]\n \n def get_success_url(self):\n messages.success(self.request, _(\"Page saved successfully.\"))\n", "issue": "Improve explanation for page slugs\nUsers did not understand what the page slug means. 
We should provide a help text, either directly or in the docs.\n", "before_files": [{"content": "from django.db import models\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass Page(models.Model):\n title = models.CharField(verbose_name=_(\"Title\"), max_length=250)\n content = models.TextField(_(\"Content\"), blank=True)\n slug = models.SlugField(_(\"Slug\"), max_length=250, unique=True)\n show_in_footer = models.BooleanField(_(\"Show in footer\"), default=False)\n publicly_visible = models.BooleanField(_(\"Publicly visible\"), default=False)\n\n def __str__(self):\n return str(self.title)\n\n class Meta:\n verbose_name = \"Page\"\n verbose_name_plural = \"Pages\"\n", "path": "ephios/plugins/pages/models.py"}, {"content": "from django.contrib import messages\nfrom django.contrib.auth.views import redirect_to_login\nfrom django.urls import reverse\nfrom django.utils.translation import gettext as _\nfrom django.views.generic import CreateView, DeleteView, DetailView, ListView, UpdateView\n\nfrom ephios.extra.mixins import CustomPermissionRequiredMixin\nfrom ephios.plugins.pages.models import Page\n\n\nclass PageListView(CustomPermissionRequiredMixin, ListView):\n model = Page\n permission_required = \"pages.add_page\"\n\n\nclass PageView(DetailView):\n model = Page\n\n def setup(self, request, *args, **kwargs):\n super().setup(request, *args, **kwargs)\n self.object = self.get_object()\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user.is_authenticated and not self.object.publicly_visible:\n return redirect_to_login(self.request.get_full_path())\n return super().dispatch(request, *args, **kwargs)\n\n\nclass PageCreateView(CustomPermissionRequiredMixin, CreateView):\n model = Page\n permission_required = \"pages.add_page\"\n fields = [\"title\", \"content\", \"slug\", \"show_in_footer\", \"publicly_visible\"]\n\n def get_success_url(self):\n messages.success(self.request, _(\"Page saved successfully.\"))\n return reverse(\"pages:settings_page_list\")\n\n\nclass PageUpdateView(CustomPermissionRequiredMixin, UpdateView):\n model = Page\n permission_required = \"pages.change_page\"\n fields = [\"title\", \"content\", \"slug\", \"show_in_footer\", \"publicly_visible\"]\n\n def get_success_url(self):\n messages.success(self.request, _(\"Page saved successfully.\"))\n return reverse(\"pages:settings_page_list\")\n\n\nclass PageDeleteView(CustomPermissionRequiredMixin, DeleteView):\n model = Page\n permission_required = \"pages.delete_page\"\n\n def get_success_url(self):\n messages.info(self.request, _(\"Page deleted successfully.\"))\n return reverse(\"pages:settings_page_list\")\n", "path": "ephios/plugins/pages/views.py"}]}
1,273
432
gh_patches_debug_15187
rasdani/github-patches
git_diff
vispy__vispy-1362
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> examples/tutorial/app/simple_wx.py issue ``` Traceback (most recent call last): File "simple_wx.py", line 58, in <module> frame = TestFrame() File "simple_wx.py", line 49, in __init__ self.canvas = Canvas(app="wx", parent=self, show=True) File "simple_wx.py", line 20, in __init__ app.Canvas.__init__(self, *args, **kwargs) File "/home/eldar/src/vispy/vispy/app/canvas.py", line 208, in __init__ self.set_current() File "/home/eldar/src/vispy/vispy/app/canvas.py", line 406, in set_current self._backend._vispy_set_current() File "/home/eldar/src/vispy/vispy/app/backends/_wx.py", line 302, in _vispy_set_current self.SetCurrent(self._gl_context) wx._core.wxAssertionError: C++ assertion "xid" failed at /home/eldar/src/wx/wxPython_Phoenix/wxPython-4.0.0b2/ext/wxWidgets/src/unix/glx11.cpp(194) in SetCurrent(): window must be shown ``` </issue> <code> [start of examples/tutorial/app/simple_wx.py] 1 # -*- coding: utf-8 -*- 2 # vispy: testskip 3 # Copyright (c) 2015, Vispy Development Team. 4 # Distributed under the (new) BSD License. See LICENSE.txt for more info. 5 """ 6 This is a very minimal example that opens a window and makes the background 7 color to change from black to white to black ... 8 9 The wx backend is used to embed the canvas in a simple wx Frame with 10 a menubar. 11 """ 12 13 import wx 14 import math 15 from vispy import app, gloo 16 17 18 class Canvas(app.Canvas): 19 def __init__(self, *args, **kwargs): 20 app.Canvas.__init__(self, *args, **kwargs) 21 self._timer = app.Timer('auto', connect=self.on_timer, start=True) 22 self.tick = 0 23 24 def on_draw(self, event): 25 gloo.clear(color=True) 26 27 def on_timer(self, event): 28 self.tick += 1 / 60.0 29 c = abs(math.sin(self.tick)) 30 gloo.set_clear_color((c, c, c, 1)) 31 self.update() 32 33 def stop_timer(self): 34 self._timer.stop() 35 36 37 class TestFrame(wx.Frame): 38 def __init__(self): 39 wx.Frame.__init__(self, None, -1, "Vispy Test", 40 wx.DefaultPosition, size=(500, 500)) 41 42 MenuBar = wx.MenuBar() 43 file_menu = wx.Menu() 44 file_menu.Append(wx.ID_EXIT, "&Quit") 45 self.Bind(wx.EVT_MENU, self.on_quit, id=wx.ID_EXIT) 46 MenuBar.Append(file_menu, "&File") 47 self.SetMenuBar(MenuBar) 48 49 self.canvas = Canvas(app="wx", parent=self, show=True) 50 51 def on_quit(self, event): 52 self.canvas.stop_timer() 53 self.Close(True) 54 55 56 if __name__ == '__main__': 57 myapp = wx.App(0) 58 frame = TestFrame() 59 frame.Show(True) 60 myapp.MainLoop() 61 [end of examples/tutorial/app/simple_wx.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/tutorial/app/simple_wx.py b/examples/tutorial/app/simple_wx.py --- a/examples/tutorial/app/simple_wx.py +++ b/examples/tutorial/app/simple_wx.py @@ -43,15 +43,20 @@ file_menu = wx.Menu() file_menu.Append(wx.ID_EXIT, "&Quit") self.Bind(wx.EVT_MENU, self.on_quit, id=wx.ID_EXIT) + self.Bind(wx.EVT_SHOW, self.on_show) MenuBar.Append(file_menu, "&File") self.SetMenuBar(MenuBar) - self.canvas = Canvas(app="wx", parent=self, show=True) + self.canvas = Canvas(app="wx", parent=self) def on_quit(self, event): self.canvas.stop_timer() self.Close(True) + def on_show(self, event): + self.canvas.show() + event.Skip() + if __name__ == '__main__': myapp = wx.App(0)
{"golden_diff": "diff --git a/examples/tutorial/app/simple_wx.py b/examples/tutorial/app/simple_wx.py\n--- a/examples/tutorial/app/simple_wx.py\n+++ b/examples/tutorial/app/simple_wx.py\n@@ -43,15 +43,20 @@\n file_menu = wx.Menu()\n file_menu.Append(wx.ID_EXIT, \"&Quit\")\n self.Bind(wx.EVT_MENU, self.on_quit, id=wx.ID_EXIT)\n+ self.Bind(wx.EVT_SHOW, self.on_show)\n MenuBar.Append(file_menu, \"&File\")\n self.SetMenuBar(MenuBar)\n \n- self.canvas = Canvas(app=\"wx\", parent=self, show=True)\n+ self.canvas = Canvas(app=\"wx\", parent=self)\n \n def on_quit(self, event):\n self.canvas.stop_timer()\n self.Close(True)\n \n+ def on_show(self, event):\n+ self.canvas.show()\n+ event.Skip()\n+\n \n if __name__ == '__main__':\n myapp = wx.App(0)\n", "issue": "examples/tutorial/app/simple_wx.py issue\n```\r\nTraceback (most recent call last):\r\n File \"simple_wx.py\", line 58, in <module>\r\n frame = TestFrame()\r\n File \"simple_wx.py\", line 49, in __init__\r\n self.canvas = Canvas(app=\"wx\", parent=self, show=True)\r\n File \"simple_wx.py\", line 20, in __init__\r\n app.Canvas.__init__(self, *args, **kwargs)\r\n File \"/home/eldar/src/vispy/vispy/app/canvas.py\", line 208, in __init__\r\n self.set_current()\r\n File \"/home/eldar/src/vispy/vispy/app/canvas.py\", line 406, in set_current\r\n self._backend._vispy_set_current()\r\n File \"/home/eldar/src/vispy/vispy/app/backends/_wx.py\", line 302, in _vispy_set_current\r\n self.SetCurrent(self._gl_context)\r\nwx._core.wxAssertionError: C++ assertion \"xid\" failed at /home/eldar/src/wx/wxPython_Phoenix/wxPython-4.0.0b2/ext/wxWidgets/src/unix/glx11.cpp(194) in SetCurrent(): window must be shown\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# vispy: testskip\n# Copyright (c) 2015, Vispy Development Team.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\"\"\"\nThis is a very minimal example that opens a window and makes the background\ncolor to change from black to white to black ...\n\nThe wx backend is used to embed the canvas in a simple wx Frame with\na menubar.\n\"\"\"\n\nimport wx\nimport math\nfrom vispy import app, gloo\n\n\nclass Canvas(app.Canvas):\n def __init__(self, *args, **kwargs):\n app.Canvas.__init__(self, *args, **kwargs)\n self._timer = app.Timer('auto', connect=self.on_timer, start=True)\n self.tick = 0\n\n def on_draw(self, event):\n gloo.clear(color=True)\n\n def on_timer(self, event):\n self.tick += 1 / 60.0\n c = abs(math.sin(self.tick))\n gloo.set_clear_color((c, c, c, 1))\n self.update()\n\n def stop_timer(self):\n self._timer.stop()\n\n\nclass TestFrame(wx.Frame):\n def __init__(self):\n wx.Frame.__init__(self, None, -1, \"Vispy Test\",\n wx.DefaultPosition, size=(500, 500))\n\n MenuBar = wx.MenuBar()\n file_menu = wx.Menu()\n file_menu.Append(wx.ID_EXIT, \"&Quit\")\n self.Bind(wx.EVT_MENU, self.on_quit, id=wx.ID_EXIT)\n MenuBar.Append(file_menu, \"&File\")\n self.SetMenuBar(MenuBar)\n\n self.canvas = Canvas(app=\"wx\", parent=self, show=True)\n\n def on_quit(self, event):\n self.canvas.stop_timer()\n self.Close(True)\n\n\nif __name__ == '__main__':\n myapp = wx.App(0)\n frame = TestFrame()\n frame.Show(True)\n myapp.MainLoop()\n", "path": "examples/tutorial/app/simple_wx.py"}]}
1,376
208
gh_patches_debug_975
rasdani/github-patches
git_diff
PennyLaneAI__pennylane-2947
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG] `qml.equal` ignore in-place inversion Currently, we have: ``` >>> qml.equal(qml.RX(1.0, wires=0), qml.RX(1.0, wires=0).inv()) True ``` If two operations are inverses of each other, they should not be equal. </issue> <code> [start of pennylane/ops/functions/equal.py] 1 # Copyright 2018-2021 Xanadu Quantum Technologies Inc. 2 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 7 # http://www.apache.org/licenses/LICENSE-2.0 8 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 """ 15 This module contains the qml.equal function. 16 """ 17 # pylint: disable=too-many-arguments,too-many-return-statements 18 import pennylane as qml 19 from pennylane.operation import Operator 20 21 22 def equal( 23 op1: Operator, 24 op2: Operator, 25 check_interface=True, 26 check_trainability=True, 27 rtol=1e-5, 28 atol=1e-9, 29 ): 30 r"""Function for determining operator equality. 31 32 Args: 33 op1 (.Operator): First operator to compare 34 op2 (.Operator): Second operator to compare 35 check_interface (bool, optional): Whether to compare interfaces. Default: `True` 36 check_trainability (bool, optional): Whether to compare trainability status. Default: `True` 37 rtol (float, optional): Relative tolerance for parameters 38 atol (float, optional): Absolute tolerance for parameters 39 40 Returns: 41 bool: `True` if the operators are equal, else `False` 42 43 **Example** 44 45 Given two operators, ``qml.equal`` determines their equality: 46 47 >>> op1 = qml.RX(np.array(.12), wires=0) 48 >>> op2 = qml.RY(np.array(1.23), wires=0) 49 >>> qml.equal(op1, op1), qml.equal(op1, op2) 50 True False 51 52 .. details:: 53 :title: Usage Details 54 55 You can use the optional arguments to get more specific results. 56 57 Consider the following comparisons: 58 59 >>> op1 = qml.RX(torch.tensor(1.2), wires=0) 60 >>> op2 = qml.RX(jax.numpy.array(1.2), wires=0) 61 >>> qml.equal(op1, op2) 62 False 63 64 >>> qml.equal(op1, op2, check_interface=False, check_trainability=False) 65 True 66 67 >>> op3 = qml.RX(np.array(1.2, requires_grad=True), wires=0) 68 >>> op4 = qml.RX(np.array(1.2, requires_grad=False), wires=0) 69 >>> qml.equal(op3, op4) 70 False 71 72 >>> qml.equal(op3, op4, check_trainability=False) 73 True 74 """ 75 if op1.__class__ is not op2.__class__ or op1.arithmetic_depth != op2.arithmetic_depth: 76 return False 77 if op1.arithmetic_depth > 0: 78 raise NotImplementedError( 79 "Comparison of operators with an arithmetic depth larger than 0 is not yet implemented." 80 ) 81 if not all( 82 qml.math.allclose(d1, d2, rtol=rtol, atol=atol) for d1, d2 in zip(op1.data, op2.data) 83 ): 84 return False 85 if op1.wires != op2.wires: 86 return False 87 for kwarg in op1.hyperparameters: 88 if op1.hyperparameters[kwarg] != op2.hyperparameters[kwarg]: 89 return False 90 91 if check_trainability: 92 for params_1, params_2 in zip(op1.data, op2.data): 93 if qml.math.requires_grad(params_1) != qml.math.requires_grad(params_2): 94 return False 95 96 if check_interface: 97 for params_1, params_2 in zip(op1.data, op2.data): 98 if qml.math.get_interface(params_1) != qml.math.get_interface(params_2): 99 return False 100 101 return True 102 [end of pennylane/ops/functions/equal.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pennylane/ops/functions/equal.py b/pennylane/ops/functions/equal.py --- a/pennylane/ops/functions/equal.py +++ b/pennylane/ops/functions/equal.py @@ -98,4 +98,4 @@ if qml.math.get_interface(params_1) != qml.math.get_interface(params_2): return False - return True + return getattr(op1, "inverse", False) == getattr(op2, "inverse", False)
{"golden_diff": "diff --git a/pennylane/ops/functions/equal.py b/pennylane/ops/functions/equal.py\n--- a/pennylane/ops/functions/equal.py\n+++ b/pennylane/ops/functions/equal.py\n@@ -98,4 +98,4 @@\n if qml.math.get_interface(params_1) != qml.math.get_interface(params_2):\n return False\n \n- return True\n+ return getattr(op1, \"inverse\", False) == getattr(op2, \"inverse\", False)\n", "issue": "[BUG] `qml.equal` ignore in-place inversion\nCurrently, we have:\r\n```\r\n>>> qml.equal(qml.RX(1.0, wires=0), qml.RX(1.0, wires=0).inv())\r\nTrue\r\n```\r\n\r\nIf two operations are inverses of each other, they should not be equal.\n", "before_files": [{"content": "# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis module contains the qml.equal function.\n\"\"\"\n# pylint: disable=too-many-arguments,too-many-return-statements\nimport pennylane as qml\nfrom pennylane.operation import Operator\n\n\ndef equal(\n op1: Operator,\n op2: Operator,\n check_interface=True,\n check_trainability=True,\n rtol=1e-5,\n atol=1e-9,\n):\n r\"\"\"Function for determining operator equality.\n\n Args:\n op1 (.Operator): First operator to compare\n op2 (.Operator): Second operator to compare\n check_interface (bool, optional): Whether to compare interfaces. Default: `True`\n check_trainability (bool, optional): Whether to compare trainability status. Default: `True`\n rtol (float, optional): Relative tolerance for parameters\n atol (float, optional): Absolute tolerance for parameters\n\n Returns:\n bool: `True` if the operators are equal, else `False`\n\n **Example**\n\n Given two operators, ``qml.equal`` determines their equality:\n\n >>> op1 = qml.RX(np.array(.12), wires=0)\n >>> op2 = qml.RY(np.array(1.23), wires=0)\n >>> qml.equal(op1, op1), qml.equal(op1, op2)\n True False\n\n .. 
details::\n :title: Usage Details\n\n You can use the optional arguments to get more specific results.\n\n Consider the following comparisons:\n\n >>> op1 = qml.RX(torch.tensor(1.2), wires=0)\n >>> op2 = qml.RX(jax.numpy.array(1.2), wires=0)\n >>> qml.equal(op1, op2)\n False\n\n >>> qml.equal(op1, op2, check_interface=False, check_trainability=False)\n True\n\n >>> op3 = qml.RX(np.array(1.2, requires_grad=True), wires=0)\n >>> op4 = qml.RX(np.array(1.2, requires_grad=False), wires=0)\n >>> qml.equal(op3, op4)\n False\n\n >>> qml.equal(op3, op4, check_trainability=False)\n True\n \"\"\"\n if op1.__class__ is not op2.__class__ or op1.arithmetic_depth != op2.arithmetic_depth:\n return False\n if op1.arithmetic_depth > 0:\n raise NotImplementedError(\n \"Comparison of operators with an arithmetic depth larger than 0 is not yet implemented.\"\n )\n if not all(\n qml.math.allclose(d1, d2, rtol=rtol, atol=atol) for d1, d2 in zip(op1.data, op2.data)\n ):\n return False\n if op1.wires != op2.wires:\n return False\n for kwarg in op1.hyperparameters:\n if op1.hyperparameters[kwarg] != op2.hyperparameters[kwarg]:\n return False\n\n if check_trainability:\n for params_1, params_2 in zip(op1.data, op2.data):\n if qml.math.requires_grad(params_1) != qml.math.requires_grad(params_2):\n return False\n\n if check_interface:\n for params_1, params_2 in zip(op1.data, op2.data):\n if qml.math.get_interface(params_1) != qml.math.get_interface(params_2):\n return False\n\n return True\n", "path": "pennylane/ops/functions/equal.py"}]}
1,718
117
gh_patches_debug_4858
rasdani/github-patches
git_diff
Gallopsled__pwntools-752
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> 3.0.3 Release Broken It appears that the archive uploaded to PyPI does not include README.md, which is referred to by setup.py. @Idolf can you update the release to include the README? </issue> <code> [start of setup.py] 1 #!/usr/bin/env python2 2 import glob 3 import os 4 import platform 5 import sys 6 from distutils.command.install import INSTALL_SCHEMES 7 from distutils.sysconfig import get_python_inc 8 from distutils.util import convert_path 9 10 from setuptools import find_packages 11 from setuptools import setup 12 13 # Get all template files 14 templates = [] 15 for dirpath, dirnames, filenames in os.walk(convert_path('pwnlib/shellcraft/templates')): 16 for f in filenames: 17 templates.append(os.path.relpath(os.path.join(dirpath, f), 'pwnlib')) 18 19 # This makes pwntools-LICENSE.txt appear with the package folders 20 for scheme in INSTALL_SCHEMES.values(): 21 scheme['data'] = scheme['purelib'] 22 23 # Find all of the console scripts 24 console_scripts = [] 25 26 for filename in glob.glob('pwnlib/commandline/*'): 27 filename = os.path.basename(filename) 28 filename, ext = os.path.splitext(filename) 29 30 if ext != '.py' or '__init__' in filename: 31 continue 32 33 script = '%s=pwnlib.commandline.%s:main' % (filename, filename) 34 console_scripts.append(script) 35 36 install_requires = ['paramiko>=1.15.2', 37 'mako>=1.0.0', 38 'pyelftools>=0.2.4', 39 'capstone', 40 'ropgadget>=5.3', 41 'pyserial>=2.7', 42 'requests>=2.0', 43 'pip>=6.0.8', 44 'tox>=1.8.1', 45 'pygments>=2.0', 46 'pysocks', 47 'python-dateutil', 48 'pypandoc', 49 'packaging'] 50 51 # This is a hack until somebody ports psutil to OpenBSD 52 if platform.system() != 'OpenBSD': 53 install_requires.append('psutil>=2.1.3') 54 55 # Check that the user has installed the Python development headers 56 PythonH = os.path.join(get_python_inc(), 'Python.h') 57 if not os.path.exists(PythonH): 58 print >> sys.stderr, "You must install the Python development headers!" 59 print >> sys.stderr, "$ apt-get install python-dev" 60 sys.exit(-1) 61 62 # Convert README.md to reStructuredText for PyPI 63 long_description = '' 64 try: 65 import pypandoc 66 try: 67 pypandoc.get_pandoc_path() 68 except OSError: 69 pypandoc.download_pandoc() 70 long_description = pypandoc.convert_file('README.md', 'rst') 71 except ImportError: 72 pass 73 74 75 setup( 76 name = 'pwntools', 77 packages = find_packages(), 78 version = '3.0.3', 79 data_files = [('', 80 ['LICENSE-pwntools.txt', 81 ]), 82 ], 83 package_data = { 84 'pwnlib': [ 85 'data/crcsums.txt', 86 'data/useragents/useragents.txt', 87 'data/binutils/*', 88 'data/includes/*.h', 89 'data/includes/*/*.h', 90 ] + templates, 91 }, 92 entry_points = {'console_scripts': console_scripts}, 93 scripts = glob.glob("bin/*"), 94 description = "Pwntools CTF framework and exploit development library.", 95 long_description = long_description, 96 author = "Gallopsled et al.", 97 author_email = "#pwntools @ freenode.net", 98 url = 'https://pwntools.com', 99 download_url = "https://github.com/Gallopsled/pwntools/releases", 100 install_requires = install_requires, 101 license = "Mostly MIT, some GPL/BSD, see LICENSE-pwntools.txt", 102 keywords = 'pwntools exploit ctf capture the flag binary wargame overflow stack heap defcon', 103 classifiers = [ 104 'Development Status :: 5 - Production/Stable', 105 'Environment :: Console', 106 'Intended Audience :: Developers', 107 'Intended Audience :: Science/Research', 108 'Intended Audience :: System Administrators', 109 'License :: OSI Approved :: MIT License', 110 'Natural Language :: English', 111 'Operating System :: POSIX :: Linux', 112 'Programming Language :: Python :: 2.7', 113 'Topic :: Security', 114 'Topic :: Software Development :: Assemblers', 115 'Topic :: Software Development :: Debuggers', 116 'Topic :: Software Development :: Disassemblers', 117 'Topic :: Software Development :: Embedded Systems', 118 'Topic :: Software Development :: Libraries :: Python Modules', 119 'Topic :: System :: System Shells', 120 'Topic :: Utilities', 121 ] 122 ) 123 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -77,8 +77,7 @@ packages = find_packages(), version = '3.0.3', data_files = [('', - ['LICENSE-pwntools.txt', - ]), + glob.glob('*.md') + glob.glob('*.txt')), ], package_data = { 'pwnlib': [
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -77,8 +77,7 @@\n packages = find_packages(),\n version = '3.0.3',\n data_files = [('',\n- ['LICENSE-pwntools.txt',\n- ]),\n+ glob.glob('*.md') + glob.glob('*.txt')),\n ],\n package_data = {\n 'pwnlib': [\n", "issue": "3.0.3 Release Broken\nIt appears that the archive uploaded to PyPI does not include README.md, which is referred to by setup.py.\n\n@Idolf can you update the release to include the README?\n\n", "before_files": [{"content": "#!/usr/bin/env python2\nimport glob\nimport os\nimport platform\nimport sys\nfrom distutils.command.install import INSTALL_SCHEMES\nfrom distutils.sysconfig import get_python_inc\nfrom distutils.util import convert_path\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n# Get all template files\ntemplates = []\nfor dirpath, dirnames, filenames in os.walk(convert_path('pwnlib/shellcraft/templates')):\n for f in filenames:\n templates.append(os.path.relpath(os.path.join(dirpath, f), 'pwnlib'))\n\n# This makes pwntools-LICENSE.txt appear with the package folders\nfor scheme in INSTALL_SCHEMES.values():\n scheme['data'] = scheme['purelib']\n\n# Find all of the console scripts\nconsole_scripts = []\n\nfor filename in glob.glob('pwnlib/commandline/*'):\n filename = os.path.basename(filename)\n filename, ext = os.path.splitext(filename)\n\n if ext != '.py' or '__init__' in filename:\n continue\n\n script = '%s=pwnlib.commandline.%s:main' % (filename, filename)\n console_scripts.append(script)\n\ninstall_requires = ['paramiko>=1.15.2',\n 'mako>=1.0.0',\n 'pyelftools>=0.2.4',\n 'capstone',\n 'ropgadget>=5.3',\n 'pyserial>=2.7',\n 'requests>=2.0',\n 'pip>=6.0.8',\n 'tox>=1.8.1',\n 'pygments>=2.0',\n 'pysocks',\n 'python-dateutil',\n 'pypandoc',\n 'packaging']\n\n# This is a hack until somebody ports psutil to OpenBSD\nif platform.system() != 'OpenBSD':\n install_requires.append('psutil>=2.1.3')\n\n# Check that the user has installed the Python development headers\nPythonH = os.path.join(get_python_inc(), 'Python.h')\nif not os.path.exists(PythonH):\n print >> sys.stderr, \"You must install the Python development headers!\"\n print >> sys.stderr, \"$ apt-get install python-dev\"\n sys.exit(-1)\n\n# Convert README.md to reStructuredText for PyPI\nlong_description = ''\ntry:\n import pypandoc\n try:\n pypandoc.get_pandoc_path()\n except OSError:\n pypandoc.download_pandoc()\n long_description = pypandoc.convert_file('README.md', 'rst')\nexcept ImportError:\n pass\n\n\nsetup(\n name = 'pwntools',\n packages = find_packages(),\n version = '3.0.3',\n data_files = [('',\n ['LICENSE-pwntools.txt',\n ]),\n ],\n package_data = {\n 'pwnlib': [\n 'data/crcsums.txt',\n 'data/useragents/useragents.txt',\n 'data/binutils/*',\n 'data/includes/*.h',\n 'data/includes/*/*.h',\n ] + templates,\n },\n entry_points = {'console_scripts': console_scripts},\n scripts = glob.glob(\"bin/*\"),\n description = \"Pwntools CTF framework and exploit development library.\",\n long_description = long_description,\n author = \"Gallopsled et al.\",\n author_email = \"#pwntools @ freenode.net\",\n url = 'https://pwntools.com',\n download_url = \"https://github.com/Gallopsled/pwntools/releases\",\n install_requires = install_requires,\n license = \"Mostly MIT, some GPL/BSD, see LICENSE-pwntools.txt\",\n keywords = 'pwntools exploit ctf capture the flag binary wargame overflow stack heap defcon',\n classifiers = [\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 
'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Security',\n 'Topic :: Software Development :: Assemblers',\n 'Topic :: Software Development :: Debuggers',\n 'Topic :: Software Development :: Disassemblers',\n 'Topic :: Software Development :: Embedded Systems',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: System :: System Shells',\n 'Topic :: Utilities',\n ]\n)\n", "path": "setup.py"}]}
1,830
101
gh_patches_debug_2598
rasdani/github-patches
git_diff
ivy-llc__ivy-13425
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> normal </issue> <code> [start of ivy/functional/frontends/torch/random_sampling.py] 1 import ivy 2 from ivy.func_wrapper import with_supported_dtypes 3 from ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back 4 5 try: 6 from torch import Generator 7 except ImportError: 8 from types import SimpleNamespace 9 10 Generator = SimpleNamespace 11 12 13 def seed() -> int: 14 """Returns a 64 bit number used to seed the RNG""" 15 return int(ivy.randint(-(2**63), 2**63 - 1)) 16 17 18 @to_ivy_arrays_and_back 19 def manual_seed(seed: int): 20 ivy.seed(seed_value=seed) 21 return Generator().manual_seed(seed) 22 23 24 @with_supported_dtypes( 25 { 26 "1.11.0 and below": ( 27 "float32", 28 "float64", 29 ) 30 }, 31 "torch", 32 ) 33 @to_ivy_arrays_and_back 34 def multinomial(input, num_samples, replacement=False, *, generator=None, out=None): 35 return ivy.multinomial( 36 num_samples + 1, # doesn't matter because `probs` is provided, but should be 37 # greater than the number of samples 38 num_samples, 39 probs=input, 40 replace=replacement, 41 out=out, 42 ) 43 44 45 @with_supported_dtypes( 46 { 47 "1.11.0 and below": ( 48 "float32", 49 "float64", 50 ) 51 }, 52 "torch", 53 ) 54 @to_ivy_arrays_and_back 55 def poisson(input, generator=None): 56 return ivy.poisson(input, shape=None) 57 58 59 @to_ivy_arrays_and_back 60 def rand( 61 size, 62 *, 63 generator=None, 64 out=None, 65 dtype=None, 66 layout=None, 67 device=None, 68 requires_grad=False, 69 pin_memory=False 70 ): 71 return ivy.random_uniform( 72 shape=size, 73 out=out, 74 dtype=dtype, 75 device=device, 76 ) 77 78 79 @to_ivy_arrays_and_back 80 def rand_like( 81 input, 82 *, 83 dtype=None, 84 layout=None, 85 device=None, 86 requires_grad=False, 87 memory_format=False 88 ): 89 shape = input.shape 90 if not dtype: 91 dtype = input.dtype 92 93 return ivy.random_uniform( 94 shape=shape, 95 dtype=dtype, 96 device=device, 97 ) 98 99 100 @to_ivy_arrays_and_back 101 def randn( 102 size, 103 *, 104 generator=None, 105 out=None, 106 dtype=None, 107 layout=None, 108 device=None, 109 requires_grad=False, 110 pin_memory=False 111 ): 112 return ivy.random_normal( 113 shape=size, 114 out=out, 115 dtype=dtype, 116 device=device, 117 ) 118 [end of ivy/functional/frontends/torch/random_sampling.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ivy/functional/frontends/torch/random_sampling.py b/ivy/functional/frontends/torch/random_sampling.py --- a/ivy/functional/frontends/torch/random_sampling.py +++ b/ivy/functional/frontends/torch/random_sampling.py @@ -76,6 +76,20 @@ ) +@with_supported_dtypes( + { + "1.11.0 and below": ( + "float32", + "float64", + ) + }, + "torch", +) +@to_ivy_arrays_and_back +def normal(mean, std, *, generator=None, out=None): + return ivy.random_normal(mean=mean, std=std, out=out) + + @to_ivy_arrays_and_back def rand_like( input,
{"golden_diff": "diff --git a/ivy/functional/frontends/torch/random_sampling.py b/ivy/functional/frontends/torch/random_sampling.py\n--- a/ivy/functional/frontends/torch/random_sampling.py\n+++ b/ivy/functional/frontends/torch/random_sampling.py\n@@ -76,6 +76,20 @@\n )\n \n \n+@with_supported_dtypes(\n+ {\n+ \"1.11.0 and below\": (\n+ \"float32\",\n+ \"float64\",\n+ )\n+ },\n+ \"torch\",\n+)\n+@to_ivy_arrays_and_back\n+def normal(mean, std, *, generator=None, out=None):\n+ return ivy.random_normal(mean=mean, std=std, out=out)\n+ \n+\n @to_ivy_arrays_and_back\n def rand_like(\n input,\n", "issue": "normal\n\n", "before_files": [{"content": "import ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.torch.func_wrapper import to_ivy_arrays_and_back\n\ntry:\n from torch import Generator\nexcept ImportError:\n from types import SimpleNamespace\n\n Generator = SimpleNamespace\n\n\ndef seed() -> int:\n \"\"\"Returns a 64 bit number used to seed the RNG\"\"\"\n return int(ivy.randint(-(2**63), 2**63 - 1))\n\n\n@to_ivy_arrays_and_back\ndef manual_seed(seed: int):\n ivy.seed(seed_value=seed)\n return Generator().manual_seed(seed)\n\n\n@with_supported_dtypes(\n {\n \"1.11.0 and below\": (\n \"float32\",\n \"float64\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef multinomial(input, num_samples, replacement=False, *, generator=None, out=None):\n return ivy.multinomial(\n num_samples + 1, # doesn't matter because `probs` is provided, but should be\n # greater than the number of samples\n num_samples,\n probs=input,\n replace=replacement,\n out=out,\n )\n\n\n@with_supported_dtypes(\n {\n \"1.11.0 and below\": (\n \"float32\",\n \"float64\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef poisson(input, generator=None):\n return ivy.poisson(input, shape=None)\n\n\n@to_ivy_arrays_and_back\ndef rand(\n size,\n *,\n generator=None,\n out=None,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n pin_memory=False\n):\n return ivy.random_uniform(\n shape=size,\n out=out,\n dtype=dtype,\n device=device,\n )\n\n\n@to_ivy_arrays_and_back\ndef rand_like(\n input,\n *,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n memory_format=False\n):\n shape = input.shape\n if not dtype:\n dtype = input.dtype\n\n return ivy.random_uniform(\n shape=shape,\n dtype=dtype,\n device=device,\n )\n\n\n@to_ivy_arrays_and_back\ndef randn(\n size,\n *,\n generator=None,\n out=None,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n pin_memory=False\n):\n return ivy.random_normal(\n shape=size,\n out=out,\n dtype=dtype,\n device=device,\n )\n", "path": "ivy/functional/frontends/torch/random_sampling.py"}]}
1,367
182
gh_patches_debug_4790
rasdani/github-patches
git_diff
gratipay__gratipay.com-3040
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> show total ever given Suggested [via Twitter](https://twitter.com/tripflex/status/532597015210131456): > is there no way for me to see the total I have donated? I know I can see it weekly, but what about overall total? </issue> <code> [start of gratipay/utils/history.py] 1 def iter_payday_events(db, participant): 2 """Yields payday events for the given participant. 3 """ 4 username = participant.username 5 exchanges = db.all(""" 6 SELECT * 7 FROM exchanges 8 WHERE participant=%s 9 """, (username,), back_as=dict) 10 transfers = db.all(""" 11 SELECT * 12 FROM transfers 13 WHERE tipper=%(username)s OR tippee=%(username)s 14 """, locals(), back_as=dict) 15 16 if not (exchanges or transfers): 17 return 18 19 payday_dates = db.all(""" 20 SELECT ts_start::date 21 FROM paydays 22 ORDER BY ts_start ASC 23 """) 24 25 balance = participant.balance 26 prev_date = None 27 get_timestamp = lambda e: e['timestamp'] 28 events = sorted(exchanges+transfers, key=get_timestamp, reverse=True) 29 for event in events: 30 31 event['balance'] = balance 32 33 event_date = event['timestamp'].date() 34 if event_date != prev_date: 35 if prev_date: 36 yield dict(kind='day-close', balance=balance) 37 day_open = dict(kind='day-open', date=event_date, balance=balance) 38 if payday_dates: 39 while payday_dates and payday_dates[-1] > event_date: 40 payday_dates.pop() 41 payday_date = payday_dates[-1] if payday_dates else None 42 if event_date == payday_date: 43 day_open['payday_number'] = len(payday_dates) - 1 44 yield day_open 45 prev_date = event_date 46 47 if 'fee' in event: 48 if event['amount'] > 0: 49 kind = 'charge' 50 if event['status'] in (None, 'succeeded'): 51 balance -= event['amount'] 52 else: 53 kind = 'credit' 54 if event['status'] != 'failed': 55 balance -= event['amount'] - event['fee'] 56 else: 57 kind = 'transfer' 58 if event['tippee'] == username: 59 balance -= event['amount'] 60 else: 61 balance += event['amount'] 62 event['kind'] = kind 63 64 yield event 65 66 yield dict(kind='day-close', balance='0.00') 67 [end of gratipay/utils/history.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/gratipay/utils/history.py b/gratipay/utils/history.py --- a/gratipay/utils/history.py +++ b/gratipay/utils/history.py @@ -16,6 +16,13 @@ if not (exchanges or transfers): return + if transfers: + yield dict( + kind='totals', + given=sum(t['amount'] for t in transfers if t['tipper'] == username), + received=sum(t['amount'] for t in transfers if t['tippee'] == username), + ) + payday_dates = db.all(""" SELECT ts_start::date FROM paydays
{"golden_diff": "diff --git a/gratipay/utils/history.py b/gratipay/utils/history.py\n--- a/gratipay/utils/history.py\n+++ b/gratipay/utils/history.py\n@@ -16,6 +16,13 @@\n if not (exchanges or transfers):\n return\n \n+ if transfers:\n+ yield dict(\n+ kind='totals',\n+ given=sum(t['amount'] for t in transfers if t['tipper'] == username),\n+ received=sum(t['amount'] for t in transfers if t['tippee'] == username),\n+ )\n+\n payday_dates = db.all(\"\"\"\n SELECT ts_start::date\n FROM paydays\n", "issue": "show total ever given\nSuggested [via Twitter](https://twitter.com/tripflex/status/532597015210131456):\n\n> is there no way for me to see the total I have donated? I know I can see it weekly, but what about overall total?\n\n", "before_files": [{"content": "def iter_payday_events(db, participant):\n \"\"\"Yields payday events for the given participant.\n \"\"\"\n username = participant.username\n exchanges = db.all(\"\"\"\n SELECT *\n FROM exchanges\n WHERE participant=%s\n \"\"\", (username,), back_as=dict)\n transfers = db.all(\"\"\"\n SELECT *\n FROM transfers\n WHERE tipper=%(username)s OR tippee=%(username)s\n \"\"\", locals(), back_as=dict)\n\n if not (exchanges or transfers):\n return\n\n payday_dates = db.all(\"\"\"\n SELECT ts_start::date\n FROM paydays\n ORDER BY ts_start ASC\n \"\"\")\n\n balance = participant.balance\n prev_date = None\n get_timestamp = lambda e: e['timestamp']\n events = sorted(exchanges+transfers, key=get_timestamp, reverse=True)\n for event in events:\n\n event['balance'] = balance\n\n event_date = event['timestamp'].date()\n if event_date != prev_date:\n if prev_date:\n yield dict(kind='day-close', balance=balance)\n day_open = dict(kind='day-open', date=event_date, balance=balance)\n if payday_dates:\n while payday_dates and payday_dates[-1] > event_date:\n payday_dates.pop()\n payday_date = payday_dates[-1] if payday_dates else None\n if event_date == payday_date:\n day_open['payday_number'] = len(payday_dates) - 1\n yield day_open\n prev_date = event_date\n\n if 'fee' in event:\n if event['amount'] > 0:\n kind = 'charge'\n if event['status'] in (None, 'succeeded'):\n balance -= event['amount']\n else:\n kind = 'credit'\n if event['status'] != 'failed':\n balance -= event['amount'] - event['fee']\n else:\n kind = 'transfer'\n if event['tippee'] == username:\n balance -= event['amount']\n else:\n balance += event['amount']\n event['kind'] = kind\n\n yield event\n\n yield dict(kind='day-close', balance='0.00')\n", "path": "gratipay/utils/history.py"}]}
1,198
143
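The fix in the record above is a one-shot `totals` event summing all transfers a participant ever gave or received. A minimal sketch of that computation, using an in-memory list in place of the project's database rows:

```python
from decimal import Decimal

transfers = [
    {"tipper": "alice", "tippee": "bob", "amount": Decimal("5.00")},
    {"tipper": "bob", "tippee": "alice", "amount": Decimal("1.50")},
    {"tipper": "alice", "tippee": "carol", "amount": Decimal("2.25")},
]

def totals(transfers, username):
    # Mirrors the yielded event: sum outgoing amounts as "given",
    # incoming amounts as "received".
    return {
        "kind": "totals",
        "given": sum((t["amount"] for t in transfers if t["tipper"] == username), Decimal(0)),
        "received": sum((t["amount"] for t in transfers if t["tippee"] == username), Decimal(0)),
    }

print(totals(transfers, "alice"))  # given=7.25, received=1.50
```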
gh_patches_debug_5928
rasdani/github-patches
git_diff
DataDog__dd-trace-py-616
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Unable to install with opentracing extras I was following the [OpenTracing setup instructions](https://docs.datadoghq.com/tracing/advanced_usage/?tab=python#opentracing) but got a warning about missing extras: ```sh (blah-YneZd-6L) sam@sam-Q325UAR ~/t/blah> pip list Package Version ---------- ------- pip 18.0 setuptools 40.4.1 wheel 0.31.1 (blah-YneZd-6L) sam@sam-Q325UAR ~/t/blah> python --version Python 2.7.14 (blah-YneZd-6L) sam@sam-Q325UAR ~/t/blah> pip --version pip 18.0 from /home/sam/.local/share/virtualenvs/blah-YneZd-6L/lib/python2.7/site-packages/pip (python 2.7) (blah-YneZd-6L) sam@sam-Q325UAR ~/t/blah> pip install 'ddtrace[opentracing] == 0.14.0' Collecting ddtrace[opentracing]==0.14.0 ddtrace 0.14.0 does not provide the extra 'opentracing' Collecting msgpack-python (from ddtrace[opentracing]==0.14.0) Collecting wrapt (from ddtrace[opentracing]==0.14.0) Installing collected packages: msgpack-python, wrapt, ddtrace Successfully installed ddtrace-0.14.0 msgpack-python-0.5.6 wrapt-1.10.11 ``` > `ddtrace 0.14.0 does not provide the extra 'opentracing'` Happens on Python 3.6 as well. </issue> <code> [start of setup.py] 1 import os 2 import sys 3 import re 4 5 from setuptools import setup, find_packages 6 from setuptools.command.test import test as TestCommand 7 8 9 def get_version(package): 10 """ 11 Return package version as listed in `__version__` in `__init__.py`. 12 This method prevents to import packages at setup-time. 13 """ 14 init_py = open(os.path.join(package, '__init__.py')).read() 15 return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1) 16 17 18 class Tox(TestCommand): 19 20 user_options = [('tox-args=', 'a', "Arguments to pass to tox")] 21 22 def initialize_options(self): 23 TestCommand.initialize_options(self) 24 self.tox_args = None 25 26 def finalize_options(self): 27 TestCommand.finalize_options(self) 28 self.test_args = [] 29 self.test_suite = True 30 31 def run_tests(self): 32 # import here, cause outside the eggs aren't loaded 33 import tox 34 import shlex 35 args = self.tox_args 36 if args: 37 args = shlex.split(self.tox_args) 38 errno = tox.cmdline(args=args) 39 sys.exit(errno) 40 41 42 version = get_version('ddtrace') 43 # Append a suffix to the version for dev builds 44 if os.environ.get('VERSION_SUFFIX'): 45 version = '{v}+{s}'.format( 46 v=version, 47 s=os.environ.get('VERSION_SUFFIX'), 48 ) 49 50 setup( 51 name='ddtrace', 52 version=version, 53 description='Datadog tracing code', 54 url='https://github.com/DataDog/dd-trace-py', 55 author='Datadog, Inc.', 56 author_email='[email protected]', 57 license='BSD', 58 packages=find_packages(exclude=['tests*']), 59 install_requires=[ 60 "wrapt", 61 "msgpack-python", 62 ], 63 extra_requires={ 64 # users can include opentracing by having: 65 # install_requires=["ddtrace[opentracing]", ...] 
66 "opentracing": ["opentracing"], 67 }, 68 # plugin tox 69 tests_require=['tox', 'flake8'], 70 cmdclass={'test': Tox}, 71 entry_points={ 72 'console_scripts': [ 73 'ddtrace-run = ddtrace.commands.ddtrace_run:main' 74 ] 75 }, 76 classifiers=[ 77 'Programming Language :: Python', 78 'Programming Language :: Python :: 2.7', 79 'Programming Language :: Python :: 3.4', 80 'Programming Language :: Python :: 3.5', 81 'Programming Language :: Python :: 3.6', 82 ], 83 ) 84 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -60,10 +60,10 @@ "wrapt", "msgpack-python", ], - extra_requires={ + extras_require={ # users can include opentracing by having: # install_requires=["ddtrace[opentracing]", ...] - "opentracing": ["opentracing"], + "opentracing": ["opentracing>=2.0.0"], }, # plugin tox tests_require=['tox', 'flake8'],
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -60,10 +60,10 @@\n \"wrapt\",\n \"msgpack-python\",\n ],\n- extra_requires={\n+ extras_require={\n # users can include opentracing by having:\n # install_requires=[\"ddtrace[opentracing]\", ...]\n- \"opentracing\": [\"opentracing\"],\n+ \"opentracing\": [\"opentracing>=2.0.0\"],\n },\n # plugin tox\n tests_require=['tox', 'flake8'],\n", "issue": "Unable to install with opentracing extras\nI was following the [OpenTracing setup instructions](https://docs.datadoghq.com/tracing/advanced_usage/?tab=python#opentracing) but got a warning about missing extras:\r\n\r\n```sh\r\n(blah-YneZd-6L) sam@sam-Q325UAR ~/t/blah> pip list\r\nPackage Version\r\n---------- -------\r\npip 18.0 \r\nsetuptools 40.4.1 \r\nwheel 0.31.1 \r\n\r\n(blah-YneZd-6L) sam@sam-Q325UAR ~/t/blah> python --version\r\nPython 2.7.14\r\n\r\n(blah-YneZd-6L) sam@sam-Q325UAR ~/t/blah> pip --version\r\npip 18.0 from /home/sam/.local/share/virtualenvs/blah-YneZd-6L/lib/python2.7/site-packages/pip (python 2.7)\r\n\r\n\r\n(blah-YneZd-6L) sam@sam-Q325UAR ~/t/blah> pip install 'ddtrace[opentracing] == 0.14.0'\r\nCollecting ddtrace[opentracing]==0.14.0\r\n ddtrace 0.14.0 does not provide the extra 'opentracing'\r\nCollecting msgpack-python (from ddtrace[opentracing]==0.14.0)\r\nCollecting wrapt (from ddtrace[opentracing]==0.14.0)\r\nInstalling collected packages: msgpack-python, wrapt, ddtrace\r\nSuccessfully installed ddtrace-0.14.0 msgpack-python-0.5.6 wrapt-1.10.11\r\n```\r\n\r\n> `ddtrace 0.14.0 does not provide the extra 'opentracing'`\r\n\r\nHappens on Python 3.6 as well.\n", "before_files": [{"content": "import os\nimport sys\nimport re\n\nfrom setuptools import setup, find_packages\nfrom setuptools.command.test import test as TestCommand\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `__init__.py`.\n This method prevents to import packages at setup-time.\n \"\"\"\n init_py = open(os.path.join(package, '__init__.py')).read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\nclass Tox(TestCommand):\n\n user_options = [('tox-args=', 'a', \"Arguments to pass to tox\")]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.tox_args = None\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n # import here, cause outside the eggs aren't loaded\n import tox\n import shlex\n args = self.tox_args\n if args:\n args = shlex.split(self.tox_args)\n errno = tox.cmdline(args=args)\n sys.exit(errno)\n\n\nversion = get_version('ddtrace')\n# Append a suffix to the version for dev builds\nif os.environ.get('VERSION_SUFFIX'):\n version = '{v}+{s}'.format(\n v=version,\n s=os.environ.get('VERSION_SUFFIX'),\n )\n\nsetup(\n name='ddtrace',\n version=version,\n description='Datadog tracing code',\n url='https://github.com/DataDog/dd-trace-py',\n author='Datadog, Inc.',\n author_email='[email protected]',\n license='BSD',\n packages=find_packages(exclude=['tests*']),\n install_requires=[\n \"wrapt\",\n \"msgpack-python\",\n ],\n extra_requires={\n # users can include opentracing by having:\n # install_requires=[\"ddtrace[opentracing]\", ...]\n \"opentracing\": [\"opentracing\"],\n },\n # plugin tox\n tests_require=['tox', 'flake8'],\n cmdclass={'test': Tox},\n entry_points={\n 'console_scripts': [\n 'ddtrace-run = ddtrace.commands.ddtrace_run:main'\n ]\n },\n classifiers=[\n 
'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n)\n", "path": "setup.py"}]}
1,669
130
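The root cause in the record above is a silent keyword typo: setuptools only recognizes `extras_require`, so an `extra_requires` key is ignored (older setuptools merely warned "Unknown distribution option"), and the extra is never registered with the package metadata. A minimal corrected `setup()` sketch; the package name and pins here are illustrative:

```python
from setuptools import setup, find_packages

setup(
    name="example-pkg",
    version="0.1.0",
    packages=find_packages(),
    install_requires=["wrapt", "msgpack-python"],
    # Correct keyword is `extras_require`; this enables:
    #   pip install "example-pkg[opentracing]"
    extras_require={
        "opentracing": ["opentracing>=2.0.0"],
    },
)
```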
gh_patches_debug_33594
rasdani/github-patches
git_diff
saleor__saleor-5530
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ProductCreate weight mutation ![p1](https://user-images.githubusercontent.com/36409853/79555939-f2032000-80d2-11ea-88cc-5e1809e51ec5.PNG) ![p2](https://user-images.githubusercontent.com/36409853/79556174-4b6b4f00-80d3-11ea-96d7-f06ee76bf1ee.PNG) productCreate mutation I followed the "amount unit" as said in the comment, but returns a "'StringValue(value='10.00 kg')' value must be a float." Sorry just a beginner </issue> <code> [start of saleor/graphql/core/scalars.py] 1 import decimal 2 3 import graphene 4 from measurement.measures import Weight 5 6 from ...core.weight import convert_weight, get_default_weight_unit 7 8 9 class Decimal(graphene.Float): 10 """Custom Decimal implementation. 11 12 Returns Decimal as a float in the API, 13 parses float to the Decimal on the way back. 14 """ 15 16 @staticmethod 17 def parse_literal(node): 18 try: 19 return decimal.Decimal(node.value) 20 except decimal.DecimalException: 21 return None 22 23 @staticmethod 24 def parse_value(value): 25 try: 26 # Converting the float to str before parsing it to Decimal is 27 # necessary to keep the decimal places as typed 28 value = str(value) 29 return decimal.Decimal(value) 30 except decimal.DecimalException: 31 return None 32 33 34 class WeightScalar(graphene.Scalar): 35 @staticmethod 36 def parse_value(value): 37 # Expects value to be a string "amount unit" separated by a single 38 # space. 39 try: 40 value = decimal.Decimal(value) 41 except decimal.DecimalException: 42 return None 43 default_unit = get_default_weight_unit() 44 return Weight(**{default_unit: value}) 45 46 @staticmethod 47 def serialize(weight): 48 if isinstance(weight, Weight): 49 default_unit = get_default_weight_unit() 50 if weight.unit != default_unit: 51 weight = convert_weight(weight, default_unit) 52 return str(weight) 53 return None 54 55 @staticmethod 56 def parse_literal(node): 57 return node 58 [end of saleor/graphql/core/scalars.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/saleor/graphql/core/scalars.py b/saleor/graphql/core/scalars.py --- a/saleor/graphql/core/scalars.py +++ b/saleor/graphql/core/scalars.py @@ -1,6 +1,8 @@ import decimal import graphene +from graphql.language import ast +from graphql.error import GraphQLError from measurement.measures import Weight from ...core.weight import convert_weight, get_default_weight_unit @@ -34,14 +36,14 @@ class WeightScalar(graphene.Scalar): @staticmethod def parse_value(value): - # Expects value to be a string "amount unit" separated by a single - # space. - try: - value = decimal.Decimal(value) - except decimal.DecimalException: - return None - default_unit = get_default_weight_unit() - return Weight(**{default_unit: value}) + weight = None + if isinstance(value, dict): + weight = Weight(**{value["unit"]: value["value"]}) + else: + weight = WeightScalar.parse_decimal(value) + if not weight: + raise GraphQLError(f"Unsupported value: {value}") + return weight @staticmethod def serialize(weight): @@ -54,4 +56,35 @@ @staticmethod def parse_literal(node): - return node + weight = None + if isinstance(node, ast.ObjectValue): + weight = WeightScalar.parse_literal_object(node) + else: + weight = WeightScalar.parse_decimal(node.value) + if not weight: + raise GraphQLError(f"Unsupported value: {node.value}") + return weight + + @staticmethod + def parse_decimal(value): + try: + value = decimal.Decimal(value) + except decimal.DecimalException: + return None + default_unit = get_default_weight_unit() + return Weight(**{default_unit: value}) + + @staticmethod + def parse_literal_object(node): + value = 0 + unit = get_default_weight_unit() + + for field in node.fields: + if field.name.value == "value": + try: + value = decimal.Decimal(field.value.value) + except decimal.DecimalException: + raise GraphQLError(f"Unsupported value: {field.value.value}") + if field.name.value == "unit": + unit = field.value.value + return Weight(**{unit: value})
{"golden_diff": "diff --git a/saleor/graphql/core/scalars.py b/saleor/graphql/core/scalars.py\n--- a/saleor/graphql/core/scalars.py\n+++ b/saleor/graphql/core/scalars.py\n@@ -1,6 +1,8 @@\n import decimal\n \n import graphene\n+from graphql.language import ast\n+from graphql.error import GraphQLError\n from measurement.measures import Weight\n \n from ...core.weight import convert_weight, get_default_weight_unit\n@@ -34,14 +36,14 @@\n class WeightScalar(graphene.Scalar):\n @staticmethod\n def parse_value(value):\n- # Expects value to be a string \"amount unit\" separated by a single\n- # space.\n- try:\n- value = decimal.Decimal(value)\n- except decimal.DecimalException:\n- return None\n- default_unit = get_default_weight_unit()\n- return Weight(**{default_unit: value})\n+ weight = None\n+ if isinstance(value, dict):\n+ weight = Weight(**{value[\"unit\"]: value[\"value\"]})\n+ else:\n+ weight = WeightScalar.parse_decimal(value)\n+ if not weight:\n+ raise GraphQLError(f\"Unsupported value: {value}\")\n+ return weight\n \n @staticmethod\n def serialize(weight):\n@@ -54,4 +56,35 @@\n \n @staticmethod\n def parse_literal(node):\n- return node\n+ weight = None\n+ if isinstance(node, ast.ObjectValue):\n+ weight = WeightScalar.parse_literal_object(node)\n+ else:\n+ weight = WeightScalar.parse_decimal(node.value)\n+ if not weight:\n+ raise GraphQLError(f\"Unsupported value: {node.value}\")\n+ return weight\n+\n+ @staticmethod\n+ def parse_decimal(value):\n+ try:\n+ value = decimal.Decimal(value)\n+ except decimal.DecimalException:\n+ return None\n+ default_unit = get_default_weight_unit()\n+ return Weight(**{default_unit: value})\n+\n+ @staticmethod\n+ def parse_literal_object(node):\n+ value = 0\n+ unit = get_default_weight_unit()\n+\n+ for field in node.fields:\n+ if field.name.value == \"value\":\n+ try:\n+ value = decimal.Decimal(field.value.value)\n+ except decimal.DecimalException:\n+ raise GraphQLError(f\"Unsupported value: {field.value.value}\")\n+ if field.name.value == \"unit\":\n+ unit = field.value.value\n+ return Weight(**{unit: value})\n", "issue": "ProductCreate weight mutation\n\r\n![p1](https://user-images.githubusercontent.com/36409853/79555939-f2032000-80d2-11ea-88cc-5e1809e51ec5.PNG)\r\n![p2](https://user-images.githubusercontent.com/36409853/79556174-4b6b4f00-80d3-11ea-96d7-f06ee76bf1ee.PNG)\r\n\r\n\r\nproductCreate mutation\r\n\r\nI followed the \"amount unit\" as said in the comment, but returns a \"'StringValue(value='10.00 kg')' value must be a float.\"\r\nSorry just a beginner\n", "before_files": [{"content": "import decimal\n\nimport graphene\nfrom measurement.measures import Weight\n\nfrom ...core.weight import convert_weight, get_default_weight_unit\n\n\nclass Decimal(graphene.Float):\n \"\"\"Custom Decimal implementation.\n\n Returns Decimal as a float in the API,\n parses float to the Decimal on the way back.\n \"\"\"\n\n @staticmethod\n def parse_literal(node):\n try:\n return decimal.Decimal(node.value)\n except decimal.DecimalException:\n return None\n\n @staticmethod\n def parse_value(value):\n try:\n # Converting the float to str before parsing it to Decimal is\n # necessary to keep the decimal places as typed\n value = str(value)\n return decimal.Decimal(value)\n except decimal.DecimalException:\n return None\n\n\nclass WeightScalar(graphene.Scalar):\n @staticmethod\n def parse_value(value):\n # Expects value to be a string \"amount unit\" separated by a single\n # space.\n try:\n value = decimal.Decimal(value)\n except decimal.DecimalException:\n return None\n 
default_unit = get_default_weight_unit()\n return Weight(**{default_unit: value})\n\n @staticmethod\n def serialize(weight):\n if isinstance(weight, Weight):\n default_unit = get_default_weight_unit()\n if weight.unit != default_unit:\n weight = convert_weight(weight, default_unit)\n return str(weight)\n return None\n\n @staticmethod\n def parse_literal(node):\n return node\n", "path": "saleor/graphql/core/scalars.py"}]}
1,129
545
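The parsing rule the record above settles on: a weight scalar accepts either a bare decimal (interpreted in the shop's default unit) or a `{"unit": ..., "value": ...}` mapping, and anything else raises a clear error instead of a confusing float-coercion failure. A dependency-free sketch of that rule; `Weight` is replaced by a plain tuple and the error type is illustrative:

```python
import decimal

DEFAULT_UNIT = "kg"  # stand-in for get_default_weight_unit()

def parse_weight(value):
    if isinstance(value, dict):
        return (decimal.Decimal(str(value["value"])), value["unit"])
    try:
        return (decimal.Decimal(str(value)), DEFAULT_UNIT)
    except decimal.DecimalException:
        raise ValueError(f"Unsupported value: {value!r}")

print(parse_weight("10.00"))                      # (Decimal('10.00'), 'kg')
print(parse_weight({"unit": "g", "value": 250}))  # (Decimal('250'), 'g')

try:
    parse_weight("10.00 kg")  # the input from the issue report
except ValueError as exc:
    print(exc)  # Unsupported value: '10.00 kg'
```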
gh_patches_debug_18183
rasdani/github-patches
git_diff
keras-team__autokeras-627
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> pip install autokeras fails on torch ==1.1.0 ### Bug Description When executing `pip install autokeras`, I get the following message: `Could not find a version that satisfies the requirement torch==1.0.1.post2 (from autokeras) (from versions: 0.1.2, 0.1.2.post1) No matching distribution found for torch==1.0.1.post2 (from autokeras)` ### Reproducing Steps Steps to reproduce the behavior: * Step 1: set up anaconda environment * Step 2: install pytorch via their website's recommended command: `conda install pytorch-cpu torchvision-cpu -c pytorch` * Step 3: try to install autokeras via `pip install autokeras` * Step 4: get the following output: ``` Collecting autokeras Downloading https://files.pythonhosted.org/packages/c2/32/de74bf6afd09925980340355a05aa6a19e7378ed91dac09e76a487bd136d/autokeras-0.4.0.tar.gz (67kB) 100% |████████████████████████████████| 71kB 1.3MB/s Collecting scipy==1.2.0 (from autokeras) Downloading https://files.pythonhosted.org/packages/c4/0f/2bdeab43db2b4a75863863bf7eddda8920b031b0a70494fd2665c73c9aec/scipy-1.2.0-cp36-cp36m-win_amd64.whl (31.9MB) 100% |████████████████████████████████| 31.9MB 508kB/s Requirement already satisfied: tensorflow==1.13.1 in c:\[...]\lib\site-packages (from autokeras) (1.13.1) Collecting torch==1.0.1.post2 (from autokeras) Could not find a version that satisfies the requirement torch==1.0.1.post2 (from autokeras) (from versions: 0.1.2, 0.1.2.post1) No matching distribution found for torch==1.0.1.post2 (from autokeras) ``` ### Expected Behavior Autokeras is installed without error. ### Setup Details Include the details about the versions of: - OS type and version: Windows 10 Version 10.0.17763 Build 17763 - Python: 3.6.8 (anaconda) - autokeras: 0.4.0 - scikit-learn: 0.20.3 - numpy:1.16.2 - keras: 2.2.4 - scipy:1.2.1 - tensorflow:1.13.1 - pytorch:1.1.0 ### Additional context <!--- Add any other context about the problem here. --> </issue> <code> [start of setup.py] 1 from distutils.core import setup 2 from setuptools import find_packages 3 4 setup( 5 name='autokeras', 6 packages=find_packages(exclude=('tests',)), 7 install_requires=['scipy==1.2.0', 8 'tensorflow==1.13.1', 9 'torch==1.0.1.post2', 10 'torchvision==0.2.1', 11 'numpy==1.16.1', 12 'scikit-learn==0.20.2', 13 'scikit-image==0.14.2', 14 'tqdm==4.31.0', 15 'imageio==2.5.0', 16 'requests==2.21.0' 17 ], 18 version='0.4.0', 19 description='AutoML for deep learning', 20 author='DATA Lab at Texas A&M University', 21 author_email='[email protected]', 22 url='http://autokeras.com', 23 download_url='https://github.com/keras-team/autokeras/archive/0.3.7.tar.gz', 24 keywords=['AutoML', 'keras'], 25 classifiers=[] 26 ) 27 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -4,16 +4,16 @@ setup( name='autokeras', packages=find_packages(exclude=('tests',)), - install_requires=['scipy==1.2.0', - 'tensorflow==1.13.1', - 'torch==1.0.1.post2', - 'torchvision==0.2.1', - 'numpy==1.16.1', - 'scikit-learn==0.20.2', - 'scikit-image==0.14.2', - 'tqdm==4.31.0', - 'imageio==2.5.0', - 'requests==2.21.0' + install_requires=['scipy>=1.2.0', + 'tensorflow>=1.13.1', + 'torch>=1.0.1.post2', + 'torchvision>=0.2.1', + 'numpy>=1.16.1', + 'scikit-learn>=0.20.2', + 'scikit-image>=0.14.2', + 'tqdm>=4.31.0', + 'imageio>=2.5.0', + 'requests>=2.21.0' ], version='0.4.0', description='AutoML for deep learning',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -4,16 +4,16 @@\n setup(\n name='autokeras',\n packages=find_packages(exclude=('tests',)),\n- install_requires=['scipy==1.2.0',\n- 'tensorflow==1.13.1',\n- 'torch==1.0.1.post2',\n- 'torchvision==0.2.1',\n- 'numpy==1.16.1',\n- 'scikit-learn==0.20.2',\n- 'scikit-image==0.14.2',\n- 'tqdm==4.31.0',\n- 'imageio==2.5.0',\n- 'requests==2.21.0'\n+ install_requires=['scipy>=1.2.0',\n+ 'tensorflow>=1.13.1',\n+ 'torch>=1.0.1.post2',\n+ 'torchvision>=0.2.1',\n+ 'numpy>=1.16.1',\n+ 'scikit-learn>=0.20.2',\n+ 'scikit-image>=0.14.2',\n+ 'tqdm>=4.31.0',\n+ 'imageio>=2.5.0',\n+ 'requests>=2.21.0'\n ],\n version='0.4.0',\n description='AutoML for deep learning',\n", "issue": "pip install autokeras fails on torch ==1.1.0\n### Bug Description\r\nWhen executing `pip install autokeras`, I get the following message:\r\n`Could not find a version that satisfies the requirement torch==1.0.1.post2 (from autokeras) (from versions: 0.1.2, 0.1.2.post1)\r\nNo matching distribution found for torch==1.0.1.post2 (from autokeras)`\r\n\r\n### Reproducing Steps\r\nSteps to reproduce the behavior:\r\n * Step 1: set up anaconda environment\r\n * Step 2: install pytorch via their website's recommended command: `conda install pytorch-cpu torchvision-cpu -c pytorch`\r\n * Step 3: try to install autokeras via `pip install autokeras`\r\n * Step 4: get the following output:\r\n\r\n```\r\nCollecting autokeras\r\n Downloading https://files.pythonhosted.org/packages/c2/32/de74bf6afd09925980340355a05aa6a19e7378ed91dac09e76a487bd136d/autokeras-0.4.0.tar.gz (67kB)\r\n 100% |\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 71kB 1.3MB/s\r\nCollecting scipy==1.2.0 (from autokeras)\r\n Downloading https://files.pythonhosted.org/packages/c4/0f/2bdeab43db2b4a75863863bf7eddda8920b031b0a70494fd2665c73c9aec/scipy-1.2.0-cp36-cp36m-win_amd64.whl (31.9MB)\r\n 100% |\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 31.9MB 508kB/s\r\nRequirement already satisfied: tensorflow==1.13.1 in c:\\[...]\\lib\\site-packages (from autokeras) (1.13.1)\r\nCollecting torch==1.0.1.post2 (from autokeras)\r\n Could not find a version that satisfies the requirement torch==1.0.1.post2 (from autokeras) (from versions: 0.1.2, 0.1.2.post1)\r\nNo matching distribution found for torch==1.0.1.post2 (from autokeras)\r\n```\r\n\r\n### Expected Behavior\r\nAutokeras is installed without error.\r\n\r\n### Setup Details\r\nInclude the details about the versions of:\r\n - OS type and version: Windows 10 Version\t10.0.17763 Build 17763\r\n - Python: 3.6.8 (anaconda)\r\n - autokeras: 0.4.0\r\n - scikit-learn: 0.20.3\r\n - numpy:1.16.2\r\n - keras: 2.2.4\r\n - scipy:1.2.1\r\n - tensorflow:1.13.1\r\n - pytorch:1.1.0\r\n\r\n### Additional context\r\n<!---\r\nAdd any other context about the problem here.\r\n-->\r\n\n", "before_files": [{"content": "from distutils.core import setup\nfrom setuptools import find_packages\n\nsetup(\n name='autokeras',\n packages=find_packages(exclude=('tests',)),\n install_requires=['scipy==1.2.0',\n 'tensorflow==1.13.1',\n 'torch==1.0.1.post2',\n 'torchvision==0.2.1',\n 'numpy==1.16.1',\n 'scikit-learn==0.20.2',\n 'scikit-image==0.14.2',\n 'tqdm==4.31.0',\n 'imageio==2.5.0',\n 
'requests==2.21.0'\n ],\n version='0.4.0',\n description='AutoML for deep learning',\n author='DATA Lab at Texas A&M University',\n author_email='[email protected]',\n url='http://autokeras.com',\n download_url='https://github.com/keras-team/autokeras/archive/0.3.7.tar.gz',\n keywords=['AutoML', 'keras'],\n classifiers=[]\n)\n", "path": "setup.py"}]}
1,548
332
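Why the exact `==` pins above break installation: once an index only exposes newer wheels for a platform, an exact pin matches nothing ("No matching distribution found"), while a `>=` floor keeps resolving. A short sketch using the `packaging` library (third-party, but bundled with modern pip/setuptools); the version list imitates the torch wheels visible in the reported error:

```python
from packaging.specifiers import SpecifierSet

available = ["0.1.2", "0.1.2.post1", "1.1.0"]

exact = SpecifierSet("==1.0.1.post2")
floor = SpecifierSet(">=1.0.1.post2")

print([v for v in available if v in exact])  # [] -> install fails
print([v for v in available if v in floor])  # ['1.1.0'] -> install succeeds
```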
gh_patches_debug_39980
rasdani/github-patches
git_diff
microsoft__ptvsd-1161
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> sys.stdin not None and missing encoding attribute when running with pythonw.exe ## Environment data - PTVSD version: 4.2 - OS and version: windows 10 - Python version (& distribution if applicable, e.g. Anaconda): CPython 3.7 using **pythonw.exe** - Using VS Code or Visual Studio: VS ## Actual behavior None has no attribute encoding exception ## Expected behavior Either sys.stdin.encoding works, or sys.stdin is None (it is None when running without debugging) ## Steps to reproduce: 1. Debug this code using pythonw.exe (no console) ``` import sys with open('issue4866.txt', 'wt') as f: f.write('hello\n') f.write(str(type(sys.stdin)) + '\n') if sys.stdin is not None: f.write(str(sys.stdin.encoding) + '\n') f.write('bye\n') ``` From https://github.com/Microsoft/PTVS/issues/4866 </issue> <code> [start of src/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py] 1 from _pydevd_bundle import pydevd_constants 2 3 IS_PY3K = pydevd_constants.IS_PY3K 4 5 class IORedirector: 6 ''' 7 This class works to wrap a stream (stdout/stderr) with an additional redirect. 8 ''' 9 10 def __init__(self, original, new_redirect, wrap_buffer=False): 11 ''' 12 :param stream original: 13 The stream to be wrapped (usually stdout/stderr). 14 15 :param stream new_redirect: 16 Usually IOBuf (below). 17 18 :param bool wrap_buffer: 19 Whether to create a buffer attribute (needed to mimick python 3 s 20 tdout/stderr which has a buffer to write binary data). 21 ''' 22 self._redirect_to = (original, new_redirect) 23 if wrap_buffer and hasattr(original, 'buffer'): 24 self.buffer = IORedirector(original.buffer, new_redirect.buffer, False) 25 26 def write(self, s): 27 # Note that writing to the original stream may fail for some reasons 28 # (such as trying to write something that's not a string or having it closed). 29 for r in self._redirect_to: 30 r.write(s) 31 32 def isatty(self): 33 return self._redirect_to[0].isatty() 34 35 def flush(self): 36 for r in self._redirect_to: 37 r.flush() 38 39 def __getattr__(self, name): 40 for r in self._redirect_to: 41 if hasattr(r, name): 42 return getattr(r, name) 43 raise AttributeError(name) 44 45 class IOBuf: 46 '''This class works as a replacement for stdio and stderr. 47 It is a buffer and when its contents are requested, it will erase what 48 it has so far so that the next return will not return the same contents again. 49 ''' 50 def __init__(self): 51 self.buflist = [] 52 import os 53 self.encoding = os.environ.get('PYTHONIOENCODING', 'utf-8') 54 55 def getvalue(self): 56 b = self.buflist 57 self.buflist = [] # clear it 58 return ''.join(b) # bytes on py2, str on py3. 
59 60 def write(self, s): 61 if not IS_PY3K: 62 if isinstance(s, unicode): 63 # can't use 'errors' as kwargs in py 2.6 64 s = s.encode(self.encoding, 'replace') 65 else: 66 if isinstance(s, bytes): 67 s = s.decode(self.encoding, errors='replace') 68 self.buflist.append(s) 69 70 def isatty(self): 71 return False 72 73 def flush(self): 74 pass 75 76 def empty(self): 77 return len(self.buflist) == 0 78 79 class _RedirectionsHolder: 80 _stack_stdout = [] 81 _stack_stderr = [] 82 83 84 def start_redirect(keep_original_redirection=False, std='stdout'): 85 ''' 86 @param std: 'stdout', 'stderr', or 'both' 87 ''' 88 import sys 89 buf = IOBuf() 90 91 if std == 'both': 92 config_stds = ['stdout', 'stderr'] 93 else: 94 config_stds = [std] 95 96 for std in config_stds: 97 original = getattr(sys, std) 98 stack = getattr(_RedirectionsHolder, '_stack_%s' % std) 99 stack.append(original) 100 101 if keep_original_redirection: 102 setattr(sys, std, IORedirector(getattr(sys, std), buf)) 103 else: 104 setattr(sys, std, buf) 105 return buf 106 107 108 def end_redirect(std='stdout'): 109 import sys 110 if std == 'both': 111 config_stds = ['stdout', 'stderr'] 112 else: 113 config_stds = [std] 114 for std in config_stds: 115 stack = getattr(_RedirectionsHolder, '_stack_%s' % std) 116 setattr(sys, std, stack.pop()) 117 118 [end of src/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py b/src/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py --- a/src/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py +++ b/src/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py @@ -2,6 +2,7 @@ IS_PY3K = pydevd_constants.IS_PY3K + class IORedirector: ''' This class works to wrap a stream (stdout/stderr) with an additional redirect. @@ -10,7 +11,7 @@ def __init__(self, original, new_redirect, wrap_buffer=False): ''' :param stream original: - The stream to be wrapped (usually stdout/stderr). + The stream to be wrapped (usually stdout/stderr, but could be None). :param stream new_redirect: Usually IOBuf (below). @@ -27,14 +28,19 @@ # Note that writing to the original stream may fail for some reasons # (such as trying to write something that's not a string or having it closed). for r in self._redirect_to: - r.write(s) + if hasattr(r, 'write'): + r.write(s) def isatty(self): - return self._redirect_to[0].isatty() + for r in self._redirect_to: + if hasattr(r, 'isatty'): + return r.isatty() + return False def flush(self): for r in self._redirect_to: - r.flush() + if hasattr(r, 'flush'): + r.flush() def __getattr__(self, name): for r in self._redirect_to: @@ -42,11 +48,13 @@ return getattr(r, name) raise AttributeError(name) + class IOBuf: '''This class works as a replacement for stdio and stderr. It is a buffer and when its contents are requested, it will erase what it has so far so that the next return will not return the same contents again. ''' + def __init__(self): self.buflist = [] import os @@ -56,7 +64,7 @@ b = self.buflist self.buflist = [] # clear it return ''.join(b) # bytes on py2, str on py3. - + def write(self, s): if not IS_PY3K: if isinstance(s, unicode): @@ -76,6 +84,7 @@ def empty(self): return len(self.buflist) == 0 + class _RedirectionsHolder: _stack_stdout = [] _stack_stderr = []
{"golden_diff": "diff --git a/src/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py b/src/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py\n--- a/src/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py\n+++ b/src/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py\n@@ -2,6 +2,7 @@\n \n IS_PY3K = pydevd_constants.IS_PY3K\n \n+\n class IORedirector:\n '''\n This class works to wrap a stream (stdout/stderr) with an additional redirect.\n@@ -10,7 +11,7 @@\n def __init__(self, original, new_redirect, wrap_buffer=False):\n '''\n :param stream original:\n- The stream to be wrapped (usually stdout/stderr).\n+ The stream to be wrapped (usually stdout/stderr, but could be None).\n \n :param stream new_redirect:\n Usually IOBuf (below).\n@@ -27,14 +28,19 @@\n # Note that writing to the original stream may fail for some reasons\n # (such as trying to write something that's not a string or having it closed).\n for r in self._redirect_to:\n- r.write(s)\n+ if hasattr(r, 'write'):\n+ r.write(s)\n \n def isatty(self):\n- return self._redirect_to[0].isatty()\n+ for r in self._redirect_to:\n+ if hasattr(r, 'isatty'):\n+ return r.isatty()\n+ return False\n \n def flush(self):\n for r in self._redirect_to:\n- r.flush()\n+ if hasattr(r, 'flush'):\n+ r.flush()\n \n def __getattr__(self, name):\n for r in self._redirect_to:\n@@ -42,11 +48,13 @@\n return getattr(r, name)\n raise AttributeError(name)\n \n+\n class IOBuf:\n '''This class works as a replacement for stdio and stderr.\n It is a buffer and when its contents are requested, it will erase what\n it has so far so that the next return will not return the same contents again.\n '''\n+\n def __init__(self):\n self.buflist = []\n import os\n@@ -56,7 +64,7 @@\n b = self.buflist\n self.buflist = [] # clear it\n return ''.join(b) # bytes on py2, str on py3.\n- \n+\n def write(self, s):\n if not IS_PY3K:\n if isinstance(s, unicode):\n@@ -76,6 +84,7 @@\n def empty(self):\n return len(self.buflist) == 0\n \n+\n class _RedirectionsHolder:\n _stack_stdout = []\n _stack_stderr = []\n", "issue": "sys.stdin not None and missing encoding attribute when running with pythonw.exe\n## Environment data\r\n\r\n- PTVSD version: 4.2\r\n- OS and version: windows 10\r\n- Python version (& distribution if applicable, e.g. Anaconda): CPython 3.7 using **pythonw.exe**\r\n- Using VS Code or Visual Studio: VS\r\n\r\n## Actual behavior\r\n\r\nNone has no attribute encoding exception\r\n\r\n## Expected behavior\r\n\r\nEither sys.stdin.encoding works, or sys.stdin is None (it is None when running without debugging)\r\n\r\n\r\n## Steps to reproduce:\r\n1. 
Debug this code using pythonw.exe (no console)\r\n```\r\nimport sys\r\n\r\nwith open('issue4866.txt', 'wt') as f:\r\n f.write('hello\\n')\r\n f.write(str(type(sys.stdin)) + '\\n')\r\n if sys.stdin is not None:\r\n f.write(str(sys.stdin.encoding) + '\\n')\r\n f.write('bye\\n')\r\n```\r\n\r\nFrom https://github.com/Microsoft/PTVS/issues/4866\n", "before_files": [{"content": "from _pydevd_bundle import pydevd_constants\n\nIS_PY3K = pydevd_constants.IS_PY3K\n\nclass IORedirector:\n '''\n This class works to wrap a stream (stdout/stderr) with an additional redirect.\n '''\n\n def __init__(self, original, new_redirect, wrap_buffer=False):\n '''\n :param stream original:\n The stream to be wrapped (usually stdout/stderr).\n\n :param stream new_redirect:\n Usually IOBuf (below).\n\n :param bool wrap_buffer:\n Whether to create a buffer attribute (needed to mimick python 3 s\n tdout/stderr which has a buffer to write binary data).\n '''\n self._redirect_to = (original, new_redirect)\n if wrap_buffer and hasattr(original, 'buffer'):\n self.buffer = IORedirector(original.buffer, new_redirect.buffer, False)\n\n def write(self, s):\n # Note that writing to the original stream may fail for some reasons\n # (such as trying to write something that's not a string or having it closed).\n for r in self._redirect_to:\n r.write(s)\n\n def isatty(self):\n return self._redirect_to[0].isatty()\n\n def flush(self):\n for r in self._redirect_to:\n r.flush()\n\n def __getattr__(self, name):\n for r in self._redirect_to:\n if hasattr(r, name):\n return getattr(r, name)\n raise AttributeError(name)\n\nclass IOBuf:\n '''This class works as a replacement for stdio and stderr.\n It is a buffer and when its contents are requested, it will erase what\n it has so far so that the next return will not return the same contents again.\n '''\n def __init__(self):\n self.buflist = []\n import os\n self.encoding = os.environ.get('PYTHONIOENCODING', 'utf-8')\n\n def getvalue(self):\n b = self.buflist\n self.buflist = [] # clear it\n return ''.join(b) # bytes on py2, str on py3.\n \n def write(self, s):\n if not IS_PY3K:\n if isinstance(s, unicode):\n # can't use 'errors' as kwargs in py 2.6\n s = s.encode(self.encoding, 'replace')\n else:\n if isinstance(s, bytes):\n s = s.decode(self.encoding, errors='replace')\n self.buflist.append(s)\n\n def isatty(self):\n return False\n\n def flush(self):\n pass\n\n def empty(self):\n return len(self.buflist) == 0\n\nclass _RedirectionsHolder:\n _stack_stdout = []\n _stack_stderr = []\n\n\ndef start_redirect(keep_original_redirection=False, std='stdout'):\n '''\n @param std: 'stdout', 'stderr', or 'both'\n '''\n import sys\n buf = IOBuf()\n\n if std == 'both':\n config_stds = ['stdout', 'stderr']\n else:\n config_stds = [std]\n\n for std in config_stds:\n original = getattr(sys, std)\n stack = getattr(_RedirectionsHolder, '_stack_%s' % std)\n stack.append(original)\n\n if keep_original_redirection:\n setattr(sys, std, IORedirector(getattr(sys, std), buf))\n else:\n setattr(sys, std, buf)\n return buf\n\n\ndef end_redirect(std='stdout'):\n import sys\n if std == 'both':\n config_stds = ['stdout', 'stderr']\n else:\n config_stds = [std]\n for std in config_stds:\n stack = getattr(_RedirectionsHolder, '_stack_%s' % std)\n setattr(sys, std, stack.pop())\n\n", "path": "src/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py"}]}
1,869
647
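The hardening in the record above makes every delegated call check `hasattr()` first, so a redirector wrapping a missing stream (`sys.stdout`/`sys.stderr` are `None` under `pythonw.exe`) degrades gracefully instead of raising `AttributeError`. A standalone sketch of that behavior:

```python
import io

class IORedirector:
    def __init__(self, original, new_redirect):
        self._redirect_to = (original, new_redirect)

    def write(self, s):
        for r in self._redirect_to:
            if hasattr(r, "write"):  # skip None / write-less targets
                r.write(s)

    def isatty(self):
        for r in self._redirect_to:
            if hasattr(r, "isatty"):
                return r.isatty()
        return False

    def flush(self):
        for r in self._redirect_to:
            if hasattr(r, "flush"):
                r.flush()

buf = io.StringIO()
redirector = IORedirector(None, buf)  # original stream missing, as under pythonw
redirector.write("hello\n")
redirector.flush()
print(buf.getvalue(), end="")   # hello
print(redirector.isatty())      # False
```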
gh_patches_debug_4883
rasdani/github-patches
git_diff
pre-commit__pre-commit-2996
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Use of --dev deprecated for npm I'm seeing this warning sometimes (output seems to be hidden unless the install fails): ``` npm WARN install Usage of the `--dev` option is deprecated. Use `--include=dev` instead. ``` Which seems to be because of this: https://github.com/pre-commit/pre-commit/blob/fe436f1eb09dfdd67032b4f9f8dfa543fb99cf06/pre_commit/languages/node.py#L104 The problem with this command was that it installed dependencies recursively, rendering them useless (AFAICT, not a node expert). The developers decided it was only a footgun in https://github.com/npm/npm/issues/5554#issuecomment-56121953 and deprecated in https://github.com/npm/npm/issues/6200. </issue> <code> [start of pre_commit/languages/node.py] 1 from __future__ import annotations 2 3 import contextlib 4 import functools 5 import os 6 import sys 7 from typing import Generator 8 from typing import Sequence 9 10 import pre_commit.constants as C 11 from pre_commit import lang_base 12 from pre_commit.envcontext import envcontext 13 from pre_commit.envcontext import PatchesT 14 from pre_commit.envcontext import UNSET 15 from pre_commit.envcontext import Var 16 from pre_commit.languages.python import bin_dir 17 from pre_commit.prefix import Prefix 18 from pre_commit.util import cmd_output 19 from pre_commit.util import cmd_output_b 20 from pre_commit.util import rmtree 21 22 ENVIRONMENT_DIR = 'node_env' 23 run_hook = lang_base.basic_run_hook 24 25 26 @functools.lru_cache(maxsize=1) 27 def get_default_version() -> str: 28 # nodeenv does not yet support `-n system` on windows 29 if sys.platform == 'win32': 30 return C.DEFAULT 31 # if node is already installed, we can save a bunch of setup time by 32 # using the installed version 33 elif all(lang_base.exe_exists(exe) for exe in ('node', 'npm')): 34 return 'system' 35 else: 36 return C.DEFAULT 37 38 39 def get_env_patch(venv: str) -> PatchesT: 40 if sys.platform == 'cygwin': # pragma: no cover 41 _, win_venv, _ = cmd_output('cygpath', '-w', venv) 42 install_prefix = fr'{win_venv.strip()}\bin' 43 lib_dir = 'lib' 44 elif sys.platform == 'win32': # pragma: no cover 45 install_prefix = bin_dir(venv) 46 lib_dir = 'Scripts' 47 else: # pragma: win32 no cover 48 install_prefix = venv 49 lib_dir = 'lib' 50 return ( 51 ('NODE_VIRTUAL_ENV', venv), 52 ('NPM_CONFIG_PREFIX', install_prefix), 53 ('npm_config_prefix', install_prefix), 54 ('NPM_CONFIG_USERCONFIG', UNSET), 55 ('npm_config_userconfig', UNSET), 56 ('NODE_PATH', os.path.join(venv, lib_dir, 'node_modules')), 57 ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))), 58 ) 59 60 61 @contextlib.contextmanager 62 def in_env(prefix: Prefix, version: str) -> Generator[None, None, None]: 63 envdir = lang_base.environment_dir(prefix, ENVIRONMENT_DIR, version) 64 with envcontext(get_env_patch(envdir)): 65 yield 66 67 68 def health_check(prefix: Prefix, version: str) -> str | None: 69 with in_env(prefix, version): 70 retcode, _, _ = cmd_output_b('node', '--version', check=False) 71 if retcode != 0: # pragma: win32 no cover 72 return f'`node --version` returned {retcode}' 73 else: 74 return None 75 76 77 def install_environment( 78 prefix: Prefix, version: str, additional_dependencies: Sequence[str], 79 ) -> None: 80 assert prefix.exists('package.json') 81 envdir = lang_base.environment_dir(prefix, ENVIRONMENT_DIR, version) 82 83 # 
https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx?f=255&MSPPError=-2147217396#maxpath 84 if sys.platform == 'win32': # pragma: no cover 85 envdir = fr'\\?\{os.path.normpath(envdir)}' 86 cmd = [sys.executable, '-mnodeenv', '--prebuilt', '--clean-src', envdir] 87 if version != C.DEFAULT: 88 cmd.extend(['-n', version]) 89 cmd_output_b(*cmd) 90 91 with in_env(prefix, version): 92 # https://npm.community/t/npm-install-g-git-vs-git-clone-cd-npm-install-g/5449 93 # install as if we installed from git 94 95 local_install_cmd = ( 96 'npm', 'install', '--dev', '--prod', 97 '--ignore-prepublish', '--no-progress', '--no-save', 98 ) 99 lang_base.setup_cmd(prefix, local_install_cmd) 100 101 _, pkg, _ = cmd_output('npm', 'pack', cwd=prefix.prefix_dir) 102 pkg = prefix.path(pkg.strip()) 103 104 install = ('npm', 'install', '-g', pkg, *additional_dependencies) 105 lang_base.setup_cmd(prefix, install) 106 107 # clean these up after installation 108 if prefix.exists('node_modules'): # pragma: win32 no cover 109 rmtree(prefix.path('node_modules')) 110 os.remove(pkg) 111 [end of pre_commit/languages/node.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pre_commit/languages/node.py b/pre_commit/languages/node.py --- a/pre_commit/languages/node.py +++ b/pre_commit/languages/node.py @@ -93,7 +93,7 @@ # install as if we installed from git local_install_cmd = ( - 'npm', 'install', '--dev', '--prod', + 'npm', 'install', '--include=dev', '--include=prod', '--ignore-prepublish', '--no-progress', '--no-save', ) lang_base.setup_cmd(prefix, local_install_cmd)
{"golden_diff": "diff --git a/pre_commit/languages/node.py b/pre_commit/languages/node.py\n--- a/pre_commit/languages/node.py\n+++ b/pre_commit/languages/node.py\n@@ -93,7 +93,7 @@\n # install as if we installed from git\n \n local_install_cmd = (\n- 'npm', 'install', '--dev', '--prod',\n+ 'npm', 'install', '--include=dev', '--include=prod',\n '--ignore-prepublish', '--no-progress', '--no-save',\n )\n lang_base.setup_cmd(prefix, local_install_cmd)\n", "issue": "Use of --dev deprecated for npm\nI'm seeing this warning sometimes (output seems to be hidden unless the install fails):\r\n\r\n```\r\nnpm WARN install Usage of the `--dev` option is deprecated. Use `--include=dev` instead.\r\n```\r\n\r\nWhich seems to be because of this:\r\n\r\nhttps://github.com/pre-commit/pre-commit/blob/fe436f1eb09dfdd67032b4f9f8dfa543fb99cf06/pre_commit/languages/node.py#L104\r\n\r\nThe problem with this command was that it installed dependencies recursively, rendering them useless (AFAICT, not a node expert). The developers decided it was only a footgun in https://github.com/npm/npm/issues/5554#issuecomment-56121953 and deprecated in https://github.com/npm/npm/issues/6200.\n", "before_files": [{"content": "from __future__ import annotations\n\nimport contextlib\nimport functools\nimport os\nimport sys\nfrom typing import Generator\nfrom typing import Sequence\n\nimport pre_commit.constants as C\nfrom pre_commit import lang_base\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.languages.python import bin_dir\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\nfrom pre_commit.util import rmtree\n\nENVIRONMENT_DIR = 'node_env'\nrun_hook = lang_base.basic_run_hook\n\n\[email protected]_cache(maxsize=1)\ndef get_default_version() -> str:\n # nodeenv does not yet support `-n system` on windows\n if sys.platform == 'win32':\n return C.DEFAULT\n # if node is already installed, we can save a bunch of setup time by\n # using the installed version\n elif all(lang_base.exe_exists(exe) for exe in ('node', 'npm')):\n return 'system'\n else:\n return C.DEFAULT\n\n\ndef get_env_patch(venv: str) -> PatchesT:\n if sys.platform == 'cygwin': # pragma: no cover\n _, win_venv, _ = cmd_output('cygpath', '-w', venv)\n install_prefix = fr'{win_venv.strip()}\\bin'\n lib_dir = 'lib'\n elif sys.platform == 'win32': # pragma: no cover\n install_prefix = bin_dir(venv)\n lib_dir = 'Scripts'\n else: # pragma: win32 no cover\n install_prefix = venv\n lib_dir = 'lib'\n return (\n ('NODE_VIRTUAL_ENV', venv),\n ('NPM_CONFIG_PREFIX', install_prefix),\n ('npm_config_prefix', install_prefix),\n ('NPM_CONFIG_USERCONFIG', UNSET),\n ('npm_config_userconfig', UNSET),\n ('NODE_PATH', os.path.join(venv, lib_dir, 'node_modules')),\n ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),\n )\n\n\[email protected]\ndef in_env(prefix: Prefix, version: str) -> Generator[None, None, None]:\n envdir = lang_base.environment_dir(prefix, ENVIRONMENT_DIR, version)\n with envcontext(get_env_patch(envdir)):\n yield\n\n\ndef health_check(prefix: Prefix, version: str) -> str | None:\n with in_env(prefix, version):\n retcode, _, _ = cmd_output_b('node', '--version', check=False)\n if retcode != 0: # pragma: win32 no cover\n return f'`node --version` returned {retcode}'\n else:\n return None\n\n\ndef install_environment(\n prefix: Prefix, version: str, 
additional_dependencies: Sequence[str],\n) -> None:\n assert prefix.exists('package.json')\n envdir = lang_base.environment_dir(prefix, ENVIRONMENT_DIR, version)\n\n # https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx?f=255&MSPPError=-2147217396#maxpath\n if sys.platform == 'win32': # pragma: no cover\n envdir = fr'\\\\?\\{os.path.normpath(envdir)}'\n cmd = [sys.executable, '-mnodeenv', '--prebuilt', '--clean-src', envdir]\n if version != C.DEFAULT:\n cmd.extend(['-n', version])\n cmd_output_b(*cmd)\n\n with in_env(prefix, version):\n # https://npm.community/t/npm-install-g-git-vs-git-clone-cd-npm-install-g/5449\n # install as if we installed from git\n\n local_install_cmd = (\n 'npm', 'install', '--dev', '--prod',\n '--ignore-prepublish', '--no-progress', '--no-save',\n )\n lang_base.setup_cmd(prefix, local_install_cmd)\n\n _, pkg, _ = cmd_output('npm', 'pack', cwd=prefix.prefix_dir)\n pkg = prefix.path(pkg.strip())\n\n install = ('npm', 'install', '-g', pkg, *additional_dependencies)\n lang_base.setup_cmd(prefix, install)\n\n # clean these up after installation\n if prefix.exists('node_modules'): # pragma: win32 no cover\n rmtree(prefix.path('node_modules'))\n os.remove(pkg)\n", "path": "pre_commit/languages/node.py"}]}
1,951
124
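The corrected invocation from the record above, shown as a small Python sketch that only builds and prints the command. `--dev` is deprecated in current npm (and historically installed dev dependencies recursively); `--include=dev` / `--include=prod` express the same intent without that footgun.

```python
local_install_cmd = [
    "npm", "install",
    "--include=dev", "--include=prod",
    "--ignore-prepublish", "--no-progress", "--no-save",
]
print(" ".join(local_install_cmd))
# In the real hook setup this runs inside the repository checkout, e.g.
# subprocess.run(local_install_cmd, cwd=checkout_dir, check=True),
# where `checkout_dir` is a hypothetical name for the hook checkout path.
```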
gh_patches_debug_42163
rasdani/github-patches
git_diff
cupy__cupy-2290
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `TestNpz.test_dump` test failure https://jenkins.preferred.jp/job/chainer/job/cupy_pr/161/TEST=cupy-py3,label=mn1-p100/console </issue> <code> [start of cupy/io/npz.py] 1 import numpy 2 3 import cupy 4 5 6 class NpzFile(object): 7 8 def __init__(self, npz_file): 9 self.npz_file = npz_file 10 11 def __enter__(self): 12 self.npz_file.__enter__() 13 return self 14 15 def __exit__(self, typ, val, traceback): 16 self.npz_file.__exit__(typ, val, traceback) 17 18 def __getitem__(self, key): 19 arr = self.npz_file[key] 20 return cupy.array(arr) 21 22 def close(self): 23 self.npz_file.close() 24 25 26 def load(file, mmap_mode=None): 27 """Loads arrays or pickled objects from ``.npy``, ``.npz`` or pickled file. 28 29 This function just calls ``numpy.load`` and then sends the arrays to the 30 current device. NPZ file is converted to NpzFile object, which defers the 31 transfer to the time of accessing the items. 32 33 Args: 34 file (file-like object or string): The file to read. 35 mmap_mode (None, 'r+', 'r', 'w+', 'c'): If not ``None``, memory-map the 36 file to construct an intermediate :class:`numpy.ndarray` object and 37 transfer it to the current device. 38 39 Returns: 40 CuPy array or NpzFile object depending on the type of the file. NpzFile 41 object is a dictionary-like object with the context manager protocol 42 (which enables us to use *with* statement on it). 43 44 .. seealso:: :func:`numpy.load` 45 46 """ 47 obj = numpy.load(file, mmap_mode) 48 if isinstance(obj, numpy.ndarray): 49 return cupy.array(obj) 50 elif isinstance(obj, numpy.lib.npyio.NpzFile): 51 return NpzFile(obj) 52 else: 53 return obj 54 55 56 def save(file, arr): 57 """Saves an array to a binary file in ``.npy`` format. 58 59 Args: 60 file (file or str): File or filename to save. 61 arr (array_like): Array to save. It should be able to feed to 62 :func:`cupy.asnumpy`. 63 64 .. seealso:: :func:`numpy.save` 65 66 """ 67 numpy.save(file, cupy.asnumpy(arr)) 68 69 70 def savez(file, *args, **kwds): 71 """Saves one or more arrays into a file in uncompressed ``.npz`` format. 72 73 Arguments without keys are treated as arguments with automatic keys named 74 ``arr_0``, ``arr_1``, etc. corresponding to the positions in the argument 75 list. The keys of arguments are used as keys in the ``.npz`` file, which 76 are used for accessing NpzFile object when the file is read by 77 :func:`cupy.load` function. 78 79 Args: 80 file (file or str): File or filename to save. 81 *args: Arrays with implicit keys. 82 **kwds: Arrays with explicit keys. 83 84 .. seealso:: :func:`numpy.savez` 85 86 """ 87 args = map(cupy.asnumpy, args) 88 for key in kwds: 89 kwds[key] = cupy.asnumpy(kwds[key]) 90 numpy.savez(file, *args, **kwds) 91 92 93 def savez_compressed(file, *args, **kwds): 94 """Saves one or more arrays into a file in compressed ``.npz`` format. 95 96 It is equivalent to :func:`cupy.savez` function except the output file is 97 compressed. 98 99 .. seealso:: 100 :func:`cupy.savez` for more detail, 101 :func:`numpy.savez_compressed` 102 103 """ 104 args = map(cupy.asnumpy, args) 105 for key in kwds: 106 kwds[key] = cupy.asnumpy(kwds[key]) 107 numpy.savez_compressed(file, *args, **kwds) 108 [end of cupy/io/npz.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cupy/io/npz.py b/cupy/io/npz.py --- a/cupy/io/npz.py +++ b/cupy/io/npz.py @@ -1,8 +1,13 @@ +import warnings + import numpy import cupy +_support_allow_pickle = (numpy.lib.NumpyVersion(numpy.__version__) >= '1.10.0') + + class NpzFile(object): def __init__(self, npz_file): @@ -23,7 +28,7 @@ self.npz_file.close() -def load(file, mmap_mode=None): +def load(file, mmap_mode=None, allow_pickle=None): """Loads arrays or pickled objects from ``.npy``, ``.npz`` or pickled file. This function just calls ``numpy.load`` and then sends the arrays to the @@ -35,6 +40,16 @@ mmap_mode (None, 'r+', 'r', 'w+', 'c'): If not ``None``, memory-map the file to construct an intermediate :class:`numpy.ndarray` object and transfer it to the current device. + allow_pickle (bool): Allow loading pickled object arrays stored in npy + files. Reasons for disallowing pickles include security, as + loading pickled data can execute arbitrary code. If pickles are + disallowed, loading object arrays will fail. + Please be aware that CuPy does not support arrays with dtype of + `object`. + The default is False. + This option is available only for NumPy 1.10 or later. + In NumPy 1.9, this option cannot be specified (loading pickled + objects is always allowed). Returns: CuPy array or NpzFile object depending on the type of the file. NpzFile @@ -44,7 +59,14 @@ .. seealso:: :func:`numpy.load` """ - obj = numpy.load(file, mmap_mode) + if _support_allow_pickle: + allow_pickle = False if allow_pickle is None else allow_pickle + obj = numpy.load(file, mmap_mode, allow_pickle) + else: + if allow_pickle is not None: + warnings.warn('allow_pickle option is not supported in NumPy 1.9') + obj = numpy.load(file, mmap_mode) + if isinstance(obj, numpy.ndarray): return cupy.array(obj) elif isinstance(obj, numpy.lib.npyio.NpzFile): @@ -53,18 +75,35 @@ return obj -def save(file, arr): +def save(file, arr, allow_pickle=None): """Saves an array to a binary file in ``.npy`` format. Args: file (file or str): File or filename to save. arr (array_like): Array to save. It should be able to feed to :func:`cupy.asnumpy`. + allow_pickle (bool): Allow saving object arrays using Python pickles. + Reasons for disallowing pickles include security (loading pickled + data can execute arbitrary code) and portability (pickled objects + may not be loadable on different Python installations, for example + if the stored objects require libraries that are not available, + and not all pickled data is compatible between Python 2 and Python + 3). + The default is True. + This option is available only for NumPy 1.10 or later. + In NumPy 1.9, this option cannot be specified (saving objects + using pickles is always allowed). .. seealso:: :func:`numpy.save` """ - numpy.save(file, cupy.asnumpy(arr)) + if _support_allow_pickle: + allow_pickle = True if allow_pickle is None else allow_pickle + numpy.save(file, cupy.asnumpy(arr), allow_pickle) + else: + if allow_pickle is not None: + warnings.warn('allow_pickle option is not supported in NumPy 1.9') + numpy.save(file, cupy.asnumpy(arr)) def savez(file, *args, **kwds):
{"golden_diff": "diff --git a/cupy/io/npz.py b/cupy/io/npz.py\n--- a/cupy/io/npz.py\n+++ b/cupy/io/npz.py\n@@ -1,8 +1,13 @@\n+import warnings\n+\n import numpy\n \n import cupy\n \n \n+_support_allow_pickle = (numpy.lib.NumpyVersion(numpy.__version__) >= '1.10.0')\n+\n+\n class NpzFile(object):\n \n def __init__(self, npz_file):\n@@ -23,7 +28,7 @@\n self.npz_file.close()\n \n \n-def load(file, mmap_mode=None):\n+def load(file, mmap_mode=None, allow_pickle=None):\n \"\"\"Loads arrays or pickled objects from ``.npy``, ``.npz`` or pickled file.\n \n This function just calls ``numpy.load`` and then sends the arrays to the\n@@ -35,6 +40,16 @@\n mmap_mode (None, 'r+', 'r', 'w+', 'c'): If not ``None``, memory-map the\n file to construct an intermediate :class:`numpy.ndarray` object and\n transfer it to the current device.\n+ allow_pickle (bool): Allow loading pickled object arrays stored in npy\n+ files. Reasons for disallowing pickles include security, as\n+ loading pickled data can execute arbitrary code. If pickles are\n+ disallowed, loading object arrays will fail.\n+ Please be aware that CuPy does not support arrays with dtype of\n+ `object`.\n+ The default is False.\n+ This option is available only for NumPy 1.10 or later.\n+ In NumPy 1.9, this option cannot be specified (loading pickled\n+ objects is always allowed).\n \n Returns:\n CuPy array or NpzFile object depending on the type of the file. NpzFile\n@@ -44,7 +59,14 @@\n .. seealso:: :func:`numpy.load`\n \n \"\"\"\n- obj = numpy.load(file, mmap_mode)\n+ if _support_allow_pickle:\n+ allow_pickle = False if allow_pickle is None else allow_pickle\n+ obj = numpy.load(file, mmap_mode, allow_pickle)\n+ else:\n+ if allow_pickle is not None:\n+ warnings.warn('allow_pickle option is not supported in NumPy 1.9')\n+ obj = numpy.load(file, mmap_mode)\n+\n if isinstance(obj, numpy.ndarray):\n return cupy.array(obj)\n elif isinstance(obj, numpy.lib.npyio.NpzFile):\n@@ -53,18 +75,35 @@\n return obj\n \n \n-def save(file, arr):\n+def save(file, arr, allow_pickle=None):\n \"\"\"Saves an array to a binary file in ``.npy`` format.\n \n Args:\n file (file or str): File or filename to save.\n arr (array_like): Array to save. It should be able to feed to\n :func:`cupy.asnumpy`.\n+ allow_pickle (bool): Allow saving object arrays using Python pickles.\n+ Reasons for disallowing pickles include security (loading pickled\n+ data can execute arbitrary code) and portability (pickled objects\n+ may not be loadable on different Python installations, for example\n+ if the stored objects require libraries that are not available,\n+ and not all pickled data is compatible between Python 2 and Python\n+ 3).\n+ The default is True.\n+ This option is available only for NumPy 1.10 or later.\n+ In NumPy 1.9, this option cannot be specified (saving objects\n+ using pickles is always allowed).\n \n .. 
seealso:: :func:`numpy.save`\n \n \"\"\"\n- numpy.save(file, cupy.asnumpy(arr))\n+ if _support_allow_pickle:\n+ allow_pickle = True if allow_pickle is None else allow_pickle\n+ numpy.save(file, cupy.asnumpy(arr), allow_pickle)\n+ else:\n+ if allow_pickle is not None:\n+ warnings.warn('allow_pickle option is not supported in NumPy 1.9')\n+ numpy.save(file, cupy.asnumpy(arr))\n \n \n def savez(file, *args, **kwds):\n", "issue": "`TestNpz.test_dump` test failure\nhttps://jenkins.preferred.jp/job/chainer/job/cupy_pr/161/TEST=cupy-py3,label=mn1-p100/console\r\n\n", "before_files": [{"content": "import numpy\n\nimport cupy\n\n\nclass NpzFile(object):\n\n def __init__(self, npz_file):\n self.npz_file = npz_file\n\n def __enter__(self):\n self.npz_file.__enter__()\n return self\n\n def __exit__(self, typ, val, traceback):\n self.npz_file.__exit__(typ, val, traceback)\n\n def __getitem__(self, key):\n arr = self.npz_file[key]\n return cupy.array(arr)\n\n def close(self):\n self.npz_file.close()\n\n\ndef load(file, mmap_mode=None):\n \"\"\"Loads arrays or pickled objects from ``.npy``, ``.npz`` or pickled file.\n\n This function just calls ``numpy.load`` and then sends the arrays to the\n current device. NPZ file is converted to NpzFile object, which defers the\n transfer to the time of accessing the items.\n\n Args:\n file (file-like object or string): The file to read.\n mmap_mode (None, 'r+', 'r', 'w+', 'c'): If not ``None``, memory-map the\n file to construct an intermediate :class:`numpy.ndarray` object and\n transfer it to the current device.\n\n Returns:\n CuPy array or NpzFile object depending on the type of the file. NpzFile\n object is a dictionary-like object with the context manager protocol\n (which enables us to use *with* statement on it).\n\n .. seealso:: :func:`numpy.load`\n\n \"\"\"\n obj = numpy.load(file, mmap_mode)\n if isinstance(obj, numpy.ndarray):\n return cupy.array(obj)\n elif isinstance(obj, numpy.lib.npyio.NpzFile):\n return NpzFile(obj)\n else:\n return obj\n\n\ndef save(file, arr):\n \"\"\"Saves an array to a binary file in ``.npy`` format.\n\n Args:\n file (file or str): File or filename to save.\n arr (array_like): Array to save. It should be able to feed to\n :func:`cupy.asnumpy`.\n\n .. seealso:: :func:`numpy.save`\n\n \"\"\"\n numpy.save(file, cupy.asnumpy(arr))\n\n\ndef savez(file, *args, **kwds):\n \"\"\"Saves one or more arrays into a file in uncompressed ``.npz`` format.\n\n Arguments without keys are treated as arguments with automatic keys named\n ``arr_0``, ``arr_1``, etc. corresponding to the positions in the argument\n list. The keys of arguments are used as keys in the ``.npz`` file, which\n are used for accessing NpzFile object when the file is read by\n :func:`cupy.load` function.\n\n Args:\n file (file or str): File or filename to save.\n *args: Arrays with implicit keys.\n **kwds: Arrays with explicit keys.\n\n .. seealso:: :func:`numpy.savez`\n\n \"\"\"\n args = map(cupy.asnumpy, args)\n for key in kwds:\n kwds[key] = cupy.asnumpy(kwds[key])\n numpy.savez(file, *args, **kwds)\n\n\ndef savez_compressed(file, *args, **kwds):\n \"\"\"Saves one or more arrays into a file in compressed ``.npz`` format.\n\n It is equivalent to :func:`cupy.savez` function except the output file is\n compressed.\n\n .. seealso::\n :func:`cupy.savez` for more detail,\n :func:`numpy.savez_compressed`\n\n \"\"\"\n args = map(cupy.asnumpy, args)\n for key in kwds:\n kwds[key] = cupy.asnumpy(kwds[key])\n numpy.savez_compressed(file, *args, **kwds)\n", "path": "cupy/io/npz.py"}]}
1,647
932
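The version gate in the golden diff above is worth seeing in isolation. Below is a minimal sketch of the same pattern using plain NumPy; `load_compat` is an illustrative name, not part of CuPy's API, and the 1.10 cutoff is taken from the diff itself.

```python
import warnings

import numpy

# numpy.load grew the allow_pickle keyword in NumPy 1.10.
_SUPPORTS_ALLOW_PICKLE = numpy.lib.NumpyVersion(numpy.__version__) >= "1.10.0"


def load_compat(file, mmap_mode=None, allow_pickle=None):
    """Version-gated load mirroring the patched cupy.load behavior."""
    if _SUPPORTS_ALLOW_PICKLE:
        # Pick the safer default (False) when the caller expressed no choice.
        allow_pickle = False if allow_pickle is None else allow_pickle
        return numpy.load(file, mmap_mode, allow_pickle)
    if allow_pickle is not None:
        warnings.warn("allow_pickle option is not supported in NumPy 1.9")
    return numpy.load(file, mmap_mode)
```

Making the default explicit also insulates callers from NumPy flipping its own `allow_pickle` default, which is the kind of upstream change that breaks tests like `TestNpz.test_dump`.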
gh_patches_debug_14753
rasdani/github-patches
git_diff
ansible__ansible-39634
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> aws_s3 is automaticly decrypting ansible-vault encrypted files before put <!--- Verify first that your issue/request is not already reported on GitHub. Also test if the latest release, and devel branch are affected too. Always add information AFTER of these html comments. --> ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME aws_s3 ##### ANSIBLE VERSION <!--- Paste, BELOW THIS COMMENT, verbatim output from "ansible --version" between quotes below --> ``` 2.5.1 ``` ##### SUMMARY - I'm trying to upload an ansible-vault encrypted file with aws_s3. But aws_s3 decrypts the src: file before uploading it to S3. - aws_s3 in 2.4 didn't decrypt the src: parameter. - The documentation for aws_s3 doesn't mention that the src: parameter is autodecrypted. - The aws_s3 module doesn't accept the decrypt: argument. ##### STEPS TO REPRODUCE <!--- Paste example playbooks or commands between quotes below --> ```yaml - name: upload vault to s3 aws_s3: bucket: "the bucket" object: "file.txt" src: "file.txt" mode: put ``` 1. The file.txt is encrypted with ansible-vault. 2. The playbook that runs this task is invoked with --vault-password and is able to decrypt the file because other tasks need the file decrypted. ##### EXPECTED RESULTS Don't autodecrypt the src: argument or be able to specify decrypt: no. ##### ACTUAL RESULTS The src: argument to aws_s3 is automagicly decrypted without documentation or a way to disable the feature like other modules (ex. copy). aws_s3 is automaticly decrypting ansible-vault encrypted files before put <!--- Verify first that your issue/request is not already reported on GitHub. Also test if the latest release, and devel branch are affected too. Always add information AFTER of these html comments. --> ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME aws_s3 ##### ANSIBLE VERSION <!--- Paste, BELOW THIS COMMENT, verbatim output from "ansible --version" between quotes below --> ``` 2.5.1 ``` ##### SUMMARY - I'm trying to upload an ansible-vault encrypted file with aws_s3. But aws_s3 decrypts the src: file before uploading it to S3. - aws_s3 in 2.4 didn't decrypt the src: parameter. - The documentation for aws_s3 doesn't mention that the src: parameter is autodecrypted. - The aws_s3 module doesn't accept the decrypt: argument. ##### STEPS TO REPRODUCE <!--- Paste example playbooks or commands between quotes below --> ```yaml - name: upload vault to s3 aws_s3: bucket: "the bucket" object: "file.txt" src: "file.txt" mode: put ``` 1. The file.txt is encrypted with ansible-vault. 2. The playbook that runs this task is invoked with --vault-password and is able to decrypt the file because other tasks need the file decrypted. ##### EXPECTED RESULTS Don't autodecrypt the src: argument or be able to specify decrypt: no. ##### ACTUAL RESULTS The src: argument to aws_s3 is automagicly decrypted without documentation or a way to disable the feature like other modules (ex. copy). </issue> <code> [start of lib/ansible/plugins/action/aws_s3.py] 1 # (c) 2012, Michael DeHaan <[email protected]> 2 # (c) 2018, Will Thames <[email protected]> 3 # 4 # This file is part of Ansible 5 # 6 # Ansible is free software: you can redistribute it and/or modify 7 # it under the terms of the GNU General Public License as published by 8 # the Free Software Foundation, either version 3 of the License, or 9 # (at your option) any later version. 
10 # 11 # Ansible is distributed in the hope that it will be useful, 12 # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 # GNU General Public License for more details. 15 # 16 # You should have received a copy of the GNU General Public License 17 # along with Ansible. If not, see <http://www.gnu.org/licenses/>. 18 from __future__ import (absolute_import, division, print_function) 19 __metaclass__ = type 20 21 import os 22 23 from ansible.errors import AnsibleError, AnsibleAction, AnsibleActionFail, AnsibleFileNotFound 24 from ansible.module_utils._text import to_text 25 from ansible.plugins.action import ActionBase 26 27 28 class ActionModule(ActionBase): 29 30 TRANSFERS_FILES = True 31 32 def run(self, tmp=None, task_vars=None): 33 ''' handler for aws_s3 operations ''' 34 if task_vars is None: 35 task_vars = dict() 36 37 result = super(ActionModule, self).run(tmp, task_vars) 38 del tmp # tmp no longer has any effect 39 40 source = self._task.args.get('src', None) 41 42 try: 43 new_module_args = self._task.args.copy() 44 if source: 45 source = os.path.expanduser(source) 46 47 # For backward compatibility check if the file exists on the remote; it should take precedence 48 if not self._remote_file_exists(source): 49 try: 50 source = self._loader.get_real_file(self._find_needle('files', source)) 51 new_module_args['src'] = source 52 except AnsibleFileNotFound as e: 53 # module handles error message for nonexistent files 54 new_module_args['src'] = source 55 except AnsibleError as e: 56 raise AnsibleActionFail(to_text(e)) 57 58 # execute the aws_s3 module now, with the updated args 59 result.update(self._execute_module(module_args=new_module_args, task_vars=task_vars)) 60 except AnsibleAction as e: 61 result.update(e.result) 62 return result 63 [end of lib/ansible/plugins/action/aws_s3.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lib/ansible/plugins/action/aws_s3.py b/lib/ansible/plugins/action/aws_s3.py --- a/lib/ansible/plugins/action/aws_s3.py +++ b/lib/ansible/plugins/action/aws_s3.py @@ -47,7 +47,7 @@ # For backward compatibility check if the file exists on the remote; it should take precedence if not self._remote_file_exists(source): try: - source = self._loader.get_real_file(self._find_needle('files', source)) + source = self._loader.get_real_file(self._find_needle('files', source), decrypt=False) new_module_args['src'] = source except AnsibleFileNotFound as e: # module handles error message for nonexistent files
{"golden_diff": "diff --git a/lib/ansible/plugins/action/aws_s3.py b/lib/ansible/plugins/action/aws_s3.py\n--- a/lib/ansible/plugins/action/aws_s3.py\n+++ b/lib/ansible/plugins/action/aws_s3.py\n@@ -47,7 +47,7 @@\n # For backward compatibility check if the file exists on the remote; it should take precedence\n if not self._remote_file_exists(source):\n try:\n- source = self._loader.get_real_file(self._find_needle('files', source))\n+ source = self._loader.get_real_file(self._find_needle('files', source), decrypt=False)\n new_module_args['src'] = source\n except AnsibleFileNotFound as e:\n # module handles error message for nonexistent files\n", "issue": "aws_s3 is automaticly decrypting ansible-vault encrypted files before put\n<!---\r\nVerify first that your issue/request is not already reported on GitHub.\r\nAlso test if the latest release, and devel branch are affected too.\r\nAlways add information AFTER of these html comments. -->\r\n\r\n##### ISSUE TYPE\r\n - Bug Report\r\n\r\n##### COMPONENT NAME\r\naws_s3\r\n\r\n##### ANSIBLE VERSION\r\n<!--- Paste, BELOW THIS COMMENT, verbatim output from \"ansible --version\" between quotes below -->\r\n```\r\n2.5.1\r\n```\r\n\r\n##### SUMMARY\r\n- I'm trying to upload an ansible-vault encrypted file with aws_s3. But aws_s3 decrypts the src: file before uploading it to S3. \r\n- aws_s3 in 2.4 didn't decrypt the src: parameter.\r\n- The documentation for aws_s3 doesn't mention that the src: parameter is autodecrypted.\r\n- The aws_s3 module doesn't accept the decrypt: argument.\r\n\r\n##### STEPS TO REPRODUCE\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml\r\n- name: upload vault to s3\r\n aws_s3:\r\n bucket: \"the bucket\"\r\n object: \"file.txt\"\r\n src: \"file.txt\"\r\n mode: put\r\n```\r\n1. The file.txt is encrypted with ansible-vault. \r\n2. The playbook that runs this task is invoked with --vault-password and is able to decrypt the file because other tasks need the file decrypted.\r\n\r\n##### EXPECTED RESULTS\r\nDon't autodecrypt the src: argument or be able to specify decrypt: no.\r\n\r\n##### ACTUAL RESULTS\r\nThe src: argument to aws_s3 is automagicly decrypted without documentation or a way to disable the feature like other modules (ex. copy).\r\n\naws_s3 is automaticly decrypting ansible-vault encrypted files before put\n<!---\r\nVerify first that your issue/request is not already reported on GitHub.\r\nAlso test if the latest release, and devel branch are affected too.\r\nAlways add information AFTER of these html comments. -->\r\n\r\n##### ISSUE TYPE\r\n - Bug Report\r\n\r\n##### COMPONENT NAME\r\naws_s3\r\n\r\n##### ANSIBLE VERSION\r\n<!--- Paste, BELOW THIS COMMENT, verbatim output from \"ansible --version\" between quotes below -->\r\n```\r\n2.5.1\r\n```\r\n\r\n##### SUMMARY\r\n- I'm trying to upload an ansible-vault encrypted file with aws_s3. But aws_s3 decrypts the src: file before uploading it to S3. \r\n- aws_s3 in 2.4 didn't decrypt the src: parameter.\r\n- The documentation for aws_s3 doesn't mention that the src: parameter is autodecrypted.\r\n- The aws_s3 module doesn't accept the decrypt: argument.\r\n\r\n##### STEPS TO REPRODUCE\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml\r\n- name: upload vault to s3\r\n aws_s3:\r\n bucket: \"the bucket\"\r\n object: \"file.txt\"\r\n src: \"file.txt\"\r\n mode: put\r\n```\r\n1. The file.txt is encrypted with ansible-vault. \r\n2. 
The playbook that runs this task is invoked with --vault-password and is able to decrypt the file because other tasks need the file decrypted.\r\n\r\n##### EXPECTED RESULTS\r\nDon't autodecrypt the src: argument or be able to specify decrypt: no.\r\n\r\n##### ACTUAL RESULTS\r\nThe src: argument to aws_s3 is automagicly decrypted without documentation or a way to disable the feature like other modules (ex. copy).\r\n\n", "before_files": [{"content": "# (c) 2012, Michael DeHaan <[email protected]>\n# (c) 2018, Will Thames <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nimport os\n\nfrom ansible.errors import AnsibleError, AnsibleAction, AnsibleActionFail, AnsibleFileNotFound\nfrom ansible.module_utils._text import to_text\nfrom ansible.plugins.action import ActionBase\n\n\nclass ActionModule(ActionBase):\n\n TRANSFERS_FILES = True\n\n def run(self, tmp=None, task_vars=None):\n ''' handler for aws_s3 operations '''\n if task_vars is None:\n task_vars = dict()\n\n result = super(ActionModule, self).run(tmp, task_vars)\n del tmp # tmp no longer has any effect\n\n source = self._task.args.get('src', None)\n\n try:\n new_module_args = self._task.args.copy()\n if source:\n source = os.path.expanduser(source)\n\n # For backward compatibility check if the file exists on the remote; it should take precedence\n if not self._remote_file_exists(source):\n try:\n source = self._loader.get_real_file(self._find_needle('files', source))\n new_module_args['src'] = source\n except AnsibleFileNotFound as e:\n # module handles error message for nonexistent files\n new_module_args['src'] = source\n except AnsibleError as e:\n raise AnsibleActionFail(to_text(e))\n\n # execute the aws_s3 module now, with the updated args\n result.update(self._execute_module(module_args=new_module_args, task_vars=task_vars))\n except AnsibleAction as e:\n result.update(e.result)\n return result\n", "path": "lib/ansible/plugins/action/aws_s3.py"}]}
1,936
165
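The fix above is a single keyword argument, so the interesting part is the contract behind it. The stand-in below models that contract; only `get_real_file(path, decrypt=...)` is the real `DataLoader` method the patch relies on — everything else here is illustrative.

```python
VAULT_HEADER = b"$ANSIBLE_VAULT;"


def get_real_file(path, decrypt=True):
    """Simplified stand-in for ansible.parsing.dataloader.DataLoader."""
    with open(path, "rb") as fh:
        head = fh.read(len(VAULT_HEADER))
    if decrypt and head == VAULT_HEADER:
        # The real loader writes a decrypted copy to a temp file and returns
        # that path. With decrypt=False -- the one-line fix -- this branch is
        # never taken, so aws_s3 uploads the ciphertext byte-for-byte.
        raise NotImplementedError("vault decryption elided in this sketch")
    return path
```

Modules that are supposed to honor vaults, such as `copy`, expose a user-facing `decrypt:` option instead; this patch simply hard-codes `decrypt=False` for uploads.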
gh_patches_debug_34994
rasdani/github-patches
git_diff
getredash__redash-725
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> User should be redirected to his original destination after login with Google OAuth If the user tried to open a page before being logged in, he should be redirected to this page after successful login. </issue> <code> [start of redash/google_oauth.py] 1 import logging 2 from flask.ext.login import login_user 3 import requests 4 from flask import redirect, url_for, Blueprint, flash 5 from flask_oauth import OAuth 6 from redash import models, settings 7 8 logger = logging.getLogger('google_oauth') 9 oauth = OAuth() 10 11 12 if not settings.GOOGLE_APPS_DOMAIN: 13 logger.warning("No Google Apps domain defined, all Google accounts allowed.") 14 15 google = oauth.remote_app('google', 16 base_url='https://www.google.com/accounts/', 17 authorize_url='https://accounts.google.com/o/oauth2/auth', 18 request_token_url=None, 19 request_token_params={ 20 'scope': 'https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/userinfo.profile', 21 'response_type': 'code' 22 }, 23 access_token_url='https://accounts.google.com/o/oauth2/token', 24 access_token_method='POST', 25 access_token_params={'grant_type': 'authorization_code'}, 26 consumer_key=settings.GOOGLE_CLIENT_ID, 27 consumer_secret=settings.GOOGLE_CLIENT_SECRET) 28 29 30 blueprint = Blueprint('google_oauth', __name__) 31 32 33 def get_user_profile(access_token): 34 headers = {'Authorization': 'OAuth {}'.format(access_token)} 35 response = requests.get('https://www.googleapis.com/oauth2/v1/userinfo', headers=headers) 36 37 if response.status_code == 401: 38 logger.warning("Failed getting user profile (response code 401).") 39 return None 40 41 return response.json() 42 43 44 def verify_profile(profile): 45 if not settings.GOOGLE_APPS_DOMAIN: 46 return True 47 48 domain = profile['email'].split('@')[-1] 49 return domain in settings.GOOGLE_APPS_DOMAIN 50 51 52 def create_and_login_user(name, email): 53 try: 54 user_object = models.User.get_by_email(email) 55 if user_object.name != name: 56 logger.debug("Updating user name (%r -> %r)", user_object.name, name) 57 user_object.name = name 58 user_object.save() 59 except models.User.DoesNotExist: 60 logger.debug("Creating user object (%r)", name) 61 user_object = models.User.create(name=name, email=email, groups=models.User.DEFAULT_GROUPS) 62 63 login_user(user_object, remember=True) 64 65 66 @blueprint.route('/oauth/google', endpoint="authorize") 67 def login(): 68 # TODO, suport next 69 callback=url_for('.callback', _external=True) 70 logger.debug("Callback url: %s", callback) 71 return google.authorize(callback=callback) 72 73 74 @blueprint.route('/oauth/google_callback', endpoint="callback") 75 @google.authorized_handler 76 def authorized(resp): 77 access_token = resp['access_token'] 78 79 if access_token is None: 80 logger.warning("Access token missing in call back request.") 81 flash("Validation error. Please retry.") 82 return redirect(url_for('login')) 83 84 profile = get_user_profile(access_token) 85 if profile is None: 86 flash("Validation error. 
Please retry.") 87 return redirect(url_for('login')) 88 89 if not verify_profile(profile): 90 logger.warning("User tried to login with unauthorized domain name: %s", profile['email']) 91 flash("Your Google Apps domain name isn't allowed.") 92 return redirect(url_for('login')) 93 94 create_and_login_user(profile['name'], profile['email']) 95 96 return redirect(url_for('index')) [end of redash/google_oauth.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/redash/google_oauth.py b/redash/google_oauth.py --- a/redash/google_oauth.py +++ b/redash/google_oauth.py @@ -1,8 +1,8 @@ import logging from flask.ext.login import login_user import requests -from flask import redirect, url_for, Blueprint, flash -from flask_oauth import OAuth +from flask import redirect, url_for, Blueprint, flash, request +from flask_oauthlib.client import OAuth from redash import models, settings logger = logging.getLogger('google_oauth') @@ -18,11 +18,9 @@ request_token_url=None, request_token_params={ 'scope': 'https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/userinfo.profile', - 'response_type': 'code' }, access_token_url='https://accounts.google.com/o/oauth2/token', access_token_method='POST', - access_token_params={'grant_type': 'authorization_code'}, consumer_key=settings.GOOGLE_CLIENT_ID, consumer_secret=settings.GOOGLE_CLIENT_SECRET) @@ -65,10 +63,10 @@ @blueprint.route('/oauth/google', endpoint="authorize") def login(): - # TODO, suport next + next = request.args.get('next','/') callback=url_for('.callback', _external=True) logger.debug("Callback url: %s", callback) - return google.authorize(callback=callback) + return google.authorize(callback=callback, state=next) @blueprint.route('/oauth/google_callback', endpoint="callback") @@ -93,4 +91,6 @@ create_and_login_user(profile['name'], profile['email']) - return redirect(url_for('index')) \ No newline at end of file + next = request.args.get('state','/') + + return redirect(next)
{"golden_diff": "diff --git a/redash/google_oauth.py b/redash/google_oauth.py\n--- a/redash/google_oauth.py\n+++ b/redash/google_oauth.py\n@@ -1,8 +1,8 @@\n import logging\n from flask.ext.login import login_user\n import requests\n-from flask import redirect, url_for, Blueprint, flash\n-from flask_oauth import OAuth\n+from flask import redirect, url_for, Blueprint, flash, request\n+from flask_oauthlib.client import OAuth\n from redash import models, settings\n \n logger = logging.getLogger('google_oauth')\n@@ -18,11 +18,9 @@\n request_token_url=None,\n request_token_params={\n 'scope': 'https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/userinfo.profile',\n- 'response_type': 'code'\n },\n access_token_url='https://accounts.google.com/o/oauth2/token',\n access_token_method='POST',\n- access_token_params={'grant_type': 'authorization_code'},\n consumer_key=settings.GOOGLE_CLIENT_ID,\n consumer_secret=settings.GOOGLE_CLIENT_SECRET)\n \n@@ -65,10 +63,10 @@\n \n @blueprint.route('/oauth/google', endpoint=\"authorize\")\n def login():\n- # TODO, suport next\n+ next = request.args.get('next','/')\n callback=url_for('.callback', _external=True)\n logger.debug(\"Callback url: %s\", callback)\n- return google.authorize(callback=callback)\n+ return google.authorize(callback=callback, state=next)\n \n \n @blueprint.route('/oauth/google_callback', endpoint=\"callback\")\n@@ -93,4 +91,6 @@\n \n create_and_login_user(profile['name'], profile['email'])\n \n- return redirect(url_for('index'))\n\\ No newline at end of file\n+ next = request.args.get('state','/')\n+\n+ return redirect(next)\n", "issue": "User should be redirected to his original destination after login with Google OAuth\nIf the user tried to open a page before being logged in, he should be redirected to this page after successful login.\n\n", "before_files": [{"content": "import logging\nfrom flask.ext.login import login_user\nimport requests\nfrom flask import redirect, url_for, Blueprint, flash\nfrom flask_oauth import OAuth\nfrom redash import models, settings\n\nlogger = logging.getLogger('google_oauth')\noauth = OAuth()\n\n\nif not settings.GOOGLE_APPS_DOMAIN:\n logger.warning(\"No Google Apps domain defined, all Google accounts allowed.\")\n\ngoogle = oauth.remote_app('google',\n base_url='https://www.google.com/accounts/',\n authorize_url='https://accounts.google.com/o/oauth2/auth',\n request_token_url=None,\n request_token_params={\n 'scope': 'https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/userinfo.profile',\n 'response_type': 'code'\n },\n access_token_url='https://accounts.google.com/o/oauth2/token',\n access_token_method='POST',\n access_token_params={'grant_type': 'authorization_code'},\n consumer_key=settings.GOOGLE_CLIENT_ID,\n consumer_secret=settings.GOOGLE_CLIENT_SECRET)\n\n\nblueprint = Blueprint('google_oauth', __name__)\n\n\ndef get_user_profile(access_token):\n headers = {'Authorization': 'OAuth {}'.format(access_token)}\n response = requests.get('https://www.googleapis.com/oauth2/v1/userinfo', headers=headers)\n\n if response.status_code == 401:\n logger.warning(\"Failed getting user profile (response code 401).\")\n return None\n\n return response.json()\n\n\ndef verify_profile(profile):\n if not settings.GOOGLE_APPS_DOMAIN:\n return True\n\n domain = profile['email'].split('@')[-1]\n return domain in settings.GOOGLE_APPS_DOMAIN\n\n\ndef create_and_login_user(name, email):\n try:\n user_object = models.User.get_by_email(email)\n if user_object.name != name:\n 
logger.debug(\"Updating user name (%r -> %r)\", user_object.name, name)\n user_object.name = name\n user_object.save()\n except models.User.DoesNotExist:\n logger.debug(\"Creating user object (%r)\", name)\n user_object = models.User.create(name=name, email=email, groups=models.User.DEFAULT_GROUPS)\n\n login_user(user_object, remember=True)\n\n\[email protected]('/oauth/google', endpoint=\"authorize\")\ndef login():\n # TODO, suport next\n callback=url_for('.callback', _external=True)\n logger.debug(\"Callback url: %s\", callback)\n return google.authorize(callback=callback)\n\n\[email protected]('/oauth/google_callback', endpoint=\"callback\")\[email protected]_handler\ndef authorized(resp):\n access_token = resp['access_token']\n\n if access_token is None:\n logger.warning(\"Access token missing in call back request.\")\n flash(\"Validation error. Please retry.\")\n return redirect(url_for('login'))\n\n profile = get_user_profile(access_token)\n if profile is None:\n flash(\"Validation error. Please retry.\")\n return redirect(url_for('login'))\n\n if not verify_profile(profile):\n logger.warning(\"User tried to login with unauthorized domain name: %s\", profile['email'])\n flash(\"Your Google Apps domain name isn't allowed.\")\n return redirect(url_for('login'))\n\n create_and_login_user(profile['name'], profile['email'])\n\n return redirect(url_for('index'))", "path": "redash/google_oauth.py"}]}
1,446
401
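The mechanism in this patch — carrying the original URL through the OAuth `state` parameter — generalizes to any Flask app. A compressed sketch assuming `flask_oauthlib` as in the diff; the credential strings are placeholders, not real values.

```python
from flask import Flask, redirect, request, url_for
from flask_oauthlib.client import OAuth

app = Flask(__name__)
google = OAuth(app).remote_app(
    "google",
    consumer_key="placeholder-client-id",
    consumer_secret="placeholder-secret",
    request_token_params={"scope": "email"},
    request_token_url=None,
    access_token_method="POST",
    access_token_url="https://accounts.google.com/o/oauth2/token",
    authorize_url="https://accounts.google.com/o/oauth2/auth",
)


@app.route("/login")
def login():
    next_url = request.args.get("next", "/")
    # Stash the original destination in `state` so it survives the round-trip.
    return google.authorize(
        callback=url_for("callback", _external=True), state=next_url
    )


@app.route("/callback")
@google.authorized_handler
def callback(resp):
    # Token and profile validation elided; on success, resume where we started.
    next_url = request.args.get("state", "/")
    return redirect(next_url)
```

One caveat the patch leaves open: `state` round-trips through the client, so a hardened version should verify the target is a relative path (or on the app's own host) before redirecting, to avoid an open redirect.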
gh_patches_debug_551
rasdani/github-patches
git_diff
pypi__warehouse-5814
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Sorting searches by 'Date last updated' results in 503 **Describe the bug** When trying to search for anything on pypi.org, sorting by relevance or trending works fine, but sorting by date last updated returns a 503 error. **Expected behavior** Search results, sorted by date. **To Reproduce** Example URL: https://pypi.org/search/?q=test&o=-created Result: > Sorry, something went wrong > > PyPI is down for maintenance or is having an outage. > > This is affecting several of our services, including our web interface. > If you are trying to install a package, you should be able to pip install packages without problem. > > Check our status page, or > View Python Status on Twitter The status page, though, shows all green. **My Platform** - Win 10, Firefox 66.0.3 - Ubuntu 18.04, Chrome 74.0.3729.108 --- Edit: I know this functionality was working at least as recently as last Thursday, 2 May 2019. </issue> <code> [start of warehouse/packaging/search.py] 1 # Licensed under the Apache License, Version 2.0 (the "License"); 2 # you may not use this file except in compliance with the License. 3 # You may obtain a copy of the License at 4 # 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # 7 # Unless required by applicable law or agreed to in writing, software 8 # distributed under the License is distributed on an "AS IS" BASIS, 9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 # See the License for the specific language governing permissions and 11 # limitations under the License. 12 13 import packaging.version 14 15 from elasticsearch_dsl import Date, Document, Float, Keyword, Text, analyzer 16 17 from warehouse.search.utils import doc_type 18 19 EmailAnalyzer = analyzer( 20 "email", 21 tokenizer="uax_url_email", 22 filter=["standard", "lowercase", "stop", "snowball"], 23 ) 24 25 NameAnalyzer = analyzer( 26 "normalized_name", 27 tokenizer="lowercase", 28 filter=["standard", "lowercase", "word_delimiter"], 29 ) 30 31 32 @doc_type 33 class Project(Document): 34 35 name = Text() 36 normalized_name = Text(analyzer=NameAnalyzer) 37 version = Keyword(multi=True) 38 latest_version = Keyword() 39 summary = Text(analyzer="snowball") 40 description = Text(analyzer="snowball") 41 author = Text() 42 author_email = Text(analyzer=EmailAnalyzer) 43 maintainer = Text() 44 maintainer_email = Text(analyzer=EmailAnalyzer) 45 license = Text() 46 home_page = Keyword() 47 download_url = Keyword() 48 keywords = Text(analyzer="snowball") 49 platform = Keyword() 50 created = Date() 51 classifiers = Keyword(multi=True) 52 zscore = Float() 53 54 @classmethod 55 def from_db(cls, release): 56 obj = cls(meta={"id": release.normalized_name}) 57 obj["name"] = release.name 58 obj["normalized_name"] = release.normalized_name 59 obj["version"] = sorted( 60 release.all_versions, key=lambda r: packaging.version.parse(r), reverse=True 61 ) 62 obj["latest_version"] = release.latest_version 63 obj["summary"] = release.summary 64 obj["description"] = release.description 65 obj["author"] = release.author 66 obj["author_email"] = release.author_email 67 obj["maintainer"] = release.maintainer 68 obj["maintainer_email"] = release.maintainer_email 69 obj["home_page"] = release.home_page 70 obj["download_url"] = release.download_url 71 obj["keywords"] = release.keywords 72 obj["platform"] = release.platform 73 obj["created"] = release.created 74 obj["classifiers"] = release.classifiers 75 obj["zscore"] = 
release.zscore 76 77 return obj 78 [end of warehouse/packaging/search.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/warehouse/packaging/search.py b/warehouse/packaging/search.py --- a/warehouse/packaging/search.py +++ b/warehouse/packaging/search.py @@ -75,3 +75,8 @@ obj["zscore"] = release.zscore return obj + + class Index: + # make sure this class can match any index so it will always be used to + # deserialize data coming from elasticsearch. + name = "*"
{"golden_diff": "diff --git a/warehouse/packaging/search.py b/warehouse/packaging/search.py\n--- a/warehouse/packaging/search.py\n+++ b/warehouse/packaging/search.py\n@@ -75,3 +75,8 @@\n obj[\"zscore\"] = release.zscore\n \n return obj\n+\n+ class Index:\n+ # make sure this class can match any index so it will always be used to\n+ # deserialize data coming from elasticsearch.\n+ name = \"*\"\n", "issue": "Sorting searches by 'Date last updated' results in 503\n**Describe the bug**\r\n\r\nWhen trying to search for anything on pypi.org, sorting by relevance or trending works fine, but sorting by date last updated returns a 503 error.\r\n\r\n**Expected behavior**\r\n\r\nSearch results, sorted by date.\r\n\r\n**To Reproduce**\r\n\r\nExample URL: https://pypi.org/search/?q=test&o=-created\r\n\r\nResult:\r\n\r\n> Sorry, something went wrong\r\n> \r\n> PyPI is down for maintenance or is having an outage.\r\n> \r\n> This is affecting several of our services, including our web interface.\r\n> If you are trying to install a package, you should be able to pip install packages without problem.\r\n> \r\n> Check our status page, or\r\n> View Python Status on Twitter\r\n\r\nThe status page, though, shows all green.\r\n\r\n\r\n**My Platform**\r\n\r\n- Win 10, Firefox 66.0.3\r\n- Ubuntu 18.04, Chrome 74.0.3729.108\r\n\r\n---\r\n\r\nEdit: I know this functionality was working at least as recently as last Thursday, 2 May 2019.\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport packaging.version\n\nfrom elasticsearch_dsl import Date, Document, Float, Keyword, Text, analyzer\n\nfrom warehouse.search.utils import doc_type\n\nEmailAnalyzer = analyzer(\n \"email\",\n tokenizer=\"uax_url_email\",\n filter=[\"standard\", \"lowercase\", \"stop\", \"snowball\"],\n)\n\nNameAnalyzer = analyzer(\n \"normalized_name\",\n tokenizer=\"lowercase\",\n filter=[\"standard\", \"lowercase\", \"word_delimiter\"],\n)\n\n\n@doc_type\nclass Project(Document):\n\n name = Text()\n normalized_name = Text(analyzer=NameAnalyzer)\n version = Keyword(multi=True)\n latest_version = Keyword()\n summary = Text(analyzer=\"snowball\")\n description = Text(analyzer=\"snowball\")\n author = Text()\n author_email = Text(analyzer=EmailAnalyzer)\n maintainer = Text()\n maintainer_email = Text(analyzer=EmailAnalyzer)\n license = Text()\n home_page = Keyword()\n download_url = Keyword()\n keywords = Text(analyzer=\"snowball\")\n platform = Keyword()\n created = Date()\n classifiers = Keyword(multi=True)\n zscore = Float()\n\n @classmethod\n def from_db(cls, release):\n obj = cls(meta={\"id\": release.normalized_name})\n obj[\"name\"] = release.name\n obj[\"normalized_name\"] = release.normalized_name\n obj[\"version\"] = sorted(\n release.all_versions, key=lambda r: packaging.version.parse(r), reverse=True\n )\n obj[\"latest_version\"] = release.latest_version\n obj[\"summary\"] = release.summary\n obj[\"description\"] = release.description\n obj[\"author\"] = release.author\n obj[\"author_email\"] = 
release.author_email\n obj[\"maintainer\"] = release.maintainer\n obj[\"maintainer_email\"] = release.maintainer_email\n obj[\"home_page\"] = release.home_page\n obj[\"download_url\"] = release.download_url\n obj[\"keywords\"] = release.keywords\n obj[\"platform\"] = release.platform\n obj[\"created\"] = release.created\n obj[\"classifiers\"] = release.classifiers\n obj[\"zscore\"] = release.zscore\n\n return obj\n", "path": "warehouse/packaging/search.py"}]}
1,523
109
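The five added lines are easy to misread as boilerplate. In elasticsearch-dsl, a `Document` subclass is used to deserialize a search hit only when the hit's `_index` matches the class's `Index` pattern; deployments like this one typically query concretely named indices behind an alias, so the wildcard keeps every hit deserializing into a `Project` — exactly what the comment in the diff says. A minimal reproduction of the pattern, assuming elasticsearch-dsl 6.x:

```python
from elasticsearch_dsl import Date, Document, Keyword


class Project(Document):
    name = Keyword()
    created = Date()

    class Index:
        # Match any concrete index name (e.g. a timestamped index behind an
        # alias) so responses always deserialize into Project instances.
        name = "*"
```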
gh_patches_debug_22279
rasdani/github-patches
git_diff
chainer__chainer-243
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add type check to NonparameterizedConvolution2D function Related to #123 </issue> <code> [start of chainer/functions/nonparameterized_convolution_2d.py] 1 from chainer import cuda 2 from chainer import function 3 from chainer.functions import convolution_2d as conv2d_module 4 5 6 class NonparameterizedConvolution2D(function.Function): 7 8 """Two-dimensional nonparameterized convolution class. 9 10 Args: 11 stride (int or (int, int)): Stride of filter applications. 12 ``stride=s`` and ``stride=(s, s)`` are equivalent. 13 pad (int or (int, int)): Spatial padding width for input arrays. 14 ``pad=p`` and ``pad=(p, p)`` are equivalent. 15 use_cudnn (bool): If True, then this function uses CuDNN if available. 16 17 .. seealso:: :class:`Convolution2D` 18 19 """ 20 def __init__(self, stride=1, pad=0, use_cudnn=True): 21 self.stride = stride 22 self.pad = pad 23 24 self.use_cudnn = use_cudnn 25 26 def forward(self, x): 27 W = x[1] 28 b = None 29 if len(x) == 3: 30 b = x[2] 31 func = conv2d_module.Convolution2D( 32 W.shape[1], W.shape[0], W.shape[2:], 33 stride=self.stride, pad=self.pad, use_cudnn=self.use_cudnn, 34 initialW=W, initial_bias=b) 35 self.func = func 36 if any(isinstance(i, cuda.GPUArray) for i in x): 37 func.to_gpu() 38 return func.forward(x[:1]) 39 40 def backward(self, x, gy): 41 func = self.func 42 func.zero_grads() 43 gx = func.backward(x[:1], gy) 44 if func.gb is None: 45 return (gx[0], func.gW) 46 return (gx[0], func.gW, func.gb) 47 48 49 def convolution_2d(x, W, b=None, stride=1, pad=0, use_cudnn=True): 50 """Two-dimensional convolution function. 51 52 Args: 53 x (~chainer.Variable): Input variable. 54 W (~chainer.Variable): Weight variable. 55 b (~chainer.Variable): Bias variable. 56 stride (int or (int, int)): Stride of filter applications. 57 ``stride=s`` and ``stride=(s, s)`` are equivalent. 58 pad (int or (int, int)): Spatial padding width for input arrays. 59 ``pad=p`` and ``pad=(p, p)`` are equivalent. 60 use_cudnn (bool): If True, then this function uses CuDNN if available. 61 62 Returns: 63 ~chainer.Variable: Output variable. 64 65 .. seealso:: :class:`Convolution2D` 66 67 """ 68 return NonparameterizedConvolution2D( 69 stride=stride, pad=pad, use_cudnn=use_cudnn)(x, W, b) 70 [end of chainer/functions/nonparameterized_convolution_2d.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/chainer/functions/nonparameterized_convolution_2d.py b/chainer/functions/nonparameterized_convolution_2d.py --- a/chainer/functions/nonparameterized_convolution_2d.py +++ b/chainer/functions/nonparameterized_convolution_2d.py @@ -1,6 +1,9 @@ +import numpy + from chainer import cuda from chainer import function from chainer.functions import convolution_2d as conv2d_module +from chainer.utils import type_check class NonparameterizedConvolution2D(function.Function): @@ -23,6 +26,30 @@ self.use_cudnn = use_cudnn + def check_type_forward(self, in_types): + type_check.expect( + 2 <= in_types.size(), + in_types.size() <= 3, + ) + + x_type = in_types[0] + w_type = in_types[1] + type_check.expect( + x_type.dtype == numpy.float32, + w_type.dtype == numpy.float32, + x_type.ndim == 4, + w_type.ndim == 4, + x_type.shape[1] == w_type.shape[1], + ) + + if in_types.size().eval() == 3: + b_type = in_types[2] + type_check.expect( + b_type.dtype == numpy.float32, + b_type.ndim == 1, + b_type.shape[0] == w_type.shape[0], + ) + def forward(self, x): W = x[1] b = None
{"golden_diff": "diff --git a/chainer/functions/nonparameterized_convolution_2d.py b/chainer/functions/nonparameterized_convolution_2d.py\n--- a/chainer/functions/nonparameterized_convolution_2d.py\n+++ b/chainer/functions/nonparameterized_convolution_2d.py\n@@ -1,6 +1,9 @@\n+import numpy\n+\n from chainer import cuda\n from chainer import function\n from chainer.functions import convolution_2d as conv2d_module\n+from chainer.utils import type_check\n \n \n class NonparameterizedConvolution2D(function.Function):\n@@ -23,6 +26,30 @@\n \n self.use_cudnn = use_cudnn\n \n+ def check_type_forward(self, in_types):\n+ type_check.expect(\n+ 2 <= in_types.size(),\n+ in_types.size() <= 3,\n+ )\n+\n+ x_type = in_types[0]\n+ w_type = in_types[1]\n+ type_check.expect(\n+ x_type.dtype == numpy.float32,\n+ w_type.dtype == numpy.float32,\n+ x_type.ndim == 4,\n+ w_type.ndim == 4,\n+ x_type.shape[1] == w_type.shape[1],\n+ )\n+\n+ if in_types.size().eval() == 3:\n+ b_type = in_types[2]\n+ type_check.expect(\n+ b_type.dtype == numpy.float32,\n+ b_type.ndim == 1,\n+ b_type.shape[0] == w_type.shape[0],\n+ )\n+\n def forward(self, x):\n W = x[1]\n b = None\n", "issue": "Add type check to NonparameterizedConvolution2D function\nRelated to #123 \n\n", "before_files": [{"content": "from chainer import cuda\nfrom chainer import function\nfrom chainer.functions import convolution_2d as conv2d_module\n\n\nclass NonparameterizedConvolution2D(function.Function):\n\n \"\"\"Two-dimensional nonparameterized convolution class.\n\n Args:\n stride (int or (int, int)): Stride of filter applications.\n ``stride=s`` and ``stride=(s, s)`` are equivalent.\n pad (int or (int, int)): Spatial padding width for input arrays.\n ``pad=p`` and ``pad=(p, p)`` are equivalent.\n use_cudnn (bool): If True, then this function uses CuDNN if available.\n\n .. seealso:: :class:`Convolution2D`\n\n \"\"\"\n def __init__(self, stride=1, pad=0, use_cudnn=True):\n self.stride = stride\n self.pad = pad\n\n self.use_cudnn = use_cudnn\n\n def forward(self, x):\n W = x[1]\n b = None\n if len(x) == 3:\n b = x[2]\n func = conv2d_module.Convolution2D(\n W.shape[1], W.shape[0], W.shape[2:],\n stride=self.stride, pad=self.pad, use_cudnn=self.use_cudnn,\n initialW=W, initial_bias=b)\n self.func = func\n if any(isinstance(i, cuda.GPUArray) for i in x):\n func.to_gpu()\n return func.forward(x[:1])\n\n def backward(self, x, gy):\n func = self.func\n func.zero_grads()\n gx = func.backward(x[:1], gy)\n if func.gb is None:\n return (gx[0], func.gW)\n return (gx[0], func.gW, func.gb)\n\n\ndef convolution_2d(x, W, b=None, stride=1, pad=0, use_cudnn=True):\n \"\"\"Two-dimensional convolution function.\n\n Args:\n x (~chainer.Variable): Input variable.\n W (~chainer.Variable): Weight variable.\n b (~chainer.Variable): Bias variable.\n stride (int or (int, int)): Stride of filter applications.\n ``stride=s`` and ``stride=(s, s)`` are equivalent.\n pad (int or (int, int)): Spatial padding width for input arrays.\n ``pad=p`` and ``pad=(p, p)`` are equivalent.\n use_cudnn (bool): If True, then this function uses CuDNN if available.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n .. seealso:: :class:`Convolution2D`\n\n \"\"\"\n return NonparameterizedConvolution2D(\n stride=stride, pad=pad, use_cudnn=use_cudnn)(x, W, b)\n", "path": "chainer/functions/nonparameterized_convolution_2d.py"}]}
1,323
362
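For readers who don't know Chainer's legacy `Function` API, the `check_type_forward` idiom added by the patch transplants onto a toy function as follows — a sketch, not Chainer source:

```python
import numpy

from chainer import function
from chainer.utils import type_check


class DoubleIt(function.Function):
    """Toy Function showing the check_type_forward idiom from the patch."""

    def check_type_forward(self, in_types):
        # Constraints are lazy expressions; expect() raises on mismatch.
        type_check.expect(in_types.size() == 1)
        x_type = in_types[0]
        type_check.expect(
            x_type.dtype == numpy.float32,
            x_type.ndim == 4,
        )

    def forward(self, inputs):
        x, = inputs
        return x * 2,
```

Note the patch's own `in_types.size().eval() == 3` guard before checking the optional bias: `size()` returns a lazy expression, so `.eval()` is needed wherever a concrete integer is required.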
gh_patches_debug_23091
rasdani/github-patches
git_diff
pytorch__ignite-984
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Deprecate ignite.contrib.handlers.custom_events.CustomPeriodicEvent ## 🚀 Feature Custom events `CustomPeriodicEvent` from contrib seem heavy and unusable. Idea is to - [ ] raise a warning about deprecation since v0.4.0 and removing since v0.5.0 and suggest to use filtered events. - [ ] remove all docs about them </issue> <code> [start of ignite/contrib/handlers/custom_events.py] 1 from ignite.engine import Events, State, EventEnum 2 3 4 class CustomPeriodicEvent: 5 """Handler to define a custom periodic events as a number of elapsed iterations/epochs for an engine. 6 7 When custom periodic event is created and attached to an engine, the following events are fired: 8 1) K iterations is specified: 9 - `Events.ITERATIONS_<K>_STARTED` 10 - `Events.ITERATIONS_<K>_COMPLETED` 11 12 1) K epochs is specified: 13 - `Events.EPOCHS_<K>_STARTED` 14 - `Events.EPOCHS_<K>_COMPLETED` 15 16 17 Examples: 18 19 .. code-block:: python 20 21 from ignite.engine import Engine, Events 22 from ignite.contrib.handlers import CustomPeriodicEvent 23 24 # Let's define an event every 1000 iterations 25 cpe1 = CustomPeriodicEvent(n_iterations=1000) 26 cpe1.attach(trainer) 27 28 # Let's define an event every 10 epochs 29 cpe2 = CustomPeriodicEvent(n_epochs=10) 30 cpe2.attach(trainer) 31 32 @trainer.on(cpe1.Events.ITERATIONS_1000_COMPLETED) 33 def on_every_1000_iterations(engine): 34 # run a computation after 1000 iterations 35 # ... 36 print(engine.state.iterations_1000) 37 38 @trainer.on(cpe2.Events.EPOCHS_10_STARTED) 39 def on_every_10_epochs(engine): 40 # run a computation every 10 epochs 41 # ... 42 print(engine.state.epochs_10) 43 44 45 Args: 46 n_iterations (int, optional): number iterations of the custom periodic event 47 n_epochs (int, optional): number iterations of the custom periodic event. Argument is optional, but only one, 48 either n_iterations or n_epochs should defined. 
49 50 """ 51 52 def __init__(self, n_iterations=None, n_epochs=None): 53 54 if n_iterations is not None and (not isinstance(n_iterations, int) or n_iterations < 1): 55 raise ValueError("Argument n_iterations should be positive integer number") 56 57 if n_epochs is not None and (not isinstance(n_epochs, int) or n_epochs < 1): 58 raise ValueError("Argument n_epochs should be positive integer number") 59 60 if (n_iterations is None and n_epochs is None) or (n_iterations and n_epochs): 61 raise ValueError("Either n_iterations or n_epochs should defined") 62 63 if n_iterations: 64 prefix = "iterations" 65 self.state_attr = "iteration" 66 self.period = n_iterations 67 68 if n_epochs: 69 prefix = "epochs" 70 self.state_attr = "epoch" 71 self.period = n_epochs 72 73 self.custom_state_attr = "{}_{}".format(prefix, self.period) 74 event_name = "{}_{}".format(prefix.upper(), self.period) 75 setattr( 76 self, 77 "Events", 78 EventEnum("Events", " ".join(["{}_STARTED".format(event_name), "{}_COMPLETED".format(event_name)])), 79 ) 80 81 # Update State.event_to_attr 82 for e in self.Events: 83 State.event_to_attr[e] = self.custom_state_attr 84 85 # Create aliases 86 self._periodic_event_started = getattr(self.Events, "{}_STARTED".format(event_name)) 87 self._periodic_event_completed = getattr(self.Events, "{}_COMPLETED".format(event_name)) 88 89 def _on_started(self, engine): 90 setattr(engine.state, self.custom_state_attr, 0) 91 92 def _on_periodic_event_started(self, engine): 93 if getattr(engine.state, self.state_attr) % self.period == 1: 94 setattr(engine.state, self.custom_state_attr, getattr(engine.state, self.custom_state_attr) + 1) 95 engine.fire_event(self._periodic_event_started) 96 97 def _on_periodic_event_completed(self, engine): 98 if getattr(engine.state, self.state_attr) % self.period == 0: 99 engine.fire_event(self._periodic_event_completed) 100 101 def attach(self, engine): 102 engine.register_events(*self.Events) 103 104 engine.add_event_handler(Events.STARTED, self._on_started) 105 engine.add_event_handler( 106 getattr(Events, "{}_STARTED".format(self.state_attr.upper())), self._on_periodic_event_started 107 ) 108 engine.add_event_handler( 109 getattr(Events, "{}_COMPLETED".format(self.state_attr.upper())), self._on_periodic_event_completed 110 ) 111 [end of ignite/contrib/handlers/custom_events.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ignite/contrib/handlers/custom_events.py b/ignite/contrib/handlers/custom_events.py --- a/ignite/contrib/handlers/custom_events.py +++ b/ignite/contrib/handlers/custom_events.py @@ -1,8 +1,11 @@ from ignite.engine import Events, State, EventEnum +import warnings class CustomPeriodicEvent: - """Handler to define a custom periodic events as a number of elapsed iterations/epochs for an engine. + """DEPRECATED. Use filtered events instead. + Handler to define a custom periodic events as a number of elapsed iterations/epochs + for an engine. When custom periodic event is created and attached to an engine, the following events are fired: 1) K iterations is specified: @@ -51,6 +54,11 @@ def __init__(self, n_iterations=None, n_epochs=None): + warnings.warn( + "CustomPeriodicEvent is deprecated since 0.4.0 and will be removed in 0.5.0. Use filtered events instead.", + DeprecationWarning, + ) + if n_iterations is not None and (not isinstance(n_iterations, int) or n_iterations < 1): raise ValueError("Argument n_iterations should be positive integer number")
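The diff above only adds the deprecation warning; the replacement it recommends is ignite's built-in event filtering. A minimal sketch of the equivalent usage, where the trainer and the 1000-iteration period are illustrative:

```python
from ignite.engine import Engine, Events


def train_step(engine, batch):
    pass  # placeholder training logic


trainer = Engine(train_step)


# The `every` filter is part of ignite's public Events API from 0.4.0 onwards
# and replaces cpe1.Events.ITERATIONS_1000_COMPLETED from the deprecated class.
@trainer.on(Events.ITERATION_COMPLETED(every=1000))
def on_every_1000_iterations(engine):
    print(engine.state.iteration)
```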
{"golden_diff": "diff --git a/ignite/contrib/handlers/custom_events.py b/ignite/contrib/handlers/custom_events.py\n--- a/ignite/contrib/handlers/custom_events.py\n+++ b/ignite/contrib/handlers/custom_events.py\n@@ -1,8 +1,11 @@\n from ignite.engine import Events, State, EventEnum\n+import warnings\n \n \n class CustomPeriodicEvent:\n- \"\"\"Handler to define a custom periodic events as a number of elapsed iterations/epochs for an engine.\n+ \"\"\"DEPRECATED. Use filtered events instead.\n+ Handler to define a custom periodic events as a number of elapsed iterations/epochs\n+ for an engine.\n \n When custom periodic event is created and attached to an engine, the following events are fired:\n 1) K iterations is specified:\n@@ -51,6 +54,11 @@\n \n def __init__(self, n_iterations=None, n_epochs=None):\n \n+ warnings.warn(\n+ \"CustomPeriodicEvent is deprecated since 0.4.0 and will be removed in 0.5.0. Use filtered events instead.\",\n+ DeprecationWarning,\n+ )\n+\n if n_iterations is not None and (not isinstance(n_iterations, int) or n_iterations < 1):\n raise ValueError(\"Argument n_iterations should be positive integer number\")\n", "issue": "Deprecate ignite.contrib.handlers.custom_events.CustomPeriodicEvent\n## \ud83d\ude80 Feature\r\n\r\nCustom events `CustomPeriodicEvent` from contrib seem heavy and unusable. \r\n\r\nIdea is to \r\n\r\n- [ ] raise a warning about deprecation since v0.4.0 and removing since v0.5.0 and suggest to use filtered events.\r\n- [ ] remove all docs about them \r\n\n", "before_files": [{"content": "from ignite.engine import Events, State, EventEnum\n\n\nclass CustomPeriodicEvent:\n \"\"\"Handler to define a custom periodic events as a number of elapsed iterations/epochs for an engine.\n\n When custom periodic event is created and attached to an engine, the following events are fired:\n 1) K iterations is specified:\n - `Events.ITERATIONS_<K>_STARTED`\n - `Events.ITERATIONS_<K>_COMPLETED`\n\n 1) K epochs is specified:\n - `Events.EPOCHS_<K>_STARTED`\n - `Events.EPOCHS_<K>_COMPLETED`\n\n\n Examples:\n\n .. code-block:: python\n\n from ignite.engine import Engine, Events\n from ignite.contrib.handlers import CustomPeriodicEvent\n\n # Let's define an event every 1000 iterations\n cpe1 = CustomPeriodicEvent(n_iterations=1000)\n cpe1.attach(trainer)\n\n # Let's define an event every 10 epochs\n cpe2 = CustomPeriodicEvent(n_epochs=10)\n cpe2.attach(trainer)\n\n @trainer.on(cpe1.Events.ITERATIONS_1000_COMPLETED)\n def on_every_1000_iterations(engine):\n # run a computation after 1000 iterations\n # ...\n print(engine.state.iterations_1000)\n\n @trainer.on(cpe2.Events.EPOCHS_10_STARTED)\n def on_every_10_epochs(engine):\n # run a computation every 10 epochs\n # ...\n print(engine.state.epochs_10)\n\n\n Args:\n n_iterations (int, optional): number iterations of the custom periodic event\n n_epochs (int, optional): number iterations of the custom periodic event. 
Argument is optional, but only one,\n either n_iterations or n_epochs should defined.\n\n \"\"\"\n\n def __init__(self, n_iterations=None, n_epochs=None):\n\n if n_iterations is not None and (not isinstance(n_iterations, int) or n_iterations < 1):\n raise ValueError(\"Argument n_iterations should be positive integer number\")\n\n if n_epochs is not None and (not isinstance(n_epochs, int) or n_epochs < 1):\n raise ValueError(\"Argument n_epochs should be positive integer number\")\n\n if (n_iterations is None and n_epochs is None) or (n_iterations and n_epochs):\n raise ValueError(\"Either n_iterations or n_epochs should defined\")\n\n if n_iterations:\n prefix = \"iterations\"\n self.state_attr = \"iteration\"\n self.period = n_iterations\n\n if n_epochs:\n prefix = \"epochs\"\n self.state_attr = \"epoch\"\n self.period = n_epochs\n\n self.custom_state_attr = \"{}_{}\".format(prefix, self.period)\n event_name = \"{}_{}\".format(prefix.upper(), self.period)\n setattr(\n self,\n \"Events\",\n EventEnum(\"Events\", \" \".join([\"{}_STARTED\".format(event_name), \"{}_COMPLETED\".format(event_name)])),\n )\n\n # Update State.event_to_attr\n for e in self.Events:\n State.event_to_attr[e] = self.custom_state_attr\n\n # Create aliases\n self._periodic_event_started = getattr(self.Events, \"{}_STARTED\".format(event_name))\n self._periodic_event_completed = getattr(self.Events, \"{}_COMPLETED\".format(event_name))\n\n def _on_started(self, engine):\n setattr(engine.state, self.custom_state_attr, 0)\n\n def _on_periodic_event_started(self, engine):\n if getattr(engine.state, self.state_attr) % self.period == 1:\n setattr(engine.state, self.custom_state_attr, getattr(engine.state, self.custom_state_attr) + 1)\n engine.fire_event(self._periodic_event_started)\n\n def _on_periodic_event_completed(self, engine):\n if getattr(engine.state, self.state_attr) % self.period == 0:\n engine.fire_event(self._periodic_event_completed)\n\n def attach(self, engine):\n engine.register_events(*self.Events)\n\n engine.add_event_handler(Events.STARTED, self._on_started)\n engine.add_event_handler(\n getattr(Events, \"{}_STARTED\".format(self.state_attr.upper())), self._on_periodic_event_started\n )\n engine.add_event_handler(\n getattr(Events, \"{}_COMPLETED\".format(self.state_attr.upper())), self._on_periodic_event_completed\n )\n", "path": "ignite/contrib/handlers/custom_events.py"}]}
1814
287
gh_patches_debug_720
rasdani/github-patches
git_diff
liqd__a4-opin-766
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> cannot delete user in django admin if user has not uploaded avatar </issue> <code> [start of euth/users/signals.py] 1 from django.db.models import signals 2 from django.dispatch import receiver 3 4 from adhocracy4.images import services 5 6 from . import models 7 8 9 @receiver(signals.post_init, sender=models.User) 10 def backup_image_path(sender, instance, **kwargs): 11 instance._current_image_file = instance.avatar 12 13 14 @receiver(signals.post_save, sender=models.User) 15 def delete_old_image(sender, instance, **kwargs): 16 if hasattr(instance, '_current_image_file'): 17 if instance._current_image_file != instance.avatar: 18 services.delete_images([instance._current_image_file]) 19 20 21 @receiver(signals.post_delete, sender=models.User) 22 def delete_images_for_User(sender, instance, **kwargs): 23 services.delete_images([instance.avatar]) 24 [end of euth/users/signals.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/euth/users/signals.py b/euth/users/signals.py --- a/euth/users/signals.py +++ b/euth/users/signals.py @@ -20,4 +20,4 @@ @receiver(signals.post_delete, sender=models.User) def delete_images_for_User(sender, instance, **kwargs): - services.delete_images([instance.avatar]) + services.delete_images([instance._avatar])
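The merged fix above switches the handler to the model's underlying `_avatar` attribute. A more generic defensive variant of the same signal handler, assuming `avatar` is an ordinary Django file field, skips storage cleanup when no file was ever uploaded:

```python
from django.db.models import signals
from django.dispatch import receiver

from adhocracy4.images import services

from . import models


@receiver(signals.post_delete, sender=models.User)
def delete_images_for_user(sender, instance, **kwargs):
    # An empty FileField is falsy, so users who never uploaded an avatar
    # can be deleted without touching image storage.
    if instance.avatar:
        services.delete_images([instance.avatar])
```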
{"golden_diff": "diff --git a/euth/users/signals.py b/euth/users/signals.py\n--- a/euth/users/signals.py\n+++ b/euth/users/signals.py\n@@ -20,4 +20,4 @@\n \n @receiver(signals.post_delete, sender=models.User)\n def delete_images_for_User(sender, instance, **kwargs):\n- services.delete_images([instance.avatar])\n+ services.delete_images([instance._avatar])\n", "issue": "cannot delete user in django admin if user has not uploaded avatar \n\n", "before_files": [{"content": "from django.db.models import signals\nfrom django.dispatch import receiver\n\nfrom adhocracy4.images import services\n\nfrom . import models\n\n\n@receiver(signals.post_init, sender=models.User)\ndef backup_image_path(sender, instance, **kwargs):\n instance._current_image_file = instance.avatar\n\n\n@receiver(signals.post_save, sender=models.User)\ndef delete_old_image(sender, instance, **kwargs):\n if hasattr(instance, '_current_image_file'):\n if instance._current_image_file != instance.avatar:\n services.delete_images([instance._current_image_file])\n\n\n@receiver(signals.post_delete, sender=models.User)\ndef delete_images_for_User(sender, instance, **kwargs):\n services.delete_images([instance.avatar])\n", "path": "euth/users/signals.py"}]}
740
92
gh_patches_debug_951
rasdani/github-patches
git_diff
pytorch__ignite-844
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Typehint of ignite._utils._to_hours_mins_secs not satisfied with float ## 🐛 Bug description That is a so tiny bug. The `typehint` of the following function of `ignite._utils` is not satisfied with a `float` argument ``` python def _to_hours_mins_secs(time_taken: Union[float, int]) -> Tuple[int, int, int]: """Convert seconds to hours, mins, and seconds.""" mins, secs = divmod(time_taken, 60) hours, mins = divmod(mins, 60) return hours, mins, secs ``` We have ```python >>> divmod(10.0,2) (5.0, 0.0) ``` ## Environment - PyTorch Version (e.g., 1.4): 1.4 - Ignite Version (e.g., 0.3.0): 0.3.0 - OS (e.g., Linux): Linux - How you installed Ignite (`conda`, `pip`, source): conda - Python version: 3.7 - Any other relevant information: </issue> <code> [start of ignite/_utils.py] 1 from typing import Union, Tuple 2 3 # For compatibilty 4 from ignite.utils import convert_tensor, apply_to_tensor, apply_to_type, to_onehot 5 6 7 def _to_hours_mins_secs(time_taken: Union[float, int]) -> Tuple[int, int, int]: 8 """Convert seconds to hours, mins, and seconds.""" 9 mins, secs = divmod(time_taken, 60) 10 hours, mins = divmod(mins, 60) 11 return hours, mins, secs 12 [end of ignite/_utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ignite/_utils.py b/ignite/_utils.py --- a/ignite/_utils.py +++ b/ignite/_utils.py @@ -8,4 +8,4 @@ """Convert seconds to hours, mins, and seconds.""" mins, secs = divmod(time_taken, 60) hours, mins = divmod(mins, 60) - return hours, mins, secs + return round(hours), round(mins), round(secs)
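The patch keeps the float `divmod` and rounds the results afterwards. An equally valid way to honour the `Tuple[int, int, int]` annotation is to truncate once up front, shown here as a sketch rather than the merged change:

```python
from typing import Tuple, Union


def _to_hours_mins_secs(time_taken: Union[float, int]) -> Tuple[int, int, int]:
    """Convert seconds to hours, mins, and seconds."""
    # Casting first keeps divmod in integer arithmetic:
    # divmod(10.0, 2) == (5.0, 0.0) but divmod(10, 2) == (5, 0).
    total = int(round(time_taken))
    mins, secs = divmod(total, 60)
    hours, mins = divmod(mins, 60)
    return hours, mins, secs
```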
{"golden_diff": "diff --git a/ignite/_utils.py b/ignite/_utils.py\n--- a/ignite/_utils.py\n+++ b/ignite/_utils.py\n@@ -8,4 +8,4 @@\n \"\"\"Convert seconds to hours, mins, and seconds.\"\"\"\n mins, secs = divmod(time_taken, 60)\n hours, mins = divmod(mins, 60)\n- return hours, mins, secs\n+ return round(hours), round(mins), round(secs)\n", "issue": "Typehint of ignite._utils._to_hours_mins_secs not satisfied with float\n## \ud83d\udc1b Bug description\r\n\r\nThat is a so tiny bug. The `typehint` of the following function of `ignite._utils` is not satisfied with a `float` argument\r\n``` python\r\ndef _to_hours_mins_secs(time_taken: Union[float, int]) -> Tuple[int, int, int]:\r\n \"\"\"Convert seconds to hours, mins, and seconds.\"\"\"\r\n mins, secs = divmod(time_taken, 60)\r\n hours, mins = divmod(mins, 60)\r\n return hours, mins, secs\r\n```\r\nWe have\r\n```python\r\n>>> divmod(10.0,2)\r\n(5.0, 0.0)\r\n```\r\n\r\n## Environment\r\n\r\n - PyTorch Version (e.g., 1.4): 1.4\r\n - Ignite Version (e.g., 0.3.0): 0.3.0\r\n - OS (e.g., Linux): Linux\r\n - How you installed Ignite (`conda`, `pip`, source): conda\r\n - Python version: 3.7\r\n - Any other relevant information:\r\n\r\n\n", "before_files": [{"content": "from typing import Union, Tuple\n\n# For compatibilty\nfrom ignite.utils import convert_tensor, apply_to_tensor, apply_to_type, to_onehot\n\n\ndef _to_hours_mins_secs(time_taken: Union[float, int]) -> Tuple[int, int, int]:\n \"\"\"Convert seconds to hours, mins, and seconds.\"\"\"\n mins, secs = divmod(time_taken, 60)\n hours, mins = divmod(mins, 60)\n return hours, mins, secs\n", "path": "ignite/_utils.py"}]}
906
106
gh_patches_debug_64715
rasdani/github-patches
git_diff
Lightning-Universe__lightning-flash-1243
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Docs page describing Beta meaning ## 📚 Documentation Add a page in our docs describing that beta means that one or all of the following are true: - the feature has unstable dependencies - the feature may change without notice in future versions - the feature is not compatible with other flash / pl features - the performance of the feature has not been verified Anything else? </issue> <code> [start of docs/extensions/stability.py] 1 # Copyright The PyTorch Lightning team. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 from docutils import nodes 15 from docutils.parsers.rst import Directive 16 from docutils.statemachine import StringList 17 18 ADMONITION_TEMPLATE = """ 19 .. raw:: html 20 21 <div class="admonition warning {type}"> 22 <p class="admonition-title">{title}</p> 23 <p>This {scope} is currently in Beta. The interfaces and functionality may change without warning in future 24 releases.</p> 25 </div> 26 """ 27 28 29 class Beta(Directive): 30 has_content = True 31 required_arguments = 1 32 optional_arguments = 0 33 34 def run(self): 35 36 scope = self.arguments[0] 37 38 admonition_rst = ADMONITION_TEMPLATE.format(type="beta", title="Beta", scope=scope) 39 admonition_list = StringList(admonition_rst.split("\n")) 40 admonition = nodes.paragraph() 41 self.state.nested_parse(admonition_list, self.content_offset, admonition) 42 return [admonition] 43 44 45 def setup(app): 46 app.add_directive("beta", Beta) 47 [end of docs/extensions/stability.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/extensions/stability.py b/docs/extensions/stability.py --- a/docs/extensions/stability.py +++ b/docs/extensions/stability.py @@ -20,8 +20,14 @@ <div class="admonition warning {type}"> <p class="admonition-title">{title}</p> - <p>This {scope} is currently in Beta. The interfaces and functionality may change without warning in future - releases.</p> + <p> + +This {scope} is currently in Beta. The API and functionality may change without warning in future +releases. :ref:`More details <stability>`. + +.. raw:: html + + </p> </div> """
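A quick way to sanity-check the updated template is to render it outside Sphinx; the import path and the `task` scope below are assumptions for illustration:

```python
from stability import ADMONITION_TEMPLATE  # assumes docs/extensions is on sys.path

rendered = ADMONITION_TEMPLATE.format(type="beta", title="Beta", scope="task")
assert "This task is currently in Beta" in rendered
```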
{"golden_diff": "diff --git a/docs/extensions/stability.py b/docs/extensions/stability.py\n--- a/docs/extensions/stability.py\n+++ b/docs/extensions/stability.py\n@@ -20,8 +20,14 @@\n \n <div class=\"admonition warning {type}\">\n <p class=\"admonition-title\">{title}</p>\n- <p>This {scope} is currently in Beta. The interfaces and functionality may change without warning in future\n- releases.</p>\n+ <p>\n+\n+This {scope} is currently in Beta. The API and functionality may change without warning in future\n+releases. :ref:`More details <stability>`.\n+\n+.. raw:: html\n+\n+ </p>\n </div>\n \"\"\"\n", "issue": "Docs page describing Beta meaning\n## \ud83d\udcda Documentation\r\n\r\nAdd a page in our docs describing that beta means that one or all of the following are true:\r\n- the feature has unstable dependencies\r\n- the feature may change without notice in future versions\r\n- the feature is not compatible with other flash / pl features\r\n- the performance of the feature has not been verified\r\n\r\nAnything else?\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom docutils import nodes\nfrom docutils.parsers.rst import Directive\nfrom docutils.statemachine import StringList\n\nADMONITION_TEMPLATE = \"\"\"\n.. raw:: html\n\n <div class=\"admonition warning {type}\">\n <p class=\"admonition-title\">{title}</p>\n <p>This {scope} is currently in Beta. The interfaces and functionality may change without warning in future\n releases.</p>\n </div>\n\"\"\"\n\n\nclass Beta(Directive):\n has_content = True\n required_arguments = 1\n optional_arguments = 0\n\n def run(self):\n\n scope = self.arguments[0]\n\n admonition_rst = ADMONITION_TEMPLATE.format(type=\"beta\", title=\"Beta\", scope=scope)\n admonition_list = StringList(admonition_rst.split(\"\\n\"))\n admonition = nodes.paragraph()\n self.state.nested_parse(admonition_list, self.content_offset, admonition)\n return [admonition]\n\n\ndef setup(app):\n app.add_directive(\"beta\", Beta)\n", "path": "docs/extensions/stability.py"}]}
1057
162
gh_patches_debug_2342
rasdani/github-patches
git_diff
mozilla__bugbug-411
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Use codespell in precommit hook </issue> <code> [start of run.py] 1 # -*- coding: utf-8 -*- 2 # This Source Code Form is subject to the terms of the Mozilla Public 3 # License, v. 2.0. If a copy of the MPL was not distributed with this file, 4 # You can obtain one at http://mozilla.org/MPL/2.0/. 5 6 import argparse 7 import csv 8 import os 9 from datetime import datetime, timedelta 10 11 import numpy as np 12 13 from bugbug import repository # noqa 14 from bugbug import bugzilla, db 15 from bugbug.models import get_model_class 16 17 if __name__ == "__main__": 18 parser = argparse.ArgumentParser() 19 parser.add_argument( 20 "--lemmatization", 21 help="Perform lemmatization (using spaCy)", 22 action="store_true", 23 ) 24 parser.add_argument("--train", help="Perform training", action="store_true") 25 parser.add_argument( 26 "--goal", 27 help="Goal of the classifier", 28 choices=[ 29 # bug classifiers 30 "defect", 31 "regression", 32 "tracking", 33 "qaneeded", 34 "uplift", 35 "component", 36 "devdocneeded", 37 "defectenhancementtask", 38 "assignee", 39 "bugtype", 40 "stepstoreproduce", 41 # commit classifiers 42 "backout", 43 ], 44 default="defect", 45 ) 46 parser.add_argument( 47 "--classifier", 48 help="Type of the classifier", 49 choices=["default", "nn"], 50 default="default", 51 ) 52 parser.add_argument("--classify", help="Perform evaluation", action="store_true") 53 parser.add_argument( 54 "--generate-sheet", 55 help="Perform evaluation on bugs from last week and generate a csv file", 56 action="store_true", 57 ) 58 parser.add_argument("--token", help="Bugzilla token", action="store") 59 parser.add_argument( 60 "--historical", help="Analyze historical bugs", action="store_true" 61 ) 62 args = parser.parse_args() 63 64 model_file_name = "{}{}model".format( 65 args.goal, "" if args.classifier == "default" else args.classifier 66 ) 67 68 model_class_name = args.goal 69 70 if args.goal == "component": 71 if args.classifier == "default": 72 model_class_name = "component" 73 elif args.classifier == "nn": 74 model_class_name = "component_nn" 75 else: 76 raise ValueError(f"Unkown value {args.classifier}") 77 78 model_class = get_model_class(model_class_name) 79 80 if args.train: 81 db.download() 82 83 if args.historical: 84 model = model_class(args.lemmatization, args.historical) 85 else: 86 model = model_class(args.lemmatization) 87 model.train() 88 else: 89 model = model_class.load(model_file_name) 90 91 if args.classify: 92 for bug in bugzilla.get_bugs(): 93 print( 94 f'https://bugzilla.mozilla.org/show_bug.cgi?id={ bug["id"] } - { bug["summary"]} ' 95 ) 96 97 if model.calculate_importance: 98 probas, importances = model.classify( 99 bug, probabilities=True, importances=True 100 ) 101 102 feature_names = model.get_feature_names() 103 for i, (importance, index, is_positive) in enumerate(importances): 104 print( 105 f'{i + 1}. \'{feature_names[int(index)]}\' ({"+" if (is_positive) else "-"}{importance})' 106 ) 107 else: 108 probas = model.classify(bug, probabilities=True, importances=False) 109 110 if np.argmax(probas) == 1: 111 print(f"Positive! {probas}") 112 else: 113 print(f"Negative! 
{probas}") 114 input() 115 116 if args.generate_sheet: 117 assert ( 118 args.token is not None 119 ), "A Bugzilla token should be set in order to download bugs" 120 today = datetime.utcnow() 121 a_week_ago = today - timedelta(7) 122 bugzilla.set_token(args.token) 123 bugs = bugzilla.download_bugs_between(a_week_ago, today) 124 125 print(f"Classifying {len(bugs)} bugs...") 126 127 rows = [["Bug", f"{args.goal}(model)", args.goal, "Title"]] 128 129 for bug in bugs: 130 p = model.classify(bug, probabilities=True) 131 rows.append( 132 [ 133 f'https://bugzilla.mozilla.org/show_bug.cgi?id={bug["id"]}', 134 "y" if p[0][1] >= 0.7 else "n", 135 "", 136 bug["summary"], 137 ] 138 ) 139 140 os.makedirs("sheets", exist_ok=True) 141 with open( 142 os.path.join( 143 "sheets", 144 f'{args.goal}-{datetime.utcnow().strftime("%Y-%m-%d")}-labels.csv', 145 ), 146 "w", 147 ) as f: 148 writer = csv.writer(f) 149 writer.writerows(rows) 150 [end of run.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/run.py b/run.py --- a/run.py +++ b/run.py @@ -73,7 +73,7 @@ elif args.classifier == "nn": model_class_name = "component_nn" else: - raise ValueError(f"Unkown value {args.classifier}") + raise ValueError(f"Unknown value {args.classifier}") model_class = get_model_class(model_class_name)
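The diff above fixes the one misspelling by hand; the issue itself asks for codespell in the pre-commit hook so such typos are caught automatically. A hypothetical wrapper showing what that hook effectively runs (a real setup would declare codespell in `.pre-commit-config.yaml` rather than shelling out):

```python
import subprocess
import sys

# codespell exits non-zero when it finds misspellings and prints lines
# such as: run.py:76: Unkown ==> Unknown
result = subprocess.run(["codespell", "run.py"], capture_output=True, text=True)
if result.returncode != 0:
    sys.stdout.write(result.stdout)
    sys.exit(1)
```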
{"golden_diff": "diff --git a/run.py b/run.py\n--- a/run.py\n+++ b/run.py\n@@ -73,7 +73,7 @@\n elif args.classifier == \"nn\":\n model_class_name = \"component_nn\"\n else:\n- raise ValueError(f\"Unkown value {args.classifier}\")\n+ raise ValueError(f\"Unknown value {args.classifier}\")\n \n model_class = get_model_class(model_class_name)\n", "issue": "Use codespell in precommit hook\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport argparse\nimport csv\nimport os\nfrom datetime import datetime, timedelta\n\nimport numpy as np\n\nfrom bugbug import repository # noqa\nfrom bugbug import bugzilla, db\nfrom bugbug.models import get_model_class\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--lemmatization\",\n help=\"Perform lemmatization (using spaCy)\",\n action=\"store_true\",\n )\n parser.add_argument(\"--train\", help=\"Perform training\", action=\"store_true\")\n parser.add_argument(\n \"--goal\",\n help=\"Goal of the classifier\",\n choices=[\n # bug classifiers\n \"defect\",\n \"regression\",\n \"tracking\",\n \"qaneeded\",\n \"uplift\",\n \"component\",\n \"devdocneeded\",\n \"defectenhancementtask\",\n \"assignee\",\n \"bugtype\",\n \"stepstoreproduce\",\n # commit classifiers\n \"backout\",\n ],\n default=\"defect\",\n )\n parser.add_argument(\n \"--classifier\",\n help=\"Type of the classifier\",\n choices=[\"default\", \"nn\"],\n default=\"default\",\n )\n parser.add_argument(\"--classify\", help=\"Perform evaluation\", action=\"store_true\")\n parser.add_argument(\n \"--generate-sheet\",\n help=\"Perform evaluation on bugs from last week and generate a csv file\",\n action=\"store_true\",\n )\n parser.add_argument(\"--token\", help=\"Bugzilla token\", action=\"store\")\n parser.add_argument(\n \"--historical\", help=\"Analyze historical bugs\", action=\"store_true\"\n )\n args = parser.parse_args()\n\n model_file_name = \"{}{}model\".format(\n args.goal, \"\" if args.classifier == \"default\" else args.classifier\n )\n\n model_class_name = args.goal\n\n if args.goal == \"component\":\n if args.classifier == \"default\":\n model_class_name = \"component\"\n elif args.classifier == \"nn\":\n model_class_name = \"component_nn\"\n else:\n raise ValueError(f\"Unkown value {args.classifier}\")\n\n model_class = get_model_class(model_class_name)\n\n if args.train:\n db.download()\n\n if args.historical:\n model = model_class(args.lemmatization, args.historical)\n else:\n model = model_class(args.lemmatization)\n model.train()\n else:\n model = model_class.load(model_file_name)\n\n if args.classify:\n for bug in bugzilla.get_bugs():\n print(\n f'https://bugzilla.mozilla.org/show_bug.cgi?id={ bug[\"id\"] } - { bug[\"summary\"]} '\n )\n\n if model.calculate_importance:\n probas, importances = model.classify(\n bug, probabilities=True, importances=True\n )\n\n feature_names = model.get_feature_names()\n for i, (importance, index, is_positive) in enumerate(importances):\n print(\n f'{i + 1}. \\'{feature_names[int(index)]}\\' ({\"+\" if (is_positive) else \"-\"}{importance})'\n )\n else:\n probas = model.classify(bug, probabilities=True, importances=False)\n\n if np.argmax(probas) == 1:\n print(f\"Positive! {probas}\")\n else:\n print(f\"Negative! 
{probas}\")\n input()\n\n if args.generate_sheet:\n assert (\n args.token is not None\n ), \"A Bugzilla token should be set in order to download bugs\"\n today = datetime.utcnow()\n a_week_ago = today - timedelta(7)\n bugzilla.set_token(args.token)\n bugs = bugzilla.download_bugs_between(a_week_ago, today)\n\n print(f\"Classifying {len(bugs)} bugs...\")\n\n rows = [[\"Bug\", f\"{args.goal}(model)\", args.goal, \"Title\"]]\n\n for bug in bugs:\n p = model.classify(bug, probabilities=True)\n rows.append(\n [\n f'https://bugzilla.mozilla.org/show_bug.cgi?id={bug[\"id\"]}',\n \"y\" if p[0][1] >= 0.7 else \"n\",\n \"\",\n bug[\"summary\"],\n ]\n )\n\n os.makedirs(\"sheets\", exist_ok=True)\n with open(\n os.path.join(\n \"sheets\",\n f'{args.goal}-{datetime.utcnow().strftime(\"%Y-%m-%d\")}-labels.csv',\n ),\n \"w\",\n ) as f:\n writer = csv.writer(f)\n writer.writerows(rows)\n", "path": "run.py"}]}
1923
94
gh_patches_debug_35906
rasdani/github-patches
git_diff
streamlink__streamlink-5754
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> plugins.bigo: Unable to parse JSON ### Checklist - [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose) - [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink) - [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22) - [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master) ### Streamlink version Latest release ### Description Hello, the bigo.py is not working at the moment. It is giving a parse JSON error. Debug log is following... ### Debug log ```text error: Unable to parse JSON: Expecting value: line 1 column 1 (char 0) ('<!DOCTYPE html>\n<html lang="en" s ...) ``` </issue> <code> [start of src/streamlink/plugins/bigo.py] 1 """ 2 $description Global live streaming platform for live video game broadcasts and individual live streams. 3 $url live.bigo.tv 4 $url bigoweb.co 5 $type live 6 """ 7 8 import re 9 10 from streamlink.plugin import Plugin, pluginmatcher 11 from streamlink.plugin.api import useragents, validate 12 from streamlink.stream.hls import HLSStream 13 14 15 @pluginmatcher(re.compile( 16 r"https?://(?:www\.)?bigo\.tv/([^/]+)$", 17 )) 18 class Bigo(Plugin): 19 _api_url = "https://www.bigo.tv/OInterface/getVideoParam?bigoId={0}" 20 21 _video_info_schema = validate.Schema({ 22 "code": 0, 23 "msg": "success", 24 "data": { 25 "videoSrc": validate.any(None, "", validate.url()), 26 }, 27 }) 28 29 def _get_streams(self): 30 res = self.session.http.get( 31 self._api_url.format(self.match.group(1)), 32 allow_redirects=True, 33 headers={"User-Agent": useragents.IPHONE_6}, 34 ) 35 data = self.session.http.json(res, schema=self._video_info_schema) 36 videourl = data["data"]["videoSrc"] 37 if videourl: 38 yield "live", HLSStream(self.session, videourl) 39 40 41 __plugin__ = Bigo 42 [end of src/streamlink/plugins/bigo.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/streamlink/plugins/bigo.py b/src/streamlink/plugins/bigo.py --- a/src/streamlink/plugins/bigo.py +++ b/src/streamlink/plugins/bigo.py @@ -1,41 +1,68 @@ """ -$description Global live streaming platform for live video game broadcasts and individual live streams. -$url live.bigo.tv -$url bigoweb.co +$description Global live-streaming platform for live video game broadcasts and individual live streams. +$url bigo.tv $type live +$metadata id +$metadata author +$metadata category +$metadata title """ +import logging import re from streamlink.plugin import Plugin, pluginmatcher -from streamlink.plugin.api import useragents, validate +from streamlink.plugin.api import validate from streamlink.stream.hls import HLSStream +log = logging.getLogger(__name__) + + @pluginmatcher(re.compile( - r"https?://(?:www\.)?bigo\.tv/([^/]+)$", + r"https?://(?:www\.)?bigo\.tv/(?P<site_id>[^/]+)$", )) class Bigo(Plugin): - _api_url = "https://www.bigo.tv/OInterface/getVideoParam?bigoId={0}" - - _video_info_schema = validate.Schema({ - "code": 0, - "msg": "success", - "data": { - "videoSrc": validate.any(None, "", validate.url()), - }, - }) + _URL_API = "https://ta.bigo.tv/official_website/studio/getInternalStudioInfo" def _get_streams(self): - res = self.session.http.get( - self._api_url.format(self.match.group(1)), - allow_redirects=True, - headers={"User-Agent": useragents.IPHONE_6}, + self.id, self.author, self.category, self.title, hls_url = self.session.http.post( + self._URL_API, + params={ + "siteId": self.match["site_id"], + "verify": "", + }, + schema=validate.Schema( + validate.parse_json(), + { + "code": 0, + "msg": "success", + "data": { + "roomId": validate.any(None, str), + "clientBigoId": validate.any(None, str), + "gameTitle": str, + "roomTopic": str, + "hls_src": validate.any(None, "", validate.url()), + }, + }, + validate.union_get( + ("data", "roomId"), + ("data", "clientBigoId"), + ("data", "gameTitle"), + ("data", "roomTopic"), + ("data", "hls_src"), + ), + ), ) - data = self.session.http.json(res, schema=self._video_info_schema) - videourl = data["data"]["videoSrc"] - if videourl: - yield "live", HLSStream(self.session, videourl) + + if not self.id: + return + + if not hls_url: + log.info("Channel is offline") + return + + yield "live", HLSStream(self.session, hls_url) __plugin__ = Bigo
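The root cause was the old endpoint answering with an HTML page, which the JSON parser then choked on. Independent of the rewritten plugin above, a defensive pattern for that failure mode looks roughly like this, with the helper name and message being illustrative:

```python
from streamlink.exceptions import PluginError


def fetch_json(session, url):
    # Fail with a clear message when an endpoint starts serving HTML
    # instead of JSON, the symptom reported in this issue.
    res = session.http.get(url)
    content_type = res.headers.get("Content-Type", "")
    if "application/json" not in content_type:
        raise PluginError(f"Expected JSON from {url}, got {content_type!r}")
    return res.json()
```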
{"golden_diff": "diff --git a/src/streamlink/plugins/bigo.py b/src/streamlink/plugins/bigo.py\n--- a/src/streamlink/plugins/bigo.py\n+++ b/src/streamlink/plugins/bigo.py\n@@ -1,41 +1,68 @@\n \"\"\"\n-$description Global live streaming platform for live video game broadcasts and individual live streams.\n-$url live.bigo.tv\n-$url bigoweb.co\n+$description Global live-streaming platform for live video game broadcasts and individual live streams.\n+$url bigo.tv\n $type live\n+$metadata id\n+$metadata author\n+$metadata category\n+$metadata title\n \"\"\"\n \n+import logging\n import re\n \n from streamlink.plugin import Plugin, pluginmatcher\n-from streamlink.plugin.api import useragents, validate\n+from streamlink.plugin.api import validate\n from streamlink.stream.hls import HLSStream\n \n \n+log = logging.getLogger(__name__)\n+\n+\n @pluginmatcher(re.compile(\n- r\"https?://(?:www\\.)?bigo\\.tv/([^/]+)$\",\n+ r\"https?://(?:www\\.)?bigo\\.tv/(?P<site_id>[^/]+)$\",\n ))\n class Bigo(Plugin):\n- _api_url = \"https://www.bigo.tv/OInterface/getVideoParam?bigoId={0}\"\n-\n- _video_info_schema = validate.Schema({\n- \"code\": 0,\n- \"msg\": \"success\",\n- \"data\": {\n- \"videoSrc\": validate.any(None, \"\", validate.url()),\n- },\n- })\n+ _URL_API = \"https://ta.bigo.tv/official_website/studio/getInternalStudioInfo\"\n \n def _get_streams(self):\n- res = self.session.http.get(\n- self._api_url.format(self.match.group(1)),\n- allow_redirects=True,\n- headers={\"User-Agent\": useragents.IPHONE_6},\n+ self.id, self.author, self.category, self.title, hls_url = self.session.http.post(\n+ self._URL_API,\n+ params={\n+ \"siteId\": self.match[\"site_id\"],\n+ \"verify\": \"\",\n+ },\n+ schema=validate.Schema(\n+ validate.parse_json(),\n+ {\n+ \"code\": 0,\n+ \"msg\": \"success\",\n+ \"data\": {\n+ \"roomId\": validate.any(None, str),\n+ \"clientBigoId\": validate.any(None, str),\n+ \"gameTitle\": str,\n+ \"roomTopic\": str,\n+ \"hls_src\": validate.any(None, \"\", validate.url()),\n+ },\n+ },\n+ validate.union_get(\n+ (\"data\", \"roomId\"),\n+ (\"data\", \"clientBigoId\"),\n+ (\"data\", \"gameTitle\"),\n+ (\"data\", \"roomTopic\"),\n+ (\"data\", \"hls_src\"),\n+ ),\n+ ),\n )\n- data = self.session.http.json(res, schema=self._video_info_schema)\n- videourl = data[\"data\"][\"videoSrc\"]\n- if videourl:\n- yield \"live\", HLSStream(self.session, videourl)\n+\n+ if not self.id:\n+ return\n+\n+ if not hls_url:\n+ log.info(\"Channel is offline\")\n+ return\n+\n+ yield \"live\", HLSStream(self.session, hls_url)\n \n \n __plugin__ = Bigo\n", "issue": "plugins.bigo: Unable to parse JSON\n### Checklist\n\n- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nLatest release\n\n### Description\n\nHello,\r\n\r\nthe bigo.py is not working at the moment.\r\n\r\nIt is giving a parse JSON error.\r\n\r\nDebug log is following...\n\n### Debug log\n\n```text\nerror: Unable to parse JSON: Expecting value: line 1 column 1 (char 0) ('<!DOCTYPE html>\\n<html 
lang=\"en\" s ...)\n```\n\n", "before_files": [{"content": "\"\"\"\n$description Global live streaming platform for live video game broadcasts and individual live streams.\n$url live.bigo.tv\n$url bigoweb.co\n$type live\n\"\"\"\n\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import useragents, validate\nfrom streamlink.stream.hls import HLSStream\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?:www\\.)?bigo\\.tv/([^/]+)$\",\n))\nclass Bigo(Plugin):\n _api_url = \"https://www.bigo.tv/OInterface/getVideoParam?bigoId={0}\"\n\n _video_info_schema = validate.Schema({\n \"code\": 0,\n \"msg\": \"success\",\n \"data\": {\n \"videoSrc\": validate.any(None, \"\", validate.url()),\n },\n })\n\n def _get_streams(self):\n res = self.session.http.get(\n self._api_url.format(self.match.group(1)),\n allow_redirects=True,\n headers={\"User-Agent\": useragents.IPHONE_6},\n )\n data = self.session.http.json(res, schema=self._video_info_schema)\n videourl = data[\"data\"][\"videoSrc\"]\n if videourl:\n yield \"live\", HLSStream(self.session, videourl)\n\n\n__plugin__ = Bigo\n", "path": "src/streamlink/plugins/bigo.py"}]}
1145
725
gh_patches_debug_12103
rasdani/github-patches
git_diff
pyinstaller__pyinstaller-4326
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> soundfile hook has osx/windows specific behaviour hook-soundfile.py was added in 3.5 but it blows up on linux as follows 9727 INFO: Loading module hook "hook-soundfile.py"... Unable to find "/home/matt/.virtualenvs/beqdesigner-entpycF3/lib/python3.7/site-packages/_soundfile_data" when adding binary and data files. on OSX, it also fails but with a different error ValueError: Unknown Mach-O header: 0x20202020 in <_io.BufferedReader name='/Users/travis/build/3ll3d00d/beqdesigner/.venv/lib/python3.7/site-packages/_soundfile_data/COPYING'> It completes successfully on Windows The problem is that pysoundfile packages libsndfile on Windows and OSX (as per https://pysoundfile.readthedocs.io/en/0.9.0/#installation) but relies on a system package on Linux so the mentioned directory (`_soundfile_data`) will not exist on Linux. On OSX only a certain file is required (`_soundfile_data/libsndfile.dylib`) Minimal test case can be found at https://github.com/3ll3d00d/pyinstaller-pysoundfile-bug </issue> <code> [start of PyInstaller/hooks/hook-soundfile.py] 1 #----------------------------------------------------------------------------- 2 # Copyright (c) 2016-2019, PyInstaller Development Team. 3 # 4 # Distributed under the terms of the GNU General Public License with exception 5 # for distributing bootloader. 6 # 7 # The full license is in the file COPYING.txt, distributed with this software. 8 #----------------------------------------------------------------------------- 9 10 """ 11 pysoundfile: 12 https://github.com/bastibe/SoundFile 13 """ 14 15 import os 16 from PyInstaller.utils.hooks import get_package_paths 17 18 # get path of soundfile 19 sfp = get_package_paths('soundfile') 20 21 # add the binaries 22 bins = os.path.join(sfp[0], "_soundfile_data") 23 binaries = [(bins, "_soundfile_data")] 24 [end of PyInstaller/hooks/hook-soundfile.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/PyInstaller/hooks/hook-soundfile.py b/PyInstaller/hooks/hook-soundfile.py --- a/PyInstaller/hooks/hook-soundfile.py +++ b/PyInstaller/hooks/hook-soundfile.py @@ -13,11 +13,20 @@ """ import os + +from PyInstaller.compat import is_win, is_darwin from PyInstaller.utils.hooks import get_package_paths # get path of soundfile sfp = get_package_paths('soundfile') -# add the binaries -bins = os.path.join(sfp[0], "_soundfile_data") -binaries = [(bins, "_soundfile_data")] +# add binaries packaged by soundfile on OSX and Windows +# an external dependency (libsndfile) is used on GNU/Linux +path = None +if is_win: + path = os.path.join(sfp[0], '_soundfile_data') +elif is_darwin: + path = os.path.join(sfp[0], '_soundfile_data', 'libsndfile.dylib') + +if path is not None and os.path.exists(path): + binaries = [(path, "_soundfile_data")]
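The patch encodes pysoundfile's packaging policy: bundled binaries on Windows, a single dylib on macOS, and the system libsndfile on Linux. The same logic as a standalone sketch, assuming the values would be fed to `Analysis(binaries=...)` in a spec file:

```python
import os

from PyInstaller.compat import is_darwin, is_win
from PyInstaller.utils.hooks import get_package_paths

# get_package_paths returns (base_dir, package_dir); _soundfile_data sits
# next to the soundfile module inside base_dir.
pkg_base, _ = get_package_paths("soundfile")

binaries = []
if is_win:
    binaries.append((os.path.join(pkg_base, "_soundfile_data"), "_soundfile_data"))
elif is_darwin:
    lib = os.path.join(pkg_base, "_soundfile_data", "libsndfile.dylib")
    if os.path.exists(lib):
        binaries.append((lib, "_soundfile_data"))
# On GNU/Linux nothing is bundled; libsndfile comes from the system.
```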
{"golden_diff": "diff --git a/PyInstaller/hooks/hook-soundfile.py b/PyInstaller/hooks/hook-soundfile.py\n--- a/PyInstaller/hooks/hook-soundfile.py\n+++ b/PyInstaller/hooks/hook-soundfile.py\n@@ -13,11 +13,20 @@\n \"\"\"\n \n import os\n+\n+from PyInstaller.compat import is_win, is_darwin\n from PyInstaller.utils.hooks import get_package_paths\n \n # get path of soundfile\n sfp = get_package_paths('soundfile')\n \n-# add the binaries\n-bins = os.path.join(sfp[0], \"_soundfile_data\")\n-binaries = [(bins, \"_soundfile_data\")]\n+# add binaries packaged by soundfile on OSX and Windows\n+# an external dependency (libsndfile) is used on GNU/Linux\n+path = None\n+if is_win:\n+ path = os.path.join(sfp[0], '_soundfile_data')\n+elif is_darwin:\n+ path = os.path.join(sfp[0], '_soundfile_data', 'libsndfile.dylib')\n+\n+if path is not None and os.path.exists(path):\n+ binaries = [(path, \"_soundfile_data\")]\n", "issue": "soundfile hook has osx/windows specific behaviour\nhook-soundfile.py was added in 3.5 but it blows up on linux as follows\r\n\r\n 9727 INFO: Loading module hook \"hook-soundfile.py\"...\r\n Unable to find \"/home/matt/.virtualenvs/beqdesigner-entpycF3/lib/python3.7/site-packages/_soundfile_data\" when adding binary and data files.\r\n\r\non OSX, it also fails but with a different error\r\n\r\n ValueError: Unknown Mach-O header: 0x20202020 in <_io.BufferedReader \r\n name='/Users/travis/build/3ll3d00d/beqdesigner/.venv/lib/python3.7/site-packages/_soundfile_data/COPYING'>\r\n\r\nIt completes successfully on Windows\r\n\r\nThe problem is that pysoundfile packages libsndfile on Windows and OSX (as per https://pysoundfile.readthedocs.io/en/0.9.0/#installation) but relies on a system package on Linux so the mentioned directory (`_soundfile_data`) will not exist on Linux. On OSX only a certain file is required (`_soundfile_data/libsndfile.dylib`)\r\n\r\nMinimal test case can be found at https://github.com/3ll3d00d/pyinstaller-pysoundfile-bug\r\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2016-2019, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n\"\"\"\npysoundfile:\nhttps://github.com/bastibe/SoundFile\n\"\"\"\n\nimport os\nfrom PyInstaller.utils.hooks import get_package_paths\n\n# get path of soundfile\nsfp = get_package_paths('soundfile')\n\n# add the binaries\nbins = os.path.join(sfp[0], \"_soundfile_data\")\nbinaries = [(bins, \"_soundfile_data\")]\n", "path": "PyInstaller/hooks/hook-soundfile.py"}]}
1011
258
gh_patches_debug_35619
rasdani/github-patches
git_diff
mathesar-foundation__mathesar-616
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Uncaught 404s in record viewsets and pagination classes ## Description The record viewset, column viewset and paignation classes regularly call `Table.objects.get(id=table_pk)`, which throws a `mathesar.models.Table.DoesNotExist: Table matching query does not exist.` when an invalid table id is passed. To recreate, run `client.get(f'/api/v0/tables/3000/records/')`. ## Expected behavior We should ensure that the table exists before querying, or catch the `DoesNotExist` error after querying. We should also include tests for table 404s. This is probably best done after #488 is merged, as it includes a function to do exactly this. </issue> <code> [start of mathesar/api/pagination.py] 1 from collections import OrderedDict 2 3 from rest_framework.pagination import LimitOffsetPagination 4 from rest_framework.response import Response 5 6 7 class DefaultLimitOffsetPagination(LimitOffsetPagination): 8 default_limit = 50 9 max_limit = 500 10 11 def get_paginated_response(self, data): 12 return Response(OrderedDict([ 13 ('count', self.count), 14 ('results', data) 15 ])) 16 17 18 class ColumnLimitOffsetPagination(DefaultLimitOffsetPagination): 19 20 def paginate_queryset(self, queryset, request, table_id): 21 self.limit = self.get_limit(request) 22 if self.limit is None: 23 self.limit = self.default_limit 24 self.offset = self.get_offset(request) 25 table = queryset.get(id=table_id) 26 self.count = len(table.sa_columns) 27 self.request = request 28 return list(table.sa_columns)[self.offset:self.offset + self.limit] 29 30 31 class TableLimitOffsetPagination(DefaultLimitOffsetPagination): 32 33 def paginate_queryset(self, queryset, request, table_id, 34 filters=[], order_by=[]): 35 self.limit = self.get_limit(request) 36 if self.limit is None: 37 self.limit = self.default_limit 38 self.offset = self.get_offset(request) 39 # TODO: Cache count value somewhere, since calculating it is expensive. 
40 table = queryset.get(id=table_id) 41 self.count = table.sa_num_records(filters=filters) 42 self.request = request 43 44 return table.get_records( 45 self.limit, self.offset, filters=filters, order_by=order_by, 46 ) 47 48 49 class TableLimitOffsetGroupPagination(TableLimitOffsetPagination): 50 def get_paginated_response(self, data): 51 return Response(OrderedDict([ 52 ('count', self.count), 53 ('group_count', self.group_count), 54 ('results', data) 55 ])) 56 57 def paginate_queryset(self, queryset, request, table_id, 58 filters=[], order_by=[], group_count_by=[]): 59 records = super().paginate_queryset( 60 queryset, request, table_id, filters=filters, order_by=order_by 61 ) 62 63 table = queryset.get(id=table_id) 64 if group_count_by: 65 group_count = table.get_group_counts( 66 group_count_by, self.limit, self.offset, 67 filters=filters, order_by=order_by 68 ) 69 # Convert the tuple keys into strings so it can be converted to JSON 70 group_count = [{"values": list(cols), "count": count} 71 for cols, count in group_count.items()] 72 self.group_count = { 73 'group_count_by': group_count_by, 74 'results': group_count, 75 } 76 else: 77 self.group_count = { 78 'group_count_by': None, 79 'results': None, 80 } 81 82 return records 83 [end of mathesar/api/pagination.py] [start of mathesar/api/utils.py] 1 from rest_framework.exceptions import NotFound 2 3 from mathesar.models import Table 4 5 6 def get_table_or_404(pk): 7 try: 8 table = Table.objects.get(id=pk) 9 except Table.DoesNotExist: 10 raise NotFound 11 return table 12 [end of mathesar/api/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mathesar/api/pagination.py b/mathesar/api/pagination.py --- a/mathesar/api/pagination.py +++ b/mathesar/api/pagination.py @@ -3,6 +3,8 @@ from rest_framework.pagination import LimitOffsetPagination from rest_framework.response import Response +from mathesar.api.utils import get_table_or_404 + class DefaultLimitOffsetPagination(LimitOffsetPagination): default_limit = 50 @@ -22,7 +24,7 @@ if self.limit is None: self.limit = self.default_limit self.offset = self.get_offset(request) - table = queryset.get(id=table_id) + table = get_table_or_404(pk=table_id) self.count = len(table.sa_columns) self.request = request return list(table.sa_columns)[self.offset:self.offset + self.limit] @@ -37,7 +39,7 @@ self.limit = self.default_limit self.offset = self.get_offset(request) # TODO: Cache count value somewhere, since calculating it is expensive. - table = queryset.get(id=table_id) + table = get_table_or_404(pk=table_id) self.count = table.sa_num_records(filters=filters) self.request = request @@ -60,7 +62,7 @@ queryset, request, table_id, filters=filters, order_by=order_by ) - table = queryset.get(id=table_id) + table = get_table_or_404(pk=table_id) if group_count_by: group_count = table.get_group_counts( group_count_by, self.limit, self.offset, diff --git a/mathesar/api/utils.py b/mathesar/api/utils.py --- a/mathesar/api/utils.py +++ b/mathesar/api/utils.py @@ -4,6 +4,13 @@ def get_table_or_404(pk): + """ + Get table if it exists, otherwise throws a DRF NotFound error. + Args: + pk: id of table + Returns: + table: return the table based on a specific id + """ try: table = Table.objects.get(id=pk) except Table.DoesNotExist:
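The issue explicitly asks for tests covering the 404 path. A sketch of such a regression test, where the `client` fixture is an assumed Django/DRF test client and table id 3000 comes straight from the reproduction steps:

```python
def test_records_list_unknown_table_returns_404(client):
    response = client.get("/api/v0/tables/3000/records/")
    assert response.status_code == 404
```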
{"golden_diff": "diff --git a/mathesar/api/pagination.py b/mathesar/api/pagination.py\n--- a/mathesar/api/pagination.py\n+++ b/mathesar/api/pagination.py\n@@ -3,6 +3,8 @@\n from rest_framework.pagination import LimitOffsetPagination\n from rest_framework.response import Response\n \n+from mathesar.api.utils import get_table_or_404\n+\n \n class DefaultLimitOffsetPagination(LimitOffsetPagination):\n default_limit = 50\n@@ -22,7 +24,7 @@\n if self.limit is None:\n self.limit = self.default_limit\n self.offset = self.get_offset(request)\n- table = queryset.get(id=table_id)\n+ table = get_table_or_404(pk=table_id)\n self.count = len(table.sa_columns)\n self.request = request\n return list(table.sa_columns)[self.offset:self.offset + self.limit]\n@@ -37,7 +39,7 @@\n self.limit = self.default_limit\n self.offset = self.get_offset(request)\n # TODO: Cache count value somewhere, since calculating it is expensive.\n- table = queryset.get(id=table_id)\n+ table = get_table_or_404(pk=table_id)\n self.count = table.sa_num_records(filters=filters)\n self.request = request\n \n@@ -60,7 +62,7 @@\n queryset, request, table_id, filters=filters, order_by=order_by\n )\n \n- table = queryset.get(id=table_id)\n+ table = get_table_or_404(pk=table_id)\n if group_count_by:\n group_count = table.get_group_counts(\n group_count_by, self.limit, self.offset,\ndiff --git a/mathesar/api/utils.py b/mathesar/api/utils.py\n--- a/mathesar/api/utils.py\n+++ b/mathesar/api/utils.py\n@@ -4,6 +4,13 @@\n \n \n def get_table_or_404(pk):\n+ \"\"\"\n+ Get table if it exists, otherwise throws a DRF NotFound error.\n+ Args:\n+ pk: id of table\n+ Returns:\n+ table: return the table based on a specific id\n+ \"\"\"\n try:\n table = Table.objects.get(id=pk)\n except Table.DoesNotExist:\n", "issue": "Uncaught 404s in record viewsets and pagination classes\n## Description\r\nThe record viewset, column viewset and paignation classes regularly call `Table.objects.get(id=table_pk)`, which throws a `mathesar.models.Table.DoesNotExist: Table matching query does not exist.` when an invalid table id is passed.\r\n\r\nTo recreate, run `client.get(f'/api/v0/tables/3000/records/')`.\r\n\r\n\r\n## Expected behavior\r\nWe should ensure that the table exists before querying, or catch the `DoesNotExist` error after querying. We should also include tests for table 404s. 
\r\n\r\nThis is probably best done after #488 is merged, as it includes a function to do exactly this.\n", "before_files": [{"content": "from collections import OrderedDict\n\nfrom rest_framework.pagination import LimitOffsetPagination\nfrom rest_framework.response import Response\n\n\nclass DefaultLimitOffsetPagination(LimitOffsetPagination):\n default_limit = 50\n max_limit = 500\n\n def get_paginated_response(self, data):\n return Response(OrderedDict([\n ('count', self.count),\n ('results', data)\n ]))\n\n\nclass ColumnLimitOffsetPagination(DefaultLimitOffsetPagination):\n\n def paginate_queryset(self, queryset, request, table_id):\n self.limit = self.get_limit(request)\n if self.limit is None:\n self.limit = self.default_limit\n self.offset = self.get_offset(request)\n table = queryset.get(id=table_id)\n self.count = len(table.sa_columns)\n self.request = request\n return list(table.sa_columns)[self.offset:self.offset + self.limit]\n\n\nclass TableLimitOffsetPagination(DefaultLimitOffsetPagination):\n\n def paginate_queryset(self, queryset, request, table_id,\n filters=[], order_by=[]):\n self.limit = self.get_limit(request)\n if self.limit is None:\n self.limit = self.default_limit\n self.offset = self.get_offset(request)\n # TODO: Cache count value somewhere, since calculating it is expensive.\n table = queryset.get(id=table_id)\n self.count = table.sa_num_records(filters=filters)\n self.request = request\n\n return table.get_records(\n self.limit, self.offset, filters=filters, order_by=order_by,\n )\n\n\nclass TableLimitOffsetGroupPagination(TableLimitOffsetPagination):\n def get_paginated_response(self, data):\n return Response(OrderedDict([\n ('count', self.count),\n ('group_count', self.group_count),\n ('results', data)\n ]))\n\n def paginate_queryset(self, queryset, request, table_id,\n filters=[], order_by=[], group_count_by=[]):\n records = super().paginate_queryset(\n queryset, request, table_id, filters=filters, order_by=order_by\n )\n\n table = queryset.get(id=table_id)\n if group_count_by:\n group_count = table.get_group_counts(\n group_count_by, self.limit, self.offset,\n filters=filters, order_by=order_by\n )\n # Convert the tuple keys into strings so it can be converted to JSON\n group_count = [{\"values\": list(cols), \"count\": count}\n for cols, count in group_count.items()]\n self.group_count = {\n 'group_count_by': group_count_by,\n 'results': group_count,\n }\n else:\n self.group_count = {\n 'group_count_by': None,\n 'results': None,\n }\n\n return records\n", "path": "mathesar/api/pagination.py"}, {"content": "from rest_framework.exceptions import NotFound\n\nfrom mathesar.models import Table\n\n\ndef get_table_or_404(pk):\n try:\n table = Table.objects.get(id=pk)\n except Table.DoesNotExist:\n raise NotFound\n return table\n", "path": "mathesar/api/utils.py"}]}
1,507
488
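A note on the fix recorded above: routing lookups through get_table_or_404 converts an uncaught Table.DoesNotExist (an HTTP 500) into DRF's NotFound (an HTTP 404). The same control flow can be checked without Django or DRF installed; the sketch below uses stand-in classes, so every name here is illustrative rather than Mathesar code.

```python
class DoesNotExist(Exception):
    """Stand-in for Django's Model.DoesNotExist."""

class NotFound(Exception):
    """Stand-in for rest_framework.exceptions.NotFound (rendered as HTTP 404)."""

class FakeManager:
    def __init__(self, rows):
        self._rows = rows

    def get(self, id):
        if id not in self._rows:
            raise DoesNotExist  # what queryset.get() raises, surfacing as a 500
        return self._rows[id]

def get_table_or_404(objects, pk):
    try:
        return objects.get(id=pk)
    except DoesNotExist:
        raise NotFound  # the framework turns this into a clean 404 response

tables = FakeManager({1: "patents"})
assert get_table_or_404(tables, 1) == "patents"
try:
    get_table_or_404(tables, 3000)  # the table id from the issue's reproduction
except NotFound:
    print("missing table -> NotFound (404) instead of an unhandled exception")
```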
gh_patches_debug_3160
rasdani/github-patches
git_diff
ipython__ipython-7560
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Displaying a widget using displayhook produces misaligned Out[N] prompt ![screenshot from 2015-01-22 16 56 44](https://cloud.githubusercontent.com/assets/327925/5867960/bd98d2d8-a257-11e4-8599-e0e331664f9b.png) This doesn't look right. @jdfreder, can you investigate? </issue> <code> [start of IPython/kernel/zmq/displayhook.py] 1 """Replacements for sys.displayhook that publish over ZMQ.""" 2 3 # Copyright (c) IPython Development Team. 4 # Distributed under the terms of the Modified BSD License. 5 6 import sys 7 8 from IPython.core.displayhook import DisplayHook 9 from IPython.kernel.inprocess.socket import SocketABC 10 from IPython.utils.jsonutil import encode_images 11 from IPython.utils.py3compat import builtin_mod 12 from IPython.utils.traitlets import Instance, Dict 13 from .session import extract_header, Session 14 15 class ZMQDisplayHook(object): 16 """A simple displayhook that publishes the object's repr over a ZeroMQ 17 socket.""" 18 topic=b'execute_result' 19 20 def __init__(self, session, pub_socket): 21 self.session = session 22 self.pub_socket = pub_socket 23 self.parent_header = {} 24 25 def __call__(self, obj): 26 if obj is None: 27 return 28 29 builtin_mod._ = obj 30 sys.stdout.flush() 31 sys.stderr.flush() 32 msg = self.session.send(self.pub_socket, u'execute_result', {u'data':repr(obj)}, 33 parent=self.parent_header, ident=self.topic) 34 35 def set_parent(self, parent): 36 self.parent_header = extract_header(parent) 37 38 39 class ZMQShellDisplayHook(DisplayHook): 40 """A displayhook subclass that publishes data using ZeroMQ. This is intended 41 to work with an InteractiveShell instance. It sends a dict of different 42 representations of the object.""" 43 topic=None 44 45 session = Instance(Session) 46 pub_socket = Instance(SocketABC) 47 parent_header = Dict({}) 48 49 def set_parent(self, parent): 50 """Set the parent for outbound messages.""" 51 self.parent_header = extract_header(parent) 52 53 def start_displayhook(self): 54 self.msg = self.session.msg(u'execute_result', { 55 'data': {}, 56 'metadata': {}, 57 }, parent=self.parent_header) 58 59 def write_output_prompt(self): 60 """Write the output prompt.""" 61 self.msg['content']['execution_count'] = self.prompt_count 62 63 def write_format_data(self, format_dict, md_dict=None): 64 self.msg['content']['data'] = encode_images(format_dict) 65 self.msg['content']['metadata'] = md_dict 66 67 def finish_displayhook(self): 68 """Finish up all displayhook activities.""" 69 sys.stdout.flush() 70 sys.stderr.flush() 71 self.session.send(self.pub_socket, self.msg, ident=self.topic) 72 self.msg = None 73 74 [end of IPython/kernel/zmq/displayhook.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/IPython/kernel/zmq/displayhook.py b/IPython/kernel/zmq/displayhook.py --- a/IPython/kernel/zmq/displayhook.py +++ b/IPython/kernel/zmq/displayhook.py @@ -68,6 +68,7 @@ """Finish up all displayhook activities.""" sys.stdout.flush() sys.stderr.flush() - self.session.send(self.pub_socket, self.msg, ident=self.topic) + if self.msg['content']['data']: + self.session.send(self.pub_socket, self.msg, ident=self.topic) self.msg = None
{"golden_diff": "diff --git a/IPython/kernel/zmq/displayhook.py b/IPython/kernel/zmq/displayhook.py\n--- a/IPython/kernel/zmq/displayhook.py\n+++ b/IPython/kernel/zmq/displayhook.py\n@@ -68,6 +68,7 @@\n \"\"\"Finish up all displayhook activities.\"\"\"\n sys.stdout.flush()\n sys.stderr.flush()\n- self.session.send(self.pub_socket, self.msg, ident=self.topic)\n+ if self.msg['content']['data']:\n+ self.session.send(self.pub_socket, self.msg, ident=self.topic)\n self.msg = None\n", "issue": "Displaying a widget using displayhook produces misaligned Out[N] prompt\n![screenshot from 2015-01-22 16 56 44](https://cloud.githubusercontent.com/assets/327925/5867960/bd98d2d8-a257-11e4-8599-e0e331664f9b.png)\n\nThis doesn't look right. @jdfreder, can you investigate?\n\n", "before_files": [{"content": "\"\"\"Replacements for sys.displayhook that publish over ZMQ.\"\"\"\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport sys\n\nfrom IPython.core.displayhook import DisplayHook\nfrom IPython.kernel.inprocess.socket import SocketABC\nfrom IPython.utils.jsonutil import encode_images\nfrom IPython.utils.py3compat import builtin_mod\nfrom IPython.utils.traitlets import Instance, Dict\nfrom .session import extract_header, Session\n\nclass ZMQDisplayHook(object):\n \"\"\"A simple displayhook that publishes the object's repr over a ZeroMQ\n socket.\"\"\"\n topic=b'execute_result'\n\n def __init__(self, session, pub_socket):\n self.session = session\n self.pub_socket = pub_socket\n self.parent_header = {}\n\n def __call__(self, obj):\n if obj is None:\n return\n\n builtin_mod._ = obj\n sys.stdout.flush()\n sys.stderr.flush()\n msg = self.session.send(self.pub_socket, u'execute_result', {u'data':repr(obj)},\n parent=self.parent_header, ident=self.topic)\n\n def set_parent(self, parent):\n self.parent_header = extract_header(parent)\n\n\nclass ZMQShellDisplayHook(DisplayHook):\n \"\"\"A displayhook subclass that publishes data using ZeroMQ. This is intended\n to work with an InteractiveShell instance. It sends a dict of different\n representations of the object.\"\"\"\n topic=None\n\n session = Instance(Session)\n pub_socket = Instance(SocketABC)\n parent_header = Dict({})\n\n def set_parent(self, parent):\n \"\"\"Set the parent for outbound messages.\"\"\"\n self.parent_header = extract_header(parent)\n\n def start_displayhook(self):\n self.msg = self.session.msg(u'execute_result', {\n 'data': {},\n 'metadata': {},\n }, parent=self.parent_header)\n\n def write_output_prompt(self):\n \"\"\"Write the output prompt.\"\"\"\n self.msg['content']['execution_count'] = self.prompt_count\n\n def write_format_data(self, format_dict, md_dict=None):\n self.msg['content']['data'] = encode_images(format_dict)\n self.msg['content']['metadata'] = md_dict\n\n def finish_displayhook(self):\n \"\"\"Finish up all displayhook activities.\"\"\"\n sys.stdout.flush()\n sys.stderr.flush()\n self.session.send(self.pub_socket, self.msg, ident=self.topic)\n self.msg = None\n\n", "path": "IPython/kernel/zmq/displayhook.py"}]}
1,313
124
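The whole fix above is a single guard in finish_displayhook. A minimal way to see its effect, with the ZMQ session replaced by a stub (StubSession and this simplified finish_displayhook signature are assumptions for the demo, not IPython API):

```python
class StubSession:
    def __init__(self):
        self.sent = []

    def send(self, socket, msg, ident=None):
        self.sent.append(msg)

def finish_displayhook(session, pub_socket, msg, topic=b"execute_result"):
    # Patched behaviour: publish only when some representation was captured,
    # so a widget that renders itself no longer leaves an empty Out[N] prompt.
    if msg["content"]["data"]:
        session.send(pub_socket, msg, ident=topic)

session = StubSession()
finish_displayhook(session, None, {"content": {"data": {}}})  # suppressed
finish_displayhook(session, None, {"content": {"data": {"text/plain": "42"}}})
assert len(session.sent) == 1
```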
gh_patches_debug_61971
rasdani/github-patches
git_diff
crytic__slither-1110
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Bug-Candidate]: Phi-node print missing 'f' in f-string ### Describe the issue: When printing a Phi-node the string is not formatted. There seems to be a 'f' missing ahead of the str in https://github.com/crytic/slither/blob/dev/slither/slithir/operations/phi.py#L36 ### Code example to reproduce the issue: slither tests/complex_func.sol --print slithir-ssa ### Version: dev-branch dd91f770f61eaadc286e2af3c72fb5798e376c16 ### Relevant log output: ``` Contract Increment Function Increment.increaseBy1() IRs: {self.lvalue}({self.lvalue.type}) := ϕ({[str(v) for v in self._rvalues]}) Expression: i += 1 ``` </issue> <code> [start of slither/slithir/operations/phi.py] 1 from slither.slithir.operations.lvalue import OperationWithLValue 2 from slither.slithir.utils.utils import is_valid_lvalue 3 4 5 class Phi(OperationWithLValue): 6 def __init__(self, left_variable, nodes): 7 # When Phi operations are created the 8 # correct indexes of the variables are not yet computed 9 # We store the nodes where the variables are written 10 # so we can update the rvalues of the Phi operation 11 # after its instantiation 12 assert is_valid_lvalue(left_variable) 13 assert isinstance(nodes, set) 14 super().__init__() 15 self._lvalue = left_variable 16 self._rvalues = [] 17 self._nodes = nodes 18 19 @property 20 def read(self): 21 return self.rvalues 22 23 @property 24 def rvalues(self): 25 return self._rvalues 26 27 @rvalues.setter 28 def rvalues(self, vals): 29 self._rvalues = vals 30 31 @property 32 def nodes(self): 33 return self._nodes 34 35 def __str__(self): 36 return "{self.lvalue}({self.lvalue.type}) := \u03D5({[str(v) for v in self._rvalues]})" 37 [end of slither/slithir/operations/phi.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/slither/slithir/operations/phi.py b/slither/slithir/operations/phi.py --- a/slither/slithir/operations/phi.py +++ b/slither/slithir/operations/phi.py @@ -33,4 +33,4 @@ return self._nodes def __str__(self): - return "{self.lvalue}({self.lvalue.type}) := \u03D5({[str(v) for v in self._rvalues]})" + return f"{self.lvalue}({self.lvalue.type}) := \u03D5({[str(v) for v in self._rvalues]})"
{"golden_diff": "diff --git a/slither/slithir/operations/phi.py b/slither/slithir/operations/phi.py\n--- a/slither/slithir/operations/phi.py\n+++ b/slither/slithir/operations/phi.py\n@@ -33,4 +33,4 @@\n return self._nodes\n \n def __str__(self):\n- return \"{self.lvalue}({self.lvalue.type}) := \\u03D5({[str(v) for v in self._rvalues]})\"\n+ return f\"{self.lvalue}({self.lvalue.type}) := \\u03D5({[str(v) for v in self._rvalues]})\"\n", "issue": "[Bug-Candidate]: Phi-node print missing 'f' in f-string\n### Describe the issue:\n\nWhen printing a Phi-node the string is not formatted.\r\nThere seems to be a 'f' missing ahead of the str in https://github.com/crytic/slither/blob/dev/slither/slithir/operations/phi.py#L36\n\n### Code example to reproduce the issue:\n\nslither tests/complex_func.sol --print slithir-ssa\n\n### Version:\n\ndev-branch dd91f770f61eaadc286e2af3c72fb5798e376c16\n\n### Relevant log output:\n\n```\r\nContract Increment\r\n Function Increment.increaseBy1()\r\n IRs:\r\n {self.lvalue}({self.lvalue.type}) := \u03d5({[str(v) for v in self._rvalues]})\r\n Expression: i += 1\r\n```\n", "before_files": [{"content": "from slither.slithir.operations.lvalue import OperationWithLValue\nfrom slither.slithir.utils.utils import is_valid_lvalue\n\n\nclass Phi(OperationWithLValue):\n def __init__(self, left_variable, nodes):\n # When Phi operations are created the\n # correct indexes of the variables are not yet computed\n # We store the nodes where the variables are written\n # so we can update the rvalues of the Phi operation\n # after its instantiation\n assert is_valid_lvalue(left_variable)\n assert isinstance(nodes, set)\n super().__init__()\n self._lvalue = left_variable\n self._rvalues = []\n self._nodes = nodes\n\n @property\n def read(self):\n return self.rvalues\n\n @property\n def rvalues(self):\n return self._rvalues\n\n @rvalues.setter\n def rvalues(self, vals):\n self._rvalues = vals\n\n @property\n def nodes(self):\n return self._nodes\n\n def __str__(self):\n return \"{self.lvalue}({self.lvalue.type}) := \\u03D5({[str(v) for v in self._rvalues]})\"\n", "path": "slither/slithir/operations/phi.py"}]}
1,070
149
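The patch is a single character, but the behaviour difference is worth spelling out: without the f prefix, Python treats the braces as literal text, which is exactly the unformatted SSA line shown in the report. The variable names below just mimic that printout.

```python
lvalue, rvalues = "i_2", ["i_1", "i_0"]

broken = "{self.lvalue} := \u03D5({rvalues})"  # plain string: braces stay literal
fixed = f"{lvalue} := \u03D5({rvalues})"       # f-string: values are interpolated

print(broken)  # {self.lvalue} := ϕ({rvalues})   <- matches the bug report
print(fixed)   # i_2 := ϕ(['i_1', 'i_0'])
```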
gh_patches_debug_31305
rasdani/github-patches
git_diff
sosreport__sos-2660
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [ssh] Can ssh plugin check the permissions set for /home/*/.ssh files? Hello! When users set wrong permissions to files in their ~/.ssh/ folder, i.e. they set write permission for `~/.ssh/authenticated_keys` for `other`, the SSH server will refuse to accept connections for this user. I think it would be nice for the [ssh] plugin to check, if the permissions set for files in the `.ssh` folders of system users are correct, or if they are corrupted in some way. A very simple solution for that would be to just run `ls -l .ssh/` in every home directory. Would it be OK to extend the ssh plugin to do this? Would it be better to have a separate plugin do this? Let me know what you think, and I'll give it a try if that's ok :) </issue> <code> [start of sos/report/plugins/ssh.py] 1 # Copyright (C) 2007 Red Hat, Inc., Eugene Teo <[email protected]> 2 3 # This file is part of the sos project: https://github.com/sosreport/sos 4 # 5 # This copyrighted material is made available to anyone wishing to use, 6 # modify, copy, or redistribute it subject to the terms and conditions of 7 # version 2 of the GNU General Public License. 8 # 9 # See the LICENSE file in the source distribution for further information. 10 11 from sos.report.plugins import Plugin, IndependentPlugin 12 13 14 class Ssh(Plugin, IndependentPlugin): 15 16 short_desc = 'Secure shell service' 17 18 plugin_name = 'ssh' 19 profiles = ('services', 'security', 'system', 'identity') 20 21 def setup(self): 22 23 self.add_file_tags({ 24 '/etc/ssh/sshd_config': 'sshd_config', 25 '/etc/ssh/ssh_config': 'ssh_config' 26 }) 27 28 sshcfgs = [ 29 "/etc/ssh/ssh_config", 30 "/etc/ssh/sshd_config" 31 ] 32 33 # Include main config files 34 self.add_copy_spec(sshcfgs) 35 36 # Read configs for any includes and copy those 37 try: 38 for sshcfg in sshcfgs: 39 tag = sshcfg.split('/')[-1] 40 with open(sshcfg, 'r') as cfgfile: 41 for line in cfgfile: 42 # skip empty lines and comments 43 if len(line.split()) == 0 or line.startswith('#'): 44 continue 45 # ssh_config keywords are allowed as case-insensitive 46 if line.lower().startswith('include'): 47 confarg = line.split() 48 self.add_copy_spec(confarg[1], tags=tag) 49 except Exception: 50 pass 51 52 53 # vim: set et ts=4 sw=4 : 54 [end of sos/report/plugins/ssh.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sos/report/plugins/ssh.py b/sos/report/plugins/ssh.py --- a/sos/report/plugins/ssh.py +++ b/sos/report/plugins/ssh.py @@ -9,6 +9,7 @@ # See the LICENSE file in the source distribution for further information. from sos.report.plugins import Plugin, IndependentPlugin +import os.path class Ssh(Plugin, IndependentPlugin): @@ -33,6 +34,10 @@ # Include main config files self.add_copy_spec(sshcfgs) + self.included_configs(sshcfgs) + self.user_ssh_files_permissions() + + def included_configs(self, sshcfgs): # Read configs for any includes and copy those try: for sshcfg in sshcfgs: @@ -49,5 +54,33 @@ except Exception: pass + def user_ssh_files_permissions(self): + """ + Iterate over .ssh folders in user homes to see their permissions. + + Bad permissions can prevent SSH from allowing access to given user. + """ + users_data = self.exec_cmd('getent passwd') + + if users_data['status']: + # If getent fails, fallback to just reading /etc/passwd + try: + with open('/etc/passwd') as passwd_file: + users_data_lines = passwd_file.readlines() + except Exception: + # If we can't read /etc/passwd, then there's something wrong. + self._log_error("Couldn't read /etc/passwd") + return + else: + users_data_lines = users_data['output'].splitlines() + + # Read the home paths of users in the system and check the ~/.ssh dirs + for usr_line in users_data_lines: + try: + home_dir = os.path.join(usr_line.split(':')[5], '.ssh') + if self.path_isdir(home_dir): + self.add_cmd_output('ls -laZ {}'.format(home_dir)) + except IndexError: + pass # vim: set et ts=4 sw=4 :
{"golden_diff": "diff --git a/sos/report/plugins/ssh.py b/sos/report/plugins/ssh.py\n--- a/sos/report/plugins/ssh.py\n+++ b/sos/report/plugins/ssh.py\n@@ -9,6 +9,7 @@\n # See the LICENSE file in the source distribution for further information.\n \n from sos.report.plugins import Plugin, IndependentPlugin\n+import os.path\n \n \n class Ssh(Plugin, IndependentPlugin):\n@@ -33,6 +34,10 @@\n # Include main config files\n self.add_copy_spec(sshcfgs)\n \n+ self.included_configs(sshcfgs)\n+ self.user_ssh_files_permissions()\n+\n+ def included_configs(self, sshcfgs):\n # Read configs for any includes and copy those\n try:\n for sshcfg in sshcfgs:\n@@ -49,5 +54,33 @@\n except Exception:\n pass\n \n+ def user_ssh_files_permissions(self):\n+ \"\"\"\n+ Iterate over .ssh folders in user homes to see their permissions.\n+\n+ Bad permissions can prevent SSH from allowing access to given user.\n+ \"\"\"\n+ users_data = self.exec_cmd('getent passwd')\n+\n+ if users_data['status']:\n+ # If getent fails, fallback to just reading /etc/passwd\n+ try:\n+ with open('/etc/passwd') as passwd_file:\n+ users_data_lines = passwd_file.readlines()\n+ except Exception:\n+ # If we can't read /etc/passwd, then there's something wrong.\n+ self._log_error(\"Couldn't read /etc/passwd\")\n+ return\n+ else:\n+ users_data_lines = users_data['output'].splitlines()\n+\n+ # Read the home paths of users in the system and check the ~/.ssh dirs\n+ for usr_line in users_data_lines:\n+ try:\n+ home_dir = os.path.join(usr_line.split(':')[5], '.ssh')\n+ if self.path_isdir(home_dir):\n+ self.add_cmd_output('ls -laZ {}'.format(home_dir))\n+ except IndexError:\n+ pass\n \n # vim: set et ts=4 sw=4 :\n", "issue": "[ssh] Can ssh plugin check the permissions set for /home/*/.ssh files?\nHello!\r\n\r\nWhen users set wrong permissions to files in their ~/.ssh/ folder, i.e. they set write permission for `~/.ssh/authenticated_keys` for `other`, the SSH server will refuse to accept connections for this user.\r\n\r\nI think it would be nice for the [ssh] plugin to check, if the permissions set for files in the `.ssh` folders of system users are correct, or if they are corrupted in some way. \r\n\r\nA very simple solution for that would be to just run `ls -l .ssh/` in every home directory. Would it be OK to extend the ssh plugin to do this? 
Would it be better to have a separate plugin do this?\r\n\r\nLet me know what you think, and I'll give it a try if that's ok :)\n", "before_files": [{"content": "# Copyright (C) 2007 Red Hat, Inc., Eugene Teo <[email protected]>\n\n# This file is part of the sos project: https://github.com/sosreport/sos\n#\n# This copyrighted material is made available to anyone wishing to use,\n# modify, copy, or redistribute it subject to the terms and conditions of\n# version 2 of the GNU General Public License.\n#\n# See the LICENSE file in the source distribution for further information.\n\nfrom sos.report.plugins import Plugin, IndependentPlugin\n\n\nclass Ssh(Plugin, IndependentPlugin):\n\n short_desc = 'Secure shell service'\n\n plugin_name = 'ssh'\n profiles = ('services', 'security', 'system', 'identity')\n\n def setup(self):\n\n self.add_file_tags({\n '/etc/ssh/sshd_config': 'sshd_config',\n '/etc/ssh/ssh_config': 'ssh_config'\n })\n\n sshcfgs = [\n \"/etc/ssh/ssh_config\",\n \"/etc/ssh/sshd_config\"\n ]\n\n # Include main config files\n self.add_copy_spec(sshcfgs)\n\n # Read configs for any includes and copy those\n try:\n for sshcfg in sshcfgs:\n tag = sshcfg.split('/')[-1]\n with open(sshcfg, 'r') as cfgfile:\n for line in cfgfile:\n # skip empty lines and comments\n if len(line.split()) == 0 or line.startswith('#'):\n continue\n # ssh_config keywords are allowed as case-insensitive\n if line.lower().startswith('include'):\n confarg = line.split()\n self.add_copy_spec(confarg[1], tags=tag)\n except Exception:\n pass\n\n\n# vim: set et ts=4 sw=4 :\n", "path": "sos/report/plugins/ssh.py"}]}
1,202
466
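The interesting part of the new user_ssh_files_permissions method is the passwd parsing, which is easy to exercise on its own. In this sketch the filesystem check is injected so it runs anywhere; ssh_perm_commands and the sample entries are made up for illustration.

```python
import os.path

def ssh_perm_commands(passwd_lines, isdir=os.path.isdir):
    """Build the `ls -laZ <home>/.ssh` commands from passwd-format lines."""
    cmds = []
    for usr_line in passwd_lines:
        try:
            home_ssh = os.path.join(usr_line.split(":")[5], ".ssh")
        except IndexError:  # malformed entry: skip it, as the plugin does
            continue
        if isdir(home_ssh):
            cmds.append("ls -laZ {}".format(home_ssh))
    return cmds

sample = [
    "alice:x:1000:1000:Alice:/home/alice:/bin/bash",
    "daemon:x:2:2",  # too few fields, exercises the IndexError branch
]
print(ssh_perm_commands(sample, isdir=lambda path: True))
# -> ['ls -laZ /home/alice/.ssh']
```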
gh_patches_debug_54607
rasdani/github-patches
git_diff
zulip__zulip-13067
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Clean up `update-locked-requirements` and `requirements.in` files to remove `-e` hackery. It looks like https://github.com/jazzband/pip-tools/pull/807 was included in the latest `pip-tools` release 12 days ago. I think this may mean we can get rid of our semantically incorrect usage of `-e` in our requirements files, which in turn may mean we can remove most of the messy code in `tools/update-locked-requirements` related to hackily removing the `-e` lines. See `compile_requirements` in that file for details. My guess is that this means if we upgrade pip-tools, we can delete 50% of the code in `update-locked-requirements` and clean up our `requirements.in` files to not use `-e`. @hackerkid this might be a good project for you. Clean up `update-locked-requirements` and `requirements.in` files to remove `-e` hackery. It looks like https://github.com/jazzband/pip-tools/pull/807 was included in the latest `pip-tools` release 12 days ago. I think this may mean we can get rid of our semantically incorrect usage of `-e` in our requirements files, which in turn may mean we can remove most of the messy code in `tools/update-locked-requirements` related to hackily removing the `-e` lines. See `compile_requirements` in that file for details. My guess is that this means if we upgrade pip-tools, we can delete 50% of the code in `update-locked-requirements` and clean up our `requirements.in` files to not use `-e`. @hackerkid this might be a good project for you. </issue> <code> [start of version.py] 1 import os 2 3 ZULIP_VERSION = "2.0.4+git" 4 # Add information on number of commits and commit hash to version, if available 5 zulip_git_version_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'zulip-git-version') 6 if os.path.exists(zulip_git_version_file): 7 with open(zulip_git_version_file) as f: 8 version = f.read().strip() 9 if version: 10 ZULIP_VERSION = version 11 12 LATEST_MAJOR_VERSION = "2.0" 13 LATEST_RELEASE_VERSION = "2.0.4" 14 LATEST_RELEASE_ANNOUNCEMENT = "https://blog.zulip.org/2019/03/01/zulip-2-0-released/" 15 16 # Bump the minor PROVISION_VERSION to indicate that folks should provision 17 # only when going from an old version of the code to a newer version. Bump 18 # the major version to indicate that folks should provision in both 19 # directions. 20 21 # Typically, 22 # * adding a dependency only requires a minor version bump; 23 # * removing a dependency requires a major version bump; 24 # * upgrading a dependency requires a major version bump, unless the 25 # upgraded dependency is backwards compatible with all of our 26 # historical commits sharing the same major version, in which case a 27 # minor version bump suffices. 28 29 PROVISION_VERSION = '49.1' 30 [end of version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/version.py b/version.py --- a/version.py +++ b/version.py @@ -26,4 +26,4 @@ # historical commits sharing the same major version, in which case a # minor version bump suffices. -PROVISION_VERSION = '49.1' +PROVISION_VERSION = '49.2'
{"golden_diff": "diff --git a/version.py b/version.py\n--- a/version.py\n+++ b/version.py\n@@ -26,4 +26,4 @@\n # historical commits sharing the same major version, in which case a\n # minor version bump suffices.\n \n-PROVISION_VERSION = '49.1'\n+PROVISION_VERSION = '49.2'\n", "issue": "Clean up `update-locked-requirements` and `requirements.in` files to remove `-e` hackery.\nIt looks like https://github.com/jazzband/pip-tools/pull/807 was included in the latest `pip-tools` release 12 days ago. I think this may mean we can get rid of our semantically incorrect usage of `-e` in our requirements files, which in turn may mean we can remove most of the messy code in `tools/update-locked-requirements` related to hackily removing the `-e` lines. \r\n See `compile_requirements` in that file for details. \r\n\r\nMy guess is that this means if we upgrade pip-tools, we can delete 50% of the code in `update-locked-requirements` and clean up our `requirements.in` files to not use `-e`. \r\n\r\n@hackerkid this might be a good project for you.\nClean up `update-locked-requirements` and `requirements.in` files to remove `-e` hackery.\nIt looks like https://github.com/jazzband/pip-tools/pull/807 was included in the latest `pip-tools` release 12 days ago. I think this may mean we can get rid of our semantically incorrect usage of `-e` in our requirements files, which in turn may mean we can remove most of the messy code in `tools/update-locked-requirements` related to hackily removing the `-e` lines. \r\n See `compile_requirements` in that file for details. \r\n\r\nMy guess is that this means if we upgrade pip-tools, we can delete 50% of the code in `update-locked-requirements` and clean up our `requirements.in` files to not use `-e`. \r\n\r\n@hackerkid this might be a good project for you.\n", "before_files": [{"content": "import os\n\nZULIP_VERSION = \"2.0.4+git\"\n# Add information on number of commits and commit hash to version, if available\nzulip_git_version_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'zulip-git-version')\nif os.path.exists(zulip_git_version_file):\n with open(zulip_git_version_file) as f:\n version = f.read().strip()\n if version:\n ZULIP_VERSION = version\n\nLATEST_MAJOR_VERSION = \"2.0\"\nLATEST_RELEASE_VERSION = \"2.0.4\"\nLATEST_RELEASE_ANNOUNCEMENT = \"https://blog.zulip.org/2019/03/01/zulip-2-0-released/\"\n\n# Bump the minor PROVISION_VERSION to indicate that folks should provision\n# only when going from an old version of the code to a newer version. Bump\n# the major version to indicate that folks should provision in both\n# directions.\n\n# Typically,\n# * adding a dependency only requires a minor version bump;\n# * removing a dependency requires a major version bump;\n# * upgrading a dependency requires a major version bump, unless the\n# upgraded dependency is backwards compatible with all of our\n# historical commits sharing the same major version, in which case a\n# minor version bump suffices.\n\nPROVISION_VERSION = '49.1'\n", "path": "version.py"}]}
1,260
79
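The diff itself is only a minor PROVISION_VERSION bump, but the comment block in version.py defines a convention worth making concrete. Assuming versions always follow the major.minor form used there, the rule could be encoded like this (should_provision is a hypothetical helper, not Zulip code):

```python
def should_provision(old: str, new: str) -> bool:
    old_major, old_minor = map(int, old.split("."))
    new_major, new_minor = map(int, new.split("."))
    if new_major != old_major:
        return True  # major bump: provision in both directions
    return new_minor > old_minor  # minor bump: only old -> new needs it

assert should_provision("49.1", "49.2")      # the bump in this diff
assert not should_provision("49.2", "49.1")  # minor-only downgrade
assert should_provision("49.2", "50.0")      # major bump, either direction
assert should_provision("50.0", "49.2")
```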
gh_patches_debug_9751
rasdani/github-patches
git_diff
liqd__a4-meinberlin-481
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> project list does not show text if there are no matching projects It should show something like "No projects could be found". Note that the text should work for two cases: "there are no projects" and "there are no projects matching the filters". </issue> <code> [start of apps/contrib/templatetags/contrib_tags.py] 1 from django import template 2 from django.template.loader import render_to_string 3 4 register = template.Library() 5 6 7 @register.assignment_tag 8 def include_template_string(template, **kwargs): 9 rendered_template = render_to_string(template, kwargs) 10 return str(rendered_template) 11 12 13 @register.assignment_tag 14 def combined_url_parameter(request_query_dict, **kwargs): 15 combined_query_dict = request_query_dict.copy() 16 for key in kwargs: 17 combined_query_dict.setlist(key, [kwargs[key]]) 18 encoded_parameter = '?' + combined_query_dict.urlencode() 19 return encoded_parameter 20 [end of apps/contrib/templatetags/contrib_tags.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/apps/contrib/templatetags/contrib_tags.py b/apps/contrib/templatetags/contrib_tags.py --- a/apps/contrib/templatetags/contrib_tags.py +++ b/apps/contrib/templatetags/contrib_tags.py @@ -17,3 +17,14 @@ combined_query_dict.setlist(key, [kwargs[key]]) encoded_parameter = '?' + combined_query_dict.urlencode() return encoded_parameter + + [email protected]_tag +def filter_has_perm(perm, user, objects): + """Filter a list of objects based on user permissions.""" + if not hasattr(user, 'has_perm'): + # If the swapped user model does not support permissions, all objects + # will be returned. This is taken from rules.templatetags.has_perm. + return objects + else: + return (obj for obj in objects if user.has_perm(perm, obj))
{"golden_diff": "diff --git a/apps/contrib/templatetags/contrib_tags.py b/apps/contrib/templatetags/contrib_tags.py\n--- a/apps/contrib/templatetags/contrib_tags.py\n+++ b/apps/contrib/templatetags/contrib_tags.py\n@@ -17,3 +17,14 @@\n combined_query_dict.setlist(key, [kwargs[key]])\n encoded_parameter = '?' + combined_query_dict.urlencode()\n return encoded_parameter\n+\n+\[email protected]_tag\n+def filter_has_perm(perm, user, objects):\n+ \"\"\"Filter a list of objects based on user permissions.\"\"\"\n+ if not hasattr(user, 'has_perm'):\n+ # If the swapped user model does not support permissions, all objects\n+ # will be returned. This is taken from rules.templatetags.has_perm.\n+ return objects\n+ else:\n+ return (obj for obj in objects if user.has_perm(perm, obj))\n", "issue": "project list does not show text if there are no matching projects\nIt should show something like \"No projects could be found\". Note that the text should work for two cases: \"there are no projects\" and \"there are no projects matching the filters\".\n", "before_files": [{"content": "from django import template\nfrom django.template.loader import render_to_string\n\nregister = template.Library()\n\n\[email protected]_tag\ndef include_template_string(template, **kwargs):\n rendered_template = render_to_string(template, kwargs)\n return str(rendered_template)\n\n\[email protected]_tag\ndef combined_url_parameter(request_query_dict, **kwargs):\n combined_query_dict = request_query_dict.copy()\n for key in kwargs:\n combined_query_dict.setlist(key, [kwargs[key]])\n encoded_parameter = '?' + combined_query_dict.urlencode()\n return encoded_parameter\n", "path": "apps/contrib/templatetags/contrib_tags.py"}]}
749
210
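Beyond the template change implied by the issue, the golden diff adds a filter_has_perm tag whose logic stands alone. The stub user, permission string, and project dicts below are invented for the demo; the function body mirrors the one in the diff.

```python
def filter_has_perm(perm, user, objects):
    """Keep only the objects the user may see; pass everything through for
    user models without permission support (the rules.templatetags fallback)."""
    if not hasattr(user, "has_perm"):
        return objects
    return (obj for obj in objects if user.has_perm(perm, obj))

class StubUser:
    def has_perm(self, perm, obj):
        return obj["public"]

projects = [
    {"name": "a", "public": True},
    {"name": "b", "public": False},
]
visible = list(filter_has_perm("view_project", StubUser(), projects))
assert [p["name"] for p in visible] == ["a"]
# An empty `visible` list is where the template should render a
# "No projects could be found" message, covering both cases in the issue.
```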
gh_patches_debug_8368
rasdani/github-patches
git_diff
wagtail__wagtail-2488
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Keyerror when sending password reset email When sending a password reset email, I'm getting an internal error I'll just share the raven error - hopefully that doesn't review all of the site secrets (probably does) https://app.getsentry.com/share/issue/37343334302e313233323439393235/ </issue> <code> [start of wagtail/wagtailadmin/templatetags/wagtailuserbar.py] 1 from __future__ import absolute_import, unicode_literals 2 3 from django import template 4 from django.template.loader import render_to_string 5 6 from wagtail.wagtailadmin.userbar import ( 7 AddPageItem, AdminItem, ApproveModerationEditPageItem, EditPageItem, ExplorePageItem, 8 RejectModerationEditPageItem) 9 from wagtail.wagtailcore import hooks 10 from wagtail.wagtailcore.models import PAGE_TEMPLATE_VAR, Page, PageRevision 11 12 # from django.contrib.auth.decorators import permission_required 13 14 15 register = template.Library() 16 17 18 def get_page_instance(context): 19 """ 20 Given a template context, try and find a Page variable in the common 21 places. Returns None if a page can not be found. 22 """ 23 possible_names = [PAGE_TEMPLATE_VAR, 'self'] 24 for name in possible_names: 25 if name in context: 26 page = context[name] 27 if isinstance(page, Page): 28 return page 29 30 31 @register.simple_tag(takes_context=True) 32 def wagtailuserbar(context, position='bottom-right'): 33 # Find request object 34 request = context['request'] 35 36 37 # Don't render if user doesn't have permission to access the admin area 38 if not request.user.has_perm('wagtailadmin.access_admin'): 39 return '' 40 41 # Only render if the context contains a variable referencing a saved page 42 page = get_page_instance(context) 43 if page is None: 44 return '' 45 46 # Dont render anything if the page has not been saved - i.e. a preview 47 if page.pk is None: 48 return '' 49 50 try: 51 revision_id = request.revision_id 52 except AttributeError: 53 revision_id = None 54 55 if revision_id is None: 56 items = [ 57 AdminItem(), 58 ExplorePageItem(Page.objects.get(id=page.id)), 59 EditPageItem(Page.objects.get(id=page.id)), 60 AddPageItem(Page.objects.get(id=page.id)), 61 ] 62 else: 63 items = [ 64 AdminItem(), 65 ExplorePageItem(PageRevision.objects.get(id=revision_id).page), 66 EditPageItem(PageRevision.objects.get(id=revision_id).page), 67 AddPageItem(PageRevision.objects.get(id=revision_id).page), 68 ApproveModerationEditPageItem(PageRevision.objects.get(id=revision_id)), 69 RejectModerationEditPageItem(PageRevision.objects.get(id=revision_id)), 70 ] 71 72 for fn in hooks.get_hooks('construct_wagtail_userbar'): 73 fn(request, items) 74 75 # Render the items 76 rendered_items = [item.render(request) for item in items] 77 78 # Remove any unrendered items 79 rendered_items = [item for item in rendered_items if item] 80 81 # Render the userbar items 82 return render_to_string('wagtailadmin/userbar/base.html', { 83 'request': request, 84 'items': rendered_items, 85 'position': position, 86 'page': page, 87 'revision_id': revision_id 88 }) 89 [end of wagtail/wagtailadmin/templatetags/wagtailuserbar.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/wagtail/wagtailadmin/templatetags/wagtailuserbar.py b/wagtail/wagtailadmin/templatetags/wagtailuserbar.py --- a/wagtail/wagtailadmin/templatetags/wagtailuserbar.py +++ b/wagtail/wagtailadmin/templatetags/wagtailuserbar.py @@ -31,8 +31,10 @@ @register.simple_tag(takes_context=True) def wagtailuserbar(context, position='bottom-right'): # Find request object - request = context['request'] - + try: + request = context['request'] + except KeyError: + return '' # Don't render if user doesn't have permission to access the admin area if not request.user.has_perm('wagtailadmin.access_admin'):
{"golden_diff": "diff --git a/wagtail/wagtailadmin/templatetags/wagtailuserbar.py b/wagtail/wagtailadmin/templatetags/wagtailuserbar.py\n--- a/wagtail/wagtailadmin/templatetags/wagtailuserbar.py\n+++ b/wagtail/wagtailadmin/templatetags/wagtailuserbar.py\n@@ -31,8 +31,10 @@\n @register.simple_tag(takes_context=True)\n def wagtailuserbar(context, position='bottom-right'):\n # Find request object\n- request = context['request']\n-\n+ try:\n+ request = context['request']\n+ except KeyError:\n+ return ''\n \n # Don't render if user doesn't have permission to access the admin area\n if not request.user.has_perm('wagtailadmin.access_admin'):\n", "issue": "Keyerror when sending password reset email\nWhen sending a password reset email, I'm getting an internal error\n\nI'll just share the raven error - hopefully that doesn't review all of the site secrets (probably does)\n\nhttps://app.getsentry.com/share/issue/37343334302e313233323439393235/\n\n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nfrom django import template\nfrom django.template.loader import render_to_string\n\nfrom wagtail.wagtailadmin.userbar import (\n AddPageItem, AdminItem, ApproveModerationEditPageItem, EditPageItem, ExplorePageItem,\n RejectModerationEditPageItem)\nfrom wagtail.wagtailcore import hooks\nfrom wagtail.wagtailcore.models import PAGE_TEMPLATE_VAR, Page, PageRevision\n\n# from django.contrib.auth.decorators import permission_required\n\n\nregister = template.Library()\n\n\ndef get_page_instance(context):\n \"\"\"\n Given a template context, try and find a Page variable in the common\n places. Returns None if a page can not be found.\n \"\"\"\n possible_names = [PAGE_TEMPLATE_VAR, 'self']\n for name in possible_names:\n if name in context:\n page = context[name]\n if isinstance(page, Page):\n return page\n\n\[email protected]_tag(takes_context=True)\ndef wagtailuserbar(context, position='bottom-right'):\n # Find request object\n request = context['request']\n\n\n # Don't render if user doesn't have permission to access the admin area\n if not request.user.has_perm('wagtailadmin.access_admin'):\n return ''\n\n # Only render if the context contains a variable referencing a saved page\n page = get_page_instance(context)\n if page is None:\n return ''\n\n # Dont render anything if the page has not been saved - i.e. 
a preview\n if page.pk is None:\n return ''\n\n try:\n revision_id = request.revision_id\n except AttributeError:\n revision_id = None\n\n if revision_id is None:\n items = [\n AdminItem(),\n ExplorePageItem(Page.objects.get(id=page.id)),\n EditPageItem(Page.objects.get(id=page.id)),\n AddPageItem(Page.objects.get(id=page.id)),\n ]\n else:\n items = [\n AdminItem(),\n ExplorePageItem(PageRevision.objects.get(id=revision_id).page),\n EditPageItem(PageRevision.objects.get(id=revision_id).page),\n AddPageItem(PageRevision.objects.get(id=revision_id).page),\n ApproveModerationEditPageItem(PageRevision.objects.get(id=revision_id)),\n RejectModerationEditPageItem(PageRevision.objects.get(id=revision_id)),\n ]\n\n for fn in hooks.get_hooks('construct_wagtail_userbar'):\n fn(request, items)\n\n # Render the items\n rendered_items = [item.render(request) for item in items]\n\n # Remove any unrendered items\n rendered_items = [item for item in rendered_items if item]\n\n # Render the userbar items\n return render_to_string('wagtailadmin/userbar/base.html', {\n 'request': request,\n 'items': rendered_items,\n 'position': position,\n 'page': page,\n 'revision_id': revision_id\n })\n", "path": "wagtail/wagtailadmin/templatetags/wagtailuserbar.py"}]}
1,446
188
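The crash and the fix are both contained in the first two lines of the tag, so a dict-based stand-in shows the behaviour change directly (this simplified wagtailuserbar is a sketch, not the real tag):

```python
def wagtailuserbar(context):
    try:
        request = context["request"]
    except KeyError:
        return ""  # e.g. password-reset emails render with no request at all
    return "<userbar for {}>".format(request)

assert wagtailuserbar({}) == ""  # previously: KeyError -> internal error
assert wagtailuserbar({"request": "req"}) == "<userbar for req>"
```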
gh_patches_debug_27250
rasdani/github-patches
git_diff
nilearn__nilearn-3710
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Documentation builder failure on main https://github.com/nilearn/nilearn/actions/workflows/build-docs.yml started occurring after merging #3698 (doubt it is related given the content of the PR) https://github.com/nilearn/nilearn/actions/runs/4741116007 </issue> <code> [start of nilearn/datasets/__init__.py] 1 """Helper functions to download NeuroImaging datasets.""" 2 3 from .atlas import ( 4 fetch_atlas_aal, 5 fetch_atlas_allen_2011, 6 fetch_atlas_basc_multiscale_2015, 7 fetch_atlas_craddock_2012, 8 fetch_atlas_destrieux_2009, 9 fetch_atlas_difumo, 10 fetch_atlas_harvard_oxford, 11 fetch_atlas_juelich, 12 fetch_atlas_msdl, 13 fetch_atlas_schaefer_2018, 14 fetch_atlas_smith_2009, 15 fetch_atlas_surf_destrieux, 16 fetch_atlas_talairach, 17 fetch_atlas_yeo_2011, 18 fetch_coords_dosenbach_2010, 19 fetch_coords_power_2011, 20 fetch_coords_seitzman_2018, 21 ) 22 from .func import ( 23 fetch_abide_pcp, 24 fetch_adhd, 25 fetch_bids_langloc_dataset, 26 fetch_development_fmri, 27 fetch_fiac_first_level, 28 fetch_haxby, 29 fetch_language_localizer_demo_dataset, 30 fetch_localizer_button_task, 31 fetch_localizer_calculation_task, 32 fetch_localizer_contrasts, 33 fetch_localizer_first_level, 34 fetch_megatrawls_netmats, 35 fetch_mixed_gambles, 36 fetch_miyawaki2008, 37 fetch_openneuro_dataset, 38 fetch_openneuro_dataset_index, 39 fetch_spm_auditory, 40 fetch_spm_multimodal_fmri, 41 fetch_surf_nki_enhanced, 42 patch_openneuro_dataset, 43 select_from_index, 44 ) 45 from .neurovault import ( 46 fetch_neurovault, 47 fetch_neurovault_auditory_computation_task, 48 fetch_neurovault_ids, 49 fetch_neurovault_motor_task, 50 ) 51 from .struct import ( 52 GM_MNI152_FILE_PATH, 53 MNI152_FILE_PATH, 54 WM_MNI152_FILE_PATH, 55 fetch_icbm152_2009, 56 fetch_icbm152_brain_gm_mask, 57 fetch_oasis_vbm, 58 fetch_surf_fsaverage, 59 load_mni152_brain_mask, 60 load_mni152_gm_mask, 61 load_mni152_gm_template, 62 load_mni152_template, 63 load_mni152_wm_mask, 64 load_mni152_wm_template, 65 ) 66 from .utils import get_data_dirs, load_sample_motor_activation_image 67 68 __all__ = [ 69 "MNI152_FILE_PATH", 70 "GM_MNI152_FILE_PATH", 71 "WM_MNI152_FILE_PATH", 72 "fetch_icbm152_2009", 73 "load_mni152_template", 74 "load_mni152_gm_template", 75 "load_mni152_wm_template", 76 "fetch_oasis_vbm", 77 "fetch_haxby", 78 "fetch_adhd", 79 "fetch_miyawaki2008", 80 "fetch_localizer_contrasts", 81 "fetch_localizer_button_task", 82 "fetch_abide_pcp", 83 "fetch_localizer_calculation_task", 84 "fetch_atlas_craddock_2012", 85 "fetch_atlas_destrieux_2009", 86 "fetch_atlas_juelich", 87 "fetch_atlas_harvard_oxford", 88 "fetch_atlas_msdl", 89 "fetch_atlas_schaefer_2018", 90 "fetch_coords_power_2011", 91 "fetch_coords_seitzman_2018", 92 "fetch_atlas_smith_2009", 93 "fetch_atlas_allen_2011", 94 "fetch_atlas_yeo_2011", 95 "fetch_mixed_gambles", 96 "fetch_atlas_aal", 97 "fetch_atlas_difumo", 98 "fetch_megatrawls_netmats", 99 "fetch_surf_nki_enhanced", 100 "fetch_development_fmri", 101 "fetch_surf_fsaverage", 102 "fetch_atlas_basc_multiscale_2015", 103 "fetch_coords_dosenbach_2010", 104 "fetch_neurovault", 105 "fetch_neurovault_ids", 106 "fetch_neurovault_motor_task", 107 "fetch_neurovault_auditory_computation_task", 108 "load_mni152_brain_mask", 109 "load_mni152_gm_mask", 110 "load_mni152_wm_mask", 111 "fetch_icbm152_brain_gm_mask", 112 "fetch_atlas_surf_destrieux", 113 "fetch_atlas_talairach", 114 "get_data_dirs", 115 
"load_sample_motor_activation_image", 116 "fetch_language_localizer_demo_dataset", 117 "fetch_bids_langloc_dataset", 118 "fetch_openneuro_dataset_index", 119 "select_from_index", 120 "patch_openneuro_dataset", 121 "fetch_openneuro_dataset", 122 "fetch_localizer_first_level", 123 "fetch_spm_auditory", 124 "fetch_spm_multimodal_fmri", 125 "fetch_fiac_first_level", 126 ] 127 [end of nilearn/datasets/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nilearn/datasets/__init__.py b/nilearn/datasets/__init__.py --- a/nilearn/datasets/__init__.py +++ b/nilearn/datasets/__init__.py @@ -10,6 +10,7 @@ fetch_atlas_harvard_oxford, fetch_atlas_juelich, fetch_atlas_msdl, + fetch_atlas_pauli_2017, fetch_atlas_schaefer_2018, fetch_atlas_smith_2009, fetch_atlas_surf_destrieux, @@ -24,6 +25,7 @@ fetch_adhd, fetch_bids_langloc_dataset, fetch_development_fmri, + fetch_ds000030_urls, fetch_fiac_first_level, fetch_haxby, fetch_language_localizer_demo_dataset, @@ -86,6 +88,7 @@ "fetch_atlas_juelich", "fetch_atlas_harvard_oxford", "fetch_atlas_msdl", + "fetch_atlas_pauli_2017", "fetch_atlas_schaefer_2018", "fetch_coords_power_2011", "fetch_coords_seitzman_2018", @@ -98,6 +101,7 @@ "fetch_megatrawls_netmats", "fetch_surf_nki_enhanced", "fetch_development_fmri", + "fetch_ds000030_urls", "fetch_surf_fsaverage", "fetch_atlas_basc_multiscale_2015", "fetch_coords_dosenbach_2010",
{"golden_diff": "diff --git a/nilearn/datasets/__init__.py b/nilearn/datasets/__init__.py\n--- a/nilearn/datasets/__init__.py\n+++ b/nilearn/datasets/__init__.py\n@@ -10,6 +10,7 @@\n fetch_atlas_harvard_oxford,\n fetch_atlas_juelich,\n fetch_atlas_msdl,\n+ fetch_atlas_pauli_2017,\n fetch_atlas_schaefer_2018,\n fetch_atlas_smith_2009,\n fetch_atlas_surf_destrieux,\n@@ -24,6 +25,7 @@\n fetch_adhd,\n fetch_bids_langloc_dataset,\n fetch_development_fmri,\n+ fetch_ds000030_urls,\n fetch_fiac_first_level,\n fetch_haxby,\n fetch_language_localizer_demo_dataset,\n@@ -86,6 +88,7 @@\n \"fetch_atlas_juelich\",\n \"fetch_atlas_harvard_oxford\",\n \"fetch_atlas_msdl\",\n+ \"fetch_atlas_pauli_2017\",\n \"fetch_atlas_schaefer_2018\",\n \"fetch_coords_power_2011\",\n \"fetch_coords_seitzman_2018\",\n@@ -98,6 +101,7 @@\n \"fetch_megatrawls_netmats\",\n \"fetch_surf_nki_enhanced\",\n \"fetch_development_fmri\",\n+ \"fetch_ds000030_urls\",\n \"fetch_surf_fsaverage\",\n \"fetch_atlas_basc_multiscale_2015\",\n \"fetch_coords_dosenbach_2010\",\n", "issue": "Documentation builder failure on main\nhttps://github.com/nilearn/nilearn/actions/workflows/build-docs.yml\r\n\r\nstarted occurring after merging #3698 (doubt it is related given the content of the PR)\r\nhttps://github.com/nilearn/nilearn/actions/runs/4741116007\r\n\r\n\n", "before_files": [{"content": "\"\"\"Helper functions to download NeuroImaging datasets.\"\"\"\n\nfrom .atlas import (\n fetch_atlas_aal,\n fetch_atlas_allen_2011,\n fetch_atlas_basc_multiscale_2015,\n fetch_atlas_craddock_2012,\n fetch_atlas_destrieux_2009,\n fetch_atlas_difumo,\n fetch_atlas_harvard_oxford,\n fetch_atlas_juelich,\n fetch_atlas_msdl,\n fetch_atlas_schaefer_2018,\n fetch_atlas_smith_2009,\n fetch_atlas_surf_destrieux,\n fetch_atlas_talairach,\n fetch_atlas_yeo_2011,\n fetch_coords_dosenbach_2010,\n fetch_coords_power_2011,\n fetch_coords_seitzman_2018,\n)\nfrom .func import (\n fetch_abide_pcp,\n fetch_adhd,\n fetch_bids_langloc_dataset,\n fetch_development_fmri,\n fetch_fiac_first_level,\n fetch_haxby,\n fetch_language_localizer_demo_dataset,\n fetch_localizer_button_task,\n fetch_localizer_calculation_task,\n fetch_localizer_contrasts,\n fetch_localizer_first_level,\n fetch_megatrawls_netmats,\n fetch_mixed_gambles,\n fetch_miyawaki2008,\n fetch_openneuro_dataset,\n fetch_openneuro_dataset_index,\n fetch_spm_auditory,\n fetch_spm_multimodal_fmri,\n fetch_surf_nki_enhanced,\n patch_openneuro_dataset,\n select_from_index,\n)\nfrom .neurovault import (\n fetch_neurovault,\n fetch_neurovault_auditory_computation_task,\n fetch_neurovault_ids,\n fetch_neurovault_motor_task,\n)\nfrom .struct import (\n GM_MNI152_FILE_PATH,\n MNI152_FILE_PATH,\n WM_MNI152_FILE_PATH,\n fetch_icbm152_2009,\n fetch_icbm152_brain_gm_mask,\n fetch_oasis_vbm,\n fetch_surf_fsaverage,\n load_mni152_brain_mask,\n load_mni152_gm_mask,\n load_mni152_gm_template,\n load_mni152_template,\n load_mni152_wm_mask,\n load_mni152_wm_template,\n)\nfrom .utils import get_data_dirs, load_sample_motor_activation_image\n\n__all__ = [\n \"MNI152_FILE_PATH\",\n \"GM_MNI152_FILE_PATH\",\n \"WM_MNI152_FILE_PATH\",\n \"fetch_icbm152_2009\",\n \"load_mni152_template\",\n \"load_mni152_gm_template\",\n \"load_mni152_wm_template\",\n \"fetch_oasis_vbm\",\n \"fetch_haxby\",\n \"fetch_adhd\",\n \"fetch_miyawaki2008\",\n \"fetch_localizer_contrasts\",\n \"fetch_localizer_button_task\",\n \"fetch_abide_pcp\",\n \"fetch_localizer_calculation_task\",\n \"fetch_atlas_craddock_2012\",\n \"fetch_atlas_destrieux_2009\",\n 
\"fetch_atlas_juelich\",\n \"fetch_atlas_harvard_oxford\",\n \"fetch_atlas_msdl\",\n \"fetch_atlas_schaefer_2018\",\n \"fetch_coords_power_2011\",\n \"fetch_coords_seitzman_2018\",\n \"fetch_atlas_smith_2009\",\n \"fetch_atlas_allen_2011\",\n \"fetch_atlas_yeo_2011\",\n \"fetch_mixed_gambles\",\n \"fetch_atlas_aal\",\n \"fetch_atlas_difumo\",\n \"fetch_megatrawls_netmats\",\n \"fetch_surf_nki_enhanced\",\n \"fetch_development_fmri\",\n \"fetch_surf_fsaverage\",\n \"fetch_atlas_basc_multiscale_2015\",\n \"fetch_coords_dosenbach_2010\",\n \"fetch_neurovault\",\n \"fetch_neurovault_ids\",\n \"fetch_neurovault_motor_task\",\n \"fetch_neurovault_auditory_computation_task\",\n \"load_mni152_brain_mask\",\n \"load_mni152_gm_mask\",\n \"load_mni152_wm_mask\",\n \"fetch_icbm152_brain_gm_mask\",\n \"fetch_atlas_surf_destrieux\",\n \"fetch_atlas_talairach\",\n \"get_data_dirs\",\n \"load_sample_motor_activation_image\",\n \"fetch_language_localizer_demo_dataset\",\n \"fetch_bids_langloc_dataset\",\n \"fetch_openneuro_dataset_index\",\n \"select_from_index\",\n \"patch_openneuro_dataset\",\n \"fetch_openneuro_dataset\",\n \"fetch_localizer_first_level\",\n \"fetch_spm_auditory\",\n \"fetch_spm_multimodal_fmri\",\n \"fetch_fiac_first_level\",\n]\n", "path": "nilearn/datasets/__init__.py"}]}
2,037
388
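The golden diff suggests the docs build failed because two documented fetchers were never exported from nilearn.datasets. The invariant it restores (everything the docs reference must appear in the package namespace) can be checked generically; missing_exports below is an illustrative helper, demonstrated on the stdlib so it runs without nilearn installed.

```python
import importlib

def missing_exports(module_name, required):
    """Return the required names a module fails to export."""
    mod = importlib.import_module(module_name)
    exported = set(getattr(mod, "__all__", dir(mod)))
    return sorted(name for name in required if name not in exported)

# With the patch applied, this would be expected to return []:
#   missing_exports("nilearn.datasets",
#                   ["fetch_atlas_pauli_2017", "fetch_ds000030_urls"])
print(missing_exports("json", ["loads", "dumps"]))  # stdlib demo -> []
```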
gh_patches_debug_1413
rasdani/github-patches
git_diff
gratipay__gratipay.com-1314
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> reset.css doesn't load sometimes @clone1018 saw this when we first started caching static assets. It's why I turned off static caching initially. Now static caching is back with #1245 and indeed we're seeing this again. :( ![2013-08-06_11-47-55](https://f.cloud.github.com/assets/134455/931202/acae7e70-0026-11e3-8624-91825c6e8726.png) </issue> <code> [start of gittip/cache_static.py] 1 """ 2 Handles caching of static resources. 3 """ 4 import os 5 from calendar import timegm 6 from email.utils import parsedate 7 from wsgiref.handlers import format_date_time 8 9 from aspen import Response 10 11 12 def version_is_available(request): 13 """Return a boolean, whether we have the version they asked for. 14 """ 15 path = request.line.uri.path 16 version = request.website.version 17 return path['version'] == version if 'version' in path else True 18 19 20 def version_is_dash(request): 21 """Return a boolean, whether the version they asked for is -. 22 """ 23 return request.line.uri.path.get('version') == '-' 24 25 26 def get_last_modified(fs_path): 27 """Get the last modified time, as int, of the file pointed to by fs_path. 28 """ 29 return int(os.path.getctime(fs_path)) 30 31 32 def inbound(request): 33 """Try to serve a 304 for resources under assets/. 34 """ 35 uri = request.line.uri 36 37 if not uri.startswith('/assets/'): 38 39 # Only apply to the assets/ directory. 40 41 return request 42 43 if version_is_dash(request): 44 45 # Special-case a version of '-' to never 304/404 here. 46 47 return request 48 49 if not version_is_available(request): 50 51 # Don't serve one version of a file as if it were another. 52 53 raise Response(404) 54 55 ims = request.headers.get('If-Modified-Since') 56 if not ims: 57 58 # This client doesn't care about when the file was modified. 59 60 return request 61 62 if request.fs.endswith('.spt'): 63 64 # This is a requests for a dynamic resource. Perhaps in the future 65 # we'll delegate to such resources to compute a sensible Last-Modified 66 # or E-Tag, but for now we punt. This is okay, because we expect to 67 # put our dynamic assets behind a CDN in production. 68 69 return request 70 71 72 try: 73 ims = timegm(parsedate(ims)) 74 except: 75 76 # Malformed If-Modified-Since header. Proceed with the request. 77 78 return request 79 80 last_modified = get_last_modified(request.fs) 81 if ims < last_modified: 82 83 # The file has been modified since. Serve the whole thing. 84 85 return request 86 87 88 # Huzzah! 89 # ======= 90 # We can serve a 304! :D 91 92 response = Response(304) 93 response.headers['Last-Modified'] = format_date_time(last_modified) 94 response.headers['Cache-Control'] = 'no-cache' 95 raise response 96 97 98 def outbound(response): 99 """Set caching headers for resources under assets/. 100 """ 101 request = response.request 102 website = request.website 103 uri = request.line.uri 104 105 version = website.version 106 response.headers['X-Gittip-Version'] = version 107 108 if not uri.startswith('/assets/'): 109 return response 110 111 response.headers.cookie.clear() 112 113 if response.code == 304: 114 return response 115 116 if website.cache_static: 117 118 # https://developers.google.com/speed/docs/best-practices/caching 119 response.headers['Cache-Control'] = 'public' 120 response.headers['Vary'] = 'accept-encoding' 121 122 if 'version' in uri.path: 123 # This specific asset is versioned, so it's fine to cache it. 
124 response.headers['Expires'] = 'Sun, 17 Jan 2038 19:14:07 GMT' 125 else: 126 # Asset is not versioned. Don't cache it, but set Last-Modified. 127 last_modified = get_last_modified(request.fs) 128 response.headers['Last-Modified'] = format_date_time(last_modified) 129 [end of gittip/cache_static.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/gittip/cache_static.py b/gittip/cache_static.py --- a/gittip/cache_static.py +++ b/gittip/cache_static.py @@ -111,6 +111,10 @@ response.headers.cookie.clear() if response.code == 304: + + # https://github.com/gittip/www.gittip.com/issues/1308 + del response.headers['Content-Type'] + return response if website.cache_static:
{"golden_diff": "diff --git a/gittip/cache_static.py b/gittip/cache_static.py\n--- a/gittip/cache_static.py\n+++ b/gittip/cache_static.py\n@@ -111,6 +111,10 @@\n response.headers.cookie.clear()\n \n if response.code == 304:\n+\n+ # https://github.com/gittip/www.gittip.com/issues/1308\n+ del response.headers['Content-Type']\n+\n return response\n \n if website.cache_static:\n", "issue": "reset.css doesn't load sometimes\n@clone1018 saw this when we first started caching static assets. It's why I turned off static caching initially. Now static caching is back with #1245 and indeed we're seeing this again. :(\n\n![2013-08-06_11-47-55](https://f.cloud.github.com/assets/134455/931202/acae7e70-0026-11e3-8624-91825c6e8726.png)\n\n", "before_files": [{"content": "\"\"\"\nHandles caching of static resources.\n\"\"\"\nimport os\nfrom calendar import timegm\nfrom email.utils import parsedate\nfrom wsgiref.handlers import format_date_time\n\nfrom aspen import Response\n\n\ndef version_is_available(request):\n \"\"\"Return a boolean, whether we have the version they asked for.\n \"\"\"\n path = request.line.uri.path\n version = request.website.version\n return path['version'] == version if 'version' in path else True\n\n\ndef version_is_dash(request):\n \"\"\"Return a boolean, whether the version they asked for is -.\n \"\"\"\n return request.line.uri.path.get('version') == '-'\n\n\ndef get_last_modified(fs_path):\n \"\"\"Get the last modified time, as int, of the file pointed to by fs_path.\n \"\"\"\n return int(os.path.getctime(fs_path))\n\n\ndef inbound(request):\n \"\"\"Try to serve a 304 for resources under assets/.\n \"\"\"\n uri = request.line.uri\n\n if not uri.startswith('/assets/'):\n\n # Only apply to the assets/ directory.\n\n return request\n\n if version_is_dash(request):\n\n # Special-case a version of '-' to never 304/404 here.\n\n return request\n\n if not version_is_available(request):\n\n # Don't serve one version of a file as if it were another.\n\n raise Response(404)\n\n ims = request.headers.get('If-Modified-Since')\n if not ims:\n\n # This client doesn't care about when the file was modified.\n\n return request\n\n if request.fs.endswith('.spt'):\n\n # This is a requests for a dynamic resource. Perhaps in the future\n # we'll delegate to such resources to compute a sensible Last-Modified\n # or E-Tag, but for now we punt. This is okay, because we expect to\n # put our dynamic assets behind a CDN in production.\n\n return request\n\n\n try:\n ims = timegm(parsedate(ims))\n except:\n\n # Malformed If-Modified-Since header. Proceed with the request.\n\n return request\n\n last_modified = get_last_modified(request.fs)\n if ims < last_modified:\n\n # The file has been modified since. Serve the whole thing.\n\n return request\n\n\n # Huzzah!\n # =======\n # We can serve a 304! 
:D\n\n response = Response(304)\n response.headers['Last-Modified'] = format_date_time(last_modified)\n response.headers['Cache-Control'] = 'no-cache'\n raise response\n\n\ndef outbound(response):\n \"\"\"Set caching headers for resources under assets/.\n \"\"\"\n request = response.request\n website = request.website\n uri = request.line.uri\n\n version = website.version\n response.headers['X-Gittip-Version'] = version\n\n if not uri.startswith('/assets/'):\n return response\n\n response.headers.cookie.clear()\n\n if response.code == 304:\n return response\n\n if website.cache_static:\n\n # https://developers.google.com/speed/docs/best-practices/caching\n response.headers['Cache-Control'] = 'public'\n response.headers['Vary'] = 'accept-encoding'\n\n if 'version' in uri.path:\n # This specific asset is versioned, so it's fine to cache it.\n response.headers['Expires'] = 'Sun, 17 Jan 2038 19:14:07 GMT'\n else:\n # Asset is not versioned. Don't cache it, but set Last-Modified.\n last_modified = get_last_modified(request.fs)\n response.headers['Last-Modified'] = format_date_time(last_modified)\n", "path": "gittip/cache_static.py"}]}
1,764
112
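Note on this record: the golden diff simply deletes `Content-Type` from 304 responses. A minimal sketch of that pattern follows; the `finalize_not_modified` helper and the header values are illustrative assumptions, not gratipay's or Aspen's actual API. The idea is that a 304 carries no body, so an entity header left over from an earlier handler can mislead browsers and caches about the media type of the asset they already hold.

```python
# Sketch: strip entity headers from a 304 before sending it.
def finalize_not_modified(headers):
    for entity_header in ("Content-Type", "Content-Length"):
        headers.pop(entity_header, None)  # pop with a default avoids KeyError
    return headers


if __name__ == "__main__":
    headers = {
        "Content-Type": "text/html",  # wrong type for a cached reset.css
        "Last-Modified": "Tue, 06 Aug 2013 15:47:55 GMT",
        "Cache-Control": "no-cache",
    }
    print(finalize_not_modified(headers))  # Content-Type is gone
```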
gh_patches_debug_30152
rasdani/github-patches
git_diff
wagtail__wagtail-9973
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Setting WAGTAILIMAGES_RENDITION_STORAGE generates a migration in wagtailimages ### Issue Summary Running `./manage.py makemigrations` while WAGTAILIMAGES_RENDITION_STORAGE is set to something other than the default storage causes a migration to be generated within the wagtailimages app ### Steps to Reproduce 1. (for example) Start a new project with `wagtail start myproject` 2. Run `./manage.py migrate` and `./manage.py makemigrations`; this outputs "No changes detected" 3. `pip install django-storages` 4. Add the line `WAGTAILIMAGES_RENDITION_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"` to myproject/settings/base.py 5. Run `./manage.py makemigrations`; this generates a migration `wagtail/images/migrations/0026_alter_rendition_file.py` that adds a `storage` argument to the Rendition.file field. - I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: yes ### Technical details - Python version: 3.8.0 - Django version: 4.1.3 - Wagtail version: main (4.2a0, 4b770784ca68f22d5ea58ecbd01e5c8c13882a3d) </issue> <code> [start of wagtail/images/migrations/0025_alter_image_file_alter_rendition_file.py] 1 # Generated by Django 4.0.7 on 2022-08-10 16:26 2 3 from django.db import migrations 4 import wagtail.images.models 5 6 7 class Migration(migrations.Migration): 8 9 dependencies = [ 10 ("wagtailimages", "0024_index_image_file_hash"), 11 ] 12 13 operations = [ 14 migrations.AlterField( 15 model_name="image", 16 name="file", 17 field=wagtail.images.models.WagtailImageField( 18 height_field="height", 19 upload_to=wagtail.images.models.get_upload_to, 20 verbose_name="file", 21 width_field="width", 22 ), 23 ), 24 migrations.AlterField( 25 model_name="rendition", 26 name="file", 27 field=wagtail.images.models.WagtailImageField( 28 height_field="height", 29 upload_to=wagtail.images.models.get_rendition_upload_to, 30 width_field="width", 31 ), 32 ), 33 ] 34 [end of wagtail/images/migrations/0025_alter_image_file_alter_rendition_file.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/wagtail/images/migrations/0025_alter_image_file_alter_rendition_file.py b/wagtail/images/migrations/0025_alter_image_file_alter_rendition_file.py --- a/wagtail/images/migrations/0025_alter_image_file_alter_rendition_file.py +++ b/wagtail/images/migrations/0025_alter_image_file_alter_rendition_file.py @@ -1,5 +1,6 @@ # Generated by Django 4.0.7 on 2022-08-10 16:26 +from django import VERSION as DJANGO_VERSION from django.db import migrations import wagtail.images.models @@ -10,6 +11,19 @@ ("wagtailimages", "0024_index_image_file_hash"), ] + rendition_file_options = { + "height_field": "height", + "upload_to": wagtail.images.models.get_rendition_upload_to, + "width_field": "width", + } + # See https://code.djangoproject.com/ticket/34192 - prior to Django 4.2, a callable storage + # argument that returns default_storage would be incorrectly omitted from the deconstructed + # field. We need to match that behaviour and include/omit it accordingly to prevent + # makemigrations from seeing a difference and generating a spurious migration in + # wagtail.images. + if DJANGO_VERSION >= (4, 2): + rendition_file_options["storage"] = wagtail.images.models.get_rendition_storage + operations = [ migrations.AlterField( model_name="image", @@ -24,10 +38,6 @@ migrations.AlterField( model_name="rendition", name="file", - field=wagtail.images.models.WagtailImageField( - height_field="height", - upload_to=wagtail.images.models.get_rendition_upload_to, - width_field="width", - ), + field=wagtail.images.models.WagtailImageField(**rendition_file_options), ), ]
{"golden_diff": "diff --git a/wagtail/images/migrations/0025_alter_image_file_alter_rendition_file.py b/wagtail/images/migrations/0025_alter_image_file_alter_rendition_file.py\n--- a/wagtail/images/migrations/0025_alter_image_file_alter_rendition_file.py\n+++ b/wagtail/images/migrations/0025_alter_image_file_alter_rendition_file.py\n@@ -1,5 +1,6 @@\n # Generated by Django 4.0.7 on 2022-08-10 16:26\r\n \r\n+from django import VERSION as DJANGO_VERSION\r\n from django.db import migrations\r\n import wagtail.images.models\r\n \r\n@@ -10,6 +11,19 @@\n (\"wagtailimages\", \"0024_index_image_file_hash\"),\r\n ]\r\n \r\n+ rendition_file_options = {\r\n+ \"height_field\": \"height\",\r\n+ \"upload_to\": wagtail.images.models.get_rendition_upload_to,\r\n+ \"width_field\": \"width\",\r\n+ }\r\n+ # See https://code.djangoproject.com/ticket/34192 - prior to Django 4.2, a callable storage\r\n+ # argument that returns default_storage would be incorrectly omitted from the deconstructed\r\n+ # field. We need to match that behaviour and include/omit it accordingly to prevent\r\n+ # makemigrations from seeing a difference and generating a spurious migration in\r\n+ # wagtail.images.\r\n+ if DJANGO_VERSION >= (4, 2):\r\n+ rendition_file_options[\"storage\"] = wagtail.images.models.get_rendition_storage\r\n+\r\n operations = [\r\n migrations.AlterField(\r\n model_name=\"image\",\r\n@@ -24,10 +38,6 @@\n migrations.AlterField(\r\n model_name=\"rendition\",\r\n name=\"file\",\r\n- field=wagtail.images.models.WagtailImageField(\r\n- height_field=\"height\",\r\n- upload_to=wagtail.images.models.get_rendition_upload_to,\r\n- width_field=\"width\",\r\n- ),\r\n+ field=wagtail.images.models.WagtailImageField(**rendition_file_options),\r\n ),\r\n ]\n", "issue": "Setting WAGTAILIMAGES_RENDITION_STORAGE generates a migration in wagtailimages\n### Issue Summary\r\n\r\nRunning `./manage.py makemigrations` while WAGTAILIMAGES_RENDITION_STORAGE is set to something other than the default storage causes a migration to be generated within the wagtailimages app\r\n\r\n### Steps to Reproduce\r\n\r\n1. (for example) Start a new project with `wagtail start myproject`\r\n2. Run `./manage.py migrate` and `./manage.py makemigrations`; this outputs \"No changes detected\"\r\n3. `pip install django-storages`\r\n4. Add the line `WAGTAILIMAGES_RENDITION_STORAGE = \"storages.backends.s3boto3.S3Boto3Storage\"` to myproject/settings/base.py\r\n5. 
Run `./manage.py makemigrations`; this generates a migration `wagtail/images/migrations/0026_alter_rendition_file.py` that adds a `storage` argument to the Rendition.file field.\r\n\r\n- I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: yes\r\n\r\n### Technical details\r\n\r\n- Python version: 3.8.0\r\n- Django version: 4.1.3\r\n- Wagtail version: main (4.2a0, 4b770784ca68f22d5ea58ecbd01e5c8c13882a3d)\r\n\n", "before_files": [{"content": "# Generated by Django 4.0.7 on 2022-08-10 16:26\r\n\r\nfrom django.db import migrations\r\nimport wagtail.images.models\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n (\"wagtailimages\", \"0024_index_image_file_hash\"),\r\n ]\r\n\r\n operations = [\r\n migrations.AlterField(\r\n model_name=\"image\",\r\n name=\"file\",\r\n field=wagtail.images.models.WagtailImageField(\r\n height_field=\"height\",\r\n upload_to=wagtail.images.models.get_upload_to,\r\n verbose_name=\"file\",\r\n width_field=\"width\",\r\n ),\r\n ),\r\n migrations.AlterField(\r\n model_name=\"rendition\",\r\n name=\"file\",\r\n field=wagtail.images.models.WagtailImageField(\r\n height_field=\"height\",\r\n upload_to=wagtail.images.models.get_rendition_upload_to,\r\n width_field=\"width\",\r\n ),\r\n ),\r\n ]\r\n", "path": "wagtail/images/migrations/0025_alter_image_file_alter_rendition_file.py"}]}
1,149
483
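Note on this record: the golden diff gates a field keyword on the running Django version so that `makemigrations` sees the same field deconstruction everywhere. A generic sketch of that version-gated migration pattern, assuming a configured Django 3.1+ project; the app label, model name, and `get_storage` callable are placeholders rather than wagtail's real names:

```python
from django import VERSION as DJANGO_VERSION
from django.core.files.storage import default_storage
from django.db import migrations, models


def get_storage():
    # Callable storage: resolved lazily, so it can differ per deployment.
    return default_storage


field_options = {"upload_to": "renditions/"}
if DJANGO_VERSION >= (4, 2):
    # Only Django >= 4.2 reports a callable default storage when the field
    # is deconstructed (Django ticket #34192), so include the kwarg only
    # there to keep makemigrations from seeing a phantom change elsewhere.
    field_options["storage"] = get_storage


class Migration(migrations.Migration):

    dependencies = [("myapp", "0001_initial")]  # placeholder dependency

    operations = [
        migrations.AlterField(
            model_name="rendition",
            name="file",
            field=models.FileField(**field_options),
        ),
    ]
```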
gh_patches_debug_30208
rasdani/github-patches
git_diff
microsoft__DeepSpeed-3348
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG] Size of saved model checkpoint becomes much larger after deepspeed.initialize when using ZeRO-2 **Describe the bug** Originally reported [here](https://github.com/huggingface/transformers/issues/22822). @stas00 @tjruwase For some models, the size of model checkpoints saved by `model.save_prtrained()` becomes much larger after calling `deepspeed.initialize`. See examples below. **To Reproduce** ```python from transformers import AutoModelForCausalLM import deepspeed ds_config = { "optimizer": { "type": "AdamW", }, "zero_optimization": { "stage": 2, "offload_optimizer": { "device": "cpu", "pin_memory": True }, "allgather_partitions": True, "allgather_bucket_size": 2e8, "overlap_comm": True, "reduce_scatter": True, "reduce_bucket_size": 2e8, "contiguous_gradients": True }, "offload_optimizer": { "device": "cpu", "pin_memory": True }, "train_batch_size": 1, "train_micro_batch_size_per_gpu": 1 } model = AutoModelForCausalLM.from_pretrained("decapoda-research/llama-7b-hf") model.save_pretrained("before") deepspeed_engine, _, _, _ = deepspeed.initialize(model=model, config_params=ds_config) deepspeed_engine.module.save_pretrained("after") ``` File sizes: ```bash du -a -h --max-depth=1 before/ 512 before/config.json 32K before/pytorch_model.bin.index.json 9.2G before/pytorch_model-00001-of-00003.bin 9.3G before/pytorch_model-00002-of-00003.bin 6.7G before/pytorch_model-00003-of-00003.bin 512 before/generation_config.json 26G before/ du -a -h --max-depth=1 after/ 512 after/config.json 32K after/pytorch_model.bin.index.json 26G after/pytorch_model-00001-of-00003.bin 26G after/pytorch_model-00002-of-00003.bin 26G after/pytorch_model-00003-of-00003.bin 512 after/generation_config.json 76G after/ ``` This issue is not always occurred, for example, `gpt2` does not have this problem. But I tested `decapoda-research/llama-7b-hf`, and `decapoda-research/llama-13b-hf` have this issue. This can be fixed by re-clone states before the saving: ```python state_dict = deepspeed_engine.module.state_dict() state_dict = type(state_dict)( {k: v.clone() for k, v in state_dict.items()}) deepspeed_engine.module.save_pretrained("after_fixed", state_dict=state_dict) ``` **Expected behavior** The saved model size should be unchanged after `deepspeed.initialize` **System info (please complete the following information):** - deepspeed: 0.8.3 - transformers version: 4.28.0.dev0 - Platform: Linux-4.18.0-372.32.1.el8_6.x86_64-x86_64-with-glibc2.17 - Python version: 3.8.16 - Huggingface_hub version: 0.13.3 - Safetensors version: not installed - PyTorch version (GPU?): 1.12.1+cu116 (True) - Tensorflow version (GPU?): not installed (NA) - Flax version (CPU?/GPU?/TPU?): not installed (NA) - Jax version: not installed - JaxLib version: not installed - Using GPU in script?: yes - Using distributed or parallel set-up in script?: yes </issue> <code> [start of deepspeed/checkpoint/utils.py] 1 # Copyright (c) Microsoft Corporation. 
2 # SPDX-License-Identifier: Apache-2.0 3 4 # DeepSpeed Team 5 6 import os 7 from .constants import (MODEL_FILE_PREFIX, MODEL_FILE_SUFFIX, OPTIM_FILE_SUFFIX, ZERO_FILE_PREFIX) 8 9 10 def get_model_ckpt_name_for_rank(base_folder, mp_rank_str): 11 ckpt_name = os.path.join( 12 base_folder, 13 MODEL_FILE_PREFIX + mp_rank_str + MODEL_FILE_SUFFIX, 14 ) 15 return ckpt_name 16 17 18 def get_zero_ckpt_name_for_rank(base_folder, dp_rank, mp_rank): 19 zero_prefix = f'{ZERO_FILE_PREFIX}{dp_rank}' 20 mp_rank_string = f'_{MODEL_FILE_PREFIX}{mp_rank:02d}' 21 zero_ckpt_name = os.path.join( 22 base_folder, 23 zero_prefix + mp_rank_string + OPTIM_FILE_SUFFIX, 24 ) 25 return zero_ckpt_name 26 27 28 def get_layer_ckpt_name_for_rank(base_folder, layer_id, tp_rank): 29 ckpt_file = f'{layer_id}-model_{tp_rank:02d}{MODEL_FILE_SUFFIX}' 30 ckpt_path = os.path.join(base_folder, ckpt_file) 31 return ckpt_path 32 [end of deepspeed/checkpoint/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/deepspeed/checkpoint/utils.py b/deepspeed/checkpoint/utils.py --- a/deepspeed/checkpoint/utils.py +++ b/deepspeed/checkpoint/utils.py @@ -4,6 +4,7 @@ # DeepSpeed Team import os +import torch from .constants import (MODEL_FILE_PREFIX, MODEL_FILE_SUFFIX, OPTIM_FILE_SUFFIX, ZERO_FILE_PREFIX) @@ -29,3 +30,33 @@ ckpt_file = f'{layer_id}-model_{tp_rank:02d}{MODEL_FILE_SUFFIX}' ckpt_path = os.path.join(base_folder, ckpt_file) return ckpt_path + + +# We pass cloned tensors to torch.save() to avoid checkpoint bloat that occurs when torch.save() +# saves the underlying storage rather than the slice of the storage corresponding to individual tensors. +# This is a problem in DeepSpeed because we often allocate tensors using slices of large flattened buffers. +# Tensor cloning helps to avoid this problem because the storage of cloned tensors are closer to the true size. +# It is expected that the garbage collector will reclaim the cloned tensor storage to avoid memory bloat. +# See https://pytorch.org/docs/stable/notes/serialization.html#preserve-storage-sharing +def clone_tensors_for_torch_save(item, device=torch.device('cpu')): + """ + Returns a copy of ``item`` with all enclosed tensors replaced by clones on a specified device. + Works on individual tensors, and tensors contained/nested in lists, tuples, and dicts. + + Parameters: + - ``item``: tensor to clone or (possibly nested) container of tensors to clone. + - ``device``: target device (defaults to 'cpu') + + Returns: + - copy of ``item`` with cloned tensors on target device + """ + if torch.is_tensor(item): + return item.detach().clone().to(device) + elif isinstance(item, list): + return [clone_tensors_for_torch_save(v, device) for v in item] + elif isinstance(item, tuple): + return tuple([clone_tensors_for_torch_save(v, device) for v in item]) + elif isinstance(item, dict): + return type(item)({k: clone_tensors_for_torch_save(v, device) for k, v in item.items()}) + else: + return item
{"golden_diff": "diff --git a/deepspeed/checkpoint/utils.py b/deepspeed/checkpoint/utils.py\n--- a/deepspeed/checkpoint/utils.py\n+++ b/deepspeed/checkpoint/utils.py\n@@ -4,6 +4,7 @@\n # DeepSpeed Team\n \n import os\n+import torch\n from .constants import (MODEL_FILE_PREFIX, MODEL_FILE_SUFFIX, OPTIM_FILE_SUFFIX, ZERO_FILE_PREFIX)\n \n \n@@ -29,3 +30,33 @@\n ckpt_file = f'{layer_id}-model_{tp_rank:02d}{MODEL_FILE_SUFFIX}'\n ckpt_path = os.path.join(base_folder, ckpt_file)\n return ckpt_path\n+\n+\n+# We pass cloned tensors to torch.save() to avoid checkpoint bloat that occurs when torch.save()\n+# saves the underlying storage rather than the slice of the storage corresponding to individual tensors.\n+# This is a problem in DeepSpeed because we often allocate tensors using slices of large flattened buffers.\n+# Tensor cloning helps to avoid this problem because the storage of cloned tensors are closer to the true size.\n+# It is expected that the garbage collector will reclaim the cloned tensor storage to avoid memory bloat.\n+# See https://pytorch.org/docs/stable/notes/serialization.html#preserve-storage-sharing\n+def clone_tensors_for_torch_save(item, device=torch.device('cpu')):\n+ \"\"\"\n+ Returns a copy of ``item`` with all enclosed tensors replaced by clones on a specified device.\n+ Works on individual tensors, and tensors contained/nested in lists, tuples, and dicts.\n+\n+ Parameters:\n+ - ``item``: tensor to clone or (possibly nested) container of tensors to clone.\n+ - ``device``: target device (defaults to 'cpu')\n+\n+ Returns:\n+ - copy of ``item`` with cloned tensors on target device\n+ \"\"\"\n+ if torch.is_tensor(item):\n+ return item.detach().clone().to(device)\n+ elif isinstance(item, list):\n+ return [clone_tensors_for_torch_save(v, device) for v in item]\n+ elif isinstance(item, tuple):\n+ return tuple([clone_tensors_for_torch_save(v, device) for v in item])\n+ elif isinstance(item, dict):\n+ return type(item)({k: clone_tensors_for_torch_save(v, device) for k, v in item.items()})\n+ else:\n+ return item\n", "issue": "[BUG] Size of saved model checkpoint becomes much larger after deepspeed.initialize when using ZeRO-2\n**Describe the bug**\r\nOriginally reported [here](https://github.com/huggingface/transformers/issues/22822). @stas00 @tjruwase\r\n\r\nFor some models, the size of model checkpoints saved by `model.save_prtrained()` becomes much larger after calling `deepspeed.initialize`. 
See examples below.\r\n\r\n\r\n**To Reproduce**\r\n```python\r\nfrom transformers import AutoModelForCausalLM\r\nimport deepspeed\r\n\r\nds_config = {\r\n \"optimizer\": {\r\n \"type\": \"AdamW\",\r\n },\r\n \"zero_optimization\": {\r\n \"stage\": 2,\r\n \"offload_optimizer\": {\r\n \"device\": \"cpu\",\r\n \"pin_memory\": True\r\n },\r\n \"allgather_partitions\": True,\r\n \"allgather_bucket_size\": 2e8,\r\n \"overlap_comm\": True,\r\n \"reduce_scatter\": True,\r\n \"reduce_bucket_size\": 2e8,\r\n \"contiguous_gradients\": True\r\n },\r\n \"offload_optimizer\": {\r\n \"device\": \"cpu\",\r\n \"pin_memory\": True\r\n },\r\n \"train_batch_size\": 1,\r\n \"train_micro_batch_size_per_gpu\": 1\r\n}\r\n\r\nmodel = AutoModelForCausalLM.from_pretrained(\"decapoda-research/llama-7b-hf\")\r\nmodel.save_pretrained(\"before\")\r\ndeepspeed_engine, _, _, _ = deepspeed.initialize(model=model, config_params=ds_config)\r\ndeepspeed_engine.module.save_pretrained(\"after\")\r\n```\r\n\r\nFile sizes:\r\n\r\n```bash\r\ndu -a -h --max-depth=1 before/\r\n512 before/config.json\r\n32K before/pytorch_model.bin.index.json\r\n9.2G before/pytorch_model-00001-of-00003.bin\r\n9.3G before/pytorch_model-00002-of-00003.bin\r\n6.7G before/pytorch_model-00003-of-00003.bin\r\n512 before/generation_config.json\r\n26G before/\r\n\r\ndu -a -h --max-depth=1 after/\r\n512 after/config.json\r\n32K after/pytorch_model.bin.index.json\r\n26G after/pytorch_model-00001-of-00003.bin\r\n26G after/pytorch_model-00002-of-00003.bin\r\n26G after/pytorch_model-00003-of-00003.bin\r\n512 after/generation_config.json\r\n76G after/\r\n```\r\n\r\nThis issue is not always occurred, for example, `gpt2` does not have this problem. But I tested `decapoda-research/llama-7b-hf`, and `decapoda-research/llama-13b-hf` have this issue.\r\n\r\nThis can be fixed by re-clone states before the saving:\r\n```python\r\nstate_dict = deepspeed_engine.module.state_dict()\r\nstate_dict = type(state_dict)(\r\n {k: v.clone()\r\n for k,\r\n v in state_dict.items()})\r\ndeepspeed_engine.module.save_pretrained(\"after_fixed\", state_dict=state_dict)\r\n```\r\n\r\n**Expected behavior**\r\nThe saved model size should be unchanged after `deepspeed.initialize`\r\n\r\n**System info (please complete the following information):**\r\n- deepspeed: 0.8.3\r\n- transformers version: 4.28.0.dev0\r\n- Platform: Linux-4.18.0-372.32.1.el8_6.x86_64-x86_64-with-glibc2.17\r\n- Python version: 3.8.16\r\n- Huggingface_hub version: 0.13.3\r\n- Safetensors version: not installed\r\n- PyTorch version (GPU?): 1.12.1+cu116 (True)\r\n- Tensorflow version (GPU?): not installed (NA)\r\n- Flax version (CPU?/GPU?/TPU?): not installed (NA)\r\n- Jax version: not installed\r\n- JaxLib version: not installed\r\n- Using GPU in script?: yes\r\n- Using distributed or parallel set-up in script?: yes\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# SPDX-License-Identifier: Apache-2.0\n\n# DeepSpeed Team\n\nimport os\nfrom .constants import (MODEL_FILE_PREFIX, MODEL_FILE_SUFFIX, OPTIM_FILE_SUFFIX, ZERO_FILE_PREFIX)\n\n\ndef get_model_ckpt_name_for_rank(base_folder, mp_rank_str):\n ckpt_name = os.path.join(\n base_folder,\n MODEL_FILE_PREFIX + mp_rank_str + MODEL_FILE_SUFFIX,\n )\n return ckpt_name\n\n\ndef get_zero_ckpt_name_for_rank(base_folder, dp_rank, mp_rank):\n zero_prefix = f'{ZERO_FILE_PREFIX}{dp_rank}'\n mp_rank_string = f'_{MODEL_FILE_PREFIX}{mp_rank:02d}'\n zero_ckpt_name = os.path.join(\n base_folder,\n zero_prefix + mp_rank_string + OPTIM_FILE_SUFFIX,\n )\n 
return zero_ckpt_name\n\n\ndef get_layer_ckpt_name_for_rank(base_folder, layer_id, tp_rank):\n ckpt_file = f'{layer_id}-model_{tp_rank:02d}{MODEL_FILE_SUFFIX}'\n ckpt_path = os.path.join(base_folder, ckpt_file)\n return ckpt_path\n", "path": "deepspeed/checkpoint/utils.py"}]}
1,793
505
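Note on this record: the root cause is that `torch.save` serializes a tensor's whole underlying storage, so a small view of a large flattened buffer checkpoints at the buffer's full size. A standalone demonstration of both the bloat and the clone-before-save pattern from the golden diff (requires only `torch`; the container handling covers plain lists, tuples, and dicts):

```python
import io

import torch


def clone_tensors_for_torch_save(item, device=torch.device("cpu")):
    """Recursively replace tensors with compact clones on `device`."""
    if torch.is_tensor(item):
        return item.detach().clone().to(device)
    if isinstance(item, (list, tuple)):
        return type(item)(clone_tensors_for_torch_save(v, device) for v in item)
    if isinstance(item, dict):
        return type(item)(
            {k: clone_tensors_for_torch_save(v, device) for k, v in item.items()}
        )
    return item


def saved_size(obj):
    buf = io.BytesIO()
    torch.save(obj, buf)
    return buf.tell()


flat = torch.zeros(1_000_000)  # one big flattened buffer (~4 MB of float32)
view = flat[:10]               # a tiny slice that shares the same storage

print(saved_size({"w": view}))                                # ~4 MB: whole storage
print(saved_size(clone_tensors_for_torch_save({"w": view})))  # tiny: just 10 floats
```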
gh_patches_debug_7862
rasdani/github-patches
git_diff
coala__coala-bears-2136
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Set setup.py url = http://coala.io/ difficulty/newcomer Opened by @jayvdb at [Gitter](https://gitter.im/coala/coala?at=5a1181aff257ad9109b396a0) </issue> <code> [start of setup.py] 1 #!/usr/bin/env python3 2 3 import locale 4 import sys 5 from subprocess import call 6 7 import setuptools.command.build_py 8 from bears import Constants 9 from setuptools import find_packages, setup 10 from setuptools.command.test import test as TestCommand 11 12 try: 13 locale.getlocale() 14 except (ValueError, UnicodeError): 15 locale.setlocale(locale.LC_ALL, 'en_US.UTF-8') 16 17 18 class PyTestCommand(TestCommand): 19 20 def run_tests(self): 21 # import here, cause outside the eggs aren't loaded 22 import pytest 23 errno = pytest.main([]) 24 sys.exit(errno) 25 26 27 class BuildDocsCommand(setuptools.command.build_py.build_py): 28 apidoc_command = ('sphinx-apidoc', '-f', '-o', 'docs/API', 29 'bears') 30 make_command = ('make', '-C', 'docs', 'html', 'SPHINXOPTS=-W') 31 32 def run(self): 33 err_no = call(self.apidoc_command) 34 if not err_no: 35 err_no = call(self.make_command) 36 sys.exit(err_no) 37 38 39 with open('requirements.txt') as requirements: 40 required = requirements.read().splitlines() 41 required.remove('-r bear-requirements.txt') 42 43 with open('bear-requirements.txt') as requirements: 44 bear_required = requirements.read().splitlines() 45 46 with open('test-requirements.txt') as requirements: 47 test_required = requirements.read().splitlines() 48 49 with open('ignore.txt') as ignore: 50 ignore_requirements = ignore.read().splitlines() 51 52 with open('README.rst') as readme: 53 long_description = readme.read() 54 55 extras_require = { 56 'alldeps': bear_required, 57 } 58 59 # For the average user we leave out some of the more complicated requirements, 60 # e.g. language-check (needs java). 
61 required += [req for req in bear_required 62 if not any(req.startswith(ignore) 63 for ignore in ignore_requirements)] 64 65 66 if __name__ == '__main__': 67 setup(name='coala-bears', 68 version=Constants.VERSION, 69 description='Bears for coala (Code Analysis Application)', 70 author='The coala developers', 71 maintainer='Lasse Schuirmann, Fabian Neuschmidt, Mischa Kr\xfcger', 72 maintainer_email=('[email protected], ' 73 '[email protected], ' 74 '[email protected]'), 75 url='http://coala.rtfd.org/', 76 platforms='any', 77 packages=find_packages(exclude=('build.*', 'tests', 'tests.*')), 78 install_requires=required, 79 extras_require=extras_require, 80 tests_require=test_required, 81 package_data={'bears': ['VERSION'], 82 'bears.java': ['checkstyle.jar', 'google_checks.xml'], 83 'bears.scala': ['scalastyle.jar', 84 'scalastyle_config.xml']}, 85 license='AGPL-3.0', 86 long_description=long_description, 87 entry_points={'coalabears': ['coala_official_bears = bears']}, 88 # from http://pypi.python.org/pypi?%3Aaction=list_classifiers 89 classifiers=[ 90 'Development Status :: 4 - Beta', 91 92 'Environment :: Plugins', 93 'Environment :: MacOS X', 94 'Environment :: Win32 (MS Windows)', 95 'Environment :: X11 Applications :: Gnome', 96 97 'Intended Audience :: Science/Research', 98 'Intended Audience :: Developers', 99 100 'License :: OSI Approved :: GNU Affero General Public License ' 101 'v3 or later (AGPLv3+)', 102 103 'Operating System :: OS Independent', 104 105 'Programming Language :: Python :: Implementation :: CPython', 106 'Programming Language :: Python :: 3.4', 107 'Programming Language :: Python :: 3.5', 108 'Programming Language :: Python :: 3 :: Only', 109 110 'Topic :: Scientific/Engineering :: Information Analysis', 111 'Topic :: Software Development :: Quality Assurance', 112 'Topic :: Text Processing :: Linguistic'], 113 cmdclass={'docs': BuildDocsCommand, 114 'test': PyTestCommand}) 115 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -72,7 +72,7 @@ maintainer_email=('[email protected], ' '[email protected], ' '[email protected]'), - url='http://coala.rtfd.org/', + url='http://coala.io/', platforms='any', packages=find_packages(exclude=('build.*', 'tests', 'tests.*')), install_requires=required,
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -72,7 +72,7 @@\n maintainer_email=('[email protected], '\n '[email protected], '\n '[email protected]'),\n- url='http://coala.rtfd.org/',\n+ url='http://coala.io/',\n platforms='any',\n packages=find_packages(exclude=('build.*', 'tests', 'tests.*')),\n install_requires=required,\n", "issue": "Set setup.py url = http://coala.io/\ndifficulty/newcomer\nOpened by @jayvdb at [Gitter](https://gitter.im/coala/coala?at=5a1181aff257ad9109b396a0)\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport locale\nimport sys\nfrom subprocess import call\n\nimport setuptools.command.build_py\nfrom bears import Constants\nfrom setuptools import find_packages, setup\nfrom setuptools.command.test import test as TestCommand\n\ntry:\n locale.getlocale()\nexcept (ValueError, UnicodeError):\n locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')\n\n\nclass PyTestCommand(TestCommand):\n\n def run_tests(self):\n # import here, cause outside the eggs aren't loaded\n import pytest\n errno = pytest.main([])\n sys.exit(errno)\n\n\nclass BuildDocsCommand(setuptools.command.build_py.build_py):\n apidoc_command = ('sphinx-apidoc', '-f', '-o', 'docs/API',\n 'bears')\n make_command = ('make', '-C', 'docs', 'html', 'SPHINXOPTS=-W')\n\n def run(self):\n err_no = call(self.apidoc_command)\n if not err_no:\n err_no = call(self.make_command)\n sys.exit(err_no)\n\n\nwith open('requirements.txt') as requirements:\n required = requirements.read().splitlines()\n required.remove('-r bear-requirements.txt')\n\nwith open('bear-requirements.txt') as requirements:\n bear_required = requirements.read().splitlines()\n\nwith open('test-requirements.txt') as requirements:\n test_required = requirements.read().splitlines()\n\nwith open('ignore.txt') as ignore:\n ignore_requirements = ignore.read().splitlines()\n\nwith open('README.rst') as readme:\n long_description = readme.read()\n\nextras_require = {\n 'alldeps': bear_required,\n}\n\n# For the average user we leave out some of the more complicated requirements,\n# e.g. 
language-check (needs java).\nrequired += [req for req in bear_required\n if not any(req.startswith(ignore)\n for ignore in ignore_requirements)]\n\n\nif __name__ == '__main__':\n setup(name='coala-bears',\n version=Constants.VERSION,\n description='Bears for coala (Code Analysis Application)',\n author='The coala developers',\n maintainer='Lasse Schuirmann, Fabian Neuschmidt, Mischa Kr\\xfcger',\n maintainer_email=('[email protected], '\n '[email protected], '\n '[email protected]'),\n url='http://coala.rtfd.org/',\n platforms='any',\n packages=find_packages(exclude=('build.*', 'tests', 'tests.*')),\n install_requires=required,\n extras_require=extras_require,\n tests_require=test_required,\n package_data={'bears': ['VERSION'],\n 'bears.java': ['checkstyle.jar', 'google_checks.xml'],\n 'bears.scala': ['scalastyle.jar',\n 'scalastyle_config.xml']},\n license='AGPL-3.0',\n long_description=long_description,\n entry_points={'coalabears': ['coala_official_bears = bears']},\n # from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n 'Development Status :: 4 - Beta',\n\n 'Environment :: Plugins',\n 'Environment :: MacOS X',\n 'Environment :: Win32 (MS Windows)',\n 'Environment :: X11 Applications :: Gnome',\n\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n\n 'License :: OSI Approved :: GNU Affero General Public License '\n 'v3 or later (AGPLv3+)',\n\n 'Operating System :: OS Independent',\n\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3 :: Only',\n\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Text Processing :: Linguistic'],\n cmdclass={'docs': BuildDocsCommand,\n 'test': PyTestCommand})\n", "path": "setup.py"}]}
1,698
117
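Note on this record: the fix is a one-line metadata change, and `setup(url=...)` surfaces as the `Home-page` field of the installed distribution's metadata. A quick check of that mapping, assuming Python 3.8+ and an environment where `coala-bears` is installed:

```python
from importlib.metadata import metadata

meta = metadata("coala-bears")
print(meta["Home-page"])  # http://coala.io/ once the patched release is installed
```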
gh_patches_debug_15946
rasdani/github-patches
git_diff
microsoft__Qcodes-485
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Keithely 2600 "resolution" @MerlinSmiles right now we are limiting the set to 8 digits (https://github.com/QCoDeS/Qcodes/blob/master/qcodes/instrument_drivers/tektronix/Keithley_2600.py#L23) Afaik it can go to to 12 digits. Do you confirm ? </issue> <code> [start of qcodes/instrument_drivers/tektronix/Keithley_2600.py] 1 from qcodes import VisaInstrument 2 3 4 class Keithley_2600(VisaInstrument): 5 """ 6 channel: use channel 'a' or 'b' 7 8 This is the qcodes driver for the Keithley_2600 Source-Meter series, 9 tested with Keithley_2614B 10 11 Status: beta-version. 12 TODO: 13 - Add all parameters that are in the manual 14 - range and limit should be set according to mode 15 - add ramping and such stuff 16 17 """ 18 def __init__(self, name, address, channel, **kwargs): 19 super().__init__(name, address, terminator='\n', **kwargs) 20 self._channel = channel 21 22 self.add_parameter('volt', get_cmd='measure.v()', 23 get_parser=float, set_cmd='source.levelv={:.8f}', 24 label='Voltage', 25 unit='V') 26 self.add_parameter('curr', get_cmd='measure.i()', 27 get_parser=float, set_cmd='source.leveli={:.8f}', 28 label='Current', 29 unit='A') 30 self.add_parameter('mode', 31 get_cmd='source.func', 32 set_cmd='source.func={:d}', 33 val_mapping={'current': 0, 'voltage': 1}) 34 self.add_parameter('output', 35 get_cmd='source.output', 36 set_cmd='source.output={:d}', 37 val_mapping={'on': 1, 'off': 0}) 38 # Source range 39 # needs get after set 40 self.add_parameter('rangev', 41 get_cmd='source.rangev', 42 get_parser=float, 43 set_cmd='source.rangev={:.4f}', 44 unit='V') 45 # Measure range 46 # needs get after set 47 self.add_parameter('rangei', 48 get_cmd='source.rangei', 49 get_parser=float, 50 set_cmd='source.rangei={:.4f}', 51 unit='A') 52 # Compliance limit 53 self.add_parameter('limitv', 54 get_cmd='source.limitv', 55 get_parser=float, 56 set_cmd='source.limitv={:.4f}', 57 unit='V') 58 # Compliance limit 59 self.add_parameter('limiti', 60 get_cmd='source.limiti', 61 get_parser=float, 62 set_cmd='source.limiti={:.4f}', 63 unit='A') 64 65 self.connect_message() 66 67 def get_idn(self): 68 IDN = self.ask_raw('*IDN?') 69 vendor, model, serial, firmware = map(str.strip, IDN.split(',')) 70 model = model[6:] 71 72 IDN = {'vendor': vendor, 'model': model, 73 'serial': serial, 'firmware': firmware} 74 return IDN 75 76 def reset(self): 77 self.write('reset()') 78 79 def ask(self, cmd): 80 return super().ask('print(smu{:s}.{:s})'.format(self._channel, cmd)) 81 82 def write(self, cmd): 83 super().write('smu{:s}.{:s}'.format(self._channel, cmd)) 84 [end of qcodes/instrument_drivers/tektronix/Keithley_2600.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/qcodes/instrument_drivers/tektronix/Keithley_2600.py b/qcodes/instrument_drivers/tektronix/Keithley_2600.py --- a/qcodes/instrument_drivers/tektronix/Keithley_2600.py +++ b/qcodes/instrument_drivers/tektronix/Keithley_2600.py @@ -20,11 +20,11 @@ self._channel = channel self.add_parameter('volt', get_cmd='measure.v()', - get_parser=float, set_cmd='source.levelv={:.8f}', + get_parser=float, set_cmd='source.levelv={:.12f}', label='Voltage', unit='V') self.add_parameter('curr', get_cmd='measure.i()', - get_parser=float, set_cmd='source.leveli={:.8f}', + get_parser=float, set_cmd='source.leveli={:.12f}', label='Current', unit='A') self.add_parameter('mode',
{"golden_diff": "diff --git a/qcodes/instrument_drivers/tektronix/Keithley_2600.py b/qcodes/instrument_drivers/tektronix/Keithley_2600.py\n--- a/qcodes/instrument_drivers/tektronix/Keithley_2600.py\n+++ b/qcodes/instrument_drivers/tektronix/Keithley_2600.py\n@@ -20,11 +20,11 @@\n self._channel = channel\n \n self.add_parameter('volt', get_cmd='measure.v()',\n- get_parser=float, set_cmd='source.levelv={:.8f}',\n+ get_parser=float, set_cmd='source.levelv={:.12f}',\n label='Voltage',\n unit='V')\n self.add_parameter('curr', get_cmd='measure.i()',\n- get_parser=float, set_cmd='source.leveli={:.8f}',\n+ get_parser=float, set_cmd='source.leveli={:.12f}',\n label='Current',\n unit='A')\n self.add_parameter('mode',\n", "issue": "Keithely 2600 \"resolution\"\n@MerlinSmiles right now we are limiting the set to 8 digits (https://github.com/QCoDeS/Qcodes/blob/master/qcodes/instrument_drivers/tektronix/Keithley_2600.py#L23)\r\nAfaik it can go to to 12 digits. Do you confirm ? \r\n\n", "before_files": [{"content": "from qcodes import VisaInstrument\n\n\nclass Keithley_2600(VisaInstrument):\n \"\"\"\n channel: use channel 'a' or 'b'\n\n This is the qcodes driver for the Keithley_2600 Source-Meter series,\n tested with Keithley_2614B\n\n Status: beta-version.\n TODO:\n - Add all parameters that are in the manual\n - range and limit should be set according to mode\n - add ramping and such stuff\n\n \"\"\"\n def __init__(self, name, address, channel, **kwargs):\n super().__init__(name, address, terminator='\\n', **kwargs)\n self._channel = channel\n\n self.add_parameter('volt', get_cmd='measure.v()',\n get_parser=float, set_cmd='source.levelv={:.8f}',\n label='Voltage',\n unit='V')\n self.add_parameter('curr', get_cmd='measure.i()',\n get_parser=float, set_cmd='source.leveli={:.8f}',\n label='Current',\n unit='A')\n self.add_parameter('mode',\n get_cmd='source.func',\n set_cmd='source.func={:d}',\n val_mapping={'current': 0, 'voltage': 1})\n self.add_parameter('output',\n get_cmd='source.output',\n set_cmd='source.output={:d}',\n val_mapping={'on': 1, 'off': 0})\n # Source range\n # needs get after set\n self.add_parameter('rangev',\n get_cmd='source.rangev',\n get_parser=float,\n set_cmd='source.rangev={:.4f}',\n unit='V')\n # Measure range\n # needs get after set\n self.add_parameter('rangei',\n get_cmd='source.rangei',\n get_parser=float,\n set_cmd='source.rangei={:.4f}',\n unit='A')\n # Compliance limit\n self.add_parameter('limitv',\n get_cmd='source.limitv',\n get_parser=float,\n set_cmd='source.limitv={:.4f}',\n unit='V')\n # Compliance limit\n self.add_parameter('limiti',\n get_cmd='source.limiti',\n get_parser=float,\n set_cmd='source.limiti={:.4f}',\n unit='A')\n\n self.connect_message()\n\n def get_idn(self):\n IDN = self.ask_raw('*IDN?')\n vendor, model, serial, firmware = map(str.strip, IDN.split(','))\n model = model[6:]\n\n IDN = {'vendor': vendor, 'model': model,\n 'serial': serial, 'firmware': firmware}\n return IDN\n\n def reset(self):\n self.write('reset()')\n\n def ask(self, cmd):\n return super().ask('print(smu{:s}.{:s})'.format(self._channel, cmd))\n\n def write(self, cmd):\n super().write('smu{:s}.{:s}'.format(self._channel, cmd))\n", "path": "qcodes/instrument_drivers/tektronix/Keithley_2600.py"}]}
1,464
235
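Note on this record: with `'{:.8f}'`, any setpoint below 5e-9 rounds to zero in the command string sent to the instrument, while twelve decimals preserve sub-nanoamp values. A pure-Python illustration (the command prefix mirrors the driver; the setpoint value is an arbitrary example):

```python
setpoint = 5e-10  # 0.5 nA, well inside the Keithley 2600's source range

print("source.leveli={:.8f}".format(setpoint))   # source.leveli=0.00000000
print("source.leveli={:.12f}".format(setpoint))  # source.leveli=0.000000000500
```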
gh_patches_debug_4901
rasdani/github-patches
git_diff
certbot__certbot-6349
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> KeyError handle_modules with 0.27.0 on openSUSE ## My operating system is (include version): openSUSE Leap 42.1 ## I installed Certbot with (certbot-auto, OS package manager, pip, etc): certbot-auto ## I ran this command and it produced this output: ```` kevdev36:~ # certbot-auto --version Upgrading certbot-auto 0.26.1 to 0.27.0... Replacing certbot-auto... Creating virtual environment... Installing Python packages... Installation succeeded. An unexpected error occurred: KeyError: 'handle_modules' Please see the logfile '/tmp/tmpMAZJox' for more details. ```` ## Certbot's behavior differed from what I expected because: It did not print the version. ## Here is a Certbot log showing the issue (if available): /tmp/tmpMAZJox ```` 2018-09-06 09:59:58,652:DEBUG:certbot.main:certbot version: 0.27.0 2018-09-06 09:59:58,652:DEBUG:certbot.main:Arguments: ['--version'] 2018-09-06 09:59:58,653:DEBUG:certbot.main:Discovered plugins: PluginsRegistry(PluginEntryPoint#apache,PluginEntryPoint#manual,PluginEntryPoint#nginx,PluginEntryPoint#null,PluginEntryPoint#standalone,PluginEntryPoint#webroot) 2018-09-06 09:59:58,660:DEBUG:certbot.log:Exiting abnormally: Traceback (most recent call last): File "/opt/eff.org/certbot/venv/bin/letsencrypt", line 11, in <module> sys.exit(main()) File "/opt/eff.org/certbot/venv/lib/python2.7/site-packages/certbot/main.py", line 1345, in main args = cli.prepare_and_parse_args(plugins, cli_args) File "/opt/eff.org/certbot/venv/lib/python2.7/site-packages/certbot/cli.py", line 1243, in prepare_and_parse_args _plugins_parsing(helpful, plugins) File "/opt/eff.org/certbot/venv/lib/python2.7/site-packages/certbot/cli.py", line 1458, in _plugins_parsing helpful.add_plugin_args(plugins) File "/opt/eff.org/certbot/venv/lib/python2.7/site-packages/certbot/cli.py", line 840, in add_plugin_args plugin_ep.plugin_cls.inject_parser_options(parser_or_group, name) File "/opt/eff.org/certbot/venv/lib/python2.7/site-packages/certbot/plugins/common.py", line 81, in inject_parser_options return cls.add_parser_arguments(add) File "/opt/eff.org/certbot/venv/lib/python2.7/site-packages/certbot_apache/configurator.py", line 159, in add_parser_arguments add("handle-modules", default=cls.OS_DEFAULTS["handle_modules"], KeyError: 'handle_modules' 2018-09-06 09:59:58,660:ERROR:certbot.log:An unexpected error occurred: ```` ## Workaround Downgrade to 0.26.1 and use `certbot-auto` with `--no-self-upgrade`. 
```` kevdev36:~ # wget https://raw.githubusercontent.com/certbot/certbot/v0.26.1/certbot-auto kevdev36:~ # chmod +x certbot-auto kevdev36:~ # /opt/eff.org/certbot/venv/bin/pip install certbot==0.26.1 certbot-apache==0.26.1 certbot-nginx==0.26.1 kevdev36:~ # ./certbot-auto --no-self-upgrade --version certbot 0.26.1 ```` </issue> <code> [start of certbot-apache/certbot_apache/override_suse.py] 1 """ Distribution specific override class for OpenSUSE """ 2 import pkg_resources 3 4 import zope.interface 5 6 from certbot import interfaces 7 8 from certbot_apache import configurator 9 10 @zope.interface.provider(interfaces.IPluginFactory) 11 class OpenSUSEConfigurator(configurator.ApacheConfigurator): 12 """OpenSUSE specific ApacheConfigurator override class""" 13 14 OS_DEFAULTS = dict( 15 server_root="/etc/apache2", 16 vhost_root="/etc/apache2/vhosts.d", 17 vhost_files="*.conf", 18 logs_root="/var/log/apache2", 19 ctl="apache2ctl", 20 version_cmd=['apache2ctl', '-v'], 21 restart_cmd=['apache2ctl', 'graceful'], 22 conftest_cmd=['apache2ctl', 'configtest'], 23 enmod="a2enmod", 24 dismod="a2dismod", 25 le_vhost_ext="-le-ssl.conf", 26 handle_mods=False, 27 handle_sites=False, 28 challenge_location="/etc/apache2/vhosts.d", 29 MOD_SSL_CONF_SRC=pkg_resources.resource_filename( 30 "certbot_apache", "options-ssl-apache.conf") 31 ) 32 [end of certbot-apache/certbot_apache/override_suse.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/certbot-apache/certbot_apache/override_suse.py b/certbot-apache/certbot_apache/override_suse.py --- a/certbot-apache/certbot_apache/override_suse.py +++ b/certbot-apache/certbot_apache/override_suse.py @@ -23,7 +23,7 @@ enmod="a2enmod", dismod="a2dismod", le_vhost_ext="-le-ssl.conf", - handle_mods=False, + handle_modules=False, handle_sites=False, challenge_location="/etc/apache2/vhosts.d", MOD_SSL_CONF_SRC=pkg_resources.resource_filename(
{"golden_diff": "diff --git a/certbot-apache/certbot_apache/override_suse.py b/certbot-apache/certbot_apache/override_suse.py\n--- a/certbot-apache/certbot_apache/override_suse.py\n+++ b/certbot-apache/certbot_apache/override_suse.py\n@@ -23,7 +23,7 @@\n enmod=\"a2enmod\",\n dismod=\"a2dismod\",\n le_vhost_ext=\"-le-ssl.conf\",\n- handle_mods=False,\n+ handle_modules=False,\n handle_sites=False,\n challenge_location=\"/etc/apache2/vhosts.d\",\n MOD_SSL_CONF_SRC=pkg_resources.resource_filename(\n", "issue": "KeyError handle_modules with 0.27.0 on openSUSE\n## My operating system is (include version):\r\n\r\nopenSUSE Leap 42.1\r\n\r\n## I installed Certbot with (certbot-auto, OS package manager, pip, etc):\r\n\r\ncertbot-auto\r\n\r\n## I ran this command and it produced this output:\r\n\r\n````\r\nkevdev36:~ # certbot-auto --version\r\nUpgrading certbot-auto 0.26.1 to 0.27.0...\r\nReplacing certbot-auto...\r\nCreating virtual environment...\r\nInstalling Python packages...\r\nInstallation succeeded.\r\nAn unexpected error occurred:\r\nKeyError: 'handle_modules'\r\nPlease see the logfile '/tmp/tmpMAZJox' for more details.\r\n````\r\n\r\n## Certbot's behavior differed from what I expected because:\r\n\r\nIt did not print the version.\r\n\r\n## Here is a Certbot log showing the issue (if available):\r\n\r\n/tmp/tmpMAZJox\r\n\r\n````\r\n2018-09-06 09:59:58,652:DEBUG:certbot.main:certbot version: 0.27.0\r\n2018-09-06 09:59:58,652:DEBUG:certbot.main:Arguments: ['--version']\r\n2018-09-06 09:59:58,653:DEBUG:certbot.main:Discovered plugins: PluginsRegistry(PluginEntryPoint#apache,PluginEntryPoint#manual,PluginEntryPoint#nginx,PluginEntryPoint#null,PluginEntryPoint#standalone,PluginEntryPoint#webroot)\r\n2018-09-06 09:59:58,660:DEBUG:certbot.log:Exiting abnormally:\r\nTraceback (most recent call last):\r\n File \"/opt/eff.org/certbot/venv/bin/letsencrypt\", line 11, in <module>\r\n sys.exit(main())\r\n File \"/opt/eff.org/certbot/venv/lib/python2.7/site-packages/certbot/main.py\", line 1345, in main\r\n args = cli.prepare_and_parse_args(plugins, cli_args)\r\n File \"/opt/eff.org/certbot/venv/lib/python2.7/site-packages/certbot/cli.py\", line 1243, in prepare_and_parse_args\r\n _plugins_parsing(helpful, plugins)\r\n File \"/opt/eff.org/certbot/venv/lib/python2.7/site-packages/certbot/cli.py\", line 1458, in _plugins_parsing\r\n helpful.add_plugin_args(plugins)\r\n File \"/opt/eff.org/certbot/venv/lib/python2.7/site-packages/certbot/cli.py\", line 840, in add_plugin_args\r\n plugin_ep.plugin_cls.inject_parser_options(parser_or_group, name)\r\n File \"/opt/eff.org/certbot/venv/lib/python2.7/site-packages/certbot/plugins/common.py\", line 81, in inject_parser_options\r\n return cls.add_parser_arguments(add)\r\n File \"/opt/eff.org/certbot/venv/lib/python2.7/site-packages/certbot_apache/configurator.py\", line 159, in add_parser_arguments\r\n add(\"handle-modules\", default=cls.OS_DEFAULTS[\"handle_modules\"],\r\nKeyError: 'handle_modules'\r\n2018-09-06 09:59:58,660:ERROR:certbot.log:An unexpected error occurred:\r\n````\r\n\r\n## Workaround\r\n\r\nDowngrade to 0.26.1 and use `certbot-auto` with `--no-self-upgrade`.\r\n\r\n````\r\nkevdev36:~ # wget https://raw.githubusercontent.com/certbot/certbot/v0.26.1/certbot-auto\r\nkevdev36:~ # chmod +x certbot-auto\r\nkevdev36:~ # /opt/eff.org/certbot/venv/bin/pip install certbot==0.26.1 certbot-apache==0.26.1 certbot-nginx==0.26.1\r\nkevdev36:~ # ./certbot-auto --no-self-upgrade --version\r\ncertbot 0.26.1\r\n````\n", "before_files": [{"content": "\"\"\" 
Distribution specific override class for OpenSUSE \"\"\"\nimport pkg_resources\n\nimport zope.interface\n\nfrom certbot import interfaces\n\nfrom certbot_apache import configurator\n\[email protected](interfaces.IPluginFactory)\nclass OpenSUSEConfigurator(configurator.ApacheConfigurator):\n \"\"\"OpenSUSE specific ApacheConfigurator override class\"\"\"\n\n OS_DEFAULTS = dict(\n server_root=\"/etc/apache2\",\n vhost_root=\"/etc/apache2/vhosts.d\",\n vhost_files=\"*.conf\",\n logs_root=\"/var/log/apache2\",\n ctl=\"apache2ctl\",\n version_cmd=['apache2ctl', '-v'],\n restart_cmd=['apache2ctl', 'graceful'],\n conftest_cmd=['apache2ctl', 'configtest'],\n enmod=\"a2enmod\",\n dismod=\"a2dismod\",\n le_vhost_ext=\"-le-ssl.conf\",\n handle_mods=False,\n handle_sites=False,\n challenge_location=\"/etc/apache2/vhosts.d\",\n MOD_SSL_CONF_SRC=pkg_resources.resource_filename(\n \"certbot_apache\", \"options-ssl-apache.conf\")\n )\n", "path": "certbot-apache/certbot_apache/override_suse.py"}]}
1,783
155
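Note on this record: a self-contained reproduction of the failure mode. The 0.27.0 base class reads the renamed `handle_modules` key from `OS_DEFAULTS`, so any per-OS override dict still spelling it `handle_mods` raises `KeyError` at argument-parsing time. Class and key names mirror the record, but the code is a sketch, not certbot's real plugin machinery:

```python
class BaseConfigurator:
    OS_DEFAULTS = dict(handle_modules=True, handle_sites=True)

    @classmethod
    def add_parser_arguments(cls):
        # The 0.27.0 base class switched to the new key name...
        return {"handle-modules": cls.OS_DEFAULTS["handle_modules"]}


class OpenSUSEConfigurator(BaseConfigurator):
    # ...so an override that still spells it `handle_mods` breaks:
    OS_DEFAULTS = dict(handle_mods=False, handle_sites=False)


try:
    OpenSUSEConfigurator.add_parser_arguments()
except KeyError as exc:
    print("KeyError:", exc)  # KeyError: 'handle_modules'

OpenSUSEConfigurator.OS_DEFAULTS = dict(handle_modules=False, handle_sites=False)
print(OpenSUSEConfigurator.add_parser_arguments())  # works after the rename
```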
gh_patches_debug_15953
rasdani/github-patches
git_diff
pytorch__audio-1465
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Remove unused module [`torchaudio._internal.misc_ops`](https://github.com/pytorch/audio/blob/b059f08742e70700ce4c92296a1131118f67a588/torchaudio/_internal/misc_ops.py) is a residue from refactoring of I/O features in the past releases. We can get rid of the whole module. </issue> <code> [start of torchaudio/_internal/misc_ops.py] 1 from typing import Union, Callable 2 3 import torch 4 from torch import Tensor 5 6 7 def normalize_audio(signal: Tensor, normalization: Union[bool, float, Callable]) -> None: 8 """Audio normalization of a tensor in-place. The normalization can be a bool, 9 a number, or a callable that takes the audio tensor as an input. SoX uses 10 32-bit signed integers internally, thus bool normalizes based on that assumption. 11 """ 12 13 if not normalization: 14 return 15 16 if isinstance(normalization, bool): 17 normalization = 1 << 31 18 19 if isinstance(normalization, (float, int)): 20 # normalize with custom value 21 signal /= normalization 22 elif callable(normalization): 23 signal /= normalization(signal) 24 25 26 def check_input(src: Tensor) -> None: 27 if not torch.is_tensor(src): 28 raise TypeError('Expected a tensor, got %s' % type(src)) 29 if src.is_cuda: 30 raise TypeError('Expected a CPU based tensor, got %s' % type(src)) 31 [end of torchaudio/_internal/misc_ops.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/torchaudio/_internal/misc_ops.py b/torchaudio/_internal/misc_ops.py deleted file mode 100644 --- a/torchaudio/_internal/misc_ops.py +++ /dev/null @@ -1,30 +0,0 @@ -from typing import Union, Callable - -import torch -from torch import Tensor - - -def normalize_audio(signal: Tensor, normalization: Union[bool, float, Callable]) -> None: - """Audio normalization of a tensor in-place. The normalization can be a bool, - a number, or a callable that takes the audio tensor as an input. SoX uses - 32-bit signed integers internally, thus bool normalizes based on that assumption. - """ - - if not normalization: - return - - if isinstance(normalization, bool): - normalization = 1 << 31 - - if isinstance(normalization, (float, int)): - # normalize with custom value - signal /= normalization - elif callable(normalization): - signal /= normalization(signal) - - -def check_input(src: Tensor) -> None: - if not torch.is_tensor(src): - raise TypeError('Expected a tensor, got %s' % type(src)) - if src.is_cuda: - raise TypeError('Expected a CPU based tensor, got %s' % type(src))
{"golden_diff": "diff --git a/torchaudio/_internal/misc_ops.py b/torchaudio/_internal/misc_ops.py\ndeleted file mode 100644\n--- a/torchaudio/_internal/misc_ops.py\n+++ /dev/null\n@@ -1,30 +0,0 @@\n-from typing import Union, Callable\n-\n-import torch\n-from torch import Tensor\n-\n-\n-def normalize_audio(signal: Tensor, normalization: Union[bool, float, Callable]) -> None:\n- \"\"\"Audio normalization of a tensor in-place. The normalization can be a bool,\n- a number, or a callable that takes the audio tensor as an input. SoX uses\n- 32-bit signed integers internally, thus bool normalizes based on that assumption.\n- \"\"\"\n-\n- if not normalization:\n- return\n-\n- if isinstance(normalization, bool):\n- normalization = 1 << 31\n-\n- if isinstance(normalization, (float, int)):\n- # normalize with custom value\n- signal /= normalization\n- elif callable(normalization):\n- signal /= normalization(signal)\n-\n-\n-def check_input(src: Tensor) -> None:\n- if not torch.is_tensor(src):\n- raise TypeError('Expected a tensor, got %s' % type(src))\n- if src.is_cuda:\n- raise TypeError('Expected a CPU based tensor, got %s' % type(src))\n", "issue": "Remove unused module\n[`torchaudio._internal.misc_ops`](https://github.com/pytorch/audio/blob/b059f08742e70700ce4c92296a1131118f67a588/torchaudio/_internal/misc_ops.py) is a residue from refactoring of I/O features in the past releases. We can get rid of the whole module.\r\n\r\n\n", "before_files": [{"content": "from typing import Union, Callable\n\nimport torch\nfrom torch import Tensor\n\n\ndef normalize_audio(signal: Tensor, normalization: Union[bool, float, Callable]) -> None:\n \"\"\"Audio normalization of a tensor in-place. The normalization can be a bool,\n a number, or a callable that takes the audio tensor as an input. SoX uses\n 32-bit signed integers internally, thus bool normalizes based on that assumption.\n \"\"\"\n\n if not normalization:\n return\n\n if isinstance(normalization, bool):\n normalization = 1 << 31\n\n if isinstance(normalization, (float, int)):\n # normalize with custom value\n signal /= normalization\n elif callable(normalization):\n signal /= normalization(signal)\n\n\ndef check_input(src: Tensor) -> None:\n if not torch.is_tensor(src):\n raise TypeError('Expected a tensor, got %s' % type(src))\n if src.is_cuda:\n raise TypeError('Expected a CPU based tensor, got %s' % type(src))\n", "path": "torchaudio/_internal/misc_ops.py"}]}
911
301
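Since the golden diff deletes `torchaudio._internal.misc_ops` outright, any caller that still needed the helper has to inline it. Below is a self-contained sketch of the removed `normalize_audio` behaviour using only public `torch` APIs; nothing here ships in torchaudio anymore.

```python
import torch


def normalize_audio(signal: torch.Tensor, normalization=True) -> None:
    """In-place scaling, mirroring the deleted misc_ops helper above."""
    if not normalization:
        return
    if isinstance(normalization, bool):
        # SoX works on 32-bit signed integers, hence the 1 << 31 scale.
        normalization = 1 << 31
    if isinstance(normalization, (float, int)):
        signal /= normalization
    elif callable(normalization):
        signal /= normalization(signal)


waveform = torch.tensor([float(1 << 30), float(1 << 29)])
normalize_audio(waveform)
print(waveform)  # tensor([0.5000, 0.2500])
```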
gh_patches_debug_691
rasdani/github-patches
git_diff
ivy-llc__ivy-15263
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> eigh </issue> <code> [start of ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py] 1 # local 2 import ivy 3 from ivy.functional.frontends.numpy.func_wrapper import ( 4 to_ivy_arrays_and_back, 5 from_zero_dim_arrays_to_scalar, 6 ) 7 8 9 @to_ivy_arrays_and_back 10 @from_zero_dim_arrays_to_scalar 11 def eigvalsh(a, /, UPLO="L"): 12 return ivy.eigvalsh(a, UPLO=UPLO) 13 14 15 @to_ivy_arrays_and_back 16 def eig(a): 17 return ivy.eig(a) 18 19 20 @from_zero_dim_arrays_to_scalar 21 def eigh(a, /, UPLO="L"): 22 return ivy.eigh(a, UPLO=UPLO) 23 [end of ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py b/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py --- a/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py +++ b/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py @@ -17,6 +17,7 @@ return ivy.eig(a) +@to_ivy_arrays_and_back @from_zero_dim_arrays_to_scalar def eigh(a, /, UPLO="L"): return ivy.eigh(a, UPLO=UPLO)
{"golden_diff": "diff --git a/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py b/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py\n--- a/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py\n+++ b/ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py\n@@ -17,6 +17,7 @@\n return ivy.eig(a)\n \n \n+@to_ivy_arrays_and_back\n @from_zero_dim_arrays_to_scalar\n def eigh(a, /, UPLO=\"L\"):\n return ivy.eigh(a, UPLO=UPLO)\n", "issue": "eigh\n\n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.functional.frontends.numpy.func_wrapper import (\n to_ivy_arrays_and_back,\n from_zero_dim_arrays_to_scalar,\n)\n\n\n@to_ivy_arrays_and_back\n@from_zero_dim_arrays_to_scalar\ndef eigvalsh(a, /, UPLO=\"L\"):\n return ivy.eigvalsh(a, UPLO=UPLO)\n\n\n@to_ivy_arrays_and_back\ndef eig(a):\n return ivy.eig(a)\n\n\n@from_zero_dim_arrays_to_scalar\ndef eigh(a, /, UPLO=\"L\"):\n return ivy.eigh(a, UPLO=UPLO)\n", "path": "ivy/functional/frontends/numpy/linalg/matrix_eigenvalues.py"}]}
738
141
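The one-line fix above only adds the missing `@to_ivy_arrays_and_back` decorator so `eigh` converts its arguments like its siblings do. For reference, this is the NumPy behaviour the frontend mirrors (plain NumPy, not ivy):

```python
import numpy as np

a = np.array([[2.0, 1.0],
              [1.0, 2.0]])
# eigh reads only the triangle selected by UPLO ("L" by default) and
# returns ascending eigenvalues plus orthonormal eigenvectors (as columns).
w, v = np.linalg.eigh(a, UPLO="L")
print(w)  # [1. 3.]
# Sanity check: A @ v == v @ diag(w) for a symmetric matrix.
assert np.allclose(a @ v, v * w)
```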
gh_patches_debug_3635
rasdani/github-patches
git_diff
ansible__ansible-lint-1625
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> False positive: async jobs <!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and master branch are affected too --> ##### Summary <!--- Explain the problem briefly below --> A `command` module task that is run as an async job is incorrectly treated as a normal sync task. For async tasks the options like `changed_when` (and `failed_when` and so on) are not given to the async `command` task itself, they are given to the `async_status` module task that is run after the async task. Ansible-lint does not understand this and complains for rule `no-changed-when` for the `command` task. Example: ```yaml --- - name: Asynchronous long task command: alongtask.sh async: 1000 poll: 0 register: job_sleeper - name: Wait for asynchronous job to end async_status: jid: '{{ job_sleeper.ansible_job_id }}' register: job_result until: job_result.finished retries: 100 delay: 10 changed_when: [....] ``` Note how the `changed_when` is given in the `async_status` task and not in the `command` task. ##### Issue Type - Bug Report ##### Ansible and Ansible Lint details <!--- Paste verbatim output between triple backticks --> ```console (paste below) ansible --version 2.9.21 ansible-lint --version 5.0.8 ``` - ansible installation method: pip - ansible-lint installation method: pip ##### OS / ENVIRONMENT <!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. --> EL7.9 all updated ##### STEPS TO REPRODUCE <!--- Describe exactly how to reproduce the problem, using a minimal test-case --> ```yaml --- - name: Asynchronous yum task command: alongtask.sh async: 1000 poll: 0 register: job_sleeper - name: Wait for asynchronous job to end async_status: jid: '{{ job_sleeper.ansible_job_id }}' register: job_result until: job_result.finished retries: 100 delay: 10 changed_when: [....] ``` <!--- Paste example playbooks or commands between triple backticks below --> ```console (paste below) ``` <!--- HINT: You can paste gist.github.com links for larger files --> ##### Desired Behaviour <!--- Describe what you expected to happen when running the steps above --> Ansible-lint should not detect `no-changed-when` for `command` module task run as async job since the `changed_when` cannot be given to the `command` module task itself. It should detect that there is a `changed_when` in the following `async_status` task. ##### Actual Behaviour <!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) --> Ansible-lint detects false positive `no-changed-when` for `command` module task run as async job even though `changed_when` cannot be correctly given for an async task - the `changed_when` is given for the subsequent `async_status` module task. 
<!--- Paste verbatim command output between triple backticks --> ```paste below ``` [minimum complete verifiable example]: http://stackoverflow.com/help/mcve </issue> <code> [start of src/ansiblelint/rules/CommandHasChangesCheckRule.py] 1 # Copyright (c) 2016 Will Thames <[email protected]> 2 # 3 # Permission is hereby granted, free of charge, to any person obtaining a copy 4 # of this software and associated documentation files (the "Software"), to deal 5 # in the Software without restriction, including without limitation the rights 6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 # copies of the Software, and to permit persons to whom the Software is 8 # furnished to do so, subject to the following conditions: 9 # 10 # The above copyright notice and this permission notice shall be included in 11 # all copies or substantial portions of the Software. 12 # 13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 # THE SOFTWARE. 20 21 from typing import TYPE_CHECKING, Any, Dict, Union 22 23 from ansiblelint.rules import AnsibleLintRule 24 25 if TYPE_CHECKING: 26 from typing import Optional 27 28 from ansiblelint.file_utils import Lintable 29 30 31 class CommandHasChangesCheckRule(AnsibleLintRule): 32 id = 'no-changed-when' 33 shortdesc = 'Commands should not change things if nothing needs doing' 34 description = ( 35 'Commands should either read information (and thus set ' 36 '``changed_when``) or not do something if it has already been ' 37 'done (using creates/removes) or only do it if another ' 38 'check has a particular result (``when``)' 39 ) 40 severity = 'HIGH' 41 tags = ['command-shell', 'idempotency'] 42 version_added = 'historic' 43 44 _commands = ['command', 'shell', 'raw'] 45 46 def matchtask( 47 self, task: Dict[str, Any], file: 'Optional[Lintable]' = None 48 ) -> Union[bool, str]: 49 if task["__ansible_action_type__"] == 'task': 50 if task["action"]["__ansible_module__"] in self._commands: 51 return ( 52 'changed_when' not in task 53 and 'when' not in task 54 and 'creates' not in task['action'] 55 and 'removes' not in task['action'] 56 ) 57 return False 58 [end of src/ansiblelint/rules/CommandHasChangesCheckRule.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/ansiblelint/rules/CommandHasChangesCheckRule.py b/src/ansiblelint/rules/CommandHasChangesCheckRule.py --- a/src/ansiblelint/rules/CommandHasChangesCheckRule.py +++ b/src/ansiblelint/rules/CommandHasChangesCheckRule.py @@ -53,5 +53,6 @@ and 'when' not in task and 'creates' not in task['action'] and 'removes' not in task['action'] + and not ('async' in task and task.get('poll') == 0) ) return False
{"golden_diff": "diff --git a/src/ansiblelint/rules/CommandHasChangesCheckRule.py b/src/ansiblelint/rules/CommandHasChangesCheckRule.py\n--- a/src/ansiblelint/rules/CommandHasChangesCheckRule.py\n+++ b/src/ansiblelint/rules/CommandHasChangesCheckRule.py\n@@ -53,5 +53,6 @@\n and 'when' not in task\n and 'creates' not in task['action']\n and 'removes' not in task['action']\n+ and not ('async' in task and task.get('poll') == 0)\n )\n return False\n", "issue": "False positive: async jobs\n<!--- Verify first that your issue is not already reported on GitHub -->\r\n<!--- Also test if the latest release and master branch are affected too -->\r\n\r\n##### Summary\r\n<!--- Explain the problem briefly below -->\r\nA `command` module task that is run as an async job is incorrectly treated as a normal sync task.\r\n\r\nFor async tasks the options like `changed_when` (and `failed_when` and so on) are not given to the async `command` task itself, they are given to the `async_status` module task that is run after the async task.\r\n\r\nAnsible-lint does not understand this and complains for rule `no-changed-when` for the `command` task.\r\n\r\nExample:\r\n```yaml\r\n---\r\n- name: Asynchronous long task\r\n command: alongtask.sh\r\n async: 1000\r\n poll: 0\r\n register: job_sleeper\r\n\r\n- name: Wait for asynchronous job to end\r\n async_status:\r\n jid: '{{ job_sleeper.ansible_job_id }}'\r\n register: job_result\r\n until: job_result.finished\r\n retries: 100\r\n delay: 10\r\n changed_when: [....]\r\n```\r\n\r\nNote how the `changed_when` is given in the `async_status` task and not in the `command` task.\r\n\r\n##### Issue Type\r\n\r\n- Bug Report\r\n\r\n##### Ansible and Ansible Lint details\r\n<!--- Paste verbatim output between triple backticks -->\r\n```console (paste below)\r\nansible --version\r\n2.9.21\r\n\r\nansible-lint --version\r\n5.0.8\r\n\r\n```\r\n\r\n- ansible installation method: pip\r\n- ansible-lint installation method: pip\r\n\r\n##### OS / ENVIRONMENT\r\n<!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. -->\r\nEL7.9 all updated\r\n\r\n##### STEPS TO REPRODUCE\r\n<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->\r\n```yaml\r\n---\r\n- name: Asynchronous yum task\r\n command: alongtask.sh\r\n async: 1000\r\n poll: 0\r\n register: job_sleeper\r\n\r\n- name: Wait for asynchronous job to end\r\n async_status:\r\n jid: '{{ job_sleeper.ansible_job_id }}'\r\n register: job_result\r\n until: job_result.finished\r\n retries: 100\r\n delay: 10\r\n changed_when: [....]\r\n```\r\n\r\n<!--- Paste example playbooks or commands between triple backticks below -->\r\n```console (paste below)\r\n\r\n```\r\n\r\n<!--- HINT: You can paste gist.github.com links for larger files -->\r\n\r\n##### Desired Behaviour\r\n<!--- Describe what you expected to happen when running the steps above -->\r\nAnsible-lint should not detect `no-changed-when` for `command` module task run as async job since the `changed_when` cannot be given to the `command` module task itself.\r\n\r\nIt should detect that there is a `changed_when` in the following `async_status` task.\r\n\r\n##### Actual Behaviour\r\n<!--- Describe what actually happened. 
If possible run with extra verbosity (-vvvv) -->\r\nAnsible-lint detects false positive `no-changed-when` for `command` module task run as async job even though `changed_when` cannot be correctly given for an async task - the `changed_when` is given for the subsequent `async_status` module task.\r\n\r\n<!--- Paste verbatim command output between triple backticks -->\r\n```paste below\r\n\r\n```\r\n\r\n\r\n[minimum complete verifiable example]: http://stackoverflow.com/help/mcve\r\n\n", "before_files": [{"content": "# Copyright (c) 2016 Will Thames <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nfrom typing import TYPE_CHECKING, Any, Dict, Union\n\nfrom ansiblelint.rules import AnsibleLintRule\n\nif TYPE_CHECKING:\n from typing import Optional\n\n from ansiblelint.file_utils import Lintable\n\n\nclass CommandHasChangesCheckRule(AnsibleLintRule):\n id = 'no-changed-when'\n shortdesc = 'Commands should not change things if nothing needs doing'\n description = (\n 'Commands should either read information (and thus set '\n '``changed_when``) or not do something if it has already been '\n 'done (using creates/removes) or only do it if another '\n 'check has a particular result (``when``)'\n )\n severity = 'HIGH'\n tags = ['command-shell', 'idempotency']\n version_added = 'historic'\n\n _commands = ['command', 'shell', 'raw']\n\n def matchtask(\n self, task: Dict[str, Any], file: 'Optional[Lintable]' = None\n ) -> Union[bool, str]:\n if task[\"__ansible_action_type__\"] == 'task':\n if task[\"action\"][\"__ansible_module__\"] in self._commands:\n return (\n 'changed_when' not in task\n and 'when' not in task\n and 'creates' not in task['action']\n and 'removes' not in task['action']\n )\n return False\n", "path": "src/ansiblelint/rules/CommandHasChangesCheckRule.py"}]}
1,959
130
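The decisive change in the diff above is the extra conjunct `not ('async' in task and task.get('poll') == 0)`. Extracted as a standalone predicate over plain dicts shaped like ansible-lint's normalized tasks (the sample tasks below are made up), it behaves like this:

```python
def needs_changed_when(task: dict) -> bool:
    """True when the no-changed-when rule should still flag the task."""
    return (
        'changed_when' not in task
        and 'when' not in task
        and 'creates' not in task['action']
        and 'removes' not in task['action']
        # New clause: a fire-and-forget async job (poll: 0) is exempt,
        # since its changed_when lives on the later async_status task.
        and not ('async' in task and task.get('poll') == 0)
    )


sync_task = {'action': {'__ansible_module__': 'command'}}
async_task = {'action': {'__ansible_module__': 'command'},
              'async': 1000, 'poll': 0}
print(needs_changed_when(sync_task))   # True  -> rule fires as before
print(needs_changed_when(async_task))  # False -> async job is skipped
```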
gh_patches_debug_42365
rasdani/github-patches
git_diff
scrapy__scrapy-3660
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Document LogFormatter Currently, the `LogFormatter` class is only mentioned in the [Release notes](https://docs.scrapy.org/en/latest/news.html) page of the documentation. This class should be properly documented, both its API members and a small section introducing it on the documentation page about [Logging](https://docs.scrapy.org/en/latest/topics/logging.html). The responses to [Scrapy - Silently drop an item](https://stackoverflow.com/q/13527921/939364) in StackOverflow would be a good starting point. </issue> <code> [start of scrapy/logformatter.py] 1 import os 2 import logging 3 4 from twisted.python.failure import Failure 5 6 from scrapy.utils.request import referer_str 7 8 SCRAPEDMSG = u"Scraped from %(src)s" + os.linesep + "%(item)s" 9 DROPPEDMSG = u"Dropped: %(exception)s" + os.linesep + "%(item)s" 10 CRAWLEDMSG = u"Crawled (%(status)s) %(request)s%(request_flags)s (referer: %(referer)s)%(response_flags)s" 11 12 13 class LogFormatter(object): 14 """Class for generating log messages for different actions. 15 16 All methods must return a dictionary listing the parameters ``level``, 17 ``msg`` and ``args`` which are going to be used for constructing the log 18 message when calling logging.log. 19 20 Dictionary keys for the method outputs: 21 * ``level`` should be the log level for that action, you can use those 22 from the python logging library: logging.DEBUG, logging.INFO, 23 logging.WARNING, logging.ERROR and logging.CRITICAL. 24 25 * ``msg`` should be a string that can contain different formatting 26 placeholders. This string, formatted with the provided ``args``, is 27 going to be the log message for that action. 28 29 * ``args`` should be a tuple or dict with the formatting placeholders 30 for ``msg``. The final log message is computed as output['msg'] % 31 output['args']. 32 """ 33 34 def crawled(self, request, response, spider): 35 request_flags = ' %s' % str(request.flags) if request.flags else '' 36 response_flags = ' %s' % str(response.flags) if response.flags else '' 37 return { 38 'level': logging.DEBUG, 39 'msg': CRAWLEDMSG, 40 'args': { 41 'status': response.status, 42 'request': request, 43 'request_flags' : request_flags, 44 'referer': referer_str(request), 45 'response_flags': response_flags, 46 # backward compatibility with Scrapy logformatter below 1.4 version 47 'flags': response_flags 48 } 49 } 50 51 def scraped(self, item, response, spider): 52 if isinstance(response, Failure): 53 src = response.getErrorMessage() 54 else: 55 src = response 56 return { 57 'level': logging.DEBUG, 58 'msg': SCRAPEDMSG, 59 'args': { 60 'src': src, 61 'item': item, 62 } 63 } 64 65 def dropped(self, item, exception, response, spider): 66 return { 67 'level': logging.WARNING, 68 'msg': DROPPEDMSG, 69 'args': { 70 'exception': exception, 71 'item': item, 72 } 73 } 74 75 @classmethod 76 def from_crawler(cls, crawler): 77 return cls() 78 [end of scrapy/logformatter.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scrapy/logformatter.py b/scrapy/logformatter.py --- a/scrapy/logformatter.py +++ b/scrapy/logformatter.py @@ -12,26 +12,40 @@ class LogFormatter(object): """Class for generating log messages for different actions. - - All methods must return a dictionary listing the parameters ``level``, - ``msg`` and ``args`` which are going to be used for constructing the log - message when calling logging.log. + + All methods must return a dictionary listing the parameters ``level``, ``msg`` + and ``args`` which are going to be used for constructing the log message when + calling ``logging.log``. Dictionary keys for the method outputs: - * ``level`` should be the log level for that action, you can use those - from the python logging library: logging.DEBUG, logging.INFO, - logging.WARNING, logging.ERROR and logging.CRITICAL. - * ``msg`` should be a string that can contain different formatting - placeholders. This string, formatted with the provided ``args``, is - going to be the log message for that action. + * ``level`` is the log level for that action, you can use those from the + `python logging library <https://docs.python.org/3/library/logging.html>`_ : + ``logging.DEBUG``, ``logging.INFO``, ``logging.WARNING``, ``logging.ERROR`` + and ``logging.CRITICAL``. + * ``msg`` should be a string that can contain different formatting placeholders. + This string, formatted with the provided ``args``, is going to be the long message + for that action. + * ``args`` should be a tuple or dict with the formatting placeholders for ``msg``. + The final log message is computed as ``msg % args``. - * ``args`` should be a tuple or dict with the formatting placeholders - for ``msg``. The final log message is computed as output['msg'] % - output['args']. - """ + Here is an example on how to create a custom log formatter to lower the severity level of + the log message when an item is dropped from the pipeline:: + class PoliteLogFormatter(logformatter.LogFormatter): + def dropped(self, item, exception, response, spider): + return { + 'level': logging.INFO, # lowering the level from logging.WARNING + 'msg': u"Dropped: %(exception)s" + os.linesep + "%(item)s", + 'args': { + 'exception': exception, + 'item': item, + } + } + """ + def crawled(self, request, response, spider): + """Logs a message when the crawler finds a webpage.""" request_flags = ' %s' % str(request.flags) if request.flags else '' response_flags = ' %s' % str(response.flags) if response.flags else '' return { @@ -40,7 +54,7 @@ 'args': { 'status': response.status, 'request': request, - 'request_flags' : request_flags, + 'request_flags': request_flags, 'referer': referer_str(request), 'response_flags': response_flags, # backward compatibility with Scrapy logformatter below 1.4 version @@ -49,6 +63,7 @@ } def scraped(self, item, response, spider): + """Logs a message when an item is scraped by a spider.""" if isinstance(response, Failure): src = response.getErrorMessage() else: @@ -63,6 +78,7 @@ } def dropped(self, item, exception, response, spider): + """Logs a message when an item is dropped while it is passing through the item pipeline.""" return { 'level': logging.WARNING, 'msg': DROPPEDMSG,
{"golden_diff": "diff --git a/scrapy/logformatter.py b/scrapy/logformatter.py\n--- a/scrapy/logformatter.py\n+++ b/scrapy/logformatter.py\n@@ -12,26 +12,40 @@\n \n class LogFormatter(object):\n \"\"\"Class for generating log messages for different actions.\n-\n- All methods must return a dictionary listing the parameters ``level``,\n- ``msg`` and ``args`` which are going to be used for constructing the log\n- message when calling logging.log.\n+ \n+ All methods must return a dictionary listing the parameters ``level``, ``msg``\n+ and ``args`` which are going to be used for constructing the log message when\n+ calling ``logging.log``.\n \n Dictionary keys for the method outputs:\n- * ``level`` should be the log level for that action, you can use those\n- from the python logging library: logging.DEBUG, logging.INFO,\n- logging.WARNING, logging.ERROR and logging.CRITICAL.\n \n- * ``msg`` should be a string that can contain different formatting\n- placeholders. This string, formatted with the provided ``args``, is\n- going to be the log message for that action.\n+ * ``level`` is the log level for that action, you can use those from the\n+ `python logging library <https://docs.python.org/3/library/logging.html>`_ :\n+ ``logging.DEBUG``, ``logging.INFO``, ``logging.WARNING``, ``logging.ERROR``\n+ and ``logging.CRITICAL``.\n+ * ``msg`` should be a string that can contain different formatting placeholders.\n+ This string, formatted with the provided ``args``, is going to be the long message\n+ for that action.\n+ * ``args`` should be a tuple or dict with the formatting placeholders for ``msg``.\n+ The final log message is computed as ``msg % args``.\n \n- * ``args`` should be a tuple or dict with the formatting placeholders\n- for ``msg``. The final log message is computed as output['msg'] %\n- output['args'].\n- \"\"\"\n+ Here is an example on how to create a custom log formatter to lower the severity level of\n+ the log message when an item is dropped from the pipeline::\n \n+ class PoliteLogFormatter(logformatter.LogFormatter):\n+ def dropped(self, item, exception, response, spider):\n+ return {\n+ 'level': logging.INFO, # lowering the level from logging.WARNING\n+ 'msg': u\"Dropped: %(exception)s\" + os.linesep + \"%(item)s\",\n+ 'args': {\n+ 'exception': exception,\n+ 'item': item,\n+ }\n+ }\n+ \"\"\"\n+ \n def crawled(self, request, response, spider):\n+ \"\"\"Logs a message when the crawler finds a webpage.\"\"\"\n request_flags = ' %s' % str(request.flags) if request.flags else ''\n response_flags = ' %s' % str(response.flags) if response.flags else ''\n return {\n@@ -40,7 +54,7 @@\n 'args': {\n 'status': response.status,\n 'request': request,\n- 'request_flags' : request_flags,\n+ 'request_flags': request_flags,\n 'referer': referer_str(request),\n 'response_flags': response_flags,\n # backward compatibility with Scrapy logformatter below 1.4 version\n@@ -49,6 +63,7 @@\n }\n \n def scraped(self, item, response, spider):\n+ \"\"\"Logs a message when an item is scraped by a spider.\"\"\"\n if isinstance(response, Failure):\n src = response.getErrorMessage()\n else:\n@@ -63,6 +78,7 @@\n }\n \n def dropped(self, item, exception, response, spider):\n+ \"\"\"Logs a message when an item is dropped while it is passing through the item pipeline.\"\"\"\n return {\n 'level': logging.WARNING,\n 'msg': DROPPEDMSG,\n", "issue": "Document LogFormatter\nCurrently, the `LogFormatter` class is only mentioned in the [Release notes](https://docs.scrapy.org/en/latest/news.html) page of the documentation. 
This class should be properly documented, both its API members and a small section introducing it on the documentation page about [Logging](https://docs.scrapy.org/en/latest/topics/logging.html).\r\n\r\nThe responses to [Scrapy - Silently drop an item](https://stackoverflow.com/q/13527921/939364) in StackOverflow would be a good starting point.\n", "before_files": [{"content": "import os\nimport logging\n\nfrom twisted.python.failure import Failure\n\nfrom scrapy.utils.request import referer_str\n\nSCRAPEDMSG = u\"Scraped from %(src)s\" + os.linesep + \"%(item)s\"\nDROPPEDMSG = u\"Dropped: %(exception)s\" + os.linesep + \"%(item)s\"\nCRAWLEDMSG = u\"Crawled (%(status)s) %(request)s%(request_flags)s (referer: %(referer)s)%(response_flags)s\"\n\n\nclass LogFormatter(object):\n \"\"\"Class for generating log messages for different actions.\n\n All methods must return a dictionary listing the parameters ``level``,\n ``msg`` and ``args`` which are going to be used for constructing the log\n message when calling logging.log.\n\n Dictionary keys for the method outputs:\n * ``level`` should be the log level for that action, you can use those\n from the python logging library: logging.DEBUG, logging.INFO,\n logging.WARNING, logging.ERROR and logging.CRITICAL.\n\n * ``msg`` should be a string that can contain different formatting\n placeholders. This string, formatted with the provided ``args``, is\n going to be the log message for that action.\n\n * ``args`` should be a tuple or dict with the formatting placeholders\n for ``msg``. The final log message is computed as output['msg'] %\n output['args'].\n \"\"\"\n\n def crawled(self, request, response, spider):\n request_flags = ' %s' % str(request.flags) if request.flags else ''\n response_flags = ' %s' % str(response.flags) if response.flags else ''\n return {\n 'level': logging.DEBUG,\n 'msg': CRAWLEDMSG,\n 'args': {\n 'status': response.status,\n 'request': request,\n 'request_flags' : request_flags,\n 'referer': referer_str(request),\n 'response_flags': response_flags,\n # backward compatibility with Scrapy logformatter below 1.4 version\n 'flags': response_flags\n }\n }\n\n def scraped(self, item, response, spider):\n if isinstance(response, Failure):\n src = response.getErrorMessage()\n else:\n src = response\n return {\n 'level': logging.DEBUG,\n 'msg': SCRAPEDMSG,\n 'args': {\n 'src': src,\n 'item': item,\n }\n }\n\n def dropped(self, item, exception, response, spider):\n return {\n 'level': logging.WARNING,\n 'msg': DROPPEDMSG,\n 'args': {\n 'exception': exception,\n 'item': item,\n }\n }\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls()\n", "path": "scrapy/logformatter.py"}]}
1,390
859
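The docstring that the diff adds carries its own `PoliteLogFormatter` example; made runnable it looks like the sketch below. Wiring it in goes through Scrapy's `LOG_FORMATTER` setting, and the dotted path in the trailing comment is a placeholder project path, not anything from the record.

```python
import logging
import os

from scrapy import logformatter


class PoliteLogFormatter(logformatter.LogFormatter):
    """Demote 'Dropped:' messages from WARNING to INFO."""

    def dropped(self, item, exception, response, spider):
        return {
            'level': logging.INFO,  # lowered from logging.WARNING
            'msg': u"Dropped: %(exception)s" + os.linesep + "%(item)s",
            'args': {
                'exception': exception,
                'item': item,
            },
        }

# settings.py (placeholder module path):
# LOG_FORMATTER = 'myproject.logformatters.PoliteLogFormatter'
```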
gh_patches_debug_32933
rasdani/github-patches
git_diff
kserve__kserve-524
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Sample of image_transformer does not work /kind bug Sample under docs/samples/transformer/image_transformer is broken, there's python error in it. **What steps did you take and what happened:** [A clear and concise description of what the bug is.] It's due to PR #492, kfmodel and kfserver is refactored now but the sample still inherit from transformer which does not exist now. Also some other symbols need be renamed. **What did you expect to happen:** Sample still works </issue> <code> [start of docs/samples/transformer/image_transformer/image_transformer/__main__.py] 1 # Copyright 2019 kubeflow.org. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import kfserving 16 import argparse 17 from .image_transformer import ImageTransformer 18 19 DEFAULT_MODEL_NAME = "model" 20 21 parser = argparse.ArgumentParser(parents=[kfserving.server.parser]) 22 parser.add_argument('--model_name', default=DEFAULT_MODEL_NAME, 23 help='The name that the model is served under.') 24 parser.add_argument('--predictor_host', help='The URL for the model predict function', required=True) 25 26 args, _ = parser.parse_known_args() 27 28 if __name__ == "__main__": 29 transformer = ImageTransformer(args.model_name, predictor_host=args.predictor_host, 30 protocol=args.protocol) 31 kfserver = kfserving.KFServer() 32 kfserver.start(models=[transformer]) 33 [end of docs/samples/transformer/image_transformer/image_transformer/__main__.py] [start of docs/samples/transformer/image_transformer/image_transformer/image_transformer.py] 1 # Copyright 2019 kubeflow.org. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 15 import kfserving 16 from typing import List, Dict 17 from kfserving.transformer import Transformer 18 from PIL import Image 19 import torchvision.transforms as transforms 20 import logging 21 import io 22 import numpy as np 23 import base64 24 25 logging.basicConfig(level=kfserving.constants.KFSERVING_LOGLEVEL) 26 27 transform = transforms.Compose( 28 [transforms.ToTensor(), 29 transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) 30 31 32 def image_transform(instance): 33 byte_array = base64.b64decode(instance['image_bytes']['b64']) 34 image = Image.open(io.BytesIO(byte_array)) 35 a = np.asarray(image) 36 im = Image.fromarray(a) 37 res = transform(im) 38 logging.info(res) 39 return res.tolist() 40 41 42 class ImageTransformer(Transformer): 43 44 def preprocess(self, inputs: Dict) -> Dict: 45 return {'instances': [image_transform(instance) for instance in inputs['instances']]} 46 47 def postprocess(self, inputs: List) -> List: 48 return inputs 49 [end of docs/samples/transformer/image_transformer/image_transformer/image_transformer.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/samples/transformer/image_transformer/image_transformer/__main__.py b/docs/samples/transformer/image_transformer/image_transformer/__main__.py --- a/docs/samples/transformer/image_transformer/image_transformer/__main__.py +++ b/docs/samples/transformer/image_transformer/image_transformer/__main__.py @@ -18,7 +18,7 @@ DEFAULT_MODEL_NAME = "model" -parser = argparse.ArgumentParser(parents=[kfserving.server.parser]) +parser = argparse.ArgumentParser(parents=[kfserving.kfserver.parser]) parser.add_argument('--model_name', default=DEFAULT_MODEL_NAME, help='The name that the model is served under.') parser.add_argument('--predictor_host', help='The URL for the model predict function', required=True) @@ -26,7 +26,6 @@ args, _ = parser.parse_known_args() if __name__ == "__main__": - transformer = ImageTransformer(args.model_name, predictor_host=args.predictor_host, - protocol=args.protocol) + transformer = ImageTransformer(args.model_name, predictor_host=args.predictor_host) kfserver = kfserving.KFServer() kfserver.start(models=[transformer]) diff --git a/docs/samples/transformer/image_transformer/image_transformer/image_transformer.py b/docs/samples/transformer/image_transformer/image_transformer/image_transformer.py --- a/docs/samples/transformer/image_transformer/image_transformer/image_transformer.py +++ b/docs/samples/transformer/image_transformer/image_transformer/image_transformer.py @@ -14,7 +14,6 @@ import kfserving from typing import List, Dict -from kfserving.transformer import Transformer from PIL import Image import torchvision.transforms as transforms import logging @@ -39,7 +38,10 @@ return res.tolist() -class ImageTransformer(Transformer): +class ImageTransformer(kfserving.KFModel): + def __init__(self, name: str, predictor_host: str): + super().__init__(name) + self.predictor_host = predictor_host def preprocess(self, inputs: Dict) -> Dict: return {'instances': [image_transform(instance) for instance in inputs['instances']]}
{"golden_diff": "diff --git a/docs/samples/transformer/image_transformer/image_transformer/__main__.py b/docs/samples/transformer/image_transformer/image_transformer/__main__.py\n--- a/docs/samples/transformer/image_transformer/image_transformer/__main__.py\n+++ b/docs/samples/transformer/image_transformer/image_transformer/__main__.py\n@@ -18,7 +18,7 @@\n \n DEFAULT_MODEL_NAME = \"model\"\n \n-parser = argparse.ArgumentParser(parents=[kfserving.server.parser])\n+parser = argparse.ArgumentParser(parents=[kfserving.kfserver.parser])\n parser.add_argument('--model_name', default=DEFAULT_MODEL_NAME,\n help='The name that the model is served under.')\n parser.add_argument('--predictor_host', help='The URL for the model predict function', required=True)\n@@ -26,7 +26,6 @@\n args, _ = parser.parse_known_args()\n \n if __name__ == \"__main__\":\n- transformer = ImageTransformer(args.model_name, predictor_host=args.predictor_host,\n- protocol=args.protocol)\n+ transformer = ImageTransformer(args.model_name, predictor_host=args.predictor_host)\n kfserver = kfserving.KFServer()\n kfserver.start(models=[transformer])\ndiff --git a/docs/samples/transformer/image_transformer/image_transformer/image_transformer.py b/docs/samples/transformer/image_transformer/image_transformer/image_transformer.py\n--- a/docs/samples/transformer/image_transformer/image_transformer/image_transformer.py\n+++ b/docs/samples/transformer/image_transformer/image_transformer/image_transformer.py\n@@ -14,7 +14,6 @@\n \n import kfserving\n from typing import List, Dict\n-from kfserving.transformer import Transformer\n from PIL import Image\n import torchvision.transforms as transforms\n import logging\n@@ -39,7 +38,10 @@\n return res.tolist()\n \n \n-class ImageTransformer(Transformer):\n+class ImageTransformer(kfserving.KFModel):\n+ def __init__(self, name: str, predictor_host: str):\n+ super().__init__(name)\n+ self.predictor_host = predictor_host\n \n def preprocess(self, inputs: Dict) -> Dict:\n return {'instances': [image_transform(instance) for instance in inputs['instances']]}\n", "issue": "Sample of image_transformer does not work\n/kind bug\r\nSample under docs/samples/transformer/image_transformer is broken, there's python error in it.\r\n\r\n**What steps did you take and what happened:**\r\n[A clear and concise description of what the bug is.]\r\nIt's due to PR #492, kfmodel and kfserver is refactored now but the sample still inherit from transformer which does not exist now. 
Also some other symbols need be renamed.\r\n\r\n**What did you expect to happen:**\r\nSample still works\r\n\n", "before_files": [{"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport kfserving\nimport argparse\nfrom .image_transformer import ImageTransformer\n\nDEFAULT_MODEL_NAME = \"model\"\n\nparser = argparse.ArgumentParser(parents=[kfserving.server.parser])\nparser.add_argument('--model_name', default=DEFAULT_MODEL_NAME,\n help='The name that the model is served under.')\nparser.add_argument('--predictor_host', help='The URL for the model predict function', required=True)\n\nargs, _ = parser.parse_known_args()\n\nif __name__ == \"__main__\":\n transformer = ImageTransformer(args.model_name, predictor_host=args.predictor_host,\n protocol=args.protocol)\n kfserver = kfserving.KFServer()\n kfserver.start(models=[transformer])\n", "path": "docs/samples/transformer/image_transformer/image_transformer/__main__.py"}, {"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport kfserving\nfrom typing import List, Dict\nfrom kfserving.transformer import Transformer\nfrom PIL import Image\nimport torchvision.transforms as transforms\nimport logging\nimport io\nimport numpy as np\nimport base64\n\nlogging.basicConfig(level=kfserving.constants.KFSERVING_LOGLEVEL)\n\ntransform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n\ndef image_transform(instance):\n byte_array = base64.b64decode(instance['image_bytes']['b64'])\n image = Image.open(io.BytesIO(byte_array))\n a = np.asarray(image)\n im = Image.fromarray(a)\n res = transform(im)\n logging.info(res)\n return res.tolist()\n\n\nclass ImageTransformer(Transformer):\n\n def preprocess(self, inputs: Dict) -> Dict:\n return {'instances': [image_transform(instance) for instance in inputs['instances']]}\n\n def postprocess(self, inputs: List) -> List:\n return inputs\n", "path": "docs/samples/transformer/image_transformer/image_transformer/image_transformer.py"}]}
1,503
488
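Stripped of the torchvision preprocessing, the corrected class from the golden diff reduces to the skeleton below: subclass `kfserving.KFModel` directly and carry `predictor_host` through `__init__`. The pass-through method bodies are placeholders; the real sample base64-decodes and normalizes images.

```python
from typing import Dict, List

import kfserving


class ImageTransformer(kfserving.KFModel):
    def __init__(self, name: str, predictor_host: str):
        super().__init__(name)          # KFModel keeps the model name
        self.predictor_host = predictor_host

    def preprocess(self, inputs: Dict) -> Dict:
        # Placeholder: the real sample decodes and transforms each image.
        return inputs

    def postprocess(self, inputs: List) -> List:
        return inputs
```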
gh_patches_debug_53989
rasdani/github-patches
git_diff
mkdocs__mkdocs-1329
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Provide length of TableOfContents Currently, you can only iter over `TableOfContents`. I would like to know the length of it. </issue> <code> [start of mkdocs/toc.py] 1 # coding: utf-8 2 3 """ 4 Deals with generating the per-page table of contents. 5 6 For the sake of simplicity we use an existing markdown extension to generate 7 an HTML table of contents, and then parse that into the underlying data. 8 9 The steps we take to generate a table of contents are: 10 11 * Pre-process the markdown, injecting a [TOC] marker. 12 * Generate HTML from markdown. 13 * Post-process the HTML, spliting the content and the table of contents. 14 * Parse table of contents HTML into the underlying data structure. 15 """ 16 17 from __future__ import unicode_literals 18 19 try: # pragma: no cover 20 from html.parser import HTMLParser # noqa 21 except ImportError: # pragma: no cover 22 from HTMLParser import HTMLParser # noqa 23 24 25 class TableOfContents(object): 26 """ 27 Represents the table of contents for a given page. 28 """ 29 def __init__(self, html): 30 self.items = _parse_html_table_of_contents(html) 31 32 def __iter__(self): 33 return iter(self.items) 34 35 def __str__(self): 36 return ''.join([str(item) for item in self]) 37 38 39 class AnchorLink(object): 40 """ 41 A single entry in the table of contents. 42 """ 43 def __init__(self, title, url): 44 self.title, self.url = title, url 45 self.children = [] 46 47 def __str__(self): 48 return self.indent_print() 49 50 def indent_print(self, depth=0): 51 indent = ' ' * depth 52 ret = '%s%s - %s\n' % (indent, self.title, self.url) 53 for item in self.children: 54 ret += item.indent_print(depth + 1) 55 return ret 56 57 58 class TOCParser(HTMLParser): 59 60 def __init__(self): 61 HTMLParser.__init__(self) 62 self.links = [] 63 64 self.in_anchor = False 65 self.attrs = None 66 self.title = '' 67 68 # Prior to Python3.4 no convert_charrefs keyword existed. 69 # However, in Python3.5 the default was changed to True. 70 # We need the False behavior in all versions but can only 71 # set it if it exists. 72 if hasattr(self, 'convert_charrefs'): 73 self.convert_charrefs = False 74 75 def handle_starttag(self, tag, attrs): 76 77 if not self.in_anchor: 78 if tag == 'a': 79 self.in_anchor = True 80 self.attrs = dict(attrs) 81 82 def handle_endtag(self, tag): 83 if tag == 'a': 84 self.in_anchor = False 85 86 def handle_data(self, data): 87 88 if self.in_anchor: 89 self.title += data 90 91 def handle_charref(self, ref): 92 self.handle_entityref("#" + ref) 93 94 def handle_entityref(self, ref): 95 self.handle_data("&%s;" % ref) 96 97 98 def _parse_html_table_of_contents(html): 99 """ 100 Given a table of contents string that has been automatically generated by 101 the markdown library, parse it into a tree of AnchorLink instances. 102 103 Returns a list of all the parent AnchorLink instances. 104 """ 105 lines = html.splitlines()[2:-2] 106 parents = [] 107 ret = [] 108 for line in lines: 109 parser = TOCParser() 110 parser.feed(line) 111 if parser.title: 112 try: 113 href = parser.attrs['href'] 114 except KeyError: 115 continue 116 title = parser.title 117 nav = AnchorLink(title, href) 118 # Add the item to its parent if required. If it is a topmost 119 # item then instead append it to our return value. 
120 if parents: 121 parents[-1].children.append(nav) 122 else: 123 ret.append(nav) 124 # If this item has children, store it as the current parent 125 if line.endswith('<ul>'): 126 parents.append(nav) 127 elif line.startswith('</ul>'): 128 if parents: 129 parents.pop() 130 131 # For the table of contents, always mark the first element as active 132 if ret: 133 ret[0].active = True 134 135 return ret 136 [end of mkdocs/toc.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mkdocs/toc.py b/mkdocs/toc.py --- a/mkdocs/toc.py +++ b/mkdocs/toc.py @@ -32,6 +32,9 @@ def __iter__(self): return iter(self.items) + def __len__(self): + return len(self.items) + def __str__(self): return ''.join([str(item) for item in self])
{"golden_diff": "diff --git a/mkdocs/toc.py b/mkdocs/toc.py\n--- a/mkdocs/toc.py\n+++ b/mkdocs/toc.py\n@@ -32,6 +32,9 @@\n def __iter__(self):\n return iter(self.items)\n \n+ def __len__(self):\n+ return len(self.items)\n+\n def __str__(self):\n return ''.join([str(item) for item in self])\n", "issue": "Provide length of TableOfContents\nCurrently, you can only iter over `TableOfContents`. I would like to know the length of it.\n", "before_files": [{"content": "# coding: utf-8\n\n\"\"\"\nDeals with generating the per-page table of contents.\n\nFor the sake of simplicity we use an existing markdown extension to generate\nan HTML table of contents, and then parse that into the underlying data.\n\nThe steps we take to generate a table of contents are:\n\n* Pre-process the markdown, injecting a [TOC] marker.\n* Generate HTML from markdown.\n* Post-process the HTML, spliting the content and the table of contents.\n* Parse table of contents HTML into the underlying data structure.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\ntry: # pragma: no cover\n from html.parser import HTMLParser # noqa\nexcept ImportError: # pragma: no cover\n from HTMLParser import HTMLParser # noqa\n\n\nclass TableOfContents(object):\n \"\"\"\n Represents the table of contents for a given page.\n \"\"\"\n def __init__(self, html):\n self.items = _parse_html_table_of_contents(html)\n\n def __iter__(self):\n return iter(self.items)\n\n def __str__(self):\n return ''.join([str(item) for item in self])\n\n\nclass AnchorLink(object):\n \"\"\"\n A single entry in the table of contents.\n \"\"\"\n def __init__(self, title, url):\n self.title, self.url = title, url\n self.children = []\n\n def __str__(self):\n return self.indent_print()\n\n def indent_print(self, depth=0):\n indent = ' ' * depth\n ret = '%s%s - %s\\n' % (indent, self.title, self.url)\n for item in self.children:\n ret += item.indent_print(depth + 1)\n return ret\n\n\nclass TOCParser(HTMLParser):\n\n def __init__(self):\n HTMLParser.__init__(self)\n self.links = []\n\n self.in_anchor = False\n self.attrs = None\n self.title = ''\n\n # Prior to Python3.4 no convert_charrefs keyword existed.\n # However, in Python3.5 the default was changed to True.\n # We need the False behavior in all versions but can only\n # set it if it exists.\n if hasattr(self, 'convert_charrefs'):\n self.convert_charrefs = False\n\n def handle_starttag(self, tag, attrs):\n\n if not self.in_anchor:\n if tag == 'a':\n self.in_anchor = True\n self.attrs = dict(attrs)\n\n def handle_endtag(self, tag):\n if tag == 'a':\n self.in_anchor = False\n\n def handle_data(self, data):\n\n if self.in_anchor:\n self.title += data\n\n def handle_charref(self, ref):\n self.handle_entityref(\"#\" + ref)\n\n def handle_entityref(self, ref):\n self.handle_data(\"&%s;\" % ref)\n\n\ndef _parse_html_table_of_contents(html):\n \"\"\"\n Given a table of contents string that has been automatically generated by\n the markdown library, parse it into a tree of AnchorLink instances.\n\n Returns a list of all the parent AnchorLink instances.\n \"\"\"\n lines = html.splitlines()[2:-2]\n parents = []\n ret = []\n for line in lines:\n parser = TOCParser()\n parser.feed(line)\n if parser.title:\n try:\n href = parser.attrs['href']\n except KeyError:\n continue\n title = parser.title\n nav = AnchorLink(title, href)\n # Add the item to its parent if required. 
If it is a topmost\n # item then instead append it to our return value.\n if parents:\n parents[-1].children.append(nav)\n else:\n ret.append(nav)\n # If this item has children, store it as the current parent\n if line.endswith('<ul>'):\n parents.append(nav)\n elif line.startswith('</ul>'):\n if parents:\n parents.pop()\n\n # For the table of contents, always mark the first element as active\n if ret:\n ret[0].active = True\n\n return ret\n", "path": "mkdocs/toc.py"}]}
1,745
99
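With the three-line `__len__` from the diff in place, a `TableOfContents` answers `len()` as well as iteration. A quick check against the parser shown in the record's `toc.py` (the HTML literal imitates what the markdown `toc` extension emits, including the wrapper lines that `splitlines()[2:-2]` strips; the import assumes the module layout shown in the record):

```python
from mkdocs.toc import TableOfContents

html = """<div class="toc">
<ul>
<li><a href="#intro">Intro</a></li>
<li><a href="#usage">Usage</a></li>
</ul>
</div>"""

toc = TableOfContents(html)
print(len(toc))                     # 2, enabled by the new __len__
print([item.url for item in toc])   # ['#intro', '#usage']
```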